# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
import string
install_error_pattern = re.compile("Error: (.*)$", re.MULTILINE)
def log_install_errors(ctx, output):
"""
    Log a warning for each 'Error:' line found in the output
:param ctx:
:param output:
:return: nothing
"""
errors = re.findall(install_error_pattern, output)
for line in errors:
ctx.warning(line)
def number_of_rsp(ctx):
"""
Determine the number of RSP's in the chassis
:param ctx:
:return: the number of RSP's
"""
platforms = ['ASR-902', 'ASR-920']
count = 0
valid_count = ['1', '2']
if ctx._connection.platform in platforms:
count = 1
return count
output = ctx.send("show platform | count RSP")
if output:
m = re.search('Number.*= (\d+)', output)
if m:
count = m.group(1)
if count not in valid_count:
ctx.error("Invalid RSP count: {}".format(count))
else:
count = int(count)
return count
def install_folder(ctx):
"""
Determine the image folder
'File: bootflash:/Image/packages.conf, on: RP0'
'File: consolidated:packages.conf, on: RP0'
:param ctx
:return: the image folder
"""
folder = 'bootflash:/Image'
output = ctx.send("show version running | include packages.conf")
if output:
m = re.search('File: (.*)/?packages.conf', output)
if m:
folder = m.group(1)
folder = re.sub("/$", "", folder)
if folder == 'consolidated:':
folder = 'bootflash:/Image'
return folder
def create_folder(ctx, folder):
"""
    Create the image folder if it does not already exist.
    :param ctx
    :param folder: the folder to be created
:return: True: Success, False: Failed
"""
output = ctx.send('dir ' + folder)
m = re.search('%Error opening', output)
if m:
cmd = 'mkdir ' + folder
ctx.send(cmd, wait_for_string="Create directory filename")
ctx.send('\r\n')
else:
return True
output = ctx.send('dir ' + folder)
m = re.search('%Error opening', output)
if m:
return False
else:
return True
def available_space(ctx, device):
"""
Determine the available space on device such as bootflash or stby-bootflash:
:param ctx:
:param device: bootflash / stby-bootflash:
:return: the available space
"""
available = -1
output = ctx.send('dir ' + device)
m = re.search('(\d+) bytes free', output)
if m:
available = int(m.group(1))
return available
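# Illustrative sketch (not part of the original module): how available_space()
# might be used to decide whether an image copy can proceed.  The plugin
# context `ctx`, the image size, and the headroom figure are assumptions for
# the example, not values defined here.
def _example_has_room_for_image(ctx, image_size, headroom=250000000):
    """Return True if bootflash: has room for the image plus some headroom."""
    free = available_space(ctx, 'bootflash:')
    if free < 0:
        # 'dir bootflash:' output could not be parsed
        return False
    return free >= image_size + headroom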
def installed_package_name(ctx, pkg_conf):
"""
:param: ctx
:param: pkg_conf such as bootflash:/Image/packages.conf
:return: the installed package name
"""
output = ctx.send('dir ' + pkg_conf)
if not output:
ctx.error("dir {} failed".format(pkg_conf))
return None
m = re.search('No such file', output)
if m:
ctx.info('{} does not exist'.format(pkg_conf))
return None
cmd = "more " + pkg_conf + " | include PackageName"
output = ctx.send(cmd)
m = re.search('pkginfo: PackageName: (.*)$', output)
if m:
img_name = m.group(1)
ctx.info("installed_package_name: installed "
"name = {}".format(img_name))
return img_name
else:
ctx.info("PackageName is not found in {}".format(pkg_conf))
return None
def installed_package_version(ctx):
"""
:param: ctx
    :return: the installed package version
"""
# cmd = "more " + pkg_conf + " | include Build:"
# pkginfo: Build: 03.14.03.S.155-1.S3-std
# output = ctx.send(cmd)
# m = re.search('pkginfo: Build: (.*)$', output)
cmd = 'show version | include Cisco IOS XE Software'
# Cisco IOS XE Software, Version 03.13.03.S - Extended Support Release
output = ctx.send(cmd)
m = re.search('Version (.*) -', output)
if m:
bld_version = m.group(1)
ctx.info("installed_package_version: installed "
"version = {}".format(bld_version))
return bld_version
else:
ctx.info("Build version is not found in show version: {}".format(output))
return None
def installed_package_device(ctx):
"""
:param: ctx
:return: device_type with rsp version ie asr900rsp2
"""
cmd = 'show version running | include File:'
# File: bootflash:/Image/asr900rsp2-rpbase.03.13.03.S.154-3.S3-ext.pkg, on: RP0
img_dev = None
output = ctx.send(cmd)
if output:
lines = string.split(output, '\n')
lines = [x for x in lines if x]
for line in lines:
m = re.search('File: .*(asr.*)-\w+.\d+', line)
if m:
img_dev = m.group(1)
break
ctx.info("installed_package_device: device type = {}".format(img_dev))
return img_dev
def install_package_family(pkg):
"""
:param: pkg ie asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin
:return: device_type of the installed image ie asr900
"""
img_dev = None
m = re.search('(asr\d+)\w*', pkg)
if m:
img_dev = m.group(1)
return img_dev
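# Example (illustrative only): for the file name
# 'asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin' the regex above matches
# 'asr900', so install_package_family() returns 'asr900'; a name without an
# 'asr<digits>' prefix returns None.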
def install_add_remove(ctx, cmd):
"""
Execute the copy command
:param ctx
:param cmd
:return: nothing
"""
message = "Waiting the operation to continue"
ctx.info(message)
ctx.post_status(message)
ctx.send(cmd, wait_for_string="Destination filename")
output = ctx.send("\r\n\r\n\r\n", timeout=3600)
result = re.search("\d+ bytes copied in .* secs", output)
if result:
ctx.info("Command {} finished successfully".format(cmd))
return
else:
log_install_errors(ctx, output)
ctx.error("Command {} failed".format(cmd))
def check_pkg_conf(ctx, folder):
"""
    Check that every package listed in packages.conf matches the expected OS version
:param ctx
:param folder: i.e. bootflash:/Image
:return: True or False
"""
pkg_conf = folder + '/packages.conf'
output = ctx.send('more ' + pkg_conf + ' | include pkg$')
if not output:
return False
lines = string.split(output, '\n')
lines = [x for x in lines if x]
for line in lines:
if ctx._connection.os_version not in line:
return False
return True
def remove_exist_image(ctx, package):
"""
    Remove an existing image or package file, if present
:param ctx
:param package
:return: True or False
"""
output = ctx.send('dir ' + package)
m = re.search('No such file', output)
if m:
return True
else:
cmd = "del /force {}".format(package)
ctx.send(cmd)
ctx.info("Removing files : {}".format(package))
        # Verify that the file(s) are actually gone
        output = ctx.send('dir ' + package)
m = re.search('No such file', output)
if m:
return True
else:
return False
def remove_exist_subpkgs(ctx, folder, pkg):
"""
Remove residual packages from the earlier installations
:param ctx
    :param folder: i.e. bootflash:/Image
    :param pkg: the package (.bin) file being installed, which should be kept
:return: True or False
"""
pkg_conf = folder + '/packages.conf'
# Skip if no packages.conf
output = ctx.send('dir ' + pkg_conf)
if not output:
ctx.error("dir {} failed".format(pkg_conf))
return
m = re.search('No such file', output)
if m:
ctx.info('Booted from consolidated mode: '
'{} does not exist'.format(pkg_conf))
return
# Discover package name, version, and image device
img_name = installed_package_name(ctx, pkg_conf)
bld_version = installed_package_version(ctx)
img_device = installed_package_device(ctx)
if not bld_version or not img_device or not img_name:
ctx.error("Not able to determine the residual files")
return
# Remove all the bin files except the current install pkg
if folder != 'bootflash:':
package_name = folder + '/asr*.bin'
remove_exist_image(ctx, package_name)
else:
package_name = folder + '*.bin'
output = ctx.send('dir ' + package_name + ' | include bin')
if not output:
ctx.error("dir {} failed".format(package_name))
return
lines = string.split(output, '\n')
lines = [x for x in lines if x]
for line in lines:
m = re.search('(asr.*\.bin)', line)
if m:
previous_pkg = m.group(0)
if previous_pkg != pkg:
previous_package = folder + '/' + previous_pkg
remove_exist_image(ctx, previous_package)
# Remove the packages.conf*- file
package_name = folder + '/packages.conf*-'
remove_exist_image(ctx, package_name)
    # Remove residual asr9*.conf files
package_name = folder + '/asr9*.conf'
remove_exist_image(ctx, package_name)
# Remove .pkg files
cmd = 'dir ' + folder + '/*.pkg | include pkg'
# Directory of bootflash:/Image/*.pkg
# 15107 -rw- 41534024 Sep 8 2016 03:55:47 +00:00 asr900rsp2-espbase.03.14.03.S.155-1.S3-std.pkg
output = ctx.send(cmd)
if not output:
return
lines = string.split(output, '\n')
lines = [x for x in lines if x]
for line in lines:
m = re.search('(asr9.*pkg)', line)
if m:
exfile = m.group(1)
package = folder + '/' + exfile
if bld_version not in package or img_device not in package:
remove_exist_image(ctx, package)
return
def check_issu_readiness(ctx, pkg, image_size):
"""
    Check whether the router is ready for an In-Service Software Upgrade (ISSU)
:param: ctx
:param: pkg
:param: image_size
:return: True or False
"""
# check the current package mode
cmd = 'show version | count packages.conf'
output = ctx.send(cmd)
if output:
m = re.search('Number.*= (\d+)', output)
if m:
count = m.group(1)
if count == '0':
ctx.info("The current boot mode is consolidated package.")
return False
else:
ctx.warning("Invalid show version output: {}".format(output))
return False
else:
ctx.warning("Show version command error!")
return False
# check software compatibility
cmd = 'show version | include System image file'
output = ctx.send(cmd)
if output:
m = re.search('System image file is \"(.*)\"', output)
if m:
pkg_conf = m.group(1)
img_name = installed_package_name(ctx, pkg_conf)
if not img_name:
ctx.warning("Installed package name {} is not found.".format(pkg_conf))
return False
else:
ctx.warning("Show version command error!")
return False
else:
ctx.warning("Show version command error!")
return False
m = re.search('asr.*-(.*)\.\d+\.\d+\.\d+.*', pkg)
if m:
pkg_name = m.group(1)
if img_name != pkg_name:
ctx.info("Incompatible packages: {} vs. {}".format(img_name, pkg_name))
return False
else:
ctx.warning("Package name is not found in {}".format(pkg))
return False
# check image types between RSP's
cmd = 'show version rp active running | include Package'
output = ctx.send(cmd)
cmd = 'show version rp standby running | include Package'
stby_output = ctx.send(cmd)
if output and stby_output:
lines = string.split(output, '\n')
lines = [x for x in lines if x]
# Package: rpbase, version: 03.16.00.S.155-3.S-ext, status: active
for line in lines:
m = re.search('Package: (.*) status', line)
if m:
img_type = m.group(1)
if img_type not in stby_output:
ctx.warning("Mismatched image types:")
ctx.warning("Active rp version: {}".format(output))
ctx.warning("Standby rp version: {}".format(stby_output))
return False
else:
ctx.warning("Invalid package version format: {}".format(line))
return False
else:
ctx.warning("Show version command error!")
return False
# check the required disk space for ISSU
# bootflash: requires additional 250 MB
# stby-bootflash: requires additional 450 MB
total_size = 250000000 + image_size
flash_free = available_space(ctx, 'bootflash:')
if flash_free < total_size:
ctx.info("Total required / bootflash "
"available: {} / {} bytes".format(total_size, flash_free))
ctx.info("Not enough space in bootflash: to perform ISSU. "
"Setting the Router to boot in sub-package mode.")
return False
total_size = 450000000 + image_size
flash_free = available_space(ctx, 'stby-bootflash:')
if flash_free < total_size:
ctx.info("Total required / stby-bootflash "
"available: {} / {} bytes".format(total_size, flash_free))
ctx.info("Not enough space in stby-bootflash: to perform ISSU. "
"Setting the Router to boot in sub-package mode.")
return False
else:
ctx.info("There is enough space on bootflash and stby-bootflash to perform ISSU")
# check show redundancy
cmd = 'show redundancy | include Configured Redundancy Mode'
output = ctx.send(cmd)
if output:
m = re.search('Configured Redundancy Mode = (.*)', output)
if m:
configed_mode = m.group(1)
if configed_mode != 'sso':
ctx.warning("Configured Redundancy Mode = {}".format(configed_mode))
return False
else:
ctx.warning("Show redundancy command error!")
return False
else:
ctx.warning("Show redundancy command error!")
return False
cmd = 'show redundancy | include Operating Redundancy Mode'
output = ctx.send(cmd)
if output:
m = re.search('Operating Redundancy Mode = (.*)', output)
if m:
operating_mode = m.group(1)
if operating_mode != 'sso':
ctx.warning("Operating Redundancy Mode = {}".format(operating_mode))
return False
else:
ctx.warning("Show redundancy command error!")
return False
else:
ctx.warning("Show redundancy command error!")
return False
cmd = 'show redundancy | include Current Software state'
output = ctx.send(cmd)
if output:
lines = string.split(output, '\n')
lines = [x for x in lines if x]
num_of_line = len(lines)
if num_of_line != 2:
ctx.warning("num_of_line = {}".format(num_of_line))
ctx.warning("Current Software state = {}".format(output))
return False
m = re.search('Current Software state = (.*)', lines[0])
if m:
active_state = m.group(1)
if 'ACTIVE' not in active_state:
ctx.warning("show redundancy Active state check has failed")
ctx.warning("active_state = {}".format(active_state))
ctx.warning("Current Software state = {}".format(lines[0]))
return False
else:
ctx.warning("Show redundancy command error!")
return False
m = re.search('Current Software state = (.*)', lines[1])
if m:
stby_state = m.group(1)
if 'STANDBY HOT' not in stby_state:
ctx.warning("show redundancy STANDBY HOT state check has failed")
ctx.warning("stby_state = {}".format(stby_state))
ctx.warning("Current Software state = {}".format(lines[1]))
return False
else:
ctx.warning("Show redundancy command error!")
return False
else:
ctx.warning("Show redundancy command error!")
return False
return True
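# Illustrative sketch (not part of the original module): how the helpers above
# might be combined to choose between ISSU and sub-package installation.  The
# plugin context `ctx`, the package name `pkg`, and `image_size` are assumed to
# be supplied by the caller, as in check_issu_readiness().
def _example_select_install_mode(ctx, pkg, image_size):
    """Return 'issu' for a dual-RSP chassis that passes the readiness checks,
    otherwise 'sub-package'."""
    if number_of_rsp(ctx) == 2 and check_issu_readiness(ctx, pkg, image_size):
        return 'issu'
    return 'sub-package'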
def xe_show_platform(ctx):
"""
Parse show platform output to extract the RP and SIP status
:param: ctx
:return: dictionary
0 1 2 3 4 5 6
012345678901234567890123456789012345678901234567890123456789012345678
Slot Type State Insert time (ago)
--------- ------------------- --------------------- -----------------
0/0 12xGE-2x10GE-FIXED ok 15:09:04
R1 A900-RSP2A-128 ok, active 14:09:30
"""
platform_info = {}
cmd = 'show platform'
output = ctx.send(cmd)
if output:
lines = string.split(output, '\n')
lines = [x for x in lines if x]
sip0 = False
for line in lines:
if not sip0:
m = re.search('--------- ------------------- '
'--------------------- -----------------', line)
if m:
sip0 = True
continue
m = re.search('Slot CPLD Version Firmware Version', line)
if m:
break
Slot = line[:8].strip()
Type = line[10:28].strip()
State = line[30:50].strip()
m1 = re.search('^0\/\d+', Slot)
m2 = re.search('^R\d+', Slot)
if m1 or m2:
platform_info[Slot] = [Type, State]
return platform_info
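# Illustrative sketch (not part of the original module): consuming the
# dictionary returned by xe_show_platform() to verify that every detected slot
# reports an 'ok' state (e.g. 'ok', 'ok, active', 'ok, standby') before an
# install is attempted.  The set of acceptable states is an assumption made
# for this example.
def _example_all_slots_ok(ctx):
    platform_info = xe_show_platform(ctx)
    for slot, (slot_type, state) in platform_info.items():
        if not state.startswith('ok'):
            ctx.warning("Slot {} ({}) is in state '{}'".format(slot, slot_type, state))
            return False
    return True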
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import pytz
import numpy as np
import pandas as pd
from datetime import datetime
from unittest import TestCase
from zipline.utils.test_utils import setup_logger
from zipline.sources.data_source import DataSource
import zipline.utils.factory as factory
from zipline.transforms import batch_transform
from zipline.test_algorithms import (BatchTransformAlgorithm,
BatchTransformAlgorithmMinute,
ReturnPriceBatchTransform)
from zipline.algorithm import TradingAlgorithm
from zipline.utils.tradingcalendar import trading_days
from copy import deepcopy
@batch_transform
def return_price(data):
return data.price
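# Illustrative sketch (not part of the original tests): the batch_transform
# decorator can wrap any function of the rolling data panel, for example a
# transform returning the mean price over the window instead of the raw prices.
@batch_transform
def mean_price(data):
    return data.price.mean()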
class BatchTransformAlgorithmSetSid(TradingAlgorithm):
def initialize(self, sids=None):
self.history = []
self.batch_transform = return_price(
refresh_period=1,
window_length=10,
clean_nans=False,
sids=sids,
compute_only_full=False
)
def handle_data(self, data):
self.history.append(
deepcopy(self.batch_transform.handle_data(data)))
class DifferentSidSource(DataSource):
def __init__(self):
self.dates = pd.date_range('1990-01-01', periods=180, tz='utc')
self.start = self.dates[0]
self.end = self.dates[-1]
self._raw_data = None
self.sids = range(90)
self.sid = 0
self.trading_days = []
@property
def instance_hash(self):
return '1234'
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
def raw_data_gen(self):
        # Create a different sid for each event
for date in self.dates:
if date not in trading_days:
continue
event = {'dt': date,
'sid': self.sid,
'price': self.sid,
'volume': self.sid}
self.sid += 1
self.trading_days.append(date)
yield event
class TestChangeOfSids(TestCase):
def setUp(self):
self.sids = range(90)
self.sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1990, 1, 8, tzinfo=pytz.utc)
)
def test_all_sids_passed(self):
algo = BatchTransformAlgorithmSetSid(sim_params=self.sim_params)
source = DifferentSidSource()
algo.run(source)
for i, (df, date) in enumerate(zip(algo.history, source.trading_days)):
self.assertEqual(df.index[-1], date, "Newest event doesn't \
match.")
for sid in self.sids[:i]:
self.assertIn(sid, df.columns)
last_elem = len(df) - 1
self.assertEqual(df[last_elem][last_elem], last_elem)
class TestBatchTransformMinutely(TestCase):
def setUp(self):
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
self.sim_params = factory.create_simulation_parameters(
start=start,
end=end,
)
self.sim_params.emission_rate = 'daily'
self.sim_params.data_frequency = 'minute'
setup_logger(self)
self.source, self.df = \
factory.create_test_df_source(bars='minute')
def test_core(self):
algo = BatchTransformAlgorithmMinute(sim_params=self.sim_params)
algo.run(self.source)
wl = int(algo.window_length * 6.5 * 60)
for bt in algo.history[wl:]:
self.assertEqual(len(bt), wl)
def test_window_length(self):
algo = BatchTransformAlgorithmMinute(sim_params=self.sim_params,
window_length=1, refresh_period=0)
algo.run(self.source)
wl = int(algo.window_length * 6.5 * 60)
np.testing.assert_array_equal(algo.history[:(wl - 1)],
[None] * (wl - 1))
for bt in algo.history[wl:]:
self.assertEqual(len(bt), wl)
class TestBatchTransform(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1990, 1, 8, tzinfo=pytz.utc)
)
setup_logger(self)
self.source, self.df = \
factory.create_test_df_source(self.sim_params)
def test_core_functionality(self):
algo = BatchTransformAlgorithm(sim_params=self.sim_params)
algo.run(self.source)
wl = algo.window_length
        # The following assertions depend on a window length of 3
self.assertEqual(wl, 3)
# If window_length is 3, there should be 2 None events, as the
# window fills up on the 3rd day.
n_none_events = 2
self.assertEqual(algo.history_return_price_class[:n_none_events],
[None] * n_none_events,
"First two iterations should return None." + "\n" +
"i.e. no returned values until window is full'" +
"%s" % (algo.history_return_price_class,))
self.assertEqual(algo.history_return_price_decorator[:n_none_events],
[None] * n_none_events,
"First two iterations should return None." + "\n" +
"i.e. no returned values until window is full'" +
"%s" % (algo.history_return_price_decorator,))
# After three Nones, the next value should be a data frame
self.assertTrue(isinstance(
algo.history_return_price_class[wl],
pd.DataFrame)
)
# Test whether arbitrary fields can be added to datapanel
field = algo.history_return_arbitrary_fields[-1]
self.assertTrue(
'arbitrary' in field.items,
'datapanel should contain column arbitrary'
)
self.assertTrue(all(
field['arbitrary'].values.flatten() ==
[123] * algo.window_length),
            'arbitrary column should contain only the value 123'
)
for data in algo.history_return_sid_filter[wl:]:
self.assertIn(0, data.columns)
self.assertNotIn(1, data.columns)
for data in algo.history_return_field_filter[wl:]:
self.assertIn('price', data.items)
self.assertNotIn('ignore', data.items)
for data in algo.history_return_field_no_filter[wl:]:
self.assertIn('price', data.items)
self.assertIn('ignore', data.items)
for data in algo.history_return_ticks[wl:]:
self.assertTrue(isinstance(data, deque))
for data in algo.history_return_not_full:
self.assertIsNot(data, None)
# test overloaded class
for test_history in [algo.history_return_price_class,
algo.history_return_price_decorator]:
# starting at window length, the window should contain
# consecutive (of window length) numbers up till the end.
for i in range(algo.window_length, len(test_history)):
np.testing.assert_array_equal(
range(i - algo.window_length + 2, i + 2),
test_history[i].values.flatten()
)
def test_passing_of_args(self):
algo = BatchTransformAlgorithm(1, kwarg='str',
sim_params=self.sim_params)
self.assertEqual(algo.args, (1,))
self.assertEqual(algo.kwargs, {'kwarg': 'str'})
algo.run(self.source)
expected_item = ((1, ), {'kwarg': 'str'})
self.assertEqual(
algo.history_return_args,
[
# 1990-01-01 - market holiday, no event
# 1990-01-02 - window not full
None,
# 1990-01-03 - window not full
None,
# 1990-01-04 - window now full, 3rd event
expected_item,
# 1990-01-05 - window now full
expected_item,
# 1990-01-08 - window now full
expected_item
])
def run_batchtransform(window_length=10):
sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1995, 1, 8, tzinfo=pytz.utc)
)
source, df = factory.create_test_df_source(sim_params)
return_price_class = ReturnPriceBatchTransform(
refresh_period=1,
window_length=window_length,
clean_nans=False
)
for raw_event in source:
raw_event['datetime'] = raw_event.dt
event = {0: raw_event}
return_price_class.handle_data(event)
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/quaternion/blob/main/LICENSE>
__version__ = "2022.2.10.14.20.39"
__doc_title__ = "Quaternion dtype for NumPy"
__doc__ = "Adds a quaternion dtype to NumPy."
__all__ = ['quaternion',
'as_quat_array', 'as_spinor_array',
'as_float_array', 'from_float_array',
'as_vector_part', 'from_vector_part',
'as_rotation_matrix', 'from_rotation_matrix',
'as_rotation_vector', 'from_rotation_vector',
'as_euler_angles', 'from_euler_angles',
'as_spherical_coords', 'from_spherical_coords',
'rotate_vectors', 'allclose',
'rotor_intrinsic_distance', 'rotor_chordal_distance',
'rotation_intrinsic_distance', 'rotation_chordal_distance',
'slerp_evaluate', 'squad_evaluate',
'zero', 'one', 'x', 'y', 'z', 'integrate_angular_velocity',
'squad', 'slerp', 'derivative', 'definite_integral', 'indefinite_integral']
import numpy as np
from .numpy_quaternion import (
quaternion, _eps, slerp_evaluate, squad_evaluate,
# slerp_vectorized, squad_vectorized, slerp, squad,
)
from .quaternion_time_series import (
unflip_rotors, slerp, squad, integrate_angular_velocity, minimal_rotation, angular_velocity,
)
from .calculus import (
derivative, antiderivative, definite_integral, indefinite_integral,
fd_derivative, fd_definite_integral, fd_indefinite_integral,
spline_derivative, spline_definite_integral, spline_indefinite_integral,
)
try:
from .calculus import spline
except:
pass
from .means import (
mean_rotor_in_chordal_metric,
optimal_alignment_in_chordal_metric,
optimal_alignment_in_Euclidean_metric
)
np.quaternion = quaternion
np.sctypeDict['quaternion'] = np.dtype(quaternion)
zero = np.quaternion(0, 0, 0, 0)
one = np.quaternion(1, 0, 0, 0)
x = np.quaternion(0, 1, 0, 0)
y = np.quaternion(0, 0, 1, 0)
z = np.quaternion(0, 0, 0, 1)
rotor_intrinsic_distance = np.rotor_intrinsic_distance
rotor_chordal_distance = np.rotor_chordal_distance
rotation_intrinsic_distance = np.rotation_intrinsic_distance
rotation_chordal_distance = np.rotation_chordal_distance
def as_float_array(a):
"""View the quaternion array as an array of floats
This function is fast (of order 1 microsecond) because no data is
copied; the returned quantity is just a "view" of the original.
The output view has one more dimension (of size 4) than the input
array, but is otherwise the same shape. The components along
that last dimension represent the scalar and vector components of
each quaternion in that order: `w`, `x`, `y`, `z`.
"""
return np.asarray(a, dtype=np.quaternion).view((np.double, 4))
def as_quat_array(a):
"""View a float array as an array of quaternions
The input array must have a final dimension whose size is
divisible by four (or better yet *is* 4), because successive
indices in that last dimension will be considered successive
components of the output quaternion. Each set of 4 components
will be interpreted as the scalar and vector components of a
quaternion in that order: `w`, `x`, `y`, `z`.
This function is usually fast (of order 1 microsecond) because no
data is copied; the returned quantity is just a "view" of the
original. However, if the input array is not C-contiguous
(basically, as you increment the index into the last dimension of
the array, you just move to the neighboring float in memory), the
data will need to be copied which may be quite slow. Therefore,
you should try to ensure that the input array is in that order.
Slices and transpositions will frequently break that rule.
We will not convert back from a two-spinor array because there is
no unique convention for them, so I don't want to mess with that.
Also, we want to discourage users from the slow, memory-copying
process of swapping columns required for useful definitions of
the two-spinors.
"""
a = np.asarray(a, dtype=np.double)
# fast path
if a.shape == (4,):
return quaternion(a[0], a[1], a[2], a[3])
# view only works if the last axis is C-contiguous
if not a.flags['C_CONTIGUOUS'] or a.strides[-1] != a.itemsize:
a = a.copy(order='C')
try:
av = a.view(np.quaternion)
except ValueError as e:
message = (str(e) + '\n '
+ 'Failed to view input data as a series of quaternions. '
+ 'Please ensure that the last dimension has size divisible by 4.\n '
+ 'Input data has shape {0} and dtype {1}.'.format(a.shape, a.dtype))
raise ValueError(message)
# special case: don't create an axis for a single quaternion, to
# match the output of `as_float_array`
if av.shape[-1] == 1:
av = av.reshape(a.shape[:-1])
return av
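# Illustrative note (not part of the original module): as_float_array and
# as_quat_array are inverses of one another, so a C-contiguous float array
# whose last axis has size 4 round-trips through the quaternion dtype without
# copying, e.g.
#
#     >>> f = np.random.rand(17, 11, 4)
#     >>> q = as_quat_array(f)                  # shape (17, 11), dtype quaternion
#     >>> np.array_equal(as_float_array(q), f)
#     True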
def from_float_array(a):
return as_quat_array(a)
def from_vector_part(v, vector_axis=-1):
"""Create a quaternion array from an array of the vector parts.
Essentially, this just inserts a 0 in front of each vector part, and
re-interprets the result as a quaternion.
Parameters
----------
v : array_like
Array of vector parts of quaternions. When interpreted as a numpy array,
if the dtype is `quaternion`, the array is returned immediately, and the
        following argument is ignored. Otherwise, it must be a float array with
dimension `vector_axis` of size 3 or 4.
vector_axis : int, optional
The axis to interpret as containing the vector components. The default is
-1.
Returns
-------
q : array of quaternions
Quaternions with vector parts corresponding to input vectors.
"""
v = np.asarray(v)
if v.dtype != np.quaternion:
input_shape = v.shape
if vector_axis != -1:
v = np.moveaxis(v, vector_axis, -1)
if v.shape[-1] == 3:
v = from_float_array(np.insert(v, 0, 0.0, axis=-1))
elif v.shape[-1] == 4:
v = v.copy()
v[..., 0] = 0.0
v = from_float_array(v)
else:
raise ValueError(
"Vector input has shape {0}, which cannot be interpreted as quaternions ".format(input_shape)
+ "with vector axis {0}".format(vector_axis)
)
return v
def as_vector_part(q):
"""Create an array of vector parts from an array of quaternions.
Parameters
----------
q : quaternion array_like
Array of quaternions.
Returns
-------
v : array
Float array of shape `q.shape + (3,)`
"""
q = np.asarray(q, dtype=np.quaternion)
return as_float_array(q)[..., 1:]
def as_spinor_array(a):
"""View a quaternion array as spinors in two-complex representation
This function is relatively slow and scales poorly, because memory
copying is apparently involved -- I think it's due to the
"advanced indexing" required to swap the columns.
"""
a = np.atleast_1d(a)
assert a.dtype == np.dtype(np.quaternion)
# I'm not sure why it has to be so complicated, but all of these steps
# appear to be necessary in this case.
return a.view(np.float64).reshape(a.shape + (4,))[..., [0, 3, 2, 1]].ravel().view(np.complex).reshape(a.shape + (2,))
def as_rotation_matrix(q):
"""Convert input quaternion to 3x3 rotation matrix
For any quaternion `q`, this function returns a matrix `m` such that, for every
vector `v`, we have
m @ v.vec == q * v * q.conjugate()
Here, `@` is the standard python matrix multiplication operator and `v.vec` is
the 3-vector part of the quaternion `v`.
Parameters
----------
q : quaternion or array of quaternions
The quaternion(s) need not be normalized, but must all be nonzero
Returns
-------
m : float array
Output shape is q.shape+(3,3). This matrix should multiply (from
the left) a column vector to produce the rotated column vector.
Raises
------
ZeroDivisionError
If any of the input quaternions have norm 0.0.
"""
if q.shape == () and not isinstance(q, np.ndarray): # This is just a single quaternion
n = q.norm()
if n == 0.0:
raise ZeroDivisionError("Input to `as_rotation_matrix({0})` has zero norm".format(q))
elif abs(n-1.0) < _eps: # Input q is basically normalized
return np.array([
[1 - 2*(q.y**2 + q.z**2), 2*(q.x*q.y - q.z*q.w), 2*(q.x*q.z + q.y*q.w)],
[2*(q.x*q.y + q.z*q.w), 1 - 2*(q.x**2 + q.z**2), 2*(q.y*q.z - q.x*q.w)],
[2*(q.x*q.z - q.y*q.w), 2*(q.y*q.z + q.x*q.w), 1 - 2*(q.x**2 + q.y**2)]
])
else: # Input q is not normalized
return np.array([
[1 - 2*(q.y**2 + q.z**2)/n, 2*(q.x*q.y - q.z*q.w)/n, 2*(q.x*q.z + q.y*q.w)/n],
[2*(q.x*q.y + q.z*q.w)/n, 1 - 2*(q.x**2 + q.z**2)/n, 2*(q.y*q.z - q.x*q.w)/n],
[2*(q.x*q.z - q.y*q.w)/n, 2*(q.y*q.z + q.x*q.w)/n, 1 - 2*(q.x**2 + q.y**2)/n]
])
else: # This is an array of quaternions
n = np.norm(q)
if np.any(n == 0.0):
raise ZeroDivisionError("Array input to `as_rotation_matrix` has at least one element with zero norm")
else: # Assume input q is not normalized
m = np.empty(q.shape + (3, 3))
q = as_float_array(q)
m[..., 0, 0] = 1.0 - 2*(q[..., 2]**2 + q[..., 3]**2)/n
m[..., 0, 1] = 2*(q[..., 1]*q[..., 2] - q[..., 3]*q[..., 0])/n
m[..., 0, 2] = 2*(q[..., 1]*q[..., 3] + q[..., 2]*q[..., 0])/n
m[..., 1, 0] = 2*(q[..., 1]*q[..., 2] + q[..., 3]*q[..., 0])/n
m[..., 1, 1] = 1.0 - 2*(q[..., 1]**2 + q[..., 3]**2)/n
m[..., 1, 2] = 2*(q[..., 2]*q[..., 3] - q[..., 1]*q[..., 0])/n
m[..., 2, 0] = 2*(q[..., 1]*q[..., 3] - q[..., 2]*q[..., 0])/n
m[..., 2, 1] = 2*(q[..., 2]*q[..., 3] + q[..., 1]*q[..., 0])/n
m[..., 2, 2] = 1.0 - 2*(q[..., 1]**2 + q[..., 2]**2)/n
return m
def from_rotation_matrix(rot, nonorthogonal=True):
"""Convert input 3x3 rotation matrix to unit quaternion
For any orthogonal matrix `rot`, this function returns a quaternion `q` such
that, for every pure-vector quaternion `v`, we have
q * v * q.conjugate() == rot @ v.vec
Here, `@` is the standard python matrix multiplication operator and `v.vec` is
the 3-vector part of the quaternion `v`. If `rot` is not orthogonal the
"closest" orthogonal matrix is used; see Notes below.
Parameters
----------
rot : (..., N, 3, 3) float array
Each 3x3 matrix represents a rotation by multiplying (from the left) a
column vector to produce a rotated column vector. Note that this input may
actually have ndims>3; it is just assumed that the last two dimensions have
size 3, representing the matrix.
nonorthogonal : bool, optional
If scipy.linalg is available, use the more robust algorithm of Bar-Itzhack.
Default value is True.
Returns
-------
q : array of quaternions
Unit quaternions resulting in rotations corresponding to input rotations.
Output shape is rot.shape[:-2].
Raises
------
LinAlgError
If any of the eigenvalue solutions does not converge
Notes
-----
By default, if scipy.linalg is available, this function uses Bar-Itzhack's
algorithm to allow for non-orthogonal matrices. [J. Guidance, Vol. 23, No. 6,
p. 1085 <http://dx.doi.org/10.2514/2.4654>] This will almost certainly be quite
a bit slower than simpler versions, though it will be more robust to numerical
errors in the rotation matrix. Also note that Bar-Itzhack uses some pretty
weird conventions. The last component of the quaternion appears to represent
the scalar, and the quaternion itself is conjugated relative to the convention
used throughout this module.
If scipy.linalg is not available or if the optional `nonorthogonal` parameter
is set to `False`, this function falls back to the possibly faster, but less
robust, algorithm of Markley [J. Guidance, Vol. 31, No. 2, p. 440
<http://dx.doi.org/10.2514/1.31730>].
"""
try:
from scipy import linalg
except ImportError:
linalg = False
rot = np.array(rot, copy=False)
shape = rot.shape[:-2]
if linalg and nonorthogonal:
from operator import mul
from functools import reduce
K3 = np.empty(shape+(4, 4))
K3[..., 0, 0] = (rot[..., 0, 0] - rot[..., 1, 1] - rot[..., 2, 2])/3.0
K3[..., 0, 1] = (rot[..., 1, 0] + rot[..., 0, 1])/3.0
K3[..., 0, 2] = (rot[..., 2, 0] + rot[..., 0, 2])/3.0
K3[..., 0, 3] = (rot[..., 1, 2] - rot[..., 2, 1])/3.0
K3[..., 1, 0] = K3[..., 0, 1]
K3[..., 1, 1] = (rot[..., 1, 1] - rot[..., 0, 0] - rot[..., 2, 2])/3.0
K3[..., 1, 2] = (rot[..., 2, 1] + rot[..., 1, 2])/3.0
K3[..., 1, 3] = (rot[..., 2, 0] - rot[..., 0, 2])/3.0
K3[..., 2, 0] = K3[..., 0, 2]
K3[..., 2, 1] = K3[..., 1, 2]
K3[..., 2, 2] = (rot[..., 2, 2] - rot[..., 0, 0] - rot[..., 1, 1])/3.0
K3[..., 2, 3] = (rot[..., 0, 1] - rot[..., 1, 0])/3.0
K3[..., 3, 0] = K3[..., 0, 3]
K3[..., 3, 1] = K3[..., 1, 3]
K3[..., 3, 2] = K3[..., 2, 3]
K3[..., 3, 3] = (rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2])/3.0
if not shape:
q = zero.copy()
eigvals, eigvecs = linalg.eigh(K3.T, eigvals=(3, 3))
q.components[0] = eigvecs[-1]
q.components[1:] = -eigvecs[:-1].flatten()
return q
else:
q = np.empty(shape+(4,), dtype=np.float64)
for flat_index in range(reduce(mul, shape)):
multi_index = np.unravel_index(flat_index, shape)
eigvals, eigvecs = linalg.eigh(K3[multi_index], eigvals=(3, 3))
q[multi_index+(0,)] = eigvecs[-1]
q[multi_index+(slice(1,None),)] = -eigvecs[:-1].flatten()
return as_quat_array(q)
else: # No scipy.linalg or not `nonorthogonal`
diagonals = np.empty(shape+(4,))
diagonals[..., 0] = rot[..., 0, 0]
diagonals[..., 1] = rot[..., 1, 1]
diagonals[..., 2] = rot[..., 2, 2]
diagonals[..., 3] = rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]
indices = np.argmax(diagonals, axis=-1)
q = diagonals # reuse storage space
indices_i = (indices == 0)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
q[indices_i, 1] = 1 + rot_i[..., 0, 0] - rot_i[..., 1, 1] - rot_i[..., 2, 2]
q[indices_i, 2] = rot_i[..., 0, 1] + rot_i[..., 1, 0]
q[indices_i, 3] = rot_i[..., 0, 2] + rot_i[..., 2, 0]
indices_i = (indices == 1)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
q[indices_i, 1] = rot_i[..., 1, 0] + rot_i[..., 0, 1]
q[indices_i, 2] = 1 - rot_i[..., 0, 0] + rot_i[..., 1, 1] - rot_i[..., 2, 2]
q[indices_i, 3] = rot_i[..., 1, 2] + rot_i[..., 2, 1]
indices_i = (indices == 2)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
q[indices_i, 1] = rot_i[..., 2, 0] + rot_i[..., 0, 2]
q[indices_i, 2] = rot_i[..., 2, 1] + rot_i[..., 1, 2]
q[indices_i, 3] = 1 - rot_i[..., 0, 0] - rot_i[..., 1, 1] + rot_i[..., 2, 2]
indices_i = (indices == 3)
if np.any(indices_i):
if indices_i.shape == ():
indices_i = Ellipsis
rot_i = rot[indices_i, :, :]
q[indices_i, 0] = 1 + rot_i[..., 0, 0] + rot_i[..., 1, 1] + rot_i[..., 2, 2]
q[indices_i, 1] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
q[indices_i, 2] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
q[indices_i, 3] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
q /= np.linalg.norm(q, axis=-1)[..., np.newaxis]
return as_quat_array(q)
def as_rotation_vector(q):
"""Convert input quaternion to the axis-angle representation
Note that if any of the input quaternions has norm zero, no error is
raised, but NaNs will appear in the output.
Parameters
----------
q : quaternion or array of quaternions
The quaternion(s) need not be normalized, but must all be nonzero
Returns
-------
rot : float array
Output shape is q.shape+(3,). Each vector represents the axis of
the rotation, with norm proportional to the angle of the rotation in
radians.
"""
return as_float_array(2*np.log(np.normalized(q)))[..., 1:]
def from_rotation_vector(rot):
"""Convert input 3-vector in axis-angle representation to unit quaternion
Parameters
----------
rot : (Nx3) float array
Each vector represents the axis of the rotation, with norm
proportional to the angle of the rotation in radians.
Returns
-------
q : array of quaternions
Unit quaternions resulting in rotations corresponding to input
rotations. Output shape is rot.shape[:-1].
"""
rot = np.array(rot, copy=False)
quats = np.zeros(rot.shape[:-1]+(4,))
quats[..., 1:] = rot[...]/2
quats = as_quat_array(quats)
return np.exp(quats)
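# Illustrative note (not part of the original module): from_rotation_vector and
# as_rotation_vector invert one another (up to the usual 2*pi ambiguity in the
# rotation angle), e.g. for a single rotation of pi/2 about the z axis:
#
#     >>> rot = np.array([[0.0, 0.0, np.pi/2]])
#     >>> q = from_rotation_vector(rot)          # array of one unit quaternion
#     >>> np.allclose(as_rotation_vector(q), rot)
#     True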
def as_euler_angles(q):
"""Open Pandora's Box
If somebody is trying to make you use Euler angles, tell them no, and
walk away, and go and tell your mum.
You don't want to use Euler angles. They are awful. Stay away. It's
one thing to convert from Euler angles to quaternions; at least you're
moving in the right direction. But to go the other way?! It's just not
right.
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles are naturally in radians.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
q : quaternion or array of quaternions
The quaternion(s) need not be normalized, but must all be nonzero
Returns
-------
alpha_beta_gamma : float array
Output shape is q.shape+(3,). These represent the angles (alpha,
beta, gamma) in radians, where the normalized input quaternion
represents `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`.
Raises
------
AllHell
...if you try to actually use Euler angles, when you could have
been using quaternions like a sensible person.
"""
alpha_beta_gamma = np.empty(q.shape + (3,), dtype=np.float64)
n = np.norm(q)
q = as_float_array(q)
alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2])
alpha_beta_gamma[..., 1] = 2*np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2)/n))
alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2])
return alpha_beta_gamma
def from_euler_angles(alpha_beta_gamma, beta=None, gamma=None):
"""Improve your life drastically
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles naturally must be in radians for this to make any sense.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
alpha_beta_gamma : float or array of floats
This argument may either contain an array with last dimension of
size 3, where those three elements describe the (alpha, beta, gamma)
radian values for each rotation; or it may contain just the alpha
values, in which case the next two arguments must also be given.
beta : None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and third arguments.
gamma : None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and second arguments.
Returns
-------
R : quaternion array
The shape of this array will be the same as the input, except that
the last dimension will be removed.
"""
# Figure out the input angles from either type of input
if gamma is None:
alpha_beta_gamma = np.asarray(alpha_beta_gamma, dtype=np.double)
alpha = alpha_beta_gamma[..., 0]
beta = alpha_beta_gamma[..., 1]
gamma = alpha_beta_gamma[..., 2]
else:
alpha = np.asarray(alpha_beta_gamma, dtype=np.double)
beta = np.asarray(beta, dtype=np.double)
gamma = np.asarray(gamma, dtype=np.double)
# Set up the output array
R = np.empty(np.broadcast(alpha, beta, gamma).shape + (4,), dtype=np.double)
# Compute the actual values of the quaternion components
R[..., 0] = np.cos(beta/2)*np.cos((alpha+gamma)/2) # scalar quaternion components
R[..., 1] = -np.sin(beta/2)*np.sin((alpha-gamma)/2) # x quaternion components
R[..., 2] = np.sin(beta/2)*np.cos((alpha-gamma)/2) # y quaternion components
R[..., 3] = np.cos(beta/2)*np.sin((alpha+gamma)/2) # z quaternion components
return as_quat_array(R)
def as_spherical_coords(q):
"""Return the spherical coordinates corresponding to this quaternion
Obviously, spherical coordinates do not contain as much information as a
quaternion, so this function does lose some information. However, the
returned spherical coordinates will represent the point(s) on the sphere
to which the input quaternion(s) rotate the z axis.
Parameters
----------
q : quaternion or array of quaternions
The quaternion(s) need not be normalized, but must be nonzero
Returns
-------
vartheta_varphi : float array
Output shape is q.shape+(2,). These represent the angles (vartheta,
varphi) in radians, where the normalized input quaternion represents
        `exp(varphi*z/2) * exp(vartheta*y/2)`, up to an arbitrary initial
rotation about `z`.
"""
return as_euler_angles(q)[..., 1::-1]
def from_spherical_coords(theta_phi, phi=None):
"""Return the quaternion corresponding to these spherical coordinates
Assumes the spherical coordinates correspond to the quaternion R via
R = exp(phi*z/2) * exp(theta*y/2)
The angles naturally must be in radians for this to make any sense.
Note that this quaternion rotates `z` onto the point with the given
spherical coordinates, but also rotates `x` and `y` onto the usual basis
vectors (theta and phi, respectively) at that point.
Parameters
----------
theta_phi : float or array of floats
This argument may either contain an array with last dimension of
size 2, where those two elements describe the (theta, phi) values in
radians for each point; or it may contain just the theta values in
radians, in which case the next argument must also be given.
phi : None, float, or array of floats
If this array is given, it must be able to broadcast against the
first argument.
Returns
-------
R : quaternion array
If the second argument is not given to this function, the shape
will be the same as the input shape except for the last dimension,
which will be removed. If the second argument is given, this
output array will have the shape resulting from broadcasting the
two input arrays against each other.
"""
# Figure out the input angles from either type of input
if phi is None:
theta_phi = np.asarray(theta_phi, dtype=np.double)
theta = theta_phi[..., 0]
phi = theta_phi[..., 1]
else:
theta = np.asarray(theta_phi, dtype=np.double)
phi = np.asarray(phi, dtype=np.double)
# Set up the output array
R = np.empty(np.broadcast(theta, phi).shape + (4,), dtype=np.double)
# Compute the actual values of the quaternion components
R[..., 0] = np.cos(phi/2)*np.cos(theta/2) # scalar quaternion components
R[..., 1] = -np.sin(phi/2)*np.sin(theta/2) # x quaternion components
R[..., 2] = np.cos(phi/2)*np.sin(theta/2) # y quaternion components
R[..., 3] = np.sin(phi/2)*np.cos(theta/2) # z quaternion components
return as_quat_array(R)
def rotate_vectors(R, v, axis=-1):
"""Rotate vectors by given quaternions
This function is for the case where each quaternion (possibly the only input
quaternion) is used to rotate multiple vectors. If each quaternion is only
rotating a single vector, it is more efficient to use the standard formula
vprime = R * v * R.conjugate()
(Note that `from_vector_part` and `as_vector_part` may be helpful.)
Parameters
----------
R : quaternion array
Quaternions by which to rotate the input vectors
v : float array
Three-vectors to be rotated.
axis : int
Axis of the `v` array to use as the vector dimension. This
axis of `v` must have length 3.
Returns
-------
vprime : float array
The rotated vectors. This array has shape R.shape+v.shape.
Notes
-----
For simplicity, this function converts the input quaternion(s) to matrix form,
and rotates the input vector(s) by the usual matrix multiplication. As noted
above, if each input quaternion is only used to rotate a single vector, this is
not the most efficient approach. The simple formula shown above is faster than
this function, though it should be noted that the most efficient approach (in
terms of operation counts) is to use the formula
v' = v + 2 * r x (s * v + r x v) / m
where x represents the cross product, s and r are the scalar and vector parts
of the quaternion, respectively, and m is the sum of the squares of the
components of the quaternion. If you are looping over a very large number of
quaternions, and just rotating a single vector each time, you might want to
implement that alternative algorithm using numba (or something that doesn't use
python).
"""
R = np.asarray(R, dtype=np.quaternion)
v = np.asarray(v, dtype=float)
if v.ndim < 1 or 3 not in v.shape:
raise ValueError("Input `v` does not have at least one dimension of length 3")
if v.shape[axis] != 3:
raise ValueError("Input `v` axis {0} has length {1}, not 3.".format(axis, v.shape[axis]))
m = as_rotation_matrix(R)
tensordot_axis = m.ndim-2
final_axis = tensordot_axis + (axis % v.ndim)
return np.moveaxis(
np.tensordot(m, v, axes=(-1, axis)),
tensordot_axis, final_axis
)
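# Illustrative sketch (not part of the original module): the operation-count
# efficient formula mentioned in the Notes above,
#     v' = v + 2 * r x (s * v + r x v) / m,
# written out with plain numpy for a single quaternion R and a single
# three-vector v.  The name and signature are assumptions made for this example.
def _example_rotate_single_vector(R, v):
    s = R.w                           # scalar part of the quaternion
    r = np.array([R.x, R.y, R.z])     # vector part of the quaternion
    m = s*s + r @ r                   # sum of squares of the components
    v = np.asarray(v, dtype=float)
    # For a unit quaternion m == 1 and this reduces to R * v * R.conjugate()
    return v + 2.0 * np.cross(r, s*v + np.cross(r, v)) / m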
def isclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
This function is essentially a copy of the `numpy.isclose` function,
    with different default tolerances and one minor change necessary to
deal correctly with quaternions.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> quaternion.isclose([1e10*quaternion.x, 1e-7*quaternion.y], [1.00001e10*quaternion.x, 1e-8*quaternion.y],
... rtol=1.e-5, atol=1.e-8)
array([True, False])
>>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.00001e10*quaternion.x, 1e-9*quaternion.y],
... rtol=1.e-5, atol=1.e-8)
array([True, True])
>>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.0001e10*quaternion.x, 1e-9*quaternion.y],
... rtol=1.e-5, atol=1.e-8)
array([False, True])
>>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y])
array([True, False])
>>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y], equal_nan=True)
array([True, True])
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x-y), atol + rtol * abs(y))
return result[()]
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
try:
dt = np.result_type(y, 1.)
except TypeError:
dt = np.dtype(np.quaternion)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond[()]
def allclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False, verbose=False):
"""Returns True if two arrays are element-wise equal within a tolerance.
This function is essentially a wrapper for the `quaternion.isclose`
function, but returns a single boolean value of True if all elements
of the output from `quaternion.isclose` are True, and False otherwise.
    This function also adds the option of printing the non-close values when `verbose` is True.
Note that this function has stricter tolerances than the
`numpy.allclose` function, as well as the additional `verbose` option.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
verbose : bool
If the return value is False, all the non-close values are printed,
iterating through the non-close indices in order, displaying the
array values along with the index, with a separate line for each
pair of values.
See Also
--------
isclose, numpy.all, numpy.any, numpy.allclose
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`allclose(a, b)` might be different from `allclose(b, a)` in
some rare cases.
"""
close = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
result = np.all(close)
if verbose and not result:
a, b = np.atleast_1d(a), np.atleast_1d(b)
a, b = np.broadcast_arrays(a, b)
print('Non-close values:')
        for i in zip(*np.nonzero(close == False)):
print(' a[{0}]={1}\n b[{0}]={2}'.format(i, a[i], b[i]))
return result
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to warm-start TF.Learn Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver
class _WarmStartSettings(
collections.namedtuple("_WarmStartSettings", [
"ckpt_to_initialize_from",
"col_to_prev_vocab",
"col_to_prev_tensor",
"exclude_columns",
])):
"""Settings for warm-starting input layer in models.
Attributes:
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
col_to_prev_vocab: [Optional] Dict of `FeatureColumn` to path of the
vocabulary used for the `FeatureColumn` in `ckpt_to_initialize_from`. If
not explicitly provided, the vocabularies are assumed to be same between
previous and present checkpoints.
col_to_prev_tensor: [Optional] Dict of `FeatureColumn` to name of the
variable (corresponding to the `FeatureColumn`) in
`ckpt_to_initialize_from`. If not explicitly provided, the name of the
variable is assumed to be same between previous and present checkpoints.
exclude_columns: [Optional] List of `FeatureColumn`s that should not be
warm-started from provided checkpoint.
Example Uses:
# Feature columns defining transformations on inputs.
sc_vocab_file = tf.feature_column.categorical_column_with_vocabulary_file(
"sc_vocab_file", "new_vocab.txt", vocab_size=100)
  sc_vocab_list = tf.feature_column.categorical_column_with_vocabulary_list(
"sc_vocab_list", vocabulary_list=["a", "b"])
# Warm-start all weights. The parameters corresponding to "sc_vocab_file" have
# the same name and same vocab as current checkpoint. The parameters
# corresponding to "sc_vocab_list" have the same name.
ws = _WarmStartSettings(ckpt_to_initialize_from="/tmp")
# Warm-start all weights but the parameters corresponding to "sc_vocab_file"
# have a different vocab from the one used in current checkpoint.
ws = _WarmStartSettings(ckpt_to_initialize_from="/tmp",
col_to_prev_vocab={sc_vocab_file: "old_vocab.txt"})
# Warm-start all weights but the parameters corresponding to "sc_vocab_file"
# have a different vocab from the one used in current checkpoint and the
# parameters corresponding to "sc_vocab_list" have a different name from the
# current checkpoint.
ws = _WarmStartSettings(ckpt_to_initialize_from="/tmp",
col_to_prev_vocab={sc_vocab_file: "old_vocab.txt"},
col_to_prev_tensor={sc_vocab_list: "old_tensor_name"})
  # Warm-start all weights except those corresponding to "sc_vocab_file".
ws = _WarmStartSettings(ckpt_to_initialize_from="/tmp",
exclude_columns=[sc_vocab_file])
"""
def __new__(cls,
ckpt_to_initialize_from,
col_to_prev_vocab=None,
col_to_prev_tensor=None,
exclude_columns=None):
if not ckpt_to_initialize_from:
raise ValueError(
"`ckpt_to_initialize_from` MUST be set in _WarmStartSettings")
return super(_WarmStartSettings, cls).__new__(
cls,
ckpt_to_initialize_from,
col_to_prev_vocab or {},
col_to_prev_tensor or {},
exclude_columns or [],)
def _is_variable(x):
return (isinstance(x, variables.Variable) or
isinstance(x, resource_variable_ops.ResourceVariable))
def _infer_var_name(var):
"""Returns name of the `var`.
Args:
var: A list. The list can contain either of the following:
(i) A single `Variable`
(ii) A single `ResourceVariable`
(iii) Multiple `Variable` objects which must be slices of the same larger
variable.
(iv) A single `PartitionedVariable`
Returns:
Name of the `var`
"""
name_to_var_dict = saver.BaseSaverBuilder.OpListToDict(var)
if len(name_to_var_dict) > 1:
raise TypeError("`var` passed as arg violates the constraints.")
return list(name_to_var_dict.keys())[0]
def _warmstart_var(var, prev_ckpt, prev_tensor_name=None):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
      Can be any of the following:
(i) `Variable`
(ii) `ResourceVariable`
(iii) `PartitionedVariable`
(iv) list of `Variable` and/or `PartitionedVariable`: The list may
        contain one or more variables that have been sharded. For example:
[Variable('a/part_0'), Variable('b/part_0'), Variable('a/part_1'),
PartitionedVariable([Variable('c/part_0'), Variable('c/part_1')])]
where we have three whole Variables represented ('a', 'b', and 'c').
prev_ckpt: A string specifying the directory with checkpoint file(s) or path
to checkpoint. The given checkpoint must have tensor with name
`prev_tensor_name` (if not None) or tensor with name same as given `var`.
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
Raises:
ValueError: If prev_tensor_name is not None, but the given var represents
more than one Variable.
TypeError: If var is not one of the allowed types.
"""
if _is_variable(var):
current_var_name = _infer_var_name([var])
elif isinstance(var, variables.PartitionedVariable):
current_var_name = _infer_var_name([var])
var = var._get_variable_list() # pylint: disable=protected-access
elif (isinstance(var, list) and all(
_is_variable(v) or isinstance(v, variables.PartitionedVariable)
for v in var)):
# Convert length-1 lists of vars to single tf.Variables. This ensures that
# checkpoint_utils.init_from_checkpoint() doesn't incorrectly assume
# slice info is present.
if len(var) == 1:
current_var_name = _infer_var_name(var)
var = var[0]
else:
# If we have multiple elements in var, we cannot assume they all
# represent the same Variable.
name_to_var_dict = saver.BaseSaverBuilder.OpListToDict(
var, convert_variable_to_tensor=False)
if prev_tensor_name:
      # Providing a prev_tensor_name is only viable if var represents a
# single Variable.
if len(name_to_var_dict) > 1:
raise ValueError("var represented more than one Variable, but "
"prev_tensor_name was provided.")
checkpoint_utils.init_from_checkpoint(prev_ckpt, {
prev_tensor_name: var
})
else:
# OpListToDict gives us roughly what we need, but
# the values in the dict may be PartitionedVariables (which
# init_from_checkpoint does not expect) that we need to convert to
# lists.
name_to_var_dict_fixed = {}
for name, var in six.iteritems(name_to_var_dict):
if isinstance(var, variables.PartitionedVariable):
name_to_var_dict_fixed[name] = var._get_variable_list() # pylint: disable=protected-access
else:
name_to_var_dict_fixed[name] = var
checkpoint_utils.init_from_checkpoint(prev_ckpt, name_to_var_dict_fixed)
return
else:
raise TypeError(
"var MUST be one of the following: a Variable, PartitionedVariable, or "
"list of Variable's and/or PartitionedVariable's, but is {}".format(
type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = current_var_name
checkpoint_utils.init_from_checkpoint(prev_ckpt, {prev_tensor_name: var})
# pylint: disable=protected-access
# Accesses protected members of tf.Variable to reset the variable's internal
# state.
def _warmstart_var_with_vocab(var,
current_vocab_path,
current_vocab_size,
prev_ckpt,
prev_vocab_path,
current_oov_buckets=0,
prev_tensor_name=None,
initializer=None):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Use this method when the `var` is backed by vocabulary. This method stitches
the given `var` such that values corresponding to individual features in the
vocabulary remain consistent irrespective of changing order of the features
between old and new vocabularies.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
      Can be any of the following:
(i) `Variable`
(ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable.
(iv) `PartitionedVariable`
current_vocab_path: Path to the vocab file used for the given `var`.
current_vocab_size: An `int` specifying the number of entries in the current
vocab.
prev_ckpt: A string specifying the directory with checkpoint file(s) or path
to checkpoint. The given checkpoint must have tensor with name
`prev_tensor_name` (if not None) or tensor with name same as given `var`.
prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
current_oov_buckets: An `int` specifying the number of out-of-vocabulary
buckets used for given `var`.
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
initializer: Variable initializer to be used for missing entries. If None,
missing entries will be zero-initialized.
Raises:
ValueError: If required args are not provided.
"""
if not (current_vocab_path and current_vocab_size and prev_ckpt and
prev_vocab_path):
raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
"current_vocab_size, prev_ckpt, prev_vocab_path}.")
if _is_variable(var):
var = [var]
elif isinstance(var, list) and all(_is_variable(v) for v in var):
var = var
elif isinstance(var, variables.PartitionedVariable):
var = var._get_variable_list()
else:
raise TypeError(
"var MUST be one of the following: a Variable, list of Variable or "
"PartitionedVariable, but is {}".format(type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = _infer_var_name(var)
for v in var:
v_shape = v.get_shape().as_list()
slice_info = v._get_save_slice_info()
partition_info = None
if slice_info:
partition_info = variable_scope._PartitionInfo(
full_shape=slice_info.full_shape,
var_offset=slice_info.var_offset)
# TODO(vihanjain): Support _WarmstartSettings where class vocabularies need
# remapping too.
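    # _load_and_remap_matrix_initializer builds an initializer that copies rows
    # whose vocab entries appear in both the old and new vocabularies from the
    # old tensor, fills rows for newly added entries with `initializer` (zeros
    # when None), and appends `current_oov_buckets` extra rows at the end.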
init = checkpoint_ops._load_and_remap_matrix_initializer(
ckpt_path=saver.latest_checkpoint(prev_ckpt),
old_tensor_name=prev_tensor_name,
new_row_vocab_size=current_vocab_size,
new_col_vocab_size=v_shape[1],
old_row_vocab_file=prev_vocab_path,
new_row_vocab_file=current_vocab_path,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=current_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer)
new_init_val = ops.convert_to_tensor(
init(shape=v_shape, partition_info=partition_info))
v._initializer_op = state_ops.assign(v, new_init_val)
# pylint: enable=protected-access
def _warmstart_input_layer(cols_to_vars, warmstart_settings):
"""Warm-starts input layer of a model using given settings.
Args:
cols_to_vars: Dict of feature columns to corresponding graph variables.
warmstart_settings: An object of `_WarmStartSettings`.
Typical usage example:
```python
tfcl = tf.contrib.layers
# Define features and transformations.
sc_vocab_list = tf.feature_column.categorical_column_with_vocabulary_list(
"sc_vocab_list", vocabulary_list=["a", "b"])
sc_vocab_file = tf.feature_column.categorical_column_with_vocabulary_file(
"sc_vocab_file", "new_vocab.txt", vocab_size=100)
cross = tf.feature_column.crossed_column(
[sc_vocab_list, sc_vocab_file], hash_bucket_size=5000)
  all_cols = set([sc_vocab_list, sc_vocab_file, cross])
batch_features = tf.parse_example(
serialized=serialized_examples,
features=tf.contrib.layers.create_feature_spec_for_parsing(all_cols))
cols_to_vars = {}
tf.feature_column.linear_model(
features=batch_features,
feature_columns=all_cols,
units=1,
cols_to_vars=cols_to_vars)
# Warm-start entire input layer.
ws_settings = _WarmStartSettings(
"/tmp/prev_model_dir",
col_to_prev_vocab={sc_vocab_file: "old_vocab.txt"})
_warmstart_input_layer(cols_to_vars, ws_settings)
# Warm-start bias too.
_warmstart_var(cols_to_vars['bias'], ws_settings.ckpt_to_initialize_from)
```
  The above example effectively warm-starts the full linear model.
Raises:
ValueError: If a column in cols_to_vars has an entry in
      warmstart_settings.col_to_prev_vocab, but is not an instance of
_VocabularyFileCategoricalColumn or _EmbeddingColumn.
"""
for col, var in six.iteritems(cols_to_vars):
if not isinstance(col, feature_column._FeatureColumn): # pylint: disable=protected-access
raise TypeError(
"Keys in dict `cols_to_vars` must be of type FeatureColumn. Found "
"key of type: {}".format(type(col)))
if col in warmstart_settings.exclude_columns:
logging.info("Skipping warm-starting column: {}".format(col.name))
continue
prev_tensor_name = warmstart_settings.col_to_prev_tensor.get(col)
# pylint: disable=protected-access
is_sparse_vocab_column = isinstance(
col, feature_column._VocabularyFileCategoricalColumn)
is_embedding_vocab_column = (
isinstance(col, feature_column._EmbeddingColumn) and
isinstance(col.categorical_column,
feature_column._VocabularyFileCategoricalColumn))
if is_sparse_vocab_column or is_embedding_vocab_column:
# pylint: enable=protected-access
initializer = None
if is_embedding_vocab_column:
initializer = col.initializer
vocabulary_file = col.categorical_column.vocabulary_file
vocabulary_size = col.categorical_column.vocabulary_size
num_oov_buckets = col.categorical_column.num_oov_buckets
else:
vocabulary_file = col.vocabulary_file
vocabulary_size = col.vocabulary_size
num_oov_buckets = col.num_oov_buckets
prev_vocab_path = warmstart_settings.col_to_prev_vocab.get(
col, vocabulary_file)
logging.info("Warm-starting column: {}; prev_vocab: {}; prev_tensor: {}".
format(col.name, prev_vocab_path, (
prev_tensor_name or "Unchanged")))
_warmstart_var_with_vocab(
var,
current_vocab_path=vocabulary_file,
current_vocab_size=vocabulary_size,
prev_ckpt=warmstart_settings.ckpt_to_initialize_from,
prev_vocab_path=prev_vocab_path,
current_oov_buckets=num_oov_buckets,
prev_tensor_name=prev_tensor_name,
initializer=initializer)
else:
if col in warmstart_settings.col_to_prev_vocab:
raise ValueError("Vocabulary provided for column %s which is not a "
"_VocabularyFileCategoricalColumn or _EmbeddingColumn")
logging.info("Warm-starting column: {}; prev_tensor: {}".format(
col.name, prev_tensor_name or "Unchanged"))
_warmstart_var(var, warmstart_settings.ckpt_to_initialize_from,
prev_tensor_name)
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
try:
from cdecimal import Decimal
except ImportError: # pragma: no cover
from decimal import Decimal
from agate import Table
from agate.aggregations import Sum
from agate.computations import Percent
from agate.data_types import *
from agate.testcase import AgateTestCase
class TestPivot(AgateTestCase):
def setUp(self):
self.rows = (
('joe', 'white', 'male', 20, 'blue'),
('jane', 'white', 'female', 20, 'blue'),
('josh', 'black', 'male', 20, 'blue'),
('jim', 'latino', 'male', 25, 'blue'),
('julia', 'white', 'female', 25, 'green'),
('joan', 'asian', 'female', 25, 'green')
)
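        # Six sample people (name, race, gender, age, color) reused by every
        # pivot test in this class.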
self.number_type = Number()
self.text_type = Text()
self.column_names = ['name', 'race', 'gender', 'age', 'color']
self.column_types = [self.text_type, self.text_type, self.text_type, self.number_type, self.text_type]
def test_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender')
pivot_rows = (
('white', 1, 2),
('black', 1, 0),
('latino', 1, 0),
('asian', 0, 1)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertRowNames(pivot_table, ['white', 'black', 'latino', 'asian'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(lambda r: r['gender'])
pivot_rows = (
('male', 3),
('female', 3)
)
self.assertColumnNames(pivot_table, ['group', 'Count'])
self.assertRowNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda_group_name(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(lambda r: r['gender'], key_name='gender')
pivot_rows = (
('male', 3),
('female', 3)
)
self.assertColumnNames(pivot_table, ['gender', 'Count'])
self.assertRowNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda_group_name_sequence_invalid(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(ValueError):
pivot_table = table.pivot(['race', 'gender'], key_name='foo') # noqa
def test_pivot_no_key(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(pivot='gender')
pivot_rows = (
(3, 3),
)
self.assertColumnNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_no_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race')
pivot_rows = (
('white', 3),
('black', 1),
('latino', 1),
('asian', 1)
)
self.assertColumnNames(pivot_table, ['race', 'Count'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_sum(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender', Sum('age'))
pivot_rows = (
('white', 20, 45),
('black', 20, 0),
('latino', 25, 0),
('asian', 0, 25)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_multiple_keys(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(['race', 'gender'], 'age')
pivot_rows = (
('white', 'male', 1, 0),
('white', 'female', 1, 1),
('black', 'male', 1, 0),
('latino', 'male', 0, 1),
('asian', 'female', 0, 1),
)
self.assertRows(pivot_table, pivot_rows)
self.assertColumnNames(pivot_table, ['race', 'gender', '20', '25'])
self.assertRowNames(pivot_table, [
('white', 'male'),
('white', 'female'),
('black', 'male'),
('latino', 'male'),
('asian', 'female'),
])
self.assertColumnTypes(pivot_table, [Text, Text, Number, Number])
def test_pivot_multiple_keys_no_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(['race', 'gender'])
pivot_rows = (
('white', 'male', 1),
('white', 'female', 2),
('black', 'male', 1),
('latino', 'male', 1),
('asian', 'female', 1),
)
self.assertRows(pivot_table, pivot_rows)
self.assertColumnNames(pivot_table, ['race', 'gender', 'Count'])
self.assertColumnTypes(pivot_table, [Text, Text, Number])
def test_pivot_default_value(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender', default_value=None)
pivot_rows = (
('white', 1, 2),
('black', 1, None),
('latino', 1, None),
('asian', None, 1)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', computation=Percent('Count'))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(50)),
('female', Decimal(50)),
)
self.assertColumnNames(pivot_table, ['gender', 'Percent'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute_pivots(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', 'color', computation=Percent('Count'))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(50), 0),
('female', Decimal(1) / Decimal(6) * Decimal(100), Decimal(1) / Decimal(3) * Decimal(100)),
)
self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute_kwargs(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', 'color', computation=Percent('Count', total=8))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(3) / Decimal(8) * Decimal(100), 0),
('female', Decimal(1) / Decimal(8) * Decimal(100), Decimal(2) / Decimal(8) * Decimal(100)),
)
self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
|
|
import numpy as np
import numpy.random as npr
import kayak
from . import *
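# NUM_TRIALS, MAX_GRAD_DIFF and close_float are assumed to be provided by the
# test package helpers pulled in through the wildcard import above.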
# These behaviors require prepending singletons. Do we want to keep them?
# def test_0d_opplus_2d_scalar_value():
# npr.seed(1)
# for ii in xrange(NUM_TRIALS):
# npX1 = npr.randn(1, 1)
# X1 = kayak.Parameter( npX1 )
# npX2 = np.sum(npr.randn()) # generates a scalar with shape ()
# X2= kayak.Parameter( npX2 )
# # Y = kayak.MatAdd(X1, X2)
# Y = X1+X2
# # Verify that a scalar is reproduced.
# assert close_float(Y.value, npX1 + npX2)
# def test_0d_plus_2d_scalar_grad():
# npr.seed(2)
# for ii in xrange(NUM_TRIALS):
# npX1 = npr.randn(1, 1)
# X1 = kayak.Parameter( npX1 )
# npX2 = np.sum(npr.randn()) # generates a scalar with shape ()
# X2= kayak.Parameter( npX2 )
# Y = X1+X2
# # Verify that the gradient is one.
# assert Y.grad(X1) == 1.0
# assert Y.grad(X2) == 1.0
# assert kayak.util.checkgrad(X1, Y) < MAX_GRAD_DIFF
# assert kayak.util.checkgrad(X2, Y) < MAX_GRAD_DIFF
#
def test_matadd_values_1():
npr.seed(1)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
assert C.shape == np_A.shape
assert np.all( close_float(C.value, np_A+np_B))
def test_matadd_values_2():
npr.seed(2)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
np_C = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.Parameter(np_C)
D = A+B+C
assert D.shape == np_A.shape
assert np.all( close_float(D.value, np_A+np_B+np_C))
def test_matadd_values_3():
npr.seed(3)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(1,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
assert C.shape == (5,6)
assert np.all( close_float(C.value, np_A+np_B))
def test_matadd_values_4():
npr.seed(4)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
assert C.shape == (5,6)
assert np.all( close_float(C.value, np_A+np_B))
def test_matadd_values_5():
npr.seed(5)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(1,6)
np_B = npr.randn(5,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
assert C.shape == (5,6)
assert np.all( close_float(C.value, np_A+np_B))
def test_matadd_values_6():
npr.seed(6)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(1,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
assert C.shape == (5,6)
assert np.all( close_float(C.value, np_A+np_B))
def test_matadd_values_7():
npr.seed(7)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
D = A+B+A
assert D.shape == (5,6)
assert np.all( close_float(D.value, 2*np_A + np_B))
def test_matadd_grad_1():
npr.seed(8)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
D = kayak.MatSum(C)
D.value
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_matadd_grad_2():
npr.seed(9)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
np_C = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.Parameter(np_C)
D = A+B+C
E = kayak.MatSum(D)
E.value
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(C, E) < MAX_GRAD_DIFF
def test_matadd_grad_3():
npr.seed(10)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(1,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
D = kayak.MatSum(C)
D.value
print np_A.shape, D.grad(A).shape
print np_B.shape, D.grad(B).shape
assert D.grad(A).shape == np_A.shape
assert D.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_matadd_grad_4():
npr.seed(11)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,1)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
D = kayak.MatSum(C)
D.value
assert D.grad(A).shape == np_A.shape
assert D.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_matadd_grad_5():
npr.seed(12)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,1)
np_B = npr.randn(1,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
D = kayak.MatSum(C)
D.value
assert D.grad(A).shape == np_A.shape
assert D.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_matadd_grad_6():
npr.seed(13)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(1,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = A+B
D = kayak.MatSum(C)
D.value
assert D.grad(A).shape == np_A.shape
assert D.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_matadd_grad_7():
npr.seed(14)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
D = A+B+A
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert E.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
def test_matadd_grad_8():
npr.seed(15)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
D = A+A
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Model classes that extend the instances functionality for MySQL instances.
"""
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.notification import StartNotification
from trove.common.remote import create_guest_client
from trove.common import utils
from trove.extensions.common.models import load_and_verify
from trove.extensions.common.models import RootHistory
from trove.guestagent.db import models as guest_models
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def persisted_models():
return {'root_enabled_history': RootHistory}
class User(object):
_data_fields = ['name', 'host', 'password', 'databases']
def __init__(self, name, host, password, databases):
self.name = name
self.host = host
self.password = password
self.databases = databases
@classmethod
def load(cls, context, instance_id, username, hostname, root_user=False):
load_and_verify(context, instance_id)
if root_user:
validate = guest_models.RootUser()
else:
validate = guest_models.MySQLUser()
validate.name = username
validate.host = hostname
client = create_guest_client(context, instance_id)
found_user = client.get_user(username=username, hostname=hostname)
if not found_user:
return None
database_names = [{'name': db['_name']}
for db in found_user['_databases']]
return cls(found_user['_name'],
found_user['_host'],
found_user['_password'],
database_names)
@classmethod
def create(cls, context, instance_id, users):
# Load InstanceServiceStatus to verify if it's running
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
for user in users:
user_name = user['_name']
host_name = user['_host']
userhost = "%s@%s" % (user_name, host_name)
existing_users, _nadda = Users.load_with_client(
client,
limit=1,
marker=userhost,
include_marker=True)
if (len(existing_users) > 0 and
str(existing_users[0].name) == str(user_name) and
str(existing_users[0].host) == str(host_name)):
raise exception.UserAlreadyExists(name=user_name,
host=host_name)
return client.create_user(users)
@classmethod
def delete(cls, context, instance_id, user):
load_and_verify(context, instance_id)
with StartNotification(context, instance_id=instance_id,
username=user):
create_guest_client(context, instance_id).delete_user(user)
@classmethod
def access(cls, context, instance_id, username, hostname):
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
databases = client.list_access(username, hostname)
dbs = []
for db in databases:
dbs.append(Schema(name=db['_name'],
collate=db['_collate'],
character_set=db['_character_set']))
return UserAccess(dbs)
@classmethod
def grant(cls, context, instance_id, username, hostname, databases):
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
client.grant_access(username, hostname, databases)
@classmethod
def revoke(cls, context, instance_id, username, hostname, database):
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
client.revoke_access(username, hostname, database)
@classmethod
def change_password(cls, context, instance_id, users):
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
change_users = []
for user in users:
change_user = {'name': user.name,
'host': user.host,
'password': user.password,
}
change_users.append(change_user)
client.change_passwords(change_users)
@classmethod
def update_attributes(cls, context, instance_id, username, hostname,
user_attrs):
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
user_changed = user_attrs.get('name')
host_changed = user_attrs.get('host')
validate = guest_models.MySQLUser()
if host_changed:
validate.host = host_changed
if user_changed:
validate.name = user_changed
user = user_changed or username
host = host_changed or hostname
userhost = "%s@%s" % (user, host)
if user_changed or host_changed:
existing_users, _nadda = Users.load_with_client(
client,
limit=1,
marker=userhost,
include_marker=True)
if (len(existing_users) > 0 and
existing_users[0].name == user and
existing_users[0].host == host):
raise exception.UserAlreadyExists(name=user,
host=host)
client.update_attributes(username, hostname, user_attrs)
class UserAccess(object):
_data_fields = ['databases']
def __init__(self, databases):
self.databases = databases
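# Module-level helper shared by Users.load and Schemas.load below; `cls` is
# passed in explicitly so both models reuse the same pagination logic.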
def load_via_context(cls, context, instance_id):
"""Creates guest and fetches pagination arguments from the context."""
load_and_verify(context, instance_id)
limit = utils.pagination_limit(context.limit, cls.DEFAULT_LIMIT)
client = create_guest_client(context, instance_id)
# The REST API standard dictates that we *NEVER* include the marker.
return cls.load_with_client(client=client, limit=limit,
marker=context.marker, include_marker=False)
class Users(object):
DEFAULT_LIMIT = CONF.users_page_size
@classmethod
def load(cls, context, instance_id):
return load_via_context(cls, context, instance_id)
@classmethod
def load_with_client(cls, client, limit, marker, include_marker):
user_list, next_marker = client.list_users(
limit=limit,
marker=marker,
include_marker=include_marker)
model_users = []
for user in user_list:
mysql_user = guest_models.MySQLUser()
mysql_user.deserialize(user)
if mysql_user.name in cfg.get_ignored_users():
continue
# TODO(hub-cap): databases are not being returned in the
# reference agent
dbs = []
for db in mysql_user.databases:
dbs.append({'name': db['_name']})
model_users.append(User(mysql_user.name,
mysql_user.host,
mysql_user.password,
dbs))
return model_users, next_marker
class Schema(object):
_data_fields = ['name', 'collate', 'character_set']
def __init__(self, name, collate, character_set):
self.name = name
self.collate = collate
self.character_set = character_set
@classmethod
def create(cls, context, instance_id, schemas):
load_and_verify(context, instance_id)
client = create_guest_client(context, instance_id)
for schema in schemas:
schema_name = schema['_name']
existing_schema, _nadda = Schemas.load_with_client(
client,
limit=1,
marker=schema_name,
include_marker=True)
if (len(existing_schema) > 0 and
str(existing_schema[0].name) == str(schema_name)):
raise exception.DatabaseAlreadyExists(name=schema_name)
return client.create_database(schemas)
@classmethod
def delete(cls, context, instance_id, schema):
load_and_verify(context, instance_id)
create_guest_client(context, instance_id).delete_database(schema)
class Schemas(object):
DEFAULT_LIMIT = CONF.databases_page_size
@classmethod
def load(cls, context, instance_id):
return load_via_context(cls, context, instance_id)
@classmethod
def load_with_client(cls, client, limit, marker, include_marker):
schemas, next_marker = client.list_databases(
limit=limit,
marker=marker,
include_marker=include_marker)
model_schemas = []
for schema in schemas:
mysql_schema = guest_models.MySQLDatabase()
mysql_schema.deserialize(schema)
if mysql_schema.name in cfg.get_ignored_dbs():
continue
model_schemas.append(Schema(mysql_schema.name,
mysql_schema.collate,
mysql_schema.character_set))
return model_schemas, next_marker
|
|
import math
import re
import random
from collections import defaultdict
import botconfig
import src.settings as var
from src.utilities import *
from src import debuglog, errlog, plog
from src.decorators import cmd, event_listener
from src.messages import messages
from src.events import Event
KILLS = {} # type: Dict[str, str]
TARGETS = {} # type: Dict[str, Set[str]]
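# KILLS maps a dullahan's nick to the victim they have chosen this night;
# TARGETS maps a dullahan's nick to the set of players they must kill to win.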
@cmd("kill", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("dullahan",))
def dullahan_kill(cli, nick, chan, rest):
"""Kill someone at night as a dullahan until everyone on your list is dead."""
if not TARGETS[nick] & set(list_players()):
pm(cli, nick, messages["dullahan_targets_dead"])
return
victim = get_victim(cli, nick, re.split(" +",rest)[0], False)
if not victim:
return
if victim == nick:
pm(cli, nick, messages["no_suicide"])
return
orig = victim
evt = Event("targeted_command", {"target": victim, "misdirection": True, "exchange": True})
evt.dispatch(cli, var, "kill", nick, victim, frozenset({"detrimental"}))
if evt.prevent_default:
return
victim = evt.data["target"]
KILLS[nick] = victim
msg = messages["wolf_target"].format(orig)
pm(cli, nick, messages["player"].format(msg))
debuglog("{0} ({1}) KILL: {2} ({3})".format(nick, get_role(nick), victim, get_role(victim)))
chk_nightdone(cli)
@cmd("retract", "r", chan=False, pm=True, playing=True, phases=("night",), roles=("dullahan",))
def dullahan_retract(cli, nick, chan, rest):
"""Removes a dullahan's kill selection."""
    if nick not in KILLS:
        return
    del KILLS[nick]
    pm(cli, nick, messages["retracted_kill"])
@event_listener("player_win")
def on_player_win(evt, cli, var, nick, role, winner, survived):
if role != "dullahan":
return
alive = set(list_players())
if nick in var.ENTRANCED:
alive -= var.ROLES["succubus"]
if not TARGETS[nick] & alive:
evt.data["iwon"] = True
@event_listener("del_player")
def on_del_player(evt, cli, var, nick, nickrole, nicktpls, death_triggers):
for h,v in list(KILLS.items()):
if v == nick:
pm(cli, h, messages["hunter_discard"])
del KILLS[h]
elif h == nick:
del KILLS[h]
if death_triggers and nickrole == "dullahan":
pl = evt.data["pl"]
targets = TARGETS[nick] & set(pl)
if targets:
target = random.choice(list(targets))
if "totem" in var.ACTIVE_PROTECTIONS[target]:
var.ACTIVE_PROTECTIONS[target].remove("totem")
cli.msg(botconfig.CHANNEL, messages["dullahan_die_totem"].format(nick, target))
elif "angel" in var.ACTIVE_PROTECTIONS[target]:
var.ACTIVE_PROTECTIONS[target].remove("angel")
cli.msg(botconfig.CHANNEL, messages["dullahan_die_angel"].format(nick, target))
elif "bodyguard" in var.ACTIVE_PROTECTIONS[target]:
var.ACTIVE_PROTECTIONS[target].remove("bodyguard")
for bg in var.ROLES["bodyguard"]:
if var.GUARDED.get(bg) == target:
cli.msg(botconfig.CHANNEL, messages["dullahan_die_bodyguard"].format(nick, target, bg))
evt.params.del_player(cli, bg, True, end_game=False, killer_role=nickrole, deadlist=evt.params.deadlist, original=evt.params.original, ismain=False)
evt.data["pl"] = evt.params.refresh_pl(pl)
break
elif "blessing" in var.ACTIVE_PROTECTIONS[target] or (var.GAMEPHASE == "day" and target in var.ROLES["blessed villager"]):
if "blessing" in var.ACTIVE_PROTECTIONS[target]:
var.ACTIVE_PROTECTIONS[target].remove("blessing")
# don't message the channel whenever a blessing blocks a kill, but *do* let the dullahan know so they don't try to report it as a bug
pm(cli, nick, messages["assassin_fail_blessed"].format(target))
else:
if var.ROLE_REVEAL in ("on", "team"):
role = get_reveal_role(target)
an = "n" if role.startswith(("a", "e", "i", "o", "u")) else ""
cli.msg(botconfig.CHANNEL, messages["dullahan_die_success"].format(nick, target, an, role))
else:
cli.msg(botconfig.CHANNEL, messages["dullahan_die_success_noreveal"].format(nick, target))
debuglog("{0} ({1}) DULLAHAN ASSASSINATE: {2} ({3})".format(nick, nickrole, target, get_role(target)))
evt.params.del_player(cli, target, True, end_game=False, killer_role=nickrole, deadlist=evt.params.deadlist, original=evt.params.original, ismain=False)
evt.data["pl"] = evt.params.refresh_pl(pl)
@event_listener("rename_player")
def on_rename(evt, cli, var, prefix, nick):
kvp = []
for a,b in KILLS.items():
if a == prefix:
a = nick
if b == prefix:
b = nick
kvp.append((a,b))
KILLS.update(kvp)
if prefix in KILLS:
del KILLS[prefix]
kvp = []
for a,b in TARGETS.items():
nl = set()
for n in b:
if n == prefix:
n = nick
nl.add(n)
if a == prefix:
a = nick
kvp.append((a,nl))
TARGETS.update(kvp)
if prefix in TARGETS:
del TARGETS[prefix]
@event_listener("night_acted")
def on_acted(evt, cli, var, nick, sender):
if nick in KILLS:
evt.data["acted"] = True
@event_listener("transition_day", priority=2)
def on_transition_day(evt, cli, var):
for k, d in list(KILLS.items()):
evt.data["victims"].append(d)
evt.data["onlybywolves"].discard(d)
evt.data["killers"][d] = k
del KILLS[k]
@event_listener("exchange_roles")
def on_exchange(evt, cli, var, actor, nick, actor_role, nick_role):
if actor in KILLS:
del KILLS[actor]
if nick in KILLS:
del KILLS[nick]
if actor_role == "dullahan" and nick_role != "dullahan" and actor in TARGETS:
TARGETS[nick] = TARGETS[actor] - {nick}
del TARGETS[actor]
elif nick_role == "dullahan" and actor_role != "dullahan" and nick in TARGETS:
TARGETS[actor] = TARGETS[nick] - {actor}
del TARGETS[nick]
@event_listener("chk_nightdone")
def on_chk_nightdone(evt, cli, var):
spl = set(list_players())
evt.data["actedcount"] += len(KILLS)
for p in var.ROLES["dullahan"]:
if TARGETS[p] & spl:
evt.data["nightroles"].append(p)
@event_listener("transition_night_end", priority=2)
def on_transition_night_end(evt, cli, var):
for dullahan in var.ROLES["dullahan"]:
targets = list(TARGETS[dullahan])
for target in var.DEAD:
if target in targets:
targets.remove(target)
if not targets: # already all dead
pm(cli, dullahan, messages["dullahan_targets_dead"])
continue
random.shuffle(targets)
if dullahan in var.PLAYERS and not is_user_simple(dullahan):
pm(cli, dullahan, messages["dullahan_notify"])
else:
pm(cli, dullahan, messages["dullahan_simple"])
t = messages["dullahan_targets"] if var.FIRST_NIGHT else messages["dullahan_remaining_targets"]
pm(cli, dullahan, t + ", ".join(targets))
@event_listener("role_assignment")
def on_role_assignment(evt, cli, var, gamemode, pl, restart):
# assign random targets to dullahan to kill
if var.ROLES["dullahan"]:
max_targets = math.ceil(8.1 * math.log(len(pl), 10) - 5)
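        # For example, with 12 players this is ceil(8.1 * log10(12) - 5) =
        # ceil(3.74) = 4 targets per dullahan.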
for dull in var.ROLES["dullahan"]:
TARGETS[dull] = set()
dull_targets = Event("dullahan_targets", {"targets": TARGETS}) # support sleepy
dull_targets.dispatch(cli, var, var.ROLES["dullahan"], max_targets)
for dull, ts in TARGETS.items():
ps = pl[:]
ps.remove(dull)
while len(ts) < max_targets:
target = random.choice(ps)
ps.remove(target)
ts.add(target)
@event_listener("myrole")
def on_myrole(evt, cli, var, nick):
role = get_role(nick)
# Remind dullahans of their targets
if role == "dullahan":
targets = list(TARGETS[nick])
for target in var.DEAD:
if target in targets:
targets.remove(target)
random.shuffle(targets)
if targets:
t = messages["dullahan_targets"] if var.FIRST_NIGHT else messages["dullahan_remaining_targets"]
evt.data["messages"].append(t + ", ".join(targets))
else:
evt.data["messages"].append(messages["dullahan_targets_dead"])
@event_listener("revealroles_role")
def on_revealroles_role(evt, cli, var, nickname, role):
if role == "dullahan" and nickname in TARGETS:
targets = TARGETS[nickname] - var.DEAD
if targets:
evt.data["special_case"].append("need to kill {0}".format(", ".join(TARGETS[nickname] - var.DEAD)))
else:
evt.data["special_case"].append("All targets dead")
@event_listener("begin_day")
def on_begin_day(evt, cli, var):
KILLS.clear()
@event_listener("reset")
def on_reset(evt, var):
KILLS.clear()
TARGETS.clear()
# vim: set sw=4 expandtab:
|
|
import sys,os
import csv
import pickle
from math import log, sqrt
from collections import OrderedDict
from mystructs import Rel, RelProb, WordProb
words_path = "/Users/liangchen/Documents/Projects/NLI/word_set.p"
rels_path = "/Users/liangchen/Documents/Projects/NLI/rel_set.p"
line_to_file_path = "/Users/liangchen/Documents/Projects/NLI/eval_line_to_file.p"
train_feature_list_path = "/Users/liangchen/Documents/Projects/NLI/features/train.txt"
eval_feature_list_path = "/Users/liangchen/Documents/Projects/NLI/features/eval.txt"
train_path_prefix = "/Users/liangchen/Documents/Projects/NLI/data/essays/train"
eval_path_prefix = "/Users/liangchen/Documents/Projects/NLI/data/essays/dev"
train_parsed_path = train_path_prefix + "/parsed"
eval_parsed_path = eval_path_prefix + "/parsed"
train_label_path = "/Users/liangchen/Documents/Projects/NLI/data/labels/train/labels.train.csv"
eval_label_path = "/Users/liangchen/Documents/Projects/NLI/data/labels/dev/labels.dev.csv"
labels = []
train_label_dict = {}
eval_label_dict = {}
all_words = OrderedDict()
all_rels = OrderedDict()
cur_line = 0
line_to_file = {}
n_sents = 0
df_word_dict = {}
df_rel_dict = {}
def pause():
raw_input("")
def read_labels():
with open(train_label_path, "rb") as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for row in reader:
                train_label_dict[row[0]] = row[-1]  # map test taker id to native language (L1) label
if row[-1] not in labels:
labels.append(row[-1])
with open(eval_label_path, "rb") as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for row in reader:
eval_label_dict[row[0]] = row[-1]
def add_single_sent_features(sent_filename, file_index):
with open(sent_filename, "r") as file:
print "processing " + sent_filename
try:
while True:
_ = next(file)
head = next(file).strip().split()
rel = next(file).strip()
dep = next(file).strip().split()
# convert all words to lower cases
head = [s.lower() for s in head]
dep = [s.lower() for s in dep]
r = Rel(head[0], dep[0]) #using real words instead of tags
if r not in all_rels:
all_rels[r] = len(all_rels) + 1
if dep[0] not in all_words:
all_words[dep[0]] = len(all_words) + 1
except Exception as e:
pass
def cal_df_in_single_sent(sent_filename):
with open(sent_filename, "r") as file:
w_visited = set()
r_visited = set()
while True:
try:
_ = next(file)
head = next(file).strip().split()
rel = next(file).strip()
dep = next(file).strip().split()
except Exception as e:
break
head = [s.lower() for s in head]
dep = [s.lower() for s in dep]
r = Rel(head[0], dep[0]) #using real words instead of tags
global df_rel_dict, df_word_dict
if r not in r_visited:
r_visited.add(r)
if r in df_rel_dict:
df_rel_dict[r] = df_rel_dict[r] + 1
else:
df_rel_dict[r] = 1
if dep[0] not in w_visited:
w_visited.add(dep[0])
if dep[0] in df_word_dict:
df_word_dict[dep[0]] = df_word_dict[dep[0]] + 1
else:
df_word_dict[dep[0]] = 1
def idf_smooth(df):
global n_sents
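    # Smoothed inverse document frequency, e.g. with n_sents = 999 and df = 9
    # this is log(1000 / 10) + 1 = ln(100) + 1 ~= 5.61 (natural log).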
return log((1+n_sents) / (1+df)) + 1
def write_single_sent_feature(out_file, label_dict, sent_filename, file_index, map_to_file):
with open(sent_filename, "r") as file:
label = label_dict[file_index]
label_index = int(labels.index(label))
sent_dict = {}
sent_words = 0
while True:
try:
_ = next(file)
head = next(file).strip().split()
rel = next(file).strip()
dep = next(file).strip().split()
sent_words = sent_words + 1
except StopIteration:
break
head = [s.lower() for s in head]
dep = [s.lower() for s in dep]
r = Rel(head[0], dep[0]) #using real words instead of tags
# doesn't consider unknown words for now
if dep[0] in all_words:
if dep[0] in sent_dict:
sent_dict[dep[0]] = sent_dict[dep[0]] + 1
else:
sent_dict[dep[0]] = 1
if r in all_rels:
if r in sent_dict:
sent_dict[r] = sent_dict[r] + 1
else:
sent_dict[r] = 1
indices = [all_words[x] if isinstance(x, basestring) else all_rels[x]+len(all_words) for x in sent_dict] #1-based indexing
##tf-idf (sentence level)
global df_word_dict, df_rel_dict
values = [float(sent_dict[x]) * idf_smooth(float(df_word_dict[x])) if isinstance(x, basestring) else float(sent_dict[x]) * idf_smooth(float(df_rel_dict[x])) for x in sent_dict]
ss_values = sum(map(lambda x:x*x, values))
values = [x/float(sqrt(ss_values)) for x in values]
ind_val_pairs = zip(indices, values)
ind_val_pairs.sort(key=lambda x: x[0])
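        # Each output line is written in LIBSVM-style sparse format:
        # "<label_index> <feature_index>:<tfidf_value> ...", sorted by index.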
print "saving features for file: " + sent_filename
with open(out_file, "a") as feature_list_file:
text = str(label_index)
for i, v in ind_val_pairs:
text = text + " " + str(i) + ":" + str(v)
text = text + '\n'
feature_list_file.write(text)
global cur_line, line_to_file
cur_line = cur_line + 1
if map_to_file:
line_to_file[cur_line] = file_index
def main():
global all_words, all_rels, n_sents
read_labels()
if os.path.isfile(words_path) and os.path.isfile(rels_path):
all_words = pickle.load(open(words_path, "rb"))
all_rels = pickle.load(open(rels_path, "rb"))
else:
for subdir, dirs, files in os.walk(train_parsed_path):
for filename in files:
in_filename = os.path.join(subdir,filename)
add_single_sent_features(in_filename, filename[:5])
pickle.dump(all_words, open(words_path, "wb"))
pickle.dump(all_rels, open(rels_path, "wb"))
##clear feature files
open(train_feature_list_path, "w").close()
open(eval_feature_list_path, "w").close()
for subdir, dirs, files in os.walk(train_parsed_path):
for filename in files:
n_sents = n_sents + 1
if n_sents % 10000 == 0:
print n_sents
in_filename = os.path.join(subdir,filename)
cal_df_in_single_sent(in_filename)
print "total number of sentences is: ", n_sents
for subdir, dirs, files in os.walk(train_parsed_path):
for filename in files:
in_filename = os.path.join(subdir,filename)
write_single_sent_feature(train_feature_list_path, train_label_dict, in_filename, filename[:5], False)
for subdir, dirs, files in os.walk(eval_parsed_path):
for filename in files:
in_filename = os.path.join(subdir,filename)
write_single_sent_feature(eval_feature_list_path, eval_label_dict, in_filename, filename[:5], True)
pickle.dump(line_to_file, open(line_to_file_path, "wb"))
if __name__ == "__main__":
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pytest
import data_pipeline.extractor.extractor
import tests.unittest_utils as unittest_utils
import data_pipeline.constants.const as const
import data_pipeline.audit.custom_orm as custom_orm
from data_pipeline.audit.custom_orm import (ProcessControl,
ProcessControlDetail,
SourceSystemProfile)
EXPECTED_PROCESS_CONTROL_INSERT_SQL = """
INSERT INTO ctl.process_control (
process_code,
duration,
comment,
status,
process_starttime,
process_endtime,
infolog,
errorlog,
profile_name,
profile_version,
process_name,
min_lsn,
max_lsn,
filename,
executor_run_id,
executor_status,
source_system_code,
source_system_type,
source_region,
target_system,
target_region,
target_system_type,
object_list,
total_count
) VALUES (
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s
) RETURNING id"""
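# The SQL literal above must match, character for character, the INSERT that
# ProcessControl.insert() builds, because the tests below assert on the exact
# statement passed to the mocked db.execute().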
def get_expected_process_control_values(mocker):
return ['InitSync', 0, '', 'IN_PROGRESS', mocker.ANY, mocker.ANY, '', '',
'myprofile', 1, 'InitSync', 123, 456, mocker.ANY, 0, '',
'myprofile', 'oracle', 'sys', 'myprofile', 'ctl', 'postgres', '', 0]
EXPECTED_PROCESS_CONTROL_DETAIL_INSERT_SQL = """
INSERT INTO ctl.process_control_detail (
process_code,
duration,
comment,
status,
process_starttime,
process_endtime,
infolog,
errorlog,
run_id,
object_schema,
object_name,
source_row_count,
insert_row_count,
update_row_count,
delete_row_count,
bad_row_count,
alter_count,
create_count,
delta_starttime,
delta_endtime,
delta_startlsn,
delta_endlsn,
error_message,
query_condition
) VALUES (
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s
) RETURNING id"""
def get_expected_process_control_detail_insert_values(mocker):
return ['InitSync', 0, '', 'IN_PROGRESS', mocker.ANY, mocker.ANY, '', '',
1, '', '', 0, 1, 2, 0, 0, 3, 4, mocker.ANY, mocker.ANY,
'', '', '', '']
EXPECTED_SOURCE_SYSTEM_PROFILE_INSERT_SQL = """
INSERT INTO ctl.source_system_profile (
profile_name,
version,
source_system_code,
source_region,
target_region,
object_name,
object_seq,
min_lsn,
max_lsn,
active_ind,
history_ind,
applied_ind,
delta_ind,
last_run_id,
last_process_code,
last_status,
last_updated,
last_applied,
last_history_update,
notes
) VALUES (
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s
) RETURNING id"""
EXPECTED_SOURCE_SYSTEM_PROFILE_FIELDS = [
'id',
'profile_name',
'version',
'source_system_code',
'source_region',
'target_region',
'object_name',
'object_seq',
'min_lsn',
'max_lsn',
'active_ind',
'history_ind',
'applied_ind',
'delta_ind',
'last_run_id',
'last_process_code',
'last_status',
'last_updated',
'last_applied',
'last_history_update',
'notes'
]
def get_expected_source_system_profile_values(mocker):
return ['myprofile', 1, 'myprofile', 'sys', 'ctl', '', 0, 123, 456, '',
'', '', '', 0, '', '', mocker.ANY, None, None, '']
mock_no_query_results = None
mock_single_query_results = None
mock_multi_query_results = None
expected_query_result_values = None
expected_query_result_fields = None
@pytest.fixture()
def setup(tmpdir, mocker):
global mock_no_query_results
global mock_single_query_results
global mock_multi_query_results
global expected_query_result_values
global expected_query_result_fields
mockargv_config = unittest_utils.get_default_argv_config(tmpdir)
mockargv = mocker.Mock(**mockargv_config)
unittest_utils.setup_logging(mockargv.workdirectory)
mockcursor_config = {'fetchone.return_value': [1] }
mockcursor = mocker.Mock(**mockcursor_config)
db_config = {'cursor': mockcursor,
'execute_query.side_effect': execute_query_se}
mockdb = mocker.Mock(**db_config)
mock_dbfactory_build = mocker.patch(
'data_pipeline.audit.custom_orm.dbfactory.build')
mock_dbfactory_build.return_value = mockdb
expected_query_result_values = (
1, 'myprofile', 1, 'myprofile', 'sys', 'ctl', '', 0, 0, 0, '',
'', '', '', 0, '', '', '', '', '', '')
expected_query_result_fields = EXPECTED_SOURCE_SYSTEM_PROFILE_FIELDS
assert (len(expected_query_result_values) ==
len(expected_query_result_fields))
mock_no_query_results_config = {
'fetchall.return_value': [],
'get_col_names.return_value': expected_query_result_fields
}
mock_no_query_results = mocker.Mock(**mock_no_query_results_config)
mock_single_query_results_config = {
'fetchall.return_value': [expected_query_result_values],
'get_col_names.return_value': expected_query_result_fields
}
mock_single_query_results = mocker.Mock(**mock_single_query_results_config)
mock_multi_query_results_config = {
'fetchall.return_value': [expected_query_result_values,
expected_query_result_values],
'get_col_names.return_value': expected_query_result_fields
}
mock_multi_query_results = mocker.Mock(**mock_multi_query_results_config)
yield (mockdb, mockargv)
@pytest.fixture()
def setup_test_constructor(tmpdir, mocker):
mockargv_config = unittest_utils.get_default_argv_config(tmpdir)
mockargv = mocker.Mock(**mockargv_config)
unittest_utils.setup_logging(mockargv.workdirectory)
yield (mockargv)
def execute_query_se(query, arraysize, bind_values):
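    # Side effect for the mocked execute_query(): return the no-result,
    # single-result or multi-result mock depending on which SELECT the ORM
    # issued, so the select() tests below can exercise each case.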
global mock_no_query_results
global mock_single_query_results
global mock_multi_query_results
select_no_result = """
SELECT *
FROM ctl.source_system_profile
WHERE 1=1
AND object_name = %s
AND profile_name = %s
AND active_ind = %s"""
select_sql_single_result = """
SELECT *
FROM ctl.source_system_profile
WHERE 1=1
AND object_name = %s
AND profile_name = %s"""
select_sql_multi_result = """
SELECT *
FROM ctl.source_system_profile
WHERE 1=1
AND profile_name = %s"""
if query == select_no_result:
return mock_no_query_results
elif query == select_sql_single_result:
return mock_single_query_results
elif query == select_sql_multi_result:
return mock_multi_query_results
else:
raise Exception("Unexpected query: {}".format(query))
def test_process_control_insert(mocker, setup):
(mockdb, mockargv) = setup
pc = ProcessControl(mockargv, const.INITSYNC)
pc.insert(
min_lsn=123,
max_lsn=456
)
mockdb.execute.assert_called_with(
EXPECTED_PROCESS_CONTROL_INSERT_SQL,
get_expected_process_control_values(mocker),
log_sql=mocker.ANY
)
def test_process_control_update(mocker, setup):
expected_update_sql = """
UPDATE ctl.process_control SET
comment = %s,
object_name = %s,
process_starttime = %s,
process_endtime = %s,
duration = %s
WHERE id = %s"""
(mockdb, mockargv) = setup
pc = ProcessControl(mockargv, const.INITSYNC)
pc.insert(
min_lsn=123,
max_lsn=456
)
pc.update(comment='foo', object_name='bar')
mockdb.assert_has_calls([
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(EXPECTED_PROCESS_CONTROL_INSERT_SQL,
get_expected_process_control_values(mocker),
log_sql=mocker.ANY),
mocker.call.cursor.fetchone(),
mocker.call.commit(),
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(
expected_update_sql,
['foo', 'bar', mocker.ANY, mocker.ANY, mocker.ANY, 1],
log_sql=mocker.ANY),
mocker.call.commit()
])
def test_process_control_delete(mocker, setup):
expected_delete_sql = """
DELETE
FROM ctl.process_control
WHERE id = %s"""
(mockdb, mockargv) = setup
pc = ProcessControl(mockargv, const.INITSYNC)
pc.insert(
min_lsn=123,
max_lsn=456
)
pc.delete()
mockdb.assert_has_calls([
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(EXPECTED_PROCESS_CONTROL_INSERT_SQL,
get_expected_process_control_values(mocker),
log_sql=mocker.ANY),
mocker.call.cursor.fetchone(),
mocker.call.commit(),
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(expected_delete_sql, [1], log_sql=mocker.ANY),
mocker.call.commit()
])
def test_process_control_detail_insert(mocker, setup):
(mockdb, mockargv) = setup
pc = ProcessControlDetail(mockargv, const.INITSYNC, 1)
pc.insert(
insert_row_count=1,
update_row_count=2,
alter_count=3,
create_count=4,
)
mockdb.execute.assert_called_with(
EXPECTED_PROCESS_CONTROL_DETAIL_INSERT_SQL,
get_expected_process_control_detail_insert_values(mocker),
log_sql=mocker.ANY)
def test_process_control_detail_update(mocker, setup):
expected_update_sql = """
UPDATE ctl.process_control_detail SET
comment = %s,
object_name = %s,
process_starttime = %s,
process_endtime = %s,
duration = %s
WHERE id = %s"""
(mockdb, mockargv) = setup
pc = ProcessControlDetail(mockargv, const.INITSYNC, 1)
pc.insert(
insert_row_count=1,
update_row_count=2,
alter_count=3,
create_count=4,
)
pc.update(comment='foo', object_name='bar')
mockdb.assert_has_calls([
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(
EXPECTED_PROCESS_CONTROL_DETAIL_INSERT_SQL,
get_expected_process_control_detail_insert_values(mocker),
log_sql=mocker.ANY),
mocker.call.cursor.fetchone(),
mocker.call.commit(),
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(expected_update_sql,
['foo', 'bar', mocker.ANY, mocker.ANY, mocker.ANY, 1],
log_sql=mocker.ANY),
mocker.call.commit()
])
def test_process_control_detail_delete(mocker, setup):
expected_delete_sql = """
DELETE
FROM ctl.process_control_detail
WHERE id = %s"""
(mockdb, mockargv) = setup
pc = ProcessControlDetail(mockargv, const.INITSYNC, 1)
pc.insert(
insert_row_count=1,
update_row_count=2,
alter_count=3,
create_count=4,
)
pc.delete()
mockdb.assert_has_calls([
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(
EXPECTED_PROCESS_CONTROL_DETAIL_INSERT_SQL,
get_expected_process_control_detail_insert_values(mocker),
log_sql=mocker.ANY),
mocker.call.cursor.fetchone(),
mocker.call.commit(),
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(expected_delete_sql, [1], log_sql=mocker.ANY),
mocker.call.commit()
])
def test_source_system_profile_select_none(mocker, setup):
(mockdb, mockargv) = setup
ssp = SourceSystemProfile(mockargv)
success = ssp.select(
profile_name='foo',
object_name='bar',
active_ind='N'
)
assert success == False
def test_source_system_profile_select_single(mocker, setup):
(mockdb, mockargv) = setup
ssp = SourceSystemProfile(mockargv)
success = ssp.select(
profile_name='foo',
object_name='bar'
)
assert success == True
def test_source_system_profile_select_multi(mocker, setup):
(mockdb, mockargv) = setup
ssp = SourceSystemProfile(mockargv)
success = ssp.select(
profile_name='foo'
)
assert success == False
def test_source_system_profile_insert(mocker, setup):
(mockdb, mockargv) = setup
ssp = SourceSystemProfile(mockargv)
ssp.insert(
min_lsn=123,
max_lsn=456
)
mockdb.execute.assert_called_with(
EXPECTED_SOURCE_SYSTEM_PROFILE_INSERT_SQL,
get_expected_source_system_profile_values(mocker),
log_sql=mocker.ANY)
def test_source_system_profile_update(mocker, setup):
expected_update_sql = """
UPDATE ctl.source_system_profile SET
object_name = %s,
source_region = %s
WHERE id = %s"""
(mockdb, mockargv) = setup
ssp = SourceSystemProfile(mockargv)
ssp.insert(
min_lsn=123,
max_lsn=456
)
ssp.update(source_region='foo', object_name='bar')
mockdb.assert_has_calls([
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(EXPECTED_SOURCE_SYSTEM_PROFILE_INSERT_SQL,
mocker.ANY, log_sql=mocker.ANY),
mocker.call.cursor.fetchone(),
mocker.call.commit(),
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(expected_update_sql, ['bar', 'foo', 1], log_sql=mocker.ANY),
mocker.call.commit()
])
def test_source_system_profile_delete(mocker, setup):
expected_delete_sql = """
DELETE
FROM ctl.source_system_profile
WHERE id = %s"""
(mockdb, mockargv) = setup
ssp = SourceSystemProfile(mockargv)
ssp.insert(
min_lsn=123,
max_lsn=456
)
ssp.delete()
mockdb.assert_has_calls([
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(EXPECTED_SOURCE_SYSTEM_PROFILE_INSERT_SQL,
mocker.ANY, log_sql=mocker.ANY),
mocker.call.cursor.fetchone(),
mocker.call.commit(),
mocker.call.closed(),
mocker.call.connect(mocker.ANY),
mocker.call.execute(expected_delete_sql, [1], log_sql=mocker.ANY),
mocker.call.commit()
])
def test_process_control_constructor(setup_test_constructor):
(mockargv) = setup_test_constructor
obj = ProcessControl(mockargv, const.INITSYNC)
assert obj is not None
def test_process_control_detail_constructor(setup_test_constructor):
(mockargv) = setup_test_constructor
fake_process_control_id = 1
obj = ProcessControlDetail(mockargv, const.INITSYNC, fake_process_control_id)
assert obj is not None
def test_source_system_profile_constructor(setup_test_constructor):
(mockargv) = setup_test_constructor
obj = SourceSystemProfile(mockargv)
assert obj is not None
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update address command."""
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
class TestUpdateAddress(TestBrokerCommand):
def test_100_update_reverse(self):
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com",
comments="Some address comments")
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com",
"--reverse_ptr", "arecord14.aqd-unittest.ms.com",
"--comments", "Some address comments"] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify()
def test_105_verify_arecord15(self):
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Comments: Some address comments", command)
self.matchoutput(out, "Reverse PTR: arecord14.aqd-unittest.ms.com",
command)
def test_105_search_ptr(self):
command = ["search", "dns",
"--reverse_ptr", "arecord14.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "arecord15.aqd-unittest.ms.com", command)
def test_105_search_override(self):
command = ["search", "dns", "--reverse_override"]
out = self.commandtest(command)
self.matchoutput(out, "arecord15.aqd-unittest.ms.com", command)
def test_108_clear_comments(self):
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com",
comments="")
self.noouttest(["update_address",
"--fqdn", "arecord15.aqd-unittest.ms.com",
"--comments", ""] + self.valid_just_tcm)
self.dsdb_verify()
def test_109_verify_comments(self):
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Comments", command)
def test_110_clear_ptr_override(self):
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com",
"--reverse_ptr", "arecord15.aqd-unittest.ms.com"] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify(empty=True)
def test_115_verify_arecord15(self):
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Reverse", command)
def test_115_verify_search(self):
command = ["search", "dns", "--reverse_override"]
out = self.commandtest(command)
self.matchclean(out, "arecord15.aqd-unittest.ms.com", command)
def test_120_update_ip(self):
ip = self.net["unknown0"].usable[-1]
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com", ip=ip)
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com", "--ip", ip] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify()
def test_125_verify_arecord15(self):
ip = self.net["unknown0"].usable[-1]
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "IP: %s" % ip, command)
def test_129_fix_ip(self):
# Change the IP address back so as not to confuse other parts of the test suite
ip = self.net["unknown0"].usable[15]
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com", ip=ip)
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com", "--ip", ip] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify()
def test_130_update_dyndhcp_noop(self):
ip = self.net["dyndhcp0"].usable[12]
command = ["update", "address", "--fqdn", self.dynname(ip),
"--reverse_ptr", self.dynname(ip)] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify(empty=True)
def test_135_verify_dyndhcp(self):
ip = self.net["dyndhcp0"].usable[12]
command = ["show", "fqdn", "--fqdn", self.dynname(ip)]
out = self.commandtest(command)
self.matchclean(out, "Reverse", command)
def test_140_restricted_reverse(self):
command = ["update", "address",
"--fqdn", "arecord17.aqd-unittest.ms.com",
"--reverse_ptr", "reverse2.restrict.aqd-unittest.ms.com"] + self.valid_just_tcm
err = self.statustest(command)
self.matchoutput(err,
"WARNING: Will create a reference to "
"reverse2.restrict.aqd-unittest.ms.com, but trying to "
"resolve it resulted in an error: Name or service "
"not known",
command)
self.dsdb_verify(empty=True)
def test_141_verify_reverse(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "arecord17.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
"Reverse PTR: reverse2.restrict.aqd-unittest.ms.com",
command)
self.matchclean(out, "reverse.restrict.aqd-unittest.ms.com", command)
command = ["search", "dns", "--record_type", "reserved_name"]
out = self.commandtest(command)
self.matchclean(out, "reverse.restrict", command)
self.matchoutput(out, "reverse2.restrict.aqd-unittest.ms.com", command)
def test_145_alias_reverse(self):
command = ["update", "address",
"--fqdn", "arecord17.aqd-unittest.ms.com",
"--reverse_ptr", "alias2host.aqd-unittest.ms.com"] + self.valid_just_tcm
self.noouttest(command)
def test_146_verify_reverse(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "arecord17.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
"Reverse PTR: alias2host.aqd-unittest.ms.com",
command)
def test_150_address_alias_reverse(self):
command = ["update", "address",
"--fqdn", "arecord17.aqd-unittest.ms.com",
"--reverse_ptr", "addralias1.aqd-unittest.ms.com"] + self.valid_just_tcm
self.noouttest(command)
def test_151_verify_reverse(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "arecord17.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
"Reverse PTR: addralias1.aqd-unittest.ms.com",
command)
self.matchclean(out, "alias2host.aqd-unittest.ms.com", command)
def test_200_update_dyndhcp(self):
ip = self.net["dyndhcp0"].usable[12]
command = ["update", "address", "--fqdn", self.dynname(ip),
"--reverse_ptr", "unittest20.aqd-unittest.ms.com"] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "The reverse PTR record cannot be set for DNS "
"records used for dynamic DHCP.", command)
def test_200_ip_conflict(self):
ip = self.net["unknown0"].usable[14]
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com", "--ip", ip] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "IP address %s is already used by DNS record "
"arecord14.aqd-unittest.ms.com." % ip, command)
def test_200_update_primary(self):
command = ["update", "address",
"--fqdn", "unittest00.one-nyp.ms.com",
"--ip", self.net["unknown0"].usable[-1]] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "DNS Record unittest00.one-nyp.ms.com is "
"a primary name, and its IP address cannot be "
"changed.", command)
def test_200_update_srvaddr(self):
command = ["update", "address",
"--fqdn", "unittest20.aqd-unittest.ms.com",
"--ip", self.net["unknown0"].usable[-1]] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out,
"DNS Record unittest20.aqd-unittest.ms.com is a "
"service address, use the update_service_address "
"command to change it.",
command)
def test_200_update_used(self):
command = ["update", "address",
"--fqdn", "unittest20-e1.aqd-unittest.ms.com",
"--ip", self.net["unknown0"].usable[-1]] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "DNS Record unittest20-e1.aqd-unittest.ms.com is "
"already used by the following interfaces, and its "
"IP address cannot be changed: "
"unittest20.aqd-unittest.ms.com/eth1.",
command)
def test_300_update_no_ttl(self):
command = ["update", "address",
"--fqdn", "arecord40.aqd-unittest.ms.com",
"--clear_ttl"] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify(empty=True)
def test_320_verify_ttl(self):
command = ["show", "fqdn", "--fqdn", "arecord40.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "TTL", command)
def test_330_update_new_ttl(self):
command = ["update", "address",
"--fqdn", "arecord40.aqd-unittest.ms.com",
"--ttl", "600"] + self.valid_just_tcm
self.noouttest(command)
self.dsdb_verify(empty=True)
def test_340_verify_ttl(self):
command = ["show", "fqdn", "--fqdn", "arecord40.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "TTL: 600", command)
def test_430_update_grn(self):
command = ["update", "address",
"--fqdn", "arecord50.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/unittest"] + self.valid_just_tcm
self.noouttest(command)
def test_440_verify_update_grn(self):
command = ["show", "fqdn",
"--fqdn", "arecord50.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest", command)
def test_450_update_eon_id(self):
command = ["update", "address",
"--fqdn", "arecord51.aqd-unittest.ms.com",
"--eon_id", "2"] + self.valid_just_tcm
self.noouttest(command)
def test_460_verify_update_eon_id(self):
command = ["show", "fqdn",
"--fqdn", "arecord51.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateAddress)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
# NOTE(GheRivero): Copied from oslo_incubator before getting removed in
# Change-Id: If15b77d31a8c615aad8fca30f6dd9928da2d08bb
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo_config import cfg
import oslo_i18n
from oslo_utils import importutils
import six
import stevedore.named
oslo_i18n.install('ironic')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
raise
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
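# Illustrative sketch (added for clarity; module and group names are hypothetical):
#   opts_by_group = {'DEFAULT': [('mypkg.netconf', [opt_a, opt_b])],
#                    'database': [('mypkg.db.options', [opt_c])]}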
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
on_load_failure_callback=raise_extension_exception
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"""Check if opt is in group."""
for value in group._opts.values():
if value['opt'] is opt:
return True
return False
def _guess_groups(opt):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
if 'list_opts' in dir(obj):
group_opts = getattr(obj, 'list_opts')()
# NOTE(GheRivero): Options without a defined group
# must be registered to the DEFAULT section
fixed_list = []
for section, opts in group_opts:
if not section:
section = 'DEFAULT'
fixed_list.append((section, opts))
return fixed_list
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in sorted(opts_by_module, key=lambda x: x[0]):
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
hostname = socket.gethostname()
fqdn = socket.getfqdn()
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (hostname, fqdn):
if 'host' in name:
return 'ironic'
elif value.endswith(hostname):
return value.replace(hostname, 'ironic')
elif value.endswith(fqdn):
return value.replace(fqdn, 'ironic')
elif value.strip() != value:
return '"%s"' % value
return value
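# Illustrative sketch (added, not part of the original generator): with
# name='host' and value equal to the local hostname, _sanitize_default()
# returns the placeholder 'ironic'; a value under sys.prefix is rewritten
# to a '/usr/...' path before being returned.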
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
else:
_print_type(opt_type, opt_name, opt_default)
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def _print_type(opt_type, opt_name, opt_default):
if opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
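# Illustrative sketch (added; option names are hypothetical): a StrOpt named
# 'auth_strategy' with default 'keystone' is emitted as '#auth_strategy=keystone',
# while a BoolOpt with default True is emitted lower-cased, e.g. '#debug=true'.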
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from nova import exception
from nova.i18n import _
# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as a whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. It must be made clear in the documentation as
# to what is a backwards compatible change and what is a backwards
# incompatible one.
#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
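# For example (illustrative, not part of the recorded history): adding a new
# optional response field would bump only Y (e.g. 2.45 -> 2.46), while a
# change that breaks every existing client would bump X.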
REST_API_VERSION_HISTORY = """REST API Version History:
* 2.1 - Initial version. Equivalent to v2.0 code
* 2.2 - Adds (keypair) type parameter for os-keypairs plugin
Fixes success status code for create/delete a keypair method
* 2.3 - Exposes additional os-extended-server-attributes
Exposes delete_on_termination for os-extended-volumes
* 2.4 - Exposes reserved field in os-fixed-ips.
* 2.5 - Allow server search option ip6 for non-admin
* 2.6 - Consolidate the APIs for getting remote consoles
* 2.7 - Check flavor type before adding tenant access.
* 2.8 - Add new protocol for VM console (mks)
* 2.9 - Exposes lock information in server details.
* 2.10 - Allow admins to query, create and delete keypairs owned by any
user.
* 2.11 - Exposes forced_down attribute for os-services
* 2.12 - Exposes VIF net_id in os-virtual-interfaces
* 2.13 - Add project id and user id information for os-server-groups API
* 2.14 - Remove onSharedStorage from evacuate request body and remove
adminPass from the response body
* 2.15 - Add soft-affinity and soft-anti-affinity policies
* 2.16 - Exposes host_status for servers/detail and servers/{server_id}
* 2.17 - Add trigger_crash_dump to server actions
* 2.18 - Makes project_id optional in v2.1
* 2.19 - Allow user to set and get the server description
* 2.20 - Add attach and detach volume operations for instances in shelved
and shelved_offloaded state
* 2.21 - Make os-instance-actions read deleted instances
* 2.22 - Add API to force live migration to complete
* 2.23 - Add index/show API for server migrations.
Also add migration_type for /os-migrations and add ref link for it
when the migration is an in-progress live migration.
* 2.24 - Add API to cancel a running live migration
* 2.25 - Make block_migration support 'auto' and remove
disk_over_commit for os-migrateLive.
* 2.26 - Adds support of server tags
* 2.27 - Adds support for new-style microversion headers while
keeping support for the original style.
* 2.28 - Changes compute_node.cpu_info from string to object
* 2.29 - Add a force flag in evacuate request body and change the
behaviour for the host flag by calling the scheduler.
* 2.30 - Add a force flag in live-migrate request body and change the
behaviour for the host flag by calling the scheduler.
* 2.31 - Fix os-console-auth-tokens to work for all console types.
* 2.32 - Add tag to networks and block_device_mapping_v2 in server boot
request body.
* 2.33 - Add pagination support for hypervisors.
* 2.34 - Checks before live-migration are made in asynchronous way.
os-Migratelive Action does not throw badRequest in case of
pre-checks failure. Verification result is available over
instance-actions.
* 2.35 - Adds keypairs pagination support.
* 2.36 - Deprecates all the API which proxy to another service and fping
API.
* 2.37 - Adds support for auto-allocating networking, otherwise known as
"Get me a Network". Also enforces server.networks.uuid to be in
UUID format.
* 2.38 - Add a condition to return HTTPBadRequest if invalid status is
provided for listing servers.
* 2.39 - Deprecates image-metadata proxy API
* 2.40 - Adds simple tenant usage pagination support.
* 2.41 - Return uuid attribute for aggregates.
* 2.42 - In the context of device tagging at instance boot time,
re-introduce the tag attribute that, due to bugs, was lost
starting with version 2.33 for block devices and starting with
version 2.37 for network interfaces.
* 2.43 - Deprecate os-hosts API
* 2.44 - The servers action addFixedIp, removeFixedIp, addFloatingIp,
removeFloatingIp and os-virtual-interfaces APIs are deprecated.
* 2.45 - The createImage and createBackup APIs no longer return a Location
header in the response for the snapshot image, they now return a
json dict in the response body with an image_id key and uuid
value.
"""
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
# NOTE(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
_MAX_API_VERSION = "2.45"
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs related to network, images and baremetal
# were deprecated starting from 2.36.
MAX_PROXY_API_SUPPORT_VERSION = '2.35'
MIN_WITHOUT_PROXY_API_SUPPORT_VERSION = '2.36'
# Starting from microversion 2.39 also image-metadata proxy API is deprecated.
MAX_IMAGE_META_PROXY_API_VERSION = '2.38'
MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION = '2.39'
# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
return APIVersionRequest(_MIN_API_VERSION)
def max_api_version():
return APIVersionRequest(_MAX_API_VERSION)
def is_supported(req, min_version=_MIN_API_VERSION,
max_version=_MAX_API_VERSION):
"""Check if API request version satisfies version restrictions.
:param req: request object
:param min_version: minimal version of API needed for correct
request processing
:param max_version: maximum version of API needed for correct
request processing
:returns: True if request satisfies minimal and maximum API version
requirements, False otherwise.
"""
return (APIVersionRequest(max_version) >= req.api_version_request >=
APIVersionRequest(min_version))
class APIVersionRequest(object):
"""This class represents an API Version Request with convenience
methods for manipulation and comparison of version
numbers that we need to do to implement microversions.
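Illustrative doctest examples (added sketch; the version values are arbitrary):

>>> APIVersionRequest("2.1") < APIVersionRequest("2.45")
True
>>> APIVersionRequest().is_null()
True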
"""
def __init__(self, version_string=None):
"""Create an API version request object.
:param version_string: String representation of APIVersionRequest.
Correct format is 'X.Y', where 'X' and 'Y' are int values.
None value should be used to create Null APIVersionRequest,
which is equal to 0.0
"""
self.ver_major = 0
self.ver_minor = 0
if version_string is not None:
match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
version_string)
if match:
self.ver_major = int(match.group(1))
self.ver_minor = int(match.group(2))
else:
raise exception.InvalidAPIVersionString(version=version_string)
def __str__(self):
"""Debug/Logging representation of object."""
return ("API Version Request Major: %s, Minor: %s"
% (self.ver_major, self.ver_minor))
def is_null(self):
return self.ver_major == 0 and self.ver_minor == 0
def _format_type_error(self, other):
return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") %
{"other": other, "cls": self.__class__})
def __lt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) <
(other.ver_major, other.ver_minor))
def __eq__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) ==
(other.ver_major, other.ver_minor))
def __gt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) >
(other.ver_major, other.ver_minor))
def __le__(self, other):
return self < other or self == other
def __ne__(self, other):
return not self.__eq__(other)
def __ge__(self, other):
return self > other or self == other
def matches(self, min_version, max_version):
"""Returns whether the version object represents a version
greater than or equal to the minimum version and less than
or equal to the maximum version.
@param min_version: Minimum acceptable version.
@param max_version: Maximum acceptable version.
@returns: boolean
If min_version is null then there is no minimum limit.
If max_version is null then there is no maximum limit.
If self is null then raise ValueError
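Illustrative doctest (added sketch):

>>> APIVersionRequest("2.10").matches(APIVersionRequest("2.1"),
...                                   APIVersionRequest("2.45"))
True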
"""
if self.is_null():
raise ValueError
if max_version.is_null() and min_version.is_null():
return True
elif max_version.is_null():
return min_version <= self
elif min_version.is_null():
return self <= max_version
else:
return min_version <= self <= max_version
def get_string(self):
"""Converts object to string representation which if used to create
an APIVersionRequest object results in the same version request.
"""
if self.is_null():
raise ValueError
return "%s.%s" % (self.ver_major, self.ver_minor)
|
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import sys
import subprocess
import textwrap
import warnings
from functools import partial
from distutils.sysconfig import get_python_inc
import setuptools
from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
MAJOR = 1
MINOR = 3
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of pywt.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('pywt/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load pywt/__init__.py
import types
from importlib.machinery import SourceFileLoader
loader = SourceFileLoader('pywt.version', 'pywt/version.py')
version = types.ModuleType(loader.name)
loader.exec_module(version)
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
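# Illustrative sketch (added): for an unreleased checkout this might yield
# something like ('1.3.0.dev0+abc1234', 'abc1234...'), where the hash fragment
# depends on the local git revision.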
def write_version_py(filename='pywt/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM PYWAVELETS SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
with open(filename, 'w') as a:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
if sys.platform == "darwin":
# Don't create resource files on OS X tar.
os.environ["COPY_EXTENDED_ATTRIBUTES_DISABLE"] = "true"
os.environ["COPYFILE_DISABLE"] = "true"
make_ext_path = partial(os.path.join, "pywt", "_extensions")
sources = ["c/common.c", "c/convolution.c", "c/wt.c", "c/wavelets.c", "c/cwt.c"]
sources = list(map(make_ext_path, sources))
source_templates = ["c/convolution.template.c", "c/wt.template.c", "c/cwt.template.c"]
source_templates = list(map(make_ext_path, source_templates))
headers = ["c/templating.h", "c/wavelets_coeffs.h",
"c/common.h", "c/convolution.h", "c/wt.h", "c/wavelets.h", "c/cwt.h"]
headers = list(map(make_ext_path, headers))
header_templates = ["c/convolution.template.h", "c/wt.template.h",
"c/wavelets_coeffs.template.h", "c/cwt.template.h"]
header_templates = list(map(make_ext_path, header_templates))
def get_cython_sources(use_cython):
cython_modules = ['_pywt', '_dwt', '_swt', '_cwt']
cython_sources = [('{0}.pyx' if use_cython else '{0}.c').format(module)
for module in cython_modules]
return cython_modules, cython_sources
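# Illustrative sketch (added): get_cython_sources(True) returns
# (['_pywt', '_dwt', '_swt', '_cwt'],
#  ['_pywt.pyx', '_dwt.pyx', '_swt.pyx', '_cwt.pyx']);
# with use_cython=False the sources end in '.c' instead.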
c_macros = [("PY_EXTENSION", None), ]
cython_macros = []
#c99_test_c = """#include <complex.h>
#int main(int argc, char** argv) { float complex a; return(0); }"""
if 'USE_C99_COMPLEX' in os.environ:
use_c99 = bool(int(os.environ['USE_C99_COMPLEX']))
else:
# default to False on non-posix platforms
# (MSVC doesn't support C99 complex)
if os.name == 'posix':
use_c99 = True
else:
use_c99 = False
if use_c99:
c_macros += [("HAVE_C99_COMPLEX", None), ]
# avoid compiler warnings: tell Cython to use C99 complex types
cython_macros += [('CYTHON_CCOMPLEX', 1), ]
pxi_defines = dict(HAVE_C99_CPLX=1)
py_defines = dict(_have_c99_complex=1)
else:
pxi_defines = dict(HAVE_C99_CPLX=0)
py_defines = dict(_have_c99_complex=0)
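# Illustrative sketch (added): forcing the C99 complex path on might look like
#   USE_C99_COMPLEX=1 python setup.py build_clib build_ext
# The environment variable is read above; the command itself is only an example.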
# write a file config.pxi that can be included by other .pyx files to determine
# whether or not C99 complex is supported at compile-time
defines_pxi = os.path.join(
os.path.dirname(__file__), 'pywt', '_extensions', 'config.pxi')
with open(defines_pxi, 'w') as fd:
fd.write("# Autogenerated file containing Cython compile-time defines\n\n")
for k, v in pxi_defines.items():
fd.write('DEF %s = %d\n' % (k.upper(), int(v)))
defines_py = os.path.join(
os.path.dirname(__file__), 'pywt', '_c99_config.py')
with open(defines_py, 'w') as fd:
fd.write("# Autogenerated file containing compile-time definitions\n\n")
for k, v in py_defines.items():
fd.write('%s = %d\n' % (k, int(v)))
cythonize_opts = {'language_level': '3'}
if os.environ.get("CYTHON_TRACE"):
cythonize_opts['linetrace'] = True
cython_macros.append(("CYTHON_TRACE_NOGIL", 1))
# By default C object files are rebuilt for every extension
# C files must be built once only for coverage to work
c_lib = ('c_wt', {'sources': sources,
'depends': source_templates + header_templates + headers,
'include_dirs': [make_ext_path("c"), get_python_inc()],
'macros': c_macros, })
def get_ext_modules(use_cython):
from numpy import get_include as get_numpy_include
cython_modules, cython_sources = get_cython_sources(use_cython)
ext_modules = [
Extension('pywt._extensions.{0}'.format(module),
sources=[make_ext_path(source)],
# Doesn't automatically rebuild if library changes
depends=c_lib[1]['sources'] + c_lib[1]['depends'],
include_dirs=[make_ext_path("c"), get_numpy_include()],
define_macros=c_macros + cython_macros,
libraries=[c_lib[0]],)
for module, source, in zip(cython_modules, cython_sources)
]
return ext_modules
from setuptools.command.develop import develop
class develop_build_clib(develop):
"""Ugly monkeypatching to get clib to build for development installs
See coverage comment above for why we don't just let libraries be built
via extensions.
All this is a copy of the relevant part of `install_for_development`
for current master (Sep 2016) of setuptools.
Note: if you want to build in-place with ``python setup.py build_ext``,
that will only work if you first do ``python setup.py build_clib``.
"""
def install_for_development(self):
self.run_command('egg_info')
# Build extensions in-place (the next 7 lines are the monkeypatch)
import glob
hitlist = glob.glob(os.path.join('build', '*', 'libc_wt.*'))
if hitlist:
# Remove existing clib - running build_clib twice in a row fails
os.remove(hitlist[0])
self.reinitialize_command('build_clib', inplace=1)
self.run_command('build_clib')
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
try:
self.install_site_py() # ensure that target dir is site-safe
except AttributeError:
# setuptools 0.49 removed install_site_py
pass
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
# create an .egg-link in the installation dir, pointing to our egg
from distutils import log
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
with open(self.egg_link, "w") as f:
f.write(self.egg_path + "\n" + self.setup_path)
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
Return a boolean value for whether or not to run the build (avoid
importing NumPy or parsing Cython and template files if False).
"""
args = sys.argv[1:]
if not args:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
for command in info_commands:
if command in args:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg',
'build_sphinx')
for command in good_commands:
if command in args:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in args:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install PyWavelets` (last PyWavelets release on PyPI)
"""))
return True
if '--help' in args or '-h' in sys.argv[1]:
print(textwrap.dedent("""
PyWavelets-specific help
------------------------
To install PyWavelets from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest PyWavelets release
from PyPI, use `pip install PyWavelets`.
For help with build/installation issues, please ask on the
PyWavelets mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/PyWavelets/pywt/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `>>> pywt.test()` (run tests for installed PyWavelets
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
with `twine upload -s <filenames>`.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in args:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# Commands that do more than print info, but also don't need Cython and
# template parsing.
other_commands = ['egg_info', 'install_egg_info', 'rotate']
for command in other_commands:
if command in args:
return False
# If we got here, we didn't detect what setup.py command was given
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates")
return True
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
def setup_package():
# Rewrite the version file every time
write_version_py()
metadata = dict(
name="PyWavelets",
maintainer="The PyWavelets Developers",
maintainer_email="pywavelets@googlegroups.com",
url="https://github.com/PyWavelets/pywt",
download_url="https://github.com/PyWavelets/pywt/releases",
license="MIT",
description="PyWavelets, wavelet transform module",
long_description="""\
PyWavelets is a Python wavelet transforms module that includes:
* nD Forward and Inverse Discrete Wavelet Transform (DWT and IDWT)
* 1D and 2D Forward and Inverse Stationary Wavelet Transform (Undecimated Wavelet Transform)
* 1D and 2D Wavelet Packet decomposition and reconstruction
* 1D Continuous Wavelet Transform
* Computing Approximations of wavelet and scaling functions
* Over 100 built-in wavelet filters and support for custom wavelets
* Single and double precision calculations
* Real and complex calculations
* Results compatible with Matlab Wavelet Toolbox (TM)
""",
keywords=["wavelets", "wavelet transform", "DWT", "SWT", "CWT", "scientific"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Libraries :: Python Modules"
],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
version=get_version_info()[0],
packages=['pywt', 'pywt._extensions', 'pywt.data'],
package_data={'pywt.data': ['*.npy', '*.npz'],
'pywt': ['tests/*.py', 'tests/data/*.npz',
'tests/data/*.py']},
libraries=[c_lib],
cmdclass={'develop': develop_build_clib, 'test': PyTest},
tests_require=['pytest'],
install_requires=["numpy>=1.17.3"],
python_requires=">=3.7",
)
if "--force" in sys.argv:
run_build = True
sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
if run_build:
# This imports numpy and Cython, so only do that if we're actually
# building and not for, e.g., pip grabbing metadata.
# See gh-397 for details.
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
if not os.path.exists(os.path.join('pywt', '_extensions', '_pywt.c')):
msg = ("Cython must be installed when working with a development "
"version of PyWavelets")
raise RuntimeError(msg)
ext_modules = get_ext_modules(USE_CYTHON)
if USE_CYTHON:
ext_modules = cythonize(ext_modules, compiler_directives=cythonize_opts)
metadata['ext_modules'] = ext_modules
setup(**metadata)
if __name__ == '__main__':
setup_package()
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.container import sync
from swift.common import utils
from swift.common.client import ClientException
utils.HASH_PATH_SUFFIX = 'endcap'
class FakeRing(object):
def __init__(self):
self.replica_count = 3
self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
for x in xrange(3)]
def get_nodes(self, account, container=None, obj=None):
return 1, list(self.devs)
class FakeContainerBroker(object):
def __init__(self, path, metadata=None, info=None, deleted=False,
items_since=None):
self.db_file = path
self.metadata = metadata if metadata else {}
self.info = info if info else {}
self.deleted = deleted
self.items_since = items_since if items_since else []
self.sync_point1 = -1
self.sync_point2 = -1
def get_info(self):
return self.info
def is_deleted(self):
return self.deleted
def get_items_since(self, sync_point, limit):
if sync_point < 0:
sync_point = 0
return self.items_since[sync_point:sync_point + limit]
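# Illustrative sketch (added): with items_since=['a', 'b', 'c'],
# get_items_since(-1, 2) returns ['a', 'b'] because a negative sync point
# is clamped to 0 before slicing.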
def set_x_container_sync_points(self, sync_point1, sync_point2):
self.sync_point1 = sync_point1
self.sync_point2 = sync_point2
class TestContainerSync(unittest.TestCase):
def test_Iter2FileLikeObject(self):
flo = sync._Iter2FileLikeObject(iter(['123', '4567', '89', '0']))
expect = '1234567890'
got = flo.read(2)
self.assertTrue(len(got) <= 2)
self.assertEquals(got, expect[:len(got)])
expect = expect[len(got):]
got = flo.read(5)
self.assertTrue(len(got) <= 5)
self.assertEquals(got, expect[:len(got)])
expect = expect[len(got):]
self.assertEquals(flo.read(), expect)
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
flo = sync._Iter2FileLikeObject(iter(['123', '4567', '89', '0']))
self.assertEquals(flo.read(), '1234567890')
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
def test_init(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(cs.object_ring is oring)
def test_run_forever(self):
# This runs runs_forever with fakes to succeed for two loops, the first
# causing a report but no interval sleep, the second no report but an
# interval sleep.
time_calls = [0]
sleep_calls = []
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # Elapsed time for "under interval" (no)
3602, # Start time
3603, # Is it report time (no)
3603, # Elapsed time for "under interval" (yes)
]
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_sleep(amount):
sleep_calls.append(amount)
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit because 'path' doesn't end
# with .db
return [('path', 'device', 'partition')]
orig_time = sync.time
orig_sleep = sync.sleep
orig_audit_location_generator = sync.audit_location_generator
try:
sync.time = fake_time
sync.sleep = fake_sleep
sync.audit_location_generator = fake_audit_location_generator
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.run_forever()
except Exception, err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.sleep = orig_sleep
sync.audit_location_generator = orig_audit_location_generator
self.assertEquals(time_calls, [9])
self.assertEquals(len(sleep_calls), 2)
self.assertTrue(sleep_calls[0] <= cs.interval)
self.assertTrue(sleep_calls[1] == cs.interval - 1)
self.assertEquals(audit_location_generator_calls, [2])
self.assertEquals(cs.reported, 3602)
def test_run_once(self):
# This runs run_once with fakes twice, the first causing an interim
# report, the second with no interim report.
time_calls = [0]
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # End report time
3602, # For elapsed
3602, # Start time
3603, # Is it report time (no)
3604, # End report time
3605, # For elapsed
]
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit because 'path' doesn't end
# with .db
return [('path', 'device', 'partition')]
orig_time = sync.time
orig_audit_location_generator = sync.audit_location_generator
try:
sync.time = fake_time
sync.audit_location_generator = fake_audit_location_generator
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.run_once()
self.assertEquals(time_calls, [6])
self.assertEquals(audit_location_generator_calls, [1])
self.assertEquals(cs.reported, 3602)
cs.run_once()
except Exception, err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.audit_location_generator = orig_audit_location_generator
self.assertEquals(time_calls, [10])
self.assertEquals(audit_location_generator_calls, [2])
self.assertEquals(cs.reported, 3604)
def test_container_sync_not_db(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
self.assertEquals(cs.container_failures, 0)
def test_container_sync_missing_db(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
def test_container_sync_not_my_db(self):
# Db could be there due to handoff replication so test that we ignore
# those.
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c'})
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1000 # Match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_deleted(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c'}, deleted=False)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c'}, deleted=True)
# This complete match will not cause any more container failures
# since the broker indicates deletion
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_no_to_or_key(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to or x-container-sync-key
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-key
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 2)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = []
# This complete match will cause a container failure since the
# sync-to won't validate as allowed.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This complete match will succeed completely since the broker
# get_items_since will return no new rows.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 3)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_stop_at(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_time = sync.time
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=['erroneous data'])
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This sync will fail since the items_since data is bad.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
# Set up fake times to make the sync short-circuit as having taken
# too long
fake_times = [
1.0, # Compute the time to move on
100000.0, # Compute if it's time to move on from first loop
100000.0] # Compute if it's time to move on from second loop
def fake_time():
return fake_times.pop(0)
sync.time = fake_time
# This same sync won't fail since it will look like it took so long
# as to be time to move on (before it ever actually tries to do
# anything).
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.time = orig_time
def test_container_first_loop(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for full syncing, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, 1)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for full syncing, ordinal is 0
# and all hashes are 1
return '\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because the two sync points haven't deviated enough yet
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because container_sync_row will fail since the row has no
# 'deleted' key
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because delete_object fails
self.assertEquals(cs.container_failures, 2)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
def fake_delete_object(*args, **kwargs):
pass
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because delete_object succeeds
self.assertEquals(cs.container_failures, 2)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_second_loop(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
# We'll ensure the first loop is always skipped by keeping the two
# sync points equal
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return '\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for second loop, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
def fake_delete_object(*args, **kwargs):
pass
sync.hash_path = fake_hash_path
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because row is missing 'deleted' key
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because row now has 'deleted' key and delete_object
# succeeds
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_sync_row_delete(self):
orig_delete_object = sync.delete_object
try:
def fake_delete_object(path, name=None, headers=None, proxy=None):
self.assertEquals(path, 'http://sync/to/path')
self.assertEquals(name, 'object')
self.assertEquals(headers,
{'x-container-sync-key': 'key', 'x-timestamp': '1.2'})
self.assertEquals(proxy, 'http://proxy')
sync.delete_object = fake_delete_object
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.proxy = 'http://proxy'
# Success
self.assertTrue(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 1)
exc = []
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(Exception('test exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 1)
self.assertEquals(len(exc), 1)
self.assertEquals(str(exc[-1]), 'test exception')
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 1)
self.assertEquals(len(exc), 2)
self.assertEquals(str(exc[-1]), 'test client exception')
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(ClientException('test client exception',
http_status=404))
raise exc[-1]
sync.delete_object = fake_delete_object
# Success because the object wasn't even found
self.assertTrue(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test client exception: 404')
finally:
sync.delete_object = orig_delete_object
def test_container_sync_row_put(self):
orig_shuffle = sync.shuffle
orig_put_object = sync.put_object
orig_direct_get_object = sync.direct_get_object
try:
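            # Stub shuffle so the node order used by container_sync_row is
            # deterministic for this test.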
sync.shuffle = lambda x: x
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
self.assertEquals(sync_to, 'http://sync/to/path')
self.assertEquals(name, 'object')
self.assertEquals(headers, {'x-container-sync-key': 'key',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': 'etagvalue'})
self.assertEquals(contents.read(), 'contents')
self.assertEquals(proxy, 'http://proxy')
sync.put_object = fake_put_object
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.proxy = 'http://proxy'
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'other-header': 'other header value',
'etag': '"etagvalue"', 'x-timestamp': '1.2'},
iter('contents'))
sync.direct_get_object = fake_direct_get_object
# Success as everything says it worked
self.assertTrue(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 1)
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'date': 'date value',
'last-modified': 'last modified value',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': '"etagvalue"'},
iter('contents'))
sync.direct_get_object = fake_direct_get_object
# Success as everything says it worked, also checks 'date' and
# 'last-modified' headers are removed and that 'etag' header is
# stripped of double quotes.
self.assertTrue(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
exc = []
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
exc.append(Exception('test exception'))
raise exc[-1]
sync.direct_get_object = fake_direct_get_object
# Fail due to completely unexpected exception
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertEquals(len(exc), 1)
self.assertEquals(str(exc[-1]), 'test exception')
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.direct_get_object = fake_direct_get_object
# Fail due to all direct_get_object calls failing
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertEquals(len(exc), 4)
self.assertEquals(str(exc[-1]), 'test client exception')
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'other-header': 'other header value',
'x-timestamp': '1.2', 'etag': '"etagvalue"'},
iter('contents'))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=401)
class FakeLogger(object):
def __init__(self):
self.err = ''
self.exc = ''
def info(self, err, *args, **kwargs):
self.err = err
def exception(self, exc, *args, **kwargs):
self.exc = exc
sync.direct_get_object = fake_direct_get_object
sync.put_object = fake_put_object
cs.logger = FakeLogger()
# Fail due to 401
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertTrue(cs.logger.err.startswith('Unauth '))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=404)
sync.put_object = fake_put_object
# Fail due to 404
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertTrue(cs.logger.err.startswith('Not found '))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=503)
sync.put_object = fake_put_object
# Fail due to 503
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertTrue(cs.logger.exc.startswith('ERROR Syncing '))
finally:
sync.shuffle = orig_shuffle
sync.put_object = orig_put_object
sync.direct_get_object = orig_direct_get_object
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of14']
class action_id(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = action_id.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = action_id()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("action_id {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class experimenter(action_id):
subtypes = {}
type = 65535
def __init__(self, experimenter=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[65535] = experimenter
class bsn(experimenter):
subtypes = {}
type = 65535
experimenter = 6035143
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 8)
subclass = bsn.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[6035143] = bsn
class bsn_checksum(bsn):
type = 65535
experimenter = 6035143
subtype = 4
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_checksum()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_checksum {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[4] = bsn_checksum
class bsn_gentable(bsn):
type = 65535
experimenter = 6035143
subtype = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_gentable()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 5)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_gentable {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[5] = bsn_gentable
class bsn_mirror(bsn):
type = 65535
experimenter = 6035143
subtype = 1
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_mirror()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 1)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_mirror {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[1] = bsn_mirror
class bsn_set_tunnel_dst(bsn):
type = 65535
experimenter = 6035143
subtype = 2
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_tunnel_dst()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_set_tunnel_dst {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[2] = bsn_set_tunnel_dst
class copy_ttl_in(action_id):
type = 12
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = copy_ttl_in()
_type = reader.read("!H")[0]
assert(_type == 12)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("copy_ttl_in {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[12] = copy_ttl_in
class copy_ttl_out(action_id):
type = 11
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = copy_ttl_out()
_type = reader.read("!H")[0]
assert(_type == 11)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("copy_ttl_out {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[11] = copy_ttl_out
class dec_mpls_ttl(action_id):
type = 16
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = dec_mpls_ttl()
_type = reader.read("!H")[0]
assert(_type == 16)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("dec_mpls_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[16] = dec_mpls_ttl
class dec_nw_ttl(action_id):
type = 24
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = dec_nw_ttl()
_type = reader.read("!H")[0]
assert(_type == 24)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("dec_nw_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[24] = dec_nw_ttl
class group(action_id):
type = 22
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = group()
_type = reader.read("!H")[0]
assert(_type == 22)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("group {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[22] = group
class nicira(experimenter):
subtypes = {}
type = 65535
experimenter = 8992
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!H", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = nicira.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = nicira()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
obj.subtype = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("nicira {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[8992] = nicira
class nicira_dec_ttl(nicira):
type = 65535
experimenter = 8992
subtype = 18
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!H", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = nicira_dec_ttl()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
_subtype = reader.read("!H")[0]
assert(_subtype == 18)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("nicira_dec_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
nicira.subtypes[18] = nicira_dec_ttl
class output(action_id):
type = 0
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = output()
_type = reader.read("!H")[0]
assert(_type == 0)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("output {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[0] = output
class pop_mpls(action_id):
type = 20
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_mpls()
_type = reader.read("!H")[0]
assert(_type == 20)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("pop_mpls {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[20] = pop_mpls
class pop_pbb(action_id):
type = 27
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_pbb()
_type = reader.read("!H")[0]
assert(_type == 27)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("pop_pbb {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[27] = pop_pbb
class pop_vlan(action_id):
type = 18
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_vlan()
_type = reader.read("!H")[0]
assert(_type == 18)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("pop_vlan {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[18] = pop_vlan
class push_mpls(action_id):
type = 19
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_mpls()
_type = reader.read("!H")[0]
assert(_type == 19)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("push_mpls {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[19] = push_mpls
class push_pbb(action_id):
type = 26
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_pbb()
_type = reader.read("!H")[0]
assert(_type == 26)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("push_pbb {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[26] = push_pbb
class push_vlan(action_id):
type = 17
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_vlan()
_type = reader.read("!H")[0]
assert(_type == 17)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("push_vlan {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[17] = push_vlan
class set_field(action_id):
type = 25
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_field()
_type = reader.read("!H")[0]
assert(_type == 25)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("set_field {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[25] = set_field
class set_mpls_ttl(action_id):
type = 15
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_mpls_ttl()
_type = reader.read("!H")[0]
assert(_type == 15)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("set_mpls_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[15] = set_mpls_ttl
class set_nw_ttl(action_id):
type = 23
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_ttl()
_type = reader.read("!H")[0]
assert(_type == 23)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("set_nw_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[23] = set_nw_ttl
class set_queue(action_id):
type = 21
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_queue()
_type = reader.read("!H")[0]
assert(_type == 21)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("set_queue {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action_id.subtypes[21] = set_queue
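# A minimal, commented-out sketch of the subtype dispatch above; OFReader from
# loxi.generic_util is assumed to provide the peek/read/slice interface these
# unpack() methods rely on:
#
#     buf = output().pack()                      # b'\x00\x00\x00\x04'
#     obj = action_id.unpack(loxi.generic_util.OFReader(buf))
#     assert isinstance(obj, output)             # dispatched on type == 0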
|
|
from django.test import TestCase
from chain.core.models import Unit, Metric, Device, ScalarSensor, Site, \
PresenceSensor, Person, Metadata
from chain.core.models import GeoLocation
from chain.core.api import HTTP_STATUS_SUCCESS, HTTP_STATUS_CREATED
from chain.core.hal import HALDoc
from chain.core import resources
from django.utils.timezone import now
from datetime import timedelta
import json
BASE_API_URL = '/'
ACCEPT_TAIL = 'application/xhtml+xml,application/xml;q=0.9,\
image/webp,*/*;q=0.8'
class ChainTestCase(TestCase):
    # added this flag so that writing the scalar data (on by default) can be
    # disabled while we're testing the influx data migration
write_scalar_data = True
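    # A hypothetical subclass (not part of this suite) could opt out of the
    # influx writes like so:
    #
    #     class MigrationTestCase(ChainTestCase):
    #         write_scalar_data = False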
def setUp(self):
self.unit = Unit(name='C')
self.unit.save()
self.temp_metric = Metric(name='temperature')
self.temp_metric.save()
self.setpoint_metric = Metric(name='setpoint')
self.setpoint_metric.save()
self.geo_locations = [
GeoLocation(elevation=50, latitude=42.847, longitude=72.917),
GeoLocation(elevation=-23.8, latitude=40.847, longitude=42.917)
]
for loc in self.geo_locations:
loc.save()
self.metadata = []
self.sites = [
Site(name='Test Site 1',
geo_location=self.geo_locations[0],
raw_zmq_stream='tcp://example.com:8372'),
Site(name='Test Site 2',
geo_location=self.geo_locations[1],
raw_zmq_stream='tcp://example.com:8172')
]
for site in self.sites:
site.save()
self.metadata.append(Metadata(key="Test",
value="Test Metadata 1",
timestamp=now().isoformat(),
content_object=site))
self.metadata.append(Metadata(key="Test",
value="Test Metadata 2",
timestamp=now().isoformat(),
content_object=site))
num_devices = 2 * len(self.sites)
self.devices = [Device(name='Thermostat %d' % i,
site=self.sites[i % len(self.sites)])
for i in range(0, num_devices)]
num_people = 2 * len(self.sites)
# self.people = [Person(first_name='John',
# last_name = 'Doe %d' % i,
# site=self.sites[i % len(self.sites)])
# for i in range(0, num_people)]
# for person in self.people:
# person.save()
self.sensors = []
for device in self.devices:
device.save()
self.metadata.append(Metadata(key="Test",
value="Test Metadata 1",
timestamp=now().isoformat(),
content_object=device))
self.metadata.append(Metadata(key="Test",
value="Test Metadata 2",
timestamp=now().isoformat(),
content_object=device))
self.sensors.append(ScalarSensor(device=device,
metric=self.temp_metric,
unit=self.unit))
self.sensors.append(ScalarSensor(device=device,
metric=self.setpoint_metric,
unit=self.unit))
self.scalar_data = []
for sensor in self.sensors:
sensor.save()
self.metadata.append(Metadata(key="Test",
value="Test Metadata 1",
timestamp=now().isoformat(),
content_object=sensor))
self.metadata.append(Metadata(key="Test",
value="Test Metadata 1",
timestamp=now().isoformat(),
content_object=sensor))
self.scalar_data.append({
'sensor': sensor,
'timestamp': now() - timedelta(minutes=2),
'value': 22.0})
self.scalar_data.append({
'sensor': sensor,
'timestamp': now() - timedelta(minutes=1),
'value': 23.0})
if self.write_scalar_data:
for data in self.scalar_data:
resources.influx_client.post_data(data['sensor'].device.site.id,
data['sensor'].device.id,
data['sensor'].id,
data['sensor'].metric,
data['value'],
data['timestamp'])
for metadata in self.metadata:
metadata.save()
def get_resource(self, url, mime_type='application/hal+json',
expect_status_code=HTTP_STATUS_SUCCESS,
check_mime_type=True,
check_vary_header=True,
should_cache=None):
accept_header = mime_type + ',' + ACCEPT_TAIL
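        # e.g. with the default mime_type this sends an Accept header that
        # lists application/hal+json first, followed by the fallback types in
        # ACCEPT_TAIL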
response = self.client.get(url,
HTTP_ACCEPT=accept_header,
HTTP_HOST='localhost')
self.assertEqual(response.status_code, expect_status_code)
if check_mime_type:
self.assertEqual(response['Content-Type'], mime_type)
if check_vary_header:
            # all resource responses should have the "Vary" header, which tells
            # intermediate caching servers that they need to include the Accept
            # header in their cache lookup key
            self.assertIn("Accept", response['Vary'])
if should_cache is not None:
if should_cache:
self.assertIn("max-age", response['Cache-Control'])
else:
self.assertFalse(response.has_header('Cache-Control'))
if response['Content-Type'] == 'application/hal+json':
return HALDoc(json.loads(response.content))
elif response['Content-Type'] == 'application/json':
return json.loads(response.content)
else:
return response.content
def create_resource(self, url, resource):
return self.post_resource(url, resource, HTTP_STATUS_CREATED)
def update_resource(self, url, resource):
return self.post_resource(url, resource, HTTP_STATUS_SUCCESS)
def post_resource(self, url, resource, expected_status):
mime_type = 'application/hal+json'
accept_header = mime_type + ',' + ACCEPT_TAIL
response = self.client.post(url, json.dumps(resource),
content_type=mime_type,
HTTP_ACCEPT=accept_header,
HTTP_HOST='localhost')
self.assertEqual(response.status_code, expected_status)
self.assertEqual(response['Content-Type'], mime_type)
if mime_type == 'application/hal+json':
response_data = json.loads(response.content)
if isinstance(response_data, list):
return [HALDoc(d) for d in response_data]
else:
return HALDoc(response_data)
elif mime_type == 'application/json':
return json.loads(response.content)
else:
return response.content
def get_sites(self, **kwargs):
root = self.get_resource(BASE_API_URL)
sites_url = root.links['ch:sites'].href
return self.get_resource(sites_url, **kwargs)
def get_a_site(self, **kwargs):
'''GETs a site through the API for testing'''
sites = self.get_sites()
self.assertIn('items', sites.links)
self.assertIn('href', sites.links.items[0])
site_url = sites.links.items[0].href
# following the link like a good RESTful client
return self.get_resource(site_url, **kwargs)
def get_devices(self, **kwargs):
site = self.get_a_site()
return self.get_resource(site.links['ch:devices'].href, **kwargs)
# def get_a_person(self):
# site = self.get_a_site()
# people = self.get_resource(site.links['ch:people'].href)
# return self.get_resource(people.links['items'][0].href)
#
def get_a_device(self, **kwargs):
'''GETs a device through the API for testing'''
devices = self.get_devices()
return self.get_resource(devices.links.items[0].href, **kwargs)
def get_sensors(self, **kwargs):
device = self.get_a_device()
return self.get_resource(device.links['ch:sensors'].href, **kwargs)
def get_a_sensor(self, **kwargs):
sensors = self.get_sensors()
return self.get_resource(sensors.links.items[0].href, **kwargs)
def create_a_sensor_of_type(self, sensor_type):
device = self.get_a_device()
sensors = self.get_resource(device.links['ch:sensors'].href)
sensor_url = sensors.links['createForm'].href
new_sensor = {
'sensor-type': sensor_type,
'metric': 'rfid',
'unit': 'N/A',
}
return self.create_resource(sensor_url, new_sensor)
def get_a_sensor_of_type(self, sensor_type):
sensors = self.get_sensors()
for link in sensors.links.items:
sensor = self.get_resource(link.href)
if sensor['sensor-type'] == sensor_type:
return sensor
return self.create_a_sensor_of_type(sensor_type)
def get_metadata(self):
site = self.get_a_site()
return self.get_resource(site.links['ch:metadata'].href)
def get_site_device_sensor(self):
return [self.get_a_site(), self.get_a_device(), self.get_a_sensor()]
|
|
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
# resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.5"
import re
import io
from PIL import Image, ImageFile, _binary
#
# --------------------------------------------------------------------
i32 = _binary.i32le
o32 = _binary.o32le
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
gs_windows_binary = None
import sys
if sys.platform.startswith('win'):
import shutil
if hasattr(shutil, 'which'):
which = shutil.which
else:
# Python < 3.3
import distutils.spawn
which = distutils.spawn.find_executable
for binary in ('gswin32c', 'gswin64c', 'gs'):
if which(binary) is not None:
gs_windows_binary = binary
break
else:
gs_windows_binary = False
def has_ghostscript():
if gs_windows_binary:
return True
if not sys.platform.startswith('win'):
import subprocess
try:
gs = subprocess.Popen(['gs', '--version'], stdout=subprocess.PIPE)
gs.stdout.read()
return True
except OSError:
# no ghostscript
pass
return False
def Ghostscript(tile, size, fp, scale=1):
"""Render an image using Ghostscript"""
# Unpack decoder tile
decoder, tile, offset, data = tile[0]
length, bbox = data
# Hack to support hi-res rendering
scale = int(scale) or 1
# orig_size = size
# orig_bbox = bbox
size = (size[0] * scale, size[1] * scale)
# resolution is dependent on bbox and size
res = (float((72.0 * size[0]) / (bbox[2]-bbox[0])),
float((72.0 * size[1]) / (bbox[3]-bbox[1])))
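    # e.g. a 720x360 pt bounding box rendered into size (1440, 720) gives
    # res == (144.0, 144.0), i.e. twice the nominal 72 dpi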
# print("Ghostscript", scale, size, orig_size, bbox, orig_bbox, res)
import os
import subprocess
import tempfile
out_fd, outfile = tempfile.mkstemp()
os.close(out_fd)
infile_temp = None
if hasattr(fp, 'name') and os.path.exists(fp.name):
infile = fp.name
else:
in_fd, infile_temp = tempfile.mkstemp()
os.close(in_fd)
infile = infile_temp
# ignore length and offset!
# ghostscript can read it
# copy whole file to read in ghostscript
with open(infile_temp, 'wb') as f:
# fetch length of fp
fp.seek(0, 2)
fsize = fp.tell()
# ensure start position
# go back
fp.seek(0)
lengthfile = fsize
while lengthfile > 0:
s = fp.read(min(lengthfile, 100*1024))
if not s:
break
lengthfile -= len(s)
f.write(s)
# Build ghostscript command
command = ["gs",
"-q", # quiet mode
"-g%dx%d" % size, # set output geometry (pixels)
"-r%fx%f" % res, # set input DPI (dots per inch)
"-dNOPAUSE -dSAFER", # don't pause between pages,
# safe mode
"-sDEVICE=ppmraw", # ppm driver
"-sOutputFile=%s" % outfile, # output file
"-c", "%d %d translate" % (-bbox[0], -bbox[1]),
# adjust for image origin
"-f", infile, # input file
]
if gs_windows_binary is not None:
if not gs_windows_binary:
raise WindowsError('Unable to locate Ghostscript on paths')
command[0] = gs_windows_binary
# push data through ghostscript
try:
gs = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
gs.stdin.close()
status = gs.wait()
if status:
raise IOError("gs failed (status %d)" % status)
im = Image.core.open_ppm(outfile)
finally:
try:
os.unlink(outfile)
if infile_temp:
os.unlink(infile_temp)
except:
pass
return im
class PSFile(object):
"""
Wrapper for bytesio object that treats either CR or LF as end of line.
"""
def __init__(self, fp):
self.fp = fp
self.char = None
def seek(self, offset, whence=0):
self.char = None
self.fp.seek(offset, whence)
def readline(self):
s = self.char or b""
self.char = None
c = self.fp.read(1)
while c not in b"\r\n":
s = s + c
c = self.fp.read(1)
self.char = self.fp.read(1)
# line endings can be 1 or 2 of \r \n, in either order
if self.char in b"\r\n":
self.char = None
return s.decode('latin-1')
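# A minimal sketch of how PSFile behaves (io.BytesIO here is just for
# illustration; any file-like object with read()/seek() works):
#
#     f = PSFile(io.BytesIO(b"%!PS-Adobe-3.0\r\n%%EndComments\n"))
#     f.readline()   # -> '%!PS-Adobe-3.0'
#     f.readline()   # -> '%%EndComments'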
def _accept(prefix):
return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
"""EPS File Parser for the Python Imaging Library"""
format = "EPS"
format_description = "Encapsulated Postscript"
mode_map = {1: "L", 2: "LAB", 3: "RGB"}
def _open(self):
(length, offset) = self._find_offset(self.fp)
# Rewrap the open file pointer in something that will
# convert line endings and decode to latin-1.
try:
if bytes is str:
# Python2, no encoding conversion necessary
fp = open(self.fp.name, "Ur")
else:
# Python3, can use bare open command.
fp = open(self.fp.name, "Ur", encoding='latin-1')
except:
# Expect this for bytesio/stringio
fp = PSFile(self.fp)
# go to offset - start of "%!PS"
fp.seek(offset)
box = None
self.mode = "RGB"
self.size = 1, 1 # FIXME: huh?
#
# Load EPS header
s = fp.readline().strip('\r\n')
while s:
if len(s) > 255:
raise SyntaxError("not an EPS file")
try:
m = split.match(s)
except re.error as v:
raise SyntaxError("not an EPS file")
if m:
k, v = m.group(1, 2)
self.info[k] = v
if k == "BoundingBox":
try:
# Note: The DSC spec says that BoundingBox
# fields should be integers, but some drivers
# put floating point values there anyway.
box = [int(float(i)) for i in v.split()]
self.size = box[2] - box[0], box[3] - box[1]
self.tile = [("eps", (0, 0) + self.size, offset,
(length, box))]
except:
pass
else:
m = field.match(s)
if m:
k = m.group(1)
if k == "EndComments":
break
if k[:8] == "PS-Adobe":
self.info[k[:8]] = k[9:]
else:
self.info[k] = ""
elif s[0] == '%':
# handle non-DSC Postscript comments that some
# tools mistakenly put in the Comments section
pass
else:
raise IOError("bad EPS header")
s = fp.readline().strip('\r\n')
if s[:1] != "%":
break
#
# Scan for an "ImageData" descriptor
while s[:1] == "%":
if len(s) > 255:
raise SyntaxError("not an EPS file")
if s[:11] == "%ImageData:":
# Encoded bitmapped image.
x, y, bi, mo = s[11:].split(None, 7)[:4]
if int(bi) != 8:
break
try:
self.mode = self.mode_map[int(mo)]
except:
break
self.size = int(x), int(y)
return
s = fp.readline().strip('\r\n')
if not s:
break
if not box:
raise IOError("cannot determine EPS bounding box")
def _find_offset(self, fp):
s = fp.read(160)
if s[:4] == b"%!PS":
# for HEAD without binary preview
fp.seek(0, 2)
length = fp.tell()
offset = 0
elif i32(s[0:4]) == 0xC6D3D0C5:
# FIX for: Some EPS file not handled correctly / issue #302
# EPS can contain binary data
# or start directly with latin coding
# more info see:
# http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
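            # Layout of that binary header (little-endian fields, per the spec
            # linked above): bytes 0-3 are the magic C5 D0 D3 C6 (read here as
            # the i32 value 0xC6D3D0C5), bytes 4-7 the offset of the PostScript
            # section, and bytes 8-11 its length.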
offset = i32(s[4:8])
length = i32(s[8:12])
else:
raise SyntaxError("not an EPS file")
return (length, offset)
def load(self, scale=1):
# Load EPS via Ghostscript
if not self.tile:
return
self.im = Ghostscript(self.tile, self.size, self.fp, scale)
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def load_seek(self, *args, **kwargs):
# we can't incrementally load, so force ImageFile.parser to
# use our custom load method by defining this method.
pass
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
"""EPS Writer for the Python Imaging Library."""
#
# make sure image data is available
im.load()
#
# determine postscript image mode
if im.mode == "L":
operator = (8, 1, "image")
elif im.mode == "RGB":
operator = (8, 3, "false 3 colorimage")
elif im.mode == "CMYK":
operator = (8, 4, "false 4 colorimage")
else:
raise ValueError("image mode is not supported")
class NoCloseStream(object):
def __init__(self, fp):
self.fp = fp
def __getattr__(self, name):
return getattr(self.fp, name)
def close(self):
pass
base_fp = fp
fp = NoCloseStream(fp)
if sys.version_info[0] > 2:
fp = io.TextIOWrapper(fp, encoding='latin-1')
if eps:
#
# write EPS header
fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
fp.write("%%Creator: PIL 0.1 EpsEncode\n")
# fp.write("%%CreationDate: %s"...)
fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
fp.write("%%Pages: 1\n")
fp.write("%%EndComments\n")
fp.write("%%Page: 1 1\n")
fp.write("%%ImageData: %d %d " % im.size)
fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
#
# image header
fp.write("gsave\n")
fp.write("10 dict begin\n")
fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
fp.write("%d %d scale\n" % im.size)
fp.write("%d %d 8\n" % im.size) # <= bits
fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
fp.write("{ currentfile buf readhexstring pop } bind\n")
fp.write(operator[2] + "\n")
fp.flush()
ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)])
fp.write("\n%%%%EndBinary\n")
fp.write("grestore end\n")
fp.flush()
#
# --------------------------------------------------------------------
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
|
|
# Copyright 2016 Infinite Connection
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.shortcuts import render, get_object_or_404, redirect
from django_tables2 import RequestConfig
from django.contrib.auth import authenticate, login, logout
from django.conf import settings
import datetime
from .models import *
from users.models import *
from .forms import *
import random
import bleach
from urllib.parse import urlparse
from users.common import sanitize_html, _render, has_access_topic, notify
def home(request):
tags = Tag.objects.filter(main=True)
for tag in tags:
messages = Message.objects.filter(topic__tags__id__exact=tag.id).order_by('posted').reverse()
if messages:
tag.last_message = messages[0]
else:
tag.last_message = None
return _render(request, 'forum/home.html', {
'tags': tags,
})
def tag(request, pk, page=1):
tag = get_object_or_404(Tag.objects.all(), pk=pk)
post_it_topics = Topic.objects.filter(tags__id__exact=tag.id, post_it=True).order_by('created')
regular_topics = Topic.objects.filter(tags__id__exact=tag.id, post_it=False).order_by('created').reverse()
if tag.only_for.all():
allowed = False
for allowed_group in tag.only_for.all():
if allowed_group in request.user.groups.all():
allowed = True
if not allowed:
return redirect(home)
# Determine which page to display and how many pages exist in tag
page = int(page)
num_pages = len(regular_topics) // 10 + 1
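    # e.g. 23 regular topics give num_pages == 3, and page 3 maps to the
    # slice [20:30] below, which yields the last three topics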
    # Select the topics that fall on the requested page
try:
display_topics = regular_topics[(page-1)*10:page*10]
except:
try:
display_topics = regular_topics[(page-1)*10:]
except:
display_topics = []
def set_last_message(topic_set):
for topic in topic_set:
messages = Message.objects.filter(topic=topic).order_by('posted').reverse()
if messages:
topic.last_message = messages[0]
else:
topic.last_message = None
set_last_message(post_it_topics)
set_last_message(regular_topics)
return _render(request, 'forum/tag.html', {
'tag': tag,
'post_it_topics': post_it_topics,
'regular_topics': display_topics,
'page': page,
'num_pages': range(1, num_pages + 1),
})
def mail(request, page=1):
user = request.user
if user.is_anonymous():
return redirect(home)
profile = get_object_or_404(Profile.objects.all(), user=user)
topics = Topic.objects.filter(private_viewers__id=user.id).order_by('updated').reverse()
    # Determine which page to display and how many pages exist in the mailbox
page = int(page)
num_pages = len(topics) // 10 + 1
    # Select the topics that fall on the requested page
try:
display_topics = topics[(page-1)*10:page*10]
except:
try:
display_topics = topics[(page-1)*10:]
except:
display_topics = []
return _render(request, 'forum/mail.html', {
'user': user,
'profile': profile,
'topics': display_topics,
'page': page,
'num_pages': range(1, num_pages + 1),
})
def new_mail(request, pk=0):
if request.method == 'POST':
form = MailForm(request.POST)
if form.is_valid():
form.instance.author = request.user
form.instance.save()
first_message = Message(topic=form.instance, author=request.user, content=sanitize_html(form.cleaned_data['first_message']))
first_message.save()
for viewer in form.cleaned_data['private_viewers']:
form.instance.private_viewers.add(viewer)
            if request.user not in form.cleaned_data['private_viewers']:
form.instance.private_viewers.add(request.user)
notify('mail_new_mail', form.instance.pk, 1, request.user)
return redirect(topic, pk=form.instance.pk)
else:
if pk != 0:
form = MailForm({'private_viewers': pk})
else:
form = MailForm()
return _render(request, 'forum/new_topic.html', {
'form': form,
})
def new_topic(request):
if request.method == 'POST':
form = TopicForm(request.POST)
if form.is_valid():
form.instance.author = request.user
form.instance.save()
first_message = Message(topic=form.instance, author=request.user, content=sanitize_html(form.cleaned_data['first_message']))
first_message.save()
for tag in form.cleaned_data['tags']:
form.instance.tags.add(tag)
notify('topic_new_topic', form.instance.pk, 1, request.user)
return redirect(topic, pk=form.instance.pk)
else:
form = TopicForm()
return _render(request, 'forum/new_topic.html', {
'form': form,
})
def topic(request, pk, edit_message=None, page=1):
# Get topic
topic = get_object_or_404(Topic.objects.all(), pk=pk)
# Get tags and private viewers
tags = topic.tags.all()
private_viewers = topic.private_viewers.all()
# Check that signed in user has access to topic
if not has_access_topic(topic, request.user):
return redirect(home)
# Get messages in topic
messages = Message.objects.filter(topic=topic).order_by('posted')
# If given a message id to edit, determine on which page it stands
if edit_message:
msgid = int(edit_message)
pos = 0
for message in messages:
if message.pk == msgid:
break
pos += 1
page = pos // 10 + 1
    # Determine which page to display and how many pages exist in the topic
    page = max(1, int(page))
    num_pages = (len(messages) - 1) // 10 + 1 if messages else 1
    # Select the messages on the requested page; an out-of-range slice is empty
    display_messages = messages[(page - 1) * 10:page * 10]
# Determine if 'edited' date must be shown for each message
    for msg in display_messages:
        msg.show_edited = abs(msg.edited - msg.posted) >= datetime.timedelta(seconds=1)
# Some initialization
edit_message_pk = None
edit_message_form = None
# Prepare new message form
new_message_form = NewMessageForm()
if request.method == 'GET':
if edit_message:
# If an edit link has been clicked, prepare edit message form and remove new message form
edit_message = messages.filter(pk=edit_message)
if edit_message:
edit_message = edit_message[0]
if edit_message.author == request.user or request.user.has_perm('forum.edit_not_owned_message'):
edit_message_pk = edit_message.pk
edit_message_form = EditMessageForm({'content': edit_message.content})
new_message_form = None
else:
if 'edit_message_pk' in request.POST:
# Edit message form submitted
edit_message_form = EditMessageForm(request.POST)
if edit_message_form.is_valid() and not topic.closed:
message = messages.filter(pk=request.POST['edit_message_pk'])
if message:
message = message[0]
if message.author == request.user or request.user.has_perm('forum.edit_not_owned_message'):
message.content = sanitize_html(edit_message_form.cleaned_data['content'])
message.edited = datetime.datetime.now()
message.save()
edit_message_form = None
if topic.private_viewers.all():
notify('mail_new_or_edited_message', pk, page, request.user)
else:
notify('topic_new_or_edited_message', pk, page, request.user)
return redirect('topic', pk=topic.pk, page=request.POST['page'])
else:
# New message form submitted
new_message_form = NewMessageForm(request.POST)
if new_message_form.is_valid() and not topic.closed:
new_message_form.instance.author = request.user
new_message_form.instance.topic = topic
new_message_form.instance.content = sanitize_html(new_message_form.instance.content)
new_message_form.instance.save()
if topic.private_viewers.all():
notify('mail_new_or_edited_message', pk, page, request.user)
else:
notify('topic_new_or_edited_message', pk, page, request.user)
new_message_form = NewMessageForm()
return redirect('topic', pk=topic.pk, page=request.POST['page'])
# Render
return _render(request, 'forum/topic.html', {
'topic': topic,
'tags': tags,
'private_viewers': private_viewers,
'messages': display_messages,
'page': page,
'num_pages': range(1, num_pages + 1),
'new_message_form': new_message_form,
'edit_message_pk': edit_message_pk,
'edit_message_form': edit_message_form,
'signed_in_user_can_edit_all': request.user.has_perm('forum.edit_not_owned_message'),
})
|
|
import importlib
import inspect
import sys
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.utils.functional import Promise
from blessings import Terminal
from cbv.models import Project, ProjectVersion, Module, Klass, Inheritance, KlassAttribute, ModuleAttribute, Method
t = Terminal()
class LazyAttribute(object):
functions = {
'gettext': 'gettext_lazy',
'reverse': 'reverse_lazy',
'ugettext': 'ugettext_lazy',
}
def __init__(self, promise):
func, self.args, self.kwargs, _ = promise.__reduce__()[1]
try:
self.lazy_func = self.functions[func.__name__]
except KeyError:
msg = f"'{func.__name__}' not in known lazily called functions"
raise ImproperlyConfigured(msg)
def __repr__(self):
arguments = []
        for arg in self.args:
            if isinstance(arg, str):
                arguments.append(f"'{arg}'")
            else:
                arguments.append(str(arg))  # join() below needs strings
        for key, value in self.kwargs.items():
            if isinstance(key, str):
                key = f"'{key}'"
            if isinstance(value, str):
                value = f"'{value}'"
            arguments.append(f"{key}: {value}")
func = self.lazy_func
arguments = ', '.join(arguments)
return f'{func}({arguments})'
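# Illustrative example (assumed, not exercised by this command): wrapping a
# Django reverse_lazy('home') promise in LazyAttribute should repr() as
# "reverse_lazy('home')", i.e. the lazy call is rendered back as readable text.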
class Command(BaseCommand):
args = ''
help = 'Wipes and populates the CBV inspection models.'
banned_attr_names = (
'__all__',
'__builtins__',
'__class__',
'__dict__',
'__doc__',
'__file__',
'__module__',
'__name__',
'__package__',
'__path__',
'__spec__',
'__weakref__',
)
def handle(self, *args, **options):
# Delete ALL of the things.
ProjectVersion.objects.filter(
project__name__iexact='Django',
version_number=django.get_version(),
).delete()
Inheritance.objects.filter(
parent__module__project_version__project__name__iexact='Django',
parent__module__project_version__version_number=django.get_version(),
).delete()
# Setup Project
self.project_version = ProjectVersion.objects.create(
project=Project.objects.get_or_create(name='Django')[0],
version_number=django.get_version(),
)
self.klasses = {}
self.attributes = {}
self.klass_imports = {}
# Set sources appropriate to this version
self.sources = []
for source in settings.CBV_SOURCES.keys():
try:
self.sources.append(importlib.import_module(source))
except ImportError:
pass
print(t.red('Tree traversal'))
for source in self.sources:
self.process_member(source, source.__name__)
self.create_inheritance()
self.create_attributes()
def ok_to_add_module(self, member, parent):
if member.__package__ is None or not any((member.__name__.startswith(source.__name__) for source in self.sources)):
return False
return True
def ok_to_add_klass(self, member, parent):
if any((member.__name__.startswith(source.__name__) for source in self.sources)): # TODO: why?
return False
try:
if inspect.getsourcefile(member) != inspect.getsourcefile(parent):
if parent.__name__ in member.__module__:
self.add_new_import_path(member, parent)
return False
except TypeError:
return False
return True
def ok_to_add_method(self, member, parent):
if inspect.getsourcefile(member) != inspect.getsourcefile(parent):
return False
if not inspect.isclass(parent):
msg = 'def {}(...): IGNORED because {} is not a class.'.format(
member.__name__,
parent.__name__,
)
print(t.red(msg))
return False
# Use line inspection to work out whether the method is defined on this
# klass. Possibly not the best way, but I can't think of another atm.
lines, start_line = inspect.getsourcelines(member)
parent_lines, parent_start_line = inspect.getsourcelines(parent)
if start_line < parent_start_line or start_line > parent_start_line + len(parent_lines):
return False
return True
def ok_to_add_attribute(self, member, member_name, parent):
if inspect.isclass(parent) and member in object.__dict__.values():
return False
if member_name in self.banned_attr_names:
return False
return True
ok_to_add_klass_attribute = ok_to_add_module_attribute = ok_to_add_attribute
def get_code(self, member):
# Strip unneeded whitespace from beginning of code lines
lines, start_line = inspect.getsourcelines(member)
whitespace = len(lines[0]) - len(lines[0].lstrip())
for i, line in enumerate(lines):
lines[i] = line[whitespace:]
# Join code lines into one string
code = ''.join(lines)
        # Get the method's argument string; inspect.formatargspec was
        # deprecated in Python 3.5 and removed in 3.11
        arguments = str(inspect.signature(member))
return code, arguments, start_line
def get_docstring(self, member):
return inspect.getdoc(member) or ''
def get_value(self, member):
return f"'{member}'" if isinstance(member, str) else str(member)
def get_filename(self, member):
# Get full file name
filename = inspect.getfile(member)
# Find the system path it's in
sys_folder = max([p for p in sys.path if p in filename], key=len)
# Get the part of the file name after the folder on the system path.
filename = filename[len(sys_folder):]
# Replace `.pyc` file extensions with `.py`
if filename[-4:] == '.pyc':
filename = filename[:-1]
return filename
def get_line_number(self, member):
try:
return inspect.getsourcelines(member)[1]
except TypeError:
return -1
def add_new_import_path(self, member, parent):
import_path = parent.__name__
try:
current_import_path = self.klass_imports[member]
except KeyError:
self.klass_imports[member] = parent.__name__
else:
self.update_shortest_import_path(member, current_import_path, import_path)
try:
existing_member = Klass.objects.get(
module__project_version__project__name__iexact='Django',
module__project_version__version_number=django.get_version(),
name=member.__name__)
except Klass.DoesNotExist:
return
if self.update_shortest_import_path(member, existing_member.import_path, import_path):
existing_member.import_path = import_path
existing_member.save()
    def update_shortest_import_path(self, member, current_import_path, new_import_path):
        """Record new_import_path for member if it has fewer dotted segments."""
        new_length = len(new_import_path.split('.'))
current_length = len(current_import_path.split('.'))
if new_length < current_length:
self.klass_imports[member] = new_import_path
return True
return False
def process_member(self, member, member_name, parent=None, parent_node=None):
# BUILTIN
if inspect.isbuiltin(member):
return
# MODULE
if inspect.ismodule(member):
            # Only traverse modules that live under one of our source packages
if not self.ok_to_add_module(member, parent):
return
filename = self.get_filename(member)
print(t.yellow('module ' + member.__name__), filename)
# Create Module object
this_node = Module.objects.create(
project_version=self.project_version,
name=member.__name__,
docstring=self.get_docstring(member),
filename=filename
)
go_deeper = True
# CLASS
elif inspect.isclass(member) and inspect.ismodule(parent):
if not self.ok_to_add_klass(member, parent):
return
self.add_new_import_path(member, parent)
import_path = self.klass_imports[member]
start_line = self.get_line_number(member)
print(t.green('class ' + member_name), start_line)
this_node = Klass.objects.create(
module=parent_node,
name=member_name,
docstring=self.get_docstring(member),
line_number=start_line,
import_path=import_path
)
self.klasses[member] = this_node
go_deeper = True
# METHOD
elif inspect.ismethod(member) or inspect.isfunction(member):
decorated = False
# py2 decoration
if hasattr(member, 'func'):
member = member.func
decorated = True
if hasattr(member, 'im_func') and getattr(member.im_func, 'func_closure', None):
member = member.im_func
decorated = True
while getattr(member, 'func_closure', None):
member = member.func_closure[-1].cell_contents
decorated = True
# py3 decoration
while getattr(member, '__wrapped__', None):
member = member.__wrapped__
decorated = True
# Checks
if not self.ok_to_add_method(member, parent):
return
print(' def ' + member_name)
code, arguments, start_line = self.get_code(member)
# Make the Method
this_node = Method.objects.create(
klass=parent_node,
name=member_name,
docstring=self.get_docstring(member),
code=code,
kwargs=arguments[1:-1],
line_number=start_line,
)
go_deeper = False
# (Class) ATTRIBUTE
elif inspect.isclass(parent):
# Replace lazy function call with an object representing it
if isinstance(member, Promise):
member = LazyAttribute(member)
if not self.ok_to_add_klass_attribute(member, member_name, parent):
return
value = self.get_value(member)
attr = (member_name, value)
start_line = self.get_line_number(member)
try:
self.attributes[attr] += [(parent_node, start_line)]
except KeyError:
self.attributes[attr] = [(parent_node, start_line)]
print(' {key} = {val}'.format(key=attr[0], val=attr[1]))
go_deeper = False
# (Module) ATTRIBUTE
elif inspect.ismodule(parent):
if not self.ok_to_add_module_attribute(member, member_name, parent):
return
start_line = self.get_line_number(member)
this_node = ModuleAttribute.objects.create(
module=parent_node,
name=member_name,
value=self.get_value(member),
line_number=start_line,
)
print('{key} = {val}'.format(key=this_node.name, val=this_node.value))
go_deeper = False
# INSPECTION. We have to go deeper ;)
if go_deeper:
# Go through members
for submember_name, submember_type in inspect.getmembers(member):
self.process_member(
member=submember_type,
member_name=submember_name,
parent=member,
parent_node=this_node
)
def create_inheritance(self):
print('')
print(t.red('Inheritance'))
for klass, representation in self.klasses.items():
print('')
            print(t.green(str(representation)), end=' ')
direct_ancestors = inspect.getclasstree([klass])[-1][0][1]
for i, ancestor in enumerate(direct_ancestors):
if ancestor in self.klasses:
print('.', end=' ')
Inheritance.objects.create(
parent=self.klasses[ancestor],
child=representation,
order=i
)
print('')
def create_attributes(self):
print('')
print(t.red('Attributes'))
# Go over each name/value pair to create KlassAttributes
for name_and_value, klasses in self.attributes.items():
# Find all the descendants of each Klass.
descendants = set()
for klass, start_line in klasses:
for child in klass.get_all_children():
descendants.add(child)
# By removing descendants from klasses, we leave behind the
# klass(s) where the value was defined.
remaining_klasses = [k_and_l for k_and_l in klasses if k_and_l[0] not in descendants]
# Now we can create the KlassAttributes
name, value = name_and_value
for klass, line in remaining_klasses:
KlassAttribute.objects.create(
klass=klass,
line_number=line,
name=name,
value=value
)
print(f'{klass}: {name} = {value}')
|
|
#!/usr/bin/env python
"""
calc_pot_alignment.py Jeff Doak jeff.w.doak@gmail.com
Calculates the electrostatic potential alignment between defected and
perfect-crystal supercells. (This script is a newer/better version of an
earlier calc_pot_alignment.py.)
Possible command line options:
calc_pot_alignment.py <path to defect dir> <path to host dir> <atom #>
calc_pot_alignment.py <def dir> <host dir> <atom #> <list of atom type names>
calc_pot_alignment.py <def dir> <host dir> <dummy #> <name list> <defect atom position>
calc_pot_alignment.py <def dir> <host dir> <dummy #> <name list>
<defect atom position> <corresponding host position>
calc_pot_alignment.py <def dir> <host dir> <dummy #> <name list>
<defect atom position> <corresponding host position> quiet
calc_pot_alignment.py <def dir> <host dir> <dummy #> <name list>
<defect atom position> <corresponding host position> switch quiet
Here, <path to defect dir> and <path to host dir> are the relative paths from
the current directory to the directories containing the defect and host
crystal calculations, respectively; <atom #> is the index of the defect atom's
position in the list of atoms in the defect crystal POSCAR; <list of atom type
names> is a space-separated list of the element symbols (or any string) with
one element for each atom type in the defect crystal POSCAR; <dummy #> is any
integer (this parameter is not used when defect atom positions are given
explicitly); <defect atom position> is a space-separated list of the x, y,
and z coordinates of the defect atom's (or vacancy's) position in the defect
crystal, given in direct coordinates; and <corresponding host position> is a
space-separated list of x, y, and z coordinates (in direct coordinates of the
host crystal) for the position in the host POSCAR which corresponds to the
position of the defect in the defect crystal.
The fourth command is useful when the atom positions in the defect
crystal have been shifted relative to the host crystal to place the defect at
the origin of the defect POSCAR.
The fifth command suppresses all output except for the electrostatic potential
alignment. This is useful for automated calculations.
The sixth command is rarely useful. It moves the defect atom from the bottom
of the list of atoms in the defect POSCAR to the top, or vice versa, so that
the defect and host atom lists are in the same order.
"""
import re
import sys
import numpy as np
from unitcell import UnitCell
def get_el_pots(outcar_name, n_atoms):
"""
Read in atom-averaged electrostatic potentials from an OUTCAR file.
Parameters
----------
outcar_name : str
Path to OUTCAR file.
n_atoms : int
Number of atoms in the calculation corresponding to the OUTCAR file.
Returns
-------
charged_pots : numpy array
Array of electrostatic potentials averaged around each atom (units of
eV).
np.shape(charged_pots) == (n_atoms,)
"""
with open(outcar_name, 'r') as outcar:
lines = outcar.readlines()
charged_pots = np.zeros(n_atoms)
for i in range(len(lines)):
if lines[i].startswith(" (the norm of the test charge is"):
j = 1
k = 0
while k < n_atoms:
line = lines[i+j].split()
while len(line) > 0:
temp = line.pop(0)
try:
k = int(temp)
except ValueError:
el_str = str(k+1)+r"([-][0-9][0-9]*[.][0-9][0-9]*)"
el_reg = re.compile(el_str)
k += 1
charged_pots[k-1] = float(el_reg.match(temp).group(1))
else:
charged_pots[k-1] = float(line.pop(0))
j += 1
break
return charged_pots
# Set up input file names
chargedpos = str(sys.argv[1])+"POSCAR"
neutralpos = str(sys.argv[2])+"POSCAR"
chargedout = str(sys.argv[1])+"OUTCAR"
neutralout = str(sys.argv[2])+"OUTCAR"
# The index of the defect atom in the POSCAR is given as an optional 3rd
# argument if the defect does not sit at position 0, 0, 0
# Read in POSCAR of charged calculation
poscar = UnitCell(chargedpos)
if len(sys.argv) > 3:
center = poscar.atom_positions[int(sys.argv[3])-1]
else:
center = np.array([0.0, 0.0, 0.0])
neut_center = np.array(center)
if len(sys.argv) > 3+poscar.num_atom_types:
names = [str(i) for i in sys.argv[4:4+poscar.num_atom_types]]
poscar.set_atom_names(names)
if len(sys.argv) > 4+poscar.num_atom_types:
center = [float(i) for i in
sys.argv[4+poscar.num_atom_types:7+poscar.num_atom_types]]
neut_center = np.array(center)
if len(sys.argv) > 7+poscar.num_atom_types:
neut_center = [float(i) for i in
sys.argv[7+poscar.num_atom_types:10+poscar.num_atom_types]]
poscar.convention = "D"
poscar.shift(0.5-center[0], 0.5-center[1], 0.5-center[2], "D")
poscar.in_cell()
poscar.scale = 1.0
poscar.convention = "C"
# If the switch flag is used, move the defect atom from the top of the atom
# list to the bottom (or from the bottom to the top)
if str(sys.argv[-2]) == "switch":
if int(sys.argv[3]) == 1:
temp_pos = np.delete(poscar.atom_positions, int(sys.argv[3])-1, 0)
temp_pos = np.insert(temp_pos,
len(poscar.atom_positions)-1,
poscar.atom_positions[int(sys.argv[3])-1],
0,
)
poscar.atom_positions = temp_pos
elif int(sys.argv[3]) == len(poscar.atom_positions):
temp_pos = np.delete(poscar.atom_positions, int(sys.argv[3])-1, 0)
temp_pos = np.insert(temp_pos, 0, poscar.atom_positions[int(sys.argv[3])-1], 0)
poscar.atom_positions = temp_pos
# Read in POSCAR of neutral calculation
perfect = UnitCell(neutralpos)
perfect.convention = "D"
perfect.shift(0.5-neut_center[0], 0.5-neut_center[1], 0.5-neut_center[2], "D")
perfect.in_cell()
perfect.scale = 1.0
perfect.convention = "C"
# Adjust center to center of cell in cartesian coordinates
center = np.dot(poscar.cell_vec.transpose(), np.array([0.5, 0.5, 0.5]))
neut_center = np.dot(perfect.cell_vec.transpose(), np.array([0.5, 0.5, 0.5]))
# Read in OUTCAR of charged calculation
charged_pots = get_el_pots(chargedout, poscar.num_atoms)
# If the switch flag is used, reorder the electrostatic potentials the same
# way the atom positions were reordered above
if str(sys.argv[-2]) == "switch":
if int(sys.argv[3]) == 1:
temp_pots = np.delete(charged_pots, int(sys.argv[3])-1)
temp_pots = np.insert(temp_pots, len(charged_pots)-1, charged_pots[int(sys.argv[3])-1])
charged_pots = temp_pots
elif int(sys.argv[3]) == len(poscar.atom_positions):
temp_pots = np.delete(charged_pots, int(sys.argv[3])-1)
temp_pots = np.insert(temp_pots, 0, charged_pots[int(sys.argv[3])-1])
charged_pots = temp_pots
# Read in OUTCAR of neutral calculation
neutral_pots = get_el_pots(neutralout, perfect.num_atoms)
def list_fit(index, list1, list2):
"""
Evaluate the fitness function for 2 lists off by 1 element. Assume
len(list2) > len(list1).
"""
list2 = np.delete(list2, index, 0)
return np.linalg.norm(list2-list1)
def list_fit_2(i1, i2, list1, list2):
    """Same as list_fit, but with the two elements i1 and i2 removed from list2."""
    list2 = np.delete(list2, [i1, i2], 0)
return np.linalg.norm(list2-list1)
# Find missing atom in defect/host cell
insert_index = []
if len(poscar.atom_positions)+1 == len(perfect.atom_positions):
# There is a vacancy defect
for i in range(len(neutral_pots)):
insert_index.append(list_fit(i, charged_pots, neutral_pots))
neutral_pots = np.delete(neutral_pots, np.argsort(insert_index)[0])
perfect.atom_positions = np.delete(perfect.atom_positions, np.argsort(insert_index)[0], 0)
elif len(poscar.atom_positions) == len(perfect.atom_positions)+1:
# There is an interstitial defect
for i in range(len(charged_pots)):
insert_index.append(list_fit(i, neutral_pots, charged_pots))
charged_pots = np.delete(charged_pots, np.argsort(insert_index)[0])
poscar.atom_positions = np.delete(poscar.atom_positions, np.argsort(insert_index)[0], 0)
elif len(poscar.atom_positions)+2 == len(perfect.atom_positions):
# There is a multi-vacancy defect
for i in range(len(neutral_pots)-1):
insert_index.append([])
for j in range(i+1, len(neutral_pots)):
insert_index[i].append(list_fit_2(i, j, charged_pots, neutral_pots))
min_index = np.argmin(np.array(insert_index).flatten())
min_indices = np.unravel_index(min_index, (len(insert_index), len(insert_index[0])))
#print min_indices[0]
#print min_index, min_indices, insert_index[min_indices[0]][min_indices[1]]
#print perfect.atom_positions[min_indices[0]]-perfect.atom_positions[min_indices[1]]
neutral_pots = np.delete(neutral_pots, min_indices)
perfect.atom_positions = np.delete(perfect.atom_positions, min_indices, 0)
delta_pots = charged_pots - neutral_pots
# Average electrostatic potential difference outside 1/2 def-def distance
r_min = np.linalg.norm(poscar.cell_vec[0])/2. #huge assumption that the cell is cubic
radii = [np.linalg.norm(i-center) for i in poscar.atom_positions]
sphere_pots = []
for i in range(len(radii)):
if radii[i] >= r_min:
sphere_pots.append(delta_pots[i])
# Print average electrostatic potential difference between defect and host cells
# as a function of distance away from the defect
if str(sys.argv[-1]) != "quiet":
sorted_radii = np.sort(radii)
sorted_pots = [delta_pots[i] for i in np.argsort(radii)]
sorted_names = [poscar.atom_names[i] for i in np.argsort(radii)]
for i in range(len(sorted_radii)):
        print(sorted_radii[i], sorted_pots[i], sorted_names[i])
# Print cell-averaged electrostatic potential difference
if str(sys.argv[-1]) != "quiet":
print ""
print "Average over atoms outside radius", r_min, "A centered around defect"
print "$\Delta V_{el} (eV)$ &", "# atoms in average &", "Std. Dev. (eV)"
print np.mean(sphere_pots), len(sphere_pots), np.std(sphere_pots)
else:
print np.mean(sphere_pots)
exit()
|
|
#!/usr/bin/env python
'''
PypeR (PYthon-piPE-R)
PypeR is free software subject to the GPL license 3.0 and comes with
ABSOLUTELY NO WARRANTY. This package provides a light-weight interface to use R
in Python by pipe. It can be used on multiple platforms since it is written in
pure python.
Prerequisites:
1. Python 2.3 or later is required.
Usage:
The usage of this package is very simple. Examples are presented in the
file "test.py" in the distribution package.
PypeR provides a class "R" to wrap the R language. An instance of the R
class is used to manage an R process. Different instances can use different
R installations. On POSIX systems (including the Cygwin environment on
Windows), it is even possible to use an R installed on a remote computer.
Basically, there are four ways to use an instance of the R class.
1. Use the methods of the instance
methods include:
run: This method is used to pass an R command string to the R process,
the return value is a string - the standard output from R. Note
that the return value usually includes the R expression (a
series of R commands) itself and the output of the R
expression. If the real result value is wanted, use the
method "get" instead.
assign: Assign a value to an R variable. No return value.
get: Get the result of an R expression.
remove: Remove an R variable.
2. Call the instance as a function
The instance is callable. If called as a function, it behaves just the
same as its "run" method.
3. Use the instance as a Python dictionary
The instance can mimic some operations on a python dictionary,
typically, to assign values to R variables, to retrieve values for any
R expression, or to delete an R variable. These operations do the same
jobs as the methods "assign", "get", and "remove".
4. Access R variables as if they are the attributes of the instance.
If the variable name cannot be found in the instance or its class, the
instance will try to get/set/remove it in R. This way is similar to 3,
but with more limitations, e.g., the R variable name cannot contain any
DOT (.)
Considering that any code block in R is an expression, the "get" method (or
the form of retrieving values from a dictionary) can be used to run a
number of R commands with the final result returned.
Note that PypeR does NOT validate/convert a variable name when passing it
to R. If a variable name with a leading underscore ("_"), although legal in
Python, is passed to R, an RError will be raised.
Conversions:
Python -> R
None -> NULL, NaN -> NaN, Inf -> Inf
R -> Python (numpy)
NULL -> None, NA -> None, NaN -> None (NaN), Inf -> None (Inf)
DEBUG mode:
Since the child process (R) can be easily killed by any occasional error in
the code passed to it, PypeR is set to "DEBUG" mode by default. This
means that any code blocks sent to R will be wrapped in the function
"try()", which will prevent R from crashing. To disable the "DEBUG" mode,
the user can simply set the variable "_DEBUG_MODE" in the R class or in its
instance to False.
To mimic the behavior of the "get" method of a Python dictionary, the
method "get" allows a default value for variables that do not exist in R.
The R expression will then always be wrapped in "try()" to avoid R crashing
when the method "get" is called.
'''
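# A minimal usage sketch of the four styles described above (assumes an R
# interpreter named 'R' on the PATH):
#     r = R()
#     r.run('a <- 3')        # 1. method of the instance
#     r('b <- a * 2')        # 2. call the instance as a function
#     r['d'] = [1, 2, 3]     # 3. dictionary-style assignment ...
#     print(r['b + 1'])      #    ... and retrieval of an R expression
#     r.e = 'hello'          # 4. attribute-style access
#     r._DEBUG_MODE = False  # disable the try() wrapping described above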
# the module "subprocess" requires Python 2.4
import os
import sys
import time
import re
import tempfile
from types import *
__version__ = '1.1.2'
if sys.version < '2.3': # actually python >= 2.3 is required by tempfile.mkstemp used in this module !!!
set = frozenset = tuple
basestring = str
elif sys.version < '2.4':
from sets import Set as set, ImmutableSet as frozenset
if sys.version < '3.0':
_mystr = _mybytes = lambda s: s
_in_py3 = False
else:
from functools import reduce
long, basestring, unicode = int, str, str
_mybytes = lambda s: bytes(s, 'utf8') # 'ascii')
_mystr = lambda s: str(s, 'utf8')
_in_py3 = True
try:
    import pandas
    has_pandas = True
except ImportError:
    has_pandas = False
try:
    import numpy
    has_numpy = True
except ImportError:
    has_numpy = False
_has_subp = False
if sys.platform == 'cli': # for IronPython
from System.Diagnostics import Process
PIPE, _STDOUT = None, None
def Popen(CMD, *a, **b):
'''
CMD is a list - a command and its arguments
'''
p = Process()
p.StartInfo.UseShellExecute = False
p.StartInfo.RedirectStandardInput = True
p.StartInfo.RedirectStandardOutput = True
p.StartInfo.RedirectStandardError = True
p.StartInfo.FileName = CMD[0]
p.StartInfo.Arguments = ' '.join(CMD[1:])
p.Start()
return(p)
def sendAll(p, s):
# remove ending newline since WriteLine will add newline at the end of s!
if s.endswith('\r\n'):
s = s[:-2]
elif s.endswith('\n'):
s = s[:-1]
p.StandardInput.WriteLine(_mybytes(s))
def readLine(p, dump_stdout=False, *a, **b):
rv = _mystr(p.StandardOutput.ReadLine()) + '\n' # add newline since ReadLine removed it.
if dump_stdout:
sys.stdout.write(rv)
sys.stdout.flush()
return(rv)
else:
try:
import subprocess
_has_subp = True
Popen, PIPE, _STDOUT = subprocess.Popen, subprocess.PIPE, subprocess.STDOUT
    except ImportError:  # Python 2.3 or older
PIPE, _STDOUT = None, None
def Popen(CMD, *a, **b):
class A:
None
p = A()
p.stdin, p.stdout = os.popen4(' '.join(CMD))
return(p)
def sendAll(p, s):
p.stdin.write(_mybytes(s))
#os.write(p.stdin.fileno(), s)
p.stdin.flush()
def readLine(p, dump_stdout=False, *a, **b):
rv = _mystr(p.stdout.readline())
if dump_stdout:
sys.stdout.write(rv)
sys.stdout.flush()
return(rv)
def NoneStr(obj): return('NULL')
def BoolStr(obj):
return(obj and 'TRUE' or 'FALSE')
def ReprStr(obj):
return(repr(obj))
if has_numpy:
    def FloatStr(f):
        if f != f:  # NaN is the only value not equal to itself (numpy.NaN was removed in numpy 2.0)
            return('NaN')  # or 'NA'
if has_pandas and pandas.isnull(f):
return('NaN')
if numpy.isposinf(f):
return('Inf')
if numpy.isneginf(f):
return('-Inf')
return(repr(f))
else:
FloatStr = repr
def LongStr(obj):
rv = repr(obj)
if rv[-1] == 'L':
rv = rv[:-1]
return(rv)
def ComplexStr(obj):
return(repr(obj).replace('j', 'i'))
def UniStr(obj):
return(repr(obj.encode('utf8')))
def ByteStr(obj):
return(repr(obj)[1:])
#return obj.decode()
def SeqStr(obj, head='c(', tail=')', enclose=True):
if not enclose: # don't add head and tail
return(','.join(map(Str4R, obj)))
if not obj:
return(head + tail)
# detect types
if isinstance(obj, set):
obj = list(obj)
obj0 = obj[0]
tp0 = type(obj0)
simple_types = [str, bool, int, long, float, complex]
num_types = [int, long, float, complex]
    is_int = tp0 in (int, long)  # flag for explicit conversion to integer in R, since R treats an integer from stdin as a double
if tp0 not in simple_types:
head = 'list('
else:
        tps = isinstance(obj0, basestring) and [str, unicode] or isinstance(obj0, bool) and [bool] or num_types  # StringType/BooleanType from the types module do not exist in Python 3
for i in obj[1:]:
tp = type(i)
if tp not in tps:
head = 'list('
is_int = False
break
elif is_int and tp not in (int, long):
is_int = False
# convert
return((is_int and 'as.integer(' or '') + head + ','.join(map(Str4R, obj)) + tail + (is_int and ')' or ''))
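# Illustrative conversions (derived from the rules above): a homogeneous
# integer sequence gets an explicit cast, SeqStr([1, 2, 3]) ->
# 'as.integer(c(1,2,3))', while a mixed one falls back to an R list,
# SeqStr([1, 'a']) -> "list(1,'a')".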
def DictStr(obj):
return('list(' + ','.join(['%s=%s' % (Str4R(a[0]), Str4R(a[1])) for a in obj.items()]) + ')')
# 'b':boolean, 'i':integer, 'u':unsigned int, 'f':float, c complex-float
# 'S'/'a':string, 'U':unicode, 'V':raw data. 'O':string?
_tpdic = {'i':'as.integer(c(%s))', 'u':'as.integer(c(%s))', 'f':'as.double(c(%s))', 'c':'as.complex(c(%s))',
'b':'c(%s)', 'S':'c(%s)', 'a':'c(%s)', 'U':'c(%s)', 'V':'list(%s)', 'O':'as.character(c(%s))'}
def getVec(ary):
# used for objects from numpy and pandas
tp = ary.dtype.kind
if len(ary.shape) > 1:
ary = ary.reshape(reduce(lambda a,b=1: a*b, ary.shape))
ary = ary.tolist()
if tp != 'V':
return(_tpdic.get(tp, 'c(%s)') % SeqStr(ary, enclose=False))
# record array
ary = list(map(SeqStr, ary)) # each record will be mapped to vector or list
return(_tpdic.get(tp, 'list(%s)') % (', '.join(ary))) # use str here instead of repr since it has already been converted to str by SeqStr
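# Illustrative example: for a one-dimensional integer array (dtype kind 'i'),
# getVec(numpy.array([1, 2])) -> 'as.integer(c(1,2))'.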
def NumpyNdarrayStr(obj):
shp = obj.shape
if len(shp) == 1: # to vector
tp = obj.dtype
if tp.kind != 'V':
return(getVec(obj))
# One-dimension record array will be converted to data.frame
def mapField(f):
ary = obj[f]
tp = ary.dtype.kind
return('"%s"=%s' % (f, _tpdic.get(tp, 'list(%s)') % SeqStr(ary.tolist(), enclose=False)))
return('data.frame(%s)' % (', '.join(map(mapField, tp.names))))
    elif len(shp) == 2:  # a two-dimensional array will be converted to a matrix
return('matrix(%s, nrow=%d, byrow=TRUE)' % (getVec(obj), shp[0]))
else: # to array
dim = list(shp[-2:]) # row, col
dim.extend(shp[-3::-1])
newaxis = list(range(len(shp)))
newaxis[-2:] = [len(shp)-1, len(shp)-2]
return('array(%s, dim=c(%s))' % (getVec(obj.transpose(newaxis)), repr(dim)[1:-1]))
def PandasSerieStr(obj):
return('data.frame(%s=%s, row.names=%s)' % (obj.name, getVec(obj.values), getVec(obj.index)))
def PandasDataFrameStr(obj):
    # A DataFrame will be converted to a data.frame; columns must be named explicitly
    s = ', '.join(map(lambda a, b=obj: '"%s"=%s' % (str(a), getVec(obj[a])), obj))
    return('data.frame(%srow.names=%s)' % (s and s + ', ', getVec(obj.index)))
def OtherStr(obj):
if hasattr(obj, '__iter__'): # for iterators
if hasattr(obj, '__len__') and len(obj) <= 10000:
return(SeqStr(list(obj)))
else: # waiting for better solution for huge-size containers
return(SeqStr(list(obj)))
return(repr(obj))
str_func = {type(None): NoneStr, bool: BoolStr, long: LongStr, int: repr, float: FloatStr, complex: ComplexStr,
            unicode: UniStr, str: repr, list: SeqStr, tuple: SeqStr, set: SeqStr, frozenset: SeqStr, dict: DictStr}  # str will override unicode in Python 3
base_tps = [type(None), bool, int, long, float, complex, str, unicode, list, tuple, set, frozenset, dict] # use type(None) instead of NoneType since the latter cannot be found in the types module in Python 3
if has_numpy:
str_func[numpy.ndarray] = NumpyNdarrayStr
base_tps.append(numpy.ndarray)
if has_pandas:
str_func.update({pandas.Series: PandasSerieStr, pandas.DataFrame: PandasDataFrameStr})
base_tps.extend([pandas.Series, pandas.DataFrame])
base_tps.reverse()
if _in_py3:
base_tps.append(bytes)
str_func[bytes] = ByteStr
def Str4R(obj):
'''
convert a Python basic object into an R object in the form of string.
'''
#return str_func.get(type(obj), OtherStr)(obj)
# for objects known by PypeR
if type(obj) in str_func:
return(str_func[type(obj)](obj))
# for objects derived from basic data types
for tp in base_tps:
if isinstance(obj, tp):
return(str_func[tp](obj))
# for any other objects
return(OtherStr(obj))
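# Illustrative examples of the dispatch above:
#   Str4R(None)       -> 'NULL'
#   Str4R({'a': 1})   -> "list('a'=1)"
#   Str4R((1.5, 2.5)) -> 'c(1.5,2.5)'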
class RError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return(repr(self.value))
class R(object): # "del r.XXX" fails on FePy-r7 (IronPython 1.1 on .NET 2.0.50727.42) if using old-style class
'''
A Python class to enclose an R process.
'''
__Rfun = r'''.getRvalue4Python__ <- function(x, use_dict=NULL, has_numpy=FALSE, has_pandas=FALSE) {
if (has_pandas) has_numpy <- TRUE
if (has_numpy) {
headstr <- 'numpy.array('
tailstr <- ')' }
else headstr <- tailstr <- ''
SpecialLocs <- function(x) { # find locations of special values: NULL, NA, NaN, Inf
rlt <- list()
if (!has_numpy) {
idx <- which(is.null(x) | is.na(x) | is.nan(x) | is.infinite(x))
            if (length(idx) > 0) rlt$None <- idx
}
else {
idx <- which(is.null(x) | is.na(x))
if (length(idx) > 0) rlt$None <- idx
idx <- which(is.nan(x))
if (length(idx) > 0) rlt$numpy.NaN <- idx
idx <- which(is.infinite(x))
if (length(idx) > 0) {
v <- x[idx]
iidx <- which(v > 0)
if (length(iidx) > 0) rlt$numpy.Inf <- idx[iidx]
iidx <- which(v < 0)
                if (length(iidx) > 0) rlt[['-numpy.Inf']] <- idx[iidx]
}
}
return(rlt)
}
SpecialVals <- function(x, valoc) {
for (val in names(valoc)) x[valoc[[val]]] <- val
return(x)
}
NullStr <- function(x) 'None'
VectorStr <- function(x) {
#nms <- names(x)
#if (!is.null(nms) && length(nms)>0) return(ListStr(as.list(x)))
complx <- is.complex(x)
special_locs <- SpecialLocs(x)
if (is.character(x)) {
x <- gsub('\\\\', '\\\\\\\\', x)
x <- gsub('"', '\\\\"', x)
x <- paste('"', x, '"', sep='') }
else if (is.logical(x)) x <- ifelse(x, 'True', 'False')
if (length(special_locs) > 0) x <- SpecialVals(x, special_locs)
if (length(x)==1) x <- paste(x) # convert to character using paste, "gettext", or "as.character"
else x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (complx) x <- gsub('i', 'j', x)
return(x) }
MatrixStr <- function(x) {
complx <- is.complex(x)
special_locs <- SpecialLocs(x)
if (is.character(x)) x <- matrix(paste('"', x, '"', sep=''), nrow=nrow(x))
else if (is.logical(x)) x <- ifelse(x, 'True', 'False')
if (length(special_locs) > 0) x <- SpecialVals(x, special_locs)
x <- apply(x, 1, function(r) paste('[', paste(r, collapse=','), ']', sep=''))
x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (complx) x <- gsub('i', 'j', x)
return(x) }
ArrayStr <- function(x) {
complx <- is.complex(x)
ndim <- length(dim(x))
if (ndim == 1) return(VectorStr(x))
if (ndim == 2) return(MatrixStr(x))
# ndim >= 3
if (is.character(x)) x <- array(paste('"', x, '"', sep=''), dim=dim(x))
else if (is.logical(x)) x <- ifelse(x, 'True', 'False')
# do col first
x <- apply(x, seq(dim(x))[-2], function(r) paste('[', paste(r, collapse=','), ']', sep=''))
for (i in seq(ndim-2))
x <- apply(x, seq(dim(x))[-1], function(r) paste('[', paste(r, collapse=','), ']', sep=''))
x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (complx) x <- gsub('i', 'j', x)
return(x) }
DataFrameStr <- function(x) {
if (ncol(x) == 0) {
if (has_pandas) return('pandas.DataFrame()')
if (has_numpy) return('numpy.array([])')
return('[]')}
if (has_numpy) {
cnms <- colnames(x) # get column names
ctp <- list()
for (i in seq(x)) {
xi <- as.vector(x[[i]])
special_locs <- SpecialLocs(xi)
if (is.character(xi)) {
ctp[i] <- sprintf('("%s", "|S%d")', cnms[i], if (length(xi) > 0) max(nchar(xi)) else 0 )
xi <- paste('"', xi, '"', sep='') }
            else if (is.logical(xi)) {
                xi <- ifelse(xi, 'True', 'False')
                ctp[i] <- paste('("', cnms[i], '", "<?")', sep='') }
            else if (is.integer(xi)) {
                xi <- paste(xi)
                ctp[i] <- paste('("', cnms[i], '", "<q")', sep='') }
            else if (is.double(xi)) {
                xi <- paste(xi)
                ctp[i] <- paste('("', cnms[i], '", "<g")', sep='') }
            else if (is.complex(xi)) {
                xi <- gsub('i', 'j', paste(xi))
                ctp[i] <- paste('("', cnms[i], '", "<G")', sep='') }
if (length(special_locs) > 0) xi <- SpecialVals(xi, special_locs)
if (nrow(x) > 0) x[[i]] <- xi }
tailstr <- paste(', dtype=[', paste(ctp, collapse=','), ']', tailstr, sep='') }
else if (nrow(x) > 0)
for (i in seq(x)) {
xi <- as.vector(x[[i]])
special_locs <- SpecialLocs(xi)
if (is.character(xi)) xi <- paste('"', xi, '"', sep='')
else if (is.logical(xi)) xi <- ifelse(xi, 'True', 'False')
else if (is.integer(xi)) xi <- paste(xi)
else if (is.double(xi)) xi <- paste(xi)
else if (is.complex(xi)) xi <- gsub('i', 'j', paste(xi))
if (length(special_locs) > 0) xi <- SpecialVals(xi, special_locs)
if (nrow(x) > 0) x[[i]] <- xi }
x <- as.matrix(x)
x <- apply(x, 1, function(r) paste('(', paste(r, collapse=','), if(length(r)<2) ',)' else ')', sep=''))
x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (has_pandas) x <- paste('pandas.DataFrame(', x, ')', sep='')
return(x) }
ListStr <- function(x) {
nms <- names(x) # get column names
x <- sapply(x, Str4Py)
return(zipVecWithName(x, nms))}
zipVecWithName <- function(x, nms) {
if (!is.null(nms) && length(nms)>0) {
nms <- paste('"', nms, '"', sep='')
x <- sapply(seq(nms), function(i) paste('(', nms[i], ',', x[i], ')') )
if (identical(use_dict, TRUE)) x <- paste('dict([', paste(x, collapse=','), '])', sep='')
else if (identical(use_dict, FALSE)) x <- paste('[', paste(x, collapse=','), ']', sep='')
else { # should be NULL or something else
if (any(duplicated(nms))) x <- paste('[', paste(x, collapse=','), ']', sep='')
else x <- paste('dict([', paste(x, collapse=','), '])', sep='') } }
else x <- paste('[', paste(x, collapse=','), ']', sep='')
return(x) }
Str4Py <- function(x) {
# no considering on NA, Inf, ...
# use is.XXX, typeof, class, mode, storage.mode, sprintf
if (is.factor(x)) x <- as.vector(x)
rlt <- {
if (is.null(x)) NullStr(x)
else if (is.vector(x) && !is.list(x)) VectorStr(x)
else if (is.matrix(x) || is.array(x)) ArrayStr(x)
else if (is.data.frame(x)) DataFrameStr(x)
else if (is.list(x)) ListStr(x)
else Str4Py(as.character(x)) } # other objects will be convert to character (instead of NullStr), or use "gettext"
return(rlt) }
Str4Py(x) }
# initialize the library path for TCL/TK based environments on Windows, e.g. Python IDLE
.addLibs <- function() {
ruser <- Sys.getenv('R_USER')
userpath <- Sys.getenv('R_LIBS_USER')
libpaths <- .libPaths()
for (apath in userpath) {
if (length(grep(apath, libpaths)) > 0) next
if (file.exists(apath)) .libPaths(apath)
else {
d <- '/Documents'
if (substr(ruser, nchar(ruser)-nchar(d)+1, nchar(ruser)) != d) {
apath <- paste(ruser,d, substr(apath, nchar(ruser)+1, nchar(apath)), sep='')
if (file.exists(apath)) .libPaths(apath)} } } }
if(identical(.Platform$OS.type, 'windows')) .addLibs()
rm(.addLibs)
'''
_DEBUG_MODE = True
def __init__(self, RCMD='R', max_len=1000, use_numpy=True, use_pandas=True, use_dict=None,
host='localhost', user=None, ssh='ssh', return_err=True, dump_stdout=False):
'''
RCMD: The name of a R interpreter, path information should be included
if it is not in the system search path.
use_numpy: Used as a boolean value. A False value will disable numpy
even if it has been imported.
use_pandas: Used as a boolean value. A False value will disable pandas
even if it has been imported.
        use_dict: An R named list will be returned as a Python dictionary if
            "use_dict" is True, or a list of tuples (name, value) if "use_dict"
            is False. If "use_dict" is None, the return value will be a
            dictionary if there are no replicated names, or a list if
            replicated names are found.
host: The computer name (or IP) on which the R interpreter is
installed. The value "localhost" means that R locates on the the
localhost computer. On POSIX systems (including Cygwin environment
on Windows), it is possible to use R on a remote computer if the
command "ssh" works. To do that, the user needs to set this value,
and perhaps the parameter "user".
user: The user name on the remote computer. This value needs to be set
only if the user name on the remote computer is different from the
local user. In interactive environment, the password can be input
by the user if prompted. If running in a program, the user needs to
be able to login without typing password!
ssh: The program to login to remote computer.
return_err: redirect stderr to stdout
dump_stdout:
prints output from R directly to sys.stdout, useful for long running
routines which print progress during execution.
'''
        # use self.__dict__.update to register variables since __setattr__ is
        # used to set variables for R. We tried to define __setattr in the
        # class and change it to __setattr__ for instances at the end of
        # __init__, but that seems to fail.
# -- maybe this only failed in Python2.5? as warned at
# http://wiki.python.org/moin/NewClassVsClassicClass:
# "Warning: In 2.5, magic names (typically those with a double
# underscore (DunderAlias) at both ends of the name) may look at the
# class rather than the instance even for old-style classes."
self.__dict__.update({'prog': None,
'has_numpy': use_numpy and has_numpy,
'has_pandas': use_pandas and has_pandas,
'Rfun': self.__class__.__Rfun,
'max_len': max_len,
'use_dict': use_dict,
'dump_stdout': dump_stdout,
'localhost': host == 'localhost',
'newline': sys.platform == 'win32' and '\r\n' or '\n',
'sendAll' : sendAll # keep a reference to the global function "sendAll" which will be used by __del__
})
RCMD = [RCMD] #shlex.split(RCMD) - shlex do not work properly on Windows! #re.split(r'\s', RCMD)
if not self.localhost:
RCMD.insert(0, host)
if user:
RCMD.insert(0, '-l%s' % user)
RCMD.insert(0, ssh)
# args = ('--vanilla',) # equal to --no-save, --no-restore, --no-site-file, --no-init-file and --no-environ
args = ('--quiet', '--no-save', '--no-restore') # "--slave" cannot be used on Windows!
for arg in args:
if arg not in RCMD:
RCMD.append(arg)
if _has_subp and hasattr(subprocess, 'STARTUPINFO'):
info = subprocess.STARTUPINFO()
try:
if hasattr(subprocess, '_subprocess'):
info.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess._subprocess.SW_HIDE
else:
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
except:
info = None
else:
info = None
# create stderr to replace None for py2exe:
# http://www.py2exe.org/index.cgi/Py2ExeSubprocessInteractions
if sys.platform != 'win32':
childstderr = None
else:
if hasattr(sys.stderr, 'fileno'):
childstderr = sys.stderr
elif hasattr(sys.stderr, '_file') and hasattr(sys.stderr._file, 'fileno'):
childstderr = sys.stderr._file
else: # Give up and point child stderr at nul
                childstderr = open('nul', 'a')  # the file() builtin does not exist in Python 3
self.__dict__['prog'] = Popen(RCMD, stdin=PIPE, stdout=PIPE, stderr=return_err and _STDOUT or childstderr, startupinfo=info)
self.__call__(self.Rfun)
def __runOnce(self, CMD, use_try=None):
'''
CMD: a R command string
'''
use_try = use_try or self._DEBUG_MODE
newline = self.newline
tail_token = 'R command at time: %s' % repr(time.time())
# tail_token_r = re.sub(r'[\(\)\.]', r'\\\1', tail_token)
tail_cmd = 'print("%s")%s' % (tail_token, newline)
tail_token = tail_token.replace(' ', '\\s').replace('.', '\\.').replace('+', '\\+')
re_tail = re.compile(r'>\sprint\("%s"\)\r?\n\[1\]\s"%s"\r?\n$' % (tail_token, tail_token))
if len(CMD) <= self.max_len or not self.localhost:
fn = None
CMD = (use_try and 'try({%s})%s%s' or '%s%s%s') % (CMD.replace('\\', '\\\\'), newline, tail_cmd)
else:
fh, fn = tempfile.mkstemp()
os.fdopen(fh, 'wb').write(_mybytes(CMD))
if sys.platform == 'cli':
os.close(fh) # this is necessary on IronPython
fn = fn.replace('\\', '/')
            CMD = (use_try and 'try({source("%s")})%sfile.remove(%r)%s%s' or 'source("%s")%sfile.remove(%r)%s%s') % (fn, newline, fn, newline, tail_cmd)
self.sendAll(self.prog, CMD)
rlt = ''
while not re_tail.search(rlt):
try:
rltonce = readLine(self.prog, dump_stdout=self.dump_stdout)
if rltonce:
rlt = rlt + rltonce
except:
break
else:
rlt = re_tail.sub('', rlt)
if rlt.startswith('> '):
rlt = rlt[2:]
# if fn is not None: os.unlink(fn)
return(rlt)
def __call__(self, CMDS=[], use_try=None):
'''
Run a (list of) R command(s), and return the output message from the STDOUT of R.
CMDS: an R command string or a list of R commands
'''
rlt = []
if isinstance(CMDS, basestring): # a single command
rlt.append(self.__runOnce(CMDS, use_try=use_try))
else: # should be a list of commands
# for CMD in CMDS:
# rlt.append(self.__runOnce(CMD, use_try=use_try))
rlt.append(self.__runOnce('; '.join(CMDS), use_try=use_try)) # now, ['sink("output.txt")', ..., 'sink()'] is allowed!
if len(rlt) == 1:
rlt = rlt[0]
return(rlt)
def __getitem__(self, obj, use_try=None, use_dict=None): # to model a dict: "r['XXX']"
'''
Get the value of an R variable or expression. The return value is
converted to the corresponding Python object.
obj: a string - the name of an R variable, or an R expression
use_try: use "try" function to wrap the R expression. This can avoid R
crashing if the obj does not exist in R.
        use_dict: a named list will be returned as a dict if use_dict is True,
            otherwise as a list of tuples (name, value)
'''
if obj.startswith('_'):
raise RError('Leading underscore ("_") is not permitted in R variable names!')
use_try = use_try or self._DEBUG_MODE
if use_dict is None:
use_dict = self.use_dict
cmd = '.getRvalue4Python__(%s, use_dict=%s, has_numpy=%s, has_pandas=%s)' % (obj, use_dict is None and 'NULL' or use_dict and 'TRUE' or 'FALSE', self.has_numpy and 'TRUE' or 'FALSE', self.has_pandas and 'TRUE' or 'FALSE')
rlt = self.__call__(cmd, use_try=use_try)
head = (use_try and 'try({%s})%s[1] ' or '%s%s[1] ') % (cmd, self.newline)
# sometimes (e.g. after "library(fastICA)") the R on Windows uses '\n' instead of '\r\n'
head = rlt.startswith(head) and len(head) or len(head) - 1
tail = rlt.endswith(self.newline) and len(rlt) - len(self.newline) or len(rlt) - len(self.newline) + 1 # - len('"')
try:
rlt = eval(eval(rlt[head:tail])) # The inner eval remove quotes and recover escaped characters.
except:
raise RError(rlt)
return(rlt)
def __setitem__(self, obj, val): # to model a dict: "r['XXX'] = YYY"
'''
Assign a value (val) to an R variable (obj).
obj: a string - the name of an R variable
val: a python object - the value to be passed to an R object
'''
if obj.startswith('_'):
raise RError('Leading underscore ("_") is not permitted in R variable names!')
self.__call__('%s <- %s' % (obj, Str4R(val)))
def __delitem__(self, obj): # to model a dict: "del r['XXX']"
if obj.startswith('_'):
raise RError('Leading underscore ("_") is not permitted in R variable names!')
self.__call__('rm(%s)' % obj)
def __del__(self): # to model "del r"
if self.prog:
try:
self.sendAll(self.prog, 'q("no")'+self.newline)
except:
pass
self.prog = None
def __getattr__(self, obj, use_dict=None): # to model object attribute: "r.XXX"
'''
obj: a string - the name of an R variable
        use_dict: a named list will be returned as a dict if use_dict is True,
            otherwise as a list of tuples (name, value)
'''
# Overriding __getattr__ is safer than __getattribute__ since it is
# only called as a last resort i.e. if there are no attributes in the
# instance that match the name
if obj in self.__dict__:
return(self.__dict__[obj])
if obj in self.__class__.__dict__:
return(self.__class__.__dict__[obj])
try:
if use_dict is None:
use_dict = self.use_dict
rlt = self.__getitem__(obj, use_dict=use_dict)
except:
raise # RError('No this object!')
return(rlt)
def __setattr__(self, obj, val): # to model object attribute: "r.XXX = YYY"
if obj in self.__dict__ or obj in self.__class__.__dict__: # or obj.startswith('_'):
self.__dict__[obj] = val # for old-style class
#object.__setattr__(self, obj, val) # for new-style class
else:
self.__setitem__(obj, val)
def __delattr__(self, obj): # to model object attribute: "del r.XXX"
if obj in self.__dict__:
del self.__dict__[obj]
else:
self.__delitem__(obj)
def get(self, obj, default=None, use_dict=None): # to model a dict: "r.get('XXX', 'YYY')"
'''
obj: a string - the name of an R variable, or an R expression
default: a python object - the value to be returned if failed to get data from R
        use_dict: a named list will be returned as a dict if use_dict is True,
            otherwise as a list of tuples (name, value). If use_dict is
            None, the value of self.use_dict will be used instead.
'''
        try:
            rlt = self.__getitem__(obj, use_try=True, use_dict=use_dict)
        except:
            rlt = default  # fall back to the default, dict.get-style
        return(rlt)
run, assign, remove = __call__, __setitem__, __delitem__
# for a single-round duty:
def runR(CMDS, Robj='R', max_len=1000, use_numpy=True, use_pandas=True, use_dict=None, host='localhost', user=None, ssh='ssh', dump_stdout=False):
'''
Run a (list of) R command(s), and return the output from the STDOUT.
CMDS: a R command string or a list of R commands.
Robj: can be a shell command (like /usr/bin/R), or the R class.
max_len: define the upper limitation for the length of command string. A
command string will be passed to R by a temporary file if it is longer
than this value.
use_numpy: Used as a boolean value. A False value will disable numpy even
if it has been imported.
use_pandas: Used as a boolean value. A False value will disable pandas even
if it has been imported.
    use_dict: a named list will be returned as a dict if use_dict is True,
        otherwise as a list of tuples (name, value).
    host: The computer name (or IP) on which the R interpreter is
        installed. The value "localhost" means that R runs on the
        local computer. On POSIX systems (including the Cygwin
        environment on Windows), it is possible to use R on a remote
        computer if the command "ssh" works. To do that, the user needs to
        set this value, and perhaps the parameter "user".
    user: The user name on the remote computer. This value needs to be set
        only if the user name is different on the remote computer. In an
        interactive environment, the password can be input by the user if
        prompted. If running in a program, the user needs to be able to
        login without typing a password!
    ssh: The program to login to the remote computer.
    dump_stdout: print output from R directly to sys.stdout while running.
'''
if isinstance(Robj, basestring):
Robj = R(RCMD=Robj, max_len=max_len, use_numpy=use_numpy, use_pandas=use_pandas, use_dict=use_dict, host=host, user=user, ssh=ssh, dump_stdout=dump_stdout)
rlt = Robj.run(CMDS=CMDS)
if len(rlt) == 1:
rlt = rlt[0]
return(rlt)
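# A minimal, hedged demo of the single-round helper above; it assumes an R
# interpreter named 'R' is available on the PATH.
if __name__ == '__main__':
    # Run one R expression and show everything R wrote to its stdout.
    print(runR('1 + 1'))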
|
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .call_variants."""
import collections
import errno
import sys
from unittest import mock
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from third_party.nucleus.io import tfrecord
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import variant_utils
from deepvariant import call_variants
from deepvariant import modeling
from deepvariant import testdata
from deepvariant import tf_utils
from deepvariant.protos import deepvariant_pb2
FLAGS = flags.FLAGS
# NB. This entire collection of tests will be invoked with --use_tpu=true and
# --use_tpu=false by the BUILD file, and a TPU device will be allocated when
# necessary.
def setUpModule():
testdata.init()
# For tests that don't actually want to read a real checkpoint,
# return a fake one. The estimator understands None to mean
# that all the variables should be left uninitialized.
_LEAVE_MODEL_UNINITIALIZED = None
# Return the stream of batched images from a dataset.
def _get_infer_batches(tf_dataset, model, batch_size):
"""Provides batches of pileup images from this dataset.
This instantiates an iterator on the dataset, and returns the
image, variant, alt_allele_indices, features in batches. It calls
model.preprocess_images on the images (but note that we will be moving
that step into model_fn for the Estimator api).
Args:
tf_dataset: DeepVariantInput.
model: DeepVariantModel.
batch_size: int. The batch size.
Returns:
(image, variant, alt_allele_indices)
Raises:
ValueError: if the dataset has the wrong mode.
"""
if tf_dataset.mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError('tf_dataset.mode is {} but must be PREDICT.'.format(
tf_dataset.mode))
params = dict(batch_size=batch_size)
features = tf.compat.v1.data.make_one_shot_iterator(
tf_dataset(params)).get_next()
images = features['image']
if tf_dataset.tensor_shape:
# tensor_shape will be None if the input was an empty file.
images = model.preprocess_images(images)
variant = features['variant']
alt_allele_indices = features['alt_allele_indices']
return images, variant, alt_allele_indices
class CallVariantsEndToEndTests(
six.with_metaclass(parameterized.TestGeneratorMetaclass,
tf.compat.v1.test.TestCase)):
def setUp(self):
self.checkpoint_dir = tf.compat.v1.test.get_temp_dir()
def assertCallVariantsEmitsNRecordsForInceptionV3(self, filename,
num_examples):
outfile = test_utils.test_tmpfile('inception_v3.call_variants.tfrecord')
model = modeling.get_model('inception_v3')
checkpoint_path = _LEAVE_MODEL_UNINITIALIZED
call_variants.call_variants(
examples_filename=filename,
checkpoint_path=checkpoint_path,
model=model,
output_file=outfile,
batch_size=4,
max_batches=None)
call_variants_outputs = list(
tfrecord.read_tfrecords(outfile, deepvariant_pb2.CallVariantsOutput))
# Check that we have the right number of output protos.
self.assertEqual(len(call_variants_outputs), num_examples)
def assertCallVariantsEmitsNRecordsForConstantModel(self, filename,
num_examples):
checkpoint_path = _LEAVE_MODEL_UNINITIALIZED
outfile = test_utils.test_tmpfile('call_variants.tfrecord')
model = modeling.get_model('constant')
call_variants.call_variants(
examples_filename=filename,
checkpoint_path=checkpoint_path,
model=model,
output_file=outfile,
batch_size=4,
max_batches=None,
master='',
use_tpu=FLAGS.use_tpu)
call_variants_outputs = list(
tfrecord.read_tfrecords(outfile, deepvariant_pb2.CallVariantsOutput))
# Check that we have the right number of output protos.
self.assertEqual(len(call_variants_outputs), num_examples)
def test_call_end2end_with_empty_shards(self):
# Get only up to 10 examples.
examples = list(
tfrecord.read_tfrecords(
testdata.GOLDEN_CALLING_EXAMPLES, max_records=10))
# Write to 15 shards, which means there will be multiple empty shards.
source_path = test_utils.test_tmpfile('sharded@{}'.format(15))
tfrecord.write_tfrecords(examples, source_path)
self.assertCallVariantsEmitsNRecordsForConstantModel(
source_path, len(examples))
def test_call_end2end_empty_first_shard(self):
# Get only up to 10 examples.
examples = list(
tfrecord.read_tfrecords(
testdata.GOLDEN_CALLING_EXAMPLES, max_records=10))
empty_first_file = test_utils.test_tmpfile('empty_1st_shard-00000-of-00002')
tfrecord.write_tfrecords([], empty_first_file)
second_file = test_utils.test_tmpfile('empty_1st_shard-00001-of-00002')
tfrecord.write_tfrecords(examples, second_file)
self.assertCallVariantsEmitsNRecordsForConstantModel(
test_utils.test_tmpfile('empty_1st_shard@2'), len(examples))
def test_call_end2end_zero_record_file_for_inception_v3(self):
zero_record_file = test_utils.test_tmpfile('zero_record_file')
tfrecord.write_tfrecords([], zero_record_file)
self.assertCallVariantsEmitsNRecordsForInceptionV3(
test_utils.test_tmpfile('zero_record_file'), 0)
def _call_end2end_helper(self, examples_path, model, shard_inputs):
examples = list(tfrecord.read_tfrecords(examples_path))
if shard_inputs:
# Create a sharded version of our golden examples.
source_path = test_utils.test_tmpfile('sharded@{}'.format(3))
tfrecord.write_tfrecords(examples, source_path)
else:
source_path = examples_path
# If we point the test at a headless server, it will often be 2x2,
# which has 8 replicas. Otherwise a smaller batch size is fine.
if FLAGS.use_tpu:
batch_size = 8
else:
batch_size = 4
if model.name == 'constant':
# For the constant model we can run everything.
max_batches = None
else:
# For all other models we only run a single batch for inference.
max_batches = 1
outfile = test_utils.test_tmpfile('call_variants.tfrecord')
call_variants.call_variants(
examples_filename=source_path,
checkpoint_path=_LEAVE_MODEL_UNINITIALIZED,
model=model,
output_file=outfile,
batch_size=batch_size,
max_batches=max_batches,
master='',
use_tpu=FLAGS.use_tpu,
)
call_variants_outputs = list(
tfrecord.read_tfrecords(outfile, deepvariant_pb2.CallVariantsOutput))
return call_variants_outputs, examples, batch_size, max_batches
@parameterized.parameters(model for model in modeling.production_models())
@flagsaver.flagsaver
def test_call_end2end_with_labels(self, model):
FLAGS.debugging_true_label_mode = True
(call_variants_outputs, examples, batch_size,
max_batches) = self._call_end2end_helper(testdata.GOLDEN_TRAINING_EXAMPLES,
model, False)
# Check that we have the right number of output protos.
self.assertEqual(
len(call_variants_outputs),
batch_size * max_batches if max_batches else len(examples))
# Checks that at least some of the `true_label`s are filled.
self.assertTrue(
any(cvo.debug_info.true_label > 0 for cvo in call_variants_outputs))
@parameterized.parameters((model, shard_inputs, include_debug_info)
for shard_inputs in [False, True]
for model in modeling.production_models()
for include_debug_info in [False, True])
@flagsaver.flagsaver
def test_call_end2end(self, model, shard_inputs, include_debug_info):
FLAGS.include_debug_info = include_debug_info
(call_variants_outputs, examples, batch_size,
max_batches) = self._call_end2end_helper(testdata.GOLDEN_CALLING_EXAMPLES,
model, shard_inputs)
# Check that we have the right number of output protos.
self.assertEqual(
len(call_variants_outputs),
batch_size * max_batches if max_batches else len(examples))
# Check that our CallVariantsOutput (CVO) have the following critical
# properties:
# - we have one CVO for each example we processed.
# - the variant in the CVO is exactly what was in the example.
# - the alt_allele_indices of the CVO match those of its corresponding
# example.
# - there are 3 genotype probabilities and these are between 0.0 and 1.0.
# We can only do this test when processing all of the variants (max_batches
# is None), since we processed all of the examples with that model.
if max_batches is None:
six.assertCountEqual(self, [cvo.variant for cvo in call_variants_outputs],
[tf_utils.example_variant(ex) for ex in examples])
# Check the CVO debug_info: not filled if include_debug_info is False;
# else, filled by logic based on CVO.
if not include_debug_info:
for cvo in call_variants_outputs:
self.assertEqual(cvo.debug_info,
deepvariant_pb2.CallVariantsOutput.DebugInfo())
else:
for cvo in call_variants_outputs:
self.assertEqual(cvo.debug_info.has_insertion,
variant_utils.has_insertion(cvo.variant))
self.assertEqual(cvo.debug_info.has_deletion,
variant_utils.has_deletion(cvo.variant))
self.assertEqual(cvo.debug_info.is_snp,
variant_utils.is_snp(cvo.variant))
self.assertEqual(cvo.debug_info.predicted_label,
np.argmax(cvo.genotype_probabilities))
self.assertEqual(len(cvo.debug_info.logits), 3)
self.assertEqual(len(cvo.debug_info.prelogits), 2048)
def example_matches_call_variants_output(example, call_variants_output):
return (tf_utils.example_variant(example) == call_variants_output.variant
and tf_utils.example_alt_alleles_indices(
example) == call_variants_output.alt_allele_indices.indices)
for call_variants_output in call_variants_outputs:
# Find all matching examples.
matches = [
ex for ex in examples
if example_matches_call_variants_output(ex, call_variants_output)
]
# We should have exactly one match.
self.assertEqual(len(matches), 1)
example = matches[0]
# Check that we've faithfully copied in the alt alleles (though currently
# as implemented we find our example using this information so it cannot
# fail). Included here in case that changes in the future.
self.assertEqual(
list(tf_utils.example_alt_alleles_indices(example)),
list(call_variants_output.alt_allele_indices.indices))
# We should have exactly three genotype probabilities (assuming our
# ploidy == 2).
self.assertEqual(len(call_variants_output.genotype_probabilities), 3)
# These are probabilities so they should be between 0 and 1.
self.assertTrue(
all(0 <= gp <= 1 for gp in call_variants_output.genotype_probabilities))
# pylint: disable=g-complex-comprehension
@parameterized.parameters(
(model, bad_format)
for model in modeling.production_models()
for bad_format in [six.b(''), six.b('png')])
# pylint: enable=g-complex-comprehension
def test_call_variants_with_invalid_format(self, model, bad_format):
# Read one good record from a valid file.
example = next(tfrecord.read_tfrecords(testdata.GOLDEN_CALLING_EXAMPLES))
# Overwrite the image/format field to be an invalid value
# (anything but 'raw').
example.features.feature['image/format'].bytes_list.value[0] = bad_format
source_path = test_utils.test_tmpfile('make_examples_output.tfrecord')
tfrecord.write_tfrecords([example], source_path)
outfile = test_utils.test_tmpfile('call_variants_invalid_format.tfrecord')
with self.assertRaises(ValueError):
call_variants.call_variants(
examples_filename=source_path,
checkpoint_path=_LEAVE_MODEL_UNINITIALIZED,
model=model,
output_file=outfile,
batch_size=1,
max_batches=1,
use_tpu=FLAGS.use_tpu)
@parameterized.parameters(model for model in modeling.production_models())
def test_call_variants_with_no_shape(self, model):
# Read one good record from a valid file.
example = next(tfrecord.read_tfrecords(testdata.GOLDEN_CALLING_EXAMPLES))
# Remove image/shape.
del example.features.feature['image/shape']
source_path = test_utils.test_tmpfile('make_examples_out_noshape.tfrecord')
tfrecord.write_tfrecords([example], source_path)
with six.assertRaisesRegex(
self, ValueError,
'Invalid image/shape: we expect to find an image/shape '
'field with length 3.'):
ds = call_variants.prepare_inputs(source_path)
_ = list(_get_infer_batches(ds, model=model, batch_size=1))
def test_call_variants_with_empty_input(self):
source_path = test_utils.test_tmpfile('empty.tfrecord')
tfrecord.write_tfrecords([], source_path)
# Make sure that prepare_inputs doesn't crash on empty input.
ds = call_variants.prepare_inputs(source_path)
m = modeling.get_model('constant')
# The API specifies that OutOfRangeError is thrown in this case.
batches = list(_get_infer_batches(ds, model=m, batch_size=1))
with self.test_session() as sess:
sess.run(tf.compat.v1.local_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
try:
_ = sess.run(batches)
except tf.errors.OutOfRangeError:
pass
class CallVariantsUnitTests(
six.with_metaclass(parameterized.TestGeneratorMetaclass, tf.test.TestCase)):
@classmethod
def setUpClass(cls):
cls.examples = list(
tfrecord.read_tfrecords(testdata.GOLDEN_CALLING_EXAMPLES))
cls.variants = [tf_utils.example_variant(ex) for ex in cls.examples]
cls.model = modeling.get_model('constant')
@parameterized.parameters(
('not_sharded', 'not_sharded'),
('sharded@3', 'sharded@3'),
('sharded@3', 'sharded-?????-of-00003'),
('asterisks@2', 'asterisks-*-of-00002'),
)
def test_prepare_inputs(self, filename_to_write, file_string_input):
source_path = test_utils.test_tmpfile(filename_to_write)
tfrecord.write_tfrecords(self.examples, source_path)
# file_string_input could be a comma-separated list. Add the prefix to all
# of them, and join it back to a string.
file_string_input = ','.join(
[test_utils.test_tmpfile(f) for f in file_string_input.split(',')])
with self.test_session() as sess:
sess.run(tf.compat.v1.local_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
ds = call_variants.prepare_inputs(file_string_input)
_, variants, _ = _get_infer_batches(ds, model=self.model, batch_size=1)
seen_variants = []
try:
while True:
seen_variants.extend(sess.run(variants))
except tf.errors.OutOfRangeError:
pass
six.assertCountEqual(self, self.variants,
variant_utils.decode_variants(seen_variants))
@parameterized.parameters(
(None, [3.592555731302127e-5, 0.99992620944976807, 3.78809563699178e-5]),
(2, [0.0, 1.0, 0.0]),
(10, [3.59096e-5, 0.9999262094, 3.7881e-5]),
)
def test_round_gls(self, precision, expected):
test_data = [3.592555731302127e-5, 0.99992620944976807, 3.78809563699178e-5]
actual = call_variants.round_gls(test_data, precision)
self.assertEqual(actual, expected)
@parameterized.parameters('auto', 'cpu')
def test_call_variants_non_accelerated_execution_runs(self,
execution_hardware):
if FLAGS.use_tpu:
# predict batch size must be divisible by number of replicas.
batch_size = 2
else:
batch_size = 1
outfile = test_utils.test_tmpfile('call_variants_cpu_only.tfrecord')
call_variants.call_variants(
examples_filename=testdata.GOLDEN_CALLING_EXAMPLES,
checkpoint_path=_LEAVE_MODEL_UNINITIALIZED,
model=self.model,
execution_hardware=execution_hardware,
max_batches=1,
batch_size=batch_size,
output_file=outfile,
use_tpu=FLAGS.use_tpu)
@parameterized.parameters(
dict(hardware_env='auto', devices=['cpu'], expect_exception=False),
dict(hardware_env='auto', devices=['gpu'], expect_exception=False),
dict(hardware_env='auto', devices=['tpu'], expect_exception=False),
dict(hardware_env='cpu', devices=['cpu'], expect_exception=False),
dict(hardware_env='cpu', devices=['gpu'], expect_exception=False),
dict(hardware_env='cpu', devices=['tpu'], expect_exception=False),
dict(hardware_env='accelerator', devices=['cpu'], expect_exception=True),
dict(hardware_env='accelerator', devices=['gpu'], expect_exception=False),
dict(hardware_env='accelerator', devices=['tpu'], expect_exception=False),
dict(
hardware_env='accelerator',
devices=['cpu', 'gpu'],
expect_exception=False),
dict(
hardware_env='accelerator',
devices=['cpu', 'tpu'],
expect_exception=False),
dict(
hardware_env='accelerator',
devices=['cpu', 'gpu', 'tpu'],
expect_exception=False),
)
def test_call_variants_execution_hardware(self, hardware_env, devices,
expect_exception):
# We cannot access the full _DeviceAttribute as it's not exported. So use a
# namedtuple with the same field names instead.
device = collections.namedtuple('_DeviceAttribute', ['name', 'device_type'])
# Mocking the list_devices call means the framework attempts to use a bogus
# TPU device, which fails, so don't do that. Handle the TPU case elsewhere.
if 'tpu' in devices or FLAGS.use_tpu:
return
with mock.patch.object(call_variants.tf.compat.v1.Session,
'list_devices') as mock_ld:
mock_ld.return_value = [
device(name=dt + '/' + str(i), device_type=dt.upper())
for i, dt in enumerate(devices)
]
# Only run the tpu cases when we have an actual tpu device, supplied
# by the flags from the BUILD rule.
def _run():
call_variants.call_variants(
use_tpu=FLAGS.use_tpu,
examples_filename=testdata.GOLDEN_CALLING_EXAMPLES,
checkpoint_path=_LEAVE_MODEL_UNINITIALIZED,
model=self.model,
execution_hardware=hardware_env,
max_batches=1,
batch_size=1,
output_file=test_utils.test_tmpfile('zzz.tfrecord'))
if expect_exception:
with self.assertRaises(call_variants.ExecutionHardwareError):
_run()
else:
_run()
def test_catches_bad_argv(self):
with mock.patch.object(logging, 'error') as mock_logging, mock.patch.object(
sys, 'exit') as mock_exit:
call_variants.main(['call_variants.py', 'extra_arg'])
mock_logging.assert_called_once_with(
'Command line parsing failure: call_variants does not accept '
'positional arguments but some are present on the command line: '
'"[\'call_variants.py\', \'extra_arg\']".')
mock_exit.assert_called_once_with(errno.ENOENT)
if __name__ == '__main__':
absltest.main()
|
|
'''Tests for some time series analysis functions
'''
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import statsmodels.api as sm
import statsmodels.tsa.stattools as tsa
import statsmodels.tsa.tsatools as tools
from statsmodels.tsa.tsatools import vec, vech
from .results import savedrvs
from .results.datamlw_tls import mlacf, mlccf, mlpacf, mlywar
xo = savedrvs.rvsdata.xar2
x100 = xo[-100:]/1000.
x1000 = xo/1000.
def test_acf():
acf_x = tsa.acf(x100, unbiased=False)[:21]
assert_array_almost_equal(mlacf.acf100.ravel(), acf_x, 8) #why only dec=8
acf_x = tsa.acf(x1000, unbiased=False)[:21]
assert_array_almost_equal(mlacf.acf1000.ravel(), acf_x, 8) #why only dec=9
def test_ccf():
ccf_x = tsa.ccf(x100[4:], x100[:-4], unbiased=False)[:21]
assert_array_almost_equal(mlccf.ccf100.ravel()[:21][::-1], ccf_x, 8)
ccf_x = tsa.ccf(x1000[4:], x1000[:-4], unbiased=False)[:21]
assert_array_almost_equal(mlccf.ccf1000.ravel()[:21][::-1], ccf_x, 8)
def test_pacf_yw():
pacfyw = tsa.pacf_yw(x100, 20, method='mle')
assert_array_almost_equal(mlpacf.pacf100.ravel(), pacfyw, 1)
pacfyw = tsa.pacf_yw(x1000, 20, method='mle')
assert_array_almost_equal(mlpacf.pacf1000.ravel(), pacfyw, 2)
#assert False
def test_pacf_ols():
pacfols = tsa.pacf_ols(x100, 20)
assert_array_almost_equal(mlpacf.pacf100.ravel(), pacfols, 8)
pacfols = tsa.pacf_ols(x1000, 20)
assert_array_almost_equal(mlpacf.pacf1000.ravel(), pacfols, 8)
#assert False
def test_ywcoef():
assert_array_almost_equal(mlywar.arcoef100[1:],
-sm.regression.yule_walker(x100, 10, method='mle')[0], 8)
assert_array_almost_equal(mlywar.arcoef1000[1:],
-sm.regression.yule_walker(x1000, 20, method='mle')[0], 8)
def test_yule_walker_inter():
# see 1869
x = np.array([1, -1, 2, 2, 0, -2, 1, 0, -3, 0, 0])
# it works
result = sm.regression.yule_walker(x, 3)
def test_duplication_matrix():
for k in range(2, 10):
m = tools.unvech(np.random.randn(k * (k + 1) // 2))  # integer length (Python 3 safe)
Dk = tools.duplication_matrix(k)
assert(np.array_equal(vec(m), np.dot(Dk, vech(m))))
def test_elimination_matrix():
for k in range(2, 10):
m = np.random.randn(k, k)
Lk = tools.elimination_matrix(k)
assert(np.array_equal(vech(m), np.dot(Lk, vec(m))))
def test_commutation_matrix():
m = np.random.randn(4, 3)
K = tools.commutation_matrix(4, 3)
assert(np.array_equal(vec(m.T), np.dot(K, vec(m))))
def test_vec():
arr = np.array([[1, 2],
[3, 4]])
assert(np.array_equal(vec(arr), [1, 3, 2, 4]))
def test_vech():
arr = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert(np.array_equal(vech(arr), [1, 4, 7, 5, 8, 9]))
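# For reference: vec stacks the full columns of a matrix (column-major order),
# while vech stacks only the on-and-below-diagonal entries column by column,
# which is what the two expected vectors above illustrate.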
def test_add_lag_insert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:3],lagmat,nddata[3:,-1]))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_add_lag_noinsert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3, insert=False)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_add_lag_noinsert_atend():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,-1],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(data, 'cpi', 3, insert=False)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
# should be the same as insert
lag_data2 = sm.tsa.add_lag(data, 'cpi', 3, insert=True)
assert_equal(lag_data2.view((float,len(lag_data2.dtype.names))), results)
def test_add_lag_ndarray():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:3],lagmat,nddata[3:,-1]))
lag_data = sm.tsa.add_lag(nddata, 2, 3)
assert_equal(lag_data, results)
def test_add_lag_noinsert_ndarray():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(nddata, 2, 3, insert=False)
assert_equal(lag_data, results)
def test_add_lag_noinsertatend_ndarray():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,-1],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(nddata, 3, 3, insert=False)
assert_equal(lag_data, results)
# should be the same as insert also check negative col number
lag_data2 = sm.tsa.add_lag(nddata, -1, 3, insert=True)
assert_equal(lag_data2, results)
def test_add_lag1d():
data = np.random.randn(100)
lagmat = sm.tsa.lagmat(data,3,trim='Both')
results = np.column_stack((data[3:],lagmat))
lag_data = sm.tsa.add_lag(data, lags=3, insert=True)
assert_equal(results, lag_data)
# add index
data = data[:,None]
lagmat = sm.tsa.lagmat(data,3,trim='Both') # test for lagmat too
results = np.column_stack((data[3:],lagmat))
lag_data = sm.tsa.add_lag(data,lags=3, insert=True)
assert_equal(results, lag_data)
def test_add_lag1d_drop():
data = np.random.randn(100)
lagmat = sm.tsa.lagmat(data,3,trim='Both')
lag_data = sm.tsa.add_lag(data, lags=3, drop=True, insert=True)
assert_equal(lagmat, lag_data)
# no insert, should be the same
lag_data = sm.tsa.add_lag(data, lags=3, drop=True, insert=False)
assert_equal(lagmat, lag_data)
def test_add_lag1d_struct():
data = np.zeros(100, dtype=[('variable',float)])
nddata = np.random.randn(100)
data['variable'] = nddata
lagmat = sm.tsa.lagmat(nddata,3,trim='Both', original='in')
lag_data = sm.tsa.add_lag(data, 'variable', lags=3, insert=True)
assert_equal(lagmat, lag_data.view((float,4)))
lag_data = sm.tsa.add_lag(data, 'variable', lags=3, insert=False)
assert_equal(lagmat, lag_data.view((float,4)))
lag_data = sm.tsa.add_lag(data, lags=3, insert=True)
assert_equal(lagmat, lag_data.view((float,4)))
def test_add_lag_1d_drop_struct():
data = np.zeros(100, dtype=[('variable',float)])
nddata = np.random.randn(100)
data['variable'] = nddata
lagmat = sm.tsa.lagmat(nddata,3,trim='Both')
lag_data = sm.tsa.add_lag(data, lags=3, drop=True)
assert_equal(lagmat, lag_data.view((float,3)))
def test_add_lag_drop_insert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:2],lagmat,nddata[3:,-1]))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3, drop=True)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_add_lag_drop_noinsert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,np.array([0,1,3])],lagmat))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3, insert=False, drop=True)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_freq_to_period():
from pandas.tseries.frequencies import to_offset
freqs = ['A', 'AS-MAR', 'Q', 'QS', 'QS-APR', 'W', 'W-MON', 'B']
expected = [1, 1, 4, 4, 4, 52, 52, 52]
for i, j in zip(freqs, expected):
assert_equal(tools.freq_to_period(i), j)
assert_equal(tools.freq_to_period(to_offset(i)), j)
if __name__ == '__main__':
#running them directly
# test_acf()
# test_ccf()
# test_pacf_yw()
# test_pacf_ols()
# test_ywcoef()
import nose
nose.runmodule()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map
except ImportError:
pass
from contextlib import contextmanager
import logging
import traceback
import warnings
import json
import hashlib
import re
import copy
import functools
import luigi
from luigi import six
from luigi import parameter
from luigi.task_register import Register
from luigi.parameter import ParameterVisibility
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_TRUNCATE_PARAMS = 16
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]')
_SAME_AS_PYTHON_MODULE = '_same_as_python_module'
def namespace(namespace=None, scope=''):
"""
Call to set namespace of tasks declared after the call.
It is often desired to call this function with the keyword argument
``scope=__name__``.
The ``scope`` keyword makes it so that this call is only effective for task
classes with a matching [*]_ ``__module__``. The default value for
``scope`` is the empty string, which means all classes. Multiple calls with
the same scope simply replace each other.
The namespace of a :py:class:`Task` can also be changed by specifying the property
``task_namespace``.
.. code-block:: python
class Task2(luigi.Task):
task_namespace = 'namespace2'
This explicit setting takes priority over whatever is set in the
``namespace()`` function, and it's also inherited through normal Python
inheritance.
There's no equivalent way to set the ``task_family``.
*New since Luigi 2.6.0:* ``scope`` keyword argument.
.. [*] When there are multiple levels of matching module scopes like
``a.b`` vs ``a.b.c``, the more specific one (``a.b.c``) wins.
.. seealso:: The new and better scaling :py:func:`auto_namespace`
"""
Register._default_namespace_dict[scope] = namespace or ''
def auto_namespace(scope=''):
"""
Same as :py:func:`namespace`, but instead of a constant namespace, it will
be set to the ``__module__`` of the task class. This is desirable for these
reasons:
* Two tasks with the same name will not have conflicting task families
* It's more pythonic, as modules are Python's recommended way to
do namespacing.
* It's traceable. When you see the full name of a task, you can immediately
identify where it is defined.
We recommend calling this function from your package's outermost
``__init__.py`` file. The file contents could look like this:
.. code-block:: python
import luigi
luigi.auto_namespace(scope=__name__)
To reset an ``auto_namespace()`` call, you can use
``namespace(scope='my_scope')``. But this will not be
needed (and is also discouraged) if you use the ``scope`` kwarg.
*New since Luigi 2.6.0.*
"""
namespace(namespace=_SAME_AS_PYTHON_MODULE, scope=scope)
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
# task_id is a concatenation of the task family, the (truncated) values of the
# first 3 parameters sorted by parameter name, and an md5 hash of the
# family/parameters serialized as canonicalised JSON.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])
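# A sketch of the resulting format, using hypothetical family/params:
#   task_id_str('MyTask', {'date': '2015-01-01', 'other': 'hello'})
#   -> 'MyTask_2015_01_01_hello_<first 10 hex chars of the md5 hash>'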
class BulkCompleteNotImplementedError(NotImplementedError):
"""This is here to trick pylint.
pylint thinks anything raising NotImplementedError needs to be implemented
in any subclass. bulk_complete isn't like that. This tricks pylint into
thinking that the default implementation is a valid implementation and not
an abstract method."""
pass
@six.add_metaclass(Register)
class Task(object):
"""
This is the base class of all Luigi Tasks, the base unit of work in Luigi.
A Luigi Task describes a unit of work.
The key methods of a Task, which must be implemented in a subclass are:
* :py:meth:`run` - the computation done by this task.
* :py:meth:`requires` - the list of Tasks that this Task depends on.
* :py:meth:`output` - the output :py:class:`Target` that this Task creates.
Each :py:class:`~luigi.Parameter` of the Task should be declared as a member:
.. code:: python
class MyTask(luigi.Task):
count = luigi.IntParameter()
second_param = luigi.Parameter()
In addition to any declared properties and methods, there are a few
non-declared properties, which are created by the :py:class:`Register`
metaclass:
"""
_event_callbacks = {}
#: Priority of the task: the scheduler should favor available
#: tasks with higher priority values first.
#: See :ref:`Task.priority`
priority = 0
disabled = False
#: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
#: task requires 1 unit of the scp resource.
resources = {}
#: Number of seconds after which to time out the run function.
#: No timeout if set to 0.
#: Defaults to 0 or worker-timeout value in config file
#: Only works when using multiple workers.
worker_timeout = None
#: Maximum number of tasks to run together as a batch. Infinite by default
max_batch_size = float('inf')
@property
def batchable(self):
"""
True if this instance can be run as part of a batch. By default, True
if it has any batched parameters
"""
return bool(self.batch_param_names())
@property
def retry_count(self):
"""
Override this with a positive integer to use a different ``retry_count`` at the task level.
Check :ref:`scheduler-config`
"""
return None
@property
def disable_hard_timeout(self):
"""
Override this with a positive integer to use a different ``disable_hard_timeout`` at the task level.
Check :ref:`scheduler-config`
"""
return None
@property
def disable_window_seconds(self):
"""
Override this with a positive integer to use a different ``disable_window_seconds`` at the task level.
Check :ref:`scheduler-config`
"""
return None
@property
def owner_email(self):
'''
Override this to send out additional error emails to task owner, in addition to the one
defined in the global configuration. This should return a string or a list of strings. e.g.
'test@example.com' or ['test1@example.com', 'test2@example.com']
'''
return None
def _owner_list(self):
"""
Turns the owner_email property into a list. This should not be overridden.
"""
owner_email = self.owner_email
if owner_email is None:
return []
elif isinstance(owner_email, six.string_types):
return owner_email.split(',')
else:
return owner_email
@property
def use_cmdline_section(self):
''' Property used by core config such as `--workers` etc.
These will be exposed without the class name as a prefix.'''
return True
@classmethod
def event_handler(cls, event):
"""
Decorator for adding event handlers.
"""
def wrapped(callback):
cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
return callback
return wrapped
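# A minimal usage sketch (luigi.Event.SUCCESS is one of the built-in events
# defined in luigi.event):
#
#   @luigi.Task.event_handler(luigi.Event.SUCCESS)
#   def celebrate(task):
#       print('%s finished successfully' % task.task_id)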
def trigger_event(self, event, *args, **kwargs):
"""
Trigger an event, calling all of the callbacks registered for that event on this class.
"""
for event_class, event_callbacks in six.iteritems(self._event_callbacks):
if not isinstance(self, event_class):
continue
for callback in event_callbacks.get(event, []):
try:
# callbacks are protected
callback(*args, **kwargs)
except KeyboardInterrupt:
return
except BaseException:
logger.exception("Error in event callback for %r", event)
@property
def accepts_messages(self):
"""
For configuring which scheduler messages can be received. When falsy, this task does not
accept any messages. When True, all messages are accepted.
"""
return False
@property
def task_module(self):
''' Returns what Python module to import to get access to this class. '''
# TODO(erikbern): we should think about a language-agnostic mechanism
return self.__class__.__module__
_visible_in_registry = True # TODO: Consider using in luigi.util as well
__not_user_specified = '__not_user_specified'
# This is here just to help pylint, the Register metaclass will always set
# this value anyway.
_namespace_at_class_time = None
task_namespace = __not_user_specified
"""
This value can be overridden to set the namespace that will be used.
(See :ref:`Task.namespaces_famlies_and_ids`)
If it's not specified and you try to read this value anyway, it will return
garbage. Please use :py:meth:`get_task_namespace` to read the namespace.
Note that setting this value with ``@property`` will not work, because this
is a class level value.
"""
@classmethod
def get_task_namespace(cls):
"""
The task namespace for the given class.
Note: You normally don't want to override this.
"""
if cls.task_namespace != cls.__not_user_specified:
return cls.task_namespace
elif cls._namespace_at_class_time == _SAME_AS_PYTHON_MODULE:
return cls.__module__
return cls._namespace_at_class_time
@property
def task_family(self):
"""
DEPRECATED since after 2.4.0. See :py:meth:`get_task_family` instead.
Hopefully there will be less meta magic in Luigi.
Convenience method since a property on the metaclass isn't directly
accessible through the class instances.
"""
return self.__class__.task_family
@classmethod
def get_task_family(cls):
"""
The task family for the given class.
If ``task_namespace`` is not set, then it's simply the name of the
class. Otherwise, ``<task_namespace>.`` is prefixed to the class name.
Note: You normally don't want to override this.
"""
if not cls.get_task_namespace():
return cls.__name__
else:
return "{}.{}".format(cls.get_task_namespace(), cls.__name__)
@classmethod
def get_params(cls):
"""
Returns all of the Parameters for this Task.
"""
# We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
params = []
for param_name in dir(cls):
param_obj = getattr(cls, param_name)
if not isinstance(param_obj, Parameter):
continue
params.append((param_name, param_obj))
# The order the parameters are created matters. See Parameter class
params.sort(key=lambda t: t[1]._counter)
return params
@classmethod
def batch_param_names(cls):
return [name for name, p in cls.get_params() if p._is_batchable()]
@classmethod
def get_param_names(cls, include_significant=False):
return [name for name, p in cls.get_params() if include_significant or p.significant]
@classmethod
def get_param_values(cls, params, args, kwargs):
"""
Get the values of the parameters from the args and kwargs.
:param params: list of (param_name, Parameter).
:param args: positional arguments
:param kwargs: keyword arguments.
:returns: list of `(name, value)` tuples, one for each parameter.
"""
result = {}
params_dict = dict(params)
task_family = cls.get_task_family()
# In case any exceptions are thrown, create a helpful description of how the Task was invoked
# TODO: should we detect non-reprable arguments? These will lead to mysterious errors
exc_desc = '%s[args=%s, kwargs=%s]' % (task_family, args, kwargs)
# Fill in the positional arguments
positional_params = [(n, p) for n, p in params if p.positional]
for i, arg in enumerate(args):
if i >= len(positional_params):
raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
param_name, param_obj = positional_params[i]
result[param_name] = param_obj.normalize(arg)
# Then the keyword arguments
for param_name, arg in six.iteritems(kwargs):
if param_name in result:
raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
if param_name not in params_dict:
raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
result[param_name] = params_dict[param_name].normalize(arg)
# Then use the defaults for anything not filled in
for param_name, param_obj in params:
if param_name not in result:
if not param_obj.has_task_value(task_family, param_name):
raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
result[param_name] = param_obj.task_value(task_family, param_name)
def list_to_tuple(x):
""" Make tuples out of lists and sets to allow hashing """
if isinstance(x, list) or isinstance(x, set):
return tuple(x)
else:
return x
# Sort it by the correct order and make a list
return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]
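# A sketch of the resolution order for a hypothetical task with parameters
# `a` (positional, no default) and `b`:
#   MyTask.get_param_values(MyTask.get_params(), ['x'], {'b': 'y'})
#   -> [('a', 'x'), ('b', 'y')]
# Anything not supplied positionally or by keyword falls back to the
# parameter's default, or raises MissingParameterException if there is none.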
def __init__(self, *args, **kwargs):
params = self.get_params()
param_values = self.get_param_values(params, args, kwargs)
# Set all values on class instance
for key, value in param_values:
setattr(self, key, value)
# Register kwargs as an attribute on the instance. Might be useful
self.param_kwargs = dict(param_values)
self._warn_on_wrong_param_types()
self.task_id = task_id_str(self.get_task_family(), self.to_str_params(only_significant=True, only_public=True))
self.__hash = hash(self.task_id)
self.set_tracking_url = None
self.set_status_message = None
self.set_progress_percentage = None
@property
def param_args(self):
warnings.warn("Use of param_args has been deprecated.", DeprecationWarning)
return tuple(self.param_kwargs[k] for k, v in self.get_params())
def initialized(self):
"""
Returns ``True`` if the Task is initialized and ``False`` otherwise.
"""
return hasattr(self, 'task_id')
def _warn_on_wrong_param_types(self):
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
params[param_name]._warn_on_wrong_param_type(param_name, param_value)
@classmethod
def from_str_params(cls, params_str):
"""
Creates an instance from a str->str hash.
:param params_str: dict of param name -> value as string.
"""
kwargs = {}
for param_name, param in cls.get_params():
if param_name in params_str:
param_str = params_str[param_name]
if isinstance(param_str, list):
kwargs[param_name] = param._parse_list(param_str)
else:
kwargs[param_name] = param.parse(param_str)
return cls(**kwargs)
def to_str_params(self, only_significant=False, only_public=False):
"""
Convert all parameters to a str->str hash.
"""
params_str = {}
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
if (((not only_significant) or params[param_name].significant)
and ((not only_public) or params[param_name].visibility == ParameterVisibility.PUBLIC)
and params[param_name].visibility != ParameterVisibility.PRIVATE):
params_str[param_name] = params[param_name].serialize(param_value)
return params_str
def _get_param_visibilities(self):
param_visibilities = {}
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
if params[param_name].visibility != ParameterVisibility.PRIVATE:
param_visibilities[param_name] = params[param_name].visibility.serialize()
return param_visibilities
def clone(self, cls=None, **kwargs):
"""
Creates a new instance from an existing instance where some of the args have changed.
There are at least two scenarios where this is useful (see test/clone_test.py):
* remove a lot of boilerplate when you have recursive dependencies and lots of args
* there's task inheritance and some logic is on the base class
:param cls:
:param kwargs:
:return:
"""
if cls is None:
cls = self.__class__
new_k = {}
for param_name, param_class in cls.get_params():
if param_name in kwargs:
new_k[param_name] = kwargs[param_name]
elif hasattr(self, param_name):
new_k[param_name] = getattr(self, param_name)
return cls(**new_k)
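# A minimal usage sketch with hypothetical task classes:
#
#   class TaskB(luigi.Task):
#       date = luigi.DateParameter()
#       other = luigi.Parameter()
#
#   class TaskA(luigi.Task):
#       date = luigi.DateParameter()
#
#       def requires(self):
#           # `date` is carried over from self; `other` is supplied explicitly.
#           return self.clone(TaskB, other='foo')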
def __hash__(self):
return self.__hash
def __repr__(self):
"""
Build a task representation like `MyTask(param1=1.5, param2='5')`
"""
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
# Build up the repr parts
repr_parts = []
param_objs = dict(params)
for param_name, param_value in param_values:
if param_objs[param_name].significant:
repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
task_str = '{}({})'.format(self.get_task_family(), ', '.join(repr_parts))
return task_str
def __eq__(self, other):
return self.__class__ == other.__class__ and self.task_id == other.task_id
def complete(self):
"""
If the task has any outputs, return ``True`` if all outputs exist.
Otherwise, return ``False``.
However, you may freely override this method with custom logic.
"""
outputs = flatten(self.output())
if len(outputs) == 0:
warnings.warn(
"Task %r without outputs has no custom complete() method" % self,
stacklevel=2
)
return False
return all(map(lambda output: output.exists(), outputs))
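# A sketch of a custom completeness check (hypothetical marker file; assumes
# ``import os`` and would live on your own Task subclass):
#
#   def complete(self):
#       return os.path.exists('/tmp/%s.done' % self.task_id)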
@classmethod
def bulk_complete(cls, parameter_tuples):
"""
Returns those of parameter_tuples for which this Task is complete.
Override (with an efficient implementation) for efficient scheduling
with range tools. Keep the logic consistent with that of complete().
"""
raise BulkCompleteNotImplementedError()
def output(self):
"""
The output that this Task produces.
The output of the Task determines if the Task needs to be run--the task
is considered finished iff the outputs all exist. Subclasses should
override this method to return a single :py:class:`Target` or a list of
:py:class:`Target` instances.
Implementation note
If running multiple workers, the output must be a resource that is accessible
by all workers, such as a DFS or database. Otherwise, workers might compute
the same output since they don't see the work done by other workers.
See :ref:`Task.output`
"""
return [] # default impl
def requires(self):
"""
The Tasks that this Task depends on.
A Task will only run if all of the Tasks that it requires are completed.
If your Task does not require any other Tasks, then you don't need to
override this method. Otherwise, subclasses can override this method
to return a single Task, a list of Task instances, or a dict whose
values are Task instances.
See :ref:`Task.requires`
"""
return [] # default impl
def _requires(self):
"""
Override in "template" tasks which themselves are supposed to be
subclassed and thus have their requires() overridden (name preserved to
provide consistent end-user experience), yet need to introduce
(non-input) dependencies.
Must return an iterable which among others contains the _requires() of
the superclass.
"""
return flatten(self.requires()) # base impl
def process_resources(self):
"""
Override in "template" tasks which provide common resource functionality
but allow subclasses to specify additional resources while preserving
the name for consistent end-user experience.
"""
return self.resources # default impl
def input(self):
"""
Returns the outputs of the Tasks returned by :py:meth:`requires`
See :ref:`Task.input`
:return: a list of :py:class:`Target` objects which are specified as
outputs of all required Tasks.
"""
return getpaths(self.requires())
def deps(self):
"""
Internal method used by the scheduler.
Returns the flattened list of requires.
"""
# used by scheduler
return flatten(self._requires())
def run(self):
"""
The task run method, to be overridden in a subclass.
See :ref:`Task.run`
"""
pass # default impl
def on_failure(self, exception):
"""
Override for custom error handling.
This method gets called if an exception is raised in :py:meth:`run`.
The returned value of this method is json encoded and sent to the scheduler
as the `expl` argument. Its string representation will be used as the
body of the error email sent out if any.
Default behavior is to return a string representation of the stack trace.
"""
traceback_string = traceback.format_exc()
return "Runtime error:\n%s" % traceback_string
def on_success(self):
"""
Override for doing custom completion handling for a larger class of tasks
This method gets called when :py:meth:`run` completes without raising any exceptions.
The returned value is json encoded and sent to the scheduler as the `expl` argument.
Default behavior is to send a None value."""
pass
@contextmanager
def no_unpicklable_properties(self):
"""
Remove unpicklable properties before dumping the task and restore them afterwards.
This method can be called in a subclass's dump method to ensure that unpicklable
properties won't break the dump.
This method is a context manager which can be used as below:
.. code-block:: python
class DummyTask(luigi.Task):
def _dump(self):
with self.no_unpicklable_properties():
pickle.dumps(self)
"""
unpicklable_properties = tuple(luigi.worker.TaskProcess.forward_reporter_attributes.values())
reserved_properties = {}
for property_name in unpicklable_properties:
if hasattr(self, property_name):
reserved_properties[property_name] = getattr(self, property_name)
setattr(self, property_name, 'placeholder_during_pickling')
yield
for property_name, value in six.iteritems(reserved_properties):
setattr(self, property_name, value)
class MixinNaiveBulkComplete(object):
"""
Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop.
Applicable to tasks whose completeness checking is cheap.
This doesn't exploit output-location-specific APIs for a speed advantage, but it still removes redundant scheduler roundtrips.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
generated_tuples = []
for parameter_tuple in parameter_tuples:
if isinstance(parameter_tuple, (list, tuple)):
if cls(*parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
elif isinstance(parameter_tuple, dict):
if cls(**parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
else:
if cls(parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
return generated_tuples
class ExternalTask(Task):
"""
Subclass for references to external dependencies.
An ExternalTask does not have a `run` implementation, which signifies to
the framework that this Task's :py:meth:`output` is generated outside of
Luigi.
"""
run = None
def externalize(taskclass_or_taskobject):
"""
Returns an externalized version of a Task. You may pass either an
instantiated task object or a task class. Some examples:
.. code-block:: python
class RequiringTask(luigi.Task):
def requires(self):
task_object = self.clone(MyTask)
return externalize(task_object)
...
Here's mostly equivalent code, but ``externalize`` is applied to a task
class instead.
.. code-block:: python
@luigi.util.requires(externalize(MyTask))
class RequiringTask(luigi.Task):
pass
...
Of course, it may also be used directly on classes and objects (for example
for reexporting or other usage).
.. code-block:: python
MyTask = externalize(MyTask)
my_task_2 = externalize(MyTask2(param='foo'))
If you however want a task class to be external from the beginning, you're
better off inheriting :py:class:`ExternalTask` rather than :py:class:`Task`.
This function tries to be side-effect free by creating a copy of the class
or the object passed in and then modifying that copy. In particular, the
following code shouldn't do anything.
.. code-block:: python
externalize(MyTask) # BAD: This does nothing (as after luigi 2.4.0)
"""
# Seems like with python < 3.3 copy.copy can't copy classes
# and objects with specified metaclass http://bugs.python.org/issue11480
compatible_copy = copy.copy if six.PY3 else copy.deepcopy
copied_value = compatible_copy(taskclass_or_taskobject)
if copied_value is taskclass_or_taskobject:
# Assume it's a class
clazz = taskclass_or_taskobject
@_task_wraps(clazz)
class _CopyOfClass(clazz):
# How to copy a class: http://stackoverflow.com/a/9541120/621449
_visible_in_registry = False
_CopyOfClass.run = None
return _CopyOfClass
else:
# We assume it's an object
copied_value.run = None
return copied_value
class WrapperTask(Task):
"""
Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
"""
def complete(self):
return all(r.complete() for r in flatten(self.requires()))
class Config(Task):
"""
Class for configuration. See :ref:`ConfigClasses`.
"""
# TODO: let's refactor Task & Config so that it inherits from a common
# ParamContainer base class
pass
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
return struct.__class__((k, getpaths(v)) for k, v in six.iteritems(struct))
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else:
# Remaining case: assume struct is iterable...
try:
return [getpaths(r) for r in struct]
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct))
def flatten(struct):
"""
Creates a flat list of all items in structured output (dicts, lists, items):
.. code-block:: python
>>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
['bar', 'foo']
>>> sorted(flatten(['foo', ['bar', 'troll']]))
['bar', 'foo', 'troll']
>>> flatten('foo')
['foo']
>>> flatten(42)
[42]
"""
if struct is None:
return []
flat = []
if isinstance(struct, dict):
for _, result in six.iteritems(struct):
flat += flatten(result)
return flat
if isinstance(struct, six.string_types):
return [struct]
try:
# if iterable
iterator = iter(struct)
except TypeError:
return [struct]
for result in iterator:
flat += flatten(result)
return flat
def flatten_output(task):
"""
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
"""
r = flatten(task.output())
if not r:
for dep in flatten(task.requires()):
r += flatten_output(dep)
return r
def _task_wraps(task_class):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
assigned = functools.WRAPPER_ASSIGNMENTS + ('_namespace_at_class_time',)
return functools.wraps(task_class, assigned=assigned, updated=[])
|
|
import os, sys
import json
import time
import csv
try:
import http.client as client
import urllib.parse as parse
from urllib.request import urlopen as urlopen
from urllib.request import Request as request
from urllib.parse import urlencode as encode
# py2
except ImportError:
import httplib as client
from urllib2 import urlparse as parse
from urllib2 import urlopen as urlopen
from urllib2 import Request as request
from urllib import urlencode as encode
unicode = str
# Force connections to the older HTTP 1.0 standard, as this is faster with chunked responses, which is typically what you'll get
client.HTTPConnection._http_vsn= 10
client.HTTPConnection._http_vsn_str='HTTP/1.0'
licenseLookup = {"A" : 'desktopAdvN',
"S" : 'desktopStdN',
"B" : 'desktopBasicN',
"1" : 'spatialAnalystN',
"2" : '3DAnalystN',
"3" : 'networkAnalystN',
"4" : 'geostatAnalystN',
"5" : 'dataReviewerN',
"6" : 'workflowMgrN',
"7" : 'dataInteropN',
"8" : 'smpNAmericaN',
"9" : 'smpEuropeN'}
class ARCGIScom(object):
def __init__(self, username, password, orgURL):
now = time.time()
self.username = username
self.password = password
if "/sharing/rest" not in orgURL: orgURL += "/sharing/rest"
self.ORGURL = orgURL
self.LocalPortal = '7443' in orgURL  # TODO: may need to revisit this logic
self.token, self.expires, self.referer = self.getToken(username, password, orgURL)
self.OrgTitle, self.PortalURL = self.getPortalInfo()
self.adminEmail, self.adminFName, self.adminLName, self.orgID = self.getAdminUserInfo()
self.IdentityStoreType = self.getIdentityStore() if self.LocalPortal else "arcgiscom"
self.orgEntitlements, self.orgProvision, self.ProID = self.getOrgProvision()
self.LicensedUser, self.AllUser = self.getUserLicenseInfo()
self.LicensedUserCount = len(self.LicensedUser['userEntitlements'])
self.AllUserCount = len(self.AllUser['users'])
self.license = self.setLicenseCounts()
print("<<<TOTAL TIME to make requests: {0}>>>\n".format(time.time()-now))
def getToken(self, username, password, orgURL, exp=60):  # expires in 60 minutes
"""Generates a token."""
#TOKENURL = "https://www.arcgis.com/sharing/rest/generateToken"
if "https" not in orgURL.lower():
self.ORGURL = orgURL.replace("http", "https")
TOKENURL = self.ORGURL + "/generateToken"
if "arcgis.com" in orgURL: referer = "http://www.arcgis.com/"
else:
referer = orgURL[:orgURL.rfind(":",2)] #eg. 'https://server.domain.com'
tokenParams = {'username' : username,
'password' : password,
'client' : 'referer',
'referer' : referer,
'f' : 'json' }
token = sendReq(TOKENURL, tokenParams)
if "token" not in token:
print(token['error'])
sys.exit()
else:
return token['token'], token['expires'], referer
def getOrgProvision(self):
"""Get basic information about the org from the user assigned to it"""
orgURL = self.ORGURL + '/portals/self/purchases'
orgReqParms = {'token': self.token,
'f' :'json' }
orgData = sendReq(orgURL, orgReqParms)
try:
purchaseNum = len(orgData['purchases'])
except: #If user is not an admin it will error, display that error msg.
print ("You do not appear to be an admin for any Orgs. Goodbye.")
sys.exit()
if purchaseNum < 1:
print("User does not appear to have any valid licenses. Goodbye")
sys.exit()
else:
for i, val in enumerate(orgData['purchases']):
try:
if val['listing']['title'] == 'ArcGIS Pro':
orgEntitlement = val['provision']['orgEntitlements']
orgProvision = val['provision']
proID = val['provision']['itemId']
return orgEntitlement, orgProvision, proID
except:
continue
def getAdminUserInfo(self):
"""Collect details on the admin user"""
orgURL = self.ORGURL + '/community/self'
orgReqParms = {'token': self.token,
'f' :'json' }
orgData = sendReq(orgURL, orgReqParms)
try:
firstName = orgData['firstName']
lastName = orgData['lastName']
except KeyError:
firstName = ' '
lastName = ' '
return orgData['email'], firstName, lastName, orgData['orgId']
def getPortalInfo(self):
"""Portal name and URL"""
orgURL = self.ORGURL + '/portals/self'
portalParms = {'token': self.token,
'f' :'json' }
# There is a lot of data in here, but we only want the org title and portal URL.
portalData = sendReq(orgURL, portalParms)
try:
portalURL = "http://" + portalData['urlKey'] + "." + portalData['customBaseUrl']
except KeyError:
portalURL = "https://" + portalData['portalLocalHostname']
return portalData['name'], portalURL
def getUserLicenseInfo(self):
"""Get active users with license assignments"""
licensedUserDict = {'token': self.token,
'f' :'json' }
actURL = self.ORGURL + '/content/listings/{0}/userEntitlements'.format(self.ProID)
self.LicensedUser = sendReq(actURL, licensedUserDict)
#Get all users, 20 at a time, regardless of licensed or not
userCnt = 0
totalUsers = 1
start = 1
self.AllUser = {'users':[]}
allURL = self.ORGURL + '/portals/self/users'
while (userCnt < totalUsers) or (start != -1):
allUsersDict = {'start' : start,
'sortOrder' : 'asc',
'num' : 20,
'token': self.token,
'f' :'json' }
userResp = sendReq(allURL, allUsersDict)
totalUsers = userResp['total']
userCnt += len(userResp['users'])
start = userResp['nextStart']
self.AllUser['users'] += userResp['users']
#Mash the 2 dicts together
reset = True
for i, allUsersVal in enumerate(self.AllUser['users']):
user = ''
for j, entitledUsersVal in enumerate(self.LicensedUser['userEntitlements']):
reset = False
if allUsersVal['username'] == entitledUsersVal['username']:
user = allUsersVal['username']
allUsersVal.update(entitledUsersVal)
reset = True
break
if reset == True:
continue
return self.LicensedUser, self.AllUser
def setLicenseCounts(self):
"""Keep track of number of used licenses"""
#key: ['Nice name', Entitled to #, Number Used]
self.license = {'desktopAdvN':["Advanced", 0, 0],
'desktopStdN':["Standard", 0, 0],
'desktopBasicN':["Basic", 0, 0],
'spatialAnalystN':["Spatial Statistics", 0, 0],
'geostatAnalystN':["GeoStatistical Analyst", 0, 0],
'workflowMgrN':["Workflow Manager", 0, 0],
'networkAnalystN':["Network Analyst", 0, 0],
'dataReviewerN':["Data Reviewer", 0, 0],
'3DAnalystN':["3D Analyst", 0, 0],
'dataInteropN':["Data Interop", 0, 0],
'smpNAmericaN':["St Map NA", 0, 0],
'smpEuropeN':["St Map Euro", 0, 0]
}
# Populate how many licenses each product has
for k,v in self.orgEntitlements['entitlements'].items():
self.license[k][1] = v['num']
for i, allUserVal in enumerate(self.AllUser['users']):
for k, v in allUserVal.items():
if k == 'entitlements':
for entitlement in v:
self.license[entitlement][2] += 1
return self.license
def getIdentityStore(self):
"""Examine what type of identity store is being used if local portal"""
URL = self.PortalURL + ":7443/arcgis/portaladmin/security/config"
if "https" not in URL:
URL = URL.replace("http", "https")
# This particular end point needs a GET request, not POST
URL += "?f=json&token={0}".format(self.token)
identityRes = sendReq(URL)
if len(identityRes) == 0:
return "builtin"
else:
#WINDOWS or LDAP
return identityRes['userStoreConfig']['type']
#### - Local portal only functions - ####
def releaseProLicense(self, user):
"""Release an offline license from the LOCAL portal"""
URL = self.PortalURL + ":7443/arcgis/portaladmin/system/licenses/releaseLicense"
releaseDict = {'token': self.token,
'f' :'json',
'username': user}
releaseRes = sendReq(URL, releaseDict)
return releaseRes
def getUsersByGroup(self, group):
"""Return all users part of a found group for a local portal with a windows/ldap security store"""
URL = self.PortalURL + ":7443/arcgis/portaladmin/security/groups/getUsersWithinEnterpriseGroup"
searchGroupDict = {'token': self.token,
'f' :'json',
'groupName': group}
EusersInGroup = sendReq(URL, searchGroupDict)
return EusersInGroup
def searchEUsers(self, userName):
"""Search for a user from a local portal that is using windows/ldap security store"""
URL = self.PortalURL + ":7443/arcgis/portaladmin/security/users/searchEnterpriseUsers"
searchEUserDict = {'token': self.token,
'f' :'json',
'filter': userName}
eUserList = sendReq(URL, searchEUserDict)
return eUserList
def createUserLocalPortal(self, userPayload):
"""Add a local, enterprise windows/ldap user to the portal"""
URL = self.PortalURL + ":7443/arcgis/portaladmin/security/users/createUser"
header = { 'Referer' : self.referer}
newUsers = []
for i, userVal in enumerate(userPayload['users']):
userVal['token'] = self.token
userVal['f'] = 'json'
userVal['role'] = 'org_user'
userVal['provider'] = 'enterprise'
try:
userVal['email']
except KeyError as e:
print("{0} key not found from group search.".format(e))
if e == "email":
print("Ensure the identity store is setup with 'mail', not 'email':")
print(" \"userEmailAttribute\": \"mail\" ")
return 0
createUserRes = sendReq(URL, userVal, header)
#If user was added, collect that name, but change to the NAME@DOMAIN style so we can license
if 'status' in createUserRes:
if createUserRes['status'] == 'success':
if "\\" in userVal['username']:
username = userVal['username'][userVal['username'].find("\\"):].strip("\\") +"@"+ userVal['username'][:userVal['username'].find("\\")]
else:
username =userVal['username']
newUsers.append(username)
else:
print(createUserRes)
return newUsers
#### //END - Local portal only functions - ####
#### ArcGIS.com Portal functions - ####
def listActiveUsers(self):
"""Return all user information as JSON"""
#Currently not used anywhere
activeUserDict = {'token': self.token,
'f' :'json' }
URL = self.ORGURL + '/content/listings/{0}/userEntitlements'.format(self.ProID)
print("\nQuerying for active users, this may take a few seconds....\n")
activeUsers = sendReq(URL, activeUserDict)
return activeUsers
def checkUser(self, username):
"""Check that a username is available and can be created in the Portal"""
URL = self.ORGURL + "/community/checkUsernames"
checkUserDict = {'token': self.token,
'f': 'json',
'usernames': username}
checkUserRes = sendReq(URL, checkUserDict)
if "usernames" in checkUserRes:
if checkUserRes['usernames'][0]['suggested'] == checkUserRes['usernames'][0]['requested']:
return username
else:
#print("Requested user name {0} is unavailable. Using {1}\n".format(username, checkUserRes['usernames'][0]['suggested']))
return checkUserRes['usernames'][0]['suggested']
else:
print("Error checking username: {0}\n".format(username))
print(checkUserRes)
return "" #Return an empty string, logic on the other side needs to catch the error
def createUser(self, userPayload):
''' Create a user account with password
        Accepts a sequence of user records (dicts keyed by the CSV headers):
        Username, First Name, Last Name, Email, Role, and optionally Password
Note: This is the only function that accepts a list. Everything else is singular.
'''
create = False
if "Password" in userPayload[0]:
create = True
print("Passwords found: >creating<, not inviting users by email.")
URL = self.ORGURL + "/portals/self/invite"
newUsers = []
for i in range(0, len(userPayload)):
newUserDict = {'token': self.token,
'f' :'json',
'invitationList': {"invitations":[
{"username":userPayload[i]['Username'],
#"password": "password123",
"firstname":userPayload[i]['First Name'],
"lastname":userPayload[i]['Last Name'],
"fullname":userPayload[i]['First Name'] +" "+ userPayload[i]['Last Name'],
"email":userPayload[i]['Email'],
"role":userPayload[i]['Role']}]},
'subject':'An invitation to join an ArcGIS Online Organization, {0}. DO NOT REPLY'.format(self.OrgTitle),
'html': '<html><body><p>{0} {1} has invited you to join an ArcGIS Online Organization, {2}.</p><p> \
Please click this link to finish setting up your account and establish your password:\
<a href="https://www.arcgis.com/home/newuser.html?invitation=@@invitation.id@@">\
https://www.arcgis.com/home/newuser.html?invitation=@@invitation.id@@</a></p><p>\
Note that your account has already been created for you with the username, <strong>@@touser.username@@</strong>\
and that usernames are case sensitive. </p><p>If you have difficulty signing in, please contact your \
administrator {1} {2} ({3}). Be sure to include a description of the problem, the error message, and a \
screenshot.</p><p>For your reference, you can access the home page of the organization here: \
<br>{4}</p><p>This link will expire in two weeks.</p><p style="color:gray;">\
This is an automated email, please do not reply.</p></body></html>'.format(self.OrgTitle, self.adminFName, self.adminLName, self.adminEmail, self.PortalURL)
}
#If a password exists, we're creating users, not inviting them: add password into the payload.
if create:
newUserDict['invitationList']['invitations'][0]['password'] = userPayload[i]['Password']
newUserDict['subject'] = "Some place holder text."
inviteRes = sendReq(URL, newUserDict)
if 'success' in inviteRes:
if inviteRes['success'] == True:
if inviteRes['notInvited']: pass
else:
newUsers.append(userPayload[i]['Username'])
else:
print(inviteRes)
return newUsers
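    # Illustrative payload shape for createUser above (an assumption based on the
    # docstring and on readUserCSV's output); keys must match the CSV headers:
    #   {0: {'Username': 'jsmith1234', 'Password': 'pw1234', 'First Name': 'Joe',
    #        'Last Name': 'Smith', 'Email': 'joe@email.com', 'Role': 'org_user'}}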
def deleteUser(self, user):
'''Deletes the user from the portal.
BE CAREFUL, THERE IS NO UNDO!'''
URL = self.ORGURL + "/community/users/{0}/delete".format(user)
deleteUserDict = {'token': self.token,
'f' :'json'}
deleteRes = sendReq(URL, deleteUserDict)
return deleteRes
def assignProPermissions(self, user, entitlements):
'''User is a simple string, entitlements need to be a list'''
#URL = CON.PortalURL + "/sharing/rest/content/listings/{0}/provisionUserEntitlements".format(CON.ProID)
URL = self.ORGURL + "/content/listings/{0}/provisionUserEntitlements".format(self.ProID)
permissionDict = {'token': self.token,
'f' :'json',
'userEntitlements': {'users':[user],
'entitlements':entitlements }
}
assignPermRes = sendReq(URL, permissionDict)
return assignPermRes
def createGroup(self, title, description, snippet, tags):
        '''Create an invite-only group inside ArcGIS.com. This group creation is designed to help manage accounts for license
management. Groups can be manually modified after creation to perform other functions.
'''
URL = self.ORGURL + "/community/createGroup"
createDict = {'token': self.token,
'f' :'json',
'title': title,
'access': 'org',
'description': description,
'snippet': snippet,
'tags': tags,
'isViewOnly': 'true',
'isInvitationOnly': 'true',
'sortField': 'avgrating',
'sortOrder': 'desc'
}
createGroupRes = sendReq(URL, createDict)
return createGroupRes
def searchGroups(self):
        '''Find all groups in the organization. The function pages through the results and returns all groups as JSON,
        so it only needs to be called once.
'''
URL = self.ORGURL + "/community/groups"
start = 1
groups = []
while start >=1:
searchDict = {'token': self.token,
'f':'json',
'q': 'orgid:' + self.orgID,
'start': start,
'num': 50,
'sortField': 'title'
}
createGroupRes = sendReq(URL, searchDict)
start = createGroupRes['nextStart']
groups += createGroupRes['results']
return groups
def addUserToGroup(self, groupID, users):
'''Assign 1 or more users to an existing group.'''
URL = self.ORGURL + "/community/groups/{0}/addUsers".format(groupID)
if type(users) == list:
users = ','.join(users)
addUsersDict = {'token': self.token,
'f':'json',
'users': users
}
addedUserResp = sendReq(URL, addUsersDict)
return addedUserResp
#### //END ArcGIS.com Portal functions - ####
# Helper functions
def sendReq(url, qDict=None, headers=None):
"""Robust request maker"""
    #Need to handle chunked responses / incomplete reads. Two solutions here: http://stackoverflow.com/questions/14442222/how-to-handle-incompleteread-in-python
    #This function sends the request and handles incomplete reads, but that handling was found to be very slow, adding ~30 seconds to chunked
    #responses. Forcing the connection to HTTP 1.0 at the top, for some reason, makes it faster.
#qData = parse.urlencode(qDict).encode('UTF-8') if qDict else None
qData = encode(qDict).encode('UTF-8') if qDict else None
reqObj = request(url)
if headers != None:
for k, v in headers.items():
reqObj.add_header(k, v)
try:
if qDict == None: #GET
r = urlopen(reqObj)
else: #POST
r = urlopen(reqObj, qData)
responseJSON=""
while True:
try:
responseJSONpart = r.read()
except client.IncompleteRead as icread:
responseJSON = responseJSON + icread.partial.decode('utf-8')
continue
else:
responseJSON = responseJSON + responseJSONpart.decode('utf-8')
break
return (json.loads(responseJSON))
except Exception as RESTex:
print("Exception occurred making REST call: " + RESTex.__str__())
def convertTime(intime, mins=True):
'''Convert epoch time to human readable'''
if intime == -1 or intime == "":
return intime
elif mins:
return time.strftime('%m-%d-%y %H:%M', time.localtime(intime/1000))
else:
return time.strftime('%m-%d-%y', time.localtime(intime/1000))
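# Quick illustration (assumed value): portal timestamps are epoch milliseconds,
# so convertTime divides by 1000 before formatting.
def _example_convertTime_usage():
    last_login_ms = 1489500000000                  # hypothetical portal timestamp
    return (convertTime(last_login_ms),            # e.g. '03-14-17 09:00' (local time)
            convertTime(last_login_ms, mins=False))  # e.g. '03-14-17'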
def readUserCSV(userCSV):
'''Reads a CSV file into a dictionary'''
if not os.path.isfile(userCSV):
print("CSV file not found. Check the path.")
return None
else:
userDict = {}
i=0
with open(userCSV, 'r') as f:
reader = csv.DictReader( f )
for line in reader:
userDict[i] = line
i+=1
return userDict
def createCSV(location, password='y'):
'''Creates a basic CSV to serve as a template.
    Note: No validation is done here; the function assumes the given file path is writable'''
licenses= ','.join(list(licenseLookup.values()))
template = {"First Name":"joe",
"Last Name":'smith',
"Email":"joe@email.com",
"Username":"joeUser1234",
"Password":"joePassword1234",
"Role":"account_user",
"License":licenses
}
if password == 'n':
template.pop('Password')
try:
with open(location, 'w', newline='') as f:
w = csv.DictWriter(f, template.keys())
w.writeheader()
w.writerow(template)
return 0
except:
return 1
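# Illustrative round trip (assumes the path is writable): createCSV writes a
# one-row template which readUserCSV then loads back as a dict keyed by row index.
def _example_csv_roundtrip(path='user_template.csv'):
    if createCSV(path) == 0:
        return readUserCSV(path)
    return None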
def exportUsersToCSV(location, users):
"""Exports all users from self.UserInfo to a CSV file """
    # If the first record, which we get headers from, doesn't have 'disconnectedInfo', add the key
try:
users['users'][0]['disconnectedInfo']
except KeyError:
users['users'][0]['disconnectedInfo'] = {'disconnectedSince':''}
try:
with open(location, 'w', newline='') as f:
w = csv.DictWriter(f, users['users'][0].keys())
w.writeheader()
for i, val in enumerate(users['users']):
w.writerow(val)
return 0
except:
return 1
# //End helper functions
if __name__ == "__main__":
print("Not to be called directly. Goodbye.")
|
|
from .common import random_str, check_subject_in_rb
from rancher import ApiError
from .conftest import wait_until, wait_for, DEFAULT_TIMEOUT
import pytest
import time
import kubernetes
rb_resource = 'rolebinding'
def test_create_cluster_template_with_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
_ = \
create_cluster_template_revision(admin_mc.client, templateId)
_ = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
template_reloaded = client.by_id_cluster_template(cluster_template.id)
assert template_reloaded.links.revisions is not None
def test_create_template_revision_k8s_translation(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"kubernetesVersion": "1.15"
}
}
with pytest.raises(ApiError) as e:
client.create_cluster_template_revision(clusterConfig=cconfig,
clusterTemplateId=tId,
enabled="true")
assert e.value.error.status == 422
# template k8s question needed if using generic version
cconfig = {
"rancherKubernetesEngineConfig": {
"kubernetesVersion": "1.15.x"
}
}
questions = [{
"variable": "dockerRootDir",
"required": "false",
"type": "string",
"default": "/var/lib/docker"
}]
with pytest.raises(ApiError) as e:
client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
assert e.value.error.status == 422
def test_default_pod_sec(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
enabled="true")
time.sleep(2)
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.defaultPodSecurityPolicyTemplateId == "restricted"
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_check_default_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
first_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
wait_for_default_revision(client, templateId, first_revision.id)
# delete the cluster template revision, it should error out
with pytest.raises(ApiError) as e:
client.delete(first_revision)
assert e.value.error.status == 403
def test_create_cluster_with_template(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
template_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
answers = {
"values": {
"dockerRootDir": "/var/lib/docker123",
"rancherKubernetesEngineConfig.ignoreDockerVersion":
"false"
}
}
revId = template_revision.id
client = admin_mc.client
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=revId,
description="template from cluster",
answers=answers)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.questions is not None
k8s_version = cluster.rancherKubernetesEngineConfig.kubernetesVersion
assert k8s_version != "v1.15.x"
# edit cluster should not fail
client.update(cluster, name=random_str(), clusterTemplateRevisionId=revId)
# edit cluster to remove template must fail
with pytest.raises(ApiError) as e:
client.update(cluster, name=random_str(), clusterTemplateId=None,
clusterTemplateRevisionId=None)
assert e.value.error.status == 422
# delete the cluster template, it should error out
with pytest.raises(ApiError) as e:
client.delete(cluster_template)
assert e.value.error.status == 422
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_create_cluster_validations(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
template_revision = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
revId = template_revision.id
client = admin_mc.client
rConfig = getRKEConfig()
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=revId,
description="template from cluster",
rancherKubernetesEngineConfig=rConfig)
except ApiError as e:
assert e.error.status == 500
@pytest.mark.nonparallel
def test_create_cluster_template_with_members(admin_mc, remove_resource,
user_factory):
client = admin_mc.client
user_member = user_factory()
remove_resource(user_member)
user_not_member = user_factory()
remove_resource(user_not_member)
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
time.sleep(30)
# check who has access to the cluster template
# admin and user_member should be able to list it
id = cluster_template.id
ct = client.by_id_cluster_template(id)
assert ct is not None
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_member.user.id, rb_name),
timeout=60,
fail_handler=lambda: "failed to check rolebinding")
um_client = user_member.client
ct = um_client.by_id_cluster_template(id)
assert ct is not None
# user not added as member shouldn't be able to access
unm_client = user_not_member.client
try:
unm_client.by_id_cluster_template(id)
except ApiError as e:
assert e.error.status == 403
# add * as member to share with all
new_members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}, {"groupPrincipalId": "*",
"accessType": "read-only"}]
client.update(ct, members=new_members)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
'system:authenticated', rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
time.sleep(30)
ct = user_not_member.client.by_id_cluster_template(id)
assert ct is not None
def test_creation_standard_user(admin_mc, remove_resource, user_factory):
user_member = user_factory()
remove_resource(user_member)
um_client = user_member.client
with pytest.raises(ApiError) as e:
um_client.create_cluster_template(name="user template",
description="user template")
assert e.value.error.status == 403
@pytest.mark.nonparallel
def test_check_enforcement(admin_mc, remove_resource,
list_remove_resource, user_factory):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
client = admin_mc.client
# turn on the enforcement
client.update_by_id_setting(id='cluster-template-enforcement',
value="true")
# a globaladmin can create a rke cluster without a template
cluster = client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
remove_list.insert(0, cluster)
# a user cannot create an rke cluster without template
user = user_factory()
remove_resource(user)
crtb_owner = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userId=user.user.id)
remove_resource(crtb_owner)
wait_until(rtb_cb(client, crtb_owner))
user_client = user.client
with pytest.raises(ApiError) as e:
user_client.create_cluster(name=random_str(),
rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
assert e.value.error.status == 422
# a user can create a non-rke cluster without template
cluster2 = user_client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd"})
remove_list.insert(0, cluster2)
# a user can create an rke cluster with a public template
template_reloaded = client.by_id_cluster_template(templateId)
new_members = [{"groupPrincipalId": "*", "accessType": "read-only"}]
client.update(template_reloaded, members=new_members)
cluster3 = wait_for_cluster_create(user_client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster3)
client.update_by_id_setting(id='cluster-template-enforcement',
value="false")
def test_revision_creation_permission(admin_mc, remove_resource,
user_factory):
user_readonly = user_factory()
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_readonly.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
templateId = cluster_template.id
# user with accessType=owner should be able to create revision
# since a standard user can add revisions to template shared
# with owner access
create_cluster_template_revision(user_owner.client, templateId)
# user with read-only accessType should get Forbidden error
with pytest.raises(ApiError) as e:
create_cluster_template_revision(user_readonly.client, templateId)
assert e.value.error.status == 403
def test_updated_members_revision_access(admin_mc, remove_resource,
user_factory):
# create cluster template without members and a revision
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
# update template to add a user as member
user_member = user_factory()
members = [{"userPrincipalId": "local://" + user_member.user.id,
"accessType": "read-only"}]
admin_mc.client.update(cluster_template, members=members)
# this member should get access to existing revision "rev"
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = rev.id.split(":")
name = split[1]
rb_name = name + "-ctr-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_member.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
revision = user_member.client.by_id_cluster_template_revision(rev.id)
assert revision is not None
# remove this user from cluster_template members list
admin_mc.client.update(cluster_template, members=[])
# now this user should not be able to see that revision
try:
user_member.client.by_id_cluster_template_revision(rev.id)
except ApiError as e:
assert e.error.status == 403
def test_permissions_removed_on_downgrading_access(admin_mc, remove_resource,
user_factory):
user_owner = user_factory()
remove_resource(user_owner)
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
    # create cluster template with one member having "owner" accessType
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
# user with accessType=owner should be able to update template
# so adding new member by the user_member should be allowed
new_member = user_factory()
remove_resource(new_member)
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"},
{"userPrincipalId": "local://" + new_member.user.id,
"accessType": "read-only"}]
user_owner.client.update(cluster_template, members=members)
# now change user_owner's accessType to read-only
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + new_member.user.id,
"accessType": "read-only"}]
admin_mc.client.update(cluster_template, members=members)
rb_name = name + "-ct-r"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
# user_owner should not be allowed to update cluster template now
# test updating members field by removing new_member
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "read-only"}]
try:
user_owner.client.update(cluster_template, members=members)
except ApiError as e:
assert e.error.status == 403
def test_required_template_question(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"variable":
"rancherKubernetesEngineConfig.ignoreDockerVersion",
"required": "false",
"type": "boolean",
"default": "true"
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template with no answer should fail
answers = {
"values": {
"rancherKubernetesEngineConfig.ignoreDockerVersion":
"false"
}
}
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster",
answers=answers)
except ApiError as e:
assert e.error.status == 422
def test_secret_template_answers(admin_mc, remove_resource,
list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
azureClientId = "rancherKubernetesEngineConfig.cloudProvider.\
azureCloudProvider.aadClientId"
azureClientSecret = "rancherKubernetesEngineConfig.cloudProvider.\
azureCloudProvider.aadClientSecret"
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"variable": azureClientId,
"required": "true",
"type": "string",
"default": "abcdClientId"
},
{
"variable": azureClientSecret,
"required": "true",
"type": "string",
"default": ""
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template
answers = {
"values": {
"dockerRootDir": "/var/lib/docker123",
azureClientId: "abcdClientId",
azureClientSecret: "abcdClientSecret"
}
}
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster",
answers=answers)
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.answers.values[azureClientId] is not None
assert azureClientSecret not in cluster.answers.values
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_member_accesstype_check(admin_mc, user_factory, remove_resource):
client = admin_mc.client
user_readonly = user_factory()
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "member"}]
# creation with a member with accessType "member" shouldn't be allowed
try:
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
except ApiError as e:
assert e.error.status == 422
members = [{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_resource(cluster_template)
updated_members = \
[{"userPrincipalId": "local://" + user_readonly.user.id,
"accessType": "read-only"},
{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "member"}]
# updating a cluster template to add user with access type "member"
# shouldn't be allowed
try:
client.update(cluster_template, members=updated_members)
except ApiError as e:
assert e.error.status == 422
def test_create_cluster_with_invalid_revision(admin_mc, remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
tId = cluster_template.id
client = admin_mc.client
# templaterevision with question with invalid format
cconfig = {
"rancherKubernetesEngineConfig": {
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
}
}
},
"defaultPodSecurityPolicyTemplateId": "restricted",
}
questions = [{
"variable": "dockerRootDir",
"required": "true",
"type": "string",
"default": ""
},
{
"default": "map[enabled:true type:localClusterAuthEndpoint]",
"required": "false",
"type": "string",
"variable": "localClusterAuthEndpoint"
}]
rev = client.create_cluster_template_revision(name=random_str(),
clusterConfig=cconfig,
clusterTemplateId=tId,
questions=questions,
enabled="true")
# creating a cluster with this template
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
except ApiError as e:
assert e.error.status == 422
def test_disable_template_revision(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
tId = cluster_template.id
client = admin_mc.client
rev = \
create_cluster_template_revision(admin_mc.client, tId)
# creating a cluster with this template
cluster = wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# disable the revision
client.action(obj=rev, action_name="disable")
try:
wait_for_cluster_create(client, name=random_str(),
clusterTemplateRevisionId=rev.id)
except ApiError as e:
assert e.error.status == 500
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def test_template_delete_by_members(admin_mc, remove_resource,
list_remove_resource, user_factory):
user_owner = user_factory()
members = [{"userPrincipalId": "local://" + user_owner.user.id,
"accessType": "owner"}]
cluster_template = create_cluster_template(admin_mc, members, admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
split = cluster_template.id.split(":")
name = split[1]
rb_name = name + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
user_owner.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
templateId = cluster_template.id
rev = create_cluster_template_revision(user_owner.client, templateId)
cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
# user with accessType=owner should not be able to delete this
# template since a cluster exists
wait_for_clusterTemplate_update_failure(admin_mc.client, rev)
with pytest.raises(ApiError) as e:
user_owner.client.delete(cluster_template)
assert e.value.error.status == 422
admin_mc.client.delete(cluster)
wait_for_cluster_to_be_deleted(admin_mc.client, cluster.id)
def test_template_access(admin_mc, remove_resource, user_factory):
user = user_factory()
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_resource(cluster_template)
templateId = cluster_template.id
rev = create_cluster_template_revision(admin_mc.client, templateId)
wait_for_clusterTemplate_list_failure(user.client, rev)
with pytest.raises(ApiError) as e:
user.client.create_cluster(name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
assert e.value.error.status == 404
def test_save_as_template_action(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc, [], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = create_cluster_template_revision(admin_mc.client, templateId)
cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
try:
admin_mc.client.action(obj=cluster, action_name="saveAsTemplate", )
except AttributeError as e:
assert e is not None
def test_cluster_desc_update(admin_mc, list_remove_resource):
cluster_template = create_cluster_template(admin_mc,
[], admin_mc)
remove_list = [cluster_template]
list_remove_resource(remove_list)
templateId = cluster_template.id
rev = \
create_cluster_template_revision(admin_mc.client, templateId)
# create a cluster with this template
client = admin_mc.client
cname = random_str()
cluster = wait_for_cluster_create(admin_mc.client, name=cname,
clusterTemplateRevisionId=rev.id,
description="template from cluster")
remove_list.insert(0, cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.description == 'template from cluster'
# edit cluster description
updatedC = client.update(cluster, name=cname,
clusterTemplateRevisionId=rev.id,
description="updated desc")
assert updatedC.description == 'updated desc'
client.delete(cluster)
wait_for_cluster_to_be_deleted(client, cluster.id)
def rtb_cb(client, rtb):
"""Wait for the prtb to have the userId populated"""
def cb():
rt = client.reload(rtb)
return rt.userPrincipalId is not None
return cb
def grb_cb(client, grb):
"""Wait for the grb to have the userId populated"""
def cb():
rt = client.reload(grb)
return rt.userId is not None
return cb
# When calling this function you _must_ remove the cluster_template manually.
# If a cluster is also created, it must be removed after the template.
def create_cluster_template(creator, members, admin_mc):
template_name = random_str()
cluster_template = \
creator.client.create_cluster_template(
name=template_name,
description="demo template",
members=members)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
rb_name = cluster_template.id.split(":")[1] + "-ct-a"
wait_for(lambda: check_subject_in_rb(rbac, 'cattle-global-data',
creator.user.id, rb_name),
timeout=60,
fail_handler=fail_handler(rb_resource))
return cluster_template
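# Illustrative sketch (not collected as a test): the cleanup pattern described in
# the comment above, using the admin_mc and list_remove_resource fixtures from this
# suite. A cluster created from the template is inserted at the head of the removal
# list so it is cleaned up together with the template.
def _example_template_cleanup(admin_mc, list_remove_resource):
    template = create_cluster_template(admin_mc, [], admin_mc)
    remove_list = [template]
    list_remove_resource(remove_list)
    rev = create_cluster_template_revision(admin_mc.client, template.id)
    cluster = wait_for_cluster_create(admin_mc.client, name=random_str(),
                                      clusterTemplateRevisionId=rev.id)
    remove_list.insert(0, cluster)
    return cluster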
def create_cluster_template_revision(client, clusterTemplateId):
rke_config = getRKEConfig()
cluster_config = {
"dockerRootDir": "/var/lib/docker",
"enableClusterAlerting": "false",
"enableClusterMonitoring": "false",
"enableNetworkPolicy": "false",
"type": "clusterSpecBase",
"localClusterAuthEndpoint": {
"enabled": "true",
"type": "localClusterAuthEndpoint"
},
"rancherKubernetesEngineConfig": rke_config
}
questions = [{
"variable": "dockerRootDir",
"required": "false",
"type": "string",
"default": "/var/lib/docker"
},
{
"variable":
"rancherKubernetesEngineConfig.ignoreDockerVersion",
"required": "false",
"type": "boolean",
"default": "true"
},
{
"variable":
"rancherKubernetesEngineConfig.kubernetesVersion",
"required": "false",
"type": "string",
"default": "1.15.x"
}]
revision_name = random_str()
cluster_template_revision = \
client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=clusterTemplateId,
disabled="false",
questions=questions
)
return cluster_template_revision
def getRKEConfig():
rke_config = {
"addonJobTimeout": 30,
"ignoreDockerVersion": "true",
"sshAgentAuth": "false",
"type": "rancherKubernetesEngineConfig",
"kubernetesVersion": "1.15.x",
"authentication": {
"strategy": "x509",
"type": "authnConfig"
},
"network": {
"plugin": "canal",
"type": "networkConfig",
"options": {
"flannel_backend_type": "vxlan"
}
},
"ingress": {
"provider": "nginx",
"type": "ingressConfig"
},
"monitoring": {
"provider": "metrics-server",
"type": "monitoringConfig"
},
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
},
"etcd": {
"creation": "12h",
"extraArgs": {
"heartbeat-interval": 500,
"election-timeout": 5000
},
"retention": "72h",
"snapshot": "false",
"type": "etcdService",
"backupConfig": {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig"
}
}
}
}
return rke_config
def wait_for_cluster_to_be_deleted(client, clusterId, timeout=45):
deleted = False
start = time.time()
interval = 0.5
while not deleted:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clusters")
cluster = client.by_id_cluster(clusterId)
if cluster is None:
deleted = True
time.sleep(interval)
interval *= 2
def wait_for_default_revision(client, templateId, revisionId, timeout=60):
updated = False
interval = 0.5
start = time.time()
while not updated:
if time.time() - start > timeout:
raise Exception('Timeout waiting for clustertemplate to update')
template_reloaded = client.by_id_cluster_template(templateId)
if template_reloaded.defaultRevisionId is not None:
updated = True
time.sleep(interval)
interval *= 2
def fail_handler(resource):
return "failed waiting for clustertemplate" + resource + " to get updated"
def wait_for_cluster_create(client, **kwargs):
timeout = DEFAULT_TIMEOUT
interval = 0.5
start = time.time()
while True:
try:
return client.create_cluster(kwargs)
except ApiError as e:
if e.error.status != 404:
raise e
if time.time() - start > timeout:
exception_msg = 'Timeout waiting for condition.'
raise Exception(exception_msg)
time.sleep(interval)
interval *= 2
def wait_for_clusterTemplate_update_failure(client, revision, timeout=45):
updateWorks = True
start = time.time()
interval = 0.5
cconfig = {
"rancherKubernetesEngineConfig": {
}
}
while updateWorks:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clustertemplate update failure")
try:
client.update(revision, name=random_str(), clusterConfig=cconfig)
except ApiError as e:
if e.error.status == 422:
updateWorks = False
time.sleep(interval)
interval *= 2
def wait_for_clusterTemplate_list_failure(client, revision, timeout=45):
listWorks = True
start = time.time()
interval = 0.5
while listWorks:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for clustertemplate list failure")
try:
client.by_id_cluster_template_revision(revision.id)
except ApiError as e:
if e.error.status == 403:
listWorks = False
time.sleep(interval)
interval *= 2
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception
from tempest import clients
from tempest.common.utils import data_utils
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.test import skip_because
from tempest.thirdparty.boto.test import BotoTestCase
from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
from tempest.thirdparty.boto.utils.wait import re_search_wait
from tempest.thirdparty.boto.utils.wait import state_wait
LOG = logging.getLogger(__name__)
class InstanceRunTest(BotoTestCase):
@classmethod
def setUpClass(cls):
super(InstanceRunTest, cls).setUpClass()
if not cls.conclusion['A_I_IMAGES_READY']:
raise cls.skipException("".join(("EC2 ", cls.__name__,
": requires ami/aki/ari manifest")))
cls.os = clients.Manager()
cls.s3_client = cls.os.s3_client
cls.ec2_client = cls.os.ec2api_client
cls.zone = cls.ec2_client.get_good_zone()
config = cls.config
cls.materials_path = config.boto.s3_materials_path
ami_manifest = config.boto.ami_manifest
aki_manifest = config.boto.aki_manifest
ari_manifest = config.boto.ari_manifest
cls.instance_type = config.boto.instance_type
cls.bucket_name = data_utils.rand_name("s3bucket-")
cls.keypair_name = data_utils.rand_name("keypair-")
cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
cls.keypair_name)
bucket = cls.s3_client.create_bucket(cls.bucket_name)
cls.addResourceCleanUp(cls.destroy_bucket,
cls.s3_client.connection_data,
cls.bucket_name)
s3_upload_dir(bucket, cls.materials_path)
cls.images = {"ami":
{"name": data_utils.rand_name("ami-name-"),
"location": cls.bucket_name + "/" + ami_manifest},
"aki":
{"name": data_utils.rand_name("aki-name-"),
"location": cls.bucket_name + "/" + aki_manifest},
"ari":
{"name": data_utils.rand_name("ari-name-"),
"location": cls.bucket_name + "/" + ari_manifest}}
for image in cls.images.itervalues():
image["image_id"] = cls.ec2_client.register_image(
name=image["name"],
image_location=image["location"])
cls.addResourceCleanUp(cls.ec2_client.deregister_image,
image["image_id"])
for image in cls.images.itervalues():
def _state():
retr = cls.ec2_client.get_image(image["image_id"])
return retr.state
state = state_wait(_state, "available")
if state != "available":
for _image in cls.images.itervalues():
cls.ec2_client.deregister_image(_image["image_id"])
raise exceptions.EC2RegisterImageException(image_id=
image["image_id"])
@attr(type='smoke')
def test_run_idempotent_instances(self):
# EC2 run instances idempotently
def _run_instance(client_token):
reservation = self.ec2_client.run_instances(
image_id=self.images["ami"]["image_id"],
kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type,
client_token=client_token)
rcuk = self.addResourceCleanUp(self.destroy_reservation,
reservation)
return (reservation, rcuk)
def _terminate_reservation(reservation, rcuk):
for instance in reservation.instances:
instance.terminate()
self.cancelResourceCleanUp(rcuk)
reservation_1, rcuk_1 = _run_instance('token_1')
reservation_2, rcuk_2 = _run_instance('token_2')
reservation_1a, rcuk_1a = _run_instance('token_1')
self.assertIsNotNone(reservation_1)
self.assertIsNotNone(reservation_2)
self.assertIsNotNone(reservation_1a)
# same reservation for token_1
self.assertEqual(reservation_1.id, reservation_1a.id)
        # Cancel cleanup -- since it's a duplicate, it's
        # handled by rcuk_1
self.cancelResourceCleanUp(rcuk_1a)
_terminate_reservation(reservation_1, rcuk_1)
_terminate_reservation(reservation_2, rcuk_2)
reservation_3, rcuk_3 = _run_instance('token_1')
self.assertIsNotNone(reservation_3)
# make sure we don't get the old reservation back
self.assertNotEqual(reservation_1.id, reservation_3.id)
# clean up
_terminate_reservation(reservation_3, rcuk_3)
@attr(type='smoke')
def test_run_stop_terminate_instance(self):
# EC2 run, stop and terminate instance
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type)
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
for instance in reservation.instances:
LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
for instance in reservation.instances:
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
for instance in reservation.instances:
instance.terminate()
self.cancelResourceCleanUp(rcuk)
@attr(type='smoke')
def test_run_stop_terminate_instance_with_tags(self):
# EC2 run, stop and terminate instance with tags
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type)
rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
for instance in reservation.instances:
LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
instance.add_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
self.assertEqual(tags[0].name, 'key1')
self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
self.assertEqual(tags[0].name, 'key1')
self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
self.assertEqual(tags[0].name, 'key1')
self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
self.assertEqual(len(tags), 0, str(tags))
for instance in reservation.instances:
instance.remove_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
self.assertEqual(len(tags), 0, str(tags))
for instance in reservation.instances:
instance.stop()
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
for instance in reservation.instances:
instance.terminate()
self.cancelResourceCleanUp(rcuk)
@skip_because(bug="1098891")
@attr(type='smoke')
def test_run_terminate_instance(self):
# EC2 run, terminate immediately
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type)
for instance in reservation.instances:
instance.terminate()
try:
instance.update(validate=True)
except ValueError:
pass
except exception.EC2ResponseError as exc:
if self.ec2_error_code.\
client.InvalidInstanceID.NotFound.match(exc):
pass
else:
raise
else:
self.assertNotEqual(instance.state, "running")
# NOTE(afazekas): doctored test case,
# with normal validation it would fail
@skip_because(bug="1182679")
@attr(type='smoke')
def test_integration_1(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
sec_group_name = data_utils.rand_name("securitygroup-")
group_desc = sec_group_name + " security group description "
security_group = self.ec2_client.create_security_group(sec_group_name,
group_desc)
self.addResourceCleanUp(self.destroy_security_group_wait,
security_group)
self.assertTrue(
self.ec2_client.authorize_security_group(
sec_group_name,
ip_protocol="icmp",
cidr_ip="0.0.0.0/0",
from_port=-1,
to_port=-1))
self.assertTrue(
self.ec2_client.authorize_security_group(
sec_group_name,
ip_protocol="tcp",
cidr_ip="0.0.0.0/0",
from_port=22,
to_port=22))
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
ramdisk_id=self.images["ari"]["image_id"],
instance_type=self.instance_type,
key_name=self.keypair_name,
security_groups=(sec_group_name,))
self.addResourceCleanUp(self.destroy_reservation,
reservation)
volume = self.ec2_client.create_volume(1, self.zone)
self.addResourceCleanUp(self.destroy_volume_wait, volume)
instance = reservation.instances[0]
LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
address = self.ec2_client.allocate_address()
rcuk_a = self.addResourceCleanUp(address.delete)
self.assertTrue(address.associate(instance.id))
rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?
self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report "available" before it is actually available
ssh = RemoteClient(address.public_ip,
self.os.config.compute.ssh_user,
pkey=self.keypair.material)
text = data_utils.rand_name("Pattern text for console output -")
resp = ssh.write_to_console(text)
self.assertFalse(resp)
def _output():
output = instance.get_console_output()
return output.output
re_search_wait(_output, text)
part_lines = ssh.get_partitions().split('\n')
volume.attach(instance.id, "/dev/vdh")
def _volume_state():
volume.update(validate=True)
return volume.status
self.assertVolumeStatusWait(_volume_state, "in-use")
re_search_wait(_volume_state, "in-use")
        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently; for now we just test whether the partition count
        # increased or decreased
def _part_state():
current = ssh.get_partitions().split('\n')
if current > part_lines:
return 'INCREASE'
if current < part_lines:
return 'DECREASE'
return 'EQUAL'
state_wait(_part_state, 'INCREASE')
part_lines = ssh.get_partitions().split('\n')
# TODO(afazekas): Resource compare to the flavor settings
volume.detach()
self.assertVolumeStatusWait(_volume_state, "available")
re_search_wait(_volume_state, "available")
LOG.info("Volume %s state: %s", volume.id, volume.status)
state_wait(_part_state, 'DECREASE')
instance.stop()
address.disassociate()
self.assertAddressDissasociatedWait(address)
self.cancelResourceCleanUp(rcuk_da)
address.release()
self.assertAddressReleasedWait(address)
self.cancelResourceCleanUp(rcuk_a)
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
# TODO(afazekas): move steps from teardown to the test case
# TODO(afazekas): Snapshot/volume read/write test case
|
|
"""
.. codeauthor:: Kevin Kennedy <protonyx@users.noreply.github.com>
Driver
------
The BK Precision 9110 Series DC Power Sources use the default USB Test and
Measurement driver and should be recognized without problems when plugged in.
If the device is not recognized, it is likely because there is a problem with
the VISA driver installation.
The XLN Series DC Sources use a Silicon Labs `CP210x USB to UART Bridge`. This
requires a third party driver that must be downloaded from the BK Precision
website before connecting the device.
That driver can be downloaded from
`here <https://bkpmedia.s3.amazonaws.com/downloads/software/CP210X_USB_Driver.zip>`_
Remote Interface
----------------
The XLN series DC sources feature a remote web interface using the Ethernet
connection, which can be accessed by typing the instrument IP address into a
Java-enabled web browser.
.. note::
The default admin password is 123456
"""
import labtronyx
class d_911X(labtronyx.DriverBase):
"""
Driver for BK Precision 9110 Series DC Power Sources
"""
author = 'KKENNEDY'
version = '1.0'
deviceType = 'DC Power Supply'
compatibleInterfaces = ['VISA']
compatibleInstruments = {
'BK Precision': ['9115', '9116']
}
@classmethod
def VISA_validResource(cls, identity):
vendors = ['BK Precision', 'BK PRECISION']
models = ['BK9115', 'BK9116']
return identity[0] in vendors and identity[1] in models
def open(self):
self.setRemoteControl()
def close(self):
self.setLocalControl()
def getProperties(self):
return {
'protectionModes': ['Voltage'],
'terminalSense': ['Voltage', 'Current', 'Power'],
'controlModes': ['Voltage', 'Current']
}
def setRemoteControl(self):
"""
Sets the instrument in remote control mode
"""
self.instr.write("SYST:REM")
def setLocalControl(self):
"""
Sets the instrument in local control mode
"""
self.instr.write("SYST:LOC")
def disableFrontPanel(self):
"""
Disables the front panel of the instrument. To re-enable the front
panel, call `setLocalControl`
"""
self.instr.write("SYST:RWL")
def powerOn(self):
"""
Enables the instrument to power the output
"""
self.instr.write("OUTP ON")
def powerOff(self):
"""
Disables the output power connections.
"""
self.instr.write("OUTP OFF")
def getError(self):
"""
Read any pending error codes with accompanying information
:returns: str
"""
return self.instr.query("SYST:ERR?")
def trigger(self):
"""
Create a trigger signal for the instrument. This command has no effect
if the instrument is not using `BUS` as the trigger source.
"""
self.instr.write("*TRG")
def setTriggerSource(self, source):
"""
Set the trigger source for the instrument. Manual trigger requires
pressing the `Trigger` button on the front panel. Bus trigger requires
a trigger command to be sent.
:param source: Trigger Source ("BUS" or "MANUAL")
:type source: str
"""
self.instr.write("TRIG:SOUR %s" % str(source))
def setVoltage(self, voltage):
"""
Set the output voltage level
:param voltage: Voltage (in Volts)
:type voltage: float
"""
self.instr.write("VOLT %f" % float(voltage))
def getVoltage(self):
"""
Get the output voltage level
:returns: float
"""
return float(self.instr.query("VOLT?"))
def setTriggeredVoltage(self, voltage):
"""
Set the programmed output voltage level after a trigger has occurred.
:param voltage: Voltage (in Volts)
:type voltage: float
"""
self.instr.write("VOLT:TRIG %f" % float(voltage))
def getTriggeredVoltage(self):
"""
Get the programmed output voltage level after a trigger has occurred.
:returns: float
"""
return float(self.instr.query("VOLT:TRIG?"))
def setVoltageSlewRate(self, rise, fall):
"""
Set the voltage rising and falling time of the power supply. Units are
seconds.
Parameters must be between 0 - 65.535 seconds
        .. note::
This command is not supported by the device
:param rise: Rise time (in seconds)
:type rise: float
:param fall: Fall time (in seconds)
:type fall: float
"""
# TODO: This doesn't work
self.instr.write("RISE %f" % float(rise))
self.instr.write("FALL %f" % float(fall))
def setCurrent(self, current):
"""
Set the output current level
:param current: Current (in Amps)
:type current: float
"""
self.instr.write("CURR %f" % float(current))
def getCurrent(self):
"""
Get the output current level
:returns: float
"""
return float(self.instr.query("CURR?"))
def setTriggeredCurrent(self, current):
"""
Set the programmed output current level after a trigger has occurred.
:param current: Current (in Amps)
:type current: float
"""
self.instr.write("CURR:TRIG %f" % float(current))
def getTriggeredCurrent(self):
"""
Get the programmed output current level after a trigger has occurred.
:returns: float
"""
return float(self.instr.query("CURR:TRIG?"))
def getTerminalVoltage(self):
"""
Get the measured voltage from the terminals of the instrument
:returns: float
"""
return float(self.instr.query("MEAS:VOLT?"))
def getTerminalCurrent(self):
"""
Get the measured current from the terminals of the instrument
:returns: float
"""
return float(self.instr.query("MEAS:CURR?"))
def getTerminalPower(self):
"""
Get the measured power from the terminals of the instrument
:returns: float
"""
return float(self.instr.query("MEAS:POW?"))
def setVoltageRange(self, lower, upper):
"""
Set the lower and upper limitation of the output voltage
:param lower: Lower limit (in Volts)
:type lower: float
:param upper: Upper limit (in Volts)
:type upper: float
"""
self.instr.write("VOLT:LIM %f" % float(lower))
self.instr.write("VOLT:RANG %f" % float(upper))
def setProtection(self, voltage=None):
"""
        Enable the over-voltage protection circuitry. If the voltage parameter
        is omitted (None), the protection is disabled.
:param voltage: OVP Setting (in Volts)
:type voltage: float
"""
# Voltage
if voltage is not None:
self.instr.write("VOLT:PROT:STAT ON")
self.instr.write("VOLT:PROT %f" % float(voltage))
else:
self.instr.write("VOLT:PROT:STAT OFF")
def getProtection(self):
"""
Get the protection set points
:returns: dict with keys ['Voltage']
"""
return {
'Voltage': self.instr.query('VOLT:PROT?')
}
def setProtectionDelay(self, delay):
"""
Set the OVP (Over-Voltage Protection) circuitry delay. Can be used
to set the delay (in seconds) before the OVP kicks in.
Delay must be between 0.001 - 0.6
:param delay: OVP delay (in seconds)
:type delay: float
"""
        if 0.001 <= delay < 0.6:
            self.instr.write("VOLT:PROT:DELAY %f" % float(delay))
        else:
            raise ValueError("Value not in range")
def getProtectionState(self):
"""
This command is used to query the executing state of OVP (Over-Voltage
Protection). If 1, this indicates the OVP circuit has been triggered
and must be cleared using `clearProtectionState` before normal operation
can continue.
        .. note::
This operation is not supported by the device
:returns: int
"""
# TODO: This doesn't work
return int(self.instr.query("VOLT:PROT:TRIG?"))
def clearProtectionState(self):
"""
This command is used to clear the OVP (Over-Voltage Protection) state.
Before sending this command, please increase the upper limitation of
OVP or reduce the output voltage
"""
self.instr.write("PROT:CLE")
class d_XLN(labtronyx.DriverBase):
"""
Driver for BK Precision XLN Series DC Sources
"""
author = 'KKENNEDY'
version = '1.0'
deviceType = 'DC Power Supply'
compatibleInterfaces = ['VISA']
compatibleInstruments = {
'BK Precision': ['XLN3640', 'XLN6024', 'XLN8018', 'XLN10014', 'XLN15010', 'XLN30052', 'XLN60026']
}
@classmethod
def VISA_validResource(cls, identity):
vendors = ['B&K Precision', 'B&K PRECISION']
return identity[0] in vendors and identity[1] in cls.compatibleInstruments['BK Precision']
def open(self):
self.configure(baudrate=57600, bytesize=8, parity='N', stopbits=1)
self.setRemoteControl()
self.identify()
def close(self):
pass
def getProperties(self):
return {
'protectionModes': ['Voltage', 'Current', 'Power'],
'terminalSense': ['Voltage', 'Current', 'Power']
}
def setRemoteControl(self):
"""
Enable Remote Control Mode
"""
self.write("SYS:REM USB")
def disableFrontPanel(self):
"""
Disables the front panel of the instrument. To re-enable the front
panel, call `enableFrontPanel`
"""
self.write("SYS:KEY:LOCK 1")
def enableFrontPanel(self):
"""
Enables the front panel of the instrument.
"""
self.write("SYS:KEY:LOCK 0")
def powerOn(self):
"""
Enables the instrument to power the output
"""
self.write("OUT ON")
def powerOff(self):
"""
Disables the output power connections.
"""
self.write("OUT OFF")
def getError(self):
"""
Read any pending error codes with accompanying information
:returns: str
"""
return self.query("SYS:ERR?")
def setVoltage(self, voltage):
"""
Set the output voltage level
:param voltage: Voltage (in Volts)
:type voltage: float
"""
self.write("VOLT %f" % float(voltage))
def getVoltage(self):
"""
Get the output voltage level
:returns: float
"""
return float(self.query("VOLT?"))
def setMaxVoltage(self, voltage):
"""
Set the voltage limit
:param voltage: Voltage (in Volts)
:type voltage: float
"""
self.write("OUT:LIM:VOLT %f" % float(voltage))
def getMaxVoltage(self):
"""
Get the voltage limit
:returns: float
"""
return float(self.query("OUT:LIM:VOLT?"))
def setSlewRate(self, voltage, current):
"""
Set the voltage and current rise/fall time of the power supply. Units are
seconds.
:param voltage: Voltage Slew rate (in seconds)
:type voltage: float
:param current: Current Slew rate (in seconds)
:type current: float
"""
self.write("OUT:SR:VOLT %f" % float(voltage))
self.write("OUT:SR:CURR %f" % float(current))
def setCurrent(self, current):
"""
Set the output current level
:param current: Current (in Amps)
:type current: float
"""
self.write("CURR %f" % float(current))
def getCurrent(self):
"""
Get the output current level
:returns: float
"""
return float(self.query("CURR?"))
def setMaxCurrent(self, current):
"""
Set the current limit
:param current: Current (in Amps)
:type current: float
"""
self.write("OUT:LIM:CURR %f" % float(current))
def getTerminalVoltage(self):
"""
Get the measured voltage from the terminals of the instrument
:returns: float
"""
return float(self.query("MEAS:VOLT?"))
def getTerminalCurrent(self):
"""
Get the measured current from the terminals of the instrument
:returns: float
"""
return float(self.query("MEAS:CURR?"))
def getTerminalPower(self):
"""
Get the measured power from the terminals of the instrument
:returns: float
"""
return float(self.query("MEAS:POW?"))
def setVoltageRange(self, lower, upper):
"""
        Set the lower and upper limits of the output voltage
:param lower: Lower limit (in Volts)
:type lower: float
:param upper: Upper limit (in Volts)
:type upper: float
"""
self.write("VOLT:LIM %f" % float(lower))
self.write("VOLT:RANG %f" % float(upper))
def setProtection(self, voltage=None, current=None, power=None):
"""
Enable the protection circuitry. If any of the parameters is zero, that
protection is disabled.
:param voltage: OVP Setting (in Volts)
:type voltage: float
:param current: OCP Setting (in Amps)
:type current: float
:param power: OPP Setting (in Watts)
:type power: float
"""
# Voltage
if voltage is not None:
if voltage > 0.0:
self.write("PROT:OVP:LEV %f" % float(voltage))
self.write("PROT:OVP 1")
else:
self.write("PROT:OVP 0")
# Current
if current is not None:
if current > 0.0:
self.write("PROT:OCP:LEV %f" % float(current))
self.write("PROT:OCP 1")
else:
self.write("PROT:OCP 0")
# Power
if power is not None:
if power > 0.0:
self.write("PROT:OPP:LEV %f" % float(power))
self.write("PROT:OPP 1")
else:
self.write("PROT:OPP 0")
def getProtection(self):
"""
Get the protection set points
:returns: dict with keys ['Voltage', 'Current', 'Power']
"""
return {
'Voltage': self.query('PROT:OVP:LEV?'),
'Current': self.query('PROT:OCP:LEV?'),
'Power': self.query('PROT:OPP:LEV?')
}
def disableProtection(self):
"""
Disable the protection circuitry.
"""
self.write("PROT:OVP 0")
self.write("PROT:OCP 0")
self.write("PROT:OPP 0")
def getProtectionState(self):
"""
This command is used to query the executing state of the protection
circuitry. If 1, this indicates the protection circuit has been
triggered and must be cleared using `clearProtection` before normal
operation can continue.
:returns: int
"""
return int(self.query("PROT?"))
def clearProtection(self):
"""
This command is used to clear the protection state.
        Before sending this command, increase the OVP/OCP limits or reduce the
        output voltage/current.
"""
self.write("PROT:CLE")
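# Illustrative usage sketch (not part of the driver). How the instrument is
# discovered depends on the labtronyx version; the manager/lookup calls below
# are assumptions, so treat this as pseudocode around the driver's own methods:
#
#   import labtronyx
#   im = labtronyx.InstrumentManager()
#   dev = im.findInstruments(deviceVendor='BK Precision')[0]  # hypothetical lookup
#   dev.setProtection(voltage=13.0, current=2.5, power=30.0)  # OVP/OCP/OPP
#   dev.setVoltage(12.0)
#   dev.setCurrent(2.0)
#   dev.powerOn()
#   print(dev.getTerminalVoltage(), dev.getTerminalCurrent())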
|
|
"Simple Fts backend"
import re
import os
import datetime
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import connection, transaction
from django.db.models import Q, Max
from django.core.cache import cache
from fts.backends.base import BaseClass, BaseModel, BaseManager
from fts.models import Word, Index, Namespace
import unicodedata
from fts.words.stop import FTS_STOPWORDS
try:
from fts.words.snowball import Stemmer
except ImportError:
from fts.words.porter import Stemmer
# http://code.google.com/p/django-fts/issues/detail?id=7
# http://djangosnippets.org/snippets/1725/
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def commit_on_success_unless_managed(func):
"""
If the decorated function runs successfully, a commit is made, unless the
transactions are being managed; if the function produces an exception,
a rollback is made, again unless transactions are being managed somewhere
else.
"""
def _commit_on_success_unless_managed(*args, **kw):
try:
if transaction.is_managed():
forced_managed = False
else:
transaction.enter_transaction_management()
forced_managed = True
try:
res = func(*args, **kw)
except:
# All exceptions must be handled here (even string ones).
if transaction.is_dirty():
if forced_managed:
transaction.rollback()
else:
transaction.rollback_unless_managed()
raise
else:
if transaction.is_dirty():
if forced_managed:
transaction.commit()
else:
transaction.commit_unless_managed()
return res
finally:
if forced_managed:
transaction.leave_transaction_management()
return wraps(func)(_commit_on_success_unless_managed)
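# Illustrative use of the decorator above (function and model names are
# hypothetical): outside a managed transaction it commits on success and
# rolls back on any exception; inside one, it defers to the outer manager.
#
#   @commit_on_success_unless_managed
#   def reindex(model):
#       model.objects.update_index()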
qn = connection.ops.quote_name
WEIGHTS = {
'A' : 10,
'B' : 4,
'C' : 2,
'D' : 1
}
SEP = re.compile(r'[\s,.()\[\]|]')
_NAMESPACES_CACHE = {}
_NAMESPACES_CACHE_SYNC = {}
class SearchClass(BaseClass):
def __init__(self, server, params):
self.backend = 'simple'
class SearchManager(BaseManager):
def __init__(self, **kwargs):
super(SearchManager, self).__init__(**kwargs)
        # For autocomplete, generally you'd want:
        #   full_index=True and stem_words=False (full_index implies exact_search)
        # For regular full-text search, you'd want:
        #   full_index=False, stem_words=True and exact_search=True
self.full_index = kwargs.get('full_index', False)
self.stem_words = kwargs.get('stem_words', True)
self.exact_search = kwargs.get('exact_search', True)
self.namespace = kwargs.get('namespace', None)
def _get_namespace_id(self, namespace):
_k_ = namespace
try:
now = datetime.datetime.now()
sync_time = _NAMESPACES_CACHE_SYNC.get(_k_)
            expired = not sync_time
keys = (
'fts-namespaces-last-updated',
)
for key in keys:
if not key: continue
last_updated = cache.get(key)
if not last_updated:
last_updated = now
cache.set(key, last_updated)
if sync_time and last_updated > sync_time:
expired = True
if expired:
raise KeyError
namespace_id = _NAMESPACES_CACHE[_k_]
except KeyError:
for n in Namespace.objects.all():
_NAMESPACES_CACHE[n.slug] = n.id
namespace_id = _NAMESPACES_CACHE.get(namespace)
# save sync time for cache:
_NAMESPACES_CACHE[_k_] = namespace_id
_NAMESPACES_CACHE_SYNC[_k_] = datetime.datetime.now()
return namespace_id
def _get_idx_words(self, line, minlen=0):
words = self._get_words(line, minlen)
if self.full_index:
            # Find all substrings of each word (all-digit words only expand to
            # their prefixes, to keep numeric tokens from bloating the index):
            words = set(word[i:j]
                        for word in words
                        for i in ((0,) if word.isdigit() else range(len(word)))
                        for j in range(i + 1, len(word) + 1)
                        if j - i > minlen)
return words
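    # For example, with full_index=True and minlen=0, 'casa' expands to every
    # substring ('c', 'ca', 'cas', 'casa', 'a', 'as', 'asa', 's', 'sa'), while
    # an all-digit token such as '2014' only yields its prefixes
    # ('2', '20', '201', '2014'), keeping numeric data from exploding the index.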
def _get_words(self, line, minlen=0):
# Remove accents
line = ''.join((c for c in unicodedata.normalize('NFD', unicode(line)) if unicodedata.category(c) != 'Mn'))
# Lowercase and split in a set of words
words = set(SEP.split(line.lower()))
# Stemmer function
if self.stem_words:
stem = Stemmer(self.language_code)
else:
stem = lambda w: w
# Get stemmed set of words not in the list of stop words and with a minimum of a minlen length
return set( stem(word) for word in words if word and word not in FTS_STOPWORDS[self.language_code] and len(word) > minlen )
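    # For example, with language_code 'en' and stem_words=True,
    # _get_words(u'The running dogs') yields roughly set(['run', 'dog']):
    # 'the' is dropped as a stop word and the rest are stemmed (exact stems
    # depend on whether the Snowball or Porter stemmer was imported).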
@commit_on_success_unless_managed
def _update_index(self, pk, dumping=None):
"""
Index Update (Live or Dumping)
For Dumping update (recommended method):
dumping = {} # use to pass and keep context for multiple calls
Entity.autocomplete._update_index(None, dumping)
GeonameAlternateName.autocomplete._update_index(None, dumping)
TagLabel.autocomplete._update_index(None, dumping)
then in Sqlite3:
.separator "\t"
.import fts_word.txt fts_word
.import fts_index.txt fts_index
...or in PostgreSQL:
COPY fts_word FROM 'fts_word.txt';
COPY fts_index FROM 'fts_index.txt';
For Live update (very slow compared to dumping update):
TagLabel.autocomplete.update_index()
Usage:
TagLabel.autocomplete.search('label')
"""
if self.model._meta.abstract:
return # skip abstract class updates
namespace_id = self._get_namespace_id(self.namespace)
if not namespace_id and self.namespace:
ns = Namespace.objects.create(slug=self.namespace)
namespace_id = ns.id
ctype = ContentType.objects.get_for_model(self.model)
filter = { 'content_type__pk': ctype.pk }
if namespace_id: filter['namespace'] = namespace_id
if pk is not None:
if isinstance(pk, (set,list,tuple)):
filter['object_id__in'] = pk
items = self.filter(pk__in=pk)
else:
filter['object_id'] = pk
items = self.filter(pk=pk)
else:
items = self.all()
cursor = connection.cursor()
cursor.execute('DELETE FROM'+str(Index.objects.filter(**filter).query).split('FROM')[1])
transaction.set_dirty()
if dumping is None:
c = { 'IW': {} }
else:
c = dumping
c['fw'] = c.get('fw') or open('fts_word.txt', 'wt')
c['fi'] = c.get('fi') or open('fts_index.txt', 'wt')
c['IW'] = c.get('IW')
if not c['IW']:
c['IW'] = {}
c['widx'] = 0
c['iidx'] = (Index.objects.aggregate(Max('id'))['id__max'] or 0) + 1
for iw in Word.objects.all():
if iw.id > c['widx']:
c['widx'] = iw.id
c['IW'][iw.word] = iw.id
c['widx'] += 1
for item in items:
item_words = {}
for field, weight in self._fields.items():
if callable(field):
words = field(item)
else:
words = item
for col in field.split('__'):
words = getattr(words, col)
# get all the possible substrings for words
idx_words = self._get_idx_words(words)
if dumping is None:
# of all those substrings, retrieve the missing ones in our c['IW'] dictionary
idx_words_to_get = [w for w in idx_words if w not in c['IW']]
if len(idx_words_to_get):
for iw in Word.objects.filter(word__in=idx_words_to_get):
c['IW'][iw.word] = iw
# finally, for each substring to index, build the index in item_words:
for word in idx_words:
try:
                        iw = c['IW'][word]
except KeyError:
if dumping is not None:
print >>c['fw'], u'\t'.join([unicode(w) or '' for w in (c['widx'], word)]).encode('utf8')
iw = c['IW'][word] = c['widx']
c['widx'] += 1
else:
iw = Word.objects.get_or_create(word=word)[0]
c['IW'][word] = iw
if ord(weight) < ord(item_words.get(iw, 'Z')):
item_words[iw] = weight
for iw, weight in item_words.items():
if dumping is not None:
print >>c['fi'], u'\t'.join([unicode(w) or '' for w in (c['iidx'], iw, WEIGHTS[weight], namespace_id, ctype.pk, item.pk)]).encode('utf8')
c['iidx'] += 1
else:
Index.objects.create(content_object=item, word=iw, weight=WEIGHTS[weight], namespace_id=namespace_id)
def _search(self, query, **kwargs):
rank_field = kwargs.get('rank_field')
qs = self.get_query_set()
joins = []
weights = []
joins_params = []
namespace_id = self._get_namespace_id(self.namespace)
for idx, word in enumerate(self._get_words(query)):
if self.full_index or self.exact_search:
joins_params.append("'%s'" % word.replace("'", "''"))
if namespace_id is not None:
joins_params.append(namespace_id)
namespace_sql = u'AND i%(idx)d.namespace_id = %%%%d' % { 'idx':idx }
else:
namespace_sql = u''
joins.append(u"INNER JOIN %%(words_table_name)s AS w%(idx)d ON (w%(idx)d.word = %%%%s) INNER JOIN %%(index_table_name)s AS i%(idx)d ON (w%(idx)d.id = i%(idx)d.word_id AND i%(idx)d.content_type_id = %%(content_type_id)s AND i%(idx)d.object_id = %%(table_name)s.id %(namespace_sql)s)" % { 'idx':idx, 'namespace_sql': namespace_sql })
else:
joins_params.append("'%s%%%%'" % word.replace("'", "''"))
if namespace_id is not None:
joins_params.append(namespace_id)
namespace_sql = u'AND i%(idx)d.namespace_id = %%%%d' % { 'idx':idx }
else:
namespace_sql = u''
joins.append(u"INNER JOIN %%(words_table_name)s AS w%(idx)d ON (w%(idx)d.word LIKE %%%%s) INNER JOIN %%(index_table_name)s AS i%(idx)d ON (w%(idx)d.id = i%(idx)d.word_id AND i%(idx)d.content_type_id = %%(content_type_id)s AND i%(idx)d.object_id = %%(table_name)s.id %(namespace_sql)s)" % { 'idx':idx, 'namespace_sql': namespace_sql })
qs.query.distinct = True
weights.append("i%(idx)d.weight" % { 'idx':idx })
table_name = self.model._meta.db_table
words_table_name = qn(Word._meta.db_table)
index_table_name = qn(Index._meta.db_table)
ctype = ContentType.objects.get_for_model(self.model)
joins = ' '.join(joins) % {
'table_name': qn(table_name),
'words_table_name': words_table_name,
'index_table_name': index_table_name,
'content_type_id': ctype.id,
}
# these params should be set as FROM params to be returned by get_from_clause() but it doesn't support FROM params
joins = joins % tuple(joins_params)
# monkey patch the query set:
qs.query.table_alias(table_name) # create alias
qs.query.alias_map[table_name] = (table_name, joins, None, None, None, None, None) # map the joins to the alias
if rank_field is not None:
select = {}
order = []
select[rank_field] = '+'.join(weights)
order = ['-%s' % rank_field]
qs = qs.extra(select=select, order_by=order)
return qs
class SearchableModel(BaseModel):
class Meta:
abstract = True
objects = SearchManager()
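# Illustrative wiring of this backend to a model (model/field names are
# hypothetical, and the exact SearchManager field-registration kwargs follow
# fts.backends.base conventions, so treat them as assumptions):
#
#   from django.db import models
#
#   class TagLabel(SearchableModel):
#       label = models.CharField(max_length=100)
#       autocomplete = SearchManager(fields=(('label', 'A'),),
#                                    full_index=True, stem_words=False)
#
#   TagLabel.autocomplete.update_index()   # live index update (slow)
#   results = TagLabel.autocomplete.search('lab')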
|
|
#!/usr/bin/python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Functions to facilitate a branching for merges.
#
# A "sushi branch" is a branch that we've created and manage. We do this to
# prevent making changes to branches that we don't understand. It's mostly as
# a sanity check that we're being used correctly.
import check_merge
from datetime import datetime
import find_patches
import os
import re
from robo_lib import UserInstructions
from robo_lib import log
from subprocess import check_output
def IsWorkingDirectoryClean():
"""Return true if and only if the working directory is clean."""
return not check_output(["git", "status", "--untracked-files=no",
"--porcelain"]).strip()
def RequiresCleanWorkingDirectory(fn):
def wrapper(*args, **kwargs):
if not IsWorkingDirectoryClean():
raise Exception("Working directory is not clean.")
    return fn(*args, **kwargs)
return wrapper
@RequiresCleanWorkingDirectory
def CreateAndCheckoutDatedSushiBranch(cfg):
"""Create a dated branch from origin/master and check it out."""
now = datetime.now()
branch_name=cfg.sushi_branch_prefix() + now.strftime("%Y-%m-%d-%H-%M-%S")
log("Creating dated branch %s" % branch_name)
# Fetch the latest from origin
if cfg.Call(["git", "fetch", "origin"]):
raise Exception("Could not fetch from origin")
# Create the named branch
# Note that we definitely do not want this branch to track origin/master; that
# would eventually cause 'git cl upload' to push the merge commit, assuming
# that the merge commit is pushed to origin/sushi-branch. One might argue
# that we should push the merge to origin/master, which would make this okay.
# For now, we leave the branch untracked to make sure that the user doesn't
# accidentally do the wrong thing. I think that with an automatic deps roll,
# we'll want to stage things on origin/sushi-branch.
#
# We don't want to push anything to origin yet, though, just to keep from
# making a bunch of sushi branches. We can do it later just as easily.
if cfg.Call(["git",
"branch",
"--no-track",
branch_name,
"origin/master"]):
raise Exception("Could not create branch")
# NOTE: we could push the remote branch back to origin and start tracking it
# now, and not worry about tracking later. However, until the scripts
  # actually work, I don't want to push a bunch of branches to origin.
# Check out the branch. On failure, delete the new branch.
if cfg.Call(["git", "checkout", branch_name]):
cfg.Call(["git", "branch", "-D", branch_name])
raise Exception("Could not checkout branch")
cfg.SetBranchName(branch_name)
def CreateAndCheckoutDatedSushiBranchIfNeeded(cfg):
"""Create a dated branch from origin/master if we're not already on one."""
if cfg.sushi_branch_name():
log("Already on sushi branch %s" % cfg.sushi_branch_name())
return
CreateAndCheckoutDatedSushiBranch(cfg)
@RequiresCleanWorkingDirectory
def MergeUpstreamToSushiBranch(cfg):
log("Merging upstream/master to local branch")
if not cfg.sushi_branch_name():
raise Exception("Refusing to do a merge on a branch I didn't create")
if cfg.Call(["git", "fetch", "upstream"]):
raise Exception("Could not fetch from upstream")
if cfg.Call(["git", "merge", "upstream/master"]):
raise UserInstructions("Merge failed -- resolve conflicts manually.")
log("Merge has completed successfully")
def GetMergeParentsIfAny(cfg):
  """Return the list of parent sha-1s of the merge commit, if one exists,
  between HEAD and where it joins up with origin/master. Otherwise, return []."""
# Get all sha1s between us and origin/master
sha1s = check_output(["git", "log", "--format=%H",
"origin/master..%s" % cfg.branch_name()]).split()
for sha1 in sha1s:
# Does |sha1| have more than one parent commit?
parents = check_output(["git", "show", "--no-patch", "--format=%P",
sha1]).split()
if len(parents) > 1:
return parents
return []
def IsMergeCommitOnThisBranch(cfg):
"""Return true if there's a merge commit on this branch."""
return GetMergeParentsIfAny(cfg) != []
def FindUpstreamMergeParent(cfg):
"""Return the sha-1 of the upstream side of the merge, if there is a merge
commit on this branch. Otherwise, fail."""
sha1s = GetMergeParentsIfAny(cfg)
for sha1 in sha1s:
# 'not' is correct -- it returns zero if it is an ancestor => upstream.
if not cfg.Call(["git", "merge-base", "--is-ancestor", sha1,
"upstream/master"]):
return sha1
raise Exception("No upstream merge parent found. Is the merge committed?")
def MergeUpstreamToSushiBranchIfNeeded(cfg):
"""Start a merge if we've not started one before, or do nothing successfully
if the merge is complete. If it's half done, then get mad and exit."""
if IsMergeCommitOnThisBranch(cfg):
log("Merge commit already marked as complete")
return
# See if a merge is in progress. "git merge HEAD" will do nothing if it
# succeeds, but will fail if a merge is in progress.
if cfg.Call(["git", "merge", "HEAD"]):
raise UserInstructions(
"Merge is in progress -- please resolve conflicts and complete it.")
# There is no merge on this branch, and none is in progress. Start a merge.
MergeUpstreamToSushiBranch(cfg)
def CheckMerge(cfg):
"""Verify that the merge config looks good."""
# If we haven't built all configs, then we might not be checking everything.
# The check might look at config for each platform, etc.
log("Checking merge for common failures")
cfg.chdir_to_ffmpeg_home();
check_merge.main([])
def WritePatchesReadme(cfg):
"""Write the chromium patches file."""
log("Generating CHROMIUM.patches file")
cfg.chdir_to_ffmpeg_home();
with open(os.path.join("chromium", "patches", "README"), "w+") as f:
find_patches.write_patches_file("HEAD", f)
def WriteConfigChangesFile(cfg):
"""Write a file that summarizes the config changes, for easier reviewing."""
cfg.chdir_to_ffmpeg_home();
# This looks for things that were added / deleted that look like #define or
# %define (for asm) ending in 0 or 1, that have changed in any of the configs.
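  # For example (flag name illustrative), a hunk line
  # "+#define CONFIG_VP9_DECODER 1" survives the grep and the sed leaves
  # "+ CONFIG_VP9_DECODER 1"; "uniq -s 1" then compares lines while skipping
  # the leading +/-, so identical add/remove pairs collapse and only real
  # flag flips remain in config_flag_changes.txt.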
os.system("git diff origin/master --unified=0 -- chromium/config/* |"
"grep '^[+-].*[01]$' | sed -e 's/[%#]define//g' |sort |"
"uniq -s 1 >chromium/patches/config_flag_changes.txt")
def AddAndCommit(cfg, commit_title):
"""Add everything, and commit locally with |commit_title|"""
log("Creating local commit %s" % commit_title)
cfg.chdir_to_ffmpeg_home();
if IsWorkingDirectoryClean():
log("No files to commit to %s" % commit_title)
return
# TODO: Ignore this file, for the "comment out autorename exception" thing.
if cfg.Call(["git", "add", "-u"]):
raise Exception("Could not add files")
if cfg.Call(["git", "commit", "-m", commit_title]):
    raise Exception("Could not create commit")
def IsTrackingBranchSet(cfg):
"""Check if the local branch is tracking upstream."""
# git branch -vv --list ffmpeg_roll
# ffmpeg_roll 28e7fbe889 [origin/master: behind 8859] Merge branch 'merge-m57'
output = check_output(["git", "branch", "-vv", "--list",
cfg.sushi_branch_name()])
# Note that it might have ": behind" or other things.
return "[origin/%s" % cfg.sushi_branch_name() in output
def PushToOriginWithoutReviewAndTrackIfNeeded(cfg):
"""Push the local branch to origin/ if we haven't yet."""
cfg.chdir_to_ffmpeg_home();
# If the tracking branch is unset, then assume that we haven't done this yet.
if IsTrackingBranchSet(cfg):
log("Already have local tracking branch")
return
log("Pushing merge to origin without review")
cfg.Call(["git", "push", "origin", cfg.sushi_branch_name()])
log("Setting tracking branch")
cfg.Call(["git", "branch", "--set-upstream-to=origin/%s" %
cfg.sushi_branch_name()])
# Sanity check. We don't want to start pushing other commits without review.
if not IsTrackingBranchSet(cfg):
raise Exception("Tracking branch is not set, but I just set it!")
def HandleAutorename(cfg):
# We assume that there is a script written by generate_gn.py that adds /
# removes files needed for autorenames. Run it.
log("Updating git for any autorename changes")
cfg.chdir_to_ffmpeg_home();
if cfg.Call(["chmod", "+x", cfg.autorename_git_file()]):
raise Exception("Unable to chmod %s" % cfg.autorename_git_file())
if cfg.Call([cfg.autorename_git_file()]):
raise Exception("Unable to run %s" % cfg.autorename_git_file())
def IsCommitOnThisBranch(robo_configuration, commit_title):
"""Detect if we've already committed the |commit_title| locally."""
# Get all commit titles between us and origin/master
titles = check_output(["git", "log", "--format=%s",
"origin/master..%s" % robo_configuration.branch_name()])
return commit_title in titles
def IsPatchesFileDone(robo_configuration):
"""Return False if and only if the patches file isn't checked in."""
if IsCommitOnThisBranch(
robo_configuration,
robo_configuration.patches_commit_title()):
log("Skipping patches file since already committed")
return True
return False
@RequiresCleanWorkingDirectory
def UpdatePatchesFileUnconditionally(robo_configuration):
"""Update the patches file."""
WritePatchesReadme(robo_configuration)
AddAndCommit(robo_configuration,
robo_configuration.patches_commit_title())
def IsChromiumReadmeDone(robo_configuration):
"""Return False if and only if README.chromium isn't checked in."""
if IsCommitOnThisBranch(
robo_configuration,
robo_configuration.readme_chromium_commit_title()):
log("Skipping README.chromium file since already committed")
return True
return False
@RequiresCleanWorkingDirectory
def UpdateChromiumReadmeWithUpstream(robo_configuration):
"""Update the upstream info in README.chromium and commit the result."""
log("Updating merge info in README.chromium")
merge_sha1 = FindUpstreamMergeParent(robo_configuration)
robo_configuration.chdir_to_ffmpeg_home();
with open("README.chromium", "r+") as f:
readme = f.read()
last_upstream_merge = "Last Upstream Merge:"
merge_date = check_output(["git", "log", "-1","--date=format:%b %d %Y",
"--format=%cd", merge_sha1])
readme = re.sub(r"(Last Upstream Merge:).*\n",
r"\1 %s, %s" % (merge_sha1, merge_date),
readme)
with open("README.chromium", "w") as f:
f.write(readme)
AddAndCommit(robo_configuration,
robo_configuration.readme_chromium_commit_title())
def HasGerritIssueNumber(robo_configuration):
"""Return True if and only if this branch has been pushed for review."""
robo_configuration.chdir_to_ffmpeg_home();
return os.system(
"git cl issue 2>/dev/null |grep Issue |grep None >/dev/null") != 0
def IsUploadedForReview(robo_configuration):
"""Check if the local branch is already uploaded."""
robo_configuration.chdir_to_ffmpeg_home();
if not HasGerritIssueNumber(robo_configuration):
    log("No Gerrit issue number exists.")
return False
if not IsWorkingDirectoryClean():
log("Working directory is not clean -- commit changes and update CL");
return False
# Has been uploaded for review. Might or might not have been landed yet.
return True
def IsUploadedForReviewAndLanded(robo_configuration):
"""Check if the local sushi branch has been uploaded for review, and has also
been landed."""
robo_configuration.chdir_to_ffmpeg_home();
if not IsUploadedForReview(robo_configuration):
log("Is not uploaded for review")
return False
# See if origin/sushi and local/sushi are the same. This check by itself
# isn't sufficient, since it would return true any time the two are in sync.
diff = check_output(["git", "diff",
"origin/" + robo_configuration.sushi_branch_name(),
robo_configuration.sushi_branch_name()]).strip()
return not diff
@RequiresCleanWorkingDirectory
def UploadForReview(robo_configuration):
"""Assuming that tests pass (we can't check), upload to review."""
robo_configuration.chdir_to_ffmpeg_home();
if IsUploadedForReview(robo_configuration):
raise Exception(
"Sushi branch is already uploaded for review! (try git cl web)")
log("Uploading sushi branch for review.")
os.system("git cl upload")
@RequiresCleanWorkingDirectory
def TryFakeDepsRoll(robo_configuration):
"""Start a deps roll against the sushi branch, and -1 it."""
log("Considering starting a fake deps roll")
# Make sure that we've landed the sushi commits. Note that this can happen if
# somebody re-runs robosushi after we upload the commits to Gerrit, but before
# they've been reviewed and landed. This way, we provide a meaningful error.
if not IsUploadedForReviewAndLanded(robo_configuration):
raise Exception("Cannot start a fake deps roll until gerrit review lands!")
robo_configuration.chdir_to_ffmpeg_home();
  sha1 = check_output(["git", "show", "-1", "--format=%P"]).strip()
  if not sha1:
    raise Exception("Cannot get sha1 of HEAD for fake deps roll")
robo_configuration.chdir_to_chrome_src()
# TODO: make sure that there's not a deps roll in progress, else we'll keep
# doing this every time we're run.
# TODO: get mad otherwise.
check_output(["roll-deps.py", "third_party/ffmpeg", sha1])
# TODO: -1 it.
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime as dt
import json
import uuid
import mock
from oslo_utils import timeutils
import six
from heat.common import identifier
from heat.common import template_format
from heat.engine import api
from heat.engine import event
from heat.engine import parameters
from heat.engine import resource
from heat.engine import stack as parser
from heat.engine import template
from heat.rpc import api as rpc_api
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
datetime = dt.datetime
class FormatTest(common.HeatTestCase):
def setUp(self):
super(FormatTest, self).setUp()
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'generic1': {'Type': 'GenericResourceType'},
'generic2': {
'Type': 'GenericResourceType',
'DependsOn': 'generic1'}
}
})
resource._register_class('GenericResourceType',
generic_rsrc.GenericResource)
resource._register_class('ResWithComplexPropsAndAttrs',
generic_rsrc.ResWithComplexPropsAndAttrs)
self.stack = parser.Stack(utils.dummy_context(), 'test_stack',
tmpl, stack_id=str(uuid.uuid4()))
def _dummy_event(self, event_id):
resource = self.stack['generic1']
return event.Event(utils.dummy_context(), self.stack, 'CREATE',
'COMPLETE', 'state changed',
'z3455xyc-9f88-404d-a85b-5315293e67de',
resource.properties, resource.name, resource.type(),
uuid='abc123yc-9f88-404d-a85b-531529456xyz',
id=event_id)
def test_format_stack_resource(self):
res = self.stack['generic1']
resource_keys = set((
rpc_api.RES_CREATION_TIME,
rpc_api.RES_UPDATED_TIME,
rpc_api.RES_NAME,
rpc_api.RES_PHYSICAL_ID,
rpc_api.RES_ACTION,
rpc_api.RES_STATUS,
rpc_api.RES_STATUS_DATA,
rpc_api.RES_TYPE,
rpc_api.RES_ID,
rpc_api.RES_STACK_ID,
rpc_api.RES_STACK_NAME,
rpc_api.RES_REQUIRED_BY,
))
resource_details_keys = resource_keys.union(set((
rpc_api.RES_DESCRIPTION,
rpc_api.RES_METADATA,
rpc_api.RES_SCHEMA_ATTRIBUTES,
)))
formatted = api.format_stack_resource(res, True)
self.assertEqual(resource_details_keys, set(six.iterkeys(formatted)))
formatted = api.format_stack_resource(res, False)
self.assertEqual(resource_keys, set(six.iterkeys(formatted)))
@mock.patch.object(api, 'format_resource_properties')
def test_format_stack_resource_with_props(self, mock_format_props):
mock_format_props.return_value = 'formatted_res_props'
res = self.stack['generic1']
formatted = api.format_stack_resource(res, True, with_props=True)
formatted_props = formatted[rpc_api.RES_SCHEMA_PROPERTIES]
self.assertEqual('formatted_res_props', formatted_props)
@mock.patch.object(api, 'format_resource_attributes')
def test_format_stack_resource_with_attributes(self, mock_format_attrs):
mock_format_attrs.return_value = 'formatted_resource_attrs'
res = self.stack['generic1']
formatted = api.format_stack_resource(res, True, with_attr=['a', 'b'])
formatted_attrs = formatted[rpc_api.RES_SCHEMA_ATTRIBUTES]
self.assertEqual('formatted_resource_attrs', formatted_attrs)
def test_format_resource_attributes(self):
res = self.stack['generic1']
formatted_attributes = api.format_resource_attributes(res)
self.assertEqual(2, len(formatted_attributes))
self.assertIn('foo', formatted_attributes)
self.assertIn('Foo', formatted_attributes)
def test_format_resource_attributes_show_attribute(self):
res = mock.Mock()
res.attributes = {'a': 'a_value', 'show': {'b': 'b_value'}}
formatted_attributes = api.format_resource_attributes(res)
self.assertIn('b', formatted_attributes)
self.assertNotIn('a', formatted_attributes)
def test_format_resource_attributes_show_attribute_fail(self):
res = mock.Mock()
res.attributes = {'a': 'a_value', 'show': ''}
formatted_attributes = api.format_resource_attributes(res)
self.assertIn('a', formatted_attributes)
self.assertIn('show', formatted_attributes)
def test_format_resource_attributes_force_attributes(self):
res = self.stack['generic1']
force_attrs = ['a1', 'a2']
formatted_attributes = api.format_resource_attributes(res, force_attrs)
self.assertEqual(4, len(formatted_attributes))
self.assertIn('foo', formatted_attributes)
self.assertIn('Foo', formatted_attributes)
self.assertIn('a1', formatted_attributes)
self.assertIn('a2', formatted_attributes)
def _get_formatted_resource_properties(self, res_name):
tmpl = template.Template(template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: ResWithComplexPropsAndAttrs
resource2:
type: ResWithComplexPropsAndAttrs
properties:
a_string: foobar
resource3:
type: ResWithComplexPropsAndAttrs
properties:
a_string: { get_attr: [ resource2, string] }
'''))
stack = parser.Stack(utils.dummy_context(), 'test_stack_for_preview',
tmpl, stack_id=str(uuid.uuid4()))
res = stack[res_name]
return api.format_resource_properties(res)
def test_format_resource_properties_empty(self):
props = self._get_formatted_resource_properties('resource1')
self.assertIsNone(props['a_string'])
self.assertIsNone(props['a_list'])
self.assertIsNone(props['a_map'])
def test_format_resource_properties_direct_props(self):
props = self._get_formatted_resource_properties('resource2')
self.assertEqual('foobar', props['a_string'])
def test_format_resource_properties_get_attr(self):
props = self._get_formatted_resource_properties('resource3')
self.assertEqual('', props['a_string'])
def test_format_stack_resource_with_nested_stack(self):
res = self.stack['generic1']
nested_id = {'foo': 'bar'}
res.nested = mock.Mock()
res.nested.return_value.identifier.return_value = nested_id
formatted = api.format_stack_resource(res, False)
self.assertEqual(nested_id, formatted[rpc_api.RES_NESTED_STACK_ID])
def test_format_stack_resource_with_nested_stack_none(self):
res = self.stack['generic1']
res.nested = mock.Mock()
res.nested.return_value = None
resource_keys = set((
rpc_api.RES_CREATION_TIME,
rpc_api.RES_UPDATED_TIME,
rpc_api.RES_NAME,
rpc_api.RES_PHYSICAL_ID,
rpc_api.RES_ACTION,
rpc_api.RES_STATUS,
rpc_api.RES_STATUS_DATA,
rpc_api.RES_TYPE,
rpc_api.RES_ID,
rpc_api.RES_STACK_ID,
rpc_api.RES_STACK_NAME,
rpc_api.RES_REQUIRED_BY))
formatted = api.format_stack_resource(res, False)
self.assertEqual(resource_keys, set(six.iterkeys(formatted)))
def test_format_stack_resource_with_nested_stack_empty(self):
res = self.stack['generic1']
nested_id = {'foo': 'bar'}
res.nested = mock.MagicMock()
res.nested.return_value.identifier.return_value = nested_id
res.nested.return_value.__len__.return_value = 0
formatted = api.format_stack_resource(res, False)
res.nested.return_value.identifier.assert_called_once_with()
self.assertEqual(nested_id, formatted[rpc_api.RES_NESTED_STACK_ID])
def test_format_stack_resource_required_by(self):
res1 = api.format_stack_resource(self.stack['generic1'])
res2 = api.format_stack_resource(self.stack['generic2'])
self.assertEqual(['generic2'], res1['required_by'])
self.assertEqual([], res2['required_by'])
def test_format_stack_resource_with_parent_stack(self):
res = self.stack['generic1']
res.stack.parent_resource_name = 'foobar'
formatted = api.format_stack_resource(res, False)
self.assertEqual('foobar', formatted[rpc_api.RES_PARENT_RESOURCE])
def test_format_event_identifier_uuid(self):
self._test_format_event('abc123yc-9f88-404d-a85b-531529456xyz')
def _test_format_event(self, event_id):
event = self._dummy_event(event_id)
event_keys = set((
rpc_api.EVENT_ID,
rpc_api.EVENT_STACK_ID,
rpc_api.EVENT_STACK_NAME,
rpc_api.EVENT_TIMESTAMP,
rpc_api.EVENT_RES_NAME,
rpc_api.EVENT_RES_PHYSICAL_ID,
rpc_api.EVENT_RES_ACTION,
rpc_api.EVENT_RES_STATUS,
rpc_api.EVENT_RES_STATUS_DATA,
rpc_api.EVENT_RES_TYPE,
rpc_api.EVENT_RES_PROPERTIES))
formatted = api.format_event(event)
self.assertEqual(event_keys, set(six.iterkeys(formatted)))
event_id_formatted = formatted[rpc_api.EVENT_ID]
event_identifier = identifier.EventIdentifier(
event_id_formatted['tenant'],
event_id_formatted['stack_name'],
event_id_formatted['stack_id'],
event_id_formatted['path'])
self.assertEqual(event_id, event_identifier.event_id)
@mock.patch.object(api, 'format_stack_resource')
def test_format_stack_preview(self, mock_fmt_resource):
def mock_format_resources(res, **kwargs):
return 'fmt%s' % res
mock_fmt_resource.side_effect = mock_format_resources
resources = [1, [2, [3]]]
self.stack.preview_resources = mock.Mock(return_value=resources)
stack = api.format_stack_preview(self.stack)
self.assertIsInstance(stack, dict)
self.assertIsNone(stack.get('status'))
self.assertIsNone(stack.get('action'))
self.assertIsNone(stack.get('status_reason'))
self.assertEqual('test_stack', stack['stack_name'])
self.assertIn('resources', stack)
self.assertEqual(['fmt1', ['fmt2', ['fmt3']]], stack['resources'])
kwargs = mock_fmt_resource.call_args[1]
self.assertTrue(kwargs['with_props'])
def test_format_stack(self):
self.stack.created_time = datetime(1970, 1, 1)
info = api.format_stack(self.stack)
aws_id = ('arn:openstack:heat::test_tenant_id:'
'stacks/test_stack/' + self.stack.id)
expected_stack_info = {
'capabilities': [],
'creation_time': '1970-01-01T00:00:00Z',
'description': 'No description',
'disable_rollback': True,
'notification_topics': [],
'stack_action': 'CREATE',
'stack_name': 'test_stack',
'stack_owner': 'test_username',
'stack_status': 'IN_PROGRESS',
'stack_status_reason': '',
'stack_user_project_id': None,
'template_description': 'No description',
'timeout_mins': None,
'tags': None,
'parameters': {
'AWS::Region': 'ap-southeast-1',
'AWS::StackId': aws_id,
'AWS::StackName': 'test_stack'},
'stack_identity': {
'path': '',
'stack_id': self.stack.id,
'stack_name': 'test_stack',
'tenant': 'test_tenant_id'},
'updated_time': None,
'parent': None}
self.assertEqual(expected_stack_info, info)
def test_format_stack_created_time(self):
self.stack.created_time = None
info = api.format_stack(self.stack)
self.assertIsNotNone(info['creation_time'])
def test_format_stack_updated_time(self):
self.stack.updated_time = None
info = api.format_stack(self.stack)
self.assertIsNone(info['updated_time'])
self.stack.updated_time = datetime(1970, 1, 1)
info = api.format_stack(self.stack)
self.assertEqual('1970-01-01T00:00:00Z', info['updated_time'])
@mock.patch.object(api, 'format_stack_outputs')
def test_format_stack_adds_outputs(self, mock_fmt_outputs):
mock_fmt_outputs.return_value = 'foobar'
self.stack.action = 'CREATE'
self.stack.status = 'COMPLETE'
info = api.format_stack(self.stack)
self.assertEqual('foobar', info[rpc_api.STACK_OUTPUTS])
def test_format_stack_outputs(self):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'generic': {'Type': 'GenericResourceType'}
},
'Outputs': {
'correct_output': {
'Description': 'Good output',
'Value': {'Fn::GetAtt': ['generic', 'Foo']}
},
'incorrect_output': {
'Value': {'Fn::GetAtt': ['generic', 'Bar']}
}
}
})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
tmpl, stack_id=str(uuid.uuid4()))
stack.action = 'CREATE'
stack.status = 'COMPLETE'
stack['generic'].action = 'CREATE'
stack['generic'].status = 'COMPLETE'
info = api.format_stack_outputs(stack, stack.outputs)
expected = [{'description': 'No description given',
'output_error': 'The Referenced Attribute (generic Bar) '
'is incorrect.',
'output_key': 'incorrect_output',
'output_value': None},
{'description': 'Good output',
'output_key': 'correct_output',
'output_value': 'generic'}]
self.assertEqual(expected, info)
class FormatValidateParameterTest(common.HeatTestCase):
base_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test",
"Parameters" : {
%s
}
}
'''
base_template_hot = '''
{
"heat_template_version" : "2013-05-23",
"description" : "test",
"parameters" : {
%s
}
}
'''
scenarios = [
('simple',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('default',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"Default": "dummy"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'Default': 'dummy',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_length_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MinLength": 4
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('max_length_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MaxLength": 10
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_max_length_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MinLength": 4,
"MaxLength": 10
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_value_constraint',
dict(template=base_template,
param_name='MyNumber',
param='''
"MyNumber": {
"Type": "Number",
"Description": "A number",
"MinValue": 4
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('max_value_constraint',
dict(template=base_template,
param_name='MyNumber',
param='''
"MyNumber": {
"Type": "Number",
"Description": "A number",
"MaxValue": 10
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('min_max_value_constraint',
dict(template=base_template,
param_name='MyNumber',
param='''
"MyNumber": {
"Type": "Number",
"Description": "A number",
"MinValue": 4,
"MaxValue": 10
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('allowed_values_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"AllowedValues": [ "foo", "bar", "blub" ]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedValues': ['foo', 'bar', 'blub'],
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('allowed_pattern_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"AllowedPattern": "[a-zA-Z0-9]+"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('multiple_constraints',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MinLength": 4,
"MaxLength": 10,
"AllowedValues": [
"foo", "bar", "blub"
],
"AllowedPattern": "[a-zA-Z0-9]+"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'AllowedValues': ['foo', 'bar', 'blub'],
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('simple_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('default_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"default": "dummy"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'Default': 'dummy',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_length_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4} }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('max_length_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "max": 10} }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_max_length_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min":4, "max": 10} }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_value_constraint_hot',
dict(template=base_template_hot,
param_name='MyNumber',
param='''
"MyNumber": {
"type": "number",
"description": "A number",
"constraints": [
{ "range": { "min": 4} }
]
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('max_value_constraint_hot',
dict(template=base_template_hot,
param_name='MyNumber',
param='''
"MyNumber": {
"type": "number",
"description": "A number",
"constraints": [
{ "range": { "max": 10} }
]
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('min_max_value_constraint_hot',
dict(template=base_template_hot,
param_name='MyNumber',
param='''
"MyNumber": {
"type": "number",
"description": "A number",
"constraints": [
{ "range": { "min": 4, "max": 10} }
]
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('allowed_values_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "allowed_values": [
"foo", "bar", "blub"
]
}
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedValues': ['foo', 'bar', 'blub'],
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('allowed_pattern_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "allowed_pattern": "[a-zA-Z0-9]+" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('multiple_constraints_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4, "max": 10} },
{ "allowed_values": [
"foo", "bar", "blub"
]
},
{ "allowed_pattern": "[a-zA-Z0-9]+" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'AllowedValues': ['foo', 'bar', 'blub'],
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('constraint_description_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4},
"description": "Big enough" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'ConstraintDescription': 'Big enough',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('constraint_multiple_descriptions_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4},
"description": "Big enough." },
{ "allowed_pattern": "[a-zA-Z0-9]+",
"description": "Only letters." }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'AllowedPattern': "[a-zA-Z0-9]+",
'ConstraintDescription': 'Big enough. Only letters.',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('constraint_custom_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Public Network",
"constraints": [
{ "custom_constraint": "neutron.network" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Public Network',
'NoEcho': 'false',
'Label': 'KeyName',
'CustomConstraint': 'neutron.network'
})
)
]
def test_format_validate_parameter(self):
"""
Test format of a parameter.
"""
t = template_format.parse(self.template % self.param)
tmpl = template.Template(t)
tmpl_params = parameters.Parameters(None, tmpl)
tmpl_params.validate(validate_value=False)
param = tmpl_params.params[self.param_name]
        param_formatted = api.format_validate_parameter(param)
        self.assertEqual(self.expected, param_formatted)
class FormatSoftwareConfigDeploymentTest(common.HeatTestCase):
def _dummy_software_config(self):
config = mock.Mock()
self.now = timeutils.utcnow()
config.name = 'config_mysql'
config.group = 'Heat::Shell'
config.id = str(uuid.uuid4())
config.created_at = self.now
config.config = {
'inputs': [{'name': 'bar'}],
'outputs': [{'name': 'result'}],
'options': {},
'config': '#!/bin/bash\n'
}
return config
def _dummy_software_deployment(self):
config = self._dummy_software_config()
deployment = mock.Mock()
deployment.config = config
deployment.id = str(uuid.uuid4())
deployment.server_id = str(uuid.uuid4())
deployment.input_values = {'bar': 'baaaaa'}
deployment.output_values = {'result': '0'}
deployment.action = 'INIT'
deployment.status = 'COMPLETE'
deployment.status_reason = 'Because'
deployment.created_at = config.created_at
deployment.updated_at = config.created_at
return deployment
def test_format_software_config(self):
config = self._dummy_software_config()
result = api.format_software_config(config)
self.assertIsNotNone(result)
self.assertEqual([{'name': 'bar'}], result['inputs'])
self.assertEqual([{'name': 'result'}], result['outputs'])
self.assertEqual({}, result['options'])
self.assertEqual(timeutils.isotime(self.now),
result['creation_time'])
def test_format_software_config_none(self):
self.assertIsNone(api.format_software_config(None))
def test_format_software_deployment(self):
deployment = self._dummy_software_deployment()
result = api.format_software_deployment(deployment)
self.assertIsNotNone(result)
self.assertEqual(deployment.id, result['id'])
self.assertEqual(deployment.config.id, result['config_id'])
self.assertEqual(deployment.server_id, result['server_id'])
self.assertEqual(deployment.input_values, result['input_values'])
self.assertEqual(deployment.output_values, result['output_values'])
self.assertEqual(deployment.action, result['action'])
self.assertEqual(deployment.status, result['status'])
self.assertEqual(deployment.status_reason, result['status_reason'])
self.assertEqual(timeutils.isotime(self.now),
result['creation_time'])
self.assertEqual(timeutils.isotime(self.now),
result['updated_time'])
def test_format_software_deployment_none(self):
self.assertIsNone(api.format_software_deployment(None))
class TestExtractArgs(common.HeatTestCase):
def test_timeout_extract(self):
p = {'timeout_mins': '5'}
args = api.extract_args(p)
self.assertEqual(5, args['timeout_mins'])
def test_timeout_extract_zero(self):
p = {'timeout_mins': '0'}
args = api.extract_args(p)
self.assertNotIn('timeout_mins', args)
def test_timeout_extract_garbage(self):
p = {'timeout_mins': 'wibble'}
args = api.extract_args(p)
self.assertNotIn('timeout_mins', args)
def test_timeout_extract_none(self):
p = {'timeout_mins': None}
args = api.extract_args(p)
self.assertNotIn('timeout_mins', args)
def test_timeout_extract_negative(self):
p = {'timeout_mins': '-100'}
error = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid timeout value', six.text_type(error))
def test_timeout_extract_not_present(self):
args = api.extract_args({})
self.assertNotIn('timeout_mins', args)
def test_adopt_stack_data_extract_present(self):
p = {'adopt_stack_data': json.dumps({'Resources': {}})}
args = api.extract_args(p)
self.assertTrue(args.get('adopt_stack_data'))
def test_invalid_adopt_stack_data(self):
params = {'adopt_stack_data': json.dumps("foo")}
exc = self.assertRaises(ValueError, api.extract_args, params)
self.assertIn('Invalid adopt data', six.text_type(exc))
def test_adopt_stack_data_extract_not_present(self):
args = api.extract_args({})
self.assertNotIn('adopt_stack_data', args)
def test_disable_rollback_extract_true(self):
args = api.extract_args({'disable_rollback': True})
self.assertIn('disable_rollback', args)
self.assertTrue(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'True'})
self.assertIn('disable_rollback', args)
self.assertTrue(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'true'})
self.assertIn('disable_rollback', args)
self.assertTrue(args.get('disable_rollback'))
def test_disable_rollback_extract_false(self):
args = api.extract_args({'disable_rollback': False})
self.assertIn('disable_rollback', args)
self.assertFalse(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'False'})
self.assertIn('disable_rollback', args)
self.assertFalse(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'false'})
self.assertIn('disable_rollback', args)
self.assertFalse(args.get('disable_rollback'))
def test_disable_rollback_extract_bad(self):
self.assertRaises(ValueError, api.extract_args,
{'disable_rollback': 'bad'})
def test_tags_extract(self):
p = {'tags': ["tag1", "tag2"]}
args = api.extract_args(p)
self.assertEqual(['tag1', 'tag2'], args['tags'])
def test_tags_extract_not_present(self):
args = api.extract_args({})
self.assertNotIn('tags', args)
def test_tags_extract_not_map(self):
p = {'tags': {"foo": "bar"}}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tags, not a list: ', six.text_type(exc))
def test_tags_extract_not_string(self):
p = {'tags': ["tag1", 2]}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "2" is not a string', six.text_type(exc))
def test_tags_extract_over_limit(self):
p = {'tags': ["tag1", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" is longer '
'than 80 characters', six.text_type(exc))
def test_tags_extract_comma(self):
p = {'tags': ["tag1", 'tag2,']}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "tag2," contains a comma',
six.text_type(exc))
|
|
"""A collection of tools for analysing a pdb file.
Helper module for the biostructmap package.
"""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from Bio.SeqIO import PdbIO
from Bio.SeqUtils import seq1
from Bio.Data.SCOPData import protein_letters_3to1
from Bio.PDB.Polypeptide import PPBuilder
import numpy as np
from .seqtools import align_protein_sequences
try:
from scipy.spatial import distance, cKDTree
SCIPY_PRESENT = True
except ImportError:
SCIPY_PRESENT = False
SS_LOOKUP_DICT = {
'H': 0,
'B': 1,
'E': 2,
'G': 3,
'I': 4,
'T': 5,
'S': 6,
'-': 7,
0: 'H',
1: 'B',
2: 'E',
3: 'G',
4: 'I',
5: 'T',
6: 'S',
7: '-'
}
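# (The keys above are the standard DSSP secondary-structure codes: H = alpha
# helix, B = isolated beta-bridge, E = extended strand, G = 3-10 helix,
# I = pi helix, T = turn, S = bend, '-' = coil/unassigned.)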
def _euclidean_distance_matrix(model, selector='all'):
"""Compute the Euclidean distance matrix for all atoms in a pdb model.
Args:
model (Model): Bio.PDB Model object.
selector (str): The atom in each residue with which to compute
distances. The default setting is 'all', which gets all
non-heterologous atoms. Other potential options include 'CA', 'CB'
etc. If an atom is not found within a residue object, then method
reverts to using 'CA'.
Returns:
np.array: A euclidean distance matrix.
np.array: A reference list of all atoms in the model (positionally
matched to the euclidean matrix).
"""
reference = []
coords = []
# Get all non-HET residues from all chains
residues = [res for chain in model for res in chain if
res.get_id()[0] == ' ']
#Filter on non-HET atoms
for residue in residues:
#If selecting based on all atoms within residue
if selector == 'all':
for atom in residue:
coords.append(atom.get_coord())
reference.append(atom.get_full_id()[2:4])
#If measuring distance on particular atoms
else:
if selector in residue:
select_atom = selector
#Revert to carbon alpha if atom is not found
elif 'CA' in residue:
select_atom = 'CA'
#if CA is not found, do not include residue in distance matrix
else:
continue
coords.append(residue[select_atom].get_coord())
reference.append(residue[select_atom].get_full_id()[2:4])
#Convert to a np array, and compute Euclidean distance.
coord_array = np.array(coords)
euclid_mat = _pairwise_euclidean_distance(coord_array)
ref_array = reference
return euclid_mat, ref_array
def _pairwise_euclidean_distance(coord_array):
'''Compute the pairwise euclidean distance matrix for a numpy array'''
if SCIPY_PRESENT:
euclid_mat = distance.pdist(coord_array, 'euclidean')
#Convert to squareform matrix
euclid_mat = distance.squareform(euclid_mat)
else:
euclid_mat = np.sqrt(((coord_array[:, :, None] -
coord_array[:, :, None].T) ** 2).sum(1))
return euclid_mat
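# Quick sanity check for the pure-numpy fallback above (output formatting may
# vary slightly between numpy versions): two points 5 Angstroms apart on the
# x axis give a symmetric 2x2 matrix with 5.0 off the diagonal.
#
#   >>> pts = np.array([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0]])
#   >>> _pairwise_euclidean_distance(pts)
#   array([[0., 5.],
#          [5., 0.]])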
def _get_nearby_matrix(model, selector, radius):
"""Get a matrix of all nearby atoms in a pdb model.
Args:
model (Model): Bio.PDB Model object.
selector (str): The atom in each residue with which to compute
distances. The default setting is 'all', which gets all
non-heterologous atoms. Other potential options include 'CA', 'CB'
etc. If an atom is not found within a residue object, then method
reverts to using 'CA'.
radius (float): The radius within which to extract nearby residues/atoms.
Returns:
list: A nearby matrix (list of lists).
np.array: A reference list of all atoms in the model (positionally
matched to the nearby matrix).
"""
reference = []
coords = []
# Get all non-HET residues from all chains
residues = [res for chain in model for res in chain if
res.get_id()[0] == ' ']
#Filter on non-HET atoms
for residue in residues:
#If selecting based on all atoms within residue
if selector == 'all':
for atom in residue:
coords.append(atom.get_coord())
reference.append(atom.get_full_id()[2:4])
#If measuring distance on particular atoms
else:
if selector in residue:
select_atom = selector
#Revert to carbon alpha if atom is not found
elif 'CA' in residue:
select_atom = 'CA'
#if CA is not found, do not include residue in distance matrix
else:
continue
coords.append(residue[select_atom].get_coord())
reference.append(residue[select_atom].get_full_id()[2:4])
#Convert to a np array, and use a KDTree to identify points within a certain distance.
coord_array = np.array(coords)
point_tree = cKDTree(coord_array)
ball_tree = point_tree.query_ball_tree(point_tree, radius)
ref_array = reference
return ball_tree, ref_array
def nearby(model, radius=15, selector='all'):
"""
    Takes a Bio.PDB model object, and finds all residues within a radius of a
given residue.
Args:
model (Model): Bio.PDB Model object.
radius (float/int): The radius (Angstrom) over which to select nearby
residues
selector (str): The atom in each residue with which to compute
distances. The default setting is 'all', which gets all
            non-HETATM atoms. Other potential options include 'CA', 'CB'
etc. If an atom is not found within a residue object, then method
reverts to using 'CA'.
Returns:
dict: A dictionary containing nearby residues for each
residue in the chain.
"""
#TODO: Cleanup case when scipy not present. Could just enforce scipy usage.
ref_dict = {}
if SCIPY_PRESENT:
near_map, ref = _get_nearby_matrix(model, selector, radius)
_ref_dict = defaultdict(set)
for i, x in enumerate(near_map):
_ref_dict[ref[i]].update({ref[y] for y in x})
else:
euclidean_distance, ref = _euclidean_distance_matrix(model, selector)
within_radius = euclidean_distance <= radius
del euclidean_distance
# 1-indexed as 0 means not within range.
near_map = within_radius * np.arange(1, len(ref)+1)
#Iterate over all atoms in Euclidean distance matrix.
for i, atom in enumerate(near_map):
if atom[i] not in ref_dict:
ref_dict[atom[i]] = atom[np.nonzero(atom)]
else:
ref_dict[atom[i]] = np.append(ref_dict[atom[i]],
atom[np.nonzero(atom)])
_ref_dict = {}
del near_map
# Go from numerical index to residue id
for key, value in ref_dict.items():
_ref_dict[ref[key-1]] = {ref[x-1] for x in value} | _ref_dict.get(ref[key-1], set())
return _ref_dict
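# Usage sketch (the parser call and file name below are illustrative assumptions,
# not guaranteed by this module): `nearby` expects a Bio.PDB model and returns a
# dict keyed by (chain_id, residue_id) tuples, e.g. ('A', (' ', 1, ' ')), whose
# values are the sets of residue ids within the given radius.
# >>> from Bio.PDB import PDBParser
# >>> model = PDBParser(QUIET=True).get_structure('x', 'example.pdb')[0]
# >>> contacts = nearby(model, radius=10, selector='CA')
# >>> neighbours = contacts.get(('A', (' ', 1, ' ')), set())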
def mmcif_sequence_to_res_id(mmcif_dict):
"""Create a lookup from mmcif sequence id to a pdb residue ID and vice versa.
This allows mapping between reference PDB sequences and BioPython
residue ids.
Args:
mmcif_dict (dict): An mmcif dictionary from a Bio.PDB.Structure object.
Returns:
dict: Dictionary in the form {full Bio.PDB residue id: (chain,
mmcif sequence id)}.
dict: Dictionary in the form {(chain, mmcif sequence id):
full Bio.PDB residue id}
"""
_seq_id_list = mmcif_dict['_atom_site.label_seq_id']
seq_id_list = []
for seq_id in _seq_id_list:
try:
seq_id_list.append(int(seq_id))
except ValueError:
seq_id_list.append(None)
# Parse dictionary to extract sequences from mmCIF file
auth_chain_id_list = mmcif_dict['_atom_site.auth_asym_id']
icode_list = mmcif_dict['_atom_site.pdbx_PDB_ins_code']
# Create list of Bio.PDB full ids for each _atom_site
hetero = mmcif_dict['_atom_site.group_PDB']
residue_id_list = mmcif_dict['_atom_site.label_comp_id']
# Use author provided numbering if given.
if "_atom_site.auth_seq_id" in mmcif_dict:
auth_seq_id_list = [int(x) for x in mmcif_dict["_atom_site.auth_seq_id"]]
else:
auth_seq_id_list = [int(x) for x in mmcif_dict["_atom_site.label_seq_id"]]
# Get HET flags as used by Bio.PDB
het_flag = [' ' if het == 'ATOM' else 'W' if res_type in ['HOH', 'WAT']
else 'H_' + res_type for het, res_type in
zip(hetero, residue_id_list)]
# Get insertion code flags as used by Bio.PDB
icode_flag = [' ' if icode == '?' else icode for icode in icode_list]
# Residue ID as used by Bio.PDB
bio_residue_ids = [*zip(het_flag, auth_seq_id_list, icode_flag)]
full_ids = list(zip(auth_chain_id_list, bio_residue_ids))
chain_and_seq_id_list = list(zip(auth_chain_id_list, seq_id_list))
full_id_to_poly_seq_index = {key: value for key, value in
zip(full_ids, chain_and_seq_id_list)}
poly_seq_index_to_full_id = {value: key for key, value in
full_id_to_poly_seq_index.items() if value}
return full_id_to_poly_seq_index, poly_seq_index_to_full_id
def get_pdb_seq(filename):
"""
Get a protein sequence from a PDB file.
Will return multiple sequences if PDB file contains several chains.
Args:
filename (str/filehandle): A PDB filename or file-like object.
Returns:
dict: Protein sequences (str) accessed by chain id.
"""
#Open PDB file and get sequence data
try:
with open(filename, 'r') as f:
seq = [s for s in PdbIO.PdbSeqresIterator(f)]
except TypeError:
#If file-like object is passed instead (io.StringIO)
seq = [s for s in PdbIO.PdbSeqresIterator(filename)]
#A bit of manipulation to get Seq object into a dictionary
#Key is chain ID, and value is sequence as a string.
try:
sequences = {s.id.split(":")[1]:''.join([x for x in s]) for s in seq}
except IndexError:
sequences = {s.id:''.join([x for x in s]) for s in seq}
return sequences
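# Usage sketch (hypothetical file name): the returned dict maps chain id to the
# one-letter SEQRES sequence as a plain string.
# >>> seqs = get_pdb_seq('example.pdb')
# >>> chain_a = seqs.get('A', '')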
def get_mmcif_canonical_seq(mmcif_dict):
"""
Get structure sequences from an mmCIF file.
Args:
        mmcif_dict (dict): An mmcif dictionary from a Bio.PDB.Structure object.
    Returns:
        dict: Protein sequences (str) accessed by chain id.
"""
# TODO Should we use _pdbx_poly_seq_scheme here?
# Parse dictionary to extract sequences from mmCIF file
try:
entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code_can']
except KeyError:
entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code']
if isinstance(entity_seqs, list):
chain_ids = [ids.split(',') for ids in
mmcif_dict['_entity_poly.pdbx_strand_id']]
# Create dictionary of chain id (key) and sequences (value)
sequences = dict((x, sublist[1].replace('\n', '')) for sublist in
zip(chain_ids, entity_seqs) for x in sublist[0])
else:
chain_ids = mmcif_dict['_entity_poly.pdbx_strand_id'].split(',')
sequences = {chain_id: entity_seqs.replace('\n', '') for chain_id in chain_ids}
return sequences
def get_mmcif_seqs(mmcif_dict):
"""
Get structure sequences from an mmCIF file.
Args:
        mmcif_dict (dict): An mmcif dictionary from a Bio.PDB.Structure object.
    Returns:
        dict: Protein sequences (str) accessed by chain id.
"""
# TODO Should we use _pdbx_poly_seq_scheme here?
# Parse dictionary to extract sequences from mmCIF file
entity_ids = mmcif_dict['_entity_poly.entity_id']
chain_ids = [ids.split(',') for ids in mmcif_dict['_entity_poly.pdbx_strand_id']]
try:
entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code_can']
except KeyError:
entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code']
if isinstance(entity_seqs, list):
# Create dictionary of chain id (key) and sequences (value)
sequences = dict((x, sublist[1].replace('\n', '')) for sublist in
zip(chain_ids, entity_seqs) for x in sublist[0])
else:
sequences = {chain_ids[0]: entity_seqs.replace('\n', '')}
return sequences
def get_pdb_seq_from_atom(chain):
"""
Get a protein sequence from chain atoms in a PDB file.
This is used as a 'last resort' when sequence is not available in PDB
headers.
Args:
chain: A Bio.PDB chain object.
Returns:
str: Protein sequence.
"""
# TODO Deprecate and revert to using polypeptide builder.
seq_dict = {}
for residue in chain.get_residues():
res_num = int(residue.id[1])
aminoacid = seq1(residue.resname, custom_map=protein_letters_3to1)
seq_dict[res_num] = aminoacid
# TODO Fix this, as not all residues are sequentially numbered.
pdb_sequence = [seq_dict[x] for x in sorted(seq_dict)]
return ''.join([x for x in pdb_sequence])
def match_pdb_residue_num_to_seq(model, ref=None):
"""Match PDB residue numbering (as given in PDB file) to
a reference sequence (can be pdb sequence) numbered by index.
Reference sequence is 1-indexed (and is indexed as such in output).
Args:
model: A biostructmap Model object.
ref (dict): A dictionary containing reference protein sequences for each
chain in the protein structure. Defaults to the protein sequences
given in PDB file.
Returns:
dict: A dictionary mapping reference sequence index (key) to
residue numbering as given in the PDB file (value). For example,
we might have a key of ('A', 17) for the 17th residue in the
reference sequence for chain 'A', with a value of
('A', (' ', 273, ' ')) that represents the Bio.PDB identifier for
the corresponding residue.
"""
ppb = PPBuilder()
polypeptides = ppb.build_peptides(model.parent().structure)
if ref is None:
ref = model.parent().sequences
output = {}
for peptide in polypeptides:
peptide_sequence = peptide.get_sequence()
# Presume that peptide belongs to a single chain
chain_id = peptide[0].get_full_id()[2]
_, ref_to_pdb = align_protein_sequences(peptide_sequence, ref[chain_id])
for ref_pos, pdb_pos in ref_to_pdb.items():
output[(chain_id, ref_pos)] = peptide[pdb_pos - 1].get_full_id()[2:4]
return output
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Nilesh Bhosale <nilesh.bhosale@in.ibm.com>
# Sasikanth Eda <sasikanth.eda@in.ibm.com>
"""
Tests for the IBM NAS family (SONAS, Storwize V7000 Unified).
"""
import mock
from oslo.config import cfg
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import ibmnas
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class FakeEnv(object):
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class IBMNASDriverTestCase(test.TestCase):
TEST_NFS_EXPORT = 'nfs-host1:/export'
TEST_SIZE_IN_GB = 1
TEST_EXTEND_SIZE_IN_GB = 2
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE = '/mnt'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_VOLUME_PATH = '/export/volume-123'
TEST_SNAP_PATH = '/export/snapshot-123'
def setUp(self):
super(IBMNASDriverTestCase, self).setUp()
self._driver = ibmnas.IBMNAS_NFSDriver(configuration=
conf.Configuration(None))
self._mock = mock.Mock()
self._def_flags = {'nas_ip': 'hostname',
'nas_login': 'user',
'nas_ssh_port': 22,
'nas_password': 'pass',
'nas_private_key': 'nas.key',
'nfs_shares_config': None,
'nfs_sparsed_volumes': True,
'nfs_used_ratio': 0.95,
'nfs_oversub_ratio': 1.0,
'nfs_mount_point_base':
self.TEST_MNT_POINT_BASE,
'nfs_mount_options': None}
self.context = context.get_admin_context()
self.context.user_id = 'fake'
self.context.project_id = 'fake'
def _set_flag(self, flag, value):
group = self._driver.configuration.config_group
self._driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self._driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
self._set_flag(k, v)
def test_check_for_setup_error(self):
"""Check setup with bad parameters."""
drv = self._driver
required_flags = [
'nas_ip',
'nas_login',
'nas_ssh_port']
for flag in required_flags:
self._set_flag(flag, None)
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
self._set_flag('nas_password', None)
self._set_flag('nas_private_key', None)
self.assertRaises(exception.InvalidInput,
self._driver.check_for_setup_error)
self._reset_flags()
def test_get_provider_location(self):
"""Check provider location for given volume id."""
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
mock.drv._get_provider_location.return_value = self.TEST_NFS_EXPORT
self.assertEqual(self.TEST_NFS_EXPORT,
mock.drv._get_provider_location(volume['id']))
def test_get_export_path(self):
"""Check export path for the given volume."""
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
mock.drv._get_export_path.return_value = self.TEST_NFS_EXPORT.\
split(':')[1]
self.assertEqual(self.TEST_NFS_EXPORT.split(':')[1],
mock.drv._get_export_path(volume['id']))
def test_create_ibmnas_snap_mount_point_provided(self):
"""Create ibmnas snap if mount point is provided."""
drv = self._driver
mock = self._mock
drv._create_ibmnas_snap = mock.drv._run_ssh.return_value.\
drv._execute.return_value.drv._create_ibmnas_snap
drv._create_ibmnas_snap.return_value = True
self.assertEqual(True, mock.drv._run_ssh().
drv._execute().
drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
self.TEST_MNT_POINT))
def test_create_ibmnas_snap_no_mount_point_provided(self):
"""Create ibmnas snap if no mount point is provided."""
drv = self._driver
mock = self._mock
drv._create_ibmnas_snap = mock.drv._run_ssh.return_value.\
drv._execute.return_value.drv._create_ibmnas_snap
drv._create_ibmnas_snap.return_value = None
self.assertIsNone(mock.drv._run_ssh().
drv._execute().
drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
None))
def test_create_ibmnas_copy(self):
"""Create ibmnas copy test case."""
drv = self._driver
mock = self._mock
TEST_DEST_SNAP = '/export/snapshot-123.snap'
TEST_DEST_PATH = '/export/snapshot-123'
drv._create_ibmnas_copy = mock.drv._run_ssh.return_value.\
drv._create_ibmnas_copy
drv._create_ibmnas_copy.return_value = None
self.assertIsNone(mock.drv._run_ssh().
drv._create_ibmnas_copy(
self.TEST_VOLUME_PATH,
TEST_DEST_PATH,
TEST_DEST_SNAP))
def test_resize_volume_file(self):
"""Resize volume file test case."""
drv = self._driver
mock = self._mock
drv._resize_volume_file = mock.image_utils.resize_image.return_value.\
drv._resize_volume_file
drv._resize_volume_file.return_value = True
self.assertEqual(True, mock.image_utils.resize_image().
drv._resize_volume_file(
self.TEST_LOCAL_PATH,
self.TEST_EXTEND_SIZE_IN_GB))
def test_extend_volume(self):
"""Extend volume to greater size test case."""
drv = self._driver
mock = self._mock
drv.extend_volume = mock.drv.local_path.return_value.\
drv._resize_volume_file.return_value.\
drv.extend_volume
drv.extend_volume.return_value = None
self.assertIsNone(mock.drv.local_path().
drv._resize_volume_file().
drv.extend_volume(
self.TEST_LOCAL_PATH,
self.TEST_EXTEND_SIZE_IN_GB))
def test_delete_snapfiles(self):
"""Delete_snapfiles assert test case."""
drv = self._driver
mock = self._mock
drv._delete_snapfiles = mock.drv._run_ssh.return_value.\
drv._execute.return_value.\
drv._delete_snapfiles
drv._delete_snapfiles.return_value = None
self.assertIsNone(mock.drv._run_ssh().
drv._execute().
drv._delete_snapfiles(
self.TEST_VOLUME_PATH,
self.TEST_MNT_POINT))
def test_delete_volume_no_provider_location(self):
"""Delete volume with no provider location specified."""
drv = self._driver
volume = FakeEnv()
volume['name'] = 'volume-123'
volume['provider_location'] = None
result = drv.delete_volume(volume)
self.assertIsNone(result)
def test_delete_volume(self):
"""Delete volume test case."""
drv = self._driver
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
volume['provider_location'] = self.TEST_NFS_EXPORT
drv.delete_volume = mock.drv._get_export_path.return_value.\
drv._delete_snapfiles.return_value.drv.delete_volume
drv.delete_volume.return_value = True
self.assertEqual(True, mock.drv._get_export_path(volume['id']).
drv._delete_snapfiles(
self.TEST_VOLUME_PATH,
self.TEST_MNT_POINT).
drv.delete_volume(volume))
def test_create_snapshot(self):
"""Create snapshot simple test case."""
drv = self._driver
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = 'volume-123'
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot.name = 'snapshot-123'
drv.create_snapshot = mock.drv._get_export_path.return_value.\
drv._get_provider_location.return_value.\
drv._get_mount_point_for_share.return_value.\
drv._create_ibmnas_snap.return_value.\
drv.create_snapshot
drv.create_snapshot.return_value = None
self.assertIsNone(mock.drv._get_export_path(snapshot['volume_id']).
drv._get_provider_location(snapshot['volume_id']).
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT).
drv._create_ibmnas_snap(
src=self.TEST_VOLUME_PATH,
dest=self.TEST_SNAP_PATH,
mount_path=self.TEST_MNT_POINT).
drv.create_snapshot(snapshot))
def test_delete_snapshot(self):
"""Delete snapshot simple test case."""
drv = self._driver
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
volume['provider_location'] = self.TEST_NFS_EXPORT
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot['name'] = 'snapshot-123'
drv.delete_snapshot = mock.drv._get_provider_location.return_value.\
drv._get_mount_point_for_share.return_value.drv._execute.\
return_value.drv.delete_snapshot
drv.delete_snapshot.return_value = None
self.assertIsNone(mock.drv._get_provider_location(volume['id']).
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT).
drv._execute().
drv.delete_snapshot(snapshot))
def test_create_cloned_volume(self):
"""Clone volume with equal size test case."""
drv = self._driver
mock = self._mock
volume_src = FakeEnv()
volume_src['id'] = '123'
volume_src['name'] = 'volume-123'
volume_src.size = self.TEST_SIZE_IN_GB
volume_dest = FakeEnv()
volume_dest['id'] = '456'
volume_dest['name'] = 'volume-456'
volume_dest['size'] = self.TEST_SIZE_IN_GB
volume_dest.size = self.TEST_SIZE_IN_GB
drv.create_cloned_volume = mock.drv._get_export_path.\
return_value.drv._create_ibmnas_copy.return_value.\
drv._find_share.return_value.\
drv._set_rw_permissions_for_all.return_value.\
drv._resize_volume_file.return_value.\
drv.create_cloned_volume
drv.create_cloned_volume.return_value = self.TEST_NFS_EXPORT
self.assertEqual(self.TEST_NFS_EXPORT,
mock.drv._get_export_path(volume_src['id']).
drv._create_ibmnas_copy().
drv._find_share().
drv._set_rw_permissions_for_all().
drv._resize_volume_file().
drv.create_cloned_volume(
volume_dest,
volume_src))
def test_create_volume_from_snapshot(self):
"""Create volume from snapshot test case."""
drv = self._driver
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = 'volume-123'
volume['size'] = self.TEST_SIZE_IN_GB
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot['volume_size'] = self.TEST_SIZE_IN_GB
snapshot.name = 'snapshot-123'
drv.create_volume_from_snapshot = mock.drv._get_export_path.\
return_value.drv._create_ibmnas_snap.return_value.\
drv._find_share.return_value.\
drv._set_rw_permissions_for_all.return_value.\
drv._resize_volume_file.return_value.\
drv.create_volume_from_snapshot
drv.create_volume_from_snapshot.return_value = self.TEST_NFS_EXPORT
self.assertEqual(self.TEST_NFS_EXPORT,
mock.drv._get_export_path(volume['id']).
drv._create_ibmnas_snap().
drv._find_share().
drv._set_rw_permissions_for_all().
drv._resize_volume_file().
drv.create_volume_from_snapshot(snapshot))
|
|
#!/usr/bin/python
import StringIO
import subprocess
import os
import time
from datetime import datetime
from PIL import Image
threshold = 10
sensitivity = 20
forceCapture = True
forceCaptureTime = 60 * 60 # Once an hour
filepath = "/home/pi/www/picam"
filenamePrefix = "capture"
diskSpaceToReserve = 40 * 1024 * 1024 # Keep 40 mb free on disk
cameraSettings = ""
# settings of the photos to save
saveWidth = 1296
saveHeight = 972
saveQuality = 15 # Set jpeg quality (0 to 100)
# Test-Image settings
testWidth = 100
testHeight = 75
# this is the default setting, if the whole image should be scanned for changed pixel
testAreaCount = 1
testBorders = [ [[1,testWidth],[1,testHeight]] ] # [ [[start pixel on left side,end pixel on right side],[start pixel on top side,stop pixel on bottom side]] ]
debugMode = False # False or True
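# Illustrative (commented-out) alternative: to scan two regions instead of the whole
# frame, testAreaCount/testBorders could be set as below. Borders are 1-based
# [left, right] / [top, bottom] pixel ranges within the testWidth x testHeight image.
# testAreaCount = 2
# testBorders = [ [[1,50],[1,75]], [[51,100],[40,75]] ]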
# Capture a small test image (for motion detection)
def captureTestImage(settings, width, height):
command = "raspistill %s -w %s -h %s -t 200 -e bmp -n -o -" % (settings, width, height)
imageData = StringIO.StringIO()
imageData.write(subprocess.check_output(command, shell=True))
imageData.seek(0)
im = Image.open(imageData)
buffer = im.load()
imageData.close()
return im, buffer
# Save a full size image to disk
def saveImage(settings, width, height, quality, diskSpaceToReserve):
keepDiskSpaceFree(diskSpaceToReserve)
time = datetime.now()
filename = filepath + "/" + filenamePrefix + "-%04d%02d%02d-%02d%02d%02d.jpg" % (time.year, time.month, time.day, time.hour, time.minute, time.second)
subprocess.call("raspistill %s -w %s -h %s -t 200 -e jpg -q %s -n -o %s" % (settings, width, height, quality, filename), shell=True)
print "Captured %s" % filename
# Keep free space above given level
def keepDiskSpaceFree(bytesToReserve):
if (getFreeSpace() < bytesToReserve):
for filename in sorted(os.listdir(filepath + "/")):
if filename.startswith(filenamePrefix) and filename.endswith(".jpg"):
os.remove(filepath + "/" + filename)
print "Deleted %s/%s to avoid filling disk" % (filepath,filename)
if (getFreeSpace() > bytesToReserve):
return
# Get available disk space
def getFreeSpace():
st = os.statvfs(filepath + "/")
du = st.f_bavail * st.f_frsize
return du
# Get first image
image1, buffer1 = captureTestImage(cameraSettings, testWidth, testHeight)
# Reset last capture time
lastCapture = time.time()
while (True):
# Get comparison image
image2, buffer2 = captureTestImage(cameraSettings, testWidth, testHeight)
# Count changed pixels
changedPixels = 0
takePicture = False
if (debugMode): # in debug mode, save a bitmap-file with marked changed pixels and with visible testarea-borders
debugimage = Image.new("RGB",(testWidth, testHeight))
debugim = debugimage.load()
for z in xrange(0, testAreaCount): # = xrange(0,1) with default-values = z will only have the value of 0 = only one scan-area = whole picture
for x in xrange(testBorders[z][0][0]-1, testBorders[z][0][1]): # = xrange(0,100) with default-values
            for y in xrange(testBorders[z][1][0]-1, testBorders[z][1][1]): # = xrange(0,75) with default-values; testBorders are NOT zero-based, buffer1[x,y] are zero-based (0,0 is top left of image, testWidth-1,testHeight-1 is bottom right)
if (debugMode):
debugim[x,y] = buffer2[x,y]
if ((x == testBorders[z][0][0]-1) or (x == testBorders[z][0][1]-1) or (y == testBorders[z][1][0]-1) or (y == testBorders[z][1][1]-1)):
# print "Border %s %s" % (x,y)
debugim[x,y] = (0, 0, 255) # in debug mode, mark all border pixel to blue
# Just check green channel as it's the highest quality channel
pixdiff = abs(buffer1[x,y][1] - buffer2[x,y][1])
if pixdiff > threshold:
changedPixels += 1
if (debugMode):
debugim[x,y] = (0, 255, 0) # in debug mode, mark all changed pixel to green
# Save an image if pixels changed
if (changedPixels > sensitivity):
takePicture = True # will shoot the photo later
if ((debugMode == False) and (changedPixels > sensitivity)):
break # break the y loop
if ((debugMode == False) and (changedPixels > sensitivity)):
break # break the x loop
if ((debugMode == False) and (changedPixels > sensitivity)):
break # break the z loop
if (debugMode):
debugimage.save(filepath + "/debug.bmp") # save debug image as bmp
print "debug.bmp saved, %s changed pixel" % changedPixels
# Check force capture
if forceCapture:
if time.time() - lastCapture > forceCaptureTime:
takePicture = True
if takePicture:
lastCapture = time.time()
saveImage(cameraSettings, saveWidth, saveHeight, saveQuality, diskSpaceToReserve)
# Swap comparison buffers
image1 = image2
    buffer1 = buffer2
|
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic cleaning methods.
"""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import feedparser
from google.appengine.api import users
from django import forms
from django.forms.util import ErrorList
from django.utils.translation import ugettext
from soc.logic import rights as rights_logic
from soc.logic import validate
from soc.logic.models import document as document_logic
from soc.logic.models.site import logic as site_logic
from soc.logic.models.user import logic as user_logic
from soc.models import document as document_model
DEF_LINK_ID_IN_USE_MSG = ugettext(
'This link ID is already in use, please specify another one')
DEF_NO_RIGHTS_FOR_ACL_MSG = ugettext(
'You do not have the required rights for that ACL.')
DEF_ORGANZIATION_NOT_ACTIVE_MSG = ugettext(
"This organization is not active or doesn't exist.")
DEF_NO_SUCH_DOCUMENT_MSG = ugettext(
"There is no such document with that link ID under this entity.")
DEF_MUST_BE_ABOVE_LIMIT_FMT = ugettext(
"Must be at least %d characters, it has %d characters.")
DEF_MUST_BE_UNDER_LIMIT_FMT = ugettext(
"Must be under %d characters, it has %d characters.")
def check_field_is_empty(field_name):
"""Returns decorator that bypasses cleaning for empty fields.
"""
def decorator(fun):
"""Decorator that checks if a field is empty if so doesn't do the cleaning.
Note Django will capture errors concerning required fields that are empty.
"""
from functools import wraps
@wraps(fun)
def wrapper(self):
"""Decorator wrapper method.
"""
field_content = self.cleaned_data.get(field_name)
if not field_content:
# field has no content so bail out
return None
else:
# field has contents
return fun(self)
return wrapper
return decorator
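# Usage sketch (hypothetical form, for illustration only): cleaners from this module
# are assigned as clean_<field> attributes of a Django form, for example:
#
#   class ExampleForm(forms.Form):
#     link_id = forms.CharField(required=False)
#     clean_link_id = clean_link_id('link_id')
#
# check_field_is_empty then turns the wrapped cleaner into a no-op for empty values,
# leaving missing-value errors to Django's own required-field handling.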
def clean_empty_field(field_name):
"""Incorporates the check_field_is_empty as regular cleaner.
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapper method.
"""
return self.cleaned_data.get(field_name)
return wrapper
def clean_link_id(field_name):
"""Checks if the field_name value is in a valid link ID format.
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapper method.
"""
# convert to lowercase for user comfort
link_id = self.cleaned_data.get(field_name).lower()
if not validate.isLinkIdFormatValid(link_id):
raise forms.ValidationError("This link ID is in wrong format.")
return link_id
return wrapper
def clean_scope_path(field_name):
"""Checks if the field_name value is in a valid scope path format.
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapper method.
"""
# convert to lowercase for user comfort
scope_path = self.cleaned_data.get(field_name).lower()
if not validate.isScopePathFormatValid(scope_path):
raise forms.ValidationError("This scope path is in wrong format.")
return scope_path
return wrapper
def clean_agrees_to_tos(field_name):
"""Checks if there is a ToS to see if it is allowed to leave
the field_name field false.
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapper method.
"""
agrees_to_tos = self.cleaned_data.get(field_name)
if not site_logic.getToS(site_logic.getSingleton()):
return agrees_to_tos
# Site settings specify a site-wide ToS, so agreement is *required*
if agrees_to_tos:
return True
# there was no agreement made so raise an error
raise forms.ValidationError(
'The site-wide Terms of Service must be accepted to participate'
' on this site.')
return wrapper
def clean_existing_user(field_name):
"""Check if the field_name field is a valid user.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
link_id = clean_link_id(field_name)(self)
user_entity = user_logic.getFromKeyFields({'link_id': link_id})
if not user_entity:
# user does not exist
raise forms.ValidationError("This user does not exist.")
return user_entity
return wrapped
def clean_user_is_current(field_name, as_user=True):
"""Check if the field_name value is a valid link_id and resembles the
current user.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
link_id = clean_link_id(field_name)(self)
user_entity = user_logic.getForCurrentAccount()
if not user_entity or user_entity.link_id != link_id:
# this user is not the current user
raise forms.ValidationError("This user is not you.")
return user_entity if as_user else link_id
return wrapped
def clean_user_not_exist(field_name):
"""Check if the field_name value is a valid link_id and a user with the
link id does not exist.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
link_id = clean_link_id(field_name)(self)
user_entity = user_logic.getFromKeyFields({'link_id': link_id})
if user_entity:
# user exists already
raise forms.ValidationError("There is already a user with this link id.")
return link_id
return wrapped
def clean_users_not_same(field_name):
"""Check if the field_name field is a valid user and is not
equal to the current user.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
clean_user_field = clean_existing_user(field_name)
user_entity = clean_user_field(self)
current_user_entity = user_logic.getForCurrentAccount()
if user_entity.key() == current_user_entity.key():
# users are equal
raise forms.ValidationError("You cannot enter yourself here.")
return user_entity
return wrapped
def clean_user_account(field_name):
"""Returns the User with the given field_name value.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
email_adress = self.cleaned_data[field_name]
return users.User(email_adress)
return wrapped
def clean_user_account_not_in_use(field_name):
"""Check if the field_name value contains an email
address that hasn't been used for an existing account.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
email_adress = self.cleaned_data.get(field_name).lower()
# get the user account for this email and check if it's in use
user_account = users.User(email_adress)
fields = {'account': user_account}
user_entity = user_logic.getForFields(fields, unique=True)
if user_entity or user_logic.isFormerAccount(user_account):
raise forms.ValidationError("There is already a user "
"with this email adress.")
return user_account
return wrapped
def clean_ascii_only(field_name):
"""Clean method for cleaning a field that may only contain ASCII-characters.
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapper method.
"""
value = self.cleaned_data.get(field_name)
try:
# encode to ASCII
value = value.encode("ascii")
except UnicodeEncodeError:
# can not encode as ASCII
raise forms.ValidationError("Only ASCII characters are allowed")
return value
return wrapper
def clean_content_length(field_name, min_length=0, max_length=500):
"""Clean method for cleaning a field which must contain at least min and
not more then max length characters.
Args:
    field_name: the name of the field needing cleaning
min_length: the minimum amount of allowed characters
max_length: the maximum amount of allowed characters
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapper method.
"""
value = self.cleaned_data[field_name]
value_length = len(value)
if value_length < min_length:
raise forms.ValidationError(DEF_MUST_BE_ABOVE_LIMIT_FMT %(
min_length, value_length))
if value_length > max_length:
raise forms.ValidationError(DEF_MUST_BE_UNDER_LIMIT_FMT %(
max_length, value_length))
return value
return wrapper
def clean_phone_number(field_name):
"""Clean method for cleaning a field that may only contain numerical values.
"""
@check_field_is_empty(field_name)
def wrapper(self):
"""Decorator wrapped method.
"""
value = self.cleaned_data.get(field_name)
# allow for a '+' prefix which means '00'
if value[0] == '+':
value = '00' + value[1:]
if not value.isdigit():
raise forms.ValidationError("Only numerical characters are allowed")
return value
return wrapper
def clean_feed_url(self):
"""Clean method for cleaning feed url.
"""
feed_url = self.cleaned_data.get('feed_url')
if feed_url == '':
# feed url not supplied (which is OK), so do not try to validate it
return None
if not validate.isFeedURLValid(feed_url):
raise forms.ValidationError('This URL is not a valid ATOM or RSS feed.')
return feed_url
def clean_html_content(field_name):
"""Clean method for cleaning HTML content.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
content = self.cleaned_data.get(field_name)
if user_logic.isDeveloper():
return content
sanitizer = feedparser._HTMLSanitizer('utf-8')
sanitizer.feed(content)
content = sanitizer.output()
content = content.decode('utf-8')
content = content.strip().replace('\r\n', '\n')
return content
return wrapped
def clean_url(field_name):
"""Clean method for cleaning a field belonging to a LinkProperty.
"""
@check_field_is_empty(field_name)
def wrapped(self):
"""Decorator wrapper method.
"""
value = self.cleaned_data.get(field_name)
# call the Django URLField cleaning method to
# properly clean/validate this field
return forms.URLField.clean(self.fields[field_name], value)
return wrapped
def clean_refs(params, fields):
"""Cleans all references to make sure they are valid.
"""
logic = params['logic']
def wrapped(self):
"""Decorator wrapper method.
"""
scope_path = logic.getKeyNameFromFields(self.cleaned_data)
key_fields = {
'scope_path': scope_path,
'prefix': params['document_prefix'],
}
for field in fields:
link_id = self.cleaned_data.get(field)
if not link_id:
continue
key_fields['link_id'] = link_id
ref = document_logic.logic.getFromKeyFields(key_fields)
if not ref:
self._errors[field] = ErrorList([DEF_NO_SUCH_DOCUMENT_MSG])
del self.cleaned_data[field]
else:
self.cleaned_data['resolved_%s' % field] = ref
return self.cleaned_data
return wrapped
def validate_user_edit(link_id_field, account_field):
"""Clean method for cleaning user edit form.
Raises ValidationError if:
-Another User has the given email address as account
  -Another User has the given email address in its FormerAccounts list
"""
def wrapper(self):
"""Decorator wrapper method.
"""
cleaned_data = self.cleaned_data
link_id = cleaned_data.get(link_id_field)
user_account = cleaned_data.get(account_field)
# if both fields were valid do this check
if link_id and user_account:
# get the user from the link_id in the form
user_entity = user_logic.getFromKeyFields({'link_id': link_id})
# if it's not the user's current account
if user_entity.account != user_account:
# get the user having the given account
fields = {'account': user_account}
user_from_account_entity = user_logic.getForFields(fields,
unique=True)
# if there is a user with the given account or it's a former account
if user_from_account_entity or \
user_logic.isFormerAccount(user_account):
# raise an error because this email address can't be used
raise forms.ValidationError("There is already a user with "
"this email address.")
return cleaned_data
return wrapper
def validate_new_group(link_id_field, scope_path_field,
group_logic, group_app_logic):
"""Clean method used to clean the group application or new group form.
Raises ValidationError if:
-A application with this link id and scope path already exists
-A group with this link id and scope path already exists
"""
def wrapper(self):
"""Decorator wrapper method.
"""
cleaned_data = self.cleaned_data
fields = {}
link_id = cleaned_data.get(link_id_field)
if link_id:
fields['link_id'] = link_id
scope_path = cleaned_data.get(scope_path_field)
if scope_path:
fields['scope_path'] = scope_path
# get the application
group_app_entity = group_app_logic.logic.getForFields(fields, unique=True)
# get the current user
user_entity = user_logic.getForCurrentAccount()
# if the proposal has not been accepted or it's not the applicant
# creating the new group then show link ID in use message
if group_app_entity and (group_app_entity.status != 'accepted' or (
group_app_entity.applicant.key() != user_entity.key())):
# add the error message to the link id field
self._errors[link_id_field] = ErrorList([DEF_LINK_ID_IN_USE_MSG])
del cleaned_data[link_id_field]
# return the new cleaned_data
return cleaned_data
# check if there is already a group for the given fields
group_entity = group_logic.logic.getForFields(fields, unique=True)
if group_entity:
# add the error message to the link id field
self._errors[link_id_field] = ErrorList([DEF_LINK_ID_IN_USE_MSG])
del cleaned_data[link_id_field]
# return the new cleaned_data
return cleaned_data
return cleaned_data
return wrapper
def validate_student_proposal(org_field, scope_field,
student_logic, org_logic):
"""Validates the form of a student proposal.
Raises ValidationError if:
-The organization link_id does not match an active organization
-The hidden scope path is not a valid active student
"""
def wrapper(self):
"""Decorator wrapper method.
"""
cleaned_data = self.cleaned_data
org_link_id = cleaned_data.get(org_field)
scope_path = cleaned_data.get(scope_field)
# only if both fields are valid
if org_link_id and scope_path:
filter = {'scope_path': scope_path,
'status': 'active'}
student_entity = student_logic.logic.getFromKeyName(scope_path)
if not student_entity or student_entity.status != 'active':
# raise validation error, access checks should have prevented this
raise forms.ValidationError(
ugettext("The given student is not valid."))
filter = {'link_id': org_link_id,
'scope': student_entity.scope,
'status': 'active'}
org_entity = org_logic.logic.getForFields(filter, unique=True)
if not org_entity:
        # raise validation error, invalid organization entered
self._errors['organization'] = ErrorList(
[DEF_ORGANZIATION_NOT_ACTIVE_MSG])
del cleaned_data['organization']
return cleaned_data
return wrapper
def validate_student_project(org_field, mentor_field, student_field):
"""Validates the form of a student proposal.
Args:
org_field: Field containing key_name for org
mentor_field: Field containing the link_id of the mentor
student_field: Field containing the student link_id
Raises ValidationError if:
-A valid Organization does not exist for the given keyname
-The mentor link_id does not match a mentor for the active organization
-The student link_id does not match a student in the org's Program
"""
def wrapper(self):
"""Decorator wrapper method.
"""
from soc.logic.models.mentor import logic as mentor_logic
from soc.logic.models.organization import logic as org_logic
from soc.logic.models.student import logic as student_logic
cleaned_data = self.cleaned_data
org_key_name = cleaned_data.get(org_field)
mentor_link_id = cleaned_data.get(mentor_field)
student_link_id = cleaned_data.get(student_field)
if not (org_key_name and mentor_link_id and student_link_id):
      # we can't do the check here; the other cleaners will pick up empty fields
return cleaned_data
org_entity = org_logic.getFromKeyName(org_key_name)
if not org_entity:
# show error message
raise forms.ValidationError(
ugettext("The given Organization is not valid."))
fields = {'link_id': mentor_link_id,
'scope': org_entity,
'status': 'active'}
mentor_entity = mentor_logic.getForFields(fields, unique=True,)
if not mentor_entity:
# show error message
raise forms.ValidationError(
ugettext("The given Mentor is not valid."))
fields = {'link_id': student_link_id,
'scope': org_entity.scope,
'status': 'active'}
student_entity = student_logic.getForFields(fields, unique=True)
if not student_entity:
#show error message
raise forms.ValidationError(
ugettext("The given Student is not valid."))
# successfully validated
return cleaned_data
return wrapper
def validate_document_acl(view, creating=False):
"""Validates that the document ACL settings are correct.
"""
def wrapper(self):
"""Decorator wrapper method.
"""
cleaned_data = self.cleaned_data
read_access = cleaned_data.get('read_access')
write_access = cleaned_data.get('write_access')
if not (read_access and write_access and ('prefix' in cleaned_data)):
return cleaned_data
if read_access != 'public':
      ordering = document_model.Document.DOCUMENT_ACCESS
      if ordering.index(read_access) < ordering.index(write_access):
raise forms.ValidationError(
"Read access should be less strict than write access.")
params = view.getParams()
rights = params['rights']
user = user_logic.getForCurrentAccount()
rights.setCurrentUser(user.account, user)
prefix = self.cleaned_data['prefix']
scope_path = self.cleaned_data['scope_path']
validate_access(self, view, rights, prefix, scope_path, 'read_access')
validate_access(self, view, rights, prefix, scope_path, 'write_access')
if creating and not has_access(rights, 'restricted', scope_path, prefix):
raise forms.ValidationError(
"You do not have the required access to create this document.")
return cleaned_data
return wrapper
def has_access(rights, access_level, scope_path, prefix):
"""Checks whether the current user has the required access.
"""
checker = rights_logic.Checker(prefix)
roles = checker.getMembership(access_level)
django_args = {
'scope_path': scope_path,
'prefix': prefix,
}
return rights.hasMembership(roles, django_args)
def validate_access(self, view, rights, prefix, scope_path, field):
"""Validates that the user has access to the ACL for the specified fields.
"""
access_level = self.cleaned_data[field]
if not has_access(rights, access_level, scope_path, prefix):
self._errors[field] = ErrorList([DEF_NO_RIGHTS_FOR_ACL_MSG])
del self.cleaned_data[field]
|
|
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + "_orig")
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + "_orig", nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name="weight"):
EqualLR.apply(module, name)
return module
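# Illustrative sketch: equal_lr re-registers the layer's weight as `weight_orig` and
# recomputes `weight` on every forward pass, scaled by sqrt(2 / fan_in) (the
# equalized learning rate trick from progressive GANs).
# >>> layer = equal_lr(nn.Linear(8, 4))
# >>> hasattr(layer, 'weight_orig')
# True
# >>> out = layer(torch.randn(2, 8))  # the forward pre-hook sets layer.weight here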
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input / torch.sqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
class EqualConvTranspose2d(nn.Module):
### additional module for OOGAN usage
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.ConvTranspose2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class ConvBlock(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
padding,
kernel_size2=None,
padding2=None,
pixel_norm=True,
):
super().__init__()
pad1 = padding
pad2 = padding
if padding2 is not None:
pad2 = padding2
kernel1 = kernel_size
kernel2 = kernel_size
if kernel_size2 is not None:
kernel2 = kernel_size2
convs = [EqualConv2d(in_channel, out_channel, kernel1, padding=pad1)]
if pixel_norm:
convs.append(PixelNorm())
convs.append(nn.LeakyReLU(0.1))
convs.append(EqualConv2d(out_channel, out_channel, kernel2, padding=pad2))
if pixel_norm:
convs.append(PixelNorm())
convs.append(nn.LeakyReLU(0.1))
self.conv = nn.Sequential(*convs)
def forward(self, input):
out = self.conv(input)
return out
def upscale(feat):
return F.interpolate(feat, scale_factor=2, mode="bilinear", align_corners=False)
class Generator(nn.Module):
def __init__(self, input_code_dim=128, in_channel=128, pixel_norm=True, tanh=True):
super().__init__()
self.input_dim = input_code_dim
self.tanh = tanh
self.input_layer = nn.Sequential(
EqualConvTranspose2d(input_code_dim, in_channel, 4, 1, 0),
PixelNorm(),
nn.LeakyReLU(0.1),
)
self.progression_4 = ConvBlock(
in_channel, in_channel, 3, 1, pixel_norm=pixel_norm
)
self.progression_8 = ConvBlock(
in_channel, in_channel, 3, 1, pixel_norm=pixel_norm
)
self.progression_16 = ConvBlock(
in_channel, in_channel, 3, 1, pixel_norm=pixel_norm
)
self.progression_32 = ConvBlock(
in_channel, in_channel, 3, 1, pixel_norm=pixel_norm
)
self.progression_64 = ConvBlock(
in_channel, in_channel // 2, 3, 1, pixel_norm=pixel_norm
)
self.progression_128 = ConvBlock(
in_channel // 2, in_channel // 4, 3, 1, pixel_norm=pixel_norm
)
self.progression_256 = ConvBlock(
in_channel // 4, in_channel // 4, 3, 1, pixel_norm=pixel_norm
)
self.to_rgb_8 = EqualConv2d(in_channel, 3, 1)
self.to_rgb_16 = EqualConv2d(in_channel, 3, 1)
self.to_rgb_32 = EqualConv2d(in_channel, 3, 1)
self.to_rgb_64 = EqualConv2d(in_channel // 2, 3, 1)
self.to_rgb_128 = EqualConv2d(in_channel // 4, 3, 1)
self.to_rgb_256 = EqualConv2d(in_channel // 4, 3, 1)
self.max_step = 6
def progress(self, feat, module):
out = F.interpolate(feat, scale_factor=2, mode="bilinear", align_corners=False)
out = module(out)
return out
def output(self, feat1, feat2, module1, module2, alpha):
if 0 <= alpha < 1:
skip_rgb = upscale(module1(feat1))
out = (1 - alpha) * skip_rgb + alpha * module2(feat2)
else:
out = module2(feat2)
if self.tanh:
return torch.tanh(out)
return out
def forward(self, input, step=0, alpha=-1):
if step > self.max_step:
step = self.max_step
out_4 = self.input_layer(input.view(-1, self.input_dim, 1, 1))
out_4 = self.progression_4(out_4)
out_8 = self.progress(out_4, self.progression_8)
if step == 1:
if self.tanh:
return torch.tanh(self.to_rgb_8(out_8))
return self.to_rgb_8(out_8)
out_16 = self.progress(out_8, self.progression_16)
if step == 2:
return self.output(out_8, out_16, self.to_rgb_8, self.to_rgb_16, alpha)
out_32 = self.progress(out_16, self.progression_32)
if step == 3:
return self.output(out_16, out_32, self.to_rgb_16, self.to_rgb_32, alpha)
out_64 = self.progress(out_32, self.progression_64)
if step == 4:
return self.output(out_32, out_64, self.to_rgb_32, self.to_rgb_64, alpha)
out_128 = self.progress(out_64, self.progression_128)
if step == 5:
return self.output(out_64, out_128, self.to_rgb_64, self.to_rgb_128, alpha)
out_256 = self.progress(out_128, self.progression_256)
if step == 6:
return self.output(
out_128, out_256, self.to_rgb_128, self.to_rgb_256, alpha
)
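# Usage sketch (assumed batch size and latent size for illustration): `step` selects
# the output resolution (1 -> 8x8, ..., 6 -> 256x256) and alpha in [0, 1) blends the
# previous-resolution RGB output with the new one during fade-in.
# >>> g = Generator(input_code_dim=128, in_channel=128)
# >>> z = torch.randn(4, 128)
# >>> imgs = g(z, step=3, alpha=0.5)  # expected shape: (4, 3, 32, 32)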
class Discriminator(nn.Module):
def __init__(self, feat_dim=128):
super().__init__()
self.progression = nn.ModuleList(
[
ConvBlock(feat_dim // 4, feat_dim // 4, 3, 1),
ConvBlock(feat_dim // 4, feat_dim // 2, 3, 1),
ConvBlock(feat_dim // 2, feat_dim, 3, 1),
ConvBlock(feat_dim, feat_dim, 3, 1),
ConvBlock(feat_dim, feat_dim, 3, 1),
ConvBlock(feat_dim, feat_dim, 3, 1),
ConvBlock(feat_dim + 1, feat_dim, 3, 1, 4, 0),
]
)
self.from_rgb = nn.ModuleList(
[
EqualConv2d(3, feat_dim // 4, 1),
EqualConv2d(3, feat_dim // 4, 1),
EqualConv2d(3, feat_dim // 2, 1),
EqualConv2d(3, feat_dim, 1),
EqualConv2d(3, feat_dim, 1),
EqualConv2d(3, feat_dim, 1),
EqualConv2d(3, feat_dim, 1),
]
)
self.n_layer = len(self.progression)
self.linear = EqualLinear(feat_dim, 1)
def forward(self, input, step=0, alpha=-1):
for i in range(step, -1, -1):
index = self.n_layer - i - 1
if i == step:
out = self.from_rgb[index](input)
if i == 0:
out_std = torch.sqrt(out.var(0, unbiased=False) + 1e-8)
mean_std = out_std.mean()
mean_std = mean_std.expand(out.size(0), 1, 4, 4)
out = torch.cat([out, mean_std], 1)
out = self.progression[index](out)
if i > 0:
# out = F.avg_pool2d(out, 2)
out = F.interpolate(
out, scale_factor=0.5, mode="bilinear", align_corners=False
)
if i == step and 0 <= alpha < 1:
# skip_rgb = F.avg_pool2d(input, 2)
skip_rgb = F.interpolate(
input, scale_factor=0.5, mode="bilinear", align_corners=False
)
skip_rgb = self.from_rgb[index + 1](skip_rgb)
out = (1 - alpha) * skip_rgb + alpha * out
out = out.squeeze(2).squeeze(2)
# print(input.size(), out.size(), step)
out = self.linear(out)
return out
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
import mox
from neutronclient.neutron.v2_0 import port
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20PortJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20PortJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_port(self):
"""Create port: netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args(self):
"""Create port: netid --extra_dhcp_opt."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s' %
dhcp_opt)]
print args
position_names = ['network_id', 'extra_dhcp_opts']
        position_values = [netid, extra_dhcp_opts]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_full(self):
"""Create port: --mac_address mac --device_id deviceid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--mac_address', 'mac', '--device_id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
position_values = [netid, 'mac', 'deviceid']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--mac-address', 'mac', '--device-id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_tenant(self):
"""Create port: --tenant_id tenantid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--tenant_id', 'tenantid', netid, ]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', netid, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_port_tags(self):
"""Create port: netid mac_address device_id --tags a b."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--tags', 'a', 'b']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_port_secgroup(self):
"""Create port: --security-group sg1_id netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups(self):
"""Create port: <security_groups> netid
The <security_groups> are
--security-group sg1_id --security-group sg2_id
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id', 'sg2_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroup_off(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--no-security-group', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, None]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups_list(self):
"""Create port: netid <security_groups>
The <security_groups> are
--security-groups list=true sg_id1 sg_id2
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--security-groups', 'list=true', 'sg_id1', 'sg_id2']
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg_id1', 'sg_id2']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_ports(self):
"""List ports: -D."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ports_pagination(self):
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ports_sort(self):
"""list ports: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ports_limit(self):
"""list ports: -P."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_list_ports_tags(self):
"""List ports: -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_ports_detail_tags(self):
"""List ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_router_port(self, resources, cmd,
myid, detail=False, tags=[],
fields_1=[], fields_2=[]):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
args.append(myid)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
fields_1.extend(fields_2)
for field in fields_1:
if query:
query += "&fields=" + field
else:
query = "fields=" + field
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
query = query and query + '&device_id=%s' or 'device_id=%s'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.end_url(path, query % myid), 'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertTrue('myid1' in _str)
def test_list_router_ports(self):
"""List router ports: -D."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, True)
def test_list_router_ports_tags(self):
"""List router ports: -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, tags=['a', 'b'])
def test_list_router_ports_detail_tags(self):
"""List router ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
detail=True, tags=['a', 'b'])
def test_list_router_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_port(self):
"""Update port: myid --name myname --tags a b."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_update_port_secgroup(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id', myid]
updatefields = {'security_groups': ['sg1_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_secgroups(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
myid]
updatefields = {'security_groups': ['sg1_id', 'sg2_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts(self):
"""Update port: myid --extra_dhcp_opt."""
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_security_group_off(self):
"""Update port: --no-security-groups myid."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-security-groups', 'myid'],
{'security_groups': None})
def test_show_port(self):
"""Show port: --fields id --fields name myid."""
resource = 'port'
cmd = port.ShowPort(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_port(self):
"""Delete port: myid."""
resource = 'port'
cmd = port.DeletePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
class CLITestV20PortXML(CLITestV20PortJSON):
format = 'xml'
|
|
# -*- coding: utf-8 -*-
"""Models for blogging app."""
from __future__ import unicode_literals
import hashlib
import time
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.db import models
from django.urls import NoReverseMatch, reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from blogging.managers import AvailableCategoriesManager, PostManager
def upload_to_blogging(instance, filename):
"""Create file path."""
hasher = hashlib.md5()
hasher.update(filename.encode('utf-8'))
hasher.update(str(time.time()).encode('utf-8'))
hashed_name = hasher.hexdigest()
extension = filename.split('.')[-1]
return "blogging/pictures/{0}-{1}.{2}".format(
hashed_name[:5], instance.slug, extension
)
@python_2_unicode_compatible
class Picture(models.Model):
name = models.CharField(_("Name"), max_length=100)
slug = models.SlugField(_("Slug"))
description = models.TextField(_("Description"), blank=True)
site = models.ForeignKey(Site, verbose_name=_("Site"),
default=settings.SITE_ID,
on_delete=models.CASCADE)
image = models.ImageField(upload_to=upload_to_blogging, max_length=200)
def __str__(self):
"""Human picture name."""
return self.name
@python_2_unicode_compatible
class Category(models.Model):
"""A category to regroup similar articles."""
name = models.CharField(_("Name"), max_length=100)
slug = models.SlugField(_("Slug"))
description = models.TextField(_("Description"), blank=True)
site = models.ForeignKey(Site, verbose_name=_("Site"),
default=settings.SITE_ID,
on_delete=models.CASCADE)
picture = models.ForeignKey(Picture, verbose_name=_("Picture"),
blank=True, null=True,
on_delete=models.CASCADE)
# hidden cached field
visible_posts_count = models.IntegerField(_("Visible posts in category"), editable=False, default=0)
all_posts_count = models.IntegerField(_("All posts in category"), editable=False, default=0)
objects = models.Manager() # The default manager.
on_site = CurrentSiteManager()
availables = AvailableCategoriesManager() # The Online manager.
class Meta:
ordering = ['name']
verbose_name = _("category")
verbose_name_plural = _("categories")
def __str__(self):
"""Human category name."""
return self.name
def get_absolute_url(self):
return reverse('blog-category', args=[self.slug])
def natural_key(self):
return [self.slug, self.site.id]
def is_empty(self):
return not Post.objects.published(site_id=settings.SITE_ID).filter(categories=self).exists()
@property
def get_online_posts_count(self):
return Post.objects.published(site_id=settings.SITE_ID).filter(categories=self).count()
    @property
    def get_all_posts_count(self):
        # Count every post in the category regardless of status, so this
        # cached value can differ from the published-only counter above.
        return Post.objects.filter(categories=self, site__id=settings.SITE_ID).count()
def update_counters(self):
self.visible_posts_count = self.get_online_posts_count
self.all_posts_count = self.get_all_posts_count
self.save()
@python_2_unicode_compatible
class Post(models.Model):
"""
    The Post contains the generic fields for a blog item,
    for example the publication date, the author, and the slug.
"""
# Constants for the blog status
DRAFT = 1
PUBLISHED = 2
DELETED = 3
STATUS_CHOICES = (
(DRAFT, _("Draft")),
(PUBLISHED, _("Published")),
(DELETED, _("Deleted")),
)
# Constants for content type
TEXT = 1
QUOTE = 2
LINK = 3
VIDEO = 4
CONTENT_TYPE_CHOICES = (
(TEXT, _("Text")),
(QUOTE, _("Quote")),
(LINK, _("Link")),
(VIDEO, _("Video")),
)
title = models.CharField(_(u"Title"), max_length=150)
slug = models.SlugField(_(u"Slug"), max_length=150, db_index=True)
author = models.ForeignKey(User, verbose_name=_(u"Author"),
on_delete=models.CASCADE)
excerpt = models.TextField(_(u"Excerpt"), blank=True, db_column="exceprt")
content = models.TextField(_(u"Content"))
main_picture = models.ForeignKey(Picture, verbose_name=_("Picture"),
blank=True, null=True,
on_delete=models.CASCADE)
published_on = models.DateTimeField(_("Published on"), db_index=True,
default=timezone.now)
created_on = models.DateTimeField(auto_now_add=True, editable=False)
updated_on = models.DateTimeField(auto_now=True, editable=False)
post_type = models.IntegerField(_("Type"), choices=CONTENT_TYPE_CHOICES,
default=TEXT, db_index=True)
status = models.IntegerField(_("Status"), choices=STATUS_CHOICES,
db_index=True, default=DRAFT)
selected = models.BooleanField(_("Selected"), default=False)
comments_open = models.BooleanField(_("Are comments open?"), default=True)
source = models.URLField(_("Post source"), blank=True, null=True)
categories = models.ManyToManyField(Category, verbose_name=_("Categories"))
site = models.ForeignKey(Site, verbose_name=_("Site"),
default=settings.SITE_ID,
on_delete=models.CASCADE)
# Managers
objects = PostManager()
on_site = CurrentSiteManager()
class Meta:
ordering = ['-published_on']
verbose_name = _("item")
verbose_name_plural = _("items")
unique_together = (
("site", "slug"),
)
def __str__(self):
"""Human post name."""
return self.title
def natural_key(self):
return [self.slug, self.site.id]
def get_absolute_url(self):
try:
kwargs = {
'year': self.published_on.strftime("%Y"),
'month': self.published_on.strftime("%m"),
'day': self.published_on.strftime("%d"),
'slug': self.slug
}
return reverse('blog-item', kwargs=kwargs, urlconf=settings.ROOT_URLCONF)
except NoReverseMatch:
try:
kwargs = {
'year': self.published_on.strftime("%Y"),
'month': self.published_on.strftime("%m"),
'slug': self.slug
}
return reverse('blog-item', kwargs=kwargs, urlconf=settings.ROOT_URLCONF)
except NoReverseMatch:
kwargs = {
'slug': self.slug
}
return reverse('blog-item', kwargs=kwargs, urlconf=settings.ROOT_URLCONF)
def __item_cache_key(self):
"""Return a unique item key that can be used in order to cache it."""
return "blogging:post:{0}".format(self.id)
def related_items(self):
"""Return items related to the current item."""
return Post.objects.related_items(self, site_id=settings.SITE_ID)
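# A minimal usage sketch (illustrative, not part of the app): fetch the
# published posts of a category and refresh its cached counters.  Relies on
# the ``published(site_id=...)`` manager method already used above.
def _category_summary_example(slug):
    category = Category.availables.get(slug=slug)
    posts = Post.objects.published(site_id=settings.SITE_ID).filter(categories=category)
    category.update_counters()
    return posts.count(), category.visible_posts_count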
|
|
# -*- test-case-name: wokkel.test.test_generic -*-
#
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Generic XMPP protocol helpers.
"""
from zope.interface import implements
from twisted.internet import defer, protocol
from twisted.python import reflect
from twisted.words.protocols.jabber import error, jid, xmlstream
from twisted.words.protocols.jabber.xmlstream import toResponse
from twisted.words.xish import domish, utility
try:
from twisted.words.xish.xmlstream import BootstrapMixin
except ImportError:
from wokkel.compat import BootstrapMixin
from wokkel.iwokkel import IDisco
from wokkel.subprotocols import XMPPHandler
IQ_GET = '/iq[@type="get"]'
IQ_SET = '/iq[@type="set"]'
NS_VERSION = 'jabber:iq:version'
VERSION = IQ_GET + '/query[@xmlns="' + NS_VERSION + '"]'
def parseXml(string):
"""
Parse serialized XML into a DOM structure.
@param string: The serialized XML to be parsed, UTF-8 encoded.
@type string: C{str}.
@return: The DOM structure, or C{None} on empty or incomplete input.
@rtype: L{domish.Element}
"""
roots = []
results = []
elementStream = domish.elementStream()
elementStream.DocumentStartEvent = roots.append
elementStream.ElementEvent = lambda elem: roots[0].addChild(elem)
elementStream.DocumentEndEvent = lambda: results.append(roots[0])
elementStream.parse(string)
return results and results[0] or None
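# A quick illustration (not part of wokkel): the parsed stanza is a
# domish.Element whose name, attributes and children can be inspected
# directly.  Values below are illustrative.
def _parseXmlExample():
    msg = parseXml("<message to='user@example.org'><body>hi</body></message>")
    return msg.name, msg['to']  # ('message', 'user@example.org')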
def stripNamespace(rootElement):
namespace = rootElement.uri
def strip(element):
if element.uri == namespace:
element.uri = None
if element.defaultUri == namespace:
element.defaultUri = None
for child in element.elements():
strip(child)
if namespace is not None:
strip(rootElement)
return rootElement
class FallbackHandler(XMPPHandler):
"""
XMPP subprotocol handler that catches unhandled iq requests.
Unhandled iq requests are replied to with a service-unavailable stanza
error.
"""
def connectionInitialized(self):
self.xmlstream.addObserver(IQ_SET, self.iqFallback, -1)
self.xmlstream.addObserver(IQ_GET, self.iqFallback, -1)
def iqFallback(self, iq):
        if iq.handled:
return
reply = error.StanzaError('service-unavailable')
self.xmlstream.send(reply.toResponse(iq))
class VersionHandler(XMPPHandler):
"""
XMPP subprotocol handler for XMPP Software Version.
This protocol is described in
U{XEP-0092<http://www.xmpp.org/extensions/xep-0092.html>}.
"""
implements(IDisco)
def __init__(self, name, version):
self.name = name
self.version = version
def connectionInitialized(self):
self.xmlstream.addObserver(VERSION, self.onVersion)
def onVersion(self, iq):
response = toResponse(iq, "result")
query = response.addElement((NS_VERSION, "query"))
name = query.addElement("name", content=self.name)
version = query.addElement("version", content=self.version)
self.send(response)
iq.handled = True
def getDiscoInfo(self, requestor, target, node):
info = set()
if not node:
from wokkel import disco
info.add(disco.DiscoFeature(NS_VERSION))
return defer.succeed(info)
def getDiscoItems(self, requestor, target, node):
return defer.succeed([])
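# A minimal sketch (illustrative, not part of this module) of attaching the
# handlers above to a stream manager; wokkel.client.XMPPClient, the JID and
# the password are assumptions made for the example.
def _exampleClient():
    from wokkel.client import XMPPClient
    client = XMPPClient(jid.internJID('user@example.org'), 'secret')
    FallbackHandler().setHandlerParent(client)
    VersionHandler('ExampleApp', '0.1').setHandlerParent(client)
    return client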
class XmlPipe(object):
"""
XML stream pipe.
    Connects two objects that communicate stanzas through an XML stream-like
interface. Each of the ends of the pipe (sink and source) can be used to
send XML stanzas to the other side, or add observers to process XML stanzas
that were sent from the other side.
XML pipes are usually used in place of regular XML streams that are
transported over TCP. This is the reason for the use of the names source
and sink for both ends of the pipe. The source side corresponds with the
entity that initiated the TCP connection, whereas the sink corresponds with
the entity that accepts that connection. In this object, though, the source
and sink are treated equally.
Unlike Jabber
L{XmlStream<twisted.words.protocols.jabber.xmlstream.XmlStream>}s, the sink
    and source objects are assumed to represent an eternally connected and
initialized XML stream. As such, events corresponding to connection,
disconnection, initialization and stream errors are not dispatched or
processed.
@ivar source: Source XML stream.
@ivar sink: Sink XML stream.
"""
def __init__(self):
self.source = utility.EventDispatcher()
self.sink = utility.EventDispatcher()
self.source.send = lambda obj: self.sink.dispatch(obj)
self.sink.send = lambda obj: self.source.dispatch(obj)
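# A minimal sketch (illustrative, not part of wokkel) of wiring the two ends
# of an XmlPipe: an observer added on the sink sees stanzas sent from the
# source, and vice versa.
def _xmlPipeExample():
    pipe = XmlPipe()
    received = []
    pipe.sink.addObserver('/message', received.append)
    pipe.source.send(parseXml("<message><body>ping</body></message>"))
    return received  # contains the dispatched message element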
class Stanza(object):
"""
Abstract representation of a stanza.
@ivar sender: The sending entity.
@type sender: L{jid.JID}
@ivar recipient: The receiving entity.
@type recipient: L{jid.JID}
"""
recipient = None
sender = None
stanzaKind = None
stanzaID = None
stanzaType = None
def __init__(self, recipient=None, sender=None):
self.recipient = recipient
self.sender = sender
@classmethod
def fromElement(Class, element):
stanza = Class()
stanza.parseElement(element)
return stanza
def parseElement(self, element):
if element.hasAttribute('from'):
self.sender = jid.internJID(element['from'])
if element.hasAttribute('to'):
self.recipient = jid.internJID(element['to'])
self.stanzaType = element.getAttribute('type')
self.stanzaID = element.getAttribute('id')
# Save element
stripNamespace(element)
self.element = element
# accumulate all childHandlers in the class hierarchy of Class
handlers = {}
reflect.accumulateClassDict(self.__class__, 'childParsers', handlers)
for child in element.elements():
try:
handler = handlers[child.uri, child.name]
except KeyError:
pass
else:
getattr(self, handler)(child)
def toElement(self):
element = domish.Element((None, self.stanzaKind))
if self.sender is not None:
element['from'] = self.sender.full()
if self.recipient is not None:
element['to'] = self.recipient.full()
if self.stanzaType:
element['type'] = self.stanzaType
if self.stanzaID:
element['id'] = self.stanzaID
return element
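# A minimal sketch (illustrative, not part of wokkel) of a Stanza subclass
# that hooks into the childParsers mapping consumed by parseElement above:
# keys are (namespace URI, element name) tuples, values are the names of
# parser methods on the class.
class _ExamplePing(Stanza):
    stanzaKind = 'iq'
    childParsers = {('urn:xmpp:ping', 'ping'): '_onPing'}

    def _onPing(self, element):
        self.pinged = True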
class ErrorStanza(Stanza):
def parseElement(self, element):
Stanza.parseElement(self, element)
self.exception = error.exceptionFromStanza(element)
class Request(Stanza):
"""
IQ request stanza.
This is a base class for IQ get or set stanzas, to be used with
L{wokkel.subprotocols.StreamManager.request}.
"""
stanzaKind = 'iq'
stanzaType = 'get'
timeout = None
def __init__(self, recipient=None, sender=None, stanzaType='get'):
Stanza.__init__(self, recipient=recipient, sender=sender)
self.stanzaType = stanzaType
def toElement(self):
element = Stanza.toElement(self)
if not self.stanzaID:
element.addUniqueId()
self.stanzaID = element['id']
return element
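# A minimal sketch (illustrative) of a concrete request, as used with
# StreamManager.request: subclass Request and add the payload in toElement.
class _ExampleVersionRequest(Request):
    def toElement(self):
        element = Request.toElement(self)
        element.addElement((NS_VERSION, 'query'))
        return element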
class DeferredXmlStreamFactory(BootstrapMixin, protocol.ClientFactory):
protocol = xmlstream.XmlStream
def __init__(self, authenticator):
BootstrapMixin.__init__(self)
self.authenticator = authenticator
deferred = defer.Deferred()
self.deferred = deferred
self.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.deferred.callback)
self.addBootstrap(xmlstream.INIT_FAILED_EVENT, deferred.errback)
def buildProtocol(self, addr):
"""
Create an instance of XmlStream.
A new authenticator instance will be created and passed to the new
XmlStream. Registered bootstrap event observers are installed as well.
"""
xs = self.protocol(self.authenticator)
xs.factory = self
self.installBootstraps(xs)
return xs
def clientConnectionFailed(self, connector, reason):
self.deferred.errback(reason)
|
|
from __future__ import print_function
import collections
import os
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.sum import sum
from chainer.functions.noise.dropout import dropout
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import normal
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class VGG16Layers(link.Chain):
"""A pre-trained CNN model with 16 layers provided by VGG team.
    During initialization, this chain model automatically downloads
    the pre-trained caffemodel, converts it to another chainer model,
    stores it in your local directory, and initializes all the parameters
    with it. This model is useful when you want to extract a semantic
    feature vector from a given image, or to fine-tune the model
    on a different dataset.
Note that this pre-trained model is released under Creative Commons
Attribution License.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
See: K. Simonyan and A. Zisserman, `Very Deep Convolutional Networks
for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
            Note that in this case the converted chainer model is stored
            in the ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
            where ``$CHAINER_DATASET_ROOT`` is set to
            ``$HOME/.chainer/dataset`` unless you specify another value
            as an environment variable. The converted chainer model is
            reused automatically from the second time onward.
            If the argument is ``None``, the parameters are not
            initialized from the pre-trained model; instead, the default
            initializer used in the original paper, i.e.,
            ``chainer.initializers.Normal(scale=0.01)``, is used.
Attributes:
~VGG16Layers.available_layers (list of str): The list of available
layer names used by ``__call__`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto'):
if pretrained_model:
            # The pre-trained weights will overwrite these parameters anyway,
            # so use a cheap zero initializer instead of random sampling.
init = constant.Zero()
kwargs = {'initialW': init, 'initial_bias': init}
else:
# employ default initializers used in the original paper
kwargs = {
'initialW': normal.Normal(0.01),
'initial_bias': constant.Zero(),
}
super(VGG16Layers, self).__init__()
with self.init_scope():
self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
self.fc7 = Linear(4096, 4096, **kwargs)
self.fc8 = Linear(4096, 1000, **kwargs)
if pretrained_model == 'auto':
_retrieve(
'VGG_ILSVRC_16_layers.npz',
'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
'caffe/VGG_ILSVRC_16_layers.caffemodel',
self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
return collections.OrderedDict([
('conv1_1', [self.conv1_1, relu]),
('conv1_2', [self.conv1_2, relu]),
('pool1', [_max_pooling_2d]),
('conv2_1', [self.conv2_1, relu]),
('conv2_2', [self.conv2_2, relu]),
('pool2', [_max_pooling_2d]),
('conv3_1', [self.conv3_1, relu]),
('conv3_2', [self.conv3_2, relu]),
('conv3_3', [self.conv3_3, relu]),
('pool3', [_max_pooling_2d]),
('conv4_1', [self.conv4_1, relu]),
('conv4_2', [self.conv4_2, relu]),
('conv4_3', [self.conv4_3, relu]),
('pool4', [_max_pooling_2d]),
('conv5_1', [self.conv5_1, relu]),
('conv5_2', [self.conv5_2, relu]),
('conv5_3', [self.conv5_3, relu]),
('pool5', [_max_pooling_2d]),
('fc6', [self.fc6, relu, dropout]),
('fc7', [self.fc7, relu, dropout]),
('fc8', [self.fc8]),
('prob', [softmax]),
])
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
npz.save_npz(path_npz, caffemodel, compression=False)
def __call__(self, x, layers=['prob'], **kwargs):
"""__call__(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
.. warning::
``test`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
x (~chainer.Variable): Input variable.
layers (list of str): The list of layer names you want to extract.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
                each key is a layer name and the value is the
                corresponding feature map variable.
"""
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
target_layers = set(layers)
for key, funcs in self.functions.items():
if len(target_layers) == 0:
break
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
return activations
def extract(self, images, layers=['fc7'], size=(224, 224), **kwargs):
"""extract(self, images, layers=['fc7'], size=(224, 224))
Extracts all the feature maps of given images.
        The difference from directly executing ``__call__`` is that
        this method accepts images as an input and automatically
        transforms them to a proper variable. In other words, it is
        a shortcut that implicitly calls the ``prepare`` and
        ``__call__`` functions.
.. warning::
``test`` and ``volatile`` arguments are not supported anymore since
v2.
Instead, use ``chainer.using_config('train', train)`` and
``chainer.using_config('enable_backprop', not volatile)``
respectively.
See :func:`chainer.using_config`.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution of resized images used as
                an input to the CNN. If this argument is ``None``, the
                given images are not resized, but then they must all
                already have the same resolution.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
                each key is a layer name and the value is the
                corresponding feature map variable.
"""
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = y.data.shape[0] // 10
y_shape = y.data.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = sum(y, axis=1) / 10
return y
def prepare(image, size=(224, 224)):
"""Converts the given image to the numpy array for VGG models.
    Note that you have to call this method before ``__call__``
    because the pre-trained VGG model requires the given image to be
    resized, converted from RGB to BGR, mean-subtracted, and
    dimension-permuted before being fed to the network.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=numpy.float32)
image = image[:, :, ::-1]
image -= numpy.array(
[103.939, 116.779, 123.68], dtype=numpy.float32)
image = image.transpose((2, 0, 1))
return image
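# Illustrative only (not part of chainer): prepare converts an RGB image into
# the BGR, mean-subtracted, channel-first float32 array expected by
# VGG16Layers.__call__.  Requires Pillow to be installed.
def _prepare_example():
    img = numpy.zeros((300, 400, 3), dtype=numpy.uint8)  # dummy HWC RGB image
    x = prepare(img)
    return x.shape, x.dtype  # ((3, 224, 224), dtype('float32'))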
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=2)
def _make_npz(path_npz, url, model):
path_caffemodel = download.cached_download(url)
    print('Now loading caffemodel (usually it takes a few minutes)')
VGG16Layers.convert_caffemodel_to_npz(path_caffemodel, path_npz)
npz.load_npz(path_npz, model)
return model
def _retrieve(name, url, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url, model),
lambda path: npz.load_npz(path, model))
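# A minimal usage sketch (illustrative, not part of chainer): extract fc7
# features for a single image.  Assumes Pillow is installed, the pre-trained
# weights can be downloaded, and 'sample.jpg' is a hypothetical input file.
def _vgg16_example():
    model = VGG16Layers()  # downloads and caches the converted model on first use
    img = Image.open('sample.jpg')
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        features = model.extract([img], layers=['fc7'])['fc7']
    return features.shape  # (1, 4096)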
|
|
"""Attempt to generate templates for module reference with Sphinx
XXX - we exclude extension modules
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module`` method.
We get functions and classes by parsing the text of .py files.
Alternatively we could import the modules for discovery, and we'd have
to do that for extension modules. This would involve changing the
``_parse_module`` method to work via import and introspection, and
might involve changing ``discover_modules`` (which determines which
files are modules, and therefore which module URIs will be passed to
``_parse_module``).
NOTE: this is a modified version of a script originally shipped with the
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
project."""
# Stdlib imports
import os
import re
# Functions and classes
class ApiDocWriter(object):
''' Class for automatic detection and parsing of API docs
to Sphinx-parsable reST format'''
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
def __init__(self,
package_name,
rst_extension='.rst',
package_skip_patterns=None,
module_skip_patterns=None,
):
''' Initialize package for parsing
Parameters
----------
package_name : string
Name of the top-level package. *package_name* must be the
name of an importable package
rst_extension : string, optional
Extension for reST files, default '.rst'
package_skip_patterns : None or sequence of {strings, regexps}
Sequence of strings giving URIs of packages to be excluded
Operates on the package path, starting at (including) the
first dot in the package path, after *package_name* - so,
if *package_name* is ``sphinx``, then ``sphinx.util`` will
            result in ``.util`` being passed for searching by these
            regexps.  If None, the default is used.  Default is:
            ['\.tests$']
module_skip_patterns : None or sequence
Sequence of strings giving URIs of modules to be excluded
Operates on the module name including preceding URI path,
back to the first dot after *package_name*. For example
            ``sphinx.util.console`` results in ``.util.console`` being
            the string that is searched.
            If None, the default is used.  Default is:
['\.setup$', '\._']
'''
if package_skip_patterns is None:
package_skip_patterns = ['\\.tests$']
if module_skip_patterns is None:
module_skip_patterns = ['\\.setup$', '\\._']
self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
''' Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
'''
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
''' Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
>>> docwriter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
'''
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
''' Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
'''
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
''' Parse module defined in *uri* '''
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([],[])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f)
f.close()
return functions, classes
def _parse_lines(self, linesource):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
S : string
Contents of API doc
'''
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
if not len(functions) and not len(classes):
print('WARNING: Empty -',uri) # dbg
return ''
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
+ '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
if '.' in uri:
title = 'Module: :mod:`' + uri_short + '`'
else:
title = ':mod:`' + uri_short + '`'
ad += title + '\n' + self.rst_section_levels[2] * len(title)
if len(classes):
ad += '\nInheritance diagram for ``%s``:\n\n' % uri
ad += '.. inheritance-diagram:: %s \n' % uri
ad += ' :parts: 3\n'
ad += '\n.. automodule:: ' + uri + '\n'
ad += '\n.. currentmodule:: ' + uri + '\n'
multi_class = len(classes) > 1
multi_fx = len(functions) > 1
if multi_class:
ad += '\n' + 'Classes' + '\n' + \
self.rst_section_levels[2] * 7 + '\n'
elif len(classes) and multi_fx:
ad += '\n' + 'Class' + '\n' + \
self.rst_section_levels[2] * 5 + '\n'
for c in classes:
ad += '\n:class:`' + c + '`\n' \
+ self.rst_section_levels[multi_class + 2 ] * \
(len(c)+9) + '\n\n'
ad += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
ad += ' :members:\n' \
' :undoc-members:\n' \
' :show-inheritance:\n' \
'\n' \
' .. automethod:: __init__\n'
if multi_fx:
ad += '\n' + 'Functions' + '\n' + \
self.rst_section_levels[2] * 9 + '\n\n'
elif len(functions) and multi_class:
ad += '\n' + 'Function' + '\n' + \
self.rst_section_levels[2] * 8 + '\n\n'
for f in functions:
# must NOT exclude from index to keep cross-refs working
ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
''' Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
'''
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"'
% match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
''' Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> mods = dw.discover_modules()
>>> 'sphinx.util' in mods
True
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
>>>
'''
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path,
dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if (self._uri2path(package_uri) and
self._survives_exclude(package_uri, 'package')):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = '.'.join((root_uri, module_name))
if (self._uri2path(module_uri) and
self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
def write_modules_api(self, modules,outdir):
# write the list
written_modules = []
for m in modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
outfile = os.path.join(outdir,
m + self.rst_extension)
fileobj = open(outfile, 'wt')
fileobj.write(api_str)
fileobj.close()
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
"""Generate API reST files.
Parameters
----------
outdir : string
            Directory name in which to store files.  We create automatic
            filenames for each module.
Returns
-------
None
Notes
-----
Sets self.written_modules to list of written modules
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules,outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from written files
Parameters
----------
        outdir : string
            Directory to which to write the generated index file
froot : string, optional
root (filename without extension) of filename to write to
Defaults to 'gen'. We add ``self.rst_extension``.
relative_to : string
path to which written filenames are relative. This
component of the written file path will be removed from
            outdir, in the generated index.  Default is None, meaning the
            path is left as it is.
"""
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot+self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = outdir.replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path,'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w('.. toctree::\n\n')
for f in self.written_modules:
w(' %s\n' % os.path.join(relpath,f))
idx.close()
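# A minimal driver sketch (illustrative, not part of the original script):
# write one reST file per discovered module of an importable package plus an
# index linking them.  The package name and output directory are assumptions.
if __name__ == '__main__':
    docwriter = ApiDocWriter('sphinx')
    docwriter.write_api_docs('api')
    docwriter.write_index('api', 'gen', relative_to='api')
    print('%d files written' % len(docwriter.written_modules))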
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Snapshot.identifier'
db.delete_column(u'backup_snapshot', 'identifier')
# Deleting field 'Snapshot.export_path'
db.delete_column(u'backup_snapshot', 'export_path')
def backwards(self, orm):
# Adding field 'Snapshot.identifier'
db.add_column(u'backup_snapshot', 'identifier',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Snapshot.export_path'
db.add_column(u'backup_snapshot', 'export_path',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
models = {
u'backup.backupgroup': {
'Meta': {'object_name': 'BackupGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'backup.snapshot': {
'Meta': {'object_name': 'Snapshot'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backup_environment'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Environment']"}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backups'", 'null': 'True', 'to': u"orm['backup.BackupGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backup_instance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Instance']"}),
'is_automatic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purge_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'snapshopt_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'snapshot_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'start_at': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'volume': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backups'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Volume']"})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'offerings'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['backup']
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils
import six
from six import moves
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import manager
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests import tools
from neutron.tests.unit import testlib_api
EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def _get_path(resource, id=None, action=None, fmt=None):
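    """Build a request path like '/<resource>/<id>/<action>.<fmt>'."""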
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if fmt is not None:
path = path + '.%s' % fmt
return path
class ResourceIndexTestCase(base.BaseTestCase):
def test_index_json(self):
index = webtest.TestApp(router.Index({'foo': 'bar'}))
res = index.get('')
self.assertIn('resources', res.json)
self.assertEqual(len(res.json['resources']), 1)
resource = res.json['resources'][0]
self.assertIn('collection', resource)
self.assertEqual(resource['collection'], 'bar')
self.assertIn('name', resource)
self.assertEqual(resource['name'], 'foo')
self.assertIn('links', resource)
self.assertEqual(len(resource['links']), 1)
link = resource['links'][0]
self.assertIn('href', link)
self.assertEqual(link['href'], 'http://localhost/bar')
self.assertIn('rel', link)
self.assertEqual(link['rel'], 'self')
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
self.config_parse()
# Update the plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = True
instance._NeutronPluginBaseV2__native_sorting_support = True
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
        # APIRouter initialization resets the policy module; re-initialize it
policy.init()
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
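# Order-insensitive list comparison, used with _ArgMatcher in tests below.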
def _list_cmp(l1, l2):
return set(l1) == set(l2)
class APIv2TestCase(APIv2TestBase):
def _do_field_list(self, resource, base_fields):
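        """Return base_fields plus policy-required and primary key attrs."""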
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
policy_attrs = [name for (name, info) in attr_info.items()
if info.get('required_by_policy')]
for name, info in attr_info.items():
if info.get('primary_key'):
policy_attrs.append(name)
fields = base_fields
fields.extend(policy_attrs)
return fields
def _get_collection_kwargs(self, skipargs=[], **kwargs):
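        """Build expected plugin kwargs; unskipped args default to mock.ANY."""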
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict(
(arg, mock.ANY) for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo', 'bar'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_great_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance.get_networks.reset_mock()
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(n_exc.Invalid, router.APIRouter)
def test_native_pagination_without_allow_sorting(self):
cfg.CONF.set_override('allow_sorting', False)
instance = self.plugin.return_value
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
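        """List networks as req_tenant_id and verify tenant visibility."""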
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(len(res['networks']), 1)
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in six.iteritems(input_dict):
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(len(res['networks']), 0)
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 2)
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(next_links), 1)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(urlparse.parse_qs(url.query), params)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), params)
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(urlparse.parse_qs(url.query),
expected_params)
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
def test_create_use_defaults(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['admin_state_up'], True)
self.assertEqual(net['status'], "ACTIVE")
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
self._test_create_failure_bad_request('networks', data)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
self._test_create_failure_bad_request('networks', data,
extra_environ=env)
def test_create_no_body(self):
data = {'whoa': None}
self._test_create_failure_bad_request('networks', data)
def test_create_body_string_not_json(self):
data = 'a string'
self._test_create_failure_bad_request('networks', data)
def test_create_body_boolean_not_json(self):
data = True
self._test_create_failure_bad_request('networks', data)
def test_create_no_resource(self):
data = {}
self._test_create_failure_bad_request('networks', data)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
self._test_create_failure_bad_request('ports', data)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
self._test_create_failure_bad_request('networks', data)
def test_create_with_too_long_name(self):
data = {'network': {'name': "12345678" * 32,
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
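        # Echo each posted network back with an empty 'subnets' list added.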
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def _test_create_failure_bad_request(self, resource, data, **kwargs):
res = self.api.post(_get_path(resource, fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True, **kwargs)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_bulk_networks_none(self):
self._test_create_failure_bad_request('networks', {'networks': None})
def test_create_bulk_networks_empty_list(self):
self._test_create_failure_bad_request('networks', {'networks': []})
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {
'tenant_id': six.text_type(tenant_id)
}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(port['network_id'], net_id)
self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
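        """Delete a network as req_tenant_id and check the returned status."""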
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
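        """Show a network as req_tenant_id and check the returned status."""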
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
# Inject rule in policy engine
rules = oslo_policy.Rules.from_dict(
{'get_network:name': "rule:admin_only"})
policy.set_rules(rules, overwrite=False)
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
self.assertNotIn('name', res['network'])
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
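        """Update a network as req_tenant_id and check the returned status."""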
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
        # Ensure the id attribute is included in the fields returned by the
        # GET call in the update procedure.
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
self.assertEqual(res.status_int, expected_code)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase):
def setUp(self):
super(SubresourceTest, self).setUp()
plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(tools.AttributeMapMemento())
self.config_parse()
self.setup_coreplugin(plugin)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
api = router.APIRouter()
SUB_RESOURCES = {}
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCES['dummy'] = {
'collection_name': 'dummies',
'parent': {'collection_name': 'networks',
'member_name': 'network'}
}
RESOURCE_ATTRIBUTE_MAP['dummies'] = {
'foo': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
}
collection_name = SUB_RESOURCES['dummy'].get('collection_name')
resource_name = 'dummy'
parent = SUB_RESOURCES['dummy'].get('parent')
params = RESOURCE_ATTRIBUTE_MAP['dummies']
member_actions = {'mactions': 'GET'}
_plugin = manager.NeutronManager.get_plugin()
controller = v2_base.create_resource(collection_name, resource_name,
_plugin, params,
member_actions=member_actions,
parent=parent,
allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
parent['member_name'],
collection_name)
mapper_kwargs = dict(controller=controller,
path_prefix=path_prefix)
api.map.collection(collection_name, resource_name, **mapper_kwargs)
api.map.resource(collection_name, collection_name,
controller=controller,
parent_resource=parent,
member=member_actions)
self.api = webtest.TestApp(api)
def tearDown(self):
super(SubresourceTest, self).tearDown()
def test_index_sub_resource(self):
instance = self.plugin.return_value
self.api.get('/networks/id1/dummies')
instance.get_network_dummies.assert_called_once_with(mock.ANY,
filters=mock.ANY,
fields=mock.ANY,
network_id='id1')
def test_show_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.get_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
fields=mock.ANY)
def test_create_sub_resource(self):
instance = self.plugin.return_value
body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
self.api.post_json('/networks/id1/dummies', body)
instance.create_network_dummy.assert_called_once_with(mock.ANY,
network_id='id1',
dummy=body)
def test_update_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {'dummy': {'foo': 'bar'}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_update_subresource_to_none(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {'dummy': {}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_delete_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.delete_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
def test_sub_resource_member_actions(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id,
action='mactions'))
instance.mactions.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class V2Views(base.BaseTestCase):
def _view(self, keys, collection, resource):
data = dict((key, 'value') for key in keys)
data['fake'] = 'value'
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
controller = v2_base.Controller(None, collection, resource, attr_info)
res = controller._view(context.get_admin_context(), data)
self.assertNotIn('fake', res)
for key in keys:
self.assertIn(key, res)
def test_network(self):
keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
'tenant_id')
self._view(keys, 'networks', 'network')
def test_port(self):
keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
'device_id', 'admin_state_up', 'tenant_id', 'status')
self._view(keys, 'ports', 'port')
def test_subnet(self):
keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
'ip_version', 'cidr', 'enable_dhcp')
self._view(keys, 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
def setUp(self):
super(NotificationTest, self).setUp()
fake_notifier.reset()
def _resource_op_notifier(self, opname, resource, expected_errors=False):
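        """Run an API operation and verify its start/end notifications."""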
initial_input = {resource: {'name': 'myname'}}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
'.'.join([resource, opname, "end"]))
self.assertEqual(len(fake_notifier.NOTIFICATIONS),
len(expected_events))
for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
self.assertEqual('INFO', msg['priority'])
self.assertEqual(event, msg['event_type'])
self.assertEqual(res.status_int, expected_code)
def test_network_create_notifer(self):
self._resource_op_notifier('create', 'network')
def test_network_delete_notifer(self):
self._resource_op_notifier('delete', 'network')
def test_network_update_notifer(self):
self._resource_op_notifier('update', 'network')
class DHCPNotificationTest(APIv2TestBase):
def setUp(self):
        # This test does not have database support, so quota usage tracking
        # cannot be used.
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(DHCPNotificationTest, self).setUp()
def _test_dhcp_notifier(self, opname, resource, initial_input=None):
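        """Run an API operation and verify the resulting DHCP agent notifications."""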
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI,
'notify') as dhcp_notifier:
if opname == 'create':
res = self.api.post_json(
_get_path('networks'),
initial_input)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input)
expected_code = exc.HTTPOk.code
if opname == 'delete':
res = self.api.delete(_get_path('networks', id=_uuid()))
expected_code = exc.HTTPNoContent.code
expected_item = mock.call(mock.ANY, mock.ANY,
resource + "." + opname + ".end")
if initial_input and resource not in initial_input:
resource += 's'
num = len(initial_input[resource]) if initial_input and isinstance(
initial_input[resource], list) else 1
expected = [expected_item for x in moves.range(num)]
self.assertEqual(expected, dhcp_notifier.call_args_list)
self.assertEqual(num, dhcp_notifier.call_count)
self.assertEqual(expected_code, res.status_int)
def test_network_create_dhcp_notifer(self):
input = {'network': {'name': 'net',
'tenant_id': _uuid()}}
self._test_dhcp_notifier('create', 'network', input)
def test_network_delete_dhcp_notifer(self):
self._test_dhcp_notifier('delete', 'network')
def test_network_update_dhcp_notifer(self):
input = {'network': {'name': 'net'}}
self._test_dhcp_notifier('update', 'network', input)
def test_networks_create_bulk_dhcp_notifer(self):
input = {'networks': [{'name': 'net1',
'tenant_id': _uuid()},
{'name': 'net2',
'tenant_id': _uuid()}]}
self._test_dhcp_notifier('create', 'network', input)
class QuotaTest(APIv2TestBase):
def setUp(self):
        # This test does not have database support, so quota usage tracking
        # cannot be used.
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(QuotaTest, self).setUp()
        # Use mock to let the API use a different QuotaEngine instance for
        # unit tests in this class. This ensures resources are registered
        # again and instantiated with neutron.quota.resource.CountableResource
replacement_registry = resource_registry.ResourceRegistry()
registry_patcher = mock.patch('neutron.quota.resource_registry.'
'ResourceRegistry.get_instance')
mock_registry = registry_patcher.start().return_value
mock_registry.get_resource = replacement_registry.get_resource
mock_registry.resources = replacement_registry.resources
# Register a resource
replacement_registry.register_resource_by_name('network')
def test_create_network_quota(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_no_counts(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.side_effect = (
NotImplementedError())
instance.get_networks.return_value = ["foo"]
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_without_limit(self):
cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.get_networks_count.return_value = 3
res = self.api.post_json(
_get_path('networks'), initial_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase):
def setUp(self):
        # This test does not have database support, so quota usage tracking
        # cannot be used.
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(ExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(tools.AttributeMapMemento())
# Create the default configurations
self.config_parse()
# Update the plugin and extensions path
self.setup_coreplugin(plugin)
cfg.CONF.set_override('api_extensions_path', EXTDIR)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
# Instantiate mock plugin and enable the V2attributes extension
manager.NeutronManager.get_plugin().supported_extension_aliases = (
["v2attrs"])
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def tearDown(self):
super(ExtensionTestCase, self).tearDown()
self.api = None
self.plugin = None
def test_extended_create(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'v2attrs:something_else': "abc"}}
data = {'network': {'admin_state_up': True, 'shared': False}}
data['network'].update(initial_input['network'])
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id,
'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
instance.create_network.assert_called_with(mock.ANY,
network=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
self.assertIn('network', res.json)
net = res.json['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertEqual(net['v2attrs:something'], "123")
self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin(object):
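    """Fake plugin exposing the sub-resource hooks used by SubresourceTest."""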
def get_network_dummies(self, context, network_id,
filters=None, fields=None):
return []
def get_network_dummy(self, context, id, network_id,
fields=None):
return {}
def create_network_dummy(self, context, network_id, dummy):
return {}
def update_network_dummy(self, context, id, network_id, dummy):
return {}
def delete_network_dummy(self, context, id, network_id):
return
def mactions(self, context, id, network_id):
return
class ListArgsTestCase(base.BaseTestCase):
def test_list_args(self):
path = '/?fields=4&foo=3&fields=2&bar=1'
request = webob.Request.blank(path)
expect_val = ['2', '4']
actual_val = api_common.list_args(request, 'fields')
self.assertEqual(sorted(actual_val), expect_val)
def test_list_args_with_empty(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
def test_all_skip_args(self):
path = '/?fields=4&fields=3&fields=2&fields=1'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, None,
["fields"]))
def test_blank_values(self):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
def test_no_attr_info(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, {})
self.assertEqual(actual_val, expect_val)
def test_attr_info_without_conversion(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
def test_attr_info_with_convert_list_to(self):
path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
request = webob.Request.blank(path)
attr_info = {
'foo': {
'convert_list_to': attributes.convert_kvp_list_to_dict,
}
}
expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertOrderedEqual(expect_val, actual_val)
def test_attr_info_with_convert_to(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
class CreateResourceTestCase(base.BaseTestCase):
def test_resource_creation(self):
resource = v2_base.create_resource('fakes', 'fake', None, {})
self.assertIsInstance(resource, webob.dec.wsgify)
|
|
"""An abstraction over virtualenv and Conda environments."""
import codecs
import copy
import hashlib
import itertools
import json
import logging
import os
import shutil
import yaml
from django.conf import settings
from readthedocs.builds.constants import EXTERNAL
from readthedocs.config import PIP, SETUPTOOLS, ParseError
from readthedocs.config import parse as parse_yaml
from readthedocs.config.models import PythonInstall, PythonInstallRequirements
from readthedocs.doc_builder.config import load_yaml_config
from readthedocs.doc_builder.constants import DOCKER_IMAGE
from readthedocs.doc_builder.environments import DockerBuildEnvironment
from readthedocs.doc_builder.loader import get_builder_class
from readthedocs.projects.constants import LOG_TEMPLATE
from readthedocs.projects.models import Feature
log = logging.getLogger(__name__)
class PythonEnvironment:
"""An isolated environment into which Python packages can be installed."""
def __init__(self, version, build_env, config=None):
self.version = version
self.project = version.project
self.build_env = build_env
if config:
self.config = config
else:
self.config = load_yaml_config(version)
# Compute here, since it's used a lot
self.checkout_path = self.project.checkout_path(self.version.slug)
def delete_existing_build_dir(self):
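        """Remove the venv's 'build' directory left from a previous build."""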
# Handle deleting old build dir
build_dir = os.path.join(
self.venv_path(),
'build',
)
if os.path.exists(build_dir):
log.info(
LOG_TEMPLATE,
{
'project': self.project.slug,
'version': self.version.slug,
'msg': 'Removing existing build directory',
}
)
shutil.rmtree(build_dir)
def delete_existing_venv_dir(self):
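        """Remove the existing virtualenv directory, if any."""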
venv_dir = self.venv_path()
# Handle deleting old venv dir
if os.path.exists(venv_dir):
log.info(
LOG_TEMPLATE,
{
'project': self.project.slug,
'version': self.version.slug,
'msg': 'Removing existing venv directory',
}
)
shutil.rmtree(venv_dir)
def install_requirements(self):
"""Install all requirements from the config object."""
for install in self.config.python.install:
if isinstance(install, PythonInstallRequirements):
self.install_requirements_file(install)
if isinstance(install, PythonInstall):
self.install_package(install)
def install_package(self, install):
"""
Install the package using pip or setuptools.
        :param install: An install object from the config module.
:type install: readthedocs.config.models.PythonInstall
"""
if install.method == PIP:
            # Prefix ./ so pip installs from a local path rather than PyPI
local_path = (
os.path.join('.', install.path) if install.path != '.' else install.path
)
extra_req_param = ''
if install.extra_requirements:
extra_req_param = '[{}]'.format(
','.join(install.extra_requirements)
)
self.build_env.run(
self.venv_bin(filename='python'),
'-m',
'pip',
'install',
'--upgrade',
'--upgrade-strategy',
'eager',
*self._pip_cache_cmd_argument(),
'{path}{extra_requirements}'.format(
path=local_path,
extra_requirements=extra_req_param,
),
cwd=self.checkout_path,
bin_path=self.venv_bin(),
)
elif install.method == SETUPTOOLS:
self.build_env.run(
self.venv_bin(filename='python'),
os.path.join(install.path, 'setup.py'),
'install',
'--force',
cwd=self.checkout_path,
bin_path=self.venv_bin(),
)
def _pip_cache_cmd_argument(self):
"""
Return the pip command ``--cache-dir`` or ``--no-cache-dir`` argument.
        The decision depends on whether the directories will be cleaned after
        the build (``RTD_CLEAN_AFTER_BUILD=True`` or the project has the
        ``CLEAN_AFTER_BUILD`` feature enabled) or the project has the
        ``CACHED_ENVIRONMENT`` feature. In those cases there is no need to
        cache anything.
"""
if (
# Cache is going to be removed anyways
settings.RTD_CLEAN_AFTER_BUILD or
self.project.has_feature(Feature.CLEAN_AFTER_BUILD) or
# Cache will be pushed/pulled each time and won't be used because
# packages are already installed in the environment
self.project.has_feature(Feature.CACHED_ENVIRONMENT)
):
return [
'--no-cache-dir',
]
return [
'--cache-dir',
self.project.pip_cache_path,
]
def venv_bin(self, filename=None):
"""
Return path to the virtualenv bin path, or a specific binary.
        :param filename: If specified, add this filename to the returned path
:returns: Path to virtualenv bin or filename in virtualenv bin
"""
parts = [self.venv_path(), 'bin']
if filename is not None:
parts.append(filename)
return os.path.join(*parts)
def environment_json_path(self):
"""Return the path to the ``readthedocs-environment.json`` file."""
return os.path.join(
self.venv_path(),
'readthedocs-environment.json',
)
@property
def is_obsolete(self):
"""
Determine if the environment is obsolete for different reasons.
        It checks the data stored in ``readthedocs-environment.json`` and
        compares it with the one to be used. In particular:
* the Python version (e.g. 2.7, 3, 3.6, etc)
* the Docker image name
* the Docker image hash
* the environment variables hash
:returns: ``True`` when it's obsolete and ``False`` otherwise
:rtype: bool
"""
        # Always return False if we don't have information about which Python
        # version/Docker image was used to create the venv, for backwards
        # compatibility.
if not os.path.exists(self.environment_json_path()):
return False
try:
with open(self.environment_json_path(), 'r') as fpath:
environment_conf = json.load(fpath)
except (IOError, TypeError, KeyError, ValueError):
log.warning(
'Unable to read/parse readthedocs-environment.json file',
)
# We remove the JSON file here to avoid cycling over time with a
# corrupted file.
os.remove(self.environment_json_path())
return True
env_python = environment_conf.get('python', {})
env_build = environment_conf.get('build', {})
env_vars_hash = environment_conf.get('env_vars_hash', None)
# By defaulting non-existent options to ``None`` we force a wipe since
# we don't know how the environment was created
env_python_version = env_python.get('version', None)
env_build_image = env_build.get('image', None)
env_build_hash = env_build.get('hash', None)
if isinstance(self.build_env, DockerBuildEnvironment):
build_image = self.config.build.image or DOCKER_IMAGE
image_hash = self.build_env.image_hash
else:
# e.g. LocalBuildEnvironment
build_image = None
image_hash = None
        # If the user defines the Python version only as a major version
        # (e.g. ``2`` or ``3``) we won't know exactly which version was
        # used to create the venv, but we can still compare it against the
        # new one coming from the project version config.
return any([
env_python_version != self.config.python_full_version,
env_build_image != build_image,
env_build_hash != image_hash,
env_vars_hash != self._get_env_vars_hash(),
])
def _get_env_vars_hash(self):
"""
Returns the sha256 hash of all the environment variables and their values.
        If there are no environment variables configured for the associated
        project, it returns the sha256 hash of an empty string.
"""
m = hashlib.sha256()
env_vars = self.version.project.environment_variables(
public_only=self.version.is_external
)
for variable, value in env_vars.items():
hash_str = f'_{variable}_{value}_'
m.update(hash_str.encode('utf-8'))
return m.hexdigest()
def save_environment_json(self):
"""
        Save data on the builder's disk about the environment used to build the docs.
The data is saved as a ``.json`` file with this information on it:
- python.version
- build.image
- build.hash
- env_vars_hash
"""
data = {
'python': {
'version': self.config.python_full_version,
},
'env_vars_hash': self._get_env_vars_hash(),
}
if isinstance(self.build_env, DockerBuildEnvironment):
build_image = self.config.build.image or DOCKER_IMAGE
data.update({
'build': {
'image': build_image,
'hash': self.build_env.image_hash,
},
})
with open(self.environment_json_path(), 'w') as fpath:
# Compatibility for Py2 and Py3. ``io.TextIOWrapper`` expects
# unicode but ``json.dumps`` returns str in Py2.
fpath.write(str(json.dumps(data)))
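# Illustrative sketch (not part of the original module): the ``is_obsolete``
# check above reduces to comparing a handful of stored values against the
# current build settings; a missing key defaults to ``None``, which always
# compares unequal and therefore forces a wipe. The key names below are
# hypothetical stand-ins for the values compared above.
def _is_obsolete_sketch(stored, current):
    """Hypothetical standalone helper mirroring ``is_obsolete`` above."""
    keys = ('python_version', 'build_image', 'build_hash', 'env_vars_hash')
    return any(stored.get(key) != current.get(key) for key in keys)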
class Virtualenv(PythonEnvironment):
"""
A virtualenv_ environment.
.. _virtualenv: https://virtualenv.pypa.io/
"""
def venv_path(self):
return os.path.join(self.project.doc_path, 'envs', self.version.slug)
def setup_base(self):
"""
Create a virtualenv, invoking ``python -mvirtualenv``.
.. note::
``--no-download`` was removed because of the pip breakage,
it was sometimes installing pip 20.0 which broke everything
https://github.com/readthedocs/readthedocs.org/issues/6585
        It is important not to add empty string arguments; see:
https://github.com/readthedocs/readthedocs.org/issues/7322
"""
cli_args = [
'-mvirtualenv',
]
if self.config.python.use_system_site_packages:
cli_args.append('--system-site-packages')
# Append the positional destination argument
cli_args.append(
self.venv_path(),
)
self.build_env.run(
self.config.python_interpreter,
*cli_args,
# Don't use virtualenv bin that doesn't exist yet
bin_path=None,
# Don't use the project's root, some config files can interfere
cwd=None,
)
def install_core_requirements(self):
"""Install basic Read the Docs requirements into the virtualenv."""
pip_install_cmd = [
self.venv_bin(filename='python'),
'-m',
'pip',
'install',
'--upgrade',
*self._pip_cache_cmd_argument(),
]
        # Install the latest pip and setuptools first,
        # so they are used when installing the other requirements.
pip_version = self.project.get_feature_value(
Feature.DONT_INSTALL_LATEST_PIP,
# 20.3 uses the new resolver by default.
positive='pip<20.3',
negative='pip',
)
cmd = pip_install_cmd + [pip_version, 'setuptools']
self.build_env.run(
*cmd, bin_path=self.venv_bin(), cwd=self.checkout_path
)
requirements = [
'mock==1.0.1',
'pillow==5.4.1',
'alabaster>=0.7,<0.8,!=0.7.5',
'commonmark==0.8.1',
'recommonmark==0.5.0',
]
if self.config.doctype == 'mkdocs':
requirements.append(
self.project.get_feature_value(
Feature.DEFAULT_TO_MKDOCS_0_17_3,
positive='mkdocs==0.17.3',
negative=self.project.get_feature_value(
Feature.USE_MKDOCS_LATEST,
positive='mkdocs<1.1',
negative='mkdocs',
),
),
)
else:
requirements.extend([
self.project.get_feature_value(
Feature.USE_SPHINX_LATEST,
positive='sphinx',
negative='sphinx<2',
),
# If defaulting to Sphinx 2+, we need to push the latest theme
# release as well. `<0.5.0` is not compatible with Sphinx 2+
self.project.get_feature_value(
Feature.USE_SPHINX_LATEST,
positive='sphinx-rtd-theme',
negative='sphinx-rtd-theme<0.5',
),
self.project.get_feature_value(
Feature.USE_SPHINX_RTD_EXT_LATEST,
positive='readthedocs-sphinx-ext',
negative='readthedocs-sphinx-ext<2.2',
),
])
cmd = copy.copy(pip_install_cmd)
if self.config.python.use_system_site_packages:
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
cmd.append('-I')
cmd.extend(requirements)
self.build_env.run(
*cmd,
bin_path=self.venv_bin(),
cwd=self.checkout_path # noqa - no comma here in py27 :/
)
def install_requirements_file(self, install):
"""
Install a requirements file using pip.
        :param install: An install object from the config module.
:type install: readthedocs.config.models.PythonInstallRequirements
"""
requirements_file_path = install.requirements
if requirements_file_path is None:
# This only happens when the config file is from v1.
# We try to find a requirements file.
builder_class = get_builder_class(self.config.doctype)
docs_dir = (
builder_class(
build_env=self.build_env,
python_env=self,
).docs_dir()
)
paths = [docs_dir, '']
req_files = ['pip_requirements.txt', 'requirements.txt']
for path, req_file in itertools.product(paths, req_files):
test_path = os.path.join(self.checkout_path, path, req_file)
if os.path.exists(test_path):
requirements_file_path = os.path.relpath(
test_path,
self.checkout_path,
)
break
if requirements_file_path:
args = [
self.venv_bin(filename='python'),
'-m',
'pip',
'install',
]
if self.project.has_feature(Feature.PIP_ALWAYS_UPGRADE):
args += ['--upgrade']
args += [
'--exists-action=w',
*self._pip_cache_cmd_argument(),
'-r',
requirements_file_path,
]
self.build_env.run(
*args,
cwd=self.checkout_path,
bin_path=self.venv_bin(),
)
def list_packages_installed(self):
"""List packages installed in pip."""
args = [
self.venv_bin(filename='python'),
'-m',
'pip',
'list',
            # Include pre-release versions.
'--pre',
]
self.build_env.run(
*args,
cwd=self.checkout_path,
bin_path=self.venv_bin(),
)
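# Minimal standalone sketch of the environment-variables hashing performed by
# ``PythonEnvironment._get_env_vars_hash`` above (assumes a plain dict input
# and reuses the ``hashlib`` import at the top of this module):
def _env_vars_hash_sketch(env_vars):
    """Hypothetical helper: fold variable/value pairs into one sha256 digest."""
    digest = hashlib.sha256()
    for variable, value in env_vars.items():
        digest.update(f'_{variable}_{value}_'.encode('utf-8'))
    return digest.hexdigest()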
class Conda(PythonEnvironment):
"""
A Conda_ environment.
.. _Conda: https://conda.io/docs/
"""
def venv_path(self):
return os.path.join(self.project.doc_path, 'conda', self.version.slug)
def conda_bin_name(self):
"""
        Decide whether to use ``mamba`` or ``conda`` to create the environment.
        Return ``mamba`` if the project has the ``CONDA_USES_MAMBA`` feature and
        ``conda`` otherwise. This will be the executable name used when
        creating the conda environment.
        ``mamba`` is much faster at solving dependencies and downloading channel
        metadata on startup.
See https://github.com/QuantStack/mamba
"""
if self.project.has_feature(Feature.CONDA_USES_MAMBA):
return 'mamba'
return 'conda'
def _update_conda_startup(self):
"""
        Update ``conda`` before using it for the first time.
        This makes the Docker image use the latest version of ``conda``,
        independently of the Miniconda version it has installed.
"""
self.build_env.run(
# TODO: use ``self.conda_bin_name()`` once ``mamba`` is installed in
# the Docker image
'conda',
'update',
'--yes',
'--quiet',
'--name=base',
'--channel=defaults',
'conda',
cwd=self.checkout_path,
)
def _install_mamba(self):
self.build_env.run(
'conda',
'install',
'--yes',
'--quiet',
'--name=base',
'--channel=conda-forge',
'python=3.7',
'mamba',
cwd=self.checkout_path,
)
def setup_base(self):
conda_env_path = os.path.join(self.project.doc_path, 'conda')
version_path = os.path.join(conda_env_path, self.version.slug)
if os.path.exists(version_path):
# Re-create conda directory each time to keep fresh state
log.info(
LOG_TEMPLATE,
{
'project': self.project.slug,
'version': self.version.slug,
'msg': 'Removing existing conda directory',
},
)
shutil.rmtree(version_path)
if self.project.has_feature(Feature.UPDATE_CONDA_STARTUP):
self._update_conda_startup()
if self.project.has_feature(Feature.CONDA_APPEND_CORE_REQUIREMENTS):
self._append_core_requirements()
self._show_environment_yaml()
# TODO: remove it when ``mamba`` is installed in the Docker image
if self.project.has_feature(Feature.CONDA_USES_MAMBA):
self._install_mamba()
self.build_env.run(
self.conda_bin_name(),
'env',
'create',
'--quiet',
'--name',
self.version.slug,
'--file',
self.config.conda.environment,
bin_path=None, # Don't use conda bin that doesn't exist yet
cwd=self.checkout_path,
)
def _show_environment_yaml(self):
"""Show ``environment.yml`` file in the Build output."""
self.build_env.run(
'cat',
self.config.conda.environment,
cwd=self.checkout_path,
)
def _append_core_requirements(self):
"""
Append Read the Docs dependencies to Conda environment file.
        This helps users pin their dependencies properly without us upgrading
        them in the second ``conda install`` run.
See https://github.com/readthedocs/readthedocs.org/pull/5631
"""
try:
inputfile = codecs.open(
os.path.join(
self.checkout_path,
self.config.conda.environment,
),
encoding='utf-8',
mode='r',
)
environment = parse_yaml(inputfile)
except IOError:
log.warning(
'There was an error while reading Conda environment file.',
)
except ParseError:
log.warning(
'There was an error while parsing Conda environment file.',
)
else:
# Append conda dependencies directly to ``dependencies`` and pip
# dependencies to ``dependencies.pip``
pip_requirements, conda_requirements = self._get_core_requirements()
dependencies = environment.get('dependencies', [])
pip_dependencies = {'pip': pip_requirements}
for item in dependencies:
if isinstance(item, dict) and 'pip' in item:
# NOTE: pip can be ``None``
pip_requirements.extend(item.get('pip') or [])
dependencies.remove(item)
break
dependencies.append(pip_dependencies)
dependencies.extend(conda_requirements)
environment.update({'dependencies': dependencies})
try:
outputfile = codecs.open(
os.path.join(
self.checkout_path,
self.config.conda.environment,
),
encoding='utf-8',
mode='w',
)
yaml.safe_dump(environment, outputfile)
except IOError:
log.warning(
'There was an error while writing the new Conda '
'environment file.',
)
def _get_core_requirements(self):
        # Use conda for the requirements it packages
conda_requirements = [
'mock',
'pillow',
]
if self.project.has_feature(Feature.CONDA_USES_MAMBA):
conda_requirements.append('pip')
# Install pip-only things.
pip_requirements = [
'recommonmark',
]
if self.config.doctype == 'mkdocs':
pip_requirements.append('mkdocs')
else:
pip_requirements.append('readthedocs-sphinx-ext')
conda_requirements.extend(['sphinx', 'sphinx_rtd_theme'])
return pip_requirements, conda_requirements
def install_core_requirements(self):
"""Install basic Read the Docs requirements into the Conda env."""
if self.project.has_feature(Feature.CONDA_APPEND_CORE_REQUIREMENTS):
            # Skip installing core requirements since they were already appended
            # to the user's ``environment.yml`` and installed during the
            # ``conda env create`` step.
return
pip_requirements, conda_requirements = self._get_core_requirements()
# Install requirements via ``conda install`` command if they were
# not appended to the ``environment.yml`` file.
cmd = [
self.conda_bin_name(),
'install',
'--yes',
'--quiet',
'--name',
self.version.slug,
]
cmd.extend(conda_requirements)
self.build_env.run(
*cmd,
cwd=self.checkout_path,
)
# Install requirements via ``pip install``
pip_cmd = [
self.venv_bin(filename='python'),
'-m',
'pip',
'install',
'-U',
*self._pip_cache_cmd_argument(),
]
pip_cmd.extend(pip_requirements)
self.build_env.run(
*pip_cmd,
bin_path=self.venv_bin(),
cwd=self.checkout_path # noqa - no comma here in py27 :/
)
def install_requirements_file(self, install):
# as the conda environment was created by using the ``environment.yml``
# defined by the user, there is nothing to update at this point
pass
def list_packages_installed(self):
"""List packages installed in conda."""
args = [
self.conda_bin_name(),
'list',
'--name',
self.version.slug,
]
self.build_env.run(
*args,
cwd=self.checkout_path,
bin_path=self.venv_bin(),
)
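# Standalone sketch (hypothetical data, not used by the classes above) of the
# ``environment.yml`` merge performed by ``Conda._append_core_requirements``:
# pip packages are folded into the single ``pip`` entry of ``dependencies``
# and conda packages are appended directly, preserving the user's own pins.
def _merge_core_requirements_sketch(environment, pip_requirements, conda_requirements):
    """Hypothetical helper mirroring the merge logic above."""
    dependencies = environment.get('dependencies', [])
    pip_dependencies = {'pip': list(pip_requirements)}
    for item in dependencies:
        if isinstance(item, dict) and 'pip' in item:
            # NOTE: the existing pip entry can be ``None``
            pip_dependencies['pip'].extend(item.get('pip') or [])
            dependencies.remove(item)
            break
    dependencies.append(pip_dependencies)
    dependencies.extend(conda_requirements)
    environment['dependencies'] = dependencies
    return environment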
|
|
"""
Neutron module for interacting with OpenStack Neutron
.. versionadded:: 2018.3.0
:depends: shade
Example configuration
.. code-block:: yaml
neutron:
cloud: default
.. code-block:: yaml
neutron:
auth:
username: admin
password: password123
user_domain_name: mydomain
project_name: myproject
project_domain_name: myproject
auth_url: https://example.org:5000/v3
identity_api_version: 3
"""
HAS_SHADE = False
try:
import shade
HAS_SHADE = True
except ImportError:
pass
__virtualname__ = "neutronng"
def __virtual__():
"""
Only load this module if shade python module is installed
"""
if HAS_SHADE:
return __virtualname__
return (
False,
"The neutronng execution module failed to load: shade python module is not available",
)
def compare_changes(obj, **kwargs):
"""
    Compare two dicts, returning only the keys that exist in the first dict
    and have different values in the second one
"""
changes = {}
for key, value in obj.items():
if key in kwargs:
if value != kwargs[key]:
changes[key] = kwargs[key]
return changes
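# Illustrative usage (hypothetical values, shown as comments so nothing runs
# at import time): only keys present in the existing object *and* differing in
# the request are reported, which is what an idempotent state needs to decide
# whether an update call is required.
#
#     existing = {'name': 'net1', 'shared': False, 'admin_state_up': True}
#     compare_changes(existing, shared=True, mtu=1450)
#     # -> {'shared': True}   ('mtu' is ignored; it is not in the first dict)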
def _clean_kwargs(keep_name=False, **kwargs):
"""
    Sanitize the arguments for use with shade
"""
if "name" in kwargs and not keep_name:
kwargs["name_or_id"] = kwargs.pop("name")
return __utils__["args.clean_kwargs"](**kwargs)
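# Illustrative effect (hypothetical values): shade lookup calls expect
# ``name_or_id``, so ``name`` is renamed unless ``keep_name=True``, and Salt's
# ``args.clean_kwargs`` utility strips internal dunder keys such as
# ``__pub_fun`` before the kwargs are handed to shade.
#
#     _clean_kwargs(name='net1', shared=True)
#     # -> {'name_or_id': 'net1', 'shared': True}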
def setup_clouds(auth=None):
"""
Call functions to create Shade cloud objects in __context__ to take
advantage of Shade's in-memory caching across several states
"""
get_operator_cloud(auth)
get_openstack_cloud(auth)
def get_operator_cloud(auth=None):
"""
Return an operator_cloud
"""
if auth is None:
auth = __salt__["config.option"]("neutron", {})
if "shade_opcloud" in __context__:
if __context__["shade_opcloud"].auth == auth:
return __context__["shade_opcloud"]
__context__["shade_opcloud"] = shade.operator_cloud(**auth)
return __context__["shade_opcloud"]
def get_openstack_cloud(auth=None):
"""
Return an openstack_cloud
"""
if auth is None:
auth = __salt__["config.option"]("neutron", {})
if "shade_oscloud" in __context__:
if __context__["shade_oscloud"].auth == auth:
return __context__["shade_oscloud"]
__context__["shade_oscloud"] = shade.openstack_cloud(**auth)
return __context__["shade_oscloud"]
def network_create(auth=None, **kwargs):
"""
Create a network
name
Name of the network being created
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_create name=network2 \
shared=True admin_state_up=True external=True
salt '*' neutronng.network_create name=network3 \
provider='{"network_type": "vlan",\
"segmentation_id": "4010",\
"physical_network": "provider"}' \
project_id=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_network(**kwargs)
def network_delete(auth=None, **kwargs):
"""
Delete a network
name_or_id
Name or ID of the network being deleted
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_delete name_or_id=network1
salt '*' neutronng.network_delete name_or_id=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_network(**kwargs)
def list_networks(auth=None, **kwargs):
"""
List networks
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.list_networks
salt '*' neutronng.list_networks \
filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}'
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.list_networks(**kwargs)
def network_get(auth=None, **kwargs):
"""
Get a single network
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_get name=XLB4
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.get_network(**kwargs)
def subnet_create(auth=None, **kwargs):
"""
Create a subnet
network_name_or_id
The unique name or ID of the attached network. If a non-unique name is
supplied, an exception is raised.
cidr
The CIDR
ip_version
The IP version, which is 4 or 6.
enable_dhcp : False
Set to ``True`` if DHCP is enabled and ``False`` if disabled
subnet_name
The name of the subnet
tenant_id
The ID of the tenant who owns the network. Only administrative users
can specify a tenant ID other than their own.
allocation_pools
A list of dictionaries of the start and end addresses for the
allocation pools.
gateway_ip
The gateway IP address. When you specify both ``allocation_pools`` and
``gateway_ip``, you must ensure that the gateway IP does not overlap
with the specified allocation pools.
disable_gateway_ip : False
Set to ``True`` if gateway IP address is disabled and ``False`` if
enabled. It is not allowed with ``gateway_ip``.
dns_nameservers
A list of DNS name servers for the subnet
host_routes
A list of host route dictionaries for the subnet
ipv6_ra_mode
IPv6 Router Advertisement mode. Valid values are ``dhcpv6-stateful``,
``dhcpv6-stateless``, or ``slaac``.
ipv6_address_mode
IPv6 address mode. Valid values are ``dhcpv6-stateful``,
``dhcpv6-stateless``, or ``slaac``.
use_default_subnetpool
If ``True``, use the default subnetpool for ``ip_version`` to obtain a
CIDR. It is required to pass ``None`` to the ``cidr`` argument when
enabling this option.
CLI Example:
.. code-block:: bash
salt '*' neutronng.subnet_create network_name_or_id=network1
subnet_name=subnet1
salt '*' neutronng.subnet_create subnet_name=subnet2\
network_name_or_id=network2 enable_dhcp=True \
allocation_pools='[{"start": "192.168.199.2",\
"end": "192.168.199.254"}]'\
gateway_ip='192.168.199.1' cidr=192.168.199.0/24
salt '*' neutronng.subnet_create network_name_or_id=network1 \
subnet_name=subnet1 dns_nameservers='["8.8.8.8", "8.8.8.7"]'
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.create_subnet(**kwargs)
def subnet_update(auth=None, **kwargs):
"""
Update a subnet
name_or_id
Name or ID of the subnet to update
subnet_name
The new name of the subnet
enable_dhcp
Set to ``True`` if DHCP is enabled and ``False`` if disabled
gateway_ip
The gateway IP address. When you specify both allocation_pools and
gateway_ip, you must ensure that the gateway IP does not overlap with
the specified allocation pools.
disable_gateway_ip : False
Set to ``True`` if gateway IP address is disabled and False if enabled.
It is not allowed with ``gateway_ip``.
allocation_pools
A list of dictionaries of the start and end addresses for the
allocation pools.
dns_nameservers
A list of DNS name servers for the subnet
host_routes
A list of host route dictionaries for the subnet
.. code-block:: bash
salt '*' neutronng.subnet_update name=subnet1 subnet_name=subnet2
salt '*' neutronng.subnet_update name=subnet1 dns_nameservers='["8.8.8.8", "8.8.8.7"]'
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.update_subnet(**kwargs)
def subnet_delete(auth=None, **kwargs):
"""
Delete a subnet
name
        Name or ID of the subnet to delete
CLI Example:
.. code-block:: bash
salt '*' neutronng.subnet_delete name=subnet1
salt '*' neutronng.subnet_delete \
name=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_subnet(**kwargs)
def list_subnets(auth=None, **kwargs):
"""
List subnets
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.list_subnets
salt '*' neutronng.list_subnets \
filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}'
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.list_subnets(**kwargs)
def subnet_get(auth=None, **kwargs):
"""
Get a single subnet
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.subnet_get name=subnet1
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.get_subnet(**kwargs)
def security_group_create(auth=None, **kwargs):
"""
    Create a security group. Use ``security_group_get`` to create the default security group.
project_id
The project ID on which this security group will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_create name=secgroup1 \
description="Very secure security group"
salt '*' neutronng.security_group_create name=secgroup1 \
description="Very secure security group" \
project_id=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_security_group(**kwargs)
def security_group_update(secgroup=None, auth=None, **kwargs):
"""
Update a security group
secgroup
Name, ID or Raw Object of the security group to update
name
New name for the security group
description
New description for the security group
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_update secgroup=secgroup1 \
description="Very secure security group"
salt '*' neutronng.security_group_update secgroup=secgroup1 \
description="Very secure security group" \
project_id=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.update_security_group(secgroup, **kwargs)
def security_group_delete(auth=None, **kwargs):
"""
Delete a security group
name_or_id
The name or unique ID of the security group
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_delete name_or_id=secgroup1
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_security_group(**kwargs)
def security_group_get(auth=None, **kwargs):
"""
Get a single security group. This will create a default security group
if one does not exist yet for a particular project id.
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_get \
name=1dcac318a83b4610b7a7f7ba01465548
salt '*' neutronng.security_group_get \
name=default\
filters='{"tenant_id":"2e778bb64ca64a199eb526b5958d8710"}'
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.get_security_group(**kwargs)
def security_group_rule_create(auth=None, **kwargs):
"""
Create a rule in a security group
secgroup_name_or_id
The security group name or ID to associate with this security group
rule. If a non-unique group name is given, an exception is raised.
port_range_min
The minimum port number in the range that is matched by the security
group rule. If the protocol is TCP or UDP, this value must be less than
or equal to the port_range_max attribute value. If nova is used by the
cloud provider for security groups, then a value of None will be
transformed to -1.
port_range_max
The maximum port number in the range that is matched by the security
group rule. The port_range_min attribute constrains the port_range_max
attribute. If nova is used by the cloud provider for security groups,
then a value of None will be transformed to -1.
protocol
The protocol that is matched by the security group rule. Valid values
are ``None``, ``tcp``, ``udp``, and ``icmp``.
remote_ip_prefix
The remote IP prefix to be associated with this security group rule.
This attribute matches the specified IP prefix as the source IP address
of the IP packet.
remote_group_id
The remote group ID to be associated with this security group rule
direction
Either ``ingress`` or ``egress``; the direction in which the security
group rule is applied. For a compute instance, an ingress security
group rule is applied to incoming (ingress) traffic for that instance.
An egress rule is applied to traffic leaving the instance
ethertype
Must be IPv4 or IPv6, and addresses represented in CIDR must match the
ingress or egress rules
project_id
Specify the project ID this security group will be created on
(admin-only)
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_rule_create\
secgroup_name_or_id=secgroup1
salt '*' neutronng.security_group_rule_create\
secgroup_name_or_id=secgroup2 port_range_min=8080\
port_range_max=8080 direction='egress'
salt '*' neutronng.security_group_rule_create\
secgroup_name_or_id=c0e1d1ce-7296-405e-919d-1c08217be529\
protocol=icmp project_id=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.create_security_group_rule(**kwargs)
def security_group_rule_delete(auth=None, **kwargs):
"""
    Delete a security group rule
name_or_id
The unique ID of the security group rule
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_rule_delete name_or_id=1dcac318a83b4610b7a7f7ba01465548
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_security_group_rule(**kwargs)
|
|
import logging
import requests
from django.shortcuts import render
from ..utils.lgUtils import *
from ..utils.earthquakesUtils import *
from ..utils.cylinders.cylindersHeatMap import *
from django.http import HttpResponse
from json.decoder import JSONDecodeError
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger("django")
def getEarthquakesExact(request):
start_time = time.time()
logger.info("Getting Earthquakes")
dateFrom = request.POST['dateFrom']
dateToNowParam = request.POST.get('dateToNow', 0)
if dateToNowParam == str(1):
dateTo = time.strftime("%Y-%m-%d")
else:
dateTo = request.POST['dateTo']
createTourParam = request.POST.get('createTour', 0)
createTour = createTourParam == str(1)
sparkIp = getSparkIp()
max_lat = request.POST['max_lat']
min_lat = request.POST['min_lat']
max_lon = request.POST['max_lon']
min_lon = request.POST['min_lon']
center_lat = (float(max_lat) + float(min_lat)) / 2
center_lon = (float(max_lon) + float(min_lon)) / 2
try:
response = requests.get('http://' + sparkIp + ':5000/getEarthquakesInterval?dateFrom=' + dateFrom +
'&dateTo=' + dateTo + '&max_lat=' + max_lat +
'&min_lat=' + min_lat + '&max_lon=' + max_lon + '&min_lon=' + min_lon)
jsonData = json.loads(response.json())
except requests.exceptions.ConnectionError:
return render(request, '500.html')
except JSONDecodeError:
return render(request, '500.html')
numberObtained = len(jsonData)
logger.info("Obtained " + str(numberObtained) + " earthquakes")
logger.debug("--- %s getting the data---" % (time.time() - start_time))
if numberObtained == 0:
return render(request, 'floybd/earthquakes/viewEarthquakes.html',
{'noData': True})
start_time = time.time()
fileUrl = createKml(jsonData, createTour, numberObtained, request)
logger.debug("--- %s seconds creating KML---" % (time.time() - start_time))
return render(request, 'floybd/earthquakes/viewEarthquakes.html',
{'kml': fileUrl, 'center_lat': center_lat,
'center_lon': center_lon, 'dateFrom': dateFrom, 'dateTo': dateTo,
'createTour': createTourParam})
def getEarthquakesApprox(request):
start_time = time.time()
logger.info("Getting Earthquakes with quadrants")
dateFrom = request.POST['dateFrom']
dateToNowParam = request.POST.get('dateToNow', 0)
if dateToNowParam == str(1):
dateTo = time.strftime("%Y-%m-%d")
else:
dateTo = request.POST['dateTo']
createTourParam = request.POST.get('createTour', 0)
createTour = createTourParam == str(1)
sparkIp = getSparkIp()
max_lat = request.POST['max_lat']
min_lat = request.POST['min_lat']
maxY = getYQuadrant(float(max_lat))
minY = getYQuadrant(float(min_lat))
if minY > maxY:
tmpAux = minY
minY = maxY
maxY = tmpAux
max_lon = request.POST['max_lon']
min_lon = request.POST['min_lon']
maxX = getXQuadrant(float(max_lon))
minX = getXQuadrant(float(min_lon))
if minX > maxX:
tmpAux = minX
minX = maxX
maxX = tmpAux
center_lat = (float(max_lat) + float(min_lat)) / 2
center_lon = (float(max_lon) + float(min_lon)) / 2
logger.debug("maxY: " + str(maxY))
logger.debug("minY: " + str(minY))
logger.debug("maxX: " + str(maxX))
logger.debug("minX: " + str(minX))
try:
response = requests.get('http://' + sparkIp + ':5000/getEarthquakesIntervalWithQuadrants?dateFrom=' + dateFrom
+ '&dateTo=' + dateTo
+ '&maxY=' + str(maxY)
+ '&minY=' + str(minY)
+ '&maxX=' + str(maxX)
+ '&minX=' + str(minX))
jsonData = json.loads(response.json())
except requests.exceptions.ConnectionError:
return render(request, '500.html')
except JSONDecodeError:
return render(request, '500.html')
numberObtained = len(jsonData)
logger.info("Obtained " + str(numberObtained) + " earthquakes")
logger.debug("--- %s getting the data---" % (time.time() - start_time))
if numberObtained == 0:
return render(request, 'floybd/earthquakes/viewEarthquakes.html',
{'noData': True})
start_time = time.time()
fileUrl = createKml(jsonData, createTour, numberObtained, request)
logger.debug("--- %s seconds creating KML---" % (time.time() - start_time))
return render(request, 'floybd/earthquakes/viewEarthquakes.html',
{'kml': fileUrl, 'center_lat': center_lat,
'center_lon': center_lon, 'dateFrom': dateFrom, 'dateTo': dateTo,
'createTour': createTourParam})
def getHeatMap(request):
logger.info("Getting Heat Map")
dateFrom = request.POST['dateFrom']
dateToNowParam = request.POST.get('dateToNow', 0)
if dateToNowParam == str(1):
dateTo = time.strftime("%Y-%m-%d")
else:
dateTo = request.POST['dateTo']
sparkIp = getSparkIp()
try:
response = requests.get('http://' + sparkIp + ':5000/getEarthquakesInterval?dateFrom=' + dateFrom +
'&dateTo=' + dateTo)
jsonData = json.loads(response.json())
except requests.exceptions.ConnectionError:
return render(request, '500.html')
except JSONDecodeError:
return render(request, '500.html')
numberObtained = len(jsonData)
logger.info("Obtained " + str(numberObtained) + " earthquakes")
if numberObtained == 0:
return render(request, 'floybd/earthquakes/viewEarthquakesHeatMap.html',
{'noData': True})
data = getEartquakesArray(jsonData, False)
return render(request, 'floybd/earthquakes/viewEarthquakesHeatMap.html', {'data': data, 'dateFrom': dateFrom,
'dateTo': dateTo,
'numberObtained': numberObtained})
def getEartquakesArray(jsonData, includeDescription):
data = []
for row in jsonData:
if includeDescription:
data.append([row.get("latitude"), row.get("longitude"), row.get("magnitude"), row.get("place"),
row.get("fecha")])
else:
data.append([row.get("latitude"), row.get("longitude"), row.get("magnitude")])
return data
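# Example of the rows produced above (hypothetical values; ``fecha`` is a
# millisecond timestamp):
#     includeDescription=False -> [[34.2, -118.5, 4.1], ...]
#     includeDescription=True  -> [[34.2, -118.5, 4.1, 'Near LA', 1494000000000], ...]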
def generateHeapMapKml(request):
logger.info("Generating HeatMap")
dateFrom = request.POST['dateFrom']
dateToNowParam = request.POST.get('dateToNow', 0)
if dateToNowParam == str(1):
dateTo = time.strftime("%Y-%m-%d")
else:
dateTo = request.POST['dateTo']
try:
response = requests.get('http://' + getSparkIp() + ':5000/getEarthquakesInterval?dateFrom=' + dateFrom +
'&dateTo=' + dateTo)
jsonData = json.loads(response.json())
except requests.exceptions.ConnectionError:
return render(request, '500.html')
except JSONDecodeError:
return render(request, '500.html')
dataMapsJs = getEartquakesArray(jsonData, False)
numberObtained = len(jsonData)
logger.info("Obtained " + str(numberObtained) + " earthquakes")
if numberObtained == 0:
return render(request, 'floybd/earthquakes/viewEarthquakesHeatMap.html',
{'noData': True})
data = getEartquakesArray(jsonData, True)
fileName = "earthquakesHeatMap.kmz"
currentDir = os.getcwd()
dir1 = os.path.join(currentDir, "static/kmls")
dirPath2 = os.path.join(dir1, fileName)
millis = int(round(time.time() * 1000))
cylinder = CylindersKmlHeatmap(fileName, data)
cylinder.makeKMZ(dirPath2)
command = "echo 'http://" + getDjangoIp() + ":" + getDjangoPort(request) + \
"/static/demos/lastWeekEarthquakesHeatMap.kmz?a=" + str(millis) + \
"\n'http://" + getDjangoIp() + ":" + getDjangoPort(request) + \
"/static/demos/WorldTour.kmz?a=" + str(millis) + " | sshpass -p " + getLGPass() \
+ " ssh lg@" + getLGIp() + " 'cat - > /var/www/html/kmls.txt'"
os.system(command)
time.sleep(2)
sendFlyToToLG(20.21078636181624, -111.3376967642952, 0, 1.372480247294665, 0, 14562650.06788917, 2)
time.sleep(2)
playTour("WorldTour")
return render(request, 'floybd/earthquakes/viewEarthquakesHeatMap.html', {'data': dataMapsJs, 'dateFrom': dateFrom,
'dateTo': dateTo,
'numberObtained': numberObtained})
def populateInfoWindow(row, jsonData):
latitude = row["latitude"]
longitude = row["longitude"]
depth = row["depth"]
magnitude = row["magnitude"]
fecha = row["fecha"]
datetimeStr = datetime.datetime.fromtimestamp(int(fecha) / 1000).strftime('%Y-%m-%d %H:%M:%S')
url = jsonData.get("properties").get("url")
contentString = '<link rel = "stylesheet" href = ' \
'"https://code.getmdl.io/1.3.0/material.blue_grey-red.min.css" / > ' + \
'<link rel="stylesheet" href="https://fonts.googleapis.com/css?' \
'family=Roboto:regular,bold,italic,thin,light,bolditalic,black,medium&lang=en"/>' + \
'<style>body{max-width:560px !important;width:560px !important;}</style>' + \
'<table max-width="560" width="560" style="font-family: Roboto;"><tr><td>' + \
'<div id="content">' + '<div id="siteNotice">' + '</div>' + \
'<h1 id="firstHeading" class="thirdHeading" style="text-align:center">' + \
str(row["place"]) + '</h1>' + \
'<h2 id="firstHeading" class="thirdHeading" style="text-align:center">Date: ' + \
str(datetimeStr) + '</h2>' + \
'<h3 id="firstHeading" class="thirdHeading" style="text-align:center">Magnitude: ' + \
str(magnitude) + '</h3>' + \
'<div id="bodyContent" style="text-align: center;">' + \
'<div class="demo-charts mdl-color--white mdl-shadow--2dp mdl-cell' \
' mdl-cell--6-col mdl-grid" style="width: 98%">' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;"><b>Latitude</b>:</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;">' + str(latitude) + '</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;"><b>Longitude</b>:</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;">' + str(longitude) + '</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;"><b>Depth</b>:</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;">' + str(depth) + ' km</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;">More Info :</p>' + \
'</div>' + \
'<div class="mdl-cell mdl-cell--3-col mdl-layout-spacer">' + \
'<p style="font-size:1.5em;color:#474747;line-height:0.5;"><a href=' + str(url) + \
' target="_blank">USGS</a></p>' + \
'</div>' + \
'</div>' + \
'</div></div>' + \
'</td></tr></table>'
return contentString
def createKml(jsonData, createTour, numberObtained, request):
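    """Build a KML (optionally with a tour) from the earthquake rows and return its URL."""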
kml = simplekml.Kml()
tour = kml.newgxtour(name="EarthquakesTour")
playlist = tour.newgxplaylist()
flyToDuration = 3
balloonDuration = 1
if numberObtained > 1000:
balloonDuration = numberObtained / 1000
logger.info("Default duration: " + str(balloonDuration))
for row in jsonData:
place = row["place"]
latitude = row["latitude"]
longitude = row["longitude"]
magnitude = row["magnitude"]
fecha = row["fecha"]
datetimeStr = datetime.datetime.fromtimestamp(int(fecha) / 1000).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
try:
geoJson = replaceJsonString(str(row["geojson"]))
infowindow = populateInfoWindow(row, geoJson)
except JSONDecodeError:
logger.error('Error decoding json')
logger.error(str(row["geojson"]))
continue
try:
if magnitude is not None:
absMagnitude = abs(float(magnitude))
color = simplekml.Color.grey
if absMagnitude <= 2:
color = simplekml.Color.green
elif 2 < absMagnitude <= 5:
color = simplekml.Color.orange
elif absMagnitude > 5:
color = simplekml.Color.red
if createTour:
playlist.newgxwait(gxduration=3 * balloonDuration)
polycircle = polycircles.Polycircle(latitude=latitude, longitude=longitude,
radius=2000 * absMagnitude, number_of_vertices=100)
pol = kml.newpolygon(name="", description=infowindow, outerboundaryis=polycircle.to_kml())
pol.style.polystyle.color = color
pol.style.polystyle.fill = 0
pol.style.polystyle.outline = 1
pol.style.linestyle.color = color
pol.style.linestyle.width = 10
pol.style.balloonstyle.bgcolor = simplekml.Color.lightblue
pol.style.balloonstyle.text = "$[description]"
if createTour:
pol.visibility = 0
# Fly To the atmosphere
flyto = playlist.newgxflyto(gxduration=flyToDuration,
gxflytomode=simplekml.GxFlyToMode.smooth)
flyto.camera.longitude = longitude
flyto.camera.latitude = latitude
flyto.camera.altitude = 15000000
flyto.camera.range = 15000000
flyto.camera.tilt = 0
playlist.newgxwait(gxduration=flyToDuration)
# Go Back To the point
flyto = playlist.newgxflyto(gxduration=flyToDuration,
gxflytomode=simplekml.GxFlyToMode.smooth)
flyto.camera.longitude = longitude
flyto.camera.latitude = latitude
flyto.camera.altitude = 100000
flyto.camera.range = 100000
flyto.camera.tilt = 0
playlist.newgxwait(gxduration=flyToDuration)
simulateEarthquake(playlist, latitude, longitude, absMagnitude)
animatedupdateshow = playlist.newgxanimatedupdate(gxduration=balloonDuration / 10)
animatedupdateshow.update.change = '<Placemark targetId="{0}">' \
'<visibility>1</visibility></Placemark>' \
.format(pol.placemark.id)
for i in range(1, 11):
polycircleAux = polycircles.Polycircle(latitude=latitude, longitude=longitude,
radius=(200 * i) * absMagnitude, number_of_vertices=100)
polAux = kml.newpolygon(name="", description="", outerboundaryis=polycircleAux.to_kml())
polAux.style.polystyle.color = color
polAux.style.polystyle.fill = 1
polAux.style.polystyle.outline = 1
polAux.style.linestyle.color = color
polAux.style.linestyle.width = 1
polAux.visibility = 0
polAux.style.balloonstyle.displaymode = simplekml.DisplayMode.hide
polAux.style.balloonstyle.text = "$[description]"
animatedupdateshow = playlist.newgxanimatedupdate(gxduration=balloonDuration / 10)
animatedupdateshow.update.change = '<Placemark targetId="{0}">' \
'<visibility>1</visibility></Placemark>' \
.format(polAux.placemark.id)
animatedupdatehide = playlist.newgxanimatedupdate(gxduration=balloonDuration / 10)
animatedupdatehide.update.change = '<Placemark targetId="{0}">' \
'<visibility>0</visibility></Placemark>' \
.format(polAux.placemark.id)
playlist.newgxwait(gxduration=balloonDuration / 10)
animatedupdateshow = playlist.newgxanimatedupdate(gxduration=balloonDuration * 2)
animatedupdateshow.update.change = '<Placemark targetId="{0}"><visibility>1</visibility>' \
'<gx:balloonVisibility>1</gx:balloonVisibility></Placemark>' \
.format(pol.placemark.id)
playlist.newgxwait(gxduration=10)
animatedupdatehide = playlist.newgxanimatedupdate(gxduration=balloonDuration * 2)
animatedupdatehide.update.change = '<Placemark targetId="{0}">' \
'<gx:balloonVisibility>0</gx:balloonVisibility></Placemark>' \
.format(pol.placemark.id)
else:
pol.visibility = 1
else:
earthquake = kml.newpoint(name=place,
description=infowindow,
coords=[(longitude, latitude)])
earthquake.timestamp.when = datetimeStr
        except ValueError:
            kml.newpoint(name=place, description=infowindow, coords=[(longitude, latitude)])
            # absMagnitude may be unbound here if float(magnitude) failed,
            # so log the raw value instead
            logger.error("Invalid magnitude value: " + str(magnitude))
if createTour:
playlist.newgxwait(gxduration=3 * balloonDuration)
fileName = "earthquakes.kml"
currentDir = os.getcwd()
dir1 = os.path.join(currentDir, "static/kmls")
dirPath2 = os.path.join(dir1, fileName)
logger.info("Saving kml: " + str(dirPath2))
kml.save(dirPath2)
ip = getDjangoIp()
fileUrl = "http://" + ip + ":" + getDjangoPort(request) + "/static/kmls/" + fileName
return fileUrl
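# Hypothetical standalone summary (not called by the views above) of the color
# coding used in createKml: earthquakes are drawn as circles whose radius grows
# with magnitude and whose outline color encodes severity.
def magnitudeColorSketch(magnitude):
    """Illustrative helper mirroring the branches in createKml above."""
    absMagnitude = abs(float(magnitude))
    color = simplekml.Color.grey
    if absMagnitude <= 2:
        color = simplekml.Color.green
    elif 2 < absMagnitude <= 5:
        color = simplekml.Color.orange
    elif absMagnitude > 5:
        color = simplekml.Color.red
    return color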
def sendConcreteValuesToLG(request):
createTourParam = request.POST.get('createTour', 0)
createTour = createTourParam == str(1)
center_lat = request.POST['center_lat']
center_lon = request.POST['center_lon']
ip = getDjangoIp()
fileName = "earthquakes.kml"
fileUrl = "http://" + ip + ":" + getDjangoPort(request) + "/static/kmls/" + fileName
sendKmlToLG(fileName, request)
sendFlyToToLG(center_lat, center_lon, 15000000, 0, 0, 15000000, 2)
if createTour:
currentDir = os.getcwd()
dir1 = os.path.join(currentDir, "static/kmls")
dirPath2 = os.path.join(dir1, fileName)
fileBytes = os.path.getsize(dirPath2)
megas = (fileBytes / 1024) / 1000
logger.info("Size of the KML:" + str(os.path.getsize(dirPath2)))
waitTime = megas / 5
logger.info("Waiting to start the tour..." + str(waitTime) + " seconds")
time.sleep(waitTime)
logger.info("Starting the tour!")
playTour("EarthquakesTour")
return render(request, 'floybd/earthquakes/viewEarthquakes.html',
{'kml': fileUrl, 'center_lat': center_lat,
'center_lon': center_lon})
@csrf_exempt
def demoLastWeekEarthquakesHeatmap(request):
millis = int(round(time.time() * 1000))
command = "echo 'http://" + getDjangoIp() + ":" + getDjangoPort(request) + \
"/static/demos/lastWeekEarthquakesHeatMap.kmz?a=" + str(millis) + \
"\n'http://" + getDjangoIp() + ":" + getDjangoPort(request) + \
"/static/demos/WorldTour.kmz?a=" + str(millis) + " | sshpass -p " + getLGPass() \
+ " ssh lg@" + getLGIp() + " 'cat - > /var/www/html/kmls.txt'"
os.system(command)
time.sleep(2)
sendFlyToToLG(20.21078636181624, -111.3376967642952, 0, 0, 0, 6000000, 2)
time.sleep(2)
playTour("WorldTour")
return HttpResponse(status=204)
@csrf_exempt
def demoLastWeekEarthquakes(request):
sendDemoKmlToLG("lastWeekEarthquakes.kmz", request)
time.sleep(10)
playTour("LastWeekEarthquakesTour")
return HttpResponse(status=204)
def simulateEarthquake(playlist, latitude, longitude, magnitude):
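    """Simulate shaking by bouncing the camera tilt between 5 and 0 for 10*magnitude frames."""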
for i in range(0, int(10 * magnitude)):
bounce = 5 if (i % 2 == 0) else 0
flyto = playlist.newgxflyto(gxduration=0.01)
flyto.camera.longitude = longitude
flyto.camera.latitude = latitude
flyto.camera.altitude = 150000
flyto.camera.range = 150000
flyto.camera.tilt = bounce
playlist.newgxwait(gxduration=0.01)
|
|
################################
# EvoMan FrameWork - V1.0 2016 #
# Author: Karine Miras #
# karine.smiras@gmail.com #
################################
import sys
import numpy
import struct
import binascii
import Base
from Base.SpriteConstants import *
from Base.SpriteDefinition import *
from sensors import *
# player projectile
class Bullet_p(pygame.sprite.Sprite):
image = pygame.image.load('evoman/images/bullet_r.png')
def __init__(self, location, direction, n_twist, *groups):
super(Bullet_p, self).__init__(*groups)
self.rect = pygame.rect.Rect(location, self.image.get_size())
self.direction = direction
self.n_twist = n_twist
        # selects the image according to the side the player is facing
if self.direction == 1:
self.image = pygame.image.load('evoman/images/bullet_r.png')
else:
self.image = pygame.image.load('evoman/images/bullet_l.png')
def update(self, dt, game):
        # removes bullet objects when they pass beyond the screen limits
if self.rect.right<1 or self.rect.left>736 or self.rect.top <1 or self.rect.bottom>512 :
self.kill()
game.player.twists[self.n_twist] = None
return
        self.rect.x += self.direction * 600 * dt # moving on the X axis (left or right). It advances 600*dt at each game loop iteration, where dt controls the frame limit.
# checks collision of player's bullet with the enemy
if self.rect.colliderect(game.enemy.rect):
# if enemy is not imune
if game.enemy.imune == 0:
                # enemy loses life points, according to the difficulty level of the game (the more difficult, the less it loses)
game.enemy.life = max(0, game.enemy.life-(20/game.level))
if game.enemyn == 4:
                    # makes the enemy immune to the player's shooting.
game.enemy.imune = 1
            # removes the bullet from the screen after collision.
self.kill()
game.player.twists[self.n_twist] = None
game.enemy.hurt = 5
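# Note (illustrative): scaling movement by ``dt`` keeps the bullet speed
# constant in world units regardless of the frame rate: at 30 FPS each update
# advances 600 * (1/30) = 20 pixels, while at 60 FPS it advances
# 600 * (1/60) = 10 pixels, twice as often.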
# player sprite
class Player(pygame.sprite.Sprite):
def __init__(self, location, enemyn, level, *groups):
super(Player, self).__init__(*groups)
self.spriteDefinition = SpriteDefinition('evoman/images/EvoManSprites.png', 0, 0, 43, 59)
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
self.rect = pygame.rect.Rect(location, self.image.get_size())
self.resting = 0
self.dy = 0
self.direction = 1
self.alternate = 1
self.gun_cooldown = 0
self.max_life = 100
self.life = self.max_life
self.atacked = 0
self.hurt = 0
self.shooting = 0
self.inwater = 0
self.twists = []
self.vx = 0
self.vy = 0
self.hy = 0
self.sensors = None
def update(self, dt, game):
        # if the enemy is not attacking with the freezing attack (which prevents the player from moving or attacking) and the 'start game' marker is 1.
if game.freeze_p == 0 and game.start == 1:
            # checks the water environment flag to regulate movement speed
if self.inwater == 1:
self.vx = 0.5
self.vy = 0.5
self.hy = -2000
else:
self.vx = 1
self.vy = 1
self.hy = -900
# defines game mode for player action
if game.playermode == 'human': # player controlled by keyboard/joystick
# if joystick is connected, initializes it.
if game.joy > 0:
joystick = pygame.joystick.Joystick(0)
joystick.init()
                # tests if the button/key was pressed or released.
                # if the player is jumping, the release stops the jump before its maximum height is reached
press = 0
release = 0
for event in game.event:
if event.type == pygame.JOYBUTTONDOWN or event.type == pygame.KEYDOWN:
press = 1
else:
press = 0
if event.type == pygame.JOYBUTTONUP or event.type == pygame.KEYUP:
release = 1
else:
release = 0
# gets pressed key value
key = pygame.key.get_pressed()
# gets joystick value for axis x (left/right)
left = 0
if game.joy > 0:
if round(joystick.get_axis(0)) == -1:
left = 1
if key[pygame.K_LEFT]:
left = 1
right = 0
if game.joy > 0:
if round(joystick.get_axis(0)) == 1:
right = 1
if key[pygame.K_RIGHT]:
right = 1
# gets joystick/key value for jumping
jump = 0
if game.joy > 0:
if int(joystick.get_button(2)) == 1 and press == 1:
jump = 1
if key[pygame.K_SPACE] and press == 1:
jump = 1
# gets joystick/key value for shooting
shoot = 0
if game.joy > 0:
if int(joystick.get_button(3)) == 1 and press == 1:
shoot = 1
if key[pygame.K_LSHIFT] and press == 1:
shoot = 1
elif game.playermode == 'ai': # player controlled by AI algorithm
# calls the controller providing game sensors
actions = game.player_controller.control(self.sensors.get(game), game.pcont)
if len(actions) < 5:
game.print_logs("ERROR: Player controller must return 5 decision variables.")
sys.exit(0)
left = actions[0]
right = actions[1]
jump = actions[2]
shoot = actions[3]
release = actions[4]
            # if the button is released before the maximum jump height, then the player stops going up.
if release == 1 and self.resting == 0:
self.dy = 0
# copies last position state of the player
last = self.rect.copy()
# movements on the axis x (left)
if left:
self.rect.x -= 200 * dt * self.vx
self.direction = -1
# animation, running images alternation
if self.alternate == 1:
self.updateSprite(SpriteConstants.START_RUNNING, SpriteConstants.LEFT)
if self.alternate == 4 or self.alternate == 10:
self.updateSprite(SpriteConstants.RUNNING_STEP1, SpriteConstants.LEFT)
if self.alternate == 7:
self.updateSprite(SpriteConstants.RUNNING_STEP2, SpriteConstants.LEFT)
self.alternate += 1
if self.alternate > 12:
self.alternate = 1
# movements on the axis x (right)
elif right:
self.rect.x += 200 * dt * self.vx
self.direction = 1
# animation, running player images alternation
if self.alternate == 1:
self.updateSprite(SpriteConstants.START_RUNNING, SpriteConstants.RIGHT)
if self.alternate == 4 or self.alternate == 10:
self.updateSprite(SpriteConstants.RUNNING_STEP1, SpriteConstants.RIGHT)
if self.alternate == 7:
self.updateSprite(SpriteConstants.RUNNING_STEP2, SpriteConstants.RIGHT)
self.alternate += 1
if self.alternate > 12:
self.alternate = 1
else:
# animation, standing up images
if self.direction == -1:
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
            # if the player is touching the floor, they are allowed to jump
if self.resting == 1 and jump == 1:
self.dy = self.hy
# gravity
self.dy = min(400, self.dy + 100)
self.rect.y += self.dy * dt * self.vy
# changes the image when player jumps
if self.resting == 0 :
if self.direction == -1:
self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.RIGHT)
new = self.rect # copies new (after movement) position state of the player
            # enforces screen wall and platform limits against the player
self.resting = 0
for cell in game.tilemap.layers['triggers'].collide(new, 'blockers'):
blockers = cell['blockers']
if 'l' in blockers and last.right <= cell.left and new.right > cell.left and last.bottom>cell.top:
new.right = cell.left
if 'r' in blockers and last.left >= cell.right and new.left < cell.right and last.bottom>cell.top:
new.left = cell.right
if 't' in blockers and last.bottom <= cell.top and new.bottom > cell.top:
self.resting = 1 # player touches the floor
new.bottom = cell.top
self.dy = 0
if 'b' in blockers and last.top >= cell.bottom and new.top < cell.bottom:
new.top = cell.bottom
# shoots, limiting time between bullets.
if shoot == 1 and not self.gun_cooldown:
self.shooting = 5
                self.atacked = 1 # marks that the player has attacked the enemy
# creates bullets objects according to the direction.
if self.direction > 0:
self.twists.append(Bullet_p(self.rect.midright, 1, len(self.twists), game.sprite_p))
else:
self.twists.append(Bullet_p(self.rect.midleft, -1, len(self.twists), game.sprite_p))
                self.gun_cooldown = 0.4 # sets the cooldown time before the next bullet is allowed
# sound effects
if game.sound == "on" and game.playermode == "human":
sound = pygame.mixer.Sound('evoman/sounds/scifi003.wav')
c = pygame.mixer.Channel(2)
c.set_volume(1)
c.play(sound)
else:
self.atacked = 0
            # decreases the cooldown that limits the bullet rate
self.gun_cooldown = max(0, self.gun_cooldown - dt)
# hurt player animation
if self.hurt > 0:
if self.direction == -1:
self.updateSprite(SpriteConstants.HURTING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.HURTING, SpriteConstants.RIGHT)
self.hurt -= 1
self.hurt = max(0,self.hurt)
self.shooting -= 1
self.shooting = max(0,self.shooting)
# shooting animation
if self.shooting > 0:
if self.resting == 0:
if self.direction == -1:
self.updateSprite(SpriteConstants.SHOOTING_JUMPING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.SHOOTING_JUMPING, SpriteConstants.RIGHT)
else:
if self.direction == -1:
self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.RIGHT)
            # kills the player if they touch deadly objects, like spikes.
for cell in game.tilemap.layers['triggers'].collide(self.rect, 'killers'):
game.player.life = 0
# focuses screen center on player
game.tilemap.set_focus(new.x, new.y)
else:
game.tilemap.set_focus(self.rect.x, self.rect.y)
def updateSprite(self, state, direction):
self.image = self.spriteDefinition.getImage(state, direction)
|
|
from __future__ import division
from __future__ import unicode_literals
from django.utils.six import BytesIO
from PIL import Image, ImageOps
from .datastructures import FilteredImage, SizedImage
from .registry import versatileimagefield_registry
class CroppedImage(SizedImage):
"""
A SizedImage subclass that creates a 'cropped' image.
See the `process_image` method for more details.
"""
filename_key = 'crop'
def ppoi_as_str(self):
return "%s__%s" % (
str(self.ppoi[0]).replace('.', '-'),
str(self.ppoi[1]).replace('.', '-')
)
def get_filename_key(self):
return "%s-c%s" % (
self.filename_key,
self.ppoi_as_str()
)
def crop_on_centerpoint(self, image, width, height, ppoi=(0.5, 0.5)):
"""
        Returns a PIL Image instance cropped from `image` (at the aspect
        ratio provided by dividing `width` / `height`), sized down
        to `width`x`height`. Any 'excess pixels' are trimmed away with respect
        to the pixel of `image` that corresponds to `ppoi` (Primary Point
        of Interest).
`image`: A PIL Image instance
`width`: Integer, width of the image to return (in pixels)
`height`: Integer, height of the image to return (in pixels)
`ppoi`: A 2-tuple of floats with values greater than 0 and less than 1
These values are converted into a cartesian coordinate that
signifies the 'center pixel' which the crop will center on
(to trim the excess from the 'long side').
Determines whether to trim away pixels from either the left/right or
top/bottom sides by comparing the aspect ratio of `image` vs the
aspect ratio of `width`x`height`.
Will trim from the left/right sides if the aspect ratio of `image`
is greater-than-or-equal-to the aspect ratio of `width`x`height`.
        Will trim from the top/bottom sides if the aspect ratio of `image`
        is less-than the aspect ratio of `width`x`height`.
        Similar to Kevin Cazabon's ImageOps.fit method but uses the
        ppoi value as an absolute centerpoint (as opposed to a
        percentage used to trim off the 'long sides').
"""
ppoi_x_axis = int(image.size[0] * ppoi[0])
ppoi_y_axis = int(image.size[1] * ppoi[1])
center_pixel_coord = (ppoi_x_axis, ppoi_y_axis)
# Calculate the aspect ratio of `image`
orig_aspect_ratio = float(
image.size[0]
) / float(
image.size[1]
)
crop_aspect_ratio = float(width) / float(height)
# Figure out if we're trimming from the left/right or top/bottom
if orig_aspect_ratio >= crop_aspect_ratio:
# `image` is wider than what's needed,
# crop from left/right sides
orig_crop_width = int(
(crop_aspect_ratio * float(image.size[1])) + 0.5
)
orig_crop_height = image.size[1]
crop_boundary_top = 0
crop_boundary_bottom = orig_crop_height
crop_boundary_left = center_pixel_coord[0] - (orig_crop_width // 2)
crop_boundary_right = crop_boundary_left + orig_crop_width
if crop_boundary_left < 0:
crop_boundary_left = 0
crop_boundary_right = crop_boundary_left + orig_crop_width
elif crop_boundary_right > image.size[0]:
crop_boundary_right = image.size[0]
crop_boundary_left = image.size[0] - orig_crop_width
else:
# `image` is taller than what's needed,
# crop from top/bottom sides
orig_crop_width = image.size[0]
orig_crop_height = int(
(float(image.size[0]) / crop_aspect_ratio) + 0.5
)
crop_boundary_left = 0
crop_boundary_right = orig_crop_width
crop_boundary_top = center_pixel_coord[1] - (orig_crop_height // 2)
crop_boundary_bottom = crop_boundary_top + orig_crop_height
if crop_boundary_top < 0:
crop_boundary_top = 0
crop_boundary_bottom = crop_boundary_top + orig_crop_height
elif crop_boundary_bottom > image.size[1]:
crop_boundary_bottom = image.size[1]
crop_boundary_top = image.size[1] - orig_crop_height
        # Crop the selected region out of the original image
cropped_image = image.crop(
(
crop_boundary_left,
crop_boundary_top,
crop_boundary_right,
crop_boundary_bottom
)
)
# Resizing the newly cropped image to the size specified
# (as determined by `width`x`height`)
return cropped_image.resize(
(width, height),
Image.ANTIALIAS
)
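    # A worked example of the crop-box math above (an illustrative sketch, not
    # part of the original module): for an 800x600 `image`, width=300,
    # height=300 and ppoi=(0.25, 0.5):
    #   center_pixel_coord = (200, 300)
    #   orig_aspect_ratio ~= 1.33 >= crop_aspect_ratio = 1.0, so trim left/right
    #   orig_crop_width = int(1.0 * 600 + 0.5) = 600, orig_crop_height = 600
    #   crop_boundary_left = 200 - 300 = -100 -> clamped to 0, right = 600
    #   crop box = (0, 0, 600, 600), then resized down to 300x300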
def process_image(self, image, image_format, save_kwargs,
width, height):
"""
Returns a BytesIO instance of `image` cropped to `width` and `height`
Cropping will first reduce an image down to its longest side
and then crop inwards centered on the Primary Point of Interest
(as specified by `self.ppoi`)
"""
imagefile = BytesIO()
palette = image.getpalette()
cropped_image = self.crop_on_centerpoint(
image,
width,
height,
self.ppoi
)
# Using ImageOps.fit on GIFs can introduce issues with their palette
# Solution derived from: http://stackoverflow.com/a/4905209/1149774
if image_format == 'GIF':
cropped_image.putpalette(palette)
cropped_image.save(
imagefile,
**save_kwargs
)
return imagefile
class ThumbnailImage(SizedImage):
"""
Sizes an image down to fit within a bounding box
See the `process_image()` method for more information
"""
filename_key = 'thumbnail'
def process_image(self, image, image_format, save_kwargs,
width, height):
"""
Returns a BytesIO instance of `image` that will fit
within a bounding box as specified by `width`x`height`
"""
imagefile = BytesIO()
image.thumbnail(
(width, height),
Image.ANTIALIAS
)
image.save(
imagefile,
**save_kwargs
)
return imagefile
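    # Illustrative note (not part of the original module): `Image.thumbnail`
    # preserves aspect ratio and never upscales, so an 800x600 source given a
    # (300, 300) bounding box comes out at 300x225.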
class InvertImage(FilteredImage):
"""
Inverts the colors of an image.
See the `process_image()` for more specifics
"""
def process_image(self, image, image_format, save_kwargs={}):
"""
Returns a BytesIO instance of `image` with inverted colors
"""
imagefile = BytesIO()
inv_image = ImageOps.invert(image)
inv_image.save(
imagefile,
**save_kwargs
)
return imagefile
versatileimagefield_registry.register_sizer('crop', CroppedImage)
versatileimagefield_registry.register_sizer('thumbnail', ThumbnailImage)
versatileimagefield_registry.register_filter('invert', InvertImage)
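# --- Hedged usage sketch (not part of the original module) ---
# Assuming a Django model that declares a VersatileImageField (the model and
# field names below are placeholders, not defined here), the sizers/filters
# registered above are reached through the field's attribute API, e.g.:
#
#     instance.image.crop['300x300'].url        # CroppedImage, PPOI-centred
#     instance.image.thumbnail['100x100'].url   # ThumbnailImage, bounding-box fit
#     instance.image.filters.invert.url         # InvertImage
#
# The '300x300' / '100x100' keys correspond to the `width`x`height` arguments
# passed to `process_image` above.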
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import zipfile
from action import base_manager
from action import user_manager
from bottle import redirect
from bottle import request
from entity import category as category_entity
from entity import image as image_entity
from entity import recipe as recipe_entity
from entity import synonym as synonym_entity
from entity import tag as tag_entity
from entity import url as url_entity
from helper import hint
from helper import translator
from helper import url
class RecipeManager(base_manager.BaseManager):
""" Handle recipe related actions.
Constants:
HINT_COOKIE -- Name of hint cookie (string).
HINT_DELETE -- Value of delete hint cookie (string).
HINT_EDIT -- Value of edit hint cookie (string).
HINT_EXISTS -- Value of exists hint cookie (string).
HINT_NAME -- Name of cookie which stores name of last changed recipe (string).
HINT_NEW -- Value of new hint cookie (string).
Member:
db -- The database connection.
hints -- List of hints which occurred during action handling (list hint).
"""
HINT_COOKIE = 'show_hint'
HINT_DELETE = 'delete'
HINT_EDIT = 'edit'
HINT_EXISTS = 'exists'
HINT_NAME = 'last_name'
HINT_NEW = 'new'
HINT_NEW_EXISTS = 'new-exists'
def __init__(self, db):
self.db = db
self.hints = []
def action(self, language, indexer, static_path, image_path, recipe_json,
id=None):
""" Handle actions. If id is given it is assumed that an existing
recipe is edited. Returns recipe to show. """
_ = translator.Translator.instance(language)
is_new = id is None
is_edit = self.get_form('edit') is not None
is_delete = self.get_form('delete') is not None
is_import = self.get_form('import') is not None
# Actions
result = None
if is_edit:
categories = self.__read_categories()
images = self.__read_images(language, static_path, image_path)
synonyms = self.__read_synonyms()
tags = self.__read_tags()
urls = self.__read_urls()
result = recipe_entity.Recipe()
if not is_new:
result = recipe_entity.Recipe.find_pk(self.db, id)
result.categories = categories
result.description = self.get_form('description')
result.images = images
result.info = self.get_form('info')
result.ingredients = self.get_form('ingredients')
result.rating = int(self.get_form('rating'))
result.serving_size = self.get_form('serving-size')
result.synonyms = synonyms
result.tags = tags
result.title = self.get_form('title')
result.urls = urls
result = self.__finalize_recipe(result, is_new, indexer)
elif is_delete:
recipe = recipe_entity.Recipe.find_pk(self.db, id)
# Update search index.
scheme = indexer.scheme()
writer = indexer.open_index(scheme)
indexer.remove_from_index(writer, [recipe])
indexer.close_index()
recipe.delete(self.db)
self.set_cookie(self.HINT_COOKIE, self.HINT_DELETE)
self.set_cookie(self.HINT_NAME, recipe.title)
redirect(url.Url.from_path(['manage', 'recipe']))
elif is_import:
result = recipe_entity.Recipe()
import_file = request.files.get('import-file')
if not import_file:
hint_text = _('Please select an import file.')
self.hints.append(hint.Hint(hint_text))
else:
with zipfile.ZipFile(import_file.file) as zip:
json_bytes = zip.read(recipe_json)
recipe_dict = json.loads(json_bytes.decode('utf-8'))
synonyms = []
for synonym_name in recipe_dict['synonyms']:
synonym = synonym_entity.Synonym(name=synonym_name)
synonyms.append(synonym)
urls = []
for url_dict in recipe_dict['urls']:
u = url_entity.Url(name=url_dict['name'],
url=url_dict['url'])
urls.append(u)
self.__create_img_dir(static_path, image_path)
images = []
for image_name in recipe_dict['images']:
# Check file extension.
name, extension = os.path.splitext(image_name)
if self.__check_img_extension(extension, language):
# Save image.
path = self.__get_image_path(static_path,
image_path, name,
extension)
image = self.__save_image(static_path, path,
zip.read(image_name))
images.append(image)
result.description = recipe_dict['description']
result.images = images
result.info = recipe_dict['info']
result.ingredients = recipe_dict['ingredients']
result.serving_size = recipe_dict['serving_size']
result.synonyms = synonyms
result.title = recipe_dict['title']
result.urls = urls
result = self.__finalize_recipe(result, True, indexer)
elif is_new:
result = recipe_entity.Recipe()
else:
result = recipe_entity.Recipe.find_pk(self.db, id)
self.__show_hints(language)
return result
def __check_img_extension(self, extension, language):
""" Checks the given extension. Returns true if extension
is valid else false. """
_ = translator.Translator.instance(language)
result = True
if extension.lower() not in ('.png', '.jpg', '.jpeg', '.gif'):
text = _('Extension "{}" is not an allowed image type.')\
.format(extension)
self.hints.append(hint.Hint(text))
result = False
return result
@staticmethod
def __create_img_dir(static_path, image_root_path):
""" Create image directory if not exists. """
if not os.path.exists(static_path+image_root_path):
os.mkdir(static_path+image_root_path)
def __finalize_recipe(self, recipe, is_new, indexer):
""" Final steps in saving a new or updated recipe. Returns recipe
or redirects. """
if is_new:
manager = user_manager.UserManager(self.db)
recipe.author = manager.current_user()
if not recipe.title:
hint_text = _('Title must not be empty.')
self.hints.append(hint.Hint(hint_text))
else:
# First check if the recipe already exists and is new.
new_exists = is_new and \
recipe_entity.Recipe.title_exists(self.db, recipe.title)
# Save the recipe.
recipe.save(self.db)
# Update search index.
scheme = indexer.scheme()
writer = indexer.open_index(scheme)
indexer.fill_index(writer, [recipe])
indexer.close_index()
if new_exists:
type = self.HINT_NEW_EXISTS
elif is_new:
type = self.HINT_NEW
else:
type = self.HINT_EDIT
self.set_cookie(self.HINT_COOKIE, type)
self.set_cookie(self.HINT_NAME, recipe.title)
redirect(url.Url.from_path(['manage', 'recipe', str(recipe.id)]))
return recipe
@staticmethod
def __get_image_path(static_path, image_root_path, name, extension):
""" Returns image path for given name and extension. """
image_path = image_root_path
image_path += name
image_path += extension
path_counter = 0
# Create a unique name.
while os.path.exists(static_path + image_path):
image_path = image_root_path
image_path += name
image_path += str(path_counter)
image_path += extension
path_counter += 1
return image_path
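    # Illustrative example (not part of the original code): with
    # image_root_path='img/', name='cake', extension='.jpg', and both
    # 'img/cake.jpg' and 'img/cake0.jpg' already present under static_path,
    # the loop above returns 'img/cake1.jpg'.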
def __read_categories(self):
""" Read categories and return them. """
categories = []
for category_id in request.forms.getall('categories'):
category = category_entity.Category.find_pk(self.db, category_id)
categories.append(category)
return categories
def __read_images(self, language, static_path, image_root_path):
""" Read images and return them. """
_ = translator.Translator.instance(language)
self.__create_img_dir(static_path, image_root_path)
images = []
# Read images from form.
image_counter = 0
image_path = self.get_form('image-'+str(image_counter))
while image_path is not None:
if image_path:
image = image_entity.Image(path=image_path)
images.append(image)
image_counter += 1
image_path = self.get_form('image-'+str(image_counter))
image_counter = 0
# Check images and write to file system.
image_upload = request.files.get('new-image-'+str(image_counter))
while image_upload is not None:
# Check file extension.
name, extension = os.path.splitext(image_upload.filename)
if not self.__check_img_extension(extension, language):
image_counter += 1
image_upload = request.files.get('new-image-'+str(image_counter))
continue
image_path = self.__get_image_path(static_path, image_root_path,
name, extension)
# Save image and restart with next one.
#image_upload.save(self.STATIC_PATH + image_path)
image = self.__save_image(static_path, image_path,
image_upload.file.read())
images.append(image)
image_counter += 1
image_upload = request.files.get('new-image-'+str(image_counter))
return images
def __read_synonyms(self):
""" Read synonyms and return them. """
synonyms = []
synonym_counter = 0
synonym_name = self.get_form('synonym-'+str(synonym_counter))
while synonym_name is not None:
if synonym_name:
synonym = synonym_entity.Synonym(name=synonym_name)
synonyms.append(synonym)
synonym_counter += 1
synonym_name = self.get_form('synonym-'+str(synonym_counter))
return synonyms
def __read_tags(self):
""" Read tags and return them. """
tags = []
for tag_id in request.forms.getall('tags'):
tag = tag_entity.Tag.find_pk(self.db, tag_id)
tags.append(tag)
return tags
def __read_urls(self):
""" Read urls and return them. """
urls = []
url_counter = 0
url_url = self.get_form('url-url-'+str(url_counter))
while url_url is not None:
if url_url:
url_name = self.get_form('url-name-'+str(url_counter))
url = url_entity.Url(name=url_name, url=url_url)
urls.append(url)
url_counter += 1
url_url = self.get_form('url-url-'+str(url_counter))
return urls
@staticmethod
def __save_image(static_path, image_path, image_bytes):
""" Save given image. Returns image object. """
with open(static_path + image_path, 'wb') as out_file:
out_file.write(image_bytes)
return image_entity.Image(path=image_path)
def __show_hints(self, language):
""" Show hints if cookies are set. """
_ = translator.Translator.instance(language)
hint_cookie = self.get_cookie(self.HINT_COOKIE)
name_cookie = self.get_cookie(self.HINT_NAME)
if hint_cookie and name_cookie:
if hint_cookie == self.HINT_NEW:
hint_text = _('New recipe "{}" has been created.')
elif hint_cookie == self.HINT_EDIT:
hint_text = _('Recipe "{}" has been updated.')
elif hint_cookie == self.HINT_NEW_EXISTS:
hint_text = _('New recipe "{}" has been created. '
'A recipe with the same title already exists.')
else:
hint_text = _('Recipe "{}" has been removed.')
hint_text = hint_text.format(name_cookie)
self.hints.append(hint.Hint(hint_text))
self.delete_cookie(self.HINT_COOKIE)
self.delete_cookie(self.HINT_NAME)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2015, GoodData(R) Corporation. All rights reserved
from builtins import object
import mock
import os
import pytest
import shutil
import smoker.client as smoker_client
from smoker.client import cli as smoker_cli
import socket
from tests.server.smoker_test_resources import client_mock_result
from tests.server.smoker_test_resources.client_mock_result\
import rest_api_response
from tests.server.smoker_test_resources.client_mock_result import TMP_DIR
class TestHost(object):
"""Unit tests for the client.Host class"""
hostname = socket.gethostname()
def test_create_host_instance(self):
host = smoker_client.Host('%s:8086' % self.hostname)
assert host.url == 'http://%s:8086' % self.hostname
assert not host.links
host = smoker_client.Host('%s' % self.hostname)
assert host.url == 'http://%s:8086' % self.hostname
assert not host.links
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_load_about(self):
# Mock: http://${hostname}:8089/ load_about
host = smoker_client.Host('%s:8086' % self.hostname)
assert not host.links
assert host.load_about() == client_mock_result.about_response
assert host.links == client_mock_result.links
assert host.name == client_mock_result.about_response['about']['host']
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_result_will_be_cleared_after_getting(self):
# Mock: http://${hostname}:8089/ load_about
host = smoker_client.Host('%s:8086' % self.hostname)
host.load_about()
assert host.get_result() == client_mock_result.about_response
assert not host.get_result()
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_force_run(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/processes open(resource='processes')
# Mock: http://${hostname}:8089/processes/# open(uri='/processes/#')
expected = client_mock_result.force_plugin_run_response['Uptime']
host = smoker_client.Host('%s:8086' % self.hostname)
host.load_about()
plugins = {'Uptime': dict()}
assert host.force_run(plugins)['plugins']['items'][0] == expected
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_force_run_with_invalid_plugin_name(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/processes open(resource='processes')
host = smoker_client.Host('%s:8086' % self.hostname)
host.load_about()
plugins = {'InvalidPlugin': dict()}
assert host.force_run(plugins) is False
assert host.get_result() == client_mock_result.about_response
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_load_about_before_open_resource(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/plugins open(resource='plugins')
host = smoker_client.Host('%s:8086' % self.hostname)
assert not host.open(resource='plugins')
host.load_about()
assert host.open(resource='plugins')
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_open_with_invalid_uri_and_resource(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/InvalidUri open(uri='/InvalidUri')
# Mock: http://${hostname}:8089/InvalidResource
# open(resource='InvalidResource')
expected_exc = 'Argument uri or resource have to be submitted'
host = smoker_client.Host('%s:8086' % self.hostname)
host.load_about()
assert not host.open(uri='/InvalidUri')
assert not host.open(resource='InvalidResource')
with pytest.raises(Exception) as exc_info:
host.open()
assert expected_exc in repr(exc_info.value)
class TestClient(object):
"""Unit tests for the client.Client class"""
hostname = socket.gethostname()
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_create_client_instance(self):
# Mock: http://${hostname}:8089/ load_about
cli = smoker_client.Client(['%s:8086' % self.hostname])
assert cli.hosts[0].load_about() == client_mock_result.about_response
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_get_plugins_with_filter_is_none(self):
cli = smoker_client.Client(['%s:8086' % self.hostname])
with pytest.raises(TypeError) as exc_info:
cli.get_plugins()
assert "'NoneType' object is not iterable" in repr(exc_info.value)
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_get_plugins(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/plugins open(resource='plugins')
        # Need to confirm the format of filters. Looks like it doesn't work:
# filters = { 'Category': 'system'}
# filters = ('Category', 'system')
# filters = ['Uname', 'Uptime']
cli = smoker_client.Client(['%s:8086' % self.hostname])
result = cli.get_plugins(filters=list())
assert self.hostname in result
assert cli.hosts[0].load_about() == client_mock_result.about_response
for x in ['Uname', 'Hostname', 'Uptime']:
assert x in result[self.hostname]['plugins']
result = cli.get_plugins(filters=list(), exclude_plugins=['Uname'])
        assert 'Hostname' in result[self.hostname]['plugins']
        assert 'Uptime' in result[self.hostname]['plugins']
assert 'Uname' not in result[self.hostname]['plugins']
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_open_with_invalid_uri_and_resource(self):
# Mock: http://${hostname}:8089/ load_about
cli = smoker_client.Client(['%s:8086' % self.hostname])
expected_exc = 'Argument uri or resource have to be submitted'
expected_response = client_mock_result.about_response
assert cli.open(uri='/InvalidUri')[self.hostname] == expected_response
cli.open(resource='InvalidResource') == expected_response
with pytest.raises(Exception) as exc_info:
cli.open()
assert expected_exc in repr(exc_info.value)
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_force_run(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/processes open(resource='processes')
# Mock: http://${hostname}:8089/processes/# open(uri='/processes/#')
cli = smoker_client.Client(['%s:8086' % self.hostname])
plugins = cli.get_plugins(filters=list(),
exclude_plugins=['Hostname', 'Uname'])
result = cli.force_run(plugins)[self.hostname]
assert result['status'] == 'OK'
result = result['plugins']
assert 'Uptime' in result
        assert 'Uname' not in result
        assert 'Hostname' not in result
assert 'forcedResult' in result['Uptime']
assert result['Uptime']['forcedResult']['status'] == 'OK'
assert result['Uptime']['links']['self'] == '/plugins/Uptime'
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_force_run_with_WARN_result(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/processes open(resource='processes')
# Mock: http://${hostname}:8089/processes/# open(uri='/processes/#')
cli = smoker_client.Client(['%s:8086' % self.hostname])
plugins = cli.get_plugins(filters=list(),
exclude_plugins=['Hostname'])
result = cli.force_run(plugins)[self.hostname]
assert result['status'] == 'WARN'
result = result['plugins']
        assert 'Uptime' in result
        assert 'Uname' in result
assert 'Hostname' not in result
        assert 'forcedResult' in result['Uptime']
        assert 'forcedResult' in result['Uname']
assert result['Uptime']['forcedResult']['status'] == 'OK'
assert result['Uname']['forcedResult']['status'] == 'WARN'
assert result['Uptime']['links']['self'] == '/plugins/Uptime'
assert result['Uname']['links']['self'] == '/plugins/Uname'
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_force_run_with_ERROR_result(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/processes open(resource='processes')
# Mock: http://${hostname}:8089/processes/# open(uri='/processes/#')
cli = smoker_client.Client(['%s:8086' % self.hostname])
plugins = cli.get_plugins(filters=list())
result = cli.force_run(plugins)[self.hostname]
assert result['status'] == 'ERROR'
result = result['plugins']
        for plugin in ('Uptime', 'Uname', 'Hostname'):
            assert plugin in result
            assert 'forcedResult' in result[plugin]
assert result['Uptime']['forcedResult']['status'] == 'OK'
assert result['Uname']['forcedResult']['status'] == 'WARN'
assert result['Hostname']['forcedResult']['status'] == 'ERROR'
assert result['Uptime']['links']['self'] == '/plugins/Uptime'
assert result['Uname']['links']['self'] == '/plugins/Uname'
assert result['Hostname']['links']['self'] == '/plugins/Hostname'
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_dump_tap_result(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/plugins open(resource='plugins')
cli = smoker_client.Client(['%s:8086' % self.hostname])
plugins = cli.get_plugins(filters=list())
expected = '\n'.join(client_mock_result.tap_result_all_plugins)
assert smoker_cli.dump_tap(plugins) == expected
plugins = cli.get_plugins(filters=list(), exclude_plugins=['Hostname'])
expected = '\n'.join(client_mock_result.tap_result_uptime_uname)
assert smoker_cli.dump_tap(plugins) == expected
plugins = cli.get_plugins(filters=list(), exclude_plugins=['Uname'])
expected = '\n'.join(client_mock_result.tap_result_uptime_hostname)
assert smoker_cli.dump_tap(plugins) == expected
plugins = cli.get_plugins(filters=list(), exclude_plugins=['Uptime'])
expected = '\n'.join(client_mock_result.tap_result_hostname_uname)
assert smoker_cli.dump_tap(plugins) == expected
@mock.patch('urllib.request.urlopen', rest_api_response)
def test_plugins_to_xml_result(self):
# Mock: http://${hostname}:8089/ load_about
# Mock: http://${hostname}:8089/plugins open(resource='plugins')
cli = smoker_client.Client(['%s:8086' % self.hostname])
plugins = cli.get_plugins(filters=list())
expected = '\n'.join(client_mock_result.xml_result_all_plugins)
result = smoker_cli.plugins_to_xml(plugins)
assert result == expected
plugins = cli.get_plugins(filters=list(), exclude_plugins=['Hostname'])
expected = '\n'.join(client_mock_result.xml_result_uptime_uname)
result = smoker_cli.plugins_to_xml(plugins)
assert result == expected
plugins = cli.get_plugins(filters=list(), exclude_plugins=['Uname'])
expected = '\n'.join(client_mock_result.xml_result_uptime_hostname)
result = smoker_cli.plugins_to_xml(plugins)
assert result == expected
plugins = cli.get_plugins(filters=list(), exclude_plugins=['Uptime'])
expected = '\n'.join(client_mock_result.xml_result_hostname_uname)
result = smoker_cli.plugins_to_xml(plugins)
assert result == expected
class TestCleanUp(object):
"""Clean up all temporary files used by Mock"""
def test_clean_up(self):
if os.path.exists(TMP_DIR):
shutil.rmtree(TMP_DIR)
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import datetime as dt
from decimal import Decimal
from ...util_tests import TestLogin
from evesrp import db
from evesrp.models import Request, Action, AbsoluteModifier, RelativeModifier,\
ActionType, PrettyDecimal
from evesrp.auth import PermissionType
from evesrp.auth.models import User, Pilot, Division, Permission
from evesrp.util import utc
from evesrp import views
class TestRequest(TestLogin):
def setUp(self):
super(TestRequest, self).setUp()
with self.app.test_request_context():
d1 = Division('Division One')
d2 = Division('Division Two')
db.session.add(d1)
db.session.add(d2)
# Yup, the Gyrobus killmail
mock_killmail = dict(
id=12842852,
ship_type='Erebus',
corporation='Ever Flow',
alliance='Northern Coalition.',
killmail_url=('http://eve-kill.net/?a=kill_detail'
'&kll_id=12842852'),
base_payout=73957900000,
kill_timestamp=dt.datetime(2012, 3, 25, 0, 44, 0,
tzinfo=utc),
system='92D-OI',
constellation='XHYS-O',
region='Venal',
pilot_id=133741,
)
Pilot(self.normal_user, 'eLusi0n', 133741)
Request(self.normal_user, 'Original details', d1,
mock_killmail.items())
db.session.commit()
self.request_path = '/request/12842852/'
def _add_permission(self, user_name, permission,
division_name='Division One'):
"""Helper to grant permissions to the division the request is in."""
with self.app.test_request_context():
division = Division.query.filter_by(name=division_name).one()
user = User.query.filter_by(name=user_name).one()
Permission(division, permission, user)
db.session.commit()
@property
def request(self):
return Request.query.get(12842852)
class TestRequestAccess(TestRequest):
def test_basic_request_access(self):
# Grab some clients
# The normal user is the submitter
norm_client = self.login(self.normal_name)
admin_client = self.login(self.admin_name)
# Users always have access to requests they've submitted
resp = norm_client.get(self.request_path)
self.assertEqual(resp.status_code, 200)
self.assertIn('Lossmail', resp.get_data(as_text=True))
resp = admin_client.get(self.request_path)
self.assertEqual(resp.status_code, 403)
def _test_permission_access(self, user_name, permission,
division_name, accessible=True):
self._add_permission(user_name, permission, division_name)
# Get a client and fire off the request
client = self.login(user_name)
resp = client.get(self.request_path)
if accessible:
self.assertEqual(resp.status_code, 200)
self.assertIn('Lossmail', resp.get_data(as_text=True))
else:
self.assertEqual(resp.status_code, 403)
def test_review_same_division_access(self):
self._test_permission_access(self.admin_name, PermissionType.review,
'Division One')
def test_review_other_division_access(self):
self._test_permission_access(self.admin_name, PermissionType.review,
'Division Two', False)
def test_pay_same_division_access(self):
self._test_permission_access(self.admin_name, PermissionType.pay,
'Division One')
def test_pay_other_division_access(self):
self._test_permission_access(self.admin_name, PermissionType.pay,
'Division Two', False)
class TestRequestSetPayout(TestRequest):
def _test_set_payout(self, user_name, permission, permissable=True):
if permission is not None:
self._add_permission(user_name, permission)
client = self.login(user_name)
test_payout = 42
with client as c:
resp = client.post(self.request_path, follow_redirects=True, data={
'id_': 'payout',
'value': test_payout})
self.assertEqual(resp.status_code, 200)
with self.app.test_request_context():
payout = self.request.payout
base_payout = self.request.base_payout
if permissable:
real_test_payout = test_payout * 1000000
self.assertIn(PrettyDecimal(real_test_payout).currency(),
resp.get_data(as_text=True))
self.assertEqual(payout, real_test_payout)
else:
self.assertIn('Only reviewers can change the base payout.',
resp.get_data(as_text=True))
self.assertEqual(base_payout, Decimal('73957900000'))
def test_reviewer_set_base_payout(self):
self._test_set_payout(self.admin_name, PermissionType.review)
def test_payer_set_base_payout(self):
self._test_set_payout(self.admin_name, PermissionType.pay, False)
def test_submitter_set_base_payout(self):
self._test_set_payout(self.normal_name, None, False)
def test_set_payout_invalid_request_state(self):
statuses = (
ActionType.approved,
ActionType.paid,
ActionType.rejected,
ActionType.incomplete,
)
self._add_permission(self.normal_name, PermissionType.review)
self._add_permission(self.normal_name, PermissionType.pay)
client = self.login()
for status in statuses:
with self.app.test_request_context():
if status == ActionType.paid:
self.request.status = ActionType.approved
self.request.status = status
db.session.commit()
resp = client.post(self.request_path, follow_redirects=True, data={
'id_': 'payout',
'value': '42'})
self.assertIn('The request must be in the evaluating state '
'to change the base payout.', resp.get_data(as_text=True))
with self.app.test_request_context():
self.request.status = ActionType.evaluating
db.session.commit()
class TestRequestAddModifiers(TestRequest):
def _test_add_modifier(self, user_name, permissible=True):
client = self.login(user_name)
resp = client.post(self.request_path, follow_redirects=True, data={
'id_': 'modifier',
'value': '10',
'type_': 'abs-bonus',})
self.assertEqual(resp.status_code, 200)
with self.app.test_request_context():
modifiers = self.request.modifiers.all()
modifiers_length = len(modifiers)
if modifiers_length > 0:
first_value = modifiers[0].value
if permissible:
self.assertEqual(modifiers_length, 1)
self.assertEqual(first_value, 10000000)
else:
self.assertEqual(modifiers_length, 0)
self.assertIn('Only reviewers can add modifiers.',
resp.get_data(as_text=True))
def test_reviewer_add_modifier(self):
self._add_permission(self.admin_name, PermissionType.review)
self._test_add_modifier(self.admin_name)
def test_payer_add_modifier(self):
self._add_permission(self.admin_name, PermissionType.pay)
self._test_add_modifier(self.admin_name, False)
def test_submitter_add_modifier(self):
self._test_add_modifier(self.normal_name, False)
class TestRequestVoidModifiers(TestRequest):
def _add_modifier(self, user_name, value, absolute=True):
with self.app.test_request_context():
user = User.query.filter_by(name=user_name).one()
if absolute:
mod = AbsoluteModifier(self.request, user, '', value)
else:
mod = RelativeModifier(self.request, user, '', value)
db.session.commit()
return mod.id
def _test_void_modifier(self, user_name, permissible=True):
self._add_permission(self.admin_name, PermissionType.review)
mod_id = self._add_modifier(self.admin_name, 10)
client = self.login(user_name)
resp = client.post(self.request_path, follow_redirects=True, data={
'id_': 'void',
'modifier_id': mod_id})
self.assertEqual(resp.status_code, 200)
with self.app.test_request_context():
payout = self.request.payout
if permissible:
self.assertEqual(payout, Decimal(73957900000))
else:
self.assertEqual(payout, Decimal(73957900000) + 10)
self.assertIn('You must be a reviewer to be able to void',
resp.get_data(as_text=True))
def test_reviewer_void_modifier(self):
self._add_permission(self.normal_name, PermissionType.review)
self._test_void_modifier(self.normal_name)
def test_payer_void_modifier(self):
self._add_permission(self.normal_name, PermissionType.pay)
self._test_void_modifier(self.normal_name, False)
def test_submitter_void_modifier(self):
self._test_void_modifier(self.normal_name, False)
def test_modifier_evaluation(self):
with self.app.test_request_context():
self._add_permission(self.admin_name, PermissionType.review)
self.assertEqual(self.request.payout, Decimal(73957900000))
self._add_modifier(self.admin_name, Decimal(10000000))
self.assertEqual(self.request.payout,
Decimal(73957900000) + Decimal(10000000))
self._add_modifier(self.admin_name, Decimal('-0.1'), False)
self.assertEqual(self.request.payout,
(Decimal(73957900000) + Decimal(10000000)) *
(1 + Decimal('-0.1')))
class TestChangeDivision(TestRequest):
def setUp(self):
super(TestChangeDivision, self).setUp()
with self.app.test_request_context():
db.session.add(Division('Division Three'))
db.session.commit()
def _send_request(self, user_name):
with self.app.test_request_context():
new_division_id = Division.query.filter_by(
name='Division Two').one().id
client = self.login(user_name)
return client.post(self.request_path + 'division/',
follow_redirects=True,
data={'division': new_division_id})
def test_submitter_change_submit_division(self):
self._add_permission(self.normal_name, PermissionType.submit,
'Division Two')
self._add_permission(self.normal_name, PermissionType.submit,
'Division Three')
resp = self._send_request(self.normal_name)
self.assertEqual(resp.status_code, 200)
with self.app.test_request_context():
d2 = Division.query.filter_by(name='Division Two').one()
self.assertEqual(self.request.division, d2)
def test_submitter_change_nonsubmit_division(self):
resp = self._send_request(self.normal_name)
self.assertEqual(resp.status_code, 200)
self.assertIn(u"No other divisions", resp.get_data(as_text=True))
with self.app.test_request_context():
d1 = Division.query.filter_by(name='Division One').one()
self.assertEqual(self.request.division, d1)
def test_submitter_change_division_finalized(self):
self._add_permission(self.normal_name, PermissionType.submit,
'Division Two')
self._add_permission(self.normal_name, PermissionType.submit,
'Division Three')
self._add_permission(self.admin_name, PermissionType.admin)
with self.app.test_request_context():
Action(self.request, self.admin_user, type_=ActionType.rejected)
db.session.commit()
resp = self._send_request(self.normal_name)
self.assertEqual(resp.status_code, 200)
self.assertIn(u"in a finalized state", resp.get_data(as_text=True))
with self.app.test_request_context():
d1 = Division.query.filter_by(name='Division One').one()
self.assertEqual(self.request.division, d1)
def test_reviewer_change_division(self):
self._add_permission(self.admin_name, PermissionType.review)
self._add_permission(self.normal_name, PermissionType.submit,
'Division Two')
self._add_permission(self.normal_name, PermissionType.submit,
'Division Three')
resp = self._send_request(self.normal_name)
self.assertEqual(resp.status_code, 200)
with self.app.test_request_context():
d2 = Division.query.filter_by(name='Division Two').one()
self.assertEqual(self.request.division, d2)
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
"""
ROS msg library for Python
Implements: U{http://ros.org/wiki/msg}
"""
import os
import sys
from . base import InvalidMsgSpec, EXT_MSG, MSG_DIR, SEP, log
from . names import is_legal_resource_name, is_legal_resource_base_name, package_resource_name, resource_name
#TODOXXX: unit test
def bare_msg_type(msg_type):
"""
Compute the bare data type, e.g. for arrays, get the underlying array item type
:param msg_type: ROS msg type (e.g. 'std_msgs/String'), ``str``
:returns: base type, ``str``
"""
if msg_type is None:
return None
if '[' in msg_type:
return msg_type[:msg_type.find('[')]
return msg_type
def resolve_type(msg_type, package_context):
"""
Resolve type name based on current package context.
NOTE: in ROS Diamondback, 'Header' resolves to
'std_msgs/Header'. In previous releases, it resolves to
'roslib/Header' (REP 100).
e.g.::
resolve_type('String', 'std_msgs') -> 'std_msgs/String'
resolve_type('String[]', 'std_msgs') -> 'std_msgs/String[]'
resolve_type('std_msgs/String', 'foo') -> 'std_msgs/String'
resolve_type('uint16', 'std_msgs') -> 'uint16'
resolve_type('uint16[]', 'std_msgs') -> 'uint16[]'
"""
bt = bare_msg_type(msg_type)
if bt in BUILTIN_TYPES:
return msg_type
elif bt == HEADER:
return HEADER_FULL_NAME
elif SEP in msg_type:
return msg_type
else:
return "%s%s%s"%(package_context, SEP, msg_type)
#NOTE: this assumes that we aren't going to support multi-dimensional
def parse_type(msg_type):
"""
Parse ROS message field type
:param msg_type: ROS field type, ``str``
:returns: base_type, is_array, array_length, ``(str, bool, int)``
:raises: :exc:`ValueError` If *msg_type* cannot be parsed
"""
if not msg_type:
raise ValueError("Invalid empty type")
if '[' in msg_type:
var_length = msg_type.endswith('[]')
splits = msg_type.split('[')
if len(splits) > 2:
raise ValueError("Currently only support 1-dimensional array types: %s"%msg_type)
if var_length:
return msg_type[:-2], True, None
else:
try:
length = int(splits[1][:-1])
return splits[0], True, length
except ValueError:
raise ValueError("Invalid array dimension: [%s]"%splits[1][:-1])
else:
return msg_type, False, None
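# Illustrative results (not part of the original module):
#   parse_type('int32')    -> ('int32', False, None)
#   parse_type('int32[]')  -> ('int32', True, None)
#   parse_type('int32[4]') -> ('int32', True, 4)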
################################################################################
# name validation
def is_valid_msg_type(x):
"""
    :returns: True if the name is a syntactically legal message type name, ``bool``
"""
if not x or len(x) != len(x.strip()):
return False
base = bare_msg_type(x)
if not is_legal_resource_name(base):
return False
    #parse array indices
x = x[len(base):]
state = 0
i = 0
for c in x:
if state == 0:
if c != '[':
return False
state = 1 #open
elif state == 1:
if c == ']':
state = 0 #closed
else:
try:
int(c)
except:
return False
return state == 0
def is_valid_constant_type(x):
"""
:returns: ``True`` if the name is a legal constant type. Only simple types are allowed, ``bool``
"""
return x in PRIMITIVE_TYPES
def is_valid_msg_field_name(x):
"""
    :returns: ``True`` if the name is a syntactically legal message field name, ``bool``
"""
return is_legal_resource_base_name(x)
# msg spec representation ##########################################
class Constant(object):
"""
Container class for holding a Constant declaration
Attributes:
- ``type``
- ``name``
- ``val``
- ``val_text``
"""
__slots__ = ['type', 'name', 'val', 'val_text']
def __init__(self, type_, name, val, val_text):
"""
:param type_: constant type, ``str``
:param name: constant name, ``str``
:param val: constant value, ``str``
:param val_text: Original text definition of *val*, ``str``
"""
        if type_ is None or name is None or val is None or val_text is None:
raise ValueError('Constant must have non-None parameters')
self.type = type_
self.name = name.strip() #names are always stripped of whitespace
self.val = val
self.val_text = val_text
def __eq__(self, other):
if not isinstance(other, Constant):
return False
return self.type == other.type and self.name == other.name and self.val == other.val
def __repr__(self):
return "%s %s=%s"%(self.type, self.name, self.val)
def __str__(self):
return "%s %s=%s"%(self.type, self.name, self.val)
class Field(object):
"""
Container class for storing information about a single field in a MsgSpec
Attributes:
- ``name``
- ``type``
- ``base_type``
- ``is_array``
- ``array_len``
- ``is_builtin``
- ``is_header``
"""
def __init__(self, name, type):
self.name = name
self.type = type
(self.base_type, self.is_array, self.array_len) = parse_type(type)
self.is_header = is_header_type(self.type)
self.is_builtin = is_builtin(self.base_type)
def __eq__(self, other):
if not isinstance(other, Field):
return False
else:
return self.name == other.name and \
self.type == other.type
def __repr__(self):
return "[%s, %s, %s, %s, %s]"%(self.name, self.type, self.base_type, self.is_array, self.array_len)
class MsgSpec(object):
"""
Container class for storing loaded msg description files. Field
types and names are stored in separate lists with 1-to-1
correspondence. MsgSpec can also return an md5 of the source text.
"""
def __init__(self, types, names, constants, text, full_name, package = '', short_name = ''):
"""
:param types: list of field types, in order of declaration, ``[str]``
:param names: list of field names, in order of declaration, ``[str]``
:param constants: List of :class:`Constant` declarations, ``[Constant]``
:param text: text of declaration, ``str``
:raises: :exc:`InvalidMsgSpec` If spec is invalid (e.g. fields with the same name)
"""
alt_package, alt_short_name = package_resource_name(full_name)
if not package:
package = alt_package
if not short_name:
short_name = alt_short_name
self.types = types
if len(set(names)) != len(names):
raise InvalidMsgSpec("Duplicate field names in message: %s"%names)
self.names = names
self.constants = constants
assert len(self.types) == len(self.names), "len(%s) != len(%s)"%(self.types, self.names)
#Header.msg support
if (len(self.types)):
self.header_present = self.types[0] == HEADER_FULL_NAME and self.names[0] == 'header'
else:
self.header_present = False
self.text = text
self.full_name = full_name
self.short_name = short_name
self.package = package
try:
self._parsed_fields = [Field(name, type) for (name, type) in zip(self.names, self.types)]
except ValueError as e:
raise InvalidMsgSpec("invalid field: %s"%(e))
def fields(self):
"""
        :returns: zip list of types and names (e.g. [('int32', 'x'), ('int32', 'y')]), ``[(str,str),]``
"""
return list(zip(self.types, self.names)) #py3k
def parsed_fields(self):
"""
:returns: list of :class:`Field` classes, ``[Field,]``
"""
return self._parsed_fields
def has_header(self):
"""
        :returns: ``True`` if msg description contains a 'Header header'
declaration at the beginning, ``bool``
"""
return self.header_present
def __eq__(self, other):
if not other or not isinstance(other, MsgSpec):
return False
return self.types == other.types and self.names == other.names and \
self.constants == other.constants and self.text == other.text and \
self.full_name == other.full_name and self.short_name == other.short_name and \
self.package == other.package
def __ne__(self, other):
if not other or not isinstance(other, MsgSpec):
return True
return not self.__eq__(other)
def __repr__(self):
if self.constants:
return "MsgSpec[%s, %s, %s]"%(repr(self.constants), repr(self.types), repr(self.names))
else:
return "MsgSpec[%s, %s]"%(repr(self.types), repr(self.names))
def __str__(self):
return self.text
# .msg file routines ##############################################################
# adjustable constants, in case we change our minds
HEADER = 'Header'
TIME = 'time'
DURATION = 'duration'
HEADER_FULL_NAME = 'std_msgs/Header'
def is_header_type(msg_type):
"""
:param msg_type: message type name, ``str``
:returns: ``True`` if *msg_type* refers to the ROS Header type, ``bool``
"""
# for backwards compatibility, include roslib/Header. REP 100
return msg_type in [HEADER, HEADER_FULL_NAME, 'roslib/Header']
# time and duration types are represented as aggregate data structures
# for the purposes of serialization from the perspective of
# roslib.msgs. genmsg_py will do the additional special handling required
# to convert them into rospy.msg.Time/Duration instances.
## time as msg spec. time is unsigned
TIME_MSG = "uint32 secs\nuint32 nsecs"
## duration as msg spec. duration is just like time except signed
DURATION_MSG = "int32 secs\nint32 nsecs"
## primitive types are those for which we allow constants, i.e. have primitive representation
PRIMITIVE_TYPES = ['int8','uint8','int16','uint16','int32','uint32','int64','uint64','float32','float64',
'string',
'bool',
# deprecated:
'char','byte']
BUILTIN_TYPES = PRIMITIVE_TYPES + [TIME, DURATION]
def is_builtin(msg_type_name):
"""
:param msg_type_name: name of message type, ``str``
:returns: True if msg_type_name is a builtin/primitive type, ``bool``
"""
return msg_type_name in BUILTIN_TYPES
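# --- Hedged demo (not part of the original module): exercises the helpers
# above with illustrative types/names only. ---
def _demo_msg_helpers():
    # bare_msg_type strips the array suffix
    assert bare_msg_type('std_msgs/String[]') == 'std_msgs/String'
    # resolve_type qualifies bare names with the package context,
    # leaving builtin types untouched
    assert resolve_type('String', 'std_msgs') == 'std_msgs/String'
    assert resolve_type('uint16', 'std_msgs') == 'uint16'
    # A two-field spec; full_name supplies package and short name
    spec = MsgSpec(['float64', 'float64'], ['x', 'y'], [],
                   'float64 x\nfloat64 y', 'geometry_msgs/Point2D')
    assert spec.fields() == [('float64', 'x'), ('float64', 'y')]
    assert not spec.has_header()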
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineExtensionImagesOperations(object):
"""VirtualMachineExtensionImagesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-03-30"
self.config = config
def get(
self, location, publisher_name, type, version, custom_headers=None, raw=False, **operation_config):
"""Gets a virtual machine extension image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param version:
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineExtensionImage or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.compute.v2016_03_30.models.VirtualMachineExtensionImage or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineExtensionImage', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_types(
self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of virtual machine extension image types.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineExtensionImage]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_versions(
self, location, publisher_name, type, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of virtual machine extension image versions.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineExtensionImage]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
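# --- Hedged usage sketch (not part of the generated module) ---
# These operations are normally reached through the compute management client
# rather than instantiated directly; the credentials, subscription id and
# publisher/type values below are placeholders, and the exact client
# construction depends on the SDK version in use:
#
#     from azure.mgmt.compute import ComputeManagementClient
#     client = ComputeManagementClient(credentials, subscription_id)
#     types = client.virtual_machine_extension_images.list_types(
#         location='westus', publisher_name='Microsoft.Azure.Extensions')
#     versions = client.virtual_machine_extension_images.list_versions(
#         location='westus', publisher_name='Microsoft.Azure.Extensions',
#         type='CustomScript', top=5, orderby='name desc')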
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc.
# @author: Bo Link, VMware, Inc.
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.vshield.common import (
constants as vcns_const)
from neutron.plugins.vmware.vshield.common.constants import RouterStatus
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks.constants import TaskState
from neutron.plugins.vmware.vshield.tasks.constants import TaskStatus
from neutron.plugins.vmware.vshield.tasks import tasks
LOG = logging.getLogger(__name__)
class EdgeApplianceDriver(object):
def __init__(self):
# store the last task per edge that has the latest config
self.updated_task = {
'nat': {},
'route': {},
}
def _assemble_edge(self, name, appliance_size="compact",
deployment_container_id=None, datacenter_moid=None,
enable_aesni=True, hypervisor_assist=False,
enable_fips=False, remote_access=False):
edge = {
'name': name,
'fqdn': name,
'hypervisorAssist': hypervisor_assist,
'type': 'gatewayServices',
'enableAesni': enable_aesni,
'enableFips': enable_fips,
'cliSettings': {
'remoteAccess': remote_access
},
'appliances': {
'applianceSize': appliance_size
},
'vnics': {
'vnics': []
}
}
if deployment_container_id:
edge['appliances']['deploymentContainerId'] = (
deployment_container_id)
if datacenter_moid:
            edge['datacenterMoid'] = datacenter_moid
return edge
def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
appliance = {}
if resource_pool_id:
appliance['resourcePoolId'] = resource_pool_id
if datastore_id:
appliance['datastoreId'] = datastore_id
return appliance
def _assemble_edge_vnic(self, name, index, portgroup_id,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
enable_proxy_arp=False,
enable_send_redirects=True,
is_connected=True,
mtu=1500):
vnic = {
'index': index,
'name': name,
'type': type,
'portgroupId': portgroup_id,
'mtu': mtu,
'enableProxyArp': enable_proxy_arp,
'enableSendRedirects': enable_send_redirects,
'isConnected': is_connected
}
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'IpAddressesDto'
}
vnic['addressGroups'] = {
'addressGroups': [address_group]
}
return vnic
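    # Illustrative result (not part of the original code): for
    # _assemble_edge_vnic('vnic0', 1, 'pg-123', '10.0.0.1', '255.255.255.0')
    # the dict above contains index=1, type='internal', mtu=1500 and an
    # 'addressGroups' entry whose primaryAddress is '10.0.0.1' and whose
    # subnetMask is '255.255.255.0'.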
def _edge_status_to_level(self, status):
if status == 'GREEN':
status_level = RouterStatus.ROUTER_STATUS_ACTIVE
elif status in ('GREY', 'YELLOW'):
status_level = RouterStatus.ROUTER_STATUS_DOWN
else:
status_level = RouterStatus.ROUTER_STATUS_ERROR
return status_level
def _enable_loadbalancer(self, edge):
if not edge.get('featureConfigs') or (
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'loadbalancer_4.0',
'enabled': True})
def get_edge_status(self, edge_id):
try:
response = self.vcns.get_edge_status(edge_id)[1]
status_level = self._edge_status_to_level(
response['edgeStatus'])
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to get edge status:\n%s"),
e.response)
status_level = RouterStatus.ROUTER_STATUS_ERROR
try:
desc = jsonutils.loads(e.response)
if desc.get('errorCode') == (
vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
status_level = RouterStatus.ROUTER_STATUS_DOWN
except ValueError:
LOG.exception(e.response)
return status_level
def get_edges_statuses(self):
edges_status_level = {}
edges = self._get_edges()
for edge in edges['edgePage'].get('data', []):
edge_id = edge['id']
status = edge['edgeStatus']
edges_status_level[edge_id] = self._edge_status_to_level(status)
return edges_status_level
def _update_interface(self, task):
edge_id = task.userdata['edge_id']
config = task.userdata['config']
LOG.debug(_("VCNS: start updating vnic %s"), config)
try:
self.vcns.update_interface(edge_id, config)
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n"
"%(response)s"), {
'config': config,
'response': e.response})
raise e
except Exception as e:
LOG.exception(_("VCNS: Failed to update vnic %d"),
config['index'])
raise e
return TaskStatus.COMPLETED
def update_interface(self, router_id, edge_id, index, network,
address=None, netmask=None, secondary=None,
jobdata=None):
LOG.debug(_("VCNS: update vnic %(index)d: %(addr)s %(netmask)s"), {
'index': index, 'addr': address, 'netmask': netmask})
if index == vcns_const.EXTERNAL_VNIC_INDEX:
name = vcns_const.EXTERNAL_VNIC_NAME
intf_type = 'uplink'
elif index == vcns_const.INTERNAL_VNIC_INDEX:
name = vcns_const.INTERNAL_VNIC_NAME
intf_type = 'internal'
else:
msg = _("Vnic %d currently not supported") % index
raise exceptions.VcnsGeneralException(msg)
config = self._assemble_edge_vnic(
name, index, network, address, netmask, secondary, type=intf_type)
userdata = {
'edge_id': edge_id,
'config': config,
'jobdata': jobdata
}
task_name = "update-interface-%s-%d" % (edge_id, index)
task = tasks.Task(task_name, router_id,
self._update_interface, userdata=userdata)
task.add_result_monitor(self.callbacks[0].interface_update_result)
self.task_manager.add(task)
return task
def _deploy_edge(self, task):
userdata = task.userdata
name = userdata['router_name']
LOG.debug(_("VCNS: start deploying edge %s"), name)
request = userdata['request']
try:
header = self.vcns.deploy_edge(request)[0]
objuri = header['location']
job_id = objuri[objuri.rfind("/") + 1:]
response = self.vcns.get_edge_id(job_id)[1]
edge_id = response['edgeId']
LOG.debug(_("VCNS: deploying edge %s"), edge_id)
userdata['edge_id'] = edge_id
status = TaskStatus.PENDING
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: deploy edge failed for router %s."),
name)
raise e
return status
def _status_edge(self, task):
edge_id = task.userdata['edge_id']
try:
response = self.vcns.get_edge_deploy_status(edge_id)[1]
task.userdata['retries'] = 0
system_status = response.get('systemStatus', None)
if system_status is None:
status = TaskStatus.PENDING
elif system_status == 'good':
status = TaskStatus.COMPLETED
else:
status = TaskStatus.ERROR
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Edge %s status query failed."), edge_id)
raise e
except Exception as e:
retries = task.userdata.get('retries', 0) + 1
if retries < 3:
task.userdata['retries'] = retries
msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. "
"Retry %(retries)d.") % {
'edge_id': edge_id,
'retries': retries}
LOG.exception(msg)
status = TaskStatus.PENDING
else:
msg = _("VCNS: Unable to retrieve edge %s status. "
"Abort.") % edge_id
LOG.exception(msg)
status = TaskStatus.ERROR
LOG.debug(_("VCNS: Edge %s status"), edge_id)
return status
def _result_edge(self, task):
router_name = task.userdata['router_name']
edge_id = task.userdata.get('edge_id')
if task.status != TaskStatus.COMPLETED:
LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s "
"for %(name)s, status %(status)d"), {
'edge_id': edge_id,
'name': router_name,
'status': task.status
})
else:
LOG.debug(_("VCNS: Edge %(edge_id)s deployed for "
"router %(name)s"), {
'edge_id': edge_id, 'name': router_name
})
def _delete_edge(self, task):
edge_id = task.userdata['edge_id']
LOG.debug(_("VCNS: start destroying edge %s"), edge_id)
status = TaskStatus.COMPLETED
if edge_id:
try:
self.vcns.delete_edge(edge_id)
except exceptions.ResourceNotFound:
pass
except exceptions.VcnsApiException as e:
msg = _("VCNS: Failed to delete %(edge_id)s:\n"
"%(response)s") % {
'edge_id': edge_id, 'response': e.response}
LOG.exception(msg)
status = TaskStatus.ERROR
except Exception:
LOG.exception(_("VCNS: Failed to delete %s"), edge_id)
status = TaskStatus.ERROR
return status
def _get_edges(self):
try:
return self.vcns.get_edges()[1]
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response)
raise e
def deploy_edge(self, router_id, name, internal_network, jobdata=None,
wait_for_exec=False, loadbalancer_enable=True):
task_name = 'deploying-%s' % name
edge_name = name
edge = self._assemble_edge(
edge_name, datacenter_moid=self.datacenter_moid,
deployment_container_id=self.deployment_container_id,
appliance_size='large', remote_access=True)
appliance = self._assemble_edge_appliance(self.resource_pool_id,
self.datastore_id)
if appliance:
edge['appliances']['appliances'] = [appliance]
vnic_external = self._assemble_edge_vnic(
vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
self.external_network, type="uplink")
edge['vnics']['vnics'].append(vnic_external)
vnic_inside = self._assemble_edge_vnic(
vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
internal_network,
vcns_const.INTEGRATION_EDGE_IPADDRESS,
vcns_const.INTEGRATION_SUBNET_NETMASK,
type="internal")
edge['vnics']['vnics'].append(vnic_inside)
if loadbalancer_enable:
self._enable_loadbalancer(edge)
userdata = {
'request': edge,
'router_name': name,
'jobdata': jobdata
}
task = tasks.Task(task_name, router_id,
self._deploy_edge,
status_callback=self._status_edge,
result_callback=self._result_edge,
userdata=userdata)
task.add_executed_monitor(self.callbacks[0].edge_deploy_started)
task.add_result_monitor(self.callbacks[0].edge_deploy_result)
self.task_manager.add(task)
if wait_for_exec:
            # wait until the deploy task is executed so edge_id is available
task.wait(TaskState.EXECUTED)
return task
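    # Illustrative usage (a sketch; the ids and names are placeholders):
    #
    #   task = driver.deploy_edge('router-1', 'edge-router-1',
    #                             'dvportgroup-42', wait_for_exec=True)
    #
    # With wait_for_exec=True the call blocks until the task reaches
    # TaskState.EXECUTED, at which point task.userdata['edge_id'] is set;
    # final success or failure arrives through the result monitor.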
def delete_edge(self, router_id, edge_id, jobdata=None):
task_name = 'delete-%s' % edge_id
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'jobdata': jobdata
}
task = tasks.Task(task_name, router_id, self._delete_edge,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].edge_delete_result)
self.task_manager.add(task)
return task
def _assemble_nat_rule(self, action, original_address,
translated_address,
vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
enabled=True):
nat_rule = {}
nat_rule['action'] = action
nat_rule['vnic'] = vnic_index
nat_rule['originalAddress'] = original_address
nat_rule['translatedAddress'] = translated_address
nat_rule['enabled'] = enabled
return nat_rule
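    # For example (an illustrative sketch; the addresses are placeholders):
    #
    #   self._assemble_nat_rule('snat', '10.0.0.0/24', '172.24.4.2')
    #   => {'action': 'snat', 'vnic': vcns_const.EXTERNAL_VNIC_INDEX,
    #       'originalAddress': '10.0.0.0/24',
    #       'translatedAddress': '172.24.4.2', 'enabled': True}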
def get_nat_config(self, edge_id):
try:
return self.vcns.get_nat_config(edge_id)[1]
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to get nat config:\n%s"),
e.response)
raise e
def _create_nat_rule(self, task):
# TODO(fank): use POST for optimization
# return rule_id for future reference
rule = task.userdata['rule']
LOG.debug(_("VCNS: start creating nat rules: %s"), rule)
edge_id = task.userdata['edge_id']
nat = self.get_nat_config(edge_id)
location = task.userdata['location']
del nat['version']
if location is None or location == vcns_const.APPEND:
nat['rules']['natRulesDtos'].append(rule)
else:
nat['rules']['natRulesDtos'].insert(location, rule)
try:
self.vcns.update_nat_config(edge_id, nat)
status = TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
e.response)
status = TaskStatus.ERROR
return status
def create_snat_rule(self, router_id, edge_id, src, translated,
jobdata=None, location=None):
LOG.debug(_("VCNS: create snat rule %(src)s/%(translated)s"), {
'src': src, 'translated': translated})
snat_rule = self._assemble_nat_rule("snat", src, translated)
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'rule': snat_rule,
'location': location,
'jobdata': jobdata
}
task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated)
task = tasks.Task(task_name, router_id, self._create_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].snat_create_result)
self.task_manager.add(task)
return task
def _delete_nat_rule(self, task):
# TODO(fank): pass in rule_id for optimization
# handle routes update for optimization
edge_id = task.userdata['edge_id']
address = task.userdata['address']
addrtype = task.userdata['addrtype']
LOG.debug(_("VCNS: start deleting %(type)s rules: %(addr)s"), {
'type': addrtype, 'addr': address})
nat = self.get_nat_config(edge_id)
del nat['version']
status = TaskStatus.COMPLETED
for nat_rule in nat['rules']['natRulesDtos']:
if nat_rule[addrtype] == address:
rule_id = nat_rule['ruleId']
try:
self.vcns.delete_nat_rule(edge_id, rule_id)
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to delete snat rule:\n"
"%s"), e.response)
status = TaskStatus.ERROR
return status
def delete_snat_rule(self, router_id, edge_id, src, jobdata=None):
LOG.debug(_("VCNS: delete snat rule %s"), src)
userdata = {
'edge_id': edge_id,
'address': src,
'addrtype': 'originalAddress',
'jobdata': jobdata
}
task_name = "delete-snat-%s-%s" % (edge_id, src)
task = tasks.Task(task_name, router_id, self._delete_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].snat_delete_result)
self.task_manager.add(task)
return task
def create_dnat_rule(self, router_id, edge_id, dst, translated,
jobdata=None, location=None):
# TODO(fank): use POST for optimization
# return rule_id for future reference
LOG.debug(_("VCNS: create dnat rule %(dst)s/%(translated)s"), {
'dst': dst, 'translated': translated})
dnat_rule = self._assemble_nat_rule(
"dnat", dst, translated)
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'rule': dnat_rule,
'location': location,
'jobdata': jobdata
}
task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated)
task = tasks.Task(task_name, router_id, self._create_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].dnat_create_result)
self.task_manager.add(task)
return task
def delete_dnat_rule(self, router_id, edge_id, translated,
jobdata=None):
# TODO(fank): pass in rule_id for optimization
LOG.debug(_("VCNS: delete dnat rule %s"), translated)
userdata = {
'edge_id': edge_id,
'address': translated,
'addrtype': 'translatedAddress',
'jobdata': jobdata
}
task_name = "delete-dnat-%s-%s" % (edge_id, translated)
task = tasks.Task(task_name, router_id, self._delete_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].dnat_delete_result)
self.task_manager.add(task)
return task
def _update_nat_rule(self, task):
# TODO(fank): use POST for optimization
# return rule_id for future reference
edge_id = task.userdata['edge_id']
if task != self.updated_task['nat'][edge_id]:
# this task does not have the latest config, abort now
# for speedup
return TaskStatus.ABORT
rules = task.userdata['rules']
LOG.debug(_("VCNS: start updating nat rules: %s"), rules)
nat = {
'featureType': 'nat',
'rules': {
'natRulesDtos': rules
}
}
try:
self.vcns.update_nat_config(edge_id, nat)
status = TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
e.response)
status = TaskStatus.ERROR
return status
def update_nat_rules(self, router_id, edge_id, snats, dnats,
jobdata=None):
LOG.debug(_("VCNS: update nat rule\n"
"SNAT:%(snat)s\n"
"DNAT:%(dnat)s\n"), {
'snat': snats, 'dnat': dnats})
nat_rules = []
for dnat in dnats:
nat_rules.append(self._assemble_nat_rule(
'dnat', dnat['dst'], dnat['translated']))
nat_rules.append(self._assemble_nat_rule(
'snat', dnat['translated'], dnat['dst']))
for snat in snats:
nat_rules.append(self._assemble_nat_rule(
'snat', snat['src'], snat['translated']))
userdata = {
'edge_id': edge_id,
'rules': nat_rules,
'jobdata': jobdata,
}
task_name = "update-nat-%s" % edge_id
task = tasks.Task(task_name, router_id, self._update_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].nat_update_result)
self.updated_task['nat'][edge_id] = task
self.task_manager.add(task)
return task
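    # Note that each dnat entry above also installs the mirror snat rule
    # (translated -> dst), so one call keeps both directions of a 1:1
    # floating-ip style mapping consistent. Illustrative input (addresses
    # are placeholders):
    #
    #   driver.update_nat_rules(
    #       router_id, edge_id,
    #       snats=[{'src': '10.0.0.0/24', 'translated': '172.24.4.2'}],
    #       dnats=[{'dst': '172.24.4.3', 'translated': '10.0.0.3'}])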
def _update_routes(self, task):
edge_id = task.userdata['edge_id']
if (task != self.updated_task['route'][edge_id] and
task.userdata.get('skippable', True)):
# this task does not have the latest config, abort now
# for speedup
return TaskStatus.ABORT
gateway = task.userdata['gateway']
routes = task.userdata['routes']
LOG.debug(_("VCNS: start updating routes for %s"), edge_id)
static_routes = []
for route in routes:
static_routes.append({
"description": "",
"vnic": vcns_const.INTERNAL_VNIC_INDEX,
"network": route['cidr'],
"nextHop": route['nexthop']
})
request = {
"staticRoutes": {
"staticRoutes": static_routes
}
}
if gateway:
request["defaultRoute"] = {
"description": "default-gateway",
"gatewayAddress": gateway,
"vnic": vcns_const.EXTERNAL_VNIC_INDEX
}
try:
self.vcns.update_routes(edge_id, request)
status = TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to update routes:\n%s"),
e.response)
status = TaskStatus.ERROR
return status
def update_routes(self, router_id, edge_id, gateway, routes,
skippable=True, jobdata=None):
if gateway:
gateway = gateway.split('/')[0]
userdata = {
'edge_id': edge_id,
'gateway': gateway,
'routes': routes,
'skippable': skippable,
'jobdata': jobdata
}
task_name = "update-routes-%s" % (edge_id)
task = tasks.Task(task_name, router_id, self._update_routes,
userdata=userdata)
task.add_result_monitor(self.callbacks[0].routes_update_result)
self.updated_task['route'][edge_id] = task
self.task_manager.add(task)
return task
def create_lswitch(self, name, tz_config, tags=None,
port_isolation=False, replication_mode="service"):
lsconfig = {
'display_name': utils.check_and_truncate(name),
"tags": tags or [],
"type": "LogicalSwitchConfig",
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
"transport_zones": tz_config
}
        if port_isolation:
lsconfig["port_isolation_enabled"] = port_isolation
if replication_mode:
lsconfig["replication_mode"] = replication_mode
response = self.vcns.create_lswitch(lsconfig)[1]
return response
def delete_lswitch(self, lswitch_id):
self.vcns.delete_lswitch(lswitch_id)
def get_loadbalancer_config(self, edge_id):
try:
header, response = self.vcns.get_loadbalancer_config(
edge_id)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get service config"))
return response
def enable_service_loadbalancer(self, edge_id):
config = self.get_loadbalancer_config(
edge_id)
if not config['enabled']:
config['enabled'] = True
try:
self.vcns.enable_service_loadbalancer(edge_id, config)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to enable loadbalancer "
"service config"))
|
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
"""
from __future__ import print_function
import os
import logging
import pickle
import itertools
from collections import defaultdict
from beets import autotag
from beets import library
from beets import dbcore
from beets import plugins
from beets import util
from beets import config
from beets.util import pipeline
from beets.util import syspath, normpath, displayable_path
from beets.util.enumeration import enum
from beets import mediafile
action = enum(
'SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID',
'ALBUMS', name='action'
)
QUEUE_SIZE = 128
SINGLE_ARTIST_THRESH = 0.25
VARIOUS_ARTISTS = u'Various Artists'
# Global logger.
log = logging.getLogger('beets')
class ImportAbort(Exception):
"""Raised when the user aborts the tagging operation.
"""
pass
# Utilities.
def _duplicate_check(lib, task):
"""Check whether an album already exists in the library. Returns a
list of Album objects (empty if no duplicates are found).
"""
assert task.choice_flag in (action.ASIS, action.APPLY)
artist, album = task.chosen_ident()
if artist is None:
# As-is import with no artist. Skip check.
return []
found_albums = []
cur_paths = set(i.path for i in task.items if i)
for album_cand in lib.albums(dbcore.MatchQuery('albumartist', artist)):
if album_cand.album == album:
# Check whether the album is identical in contents, in which
# case it is not a duplicate (will be replaced).
other_paths = set(i.path for i in album_cand.items())
if other_paths == cur_paths:
continue
found_albums.append(album_cand)
return found_albums
def _item_duplicate_check(lib, task):
"""Check whether an item already exists in the library. Returns a
list of Item objects.
"""
assert task.choice_flag in (action.ASIS, action.APPLY)
artist, title = task.chosen_ident()
found_items = []
query = dbcore.AndQuery((
dbcore.MatchQuery('artist', artist),
dbcore.MatchQuery('title', title),
))
for other_item in lib.items(query):
# Existing items not considered duplicates.
if other_item.path == task.item.path:
continue
found_items.append(other_item)
return found_items
def _infer_album_fields(task):
"""Given an album and an associated import task, massage the
album-level metadata. This ensures that the album artist is set
and that the "compilation" flag is set automatically.
"""
assert task.is_album
assert task.items
changes = {}
if task.choice_flag == action.ASIS:
# Taking metadata "as-is". Guess whether this album is VA.
plur_albumartist, freq = util.plurality(
[i.albumartist or i.artist for i in task.items])
if freq == len(task.items) or (freq > 1 and
float(freq) / len(task.items) >= SINGLE_ARTIST_THRESH):
# Single-artist album.
changes['albumartist'] = plur_albumartist
changes['comp'] = False
else:
# VA.
changes['albumartist'] = VARIOUS_ARTISTS
changes['comp'] = True
elif task.choice_flag == action.APPLY:
# Applying autotagged metadata. Just get AA from the first
# item.
for item in task.items:
if item is not None:
first_item = item
break
else:
assert False, "all items are None"
if not first_item.albumartist:
changes['albumartist'] = first_item.artist
if not first_item.mb_albumartistid:
changes['mb_albumartistid'] = first_item.mb_artistid
else:
assert False
# Apply new metadata.
for item in task.items:
if item is not None:
for k, v in changes.iteritems():
setattr(item, k, v)
def _resume():
"""Check whether an import should resume and return a boolean or the
string 'ask' indicating that the user should be queried.
"""
return config['import']['resume'].as_choice([True, False, 'ask'])
def _open_state():
"""Reads the state file, returning a dictionary."""
try:
with open(config['statefile'].as_filename()) as f:
return pickle.load(f)
except (IOError, EOFError):
return {}
def _save_state(state):
"""Writes the state dictionary out to disk."""
try:
with open(config['statefile'].as_filename(), 'w') as f:
pickle.dump(state, f)
except IOError as exc:
log.error(u'state file could not be written: %s' % unicode(exc))
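# A minimal sketch of how the two helpers above round-trip state (the key
# and value are placeholders, not ones beets actually stores):
#
#   state = _open_state()        # {} on a first run or unreadable file
#   state['example_key'] = ['a', 'b']
#   _save_state(state)
#   assert _open_state()['example_key'] == ['a', 'b']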
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
PROGRESS_KEY = 'tagprogress'
def progress_set(toppath, paths):
"""Record that tagging for the given `toppath` was successful up to
`paths`. If paths is None, then clear the progress value (indicating
that the tagging completed).
"""
state = _open_state()
if PROGRESS_KEY not in state:
state[PROGRESS_KEY] = {}
if paths is None:
# Remove progress from file.
if toppath in state[PROGRESS_KEY]:
del state[PROGRESS_KEY][toppath]
else:
state[PROGRESS_KEY][toppath] = paths
_save_state(state)
def progress_get(toppath):
"""Get the last successfully tagged subpath of toppath. If toppath
has no progress information, returns None.
"""
state = _open_state()
if PROGRESS_KEY not in state:
return None
return state[PROGRESS_KEY].get(toppath)
# Similarly, utilities for manipulating the "incremental" import log.
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
HISTORY_KEY = 'taghistory'
def history_add(paths):
"""Indicate that the import of the album in `paths` is completed and
should not be repeated in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
state[HISTORY_KEY] = set()
state[HISTORY_KEY].add(tuple(paths))
_save_state(state)
def history_get():
"""Get the set of completed path tuples in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
return set()
return state[HISTORY_KEY]
# Abstract session class.
class ImportSession(object):
"""Controls an import action. Subclasses should implement methods to
communicate with the user or otherwise make decisions.
"""
def __init__(self, lib, logfile, paths, query):
"""Create a session. `lib` is a Library object. `logfile` is a
file-like object open for writing or None if no logging is to be
performed. Either `paths` or `query` is non-null and indicates
the source of files to be imported.
"""
self.lib = lib
self.logfile = logfile
self.paths = paths
self.query = query
# Normalize the paths.
if self.paths:
self.paths = map(normpath, self.paths)
def _amend_config(self):
"""Make implied changes the importer configuration.
"""
# FIXME: Maybe this function should not exist and should instead
# provide "decision wrappers" like "should_resume()", etc.
iconfig = config['import']
# Incremental and progress are mutually exclusive.
if iconfig['incremental']:
iconfig['resume'] = False
# When based on a query instead of directories, never
# save progress or try to resume.
if self.query is not None:
iconfig['resume'] = False
iconfig['incremental'] = False
# Copy and move are mutually exclusive.
if iconfig['move']:
iconfig['copy'] = False
# Only delete when copying.
if not iconfig['copy']:
iconfig['delete'] = False
def tag_log(self, status, paths):
"""Log a message about a given album to logfile. The status should
reflect the reason the album couldn't be tagged.
"""
if self.logfile:
print(u'{0} {1}'.format(status, displayable_path(paths)),
file=self.logfile)
self.logfile.flush()
def log_choice(self, task, duplicate=False):
"""Logs the task's current choice if it should be logged. If
``duplicate``, then this is a secondary choice after a duplicate was
detected and a decision was made.
"""
paths = task.paths if task.is_album else [task.item.path]
if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump).
if task.remove_duplicates:
self.tag_log('duplicate-replace', paths)
elif task.choice_flag in (action.ASIS, action.APPLY):
self.tag_log('duplicate-keep', paths)
            elif task.choice_flag is action.SKIP:
self.tag_log('duplicate-skip', paths)
else:
# Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is action.ASIS:
self.tag_log('asis', paths)
elif task.choice_flag is action.SKIP:
self.tag_log('skip', paths)
def should_resume(self, path):
raise NotImplementedError
def choose_match(self, task):
raise NotImplementedError
def resolve_duplicate(self, task):
raise NotImplementedError
def choose_item(self, task):
raise NotImplementedError
def run(self):
"""Run the import task.
"""
self._amend_config()
# Set up the pipeline.
if self.query is None:
stages = [read_tasks(self)]
else:
stages = [query_tasks(self)]
if config['import']['singletons']:
# Singleton importer.
if config['import']['autotag']:
stages += [item_lookup(self), item_query(self)]
else:
stages += [item_progress(self)]
else:
# Whole-album importer.
if config['import']['group_albums']:
# Split directory tasks into one task for each album
stages += [group_albums(self)]
if config['import']['autotag']:
# Only look up and query the user when autotagging.
stages += [initial_lookup(self), user_query(self)]
else:
# When not autotagging, just display progress.
stages += [show_progress(self)]
stages += [apply_choices(self)]
for stage_func in plugins.import_stages():
stages.append(plugin_stage(self, stage_func))
stages += [manipulate_files(self)]
stages += [finalize(self)]
pl = pipeline.Pipeline(stages)
# Run the pipeline.
try:
if config['threaded']:
pl.run_parallel(QUEUE_SIZE)
else:
pl.run_sequential()
except ImportAbort:
# User aborted operation. Silently stop.
pass
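# Each pipeline stage below is a coroutine that follows the same protocol:
# it receives a task via ``task = yield task`` and sends the (possibly
# replaced) task downstream on the next loop iteration. A minimal sketch
# of a pass-through stage (illustrative only; not used by the importer):
#
#   def noop_stage(session):
#       task = None
#       while True:
#           task = yield task
#           if task.should_skip():
#               continue
#           # inspect or mutate ``task`` here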
# The importer task class.
class ImportTask(object):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
"""
def __init__(self, toppath=None, paths=None, items=None):
self.toppath = toppath
self.paths = paths
self.items = items
self.sentinel = False
self.remove_duplicates = False
self.is_album = True
self.choice_flag = None
@classmethod
def done_sentinel(cls, toppath):
"""Create an ImportTask that indicates the end of a top-level
directory import.
"""
obj = cls(toppath)
obj.sentinel = True
return obj
@classmethod
def progress_sentinel(cls, toppath, paths):
"""Create a task indicating that a single directory in a larger
import has finished. This is only required for singleton
imports; progress is implied for album imports.
"""
obj = cls(toppath, paths)
obj.sentinel = True
return obj
@classmethod
def item_task(cls, item):
"""Creates an ImportTask for a single item."""
obj = cls()
obj.item = item
obj.is_album = False
return obj
def set_candidates(self, cur_artist, cur_album, candidates, rec):
"""Sets the candidates for this album matched by the
`autotag.tag_album` method.
"""
assert self.is_album
assert not self.sentinel
self.cur_artist = cur_artist
self.cur_album = cur_album
self.candidates = candidates
self.rec = rec
def set_null_candidates(self):
"""Set the candidates to indicate no album match was found.
"""
self.cur_artist = None
self.cur_album = None
self.candidates = None
self.rec = None
def set_item_candidates(self, candidates, rec):
"""Set the match for a single-item task."""
assert not self.is_album
assert self.item is not None
self.candidates = candidates
self.rec = rec
def set_choice(self, choice):
"""Given an AlbumMatch or TrackMatch object or an action constant,
indicates that an action has been selected for this task.
"""
assert not self.sentinel
# Not part of the task structure:
assert choice not in (action.MANUAL, action.MANUAL_ID)
assert choice != action.APPLY # Only used internally.
if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS):
self.choice_flag = choice
self.match = None
else:
if self.is_album:
assert isinstance(choice, autotag.AlbumMatch)
else:
assert isinstance(choice, autotag.TrackMatch)
self.choice_flag = action.APPLY # Implicit choice.
self.match = choice
def save_progress(self):
"""Updates the progress state to indicate that this album has
finished.
"""
if self.sentinel and self.paths is None:
# "Done" sentinel.
progress_set(self.toppath, None)
elif self.sentinel or self.is_album:
# "Directory progress" sentinel for singletons or a real
# album task, which implies the same.
progress_set(self.toppath, self.paths)
def save_history(self):
"""Save the directory in the history for incremental imports.
"""
if self.is_album and self.paths and not self.sentinel:
history_add(self.paths)
# Logical decisions.
def should_write_tags(self):
"""Should new info be written to the files' metadata?"""
if self.choice_flag == action.APPLY:
return True
elif self.choice_flag in (action.ASIS, action.TRACKS, action.SKIP):
return False
else:
assert False
def should_skip(self):
"""After a choice has been made, returns True if this is a
sentinel or it has been marked for skipping.
"""
return self.sentinel or self.choice_flag == action.SKIP
# Convenient data.
def chosen_ident(self):
"""Returns identifying metadata about the current choice. For
albums, this is an (artist, album) pair. For items, this is
(artist, title). May only be called when the choice flag is ASIS
(in which case the data comes from the files' current metadata)
or APPLY (data comes from the choice).
"""
assert self.choice_flag in (action.ASIS, action.APPLY)
if self.is_album:
if self.choice_flag is action.ASIS:
return (self.cur_artist, self.cur_album)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.album)
else:
if self.choice_flag is action.ASIS:
return (self.item.artist, self.item.title)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.title)
def imported_items(self):
"""Return a list of Items that should be added to the library.
If this is an album task, return the list of items in the
selected match or everything if the choice is ASIS. If this is a
singleton task, return a list containing the item.
"""
if self.is_album:
if self.choice_flag == action.ASIS:
return list(self.items)
elif self.choice_flag == action.APPLY:
return self.match.mapping.keys()
else:
assert False
else:
return [self.item]
# Utilities.
def prune(self, filename):
"""Prune any empty directories above the given file. If this
task has no `toppath` or the file path provided is not within
the `toppath`, then this function has no effect. Similarly, if
the file still exists, no pruning is performed, so it's safe to
call when the file in question may not have been removed.
"""
if self.toppath and not os.path.exists(filename):
util.prune_dirs(os.path.dirname(filename),
self.toppath,
clutter=config['clutter'].as_str_seq())
# Full-album pipeline stages.
def read_tasks(session):
"""A generator yielding all the albums (as ImportTask objects) found
in the user-specified list of paths. In the case of a singleton
import, yields single-item tasks instead.
"""
# Look for saved progress.
if _resume():
resume_dirs = {}
for path in session.paths:
resume_dir = progress_get(path)
if resume_dir:
# Either accept immediately or prompt for input to decide.
if _resume() is True:
do_resume = True
log.warn('Resuming interrupted import of %s' % path)
else:
do_resume = session.should_resume(path)
if do_resume:
resume_dirs[path] = resume_dir
else:
# Clear progress; we're starting from the top.
progress_set(path, None)
# Look for saved incremental directories.
if config['import']['incremental']:
incremental_skipped = 0
history_dirs = history_get()
for toppath in session.paths:
# Check whether the path is to a file.
if config['import']['singletons'] and \
not os.path.isdir(syspath(toppath)):
try:
item = library.Item.from_path(toppath)
except mediafile.UnreadableFileError:
log.warn(u'unreadable file: {0}'.format(
util.displayable_path(toppath)
))
continue
yield ImportTask.item_task(item)
continue
# A flat album import merges all items into one album.
if config['import']['flat'] and not config['import']['singletons']:
all_items = []
for _, items in autotag.albums_in_dir(toppath):
all_items += items
yield ImportTask(toppath, toppath, all_items)
yield ImportTask.done_sentinel(toppath)
continue
# Produce paths under this directory.
if _resume():
resume_dir = resume_dirs.get(toppath)
for path, items in autotag.albums_in_dir(toppath):
# Skip according to progress.
if _resume() and resume_dir:
# We're fast-forwarding to resume a previous tagging.
if path == resume_dir:
# We've hit the last good path! Turn off the
# fast-forwarding.
resume_dir = None
continue
# When incremental, skip paths in the history.
if config['import']['incremental'] and tuple(path) in history_dirs:
log.debug(u'Skipping previously-imported path: %s' %
displayable_path(path))
incremental_skipped += 1
continue
# Yield all the necessary tasks.
if config['import']['singletons']:
for item in items:
yield ImportTask.item_task(item)
yield ImportTask.progress_sentinel(toppath, path)
else:
yield ImportTask(toppath, path, items)
# Indicate the directory is finished.
yield ImportTask.done_sentinel(toppath)
# Show skipped directories.
if config['import']['incremental'] and incremental_skipped:
log.info(u'Incremental import: skipped %i directories.' %
incremental_skipped)
def query_tasks(session):
"""A generator that works as a drop-in-replacement for read_tasks.
Instead of finding files from the filesystem, a query is used to
match items from the library.
"""
if config['import']['singletons']:
# Search for items.
for item in session.lib.items(session.query):
yield ImportTask.item_task(item)
else:
# Search for albums.
for album in session.lib.albums(session.query):
log.debug('yielding album %i: %s - %s' %
(album.id, album.albumartist, album.album))
items = list(album.items())
yield ImportTask(None, [album.item_dir()], items)
def initial_lookup(session):
"""A coroutine for performing the initial MusicBrainz lookup for an
album. It accepts lists of Items and yields
(items, cur_artist, cur_album, candidates, rec) tuples. If no match
is found, all of the yielded parameters (except items) are None.
"""
task = None
while True:
task = yield task
if task.should_skip():
continue
plugins.send('import_task_start', session=session, task=task)
log.debug('Looking up: %s' % displayable_path(task.paths))
task.set_candidates(
*autotag.tag_album(task.items)
)
def user_query(session):
"""A coroutine for interfacing with the user about the tagging
process.
    The coroutine accepts ImportTask objects. It uses the
    session's ``choose_match`` method to determine the ``action`` for
    this task. Depending on the action, additional stages are executed
    and the processed task is yielded.
    It emits the ``import_task_choice`` event for plugins. Plugins have
    access to the choice via the ``task.choice_flag`` property and may
    choose to change it.
"""
recent = set()
task = None
while True:
task = yield task
if task.should_skip():
continue
# Ask the user for a choice.
choice = session.choose_match(task)
task.set_choice(choice)
session.log_choice(task)
plugins.send('import_task_choice', session=session, task=task)
# As-tracks: transition to singleton workflow.
if task.choice_flag is action.TRACKS:
# Set up a little pipeline for dealing with the singletons.
def emitter(task):
for item in task.items:
yield ImportTask.item_task(item)
yield ImportTask.progress_sentinel(task.toppath, task.paths)
ipl = pipeline.Pipeline([
emitter(task),
item_lookup(session),
item_query(session),
])
task = pipeline.multiple(ipl.pull())
continue
# As albums: group items by albums and create task for each album
if task.choice_flag is action.ALBUMS:
def emitter(task):
yield task
ipl = pipeline.Pipeline([
emitter(task),
group_albums(session),
initial_lookup(session),
user_query(session)
])
task = pipeline.multiple(ipl.pull())
continue
# Check for duplicates if we have a match (or ASIS).
if task.choice_flag in (action.ASIS, action.APPLY):
ident = task.chosen_ident()
# The "recent" set keeps track of identifiers for recently
# imported albums -- those that haven't reached the database
# yet.
if ident in recent or _duplicate_check(session.lib, task):
session.resolve_duplicate(task)
session.log_choice(task, True)
recent.add(ident)
def show_progress(session):
"""This stage replaces the initial_lookup and user_query stages
when the importer is run without autotagging. It displays the album
name and artist as the files are added.
"""
task = None
while True:
task = yield task
if task.should_skip():
continue
log.info(displayable_path(task.paths))
# Behave as if ASIS were selected.
task.set_null_candidates()
task.set_choice(action.ASIS)
def apply_choices(session):
"""A coroutine for applying changes to albums and singletons during
the autotag process.
"""
task = None
while True:
task = yield task
if task.should_skip():
continue
items = task.imported_items()
# Clear IDs in case the items are being re-tagged.
for item in items:
item.id = None
item.album_id = None
# Change metadata.
if task.should_write_tags():
if task.is_album:
autotag.apply_metadata(
task.match.info, task.match.mapping
)
else:
autotag.apply_item_metadata(task.item, task.match.info)
plugins.send('import_task_apply', session=session, task=task)
# Infer album-level fields.
if task.is_album:
_infer_album_fields(task)
# Find existing item entries that these are replacing (for
# re-imports). Old album structures are automatically cleaned up
# when the last item is removed.
task.replaced_items = defaultdict(list)
for item in items:
dup_items = session.lib.items(
dbcore.query.BytesQuery('path', item.path)
)
for dup_item in dup_items:
task.replaced_items[item].append(dup_item)
log.debug('replacing item %i: %s' %
(dup_item.id, displayable_path(item.path)))
log.debug('%i of %i items replaced' % (len(task.replaced_items),
len(items)))
# Find old items that should be replaced as part of a duplicate
# resolution.
duplicate_items = []
if task.remove_duplicates:
if task.is_album:
for album in _duplicate_check(session.lib, task):
duplicate_items += album.items()
else:
duplicate_items = _item_duplicate_check(session.lib, task)
log.debug('removing %i old duplicated items' %
len(duplicate_items))
# Delete duplicate files that are located inside the library
# directory.
task.duplicate_paths = []
for duplicate_path in [i.path for i in duplicate_items]:
if session.lib.directory in util.ancestry(duplicate_path):
# Mark the path for deletion in the manipulate_files
# stage.
task.duplicate_paths.append(duplicate_path)
# Add items -- before path changes -- to the library. We add the
# items now (rather than at the end) so that album structures
# are in place before calls to destination().
with session.lib.transaction():
# Remove old items.
for replaced in task.replaced_items.itervalues():
for item in replaced:
item.remove()
for item in duplicate_items:
item.remove()
# Add new ones.
if task.is_album:
# Add an album.
album = session.lib.add_album(items)
task.album_id = album.id
else:
# Add tracks.
for item in items:
session.lib.add(item)
def plugin_stage(session, func):
"""A coroutine (pipeline stage) that calls the given function with
each non-skipped import task. These stages occur between applying
metadata changes and moving/copying/writing files.
"""
task = None
while True:
task = yield task
if task.should_skip():
continue
func(session, task)
# Stage may modify DB, so re-load cached item data.
for item in task.imported_items():
item.load()
def manipulate_files(session):
"""A coroutine (pipeline stage) that performs necessary file
manipulations *after* items have been added to the library.
"""
task = None
while True:
task = yield task
if task.should_skip():
continue
# Remove duplicate files marked for deletion.
if task.remove_duplicates:
for duplicate_path in task.duplicate_paths:
log.debug(u'deleting replaced duplicate %s' %
util.displayable_path(duplicate_path))
util.remove(duplicate_path)
util.prune_dirs(os.path.dirname(duplicate_path),
session.lib.directory)
# Move/copy/write files.
items = task.imported_items()
# Save the original paths of all items for deletion and pruning
# in the next step (finalization).
task.old_paths = [item.path for item in items]
for item in items:
if config['import']['move']:
# Just move the file.
item.move(False)
elif config['import']['copy']:
# If it's a reimport, move in-library files and copy
# out-of-library files. Otherwise, copy and keep track
# of the old path.
old_path = item.path
if task.replaced_items[item]:
# This is a reimport. Move in-library files and copy
# out-of-library files.
if session.lib.directory in util.ancestry(old_path):
item.move(False)
# We moved the item, so remove the
# now-nonexistent file from old_paths.
task.old_paths.remove(old_path)
else:
item.move(True)
else:
# A normal import. Just copy files and keep track of
# old paths.
item.move(True)
if config['import']['write'] and task.should_write_tags():
try:
item.write()
except mediafile.UnreadableFileError as exc:
                    log.error(u'error while writing ({0}): {1}'.format(
exc,
util.displayable_path(item.path)
))
except util.FilesystemError as exc:
exc.log(log)
# Save new paths.
with session.lib.transaction():
for item in items:
item.store()
# Plugin event.
plugins.send('import_task_files', session=session, task=task)
def finalize(session):
"""A coroutine that finishes up importer tasks. In particular, the
coroutine sends plugin events, deletes old files, and saves
progress. This is a "terminal" coroutine (it yields None).
"""
while True:
task = yield
if task.should_skip():
if _resume():
task.save_progress()
if config['import']['incremental']:
task.save_history()
continue
items = task.imported_items()
# Announce that we've added an album.
if task.is_album:
album = session.lib.get_album(task.album_id)
plugins.send('album_imported',
lib=session.lib, album=album)
else:
for item in items:
plugins.send('item_imported',
lib=session.lib, item=item)
# When copying and deleting originals, delete old files.
if config['import']['copy'] and config['import']['delete']:
new_paths = [os.path.realpath(item.path) for item in items]
for old_path in task.old_paths:
# Only delete files that were actually copied.
if old_path not in new_paths:
util.remove(syspath(old_path), False)
task.prune(old_path)
# When moving, prune empty directories containing the original
# files.
elif config['import']['move']:
for old_path in task.old_paths:
task.prune(old_path)
# Update progress.
if _resume():
task.save_progress()
if config['import']['incremental']:
task.save_history()
# Singleton pipeline stages.
def item_lookup(session):
"""A coroutine used to perform the initial MusicBrainz lookup for
an item task.
"""
task = None
while True:
task = yield task
if task.should_skip():
continue
plugins.send('import_task_start', session=session, task=task)
task.set_item_candidates(*autotag.tag_item(task.item))
def item_query(session):
"""A coroutine that queries the user for input on single-item
lookups.
"""
task = None
recent = set()
while True:
task = yield task
if task.should_skip():
continue
choice = session.choose_item(task)
task.set_choice(choice)
session.log_choice(task)
plugins.send('import_task_choice', session=session, task=task)
# Duplicate check.
if task.choice_flag in (action.ASIS, action.APPLY):
ident = task.chosen_ident()
if ident in recent or _item_duplicate_check(session.lib, task):
session.resolve_duplicate(task)
session.log_choice(task, True)
recent.add(ident)
def item_progress(session):
"""Skips the lookup and query stages in a non-autotagged singleton
import. Just shows progress.
"""
task = None
log.info('Importing items:')
while True:
task = yield task
if task.should_skip():
continue
log.info(displayable_path(task.item.path))
task.set_null_candidates()
task.set_choice(action.ASIS)
def group_albums(session):
"""Group the items of a task by albumartist and album name and create a new
task for each album. Yield the tasks as a multi message.
"""
def group(item):
return (item.albumartist or item.artist, item.album)
task = None
while True:
task = yield task
if task.should_skip():
continue
tasks = []
for _, items in itertools.groupby(task.items, group):
tasks.append(ImportTask(items=list(items)))
tasks.append(ImportTask.progress_sentinel(task.toppath, task.paths))
task = pipeline.multiple(tasks)
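# Note on ``group_albums`` above: ``itertools.groupby`` only merges
# *adjacent* items with equal keys, so it assumes ``task.items`` already
# arrives ordered by (albumartist, album). A defensive variant (a sketch,
# not what this version does) would sort first:
#
#   for _, items in itertools.groupby(sorted(task.items, key=group),
#                                     key=group):
#       tasks.append(ImportTask(items=list(items)))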
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read a
VOTable file.
"""
# STDLIB
import io
import os
import sys
import textwrap
import warnings
# LOCAL
from . import exceptions
from . import tree
from astropy.utils.xml import iterparser
from astropy.utils import data
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate',
'reset_vo_warnings']
VERIFY_OPTIONS = ['ignore', 'warn', 'exception']
@deprecated_renamed_argument('pedantic', 'verify', since='5.0')
def parse(source, columns=None, invalid='exception', verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None,
table_id=None, filename=None, unit_format=None,
datatype_mapping=None, _debug_python_based_parser=False):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
        The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.UnitBase` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
from . import conf
invalid = invalid.lower()
if invalid not in ('exception', 'mask'):
raise ValueError("accepted values of ``invalid`` are: "
"``'exception'`` or ``'mask'``.")
if verify is None:
conf_verify_lowercase = conf.verify.lower()
# We need to allow verify to be booleans as strings since the
# configuration framework doesn't make it easy/possible to have mixed
# types.
if conf_verify_lowercase in ['false', 'true']:
verify = conf_verify_lowercase == 'true'
else:
verify = conf_verify_lowercase
if isinstance(verify, bool):
verify = 'exception' if verify else 'warn'
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
'columns': columns,
'invalid': invalid,
'verify': verify,
'chunk_size': chunk_size,
'table_number': table_number,
'filename': filename,
'unit_format': unit_format,
'datatype_mapping': datatype_mapping
}
if filename is None and isinstance(source, str):
config['filename'] = source
with iterparser.get_xml_iterator(
source,
_debug_python_based_parser=_debug_python_based_parser) as iterator:
return tree.VOTableFile(
config=config, pos=(1, 1)).parse(iterator, config)
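# Example usage (a sketch; ``votable.xml`` is a placeholder path):
#
#   votable = parse('votable.xml', verify='warn')
#   for table in votable.iter_tables():
#       print(table.array)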
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
"""
if kwargs.get('table_number') is None:
kwargs['table_number'] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
        :ref:`astropy:votable-serialization`.
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance")
table.to_xml(file, tabledata_format=tabledata_format,
_debug_python_based_parser=True)
def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
    source : path-like or file-like
        Path to a VOTABLE_ xml file, or a `~pathlib.Path`
        object pointing to one.
        If a file-like object, it must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
"""
from astropy.utils.console import print_code_line, color_print
return_as_str = False
if output is None:
output = io.StringIO()
return_as_str = True
lines = []
votable = None
reset_vo_warnings()
with data.get_readable_fileobj(source, encoding='binary') as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, 'name'):
filename = source.name
elif hasattr(source, 'url'):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify='warn', filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [str(x.message) for x in warning_lines if
issubclass(x.category, exceptions.VOWarning)] + lines
content_buffer.seek(0)
output.write(f"Validation report for {filename}\n\n")
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w['is_something']:
output.write(w['message'])
output.write('\n\n')
else:
line = xml_lines[w['nline'] - 1]
warning = w['warning']
if w['is_warning']:
color = 'yellow'
else:
color = 'red'
color_print(
f"{w['nline']:d}: ", '',
warning or 'EXC', color,
': ', '',
textwrap.fill(
w['message'],
initial_indent=' ',
subsequent_indent=' ').lstrip(),
file=output)
print_code_line(line, w['nchar'], file=output)
output.write('\n')
else:
output.write('astropy.io.votable found no violations.\n\n')
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(
filename, version)
if success != 0:
output.write(
'xmllint schema violations:\n\n')
output.write(stderr.decode('utf-8'))
else:
output.write('xmllint passed\n')
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
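# Example usage (a sketch; the path is a placeholder). With the default
# ``output`` the report is printed and a bool is returned; with
# ``output=None`` the report itself is returned as a string:
#
#   is_valid = validate('votable.xml')
#   report = validate('votable.xml', output=None)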
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : path-like or file-like
Path or file object containing a VOTABLE_ xml file.
If file, must be readable.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != 'xml':
return False
break
for start, tag, d, pos in iterator:
if tag != 'VOTABLE':
return False
break
return True
except ValueError:
return False
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
try:
del module.__warningregistry__
except AttributeError:
pass
|
|
##
# Copyright (c) 2015, David Jurgens
#
# All rights reserved. See LICENSE file for details
##
import codecs
import collections
import json
import operator
import random
import logging
from geopy.point import Point
from geopy import distance
import os.path
import itertools
import gzip
from multiprocessing import Process, Queue, cpu_count
from Queue import Full as QueueFull
from Queue import Empty as QueueEmpty
from gimethod import GIMethod, GIModel
import multiprocessing
logger = logging.getLogger(os.path.basename(__file__))
time_per_infer_user = 0
num_users_inferred = 0
time_per_geometric_median = 0
num_geometric_median = 0
class SpatialLabelPropagationModel(GIModel):
def __init__(self, user_id_to_location):
self.user_id_to_location = user_id_to_location
def infer_post_location(self, post):
if not "user" in post:
return None
user = post["user"]
if not "id" in user:
return None
user_id = user["id"]
# If we know this user's location, report their home location
if user_id in self.user_id_to_location:
return self.user_id_to_location[user_id]
else:
return None
def infer_posts_by_user(self, posts):
if len(posts) == 0:
return None
        # Each post is assumed to originate from the user's home location, so
        # just infer the first post's location
# infer the first post's location
home_location = self.infer_post_location(posts[0])
if home_location is None:
return None
# Then fill the array of post locations with the home location
locations = []
for i in range(0, len(posts)):
locations.append((home_location.latitude, home_location.longitude))
return locations
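# Illustrative usage of the model above (the user id and coordinates are
# made up):
#
#   from geopy.point import Point
#   model = SpatialLabelPropagationModel({42: Point(40.7, -74.0)})
#   model.infer_post_location({'user': {'id': 42}})  # -> Point(40.7, -74.0)
#   model.infer_post_location({'user': {'id': 99}})  # -> None (unknown user)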
class SpatialLabelPropagation(GIMethod):
def __init__(self):
# Location is represented as a lat/lon geopy Point
self.user_id_to_location = {}
def train_model(self, setting, dataset, model_dir):
"""
Runs spatial label propagation (SLP) on the bi-directional @mention
network present in the dataset. The initial locations for SLP are
set by identifying individuals with at least five GPS-tagged posts
within 15km of each other.
"""
logger.debug('Loading mention network')
mention_network = dataset.bi_mention_network()
all_users = set(mention_network.nodes())
        print('Loaded network with %d users and %d edges'
              % (len(mention_network), mention_network.size()))
# This dict will contain a mapping from each user ID associated with at
# least 5 posts within a 15km radius to the user's home location
print('Loading known user locations')
user_to_home_loc = {user: loc for (user, loc) in dataset.user_home_location_iter()}
print('Loaded gold-standard locations of %s users (%s)'
% (len(user_to_home_loc),
float(len(user_to_home_loc)) / len(all_users)))
# This dictionary is where we currently think a user is. The subset of
# users with known GPS-based home locations will always have their
# gold-standard location set in this dict (i.e., it's not an estimate)
user_to_estimated_location = {}
# Update the initial data with the gold standard data
user_to_estimated_location.update(user_to_home_loc)
# This dictionary is the next prediction of where we think a user is
# based on its neighbors. This dict is separate from the current
# estimate to avoid mixing the two estimates during inference time.
user_to_next_estimated_location = {}
# TODO: make this configurable from the settings variable
num_iterations = 5
#not sure if below line should be included
all_users = all_users | set(user_to_home_loc.keys())
num_users = len(all_users)
for iteration in range(0, num_iterations):
logger.debug('Beginning iteration %s' % iteration)
num_located_at_start = len(user_to_estimated_location)
num_processed = 0
for user_id in all_users:
self.update_user_location(user_id, mention_network,
user_to_home_loc,
user_to_estimated_location,
user_to_next_estimated_location)
num_processed += 1
if num_processed % 100000 == 0:
print('In iteration %d, processed %d users out of %d, located %d'
% (iteration, num_processed, num_users, len(user_to_next_estimated_location)))
num_located_at_end = len(user_to_next_estimated_location)
print('At end of iteration %s, located %s users (%s new)' %
(iteration, num_located_at_end,
num_located_at_end - num_located_at_start))
# Replace all the old location estimates with what we estimated
# from this iteration
user_to_estimated_location.update(user_to_next_estimated_location)
logger.info("Saving model (%s locations) to %s"
% (len(user_to_estimated_location), model_dir))
# Short circuit early if the caller has specified that the model is not
# to be saved into a directory
if model_dir is None:
return SpatialLabelPropagationModel(user_to_estimated_location)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
fh = open(os.path.join(model_dir, 'user-id-to-location.tsv'), 'w')
for user_id, loc in user_to_estimated_location.iteritems():
fh.write("%s\t%s\t%s\n" % (user_id, loc[0], loc[1]))
fh.close()
return SpatialLabelPropagationModel(user_to_estimated_location)
def update_user_location(self, user_id, mention_network,
user_to_home_loc, user_to_estimated_location,
user_to_next_estimated_location):
"""
Uses the provided social network and estimated user locations to update
the location of the specified user_id in the
user_to_next_estimated_location dict. Users who have a home location
(defined from GPS data) will always be updated with their home location.
"""
# Short-circuit if we already know where this user is located
# so that we always preserve the "hint" going forward
if user_id in user_to_home_loc:
user_to_next_estimated_location[user_id] = user_to_home_loc[user_id]
return
# For each of the users in the user's ego network, get their estimated
# location, if any
locations = []
for neighbor_id in mention_network.neighbors_iter(user_id):
if neighbor_id in user_to_estimated_location:
locations.append(user_to_estimated_location[neighbor_id])
# If we have at least one location from the neighbors, use the
# list of locations to infer a location for this individual.
if len(locations) > 0:
# NOTE: the median here could be replaced by any number of
# functions (some of which we tried in the ICWSM paper).
# For example, the social density method that Derek
# suggested would replace the geometric median here as how
# we estimate a user's location from their neighbors.
median = get_geometric_median(locations)
user_to_next_estimated_location[user_id] = median
def load_model(self, model_dir, settings):
"""
Reads in the user-id to location mapping from a file as the trained
model.
"""
user_id_to_location = {}
model_file = gzip.open(os.path.join(model_dir, "user-to-lat-lon.tsv.gz"), 'r')
for line in model_file:
cols = line.split("\t")
user_id = cols[0]
lat = cols[1]
lon = cols[2]
user_id_to_location[user_id] = (float(lat), float(lon))
print 'NUM USERS: %d' % len(user_id_to_location)
return SpatialLabelPropagationModel(user_id_to_location)
def get_user_location(user):
# print user
# print foo
user_id = user["user_id"]
posts = user["posts"]
#print "getting location of %s" % user_id
# get_home_location() returns None if no reliable location was found
return user_id, get_home_location(posts)
def get_home_location(posts):
"""
Returns the estimated home location of this user from their GPS-tagged
posts, or None if the user could not be associated with any location
"""
# The list of observed GPS locations for this user
locations = []
# Cycle through the posts and extract GPS locations
for post in posts:
if not "coordinates" in post:
continue
#print post
coords = post["coordinates"]
if coords is None:
continue
#print coords
if not "type" in coords:
continue
coord_type = coords["type"]
if coord_type is None:
continue
if not coord_type == "Point":
continue
coord_arr = coords["coordinates"]
lat = coord_arr[0]
lon = coord_arr[1]
locations.append(Point(lat, lon))
#logger.debug('Found %s GPS-tagged locations' % len(locations))
#print 'Found %s GPS-tagged locations' % len(locations)
# We need at least 5 GPS tweets to infer a reliable home location
if len(locations) < 5:
return None
# See if we can find at least 5 tweets within 15km of each other
if has_home(locations):
# Return the center as a proxy for this user's home location
return get_geometric_median(locations)
else:
# Return that the user has no home location
return None
def has_home(locations):
"""
Returns True if the locations contain a subset of at least five points
that are all within 15km of each other
"""
n = len(locations)
cur_locs = []
for i in range(0, n-4):
# Try adding the next location to start the search
cur_locs.append(locations[i])
for j in range(i+1, n):
# If we recursively find a match starting from the current seed,
# return success
if can_find_home_match(cur_locs, locations, j, n):
return True
# Otherwise, remove the current seed and see if a different location
# can be a member of a subset matching the desired constraints
cur_locs.pop()
return False
def can_find_home_match(cur_locs, locations, next_index, n):
"""
Searches the list of locations to see if some combination of locations
starting at next_index can be added to the locations currently in
cur_locs that satisfy the constraint that all locations in cur_locs
must be at most 15km from each other. If 5 such points are found,
return success
"""
# The next location to test
loc2 = locations[next_index]
# Check that the next point that could be added (at next_index) would
# satisfy the distance requirement with the current location group
for loc1 in cur_locs:
if get_distance(loc1, loc2) > 15:
return False
# Push on the next location, to see if we can meet the requirements
# while it is a member of the group
cur_locs.append(locations[next_index])
# If we have 5 locations that are all within 15km, return success!
if len(cur_locs) == 5:
return True
# Search the remaining locations to see if some combination can satisfy
# the requirements when this new location is added to the group
for j in range(next_index+1, n):
if can_find_home_match(cur_locs, locations, j, n):
return True
# Remove the last item added since no match could be found when it is a
# member of the current location group
cur_locs.pop()
return False
def get_geometric_median(coordinates):
"""
Returns the geometric median of the list of locations.
"""
n = len(coordinates)
# For one or two points the brute-force search below is unnecessary: a
# single point is its own median, and with two points either one
# minimizes the total distance, so just return one of them.
if n == 1:
return coordinates[0]
elif n == 2:
return coordinates[random.randint(0, 1)]
min_distance_sum = float('inf')  # larger than any possible distance sum
median = None # Point type
# Loop through all the points, finding the point that minimizes the
# geodetic distance to all other points. By construction median will
# always be assigned to some non-None value by the end of the loop.
for i in range(0, n):
p1 = coordinates[i]
dist_sum = 0
for j in range(0, n):
# Skip self-comparison
if i == j:
continue
p2 = coordinates[j]
dist = get_distance(p1, p2)
dist_sum += dist
# Abort early if we already know this isn't the median
if dist_sum > min_distance_sum:
break
if dist_sum < min_distance_sum:
min_distance_sum = dist_sum
median = p1
return median
def get_distance(p1, p2):
"""
Computes the distance between the two latitude-longitude Points using
Vincenty's Formula
"""
return distance.distance(p1, p2).kilometers
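# ---------------------------------------------------------------------------
# Hedged, minimal usage sketch (not part of the original module). It only
# exercises the self-contained helpers above; the coordinates and user IDs
# below are made up for illustration, and a real run would obtain locations
# through train_model() and the dataset instead. Running the module directly
# still requires its own imports (e.g. gimethod, geopy) to resolve.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Three points clustered in one city plus one far-away outlier; the
    # geometric median should be one of the clustered points.
    sample_points = [Point(40.7128, -74.0060),
                     Point(40.7130, -74.0055),
                     Point(40.7127, -74.0070),
                     Point(34.0522, -118.2437)]
    median = get_geometric_median(sample_points)
    print('geometric median: %s, %s' % (median.latitude, median.longitude))
    # A trained model is essentially a user-id -> (lat, lon) lookup table;
    # posts from users it has never seen yield None.
    model = SpatialLabelPropagationModel(
        {'user-1': (median.latitude, median.longitude)})
    print(model.infer_post_location({'user': {'id': 'user-1'}}))
    print(model.infer_post_location({'user': {'id': 'unknown-user'}}))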
|
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import netaddr
from oslo_log import log as logging
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import constants as n_consts
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
HA_DEV_PREFIX = 'ha-'
IP_MONITOR_PROCESS_SERVICE = 'ip_monitor'
class HaRouter(router.RouterInfo):
def __init__(self, state_change_callback, *args, **kwargs):
super(HaRouter, self).__init__(*args, **kwargs)
self.ha_port = None
self.keepalived_manager = None
self.state_change_callback = state_change_callback
@property
def is_ha(self):
# TODO(Carl) Remove when refactoring to use sub-classes is complete.
return self.router is not None
@property
def ha_priority(self):
return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY)
@property
def ha_vr_id(self):
return self.router.get('ha_vr_id')
@property
def ha_state(self):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'r') as f:
return f.read()
except (OSError, IOError):
LOG.debug('Error while reading HA state for %s', self.router_id)
return None
@ha_state.setter
def ha_state(self, new_state):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'w') as f:
f.write(new_state)
except (OSError, IOError):
LOG.error(_LE('Error while writing HA state for %s'),
self.router_id)
def initialize(self, process_monitor):
super(HaRouter, self).initialize(process_monitor)
ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if not ha_port:
LOG.error(_LE('Unable to process HA router %s without HA port'),
self.router_id)
return
self.ha_port = ha_port
self._init_keepalived_manager(process_monitor)
self.ha_network_added()
self.update_initial_state(self.state_change_callback)
self.spawn_state_change_monitor(process_monitor)
def _init_keepalived_manager(self, process_monitor):
self.keepalived_manager = keepalived.KeepalivedManager(
self.router['id'],
keepalived.KeepalivedConf(),
process_monitor,
conf_path=self.agent_conf.ha_confs_path,
namespace=self.ns_name)
config = self.keepalived_manager.config
interface_name = self.get_ha_device_name()
subnets = self.ha_port.get('subnets', [])
ha_port_cidrs = [subnet['cidr'] for subnet in subnets]
instance = keepalived.KeepalivedInstance(
'BACKUP',
interface_name,
self.ha_vr_id,
ha_port_cidrs,
nopreempt=True,
advert_int=self.agent_conf.ha_vrrp_advert_int,
priority=self.ha_priority)
instance.track_interfaces.append(interface_name)
if self.agent_conf.ha_vrrp_auth_password:
# TODO(safchain): use oslo.config types when it will be available
# in order to check the validity of ha_vrrp_auth_type
instance.set_authentication(self.agent_conf.ha_vrrp_auth_type,
self.agent_conf.ha_vrrp_auth_password)
config.add_instance(instance)
def enable_keepalived(self):
self.keepalived_manager.spawn()
def disable_keepalived(self):
self.keepalived_manager.disable()
conf_dir = self.keepalived_manager.get_conf_dir()
shutil.rmtree(conf_dir)
def _get_keepalived_instance(self):
return self.keepalived_manager.config.get_instance(self.ha_vr_id)
def _get_primary_vip(self):
return self._get_keepalived_instance().get_primary_vip()
def get_ha_device_name(self):
return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN]
def ha_network_added(self):
interface_name = self.get_ha_device_name()
self.driver.plug(self.ha_port['network_id'],
self.ha_port['id'],
interface_name,
self.ha_port['mac_address'],
namespace=self.ns_name,
prefix=HA_DEV_PREFIX)
ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips'])
self.driver.init_l3(interface_name, ip_cidrs,
namespace=self.ns_name,
preserve_ips=[self._get_primary_vip()])
def ha_network_removed(self):
self.driver.unplug(self.get_ha_device_name(),
namespace=self.ns_name,
prefix=HA_DEV_PREFIX)
self.ha_port = None
def _add_vip(self, ip_cidr, interface, scope=None):
instance = self._get_keepalived_instance()
instance.add_vip(ip_cidr, interface, scope)
def _remove_vip(self, ip_cidr):
instance = self._get_keepalived_instance()
instance.remove_vip_by_ip_address(ip_cidr)
def _clear_vips(self, interface):
instance = self._get_keepalived_instance()
instance.remove_vips_vroutes_by_interface(interface)
def _get_cidrs_from_keepalived(self, interface_name):
instance = self._get_keepalived_instance()
return instance.get_existing_vip_ip_addresses(interface_name)
def get_router_cidrs(self, device):
return set(self._get_cidrs_from_keepalived(device.name))
def routes_updated(self):
new_routes = self.router['routes']
instance = self._get_keepalived_instance()
instance.virtual_routes.extra_routes = [
keepalived.KeepalivedVirtualRoute(
route['destination'], route['nexthop'])
for route in new_routes]
self.routes = new_routes
def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
default_gw_rts = []
gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
for gw_ip in gateway_ips:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
default_gw = (n_consts.IPv4_ANY if
netaddr.IPAddress(gw_ip).version == 4 else
n_consts.IPv6_ANY)
instance = self._get_keepalived_instance()
default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
default_gw, gw_ip, interface_name))
instance.virtual_routes.gateway_routes = default_gw_rts
if enable_ra_on_gw:
self.driver.configure_ipv6_ra(self.ns_name, interface_name)
def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
"""Only the master should have any IP addresses configured.
Let keepalived manage IPv6 link local addresses, the same way we let
it manage IPv4 addresses. If the router is not in the master state,
we must delete the address first as it is autoconfigured by the kernel.
"""
manager = self.keepalived_manager
if manager.get_process().active:
if self.ha_state != 'master':
conf = manager.get_conf_on_disk()
managed_by_keepalived = conf and ipv6_lladdr in conf
if managed_by_keepalived:
return False
else:
return False
return True
def _disable_ipv6_addressing_on_interface(self, interface_name):
"""Disable IPv6 link local addressing on the device and add it as
a VIP to keepalived. This means that the IPv6 link local address
will only be present on the master.
"""
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address)
if self._should_delete_ipv6_lladdr(ipv6_lladdr):
device.addr.flush(n_consts.IP_VERSION_6)
self._remove_vip(ipv6_lladdr)
self._add_vip(ipv6_lladdr, interface_name, scope='link')
def _add_gateway_vip(self, ex_gw_port, interface_name):
for ip_cidr in common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
self._add_default_gw_virtual_route(ex_gw_port, interface_name)
def add_floating_ip(self, fip, interface_name, device):
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
self._add_vip(ip_cidr, interface_name)
# TODO(Carl) Should this return status?
# return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
self._remove_vip(ip_cidr)
def internal_network_updated(self, interface_name, ip_cidrs):
self._clear_vips(interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
for ip_cidr in ip_cidrs:
self._add_vip(ip_cidr, interface_name)
def internal_network_added(self, port):
port_id = port['id']
interface_name = self.get_internal_device_name(port_id)
self.driver.plug(port['network_id'],
port_id,
interface_name,
port['mac_address'],
namespace=self.ns_name,
prefix=router.INTERNAL_DEV_PREFIX)
self._disable_ipv6_addressing_on_interface(interface_name)
for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
def internal_network_removed(self, port):
super(HaRouter, self).internal_network_removed(port)
interface_name = self.get_internal_device_name(port['id'])
self._clear_vips(interface_name)
def _get_state_change_monitor_process_manager(self):
return external_process.ProcessManager(
self.agent_conf,
'%s.monitor' % self.router_id,
self.ns_name,
default_cmd_callback=self._get_state_change_monitor_callback())
def _get_state_change_monitor_callback(self):
ha_device = self.get_ha_device_name()
ha_cidr = self._get_primary_vip()
def callback(pid_file):
cmd = [
'neutron-keepalived-state-change',
'--router_id=%s' % self.router_id,
'--namespace=%s' % self.ns_name,
'--conf_dir=%s' % self.keepalived_manager.get_conf_dir(),
'--monitor_interface=%s' % ha_device,
'--monitor_cidr=%s' % ha_cidr,
'--pid_file=%s' % pid_file,
'--state_path=%s' % self.agent_conf.state_path,
'--user=%s' % os.geteuid(),
'--group=%s' % os.getegid()]
return cmd
return callback
def spawn_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
pm.enable()
process_monitor.register(
self.router_id, IP_MONITOR_PROCESS_SERVICE, pm)
def destroy_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
process_monitor.unregister(
self.router_id, IP_MONITOR_PROCESS_SERVICE)
pm.disable()
def update_initial_state(self, callback):
ha_device = ip_lib.IPDevice(
self.get_ha_device_name(),
self.ns_name)
addresses = ha_device.addr.list()
cidrs = (address['cidr'] for address in addresses)
ha_cidr = self._get_primary_vip()
state = 'master' if ha_cidr in cidrs else 'backup'
self.ha_state = state
callback(self.router_id, state)
def external_gateway_added(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
self._add_gateway_vip(ex_gw_port, interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
def external_gateway_updated(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips'])
for old_gateway_cidr in ip_cidrs:
self._remove_vip(old_gateway_cidr)
self._add_gateway_vip(ex_gw_port, interface_name)
def external_gateway_removed(self, ex_gw_port, interface_name):
self._clear_vips(interface_name)
super(HaRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
def delete(self, agent):
self.destroy_state_change_monitor(self.process_monitor)
self.ha_network_removed()
self.disable_keepalived()
super(HaRouter, self).delete(agent)
def process(self, agent):
super(HaRouter, self).process(agent)
if self.ha_port:
self.enable_keepalived()
def enable_radvd(self, internal_ports=None):
if (self.keepalived_manager.get_process().active and
self.ha_state == 'master'):
super(HaRouter, self).enable_radvd(internal_ports)
|
|
# Licensed under the Apache License. See footer for details.
# usage: <program> <build-dir> <cache-dir>
import os
import re
import sys
import json
import time
import semver # from: https://github.com/k-bx/python-semver
import shutil
import urllib
import subprocess
#-------------------------------------------------------------------------------
Program = os.path.basename(sys.argv[0])
BuildDir = sys.argv[1]
CacheDir = sys.argv[2]
TmpDir = os.path.join(BuildDir, "..", "tmp")
DeployDir = os.path.join(os.path.dirname(sys.argv[0]), "..", "deploy")
NodeDir = None
NodeVersionsAll = None
NodeVersionsStable = None
NodeVersionLatest = None
# http://nodejs.org/dist/v0.10.20/node-v0.10.20-linux-x64.tar.gz
# http://nodejs.org/dist/v0.10.20/node-v0.10.20-darwin-x64.tar.gz
DownloadRoot = "http://nodejs.org/dist/"
if sys.platform.startswith("linux"):
Platform = "linux-x64"
elif sys.platform.startswith("darwin"):
Platform = "darwin-x64"
#-------------------------------------------------------------------------------
def main():
timeStart = time.time()
# set up tmp dir
if os.path.exists(TmpDir):
shutil.rmtree(TmpDir)
os.mkdir(TmpDir)
# create cache dir
mkdirp(CacheDir)
log("build dir: %s" % BuildDir)
log("cache dir: %s" % CacheDir)
log("deploy dir: %s" % DeployDir)
log("platform: %s" % Platform)
log()
# get list of versions available from node
nodeVersionsHtml = cacheFileName("nodejs-versions.html")
getCached(nodeVersionsHtml, "http://nodejs.org/dist/")
getNodeVersions(nodeVersionsHtml)
downloadNode(NodeVersionLatest)
log()
downloadWiki()
shutil.rmtree(NodeDir)
# done - print elapsed time
timeElapsed = time.time() - timeStart
log()
log("build time: %.1f seconds" % timeElapsed)
#-------------------------------------------------------------------------------
def fixPackageJSON():
name = os.path.join(BuildDir, "wiki", "package.json")
file = open(name, "r")
contents = file.read()
file.close()
obj = json.loads(contents)
del obj["optionalDependencies"]["level"]
contents = json.dumps(obj, indent=4)
file = open(name, "w")
file.write(contents)
file.close()
#-------------------------------------------------------------------------------
def downloadWiki():
origDir = os.getcwd()
os.chdir(BuildDir)
if os.path.exists("wiki"): shutil.rmtree("wiki")
cmd = ["git", "clone", "https://github.com/WardCunningham/wiki.git"]
runCommandEcho(cmd)
fixPackageJSON()
os.chdir("wiki")
runCommandEcho([os.path.join(NodeDir, "bin", "npm"), "install"])
runCommandEcho([os.path.join(NodeDir, "bin", "npm"), "install", "grunt-cli"])
runCommandEcho([
os.path.join(NodeDir, "bin", "node"),
os.path.join("node_modules", ".bin", "grunt"),
"build"
])
shutil.copy2(
os.path.join(DeployDir, "cf-wiki.coffee"),
BuildDir
)
os.chdir(origDir)
#-------------------------------------------------------------------------------
def downloadNode(version):
global NodeDir
log("downloading and unpacking node %s" % version)
nodeDownload = "%s/v%s/node-v%s-%s.tar.gz" % (
DownloadRoot,
version,
version,
Platform
)
nodeArchive = "node-%s-%s.tar.gz" % (version, Platform)
nodeArchive = cacheFileName(nodeArchive)
getCached(nodeArchive, nodeDownload)
cmd = ["tar", "xvf", nodeArchive, "-C", TmpDir]
runCommandQuiet(cmd)
appBinDir = buildFileName("bin")
NodeDir = tmpFileName("node-v%s-%s" % (version, Platform))
mkdirp(appBinDir)
shutil.copy2(os.path.join(NodeDir, "bin", "node"), os.path.join(appBinDir, "node"))
#-------------------------------------------------------------------------------
def runCommandQuiet(cmd):
env = os.environ.copy()
env["HOME"] = CacheDir
log("running: %s" % (" ".join(cmd)))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
(out, err) = proc.communicate()
return (proc.returncode, out, err)
#-------------------------------------------------------------------------------
def runCommandEcho(cmd):
env = os.environ.copy()
env["HOME"] = CacheDir
log()
log("running: %s" % (" ".join(cmd)))
proc = subprocess.Popen(cmd, env=env)
(out, err) = proc.communicate()
log()
return (proc.returncode, out, err)
#-------------------------------------------------------------------------------
def getNodeVersions(nodeVersionsHtml):
global NodeVersionsAll
global NodeVersionsStable
global NodeVersionLatest
nodeVersionsFile = open(nodeVersionsHtml)
nodeVersionsContent = nodeVersionsFile.read()
nodeVersionsFile.close()
pattern = r"<a.*?>v(\d*)\.(\d*)\.(\d*)/</a>"
regex = re.compile(pattern)
NodeVersionsAll = []
NodeVersionsStable = []
for match in regex.finditer(nodeVersionsContent):
version = "%s.%s.%s" % (match.group(1), match.group(2), match.group(3))
NodeVersionsAll.append(version)
minorVersion = int(match.group(2), 10)
if minorVersion % 2 == 0:
NodeVersionsStable.append(version)
NodeVersionsStable.sort(semver.compare)
NodeVersionsStable.reverse()
NodeVersionsAll.sort(semver.compare)
NodeVersionsAll.reverse()
NodeVersionLatest = NodeVersionsStable[0]
#-------------------------------------------------------------------------------
def getPackageJSON():
packageJSONname = buildFileName("package.json")
if not os.path.exists(packageJSONname):
error("file package.json not found")
packageJSONfile = open(packageJSONname)
packageJSONstr = packageJSONfile.read()
packageJSONfile.close()
return json.loads(packageJSONstr)
#-------------------------------------------------------------------------------
def getCached(cacheFile, remoteURL):
# fullFile = cacheFileName(cacheFile)
if os.path.exists(cacheFile):
log("using cached version of %s" % cacheFile)
return
log( "downloading new copy of %s" % cacheFile)
urllib.urlretrieve(remoteURL, cacheFile)
#-------------------------------------------------------------------------------
def tmpFileName(fileName):
return os.path.join(TmpDir, fileName)
#-------------------------------------------------------------------------------
def cacheFileName(fileName):
return os.path.join(CacheDir, fileName)
#-------------------------------------------------------------------------------
def buildFileName(fileName):
return os.path.join(BuildDir, fileName)
#-------------------------------------------------------------------------------
def mkdirp(dir):
if os.path.exists(dir): return
os.makedirs(dir)
#-------------------------------------------------------------------------------
def error(message):
log()
log("*** ERROR ***")
log(message)
log("*** ERROR ***")
sys.exit(1)
#-------------------------------------------------------------------------------
def log(message=""):
if message == "":
print ""
return
print "%s: %s" % (Program, message)
#-------------------------------------------------------------------------------
main()
#-------------------------------------------------------------------------------
# Copyright 2013 Patrick Mueller
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
|
|
from __future__ import with_statement
from cms.views import details
from cms.utils.compat.dj import force_unicode
from cms.api import create_page, create_title
from cms.cms_toolbar import ADMIN_MENU_IDENTIFIER
from cms.toolbar.items import ToolbarAPIMixin, LinkItem, ItemSearchResult
from cms.toolbar.toolbar import CMSToolbar
from cms.middleware.toolbar import ToolbarMiddleware
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from django.contrib.auth.models import AnonymousUser, User, Permission
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.functional import lazy
class ToolbarTestBase(SettingsOverrideTestCase):
def get_page_request(self, page, user, path=None, edit=False, lang_code='en'):
path = path or page and page.get_absolute_url()
if edit:
path += '?edit'
request = RequestFactory().get(path)
request.session = {}
request.user = user
request.LANGUAGE_CODE = lang_code
if edit:
request.GET = {'edit': None}
else:
request.GET = {'edit_off': None}
request.current_page = page
mid = ToolbarMiddleware()
mid.process_request(request)
return request
def get_anon(self):
return AnonymousUser()
def get_staff(self):
staff = User(
username='staff',
email='staff@staff.org',
is_active=True,
is_staff=True,
)
staff.set_password('staff')
staff.save()
staff.user_permissions.add(Permission.objects.get(codename='change_page'))
return staff
def get_nonstaff(self):
nonstaff = User(
username='nonstaff',
email='nonstaff@staff.org',
is_active=True,
is_staff=False,
)
nonstaff.set_password('nonstaff')
nonstaff.save()
nonstaff.user_permissions.add(Permission.objects.get(codename='change_page'))
return nonstaff
def get_superuser(self):
superuser = User(
username='superuser',
email='superuser@superuser.org',
is_active=True,
is_staff=True,
is_superuser=True,
)
superuser.set_password('superuser')
superuser.save()
return superuser
class ToolbarTests(ToolbarTestBase):
settings_overrides = {'CMS_PERMISSION': False}
def test_no_page_anon(self):
request = self.get_page_request(None, self.get_anon(), '/')
toolbar = CMSToolbar(request)
items = toolbar.get_left_items() + toolbar.get_right_items()
self.assertEqual(len(items), 0)
def test_no_page_staff(self):
request = self.get_page_request(None, self.get_staff(), '/')
toolbar = CMSToolbar(request)
items = toolbar.get_left_items() + toolbar.get_right_items()
# Logo + edit-mode + admin-menu + logout
self.assertEqual(len(items), 3, items)
admin_items = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'Test').get_items()
self.assertEqual(len(admin_items), 6, admin_items)
def test_no_page_superuser(self):
request = self.get_page_request(None, self.get_superuser(), '/')
toolbar = CMSToolbar(request)
items = toolbar.get_left_items() + toolbar.get_right_items()
# Logo + edit-mode + admin-menu + logout
self.assertEqual(len(items), 3)
admin_items = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'Test').get_items()
self.assertEqual(len(admin_items), 7, admin_items)
def test_anon(self):
page = create_page('test', 'nav_playground.html', 'en')
request = self.get_page_request(page, self.get_anon())
toolbar = CMSToolbar(request)
items = toolbar.get_left_items() + toolbar.get_right_items()
self.assertEqual(len(items), 0)
def test_nonstaff(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_nonstaff())
toolbar = CMSToolbar(request)
items = toolbar.get_left_items() + toolbar.get_right_items()
# Logo + edit-mode + logout
self.assertEqual(len(items), 0)
def test_template_change_permission(self):
with SettingsOverride(CMS_PERMISSIONS=True):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_nonstaff())
toolbar = CMSToolbar(request)
items = toolbar.get_left_items() + toolbar.get_right_items()
self.assertEqual([item for item in items if item.css_class_suffix == 'templates'], [])
def test_markup(self):
create_page("toolbar-page", "nav_playground.html", "en", published=True)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get('/en/?edit')
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'nav_playground.html')
self.assertContains(response, '<div id="cms_toolbar"')
self.assertContains(response, 'cms.placeholders.js')
self.assertContains(response, 'cms.placeholders.css')
def test_markup_generic_module(self):
create_page("toolbar-page", "col_two.html", "en", published=True)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get('/en/?edit')
self.assertEquals(response.status_code, 200)
self.assertContains(response, '<div class="cms_submenu-item cms_submenu-item-title"><span>Generic</span>')
def test_markup_flash_custom_module(self):
superuser = self.get_superuser()
create_page("toolbar-page", "col_two.html", "en", published=True)
with self.login_user_context(superuser):
response = self.client.get('/en/?edit')
self.assertEquals(response.status_code, 200)
self.assertContains(response, 'href="LinkPlugin">Add a link')
self.assertContains(response, '<div class="cms_submenu-item cms_submenu-item-title"><span>Different Grouper</span>')
def test_show_toolbar_to_staff(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, self.get_staff(), '/')
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.show_toolbar)
def test_show_toolbar_with_edit(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, AnonymousUser(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.show_toolbar)
def test_show_toolbar_without_edit(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, AnonymousUser(), edit=False)
toolbar = CMSToolbar(request)
self.assertFalse(toolbar.show_toolbar)
def test_publish_button(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_superuser(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.edit_mode)
items = toolbar.get_left_items() + toolbar.get_right_items()
self.assertEqual(len(items), 7)
def test_no_publish_button(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_staff(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(page.has_change_permission(request))
self.assertFalse(page.has_publish_permission(request))
self.assertTrue(toolbar.edit_mode)
items = toolbar.get_left_items() + toolbar.get_right_items()
# Logo + edit-mode + templates + page-menu + admin-menu + logout
self.assertEqual(len(items), 6)
def test_no_change_button(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
user = self.get_staff()
user.user_permissions.all().delete()
request = self.get_page_request(page, user, edit=True)
toolbar = CMSToolbar(request)
self.assertFalse(page.has_change_permission(request))
self.assertFalse(page.has_publish_permission(request))
items = toolbar.get_left_items() + toolbar.get_right_items()
# Logo + page-menu + admin-menu + logout
self.assertEqual(len(items), 3, items)
admin_items = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'Test').get_items()
self.assertEqual(len(admin_items), 6, admin_items)
def test_button_consistency_staff(self):
"""
Tests that the buttons remain even when the language changes.
"""
user = self.get_staff()
cms_page = create_page('test-en', 'nav_playground.html', 'en', published=True)
create_title('de', 'test-de', cms_page)
en_request = self.get_page_request(cms_page, user, edit=True)
en_toolbar = CMSToolbar(en_request)
self.assertEqual(len(en_toolbar.get_left_items() + en_toolbar.get_right_items()), 6)
de_request = self.get_page_request(cms_page, user, path='/de/', edit=True, lang_code='de')
de_toolbar = CMSToolbar(de_request)
self.assertEqual(len(de_toolbar.get_left_items() + de_toolbar.get_right_items()), 6)
class ToolbarAPITests(TestCase):
def test_find_item(self):
api = ToolbarAPIMixin()
first = api.add_link_item('First', 'http://www.example.org')
second = api.add_link_item('Second', 'http://www.example.org')
all_links = api.find_items(LinkItem)
self.assertEqual(len(all_links), 2)
result = api.find_first(LinkItem, name='First')
self.assertNotEqual(result, None)
self.assertEqual(result.index, 0)
self.assertEqual(result.item, first)
result = api.find_first(LinkItem, name='Second')
self.assertNotEqual(result, None)
self.assertEqual(result.index, 1)
self.assertEqual(result.item, second)
no_result = api.find_first(LinkItem, name='Third')
self.assertEqual(no_result, None)
def test_find_item_lazy(self):
lazy_attribute = lazy(lambda x: x, str)('Test')
api = ToolbarAPIMixin()
api.add_link_item(lazy_attribute, None)
result = api.find_first(LinkItem, name='Test')
self.assertNotEqual(result, None)
self.assertEqual(result.index, 0)
def test_not_is_staff(self):
request = RequestFactory().get('/en/?edit')
request.session = {}
request.LANGUAGE_CODE = 'en'
request.user = AnonymousUser()
toolbar = CMSToolbar(request)
self.assertEqual(len(toolbar.get_left_items()), 0)
self.assertEqual(len(toolbar.get_right_items()), 0)
def test_item_search_result(self):
item = object()
result = ItemSearchResult(item, 2)
self.assertEqual(result.item, item)
self.assertEqual(int(result), 2)
result += 2
self.assertEqual(result.item, item)
self.assertEqual(result.index, 4)
|
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import (CInv, msg_block, msg_getdata)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
def __init__(self):
"""Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log
# outside the BitcoinTestFramework
pass
class ExampleTest(BitcoinTestFramework):
# Each functional test is a subclass of the BitcoinTestFramework class.
# Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
This method must be overridden and num_nodes must be explicitly set."""
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run
# self.log before run_test()
# Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present.
# This test uses generate which requires wallet to be compiled
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], self.nodes[1])
self.sync_all([self.nodes[0:1]])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
BitcoinTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
# Creating P2P connections will wait for a verack to make sure the
# connection is fully up
self.nodes[0].add_p2p_connection(BaseNode())
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all([self.nodes[0:1]])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(
self.nodes[0].getbestblockhash())['time'] + 1
height = 1
for i in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(
self.tip, create_coinbase(height), self.block_time)
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our
# P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info(
"Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], self.nodes[2])
self.log.info("Add P2P connection to node2")
self.nodes[0].disconnect_p2ps()
self.nodes[2].add_p2p_connection(BaseNode())
self.log.info(
"Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
wait_until(lambda: sorted(blocks) == sorted(
list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global
# lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
|
|
#
# pololu/motors/qik2s9v1.py
#
# Usual device on Linux: /dev/ttyUSB0
#
"""
This code was written to work with the Pololu Qik 2s9v1 motor controller.
http://www.pololu.com/catalog/product/1110
by Carl J. Nobile
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__docformat__ = "reStructuredText en"
from .qik import Qik
class Qik2s9v1(Qik):
"""
Implementation of the Pololu motor controller interface for the Qik 2s9v1
board.
"""
DEFAULT_DEVICE_ID = 0x09
DEFAULT_SERIAL_TIMEOUT = 0.262
_COMMAND = {
'get-fw-version': 0x01,
'get-error': 0x02,
'get-config': 0x03,
'set-config': 0x04,
'm0-coast': 0x06,
'm1-coast': 0x07,
'm0-forward-7bit': 0x08,
'm0-forward-8bit': 0x09,
'm0-reverse-7bit': 0x0A,
'm0-reverse-8bit': 0x0B,
'm1-forward-7bit': 0x0C,
'm1-forward-8bit': 0x0D,
'm1-reverse-7bit': 0x0E,
'm1-reverse-8bit': 0x0F,
}
_ERRORS = {
0: 'OK',
1: 'Bit 0 Unused',
2: 'Bit 1 Unused',
4: 'Bit 2 Unused',
8: 'Data Overrun Error',
16: 'Frame Error',
32: 'CRC Error',
64: 'Format Error',
128: 'Timeout Error',
}
DEVICE_ID = 0x00
PWM_PARAM = 0x01
MOTOR_ERR_SHUTDOWN = 0x02
SERIAL_TIMEOUT = 0x03
_CONFIG_NUM = {
DEVICE_ID: 'Device ID',
PWM_PARAM: 'PWM Parameter',
MOTOR_ERR_SHUTDOWN: 'Shutdown Motors on Error',
SERIAL_TIMEOUT: 'Serial Timeout',
}
_CONFIG_PWM = {
0: (31500, '7-Bit, PWM Frequency 31.5kHz'),
1: (15700, '8-Bit, PWM Frequency 15.7 kHz'),
2: (7800, '7-Bit, PWM Frequency 7.8 kHz'),
3: (3900, '8-Bit, PWM Frequency 3.9 kHz'),
}
_CONFIG_PWM_TO_VALUE = dict(((v[0], k) for k, v in _CONFIG_PWM.items()))
def __init__(self, device, baud=38400, readTimeout=None, writeTimeout=None,
log=None):
super(Qik2s9v1, self).__init__(device, baud, readTimeout, writeTimeout,
log)
self.findConnectedDevices()
def _deviceCallback(self, device, config):
config['version'] = self._getFirmwareVersion(device)
config['pwm'] = self._getConfig(self.PWM_PARAM, device)
config['shutdown'] = self._getConfig(self.MOTOR_ERR_SHUTDOWN, device)
config['timeout'] = self._getSerialTimeout(device)
def getFirmwareVersion(self, device=DEFAULT_DEVICE_ID):
"""
Get the firmware version of the Qik 2s9v1 hardware.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Returns:
An integer indicating the version number.
"""
return self._getFirmwareVersion(device)
def getError(self, device=DEFAULT_DEVICE_ID, message=True):
"""
Get the error message or value stored in the Qik 2s9v1 hardware.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
A list of text messages, integers, or an empty list. See the
`message` parameter above.
"""
return self._getError(device, message)
def getDeviceID(self, device=DEFAULT_DEVICE_ID):
"""
Get the device ID.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Returns:
An integer number of the hardware device ID.
"""
return self._getDeviceID(device)
def getPWMFrequency(self, device=DEFAULT_DEVICE_ID, message=True):
"""
Get the PWM frequency stored on the hardware device.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
A text message or an int. See the `message` parameter above.
"""
return self._getPWMFrequency(device, message)
def getMotorShutdown(self, device=DEFAULT_DEVICE_ID):
"""
Get the motor shutdown on error status stored on the hardware device.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Returns:
Returns `True` when the motors will shut down on an error, else `False`.
"""
return self._getMotorShutdown(device)
def getSerialTimeout(self, device=DEFAULT_DEVICE_ID):
"""
Get the serial timeout stored on the hardware device.
Caution: more than one value returned from the Qik can map to the same
actual timeout value according to the formula below. I have verified
this as an idiosyncrasy of the Qik itself. There are only 72 unique
values that the Qik can logically use; the remaining 56 values are
repeats of the 72.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Returns:
The timeout value in seconds.
"""
return self._getSerialTimeout(device)
def setDeviceID(self, value, device=DEFAULT_DEVICE_ID, message=True):
"""
Set the hardware device number. This is only needed if more than one
device is on the same serial bus.
:Parameters:
value : `int`
The device ID to set in the range of 0 - 127.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
A text message or an int. See the `message` parameter above. If
`value` and `device` are the same `OK` or `0` will be returned
depending on the value of `message`.
:Exceptions:
* `SerialException`
IO error indicating there was a problem reading from the serial
connection.
"""
return self._setDeviceID(value, device, message)
def setPWMFrequency(self, pwm, device=DEFAULT_DEVICE_ID, message=True):
"""
Set the PWM frequency.
:Parameters:
pwm : `int`
The PWM frequency to set in hertz.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
A text message or an int. See the `message` parameter above.
:Exceptions:
* `SerialException`
IO error indicating there was a problem reading from the serial
connection.
"""
return self._setPWMFrequency(pwm, device, message)
def setMotorShutdown(self, value, device=DEFAULT_DEVICE_ID, message=True):
"""
Set the motor shutdown on error status stored on the hardware device.
:Parameters:
value : `int`
An integer indicating the effect on the motors when an error occurs.
A `1` will cause the motors to stop on an error and a `0` will ignore
errors, keeping the motors running.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
A text message or an int indicating the status of the shutdown-on-error
setting. See the `message` parameter above.
:Exceptions:
* `SerialException`
IO error indicating there was a problem reading from the serial
connection.
"""
return self._setMotorShutdown(value, device, message)
def setSerialTimeout(self, timeout, device=DEFAULT_DEVICE_ID, message=True):
"""
Set the serial timeout on the hardware device.
Setting the serial timeout to anything other than zero will cause an
error if the serial line is inactive for the time set. This may not be
a good thing as leaving the Qik idle may be a required event. Why
would you want the Qik to report an error when none actually occurred
and your Qik was just idle? This happens with or without the motors
running.
This also explains why, when the Qik is set to a very low timeout, the
red LED will come on almost immediately. You will not even get a
chance to send it a command before the timeout. This would be like
temporarily bricking your Qik. Not a good thing, though it's easy to
fix by just setting the timeout to 0 again.
OK, so how do we actually use the serial timeout? The best way I can
think of is to send the Qik a keep-alive signal. One way of doing this
is to execute the getError() method at a little less than half the
timeout period. So if the timeout was set to 200ms you would poll the
error status every 90ms. The Qik will stay alive as long as the
keep-alive signal keeps arriving. This should solve the problem.
However, if the keep-alive is sent from a different process or thread
you could get a format error if the keep-alive command collides with
any other command. A minimal sketch of this keep-alive approach
appears at the bottom of this module.
:Parameters:
timeout : `float` or `int`
The timeout value between 0 - 503.04 seconds, however, any number
can be passed to the argument, the code will find the nearest
allowed value from the 72 that are available.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
Text message or int indicating the result of setting the serial timeout.
:Exceptions:
* `SerialException`
IO error indicating there was a problem reading from the serial
connection.
"""
return self._setSerialTimeout(timeout, device, message)
def setM0Coast(self, device=DEFAULT_DEVICE_ID):
"""
Set motor 0 to coast.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
cmd = self._COMMAND.get('m0-coast')
self._writeData(cmd, device)
def setM1Coast(self, device=DEFAULT_DEVICE_ID):
"""
Set motor 1 to coast.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
cmd = self._COMMAND.get('m1-coast')
self._writeData(cmd, device)
def setM0Speed(self, speed, device=DEFAULT_DEVICE_ID):
"""
Set motor 0 speed.
:Parameters:
speed : `int`
Motor speed as an integer. Negative numbers indicate reverse
speeds.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
self._setM0Speed(speed, device)
def setM1Speed(self, speed, device=DEFAULT_DEVICE_ID):
"""
Set motor 1 speed.
:Parameters:
speed : `int`
Motor speed as an integer. Negative numbers indicate reverse
speeds.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
self._setM1Speed(speed, device)
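# -----------------------------------------------------------------------------
# Hedged illustration, not part of the original driver: one way to apply the
# keep-alive strategy described in setSerialTimeout()'s docstring. The device
# path, timeout, speed, and cycle count are assumptions for this sketch only;
# all methods used (setSerialTimeout, getError, setM0Speed, setM0Coast) are
# defined above, and the helper is never called at import time.
# -----------------------------------------------------------------------------
def _keep_alive_example(device='/dev/ttyUSB0', timeout=0.2, cycles=20):
    """Poll getError() at a little less than half the serial timeout."""
    import time
    qik = Qik2s9v1(device)
    qik.setSerialTimeout(timeout)
    qik.setM0Speed(64)
    try:
        for _ in range(cycles):
            # Reading the error register counts as serial activity, so the
            # Qik never sees an idle line longer than the configured timeout.
            qik.getError()
            time.sleep(timeout * 0.45)
    finally:
        # Always leave the motor coasting when the example ends.
        qik.setM0Coast()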
|
|
"""
This example demonstrates how to embed matplotlib WebAgg interactive
plotting in your own web application and framework. It is not
necessary to do all this if you merely want to display a plot in a
browser or use matplotlib's built-in Tornado-based server "on the
side".
The framework being used must support web sockets.
"""
import io
try:
import tornado
except ImportError:
raise RuntimeError("This example requires tornado.")
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.websocket
from matplotlib.backends.backend_webagg_core import (
FigureManagerWebAgg, new_figure_manager_given_figure)
from matplotlib.figure import Figure
import numpy as np
import json
def create_figure():
"""
Creates a simple example figure.
"""
fig = Figure()
a = fig.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2 * np.pi * t)
a.plot(t, s)
return fig
# The following is the content of the web page. You would normally
# generate this using some sort of template facility in your web
# framework, but here we just use Python string formatting.
html_content = """
<html>
<head>
<!-- TODO: There should be a way to include all of the required javascript
and CSS so matplotlib can add to the set in the future if it
needs to. -->
<link rel="stylesheet" href="_static/css/page.css" type="text/css">
<link rel="stylesheet" href="_static/css/boilerplate.css" type="text/css" />
<link rel="stylesheet" href="_static/css/fbm.css" type="text/css" />
<link rel="stylesheet" href="_static/jquery/css/themes/base/jquery-ui.min.css" >
<script src="_static/jquery/js/jquery-1.7.1.min.js"></script>
<script src="_static/jquery/js/jquery-ui.min.js"></script>
<script src="mpl.js"></script>
<script>
/* This is a callback that is called when the user saves
(downloads) a file. Its purpose is really to map from a
figure and file format to a url in the application. */
function ondownload(figure, format) {
window.open('download.' + format, '_blank');
};
$(document).ready(
function() {
/* It is up to the application to provide a websocket that the figure
will use to communicate to the server. This websocket object can
also be a "fake" websocket that underneath multiplexes messages
from multiple figures, if necessary. */
var websocket_type = mpl.get_websocket_type();
var websocket = new websocket_type("%(ws_uri)sws");
// mpl.figure creates a new figure on the webpage.
var fig = new mpl.figure(
// A unique numeric identifier for the figure
%(fig_id)s,
// A websocket object (or something that behaves like one)
websocket,
// A function called when a file type is selected for download
ondownload,
// The HTML element in which to place the figure
$('div#figure'));
}
);
</script>
<title>matplotlib</title>
</head>
<body>
<div id="figure">
</div>
</body>
</html>
"""
class MyApplication(tornado.web.Application):
class MainPage(tornado.web.RequestHandler):
"""
Serves the main HTML page.
"""
def get(self):
manager = self.application.manager
ws_uri = "ws://{req.host}/".format(req=self.request)
content = html_content % {
"ws_uri": ws_uri, "fig_id": manager.num}
self.write(content)
class MplJs(tornado.web.RequestHandler):
"""
Serves the generated matplotlib javascript file. The content
is dynamically generated based on which toolbar functions the
user has defined. Call `FigureManagerWebAgg` to get its
content.
"""
def get(self):
self.set_header('Content-Type', 'application/javascript')
js_content = FigureManagerWebAgg.get_javascript()
self.write(js_content)
class Download(tornado.web.RequestHandler):
"""
Handles downloading of the figure in various file formats.
"""
def get(self, fmt):
manager = self.application.manager
mimetypes = {
'ps': 'application/postscript',
'eps': 'application/postscript',
'pdf': 'application/pdf',
'svg': 'image/svg+xml',
'png': 'image/png',
'jpeg': 'image/jpeg',
'tif': 'image/tiff',
'emf': 'application/emf'
}
self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
buff = io.BytesIO()
manager.canvas.print_figure(buff, format=fmt)
self.write(buff.getvalue())
class WebSocket(tornado.websocket.WebSocketHandler):
"""
A websocket for interactive communication between the plot in
the browser and the server.
In addition to the methods required by tornado, it is required to
have two callback methods:
- ``send_json(json_content)`` is called by matplotlib when
it needs to send json to the browser. `json_content` is
a JSON tree (Python dictionary), and it is the responsibility
of this implementation to encode it as a string to send over
the socket.
- ``send_binary(blob)`` is called to send binary image data
to the browser.
"""
supports_binary = True
def open(self):
# Register the websocket with the FigureManager.
manager = self.application.manager
manager.add_web_socket(self)
if hasattr(self, 'set_nodelay'):
self.set_nodelay(True)
def on_close(self):
# When the socket is closed, deregister the websocket with
# the FigureManager.
manager = self.application.manager
manager.remove_web_socket(self)
def on_message(self, message):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message)
if message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
manager = self.application.manager
manager.handle_json(message)
def send_json(self, content):
self.write_message(json.dumps(content))
def send_binary(self, blob):
if self.supports_binary:
self.write_message(blob, binary=True)
else:
data_uri = "data:image/png;base64,{0}".format(
blob.encode('base64').replace('\n', ''))
self.write_message(data_uri)
def __init__(self, figure):
self.figure = figure
self.manager = new_figure_manager_given_figure(
id(figure), figure)
super(MyApplication, self).__init__([
# Static files for the CSS and JS
(r'/_static/(.*)',
tornado.web.StaticFileHandler,
{'path': FigureManagerWebAgg.get_static_file_path()}),
# The page that contains all of the pieces
('/', self.MainPage),
('/mpl.js', self.MplJs),
# Sends images and events to the browser, and receives
# events from the browser
('/ws', self.WebSocket),
# Handles the downloading (i.e., saving) of static images
(r'/download.([a-z0-9.]+)', self.Download),
])
if __name__ == "__main__":
figure = create_figure()
application = MyApplication(figure)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
print("http://127.0.0.1:8080/")
print("Press Ctrl+C to quit")
tornado.ioloop.IOLoop.instance().start()
|
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import min_max_axis
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _encode_numpy(values, uniques=None, encode=False, check_unknown=True):
# only used in _encode below, see docstring there for details
if uniques is None:
if encode:
uniques, encoded = np.unique(values, return_inverse=True)
return uniques, encoded
else:
# unique sorts
return np.unique(values)
if encode:
if check_unknown:
diff = _encode_check_unknown(values, uniques)
if diff:
raise ValueError("y contains previously unseen labels: %s"
% str(diff))
encoded = np.searchsorted(uniques, values)
return uniques, encoded
else:
return uniques
def _encode_python(values, uniques=None, encode=False):
# only used in _encode below, see docstring there for details
if uniques is None:
uniques = sorted(set(values))
uniques = np.array(uniques, dtype=values.dtype)
if encode:
table = {val: i for i, val in enumerate(uniques)}
try:
encoded = np.array([table[v] for v in values])
except KeyError as e:
raise ValueError("y contains previously unseen labels: %s"
% str(e))
return uniques, encoded
else:
return uniques
def _encode(values, uniques=None, encode=False, check_unknown=True):
"""Helper function to factorize (find uniques) and encode values.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : array
Values to factorize or encode.
uniques : array, optional
If passed, uniques are not determined from passed values (this
can be because the user specified categories, or because they
already have been determined in fit).
encode : bool, default False
If True, also encode the values into integer codes based on `uniques`.
check_unknown : bool, default True
If True, check for values in ``values`` that are not in ``uniques``
and raise an error. This is ignored for object dtype, and treated as
True in this case. This parameter is useful for
_BaseEncoder._transform() to avoid calling _encode_check_unknown()
twice.
Returns
-------
uniques
If ``encode=False``. The unique values are sorted if the `uniques`
parameter was None (and thus inferred from the data).
(uniques, encoded)
If ``encode=True``.
"""
if values.dtype == object:
try:
res = _encode_python(values, uniques, encode)
except TypeError:
raise TypeError("argument must be a string or number")
return res
else:
return _encode_numpy(values, uniques, encode,
check_unknown=check_unknown)
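# Hedged illustration (not part of scikit-learn): how ``_encode`` behaves for
# an object-dtype array, following ``_encode_python`` above.
#
#     >>> values = np.array(['b', 'a', 'b'], dtype=object)
#     >>> _encode(values)                 # factorize only: sorted uniques
#     array(['a', 'b'], dtype=object)
#     >>> _encode(values, encode=True)    # factorize and encode
#     (array(['a', 'b'], dtype=object), array([1, 0, 1]))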
def _encode_check_unknown(values, uniques, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
uniques : array
Allowed uniques values.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `uniques` (the
unknown values).
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
if values.dtype == object:
uniques_set = set(uniques)
diff = list(set(values) - uniques_set)
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
else:
unique_values = np.unique(values)
diff = list(np.setdiff1d(unique_values, uniques, assume_unique=True))
if return_mask:
if diff:
valid_mask = np.in1d(values, uniques)
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6])
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OrdinalEncoder : encode categorical features
using an ordinal encoding scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = _encode(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
self.classes_, y = _encode(y, encode=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
_, y = _encode(y, uniques=self.classes_, encode=True)
return y
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if len(diff):
raise ValueError(
"y contains previously unseen labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
def _more_tags(self):
return {'X_types': ['1dlabels']}
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer()
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer()
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. A 2-D matrix should contain only 0s and 1s and
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. A 2-D matrix should contain only 0s and 1s and
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. A 2-D matrix should contain only 0s and 1s and
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of decision_function
(classifier).
Use 0.5 when ``Y`` contains the output of predict_proba.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def _more_tags(self):
return {'X_types': ['1dlabels']}
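# Hedged illustration (not part of scikit-learn): inverting decision-function
# style scores with a fitted LabelBinarizer. The score values are invented
# for the example.
#
#     >>> lb = LabelBinarizer().fit([1, 2, 6, 4, 2])
#     >>> scores = np.array([[-1.2, 0.3, -0.5, 0.1],
#     ...                    [ 0.4, -2.0, -0.7, -0.1]])
#     >>> lb.inverse_transform(scores, threshold=0)
#     array([2, 1])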
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = np.in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = Y.astype(int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = min_max_axis(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
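# Hedged sanity sketch (illustration only): for a dense score matrix the
# function reduces to a per-row argmax mapped back onto ``classes``.
#
#     >>> _inverse_binarize_multiclass(np.array([[0.1, 0.9],
#     ...                                        [0.8, 0.2]]), classes=[3, 7])
#     array([7, 3])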
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels.
All entries should be unique (cannot contain duplicate classes).
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
self._cached_dict = None
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
elif len(set(self.classes)) < len(self.classes):
raise ValueError("The classes argument contains duplicate "
"classes. Remove these duplicates before passing "
"them to MultiLabelBinarizer.")
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
self._cached_dict = None
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = self._build_cache()
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _build_cache(self):
if self._cached_dict is None:
self._cached_dict = dict(zip(self.classes_,
range(len(self.classes_))))
return self._cached_dict
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
unknown = set()
for labels in y:
index = set()
for label in labels:
try:
index.add(class_mapping[label])
except KeyError:
unknown.add(label)
indices.extend(index)
indptr.append(len(indices))
if unknown:
warnings.warn('unknown class(es) {0} will be ignored'
.format(sorted(unknown, key=str)))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
def _more_tags(self):
return {'X_types': ['2dlabels']}
|
|
__author__ = "grburgess"
import itertools
import functools
import numpy as np
from threeML.io.progress_bar import progress_bar
from astromodels import use_astromodels_memoization
class GenericFittedSourceHandler(object):
def __init__(self, analysis_result, new_function, parameter_names, parameters, confidence_level, equal_tailed, *independent_variable_range):
"""
A generic 3ML fitted source post-processor. This should in general be sub-classed.
:param analysis_result: a 3ML analysis result
:param new_function: the function that uses the fitted values to compute new values
:param parameter_names: a list of parameter names
:param parameters: astromodels parameter dictionary
:param confidence_level: the confidence level to compute errors at
:param equal_tailed: if True, use equal-tailed error intervals; otherwise use highest posterior density intervals
:param independent_variable_range: the range(s) of independent values to compute the new function over
"""
# bind the class properties
self._analysis_results = analysis_result
self._analysis = analysis_result
self._independent_variable_range = independent_variable_range
self._cl = confidence_level
self._equal_tailed = equal_tailed
self._function = new_function
self._parameter_names = parameter_names
self._parameters = parameters
# if only 1-D then we must place into its own tuple to
# keep from confusing itertools
if len(self._independent_variable_range) == 1:
self._independent_variable_range = (self._independent_variable_range[0],)
# figure out the output shape of the best fit and errors
self._out_shape = tuple(map(len, self._independent_variable_range))
# construct the propagated function
self._build_propagated_function()
# fold the function through its independent values
self._evaluate()
def __add__(self, other):
"""
The basics of adding are handled in the VariatesContainer
:param other: another fitted source handler
:return: a VariatesContainer with the summed values
"""
# assure that the shapes will be the same
assert other._out_shape == self._out_shape, 'cannot sum together arrays with different shapes!'
# this will get the value container for the other values
return self.values + other.values
def __radd__(self, other):
if other == 0:
return self
else:
return self.values + other.values
def _transform(self, value):
"""
dummy transform to be overridden in a subclass
:param value:
:return: transformed value
"""
return value
def update_tag(self, tag, value):
pass
def _build_propagated_function(self):
"""
builds a propagated function using RandomVariates propagation
:return:
"""
arguments = {}
# because we might be using composite functions,
# we have to keep track of parameter names in a non-elegant way
for par,name in zip(self._parameters.values(), self._parameter_names):
if par.free:
this_variate = self._analysis_results.get_variates(par.path)
# Do not use more than 1000 values (more would make the computation slower with no real benefit)
if len(this_variate) > 1000:
this_variate = np.random.choice(this_variate, size=1000)
arguments[name] = this_variate
else:
# use the fixed value rather than a variate
arguments[name] = par.value
# create the propagator
self._propagated_function = self._analysis_results.propagate(self._function, **arguments)
def _evaluate(self):
"""
calculate the best or mean fit of the new function or
quantity
:return:
"""
# if there are independent variables
if self._independent_variable_range:
variates = []
# scroll through the independent variables
n_iterations = np.product(self._out_shape)
with progress_bar(n_iterations, title="Propagating errors") as p:
with use_astromodels_memoization(False):
for variables in itertools.product(*self._independent_variable_range):
variates.append(self._propagated_function(*variables))
p.increase()
# otherwise just evaluate
else:
variates = self._propagated_function()
# create a variates container
self._propagated_variates = VariatesContainer(variates, self._out_shape, self._cl, self._transform, self._equal_tailed)
@property
def values(self):
"""
:return: The VariatesContainer
"""
return self._propagated_variates
@property
def samples(self):
"""
:return: the raw samples of the variates
"""
return self._propagated_variates.samples
@property
def median(self):
"""
:return: the median of the variates
"""
return self._propagated_variates.median
@property
def average(self):
"""
:return: the average of the variates
"""
return self._propagated_variates.average
@property
def upper_error(self):
"""
:return: the upper error of the variates
"""
return self._propagated_variates.upper_error
@property
def lower_error(self):
"""
:return: the lower error of the variates
"""
return self._propagated_variates.lower_error
def transform(method):
"""
A wrapper to call the _transform method for outputs of Variates container class
:param method:
:return:
"""
@functools.wraps(method)
def wrapped(instance, *args, **kwargs):
return instance._transform(method(instance, *args, **kwargs))
return wrapped
class VariatesContainer(object):
def __init__(self, values, out_shape, cl, transform, equal_tailed=True):
"""
A container to store a *list* of RandomVariates and transform their outputs
to the appropriate shape. This cannot be done with normal numpy array operations
because an array of RandomVariates becomes a normal ndarray. Therefore, we calculate
the averages, errors, etc, and transform those.
Additionally, any unit association must be done post calculation as well because the
numpy array constructor sees a unit array as a regular array and again loses the RandomVariates
properties. Therefore, the transform method is used which applies a function to the output properties,
e.g., a unit association and/or conversion.
:param values: a flat List of RandomVariates
:param out_shape: the array shape for the output variables
:param cl: the confidence level to calculate error intervals on
:param transform: a method to transform the outputs
:param equal_tailed: whether to use equal-tailed error intervals or not
"""
self._values = values # type: list
self._out_shape = out_shape #type: tuple
self._cl = cl #type: float
self._equal_tailed = equal_tailed #type: bool
self._transform = transform #type: callable
# calculate mean and median and transform them into the provided
# output shape
self._average = np.array([val.average for val in self._values])
self._average = self._average.reshape(self._out_shape)
self._median = np.array([val.median for val in self._values])
self._median = self._median.reshape(self._out_shape)
# construct the error intervals
upper_error = []
lower_error = []
# if equal tailed errors requested
if equal_tailed:
for val in self._values:
error = val.equal_tail_confidence_interval(self._cl)
upper_error.append(error[1])
lower_error.append(error[0])
else:
# otherwise use the highest posterior density (HPD) interval
for val in self._values:
error = val.highest_posterior_density_interval(self._cl)
upper_error.append(error[1])
lower_error.append(error[0])
# reshape the errors into the output shape
self._upper_error = np.array(upper_error).reshape(self._out_shape)
self._lower_error = np.array(lower_error).reshape(self._out_shape)
samples = []
for val in self._values:
samples.append(val.samples)
n_samples = len(samples[0])
samples_shape = list(self._out_shape) + [n_samples]
self._samples_shape = tuple(samples_shape)
self._samples = np.array(samples).reshape(samples_shape)
@property
def values(self):
"""
:return: the list of RandomVariates
"""
return self._values
@property
@transform
def samples(self):
"""
:return: the transformed raw samples
"""
return self._samples
@property
@transform
def average(self):
"""
:return: the transformed average
"""
return self._average
@property
@transform
def median(self):
"""
:return: the transformed median
"""
return self._median
@property
@transform
def upper_error(self):
"""
:return: the transformed upper error
"""
return self._upper_error
@property
@transform
def lower_error(self):
"""
:return: the transformed lower error
"""
return self._lower_error
def __add__(self, other):
"""
:param other:
:return:
"""
assert other._out_shape == self._out_shape, 'cannot sum together arrays with different shapes!'
# this will get the value container for the other values
other_values = other.values
summed_values = [v+vo for v,vo in zip(self._values, other_values)]
return VariatesContainer(summed_values, self._out_shape, self._cl, self._transform, self._equal_tailed)
def __radd__(self, other):
if other == 0:
return self
else:
other_values = other.values
summed_values = [v + vo for v, vo in zip(self._values, other_values)]
return VariatesContainer(summed_values, self._out_shape, self._cl, self._transform, self._equal_tailed)
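# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of threeML). ``handler_a``
# and ``handler_b`` stand for concrete subclasses of
# GenericFittedSourceHandler evaluated over the same independent-variable
# grid.
#
#     total = handler_a + handler_b              # a VariatesContainer
#     total = sum([handler_a, handler_b])        # also works, via __radd__
#     best_fit = total.median                    # reshaped to the output grid
#     low, high = total.lower_error, total.upper_error
# ---------------------------------------------------------------------------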
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.stateless_random_ops import Algorithm
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# A seed for random ops (stateful and stateless) will always be 1024
# bits, all of which will be sent to the C++ code. The actual C++
# implementation of some algorithms may only use a lower part of the bits.
UINT64_HALF_SPAN = 2**63
MAX_INT64 = UINT64_HALF_SPAN - 1
MIN_INT64 = -UINT64_HALF_SPAN
UINT64_SPAN = UINT64_HALF_SPAN * 2
# 'Variable' doesn't support uint32 or uint64 yet (due to reasons explained in
# b/111604096 and cl/171681867), so I use signed int here. I choose int64
# instead of int32 here because `VarHandleOp` doesn't support int32 on GPU.
SEED_TYPE = "int64"
SEED_MIN = MIN_INT64
SEED_MAX = MAX_INT64
SEED_UINT_SPAN = UINT64_SPAN
SEED_TYPE_BITS = 64
SEED_BIT_MASK = 0xFFFFFFFFFFFFFFFF
SEED_SIZE = 16 # in units of SEED_TYPE
STATE_TYPE = SEED_TYPE
ALGORITHM_TYPE = STATE_TYPE
PHILOX_STATE_SIZE = 3
THREEFRY_STATE_SIZE = 2
RNG_ALG_PHILOX = Algorithm.PHILOX.value
RNG_ALG_THREEFRY = Algorithm.THREEFRY.value
DEFAULT_ALGORITHM = RNG_ALG_PHILOX
def non_deterministic_ints(shape, dtype=dtypes.int64):
"""Non-deterministically generates some integers.
This op may use some OS-provided source of non-determinism (e.g. an RNG), so
each execution will give different results.
Args:
shape: the shape of the result.
dtype: (optional) the dtype of the result.
Returns:
a tensor whose element values are non-deterministically chosen.
"""
return gen_stateful_random_ops.non_deterministic_ints(
shape=shape, dtype=dtype)
def _uint_to_int(n):
if isinstance(n, int) and n > SEED_MAX:
n = n - SEED_UINT_SPAN
return n
def _make_1d_state(state_size, seed):
"""Makes a 1-D RNG state.
Args:
state_size: an integer.
seed: an integer or 1-D tensor.
Returns:
a 1-D tensor of shape [state_size] and dtype STATE_TYPE.
"""
if isinstance(seed, six.integer_types):
# chop the Python integer (infinite precision) into chunks of SEED_TYPE
ls = []
for _ in range(state_size):
ls.append(seed & SEED_BIT_MASK)
seed >>= SEED_TYPE_BITS
seed = ls
# to avoid overflow error from ops.convert_to_tensor
seed = nest.map_structure(_uint_to_int, seed)
seed = math_ops.cast(seed, STATE_TYPE)
seed = array_ops.reshape(seed, [-1])
seed = seed[0:state_size]
# Padding with zeros on the *left* if too short. Padding on the right would
# cause a small seed to be used as the "counter" while the "key" is always
# zero (for counter-based RNG algorithms), because in the current memory
# layout counter is stored before key. In such a situation two RNGs with
# two different small seeds may generate overlapping outputs.
seed_size = seed.shape[0]
if seed_size is None:
seed_size = array_ops.shape(seed)[0]
padding_size = math_ops.maximum(state_size - seed_size, 0)
padding = array_ops.zeros([padding_size], seed.dtype)
# can't use `pad` because it doesn't support integer dtypes on GPU
seed = array_ops.concat([padding, seed], axis=0)
seed.set_shape([state_size])
return seed
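# Hedged illustration (plain Python, no TF ops): a large Python-integer seed
# is chopped into 64-bit chunks, least-significant chunk first, exactly as in
# the loop above.
#
#     >>> seed = (1 << 64) + 5
#     >>> [seed & SEED_BIT_MASK, (seed >> SEED_TYPE_BITS) & SEED_BIT_MASK]
#     [5, 1]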
def _get_counter_size(alg):
if alg == RNG_ALG_PHILOX:
return 2
elif alg == RNG_ALG_THREEFRY:
return 1
else:
raise ValueError(
f"Argument `alg` got unsupported value {alg}. Supported values are "
f"{RNG_ALG_PHILOX} for the Philox algorithm and {RNG_ALG_THREEFRY} for "
f"the ThreeFry algorithm.")
def _get_state_size(alg):
if alg == RNG_ALG_PHILOX:
return PHILOX_STATE_SIZE
elif alg == RNG_ALG_THREEFRY:
return THREEFRY_STATE_SIZE
else:
raise ValueError(
f"Argument `alg` got unsupported value {alg}. Supported values are "
f"{RNG_ALG_PHILOX} for the Philox algorithm and {RNG_ALG_THREEFRY} for "
f"the ThreeFry algorithm.")
def _check_state_shape(shape, alg):
if isinstance(alg, ops.Tensor) and not context.executing_eagerly():
return
shape.assert_is_compatible_with([_get_state_size(int(alg))])
def _make_state_from_seed(seed, alg):
return _make_1d_state(_get_state_size(alg), seed)
@tf_export("random.create_rng_state", "random.experimental.create_rng_state")
def create_rng_state(seed, alg):
"""Creates a RNG state from an integer or a vector.
Example:
>>> tf.random.create_rng_state(
... 1234, "philox")
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([1234, 0, 0])>
>>> tf.random.create_rng_state(
... [12, 34], "threefry")
<tf.Tensor: shape=(2,), dtype=int64, numpy=array([12, 34])>
Args:
seed: an integer or 1-D numpy array.
alg: the RNG algorithm. Can be a string, an `Algorithm` or an integer.
Returns:
a 1-D numpy array whose size depends on the algorithm.
"""
alg = stateless_random_ops.convert_alg_to_int(alg)
return _make_state_from_seed(seed, alg)
def _shape_tensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int64 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int64
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
def _convert_to_state_tensor(t):
# to avoid out-of-range error from ops.convert_to_tensor
t = nest.map_structure(_uint_to_int, t)
return math_ops.cast(t, STATE_TYPE)
def get_replica_id():
rctx = ds_context.get_replica_context()
if rctx is None:
return None
return rctx.replica_id_in_sync_group
@tf_export("random.Generator", "random.experimental.Generator")
class Generator(tracking.AutoTrackable):
"""Random-number generator.
Example:
Creating a generator from a seed:
>>> g = tf.random.Generator.from_seed(1234)
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[ 0.9356609 , 1.0854305 , -0.93788373],
[-0.5061547 , 1.3169702 , 0.7137579 ]], dtype=float32)>
Creating a generator from a non-deterministic state:
>>> g = tf.random.Generator.from_non_deterministic_state()
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...>
All the constructors allow explicitly choosing a Random-Number-Generation
(RNG) algorithm. Supported algorithms are `"philox"` and `"threefry"`. For
example:
>>> g = tf.random.Generator.from_seed(123, alg="philox")
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[ 0.8673864 , -0.29899067, -0.9310337 ],
[-1.5828488 , 1.2481191 , -0.6770643 ]], dtype=float32)>
CPU, GPU and TPU with the same algorithm and seed will generate the same
integer random numbers. Floating-point results (such as the output of `normal`)
may have small numerical discrepancies between different devices.
This class uses a `tf.Variable` to manage its internal state. Every time
random numbers are generated, the state of the generator will change. For
example:
>>> g = tf.random.Generator.from_seed(1234)
>>> g.state
<tf.Variable ... numpy=array([1234, 0, 0])>
>>> g.normal(shape=(2, 3))
<...>
>>> g.state
<tf.Variable ... numpy=array([2770, 0, 0])>
The shape of the state is algorithm-specific.
There is also a global generator:
>>> g = tf.random.get_global_generator()
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...>
When creating a generator inside a `tf.distribute.Strategy` scope, each
replica will get a different stream of random numbers.
For example, in this code:
```
strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"])
with strat.scope():
g = tf.random.Generator.from_seed(1)
def f():
return g.normal([])
results = strat.run(f).values
```
`results[0]` and `results[1]` will have different values.
If the generator is seeded (e.g. created via `Generator.from_seed`), the
random numbers will be determined by the seed, even though different replicas
get different numbers. One can think of a random number generated on a
replica as a hash of the replica ID and a "master" random number that may be
common to all replicas. Hence, the whole system is still deterministic.
(Note that the random numbers on different replicas are not correlated, even
if they are deterministically determined by the same seed. They are not
correlated in the sense that no matter what statistics one calculates on them,
there won't be any discernable correlation.)
Generators can be freely saved and restored using `tf.train.Checkpoint`. The
checkpoint can be restored in a distribution strategy with a different number
of replicas than the original strategy. If a replica ID is present in both the
original and the new distribution strategy, its state will be properly
restored (i.e. the random-number stream from the restored point will be the
same as that from the saving point) unless the replicas have already diverged
in their RNG call traces before saving (e.g. one replica has made one RNG call
while another has made two RNG calls). We don't have such guarantee if the
generator is saved in a strategy scope and restored outside of any strategy
scope, or vice versa.
When a generator is created within the scope of
`tf.distribute.experimental.ParameterServerStrategy`, the workers
will share the generator's state (placed on one of the parameter
servers). In this way the workers will still get different
random-number streams, as stated above. (This is similar to replicas
in a `tf.distribute.MirroredStrategy` sequentially accessing a
generator created outside the strategy.) Each RNG call on a worker
will incur a round-trip to a parameter server, which may have
performance impacts. When creating a
`tf.distribute.experimental.ParameterServerStrategy`, please make
sure that the `variable_partitioner` argument won't shard small
variables of shape `[2]` or `[3]` (because generator states must not
be sharded). Ways to avoid sharding small variables include setting
`variable_partitioner` to `None` or to
`tf.distribute.experimental.partitioners.MinSizePartitioner` with a
large enough `min_shard_bytes` (see
`tf.distribute.experimental.ParameterServerStrategy`'s documentation
for more details).
"""
@classmethod
def from_state(cls, state, alg):
"""Creates a generator from a state.
See `__init__` for description of `state` and `alg`.
Args:
state: the new state.
alg: the RNG algorithm.
Returns:
The new generator.
"""
return cls(alg=alg, state=state)
@classmethod
def from_seed(cls, seed, alg=None):
"""Creates a generator from a seed.
A seed is a 1024-bit unsigned integer represented either as a Python
integer or a vector of integers. Seeds shorter than 1024-bit will be
padded. The padding, the internal structure of a seed and the way a seed
is converted to a state are all opaque (unspecified). The only semantics
specification of seeds is that two different seeds are likely to produce
two independent generators (but no guarantee).
Args:
seed: the seed for the RNG.
alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator.
"""
if alg is None:
# TODO(b/170668986): more sophisticated algorithm selection
alg = DEFAULT_ALGORITHM
alg = stateless_random_ops.convert_alg_to_int(alg)
state = create_rng_state(seed, alg)
return cls(state=state, alg=alg)
@classmethod
def from_non_deterministic_state(cls, alg=None):
"""Creates a generator by non-deterministically initializing its state.
The source of the non-determinism will be platform- and time-dependent.
Args:
alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator.
"""
if config.deterministic_ops_enabled():
raise RuntimeError('"from_non_deterministic_state" cannot be called when ' # pylint: disable=g-doc-exception
"determinism is enabled.")
if alg is None:
# TODO(b/170668986): more sophisticated algorithm selection
alg = DEFAULT_ALGORITHM
alg = stateless_random_ops.convert_alg_to_int(alg)
state = non_deterministic_ints(shape=[_get_state_size(alg)],
dtype=SEED_TYPE)
return cls(state=state, alg=alg)
@classmethod
def from_key_counter(cls, key, counter, alg):
"""Creates a generator from a key and a counter.
This constructor only applies if the algorithm is a counter-based algorithm.
See method `key` for the meaning of "key" and "counter".
Args:
key: the key for the RNG, a scalar of type STATE_TYPE.
counter: a vector of dtype STATE_TYPE representing the initial counter for
the RNG, whose length is algorithm-specific.
alg: the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator.
"""
counter = _convert_to_state_tensor(counter)
key = _convert_to_state_tensor(key)
alg = stateless_random_ops.convert_alg_to_int(alg)
counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])
key.shape.assert_is_compatible_with([])
key = array_ops.reshape(key, [1])
state = array_ops.concat([counter, key], 0)
return cls(state=state, alg=alg)
def __init__(self, copy_from=None, state=None, alg=None):
"""Creates a generator.
The new generator will be initialized by one of the following ways, with
decreasing precedence:
(1) If `copy_from` is not None, the new generator is initialized by copying
information from another generator.
(2) If `state` and `alg` are not None (they must be set together), the new
generator is initialized by a state.
Args:
copy_from: a generator to be copied from.
state: a vector of dtype STATE_TYPE representing the initial state of the
RNG, whose length and semantics are algorithm-specific. If it's a
variable, the generator will reuse it instead of creating a new
variable.
alg: the RNG algorithm. Possible values are
`tf.random.Algorithm.PHILOX` for the Philox algorithm and
`tf.random.Algorithm.THREEFRY` for the ThreeFry algorithm
(see paper 'Parallel Random Numbers: As Easy as 1, 2, 3'
[https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]).
The string names `"philox"` and `"threefry"` can also be used.
Note `PHILOX` guarantees the same numbers are produced (given
the same random state) across all architectures (CPU, GPU, XLA etc).
"""
# TODO(b/175072242): Remove distribution-strategy dependencies in this file.
if ds_context.has_strategy():
self._distribution_strategy = ds_context.get_strategy()
else:
self._distribution_strategy = None
if copy_from is not None:
# All other arguments should be None
assert (alg or state) is None
self._state_var = self._create_variable(copy_from.state, dtype=STATE_TYPE,
trainable=False)
self._alg = copy_from.algorithm
else:
assert alg is not None and state is not None
alg = stateless_random_ops.convert_alg_to_int(alg)
if isinstance(state, variables.Variable):
_check_state_shape(state.shape, alg)
self._state_var = state
else:
state = _convert_to_state_tensor(state)
_check_state_shape(state.shape, alg)
self._state_var = self._create_variable(state, dtype=STATE_TYPE,
trainable=False)
self._alg = alg
def _create_variable(self, *args, **kwargs):
"""Creates a variable.
Args:
*args: positional arguments passed along to `variables.Variable`.
**kwargs: keyword arguments passed along to `variables.Variable`.
Returns:
The created variable.
"""
v = variables.Variable(*args, **kwargs)
if isinstance(v, sharded_variable.ShardedVariable):
# RNG state is an atomic entity representing a 128-bit or
# 192-bit value, so it mustn't be sharded.
raise ValueError(
"tf.random.Generator state is sharded, which is not allowed. When "
"creating a tf.distribute.experimental.ParameterServerStrategy, "
"please make sure that the `variable_partitioner` "
"argument won't shard a "
"small variable of shape [2] or [3]. Ways to avoid sharding small "
"variables include setting `variable_partitioner` to None or to "
"tf.distribute.experimental.partitioners.MinSizePartitioner with a "
"large enough `min_shard_bytes`.")
return v
def reset(self, state):
"""Resets the generator by a new state.
See `__init__` for the meaning of "state".
Args:
state: the new state.
"""
state = _convert_to_state_tensor(state)
state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)])
self._state_var.assign(state)
def reset_from_seed(self, seed):
"""Resets the generator by a new seed.
See `from_seed` for the meaning of "seed".
Args:
seed: the new seed.
"""
state = create_rng_state(seed, self.algorithm)
self._state_var.assign(state)
def reset_from_key_counter(self, key, counter):
"""Resets the generator by a new key-counter pair.
See `from_key_counter` for the meaning of "key" and "counter".
Args:
key: the new key.
counter: the new counter.
"""
counter = _convert_to_state_tensor(counter)
key = _convert_to_state_tensor(key)
counter.shape.assert_is_compatible_with(
[_get_state_size(self.algorithm) - 1])
key.shape.assert_is_compatible_with([])
key = array_ops.reshape(key, [1])
state = array_ops.concat([counter, key], 0)
self._state_var.assign(state)
@property
def state(self):
"""The internal state of the RNG."""
return self._state_var
@property
def algorithm(self):
"""The RNG algorithm id (a Python integer or scalar integer Tensor)."""
return self._alg
def _standard_normal(self, shape, dtype):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_normal_v2(
shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)
@property
def key(self):
"""The 'key' part of the state of a counter-based RNG.
    For a counter-based RNG algorithm such as Philox and ThreeFry (as
described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3'
[https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]),
the RNG state consists of two parts: counter and key. The output is
generated via the formula: output=hash(key, counter), i.e. a hashing of
    the counter parametrized by the key. Two RNGs with two different keys can
    be thought of as generating two independent random-number streams (a stream
is formed by increasing the counter).
Returns:
A scalar which is the 'key' part of the state, if the RNG algorithm is
counter-based; otherwise it raises a ValueError.
"""
alg = self.algorithm
if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
return self._state_var[-1]
else:
raise ValueError(
f"This generator uses an unsupported algorithm {alg}. Supported "
f"values are {RNG_ALG_PHILOX} for the Philox algorithm and "
f"{RNG_ALG_THREEFRY} for the ThreeFry algorithm.")
def _skip_single_var(self, var, delta):
    # TODO(wangpeng): Cache the cast algorithm instead of casting every time.
return gen_stateful_random_ops.rng_read_and_skip(
var.handle,
alg=math_ops.cast(self.algorithm, dtypes.int32),
delta=math_ops.cast(delta, dtypes.uint64))
def skip(self, delta):
"""Advance the counter of a counter-based RNG.
Args:
delta: the amount of advancement. The state of the RNG after
`skip(n)` will be the same as that after `normal([n])`
(or any other distribution). The actual increment added to the
counter is an unspecified implementation detail.
Returns:
A `Tensor` of type `int64`.
"""
def update_fn(v):
return self._skip_single_var(v, delta)
# TODO(b/170515001): Always call strategy.extended.update after calling it
# from both replica context and cross-replica context is supported.
if values_util.is_saving_non_distributed():
# Assumes replica context with replica_id=0, since we only save the first
# replica.
return update_fn(self.state)
if self._distribution_strategy is not None:
with ds_context.enter_or_assert_strategy(self._distribution_strategy):
if ds_context.in_cross_replica_context():
# Code that operates on all replicas of a variable cannot be saved
# without retracing.
values_util.mark_as_unsaveable()
if (ds_context.in_cross_replica_context() or
"CentralStorage" in type(self._distribution_strategy).__name__):
# In cross-replica context we need to use strategy.extended.update.
# In CentralStorageStrategy we also need to use
# strategy.extended.update (even for replica context),
# because variable updates here must be within merge_call.
return ds_context.get_strategy().extended.update(
self.state, update_fn)
return update_fn(self.state)
def _preprocess_key(self, key):
if self._distribution_strategy is None:
return key
with ds_context.enter_or_assert_strategy(self._distribution_strategy):
replica_id = get_replica_id()
if replica_id is not None:
replica_id = array_ops.stack([replica_id, 0], axis=0)
replica_id = math_ops.cast(replica_id, dtypes.uint64)
# Conceptually: key = hash(key, replica_id)
key = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
shape=[1], key=key, counter=replica_id, dtype=dtypes.uint64,
alg=self.algorithm)
return key
def _prepare_key_counter(self, shape):
delta = math_ops.reduce_prod(shape)
counter_key = self.skip(delta)
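    # `skip` returns the pre-advance RNG state as a vector of int64: its first
    # `counter_size` elements are the counter and the next element is the key
    # (the state layout is [counter, key], as in `from_key_counter`).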
counter_size = _get_counter_size(self.algorithm)
counter = array_ops.bitcast(counter_key[:counter_size], dtypes.uint64)
key = array_ops.bitcast(counter_key[counter_size:counter_size + 1],
dtypes.uint64)
key = self._preprocess_key(key)
return key, counter
# The following functions return a tensor and as a side effect update
# self._state_var.
def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateful_normal", [shape, mean, stddev]) as name:
shape = _shape_tensor(shape)
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = self._standard_normal(shape, dtype=dtype)
return math_ops.add(rnd * stddev, mean, name=name)
def _truncated_normal(self, shape, dtype):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_truncated_normal_v2(
shape=shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)
def truncated_normal(self, shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than
2 standard deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal
values.
"""
with ops.name_scope(
name, "truncated_normal", [shape, mean, stddev]) as name:
shape_tensor = _shape_tensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = self._truncated_normal(shape_tensor, dtype=dtype)
mul = rnd * stddev_tensor
return math_ops.add(mul, mean_tensor, name=name)
def _uniform(self, shape, dtype):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_uniform_v2(
shape=shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)
def _uniform_full_int(self, shape, dtype, name=None):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
shape=shape,
key=key,
counter=counter,
dtype=dtype,
alg=self.algorithm,
name=name)
def uniform(self, shape, minval=0, maxval=None,
dtype=dtypes.float32, name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
    the upper bound `maxval` is excluded. (For floating-point numbers, especially
    low-precision types like bfloat16, the result may occasionally include
    `maxval` because of rounding.)
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
For full-range random integers, pass `minval=None` and `maxval=None` with an
integer `dtype` (for integer dtypes, `minval` and `maxval` must be both
`None` or both not `None`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
minval: A Tensor or Python value of type `dtype`, broadcastable with
`shape` (for integer types, broadcasting is not supported, so it needs
to be a scalar). The lower bound (included) on the range of random
values to generate. Pass `None` for full-range integers. Defaults to 0.
maxval: A Tensor or Python value of type `dtype`, broadcastable with
`shape` (for integer types, broadcasting is not supported, so it needs
to be a scalar). The upper bound (excluded) on the range of random
values to generate. Pass `None` for full-range integers. Defaults to 1
if `dtype` is floating point.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype.is_integer:
if (minval is None) != (maxval is None):
raise ValueError("For integer dtype {}, minval and maxval must be both "
"`None` or both non-`None`; got minval={} and "
"maxval={}".format(dtype, minval, maxval))
elif maxval is None:
maxval = 1
with ops.name_scope(name, "stateful_uniform",
[shape, minval, maxval]) as name:
shape = _shape_tensor(shape)
if dtype.is_integer and minval is None:
return self._uniform_full_int(shape=shape, dtype=dtype, name=name)
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_uniform_int_v2(
shape=shape,
key=key,
counter=counter,
minval=minval,
maxval=maxval,
alg=self.algorithm,
name=name)
else:
rnd = self._uniform(shape=shape, dtype=dtype)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None):
"""Uniform distribution on an integer type's entire range.
This method is the same as setting `minval` and `maxval` to `None` in the
`uniform` method.
Args:
shape: the shape of the output.
dtype: (optional) the integer type, default to uint64.
name: (optional) the name of the node.
Returns:
A tensor of random numbers of the required shape.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope(name, "stateful_uniform_full_int",
[shape]) as name:
shape = _shape_tensor(shape)
return self._uniform_full_int(shape=shape, dtype=dtype, name=name)
def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None):
"""Outputs random values from a binomial distribution.
The generated values follow a binomial distribution with specified count and
probability of success parameters.
Example:
```python
counts = [10., 20.]
# Probability of success.
probs = [0.8]
rng = tf.random.Generator.from_seed(seed=234)
binomial_samples = rng.binomial(shape=[2], counts=counts, probs=probs)
counts = ... # Shape [3, 1, 2]
probs = ... # Shape [1, 4, 2]
shape = [3, 4, 3, 4, 2]
rng = tf.random.Generator.from_seed(seed=1717)
# Sample shape will be [3, 4, 3, 4, 2]
binomial_samples = rng.binomial(shape=shape, counts=counts, probs=probs)
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
counts: Tensor. The counts of the binomial distribution. Must be
broadcastable with `probs`, and broadcastable with the rightmost
dimensions of `shape`.
probs: Tensor. The probability of success for the
binomial distribution. Must be broadcastable with `counts` and
broadcastable with the rightmost dimensions of `shape`.
dtype: The type of the output. Default: tf.int32
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random binomial
values. For each i, each samples[i, ...] is an independent draw from
the binomial distribution on counts[i] trials with probability of
success probs[i].
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope(name, "binomial", [shape, counts, probs]) as name:
counts = ops.convert_to_tensor(counts, name="counts")
probs = ops.convert_to_tensor(probs, name="probs")
shape_tensor = _shape_tensor(shape)
return gen_stateful_random_ops.stateful_random_binomial(
self.state.handle,
self.algorithm,
shape=shape_tensor,
counts=counts,
probs=probs,
dtype=dtype,
name=name)
# TODO(wangpeng): implement other distributions
def _make_int64_keys(self, shape=()):
# New independent keys are generated via
# `new_key[i] = hash(old_key, counter+i)`, which is exactly what
# `uniform_full_int(dtype=int64)` does for PhiloxRandom_64_128_128 and
# ThreeFry_64_64_64.
return self.uniform_full_int(shape=shape, dtype=dtypes.int64)
def make_seeds(self, count=1):
"""Generates seeds for stateless random ops.
For example:
```python
seeds = get_global_generator().make_seeds(count=10)
for i in range(10):
seed = seeds[:, i]
numbers = stateless_random_normal(shape=[2, 3], seed=seed)
...
```
Args:
count: the number of seed pairs (note that stateless random ops need a
pair of seeds to invoke).
Returns:
A tensor of shape [2, count] and dtype int64.
"""
alg = self.algorithm
if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
keys = self._make_int64_keys(shape=[count])
# The two seeds for stateless random ops don't have individual semantics
# and are scrambled together, so setting one to zero is fine.
zeros = array_ops.zeros_like(keys)
return array_ops.stack([keys, zeros])
else:
raise ValueError(
f"This generator uses an unsupported algorithm {alg}. Supported "
f"values are {RNG_ALG_PHILOX} for the Philox algorithm and "
f"{RNG_ALG_THREEFRY} for the ThreeFry algorithm.")
def split(self, count=1):
"""Returns a list of independent `Generator` objects.
Two generators are independent of each other in the sense that the
random-number streams they generate don't have statistically detectable
correlations. The new generators are also independent of the old one.
The old generator's state will be changed (like other random-number
generating methods), so two calls of `split` will return different
new generators.
For example:
```python
gens = get_global_generator().split(count=10)
for gen in gens:
numbers = gen.normal(shape=[2, 3])
# ...
gens2 = get_global_generator().split(count=10)
# gens2 will be different from gens
```
    The new generators will be put on the current device (possibly different
from the old generator's), for example:
```python
with tf.device("/device:CPU:0"):
gen = Generator(seed=1234) # gen is on CPU
with tf.device("/device:GPU:0"):
gens = gen.split(count=10) # gens are on GPU
```
Args:
count: the number of generators to return.
Returns:
A list (length `count`) of `Generator` objects independent of each other.
The new generators have the same RNG algorithm as the old one.
"""
def _key_to_state(alg, key):
# Padding with zeros on the left. The zeros will be the counter.
return [0] * (_get_state_size(alg) - 1) + [key]
alg = self.algorithm
if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
keys = self._make_int64_keys(shape=[count])
return [Generator(state=_key_to_state(alg, key), alg=alg)
for key in array_ops.unstack(keys, num=count)]
else:
raise ValueError(
f"This generator uses an unsupported algorithm {alg}. Supported "
f"values are {RNG_ALG_PHILOX} for the Philox algorithm and "
f"{RNG_ALG_THREEFRY} for the ThreeFry algorithm.")
# It's not safe to create TF ops before `init_google` is called, so this is
# initialized to None and gets a value the first time `get_global_generator` is
# called.
global_generator = None
@tf_export("random.get_global_generator",
"random.experimental.get_global_generator")
def get_global_generator():
"""Retrieves the global generator.
This function will create the global generator the first time it is called,
and the generator will be placed at the default device at that time, so one
needs to be careful when this function is first called. Using a generator
  placed on a less-ideal device will incur a performance penalty.
Returns:
The global `tf.random.Generator` object.
"""
global global_generator
if global_generator is None:
if config.deterministic_ops_enabled():
raise RuntimeError('"get_global_generator" cannot be called if ' # pylint: disable=g-doc-exception
"determinism is enabled, unless "
'"set_global_generator" has already been called. '
'Please call "set_global_generator" first.')
with ops.init_scope():
global_generator = Generator.from_non_deterministic_state()
return global_generator
@tf_export("random.set_global_generator",
"random.experimental.set_global_generator")
def set_global_generator(generator):
"""Replaces the global generator with another `Generator` object.
  Creating a new Generator object (and the Variable object within it) to swap
  in does not work well with tf.function because (1) tf.function puts
  restrictions on Variable creation, so a replacement generator cannot be
  freely created inside tf.function; (2) redirecting a global variable to
  a new object is problematic with tf.function because the old object may be
  captured by a 'tf.function'ed function and still be used by it.
A 'tf.function'ed function only keeps weak references to variables,
so deleting a variable and then calling that function again may raise an
error, as demonstrated by
random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun .
Args:
generator: the new `Generator` object.
"""
global global_generator
global_generator = generator
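# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example, assuming only the public `tf.random.Generator` API that
# this module exports: explicit seeding, drawing samples, `split` for
# independent child generators, and `make_seeds` for stateless ops. Guarded so
# it only runs when this file is executed directly.
if __name__ == "__main__":
  import tensorflow as tf
  g = tf.random.Generator.from_seed(1234)
  print(g.normal(shape=[2, 3]))        # reproducible normal samples
  print(g.uniform(shape=[2], minval=0, maxval=10, dtype=tf.int32))
  child_a, child_b = g.split(count=2)  # statistically independent generators
  print(child_a.normal(shape=[2]), child_b.normal(shape=[2]))
  seeds = g.make_seeds(count=3)        # shape [2, 3]; one column per seed pair
  print(tf.random.stateless_normal(shape=[2], seed=seeds[:, 0]))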
|
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given the output of -t commands from a ninja build for a gyp and GN generated
build, report on differences between the command lines."""
import os
import shlex
import subprocess
import sys
# Must be in v8/.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(BASE_DIR)
g_total_differences = 0
def FindAndRemoveArgWithValue(command_line, argname):
"""Given a command line as a list, remove and return the value of an option
that takes a value as a separate entry.
Modifies |command_line| in place.
"""
if argname not in command_line:
return ''
location = command_line.index(argname)
value = command_line[location + 1]
command_line[location:location + 2] = []
return value
def MergeSpacedArgs(command_line, argname):
"""Combine all arguments |argname| with their values, separated by a space."""
i = 0
result = []
while i < len(command_line):
arg = command_line[i]
if arg == argname:
result.append(arg + ' ' + command_line[i + 1])
i += 1
else:
result.append(arg)
i += 1
return result
def NormalizeSymbolArguments(command_line):
"""Normalize -g arguments.
  If there are no -g args, that's equivalent to -g0; -g2 is equivalent to -g.
Modifies |command_line| in place.
"""
  # Strip -g0 if there are no symbols.
have_some_symbols = False
for x in command_line:
if x.startswith('-g') and x != '-g0':
have_some_symbols = True
if not have_some_symbols and '-g0' in command_line:
command_line.remove('-g0')
# Rename -g2 to -g.
if '-g2' in command_line:
command_line[command_line.index('-g2')] = '-g'
def GetFlags(lines, build_dir):
"""Turn a list of command lines into a semi-structured dict."""
is_win = sys.platform == 'win32'
flags_by_output = {}
for line in lines:
command_line = shlex.split(line.strip(), posix=not is_win)[1:]
output_name = FindAndRemoveArgWithValue(command_line, '-o')
dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
NormalizeSymbolArguments(command_line)
command_line = MergeSpacedArgs(command_line, '-Xclang')
cc_file = [x for x in command_line if x.endswith('.cc') or
x.endswith('.c') or
x.endswith('.cpp')]
if len(cc_file) != 1:
print 'Skipping %s' % command_line
continue
assert len(cc_file) == 1
if is_win:
rsp_file = [x for x in command_line if x.endswith('.rsp')]
assert len(rsp_file) <= 1
if rsp_file:
rsp_file = os.path.join(build_dir, rsp_file[0][1:])
with open(rsp_file, "r") as open_rsp_file:
command_line = shlex.split(open_rsp_file, posix=False)
defines = [x for x in command_line if x.startswith('-D')]
include_dirs = [x for x in command_line if x.startswith('-I')]
dash_f = [x for x in command_line if x.startswith('-f')]
warnings = \
[x for x in command_line if x.startswith('/wd' if is_win else '-W')]
others = [x for x in command_line if x not in defines and \
x not in include_dirs and \
x not in dash_f and \
x not in warnings and \
x not in cc_file]
for index, value in enumerate(include_dirs):
if value == '-Igen':
continue
path = value[2:]
if not os.path.isabs(path):
path = os.path.join(build_dir, path)
include_dirs[index] = '-I' + os.path.normpath(path)
# GYP supports paths above the source root like <(DEPTH)/../foo while such
    # paths are unsupported by gn. But gn allows system-absolute paths
    # instead (paths that start with a single '/'). Normalize all paths.
cc_file = [os.path.normpath(os.path.join(build_dir, cc_file[0]))]
# Filter for libFindBadConstructs.so having a relative path in one and
# absolute path in the other.
others_filtered = []
for x in others:
if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
others_filtered.append(
'-Xclang ' +
os.path.join(os.getcwd(),
os.path.normpath(
os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
elif x.startswith('-B'):
others_filtered.append(
'-B' +
os.path.join(os.getcwd(),
os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
else:
others_filtered.append(x)
others = others_filtered
flags_by_output[cc_file[0]] = {
'output': output_name,
'depname': dep_name,
'defines': sorted(defines),
'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
'dash_f': sorted(dash_f),
'warnings': sorted(warnings),
'other': sorted(others),
}
return flags_by_output
def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
"""Return a report of any differences between gyp and gn lists, ignoring
anything in |dont_care_{gyp|gn}| respectively."""
global g_total_differences
if not dont_care_gyp:
dont_care_gyp = []
if not dont_care_gn:
dont_care_gn = []
output = ''
if gyp[name] != gn[name]:
gyp_set = set(gyp[name])
gn_set = set(gn[name])
missing_in_gyp = gyp_set - gn_set
missing_in_gn = gn_set - gyp_set
missing_in_gyp -= set(dont_care_gyp)
missing_in_gn -= set(dont_care_gn)
if missing_in_gyp or missing_in_gn:
output += ' %s differ:\n' % name
if missing_in_gyp:
output += ' In gyp, but not in GN:\n %s' % '\n '.join(
sorted(missing_in_gyp)) + '\n'
g_total_differences += len(missing_in_gyp)
if missing_in_gn:
output += ' In GN, but not in gyp:\n %s' % '\n '.join(
sorted(missing_in_gn)) + '\n\n'
g_total_differences += len(missing_in_gn)
return output
def Run(command_line):
"""Run |command_line| as a subprocess and return stdout. Raises on error."""
return subprocess.check_output(command_line, shell=True)
def main():
if len(sys.argv) < 4:
print ('usage: %s gn_outdir gyp_outdir gn_target '
'[gyp_target1, gyp_target2, ...]' % __file__)
return 1
if len(sys.argv) == 4:
sys.argv.append(sys.argv[3])
gn_out_dir = sys.argv[1]
print >> sys.stderr, 'Expecting gn outdir in %s...' % gn_out_dir
gn = Run('ninja -C %s -t commands %s' % (gn_out_dir, sys.argv[3]))
if sys.platform == 'win32':
# On Windows flags are stored in .rsp files which are created during build.
print >> sys.stderr, 'Building in %s...' % gn_out_dir
Run('ninja -C %s -d keeprsp %s' % (gn_out_dir, sys.argv[3]))
gyp_out_dir = sys.argv[2]
print >> sys.stderr, 'Expecting gyp outdir in %s...' % gyp_out_dir
gyp = Run('ninja -C %s -t commands %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
if sys.platform == 'win32':
# On Windows flags are stored in .rsp files which are created during build.
print >> sys.stderr, 'Building in %s...' % gyp_out_dir
Run('ninja -C %s -d keeprsp %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
all_gyp_flags = GetFlags(gyp.splitlines(),
os.path.join(os.getcwd(), gyp_out_dir))
all_gn_flags = GetFlags(gn.splitlines(),
os.path.join(os.getcwd(), gn_out_dir))
gyp_files = set(all_gyp_flags.keys())
gn_files = set(all_gn_flags.keys())
different_source_list = gyp_files != gn_files
if different_source_list:
print 'Different set of sources files:'
print ' In gyp, not in GN:\n %s' % '\n '.join(
sorted(gyp_files - gn_files))
print ' In GN, not in gyp:\n %s' % '\n '.join(
sorted(gn_files - gyp_files))
print '\nNote that flags will only be compared for files in both sets.\n'
file_list = gyp_files & gn_files
files_with_given_differences = {}
for filename in sorted(file_list):
gyp_flags = all_gyp_flags[filename]
gn_flags = all_gn_flags[filename]
differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
differences += CompareLists(gyp_flags, gn_flags, 'defines')
differences += CompareLists(gyp_flags, gn_flags, 'include_dirs',
['-I%s' % os.path.dirname(BASE_DIR)])
differences += CompareLists(gyp_flags, gn_flags, 'warnings',
# More conservative warnings in GN we consider to be OK.
dont_care_gyp=[
'/wd4091', # 'keyword' : ignored on left of 'type' when no variable
# is declared.
'/wd4456', # Declaration hides previous local declaration.
'/wd4457', # Declaration hides function parameter.
'/wd4458', # Declaration hides class member.
'/wd4459', # Declaration hides global declaration.
'/wd4702', # Unreachable code.
'/wd4800', # Forcing value to bool 'true' or 'false'.
'/wd4838', # Conversion from 'type' to 'type' requires a narrowing
# conversion.
] if sys.platform == 'win32' else None,
dont_care_gn=[
'-Wendif-labels',
'-Wextra',
'-Wsign-compare',
] if not sys.platform == 'win32' else None)
differences += CompareLists(gyp_flags, gn_flags, 'other')
if differences:
files_with_given_differences.setdefault(differences, []).append(filename)
for diff, files in files_with_given_differences.iteritems():
print '\n'.join(sorted(files))
print diff
print 'Total differences:', g_total_differences
# TODO(scottmg): Return failure on difference once we're closer to identical.
return 0
if __name__ == '__main__':
sys.exit(main())
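# Example invocation (the out-dir names below are hypothetical; pass your own
# GN and gyp build directories and a target):
#
#   ./gyp_flag_compare.py out/gn out/Release v8_foo
#
# For every source file built in both configurations, the report lists the
# defines, include_dirs, -f flags, warnings and other flags that differ, and
# ends with the total number of differences found.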
|
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from signbank.dictionary.models import *
from reversion.admin import VersionAdmin
from signbank.settings import server_specific
from signbank.settings.server_specific import FIELDS, SEPARATE_ENGLISH_IDGLOSS_FIELD
from modeltranslation.admin import TranslationAdmin
from guardian.admin import GuardedModelAdmin
from django.contrib.auth import get_permission_codename
from django.contrib import messages
class DatasetAdmin(GuardedModelAdmin):
model = Dataset
list_display = ('name', 'is_public', 'signlanguage',)
class KeywordAdmin(VersionAdmin):
search_fields = ['^text']
class TranslationInline(admin.TabularInline):
model = Translation
extra = 1
raw_id_fields = ['translation']
class RelationToOtherSignInline(admin.TabularInline):
model = Relation
extra = 1
class RelationToForeignSignInline(admin.TabularInline):
model = RelationToForeignSign
extra = 1
# raw_id_fields = ['other_lang_gloss']
class DefinitionInline(admin.TabularInline):
model = Definition
extra = 1
class RelationInline(admin.TabularInline):
model = Relation
fk_name = 'source'
raw_id_fields = ['source', 'target']
verbose_name_plural = "Relations to other Glosses"
extra = 1
class OtherMediaInline(admin.TabularInline):
model = OtherMedia
extra = 1
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin import SimpleListFilter
class SenseNumberListFilter(SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _('number of senses')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'senses'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (('none', _('No Senses')),
('morethanone', _('More than one')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
# Decide how to filter the queryset based on the request
if self.value() == 'none':
return queryset.filter(sense__isnull=True)
if self.value() == 'morethanone':
            return queryset.filter(sense__gt=1)
class GlossAdmin(VersionAdmin):
idgloss_fields = ['lemma']
fieldsets = ((None, {'fields': tuple(idgloss_fields)+tuple(FIELDS['main'])+('signlanguage', 'dialect')}, ),
('Publication Status', {'fields': ('inWeb', 'isNew', 'creator','creationDate','alternative_id'),
'classes': ('collapse',)}, ),
('Phonology', {'fields': FIELDS['phonology'], 'classes': ('collapse',)}, ),
('Semantics', {'fields': FIELDS['semantics'], 'classes': ('collapse',)}),
('Frequency', {'fields': FIELDS['frequency'], 'classes': ('collapse',)}),
('Obsolete Fields', {'fields': ('inittext', ), 'classes': ('collapse',)}),
)
save_on_top = True
save_as = True
list_display = ['lemma']
list_display += ['morph', 'sense', 'sn']
search_fields = ['^lemma__lemmaidglosstranslation__text', '=sn']
list_filter = ['signlanguage', 'dialect', SenseNumberListFilter, 'inWeb', 'domhndsh']
inlines = [ RelationInline, RelationToForeignSignInline, DefinitionInline, TranslationInline, OtherMediaInline ]
history_latest_first = True
class HandshapeAdmin(VersionAdmin):
list_display = ['machine_value', 'english_name', 'dutch_name']
class GlossRevisionAdmin(VersionAdmin):
model = GlossRevision
class RegistrationProfileAdmin(admin.ModelAdmin):
list_display = ('__str__', 'activation_key_expired', )
search_fields = ('user__username', 'user__first_name', )
class DialectInline(admin.TabularInline):
model = Dialect
class DialectAdmin(VersionAdmin):
model = Dialect
class SignLanguageAdmin(VersionAdmin):
model = SignLanguage
inlines = [DialectInline]
# Define an inline admin descriptor for UserProfile model
# which acts a bit like a singleton
class UserProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
# Define a new User admin
class UserAdmin(UserAdmin):
inlines = (UserProfileInline, )
class FieldChoiceAdmin(VersionAdmin):
readonly_fields=['machine_value']
actions=['delete_selected']
if hasattr(server_specific, 'SHOW_ENGLISH_ONLY') and server_specific.SHOW_ENGLISH_ONLY:
show_english_only = True
list_display = ['english_name', 'machine_value','field']
else:
list_display = ['english_name', 'dutch_name', 'machine_value', 'field']
show_english_only = False
list_filter = ['field']
def get_form(self, request, obj=None, **kwargs):
if self.show_english_only:
self.exclude = ('dutch_name', 'chinese_name')
form = super(FieldChoiceAdmin, self).get_form(request, obj, **kwargs)
return form
def get_actions(self, request):
actions = super(FieldChoiceAdmin, self).get_actions(request)
if 'delete_selected' in actions:
            # for field choices, do not offer delete selected to the user,
            # in order to protect against accidental deletion of field choices
del actions['delete_selected']
return actions
def get_action_choices(self, request):
# remove the empty choice '---------' from actions
choices = super(FieldChoiceAdmin, self).get_action_choices(request)
choices.pop(0)
return choices
def has_delete_permission(self, request, obj=None):
if not obj:
# print('ADMIN has_delete_permission obj is None')
# just return False if there is no object, prevent arbitrary deletion of field choices
return False
field_value = obj.__dict__.get('field', '')
field_machine_value = obj.__dict__.get('machine_value', 0)
if not field_machine_value:
print('ADMIN has_delete_permission: field ', field_value, ' has an empty machine value')
from signbank.tools import fields_with_choices_glosses, fields_with_choices_handshapes, \
fields_with_choices_definition, fields_with_choices_morphology_definition, \
fields_with_choices_other_media_type, fields_with_choices_morpheme_type
fields_with_choices_glosses = fields_with_choices_glosses()
if field_value in fields_with_choices_glosses.keys():
queries = [Q(**{ field_name : field_machine_value }) for field_name in fields_with_choices_glosses[field_value]]
query = queries.pop()
for item in queries:
query |= item
count_in_use = Gloss.objects.filter(query).count()
return not count_in_use
fields_with_choices_handshapes = fields_with_choices_handshapes()
if field_value in fields_with_choices_handshapes.keys():
queries_h = [Q(**{ field_name : field_machine_value }) for field_name in fields_with_choices_handshapes[field_value]]
query_h = queries_h.pop()
for item in queries_h:
query_h |= item
count_in_use = Handshape.objects.filter(query_h).count()
return not count_in_use
fields_with_choices_definition = fields_with_choices_definition()
if field_value in fields_with_choices_definition.keys():
queries_d = [Q(**{ field_name : field_machine_value }) for field_name in fields_with_choices_definition[field_value]]
query_d = queries_d.pop()
for item in queries_d:
query_d |= item
count_in_use = Definition.objects.filter(query_d).count()
return not count_in_use
fields_with_choices_morphology_definition = fields_with_choices_morphology_definition()
if field_value in fields_with_choices_morphology_definition.keys():
queries_d = [Q(**{ field_name : field_machine_value }) for field_name in fields_with_choices_morphology_definition[field_value]]
query_d = queries_d.pop()
for item in queries_d:
query_d |= item
count_in_use = MorphologyDefinition.objects.filter(query_d).count()
return not count_in_use
fields_with_choices_other_media_type = fields_with_choices_other_media_type()
if field_value in fields_with_choices_other_media_type.keys():
queries_d = [Q(**{ field_name : field_machine_value }) for field_name in fields_with_choices_other_media_type[field_value]]
query_d = queries_d.pop()
for item in queries_d:
query_d |= item
count_in_use = OtherMedia.objects.filter(query_d).count()
return not count_in_use
fields_with_choices_morpheme_type = fields_with_choices_morpheme_type()
if field_value in fields_with_choices_morpheme_type.keys():
queries_d = [Q(**{ field_name : field_machine_value }) for field_name in fields_with_choices_morpheme_type[field_value]]
query_d = queries_d.pop()
for item in queries_d:
query_d |= item
count_in_use = Morpheme.objects.filter(query_d).count()
return not count_in_use
# fall through: the fieldname is not used in Gloss, Handshape, Definition, MorphologyDefinition, OtherMedia, Morpheme
print('ADMIN, field choices, has_delete_permission: fall through on: ', field_value)
opts = self.opts
codename = get_permission_codename('delete', opts)
# note that this delete option only checks whether the user is allowed, not if there are other uses of the field
# this would be the case for fields that are in the model and used by other signbanks
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
if obj is not None and obj.field == 'FingerSelection':
# This is a reserved field, used for displaying the Finger Selection
            # Do not allow changes to it
# print('ADMIN has_change_permission is False for FingerSelection')
return False
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def delete_selected(self, request, queryset):
# this code is not called anymore for field choices
for obj in queryset:
print('delete_selected not available for field choices, admin command ignored: ', obj)
pass
delete_selected.short_description = "Delete selected field choices"
def save_model(self, request, obj, form, change):
        if obj.machine_value is None:
# Check out the query-set and make sure that it exists
qs = FieldChoice.objects.filter(field=obj.field)
if len(qs) == 0:
# The field does not yet occur within FieldChoice
# Future: ask user if that is what he wants (don't know how...)
# For now: assume user wants to add a new field (e.g: wordClass)
# NOTE: start with '2', because 0,1 are already taken by default values
obj.machine_value = 2
else:
# Calculate highest currently occurring value
highest_machine_value = max([field_choice.machine_value for field_choice in qs])
# The automatic machine value we calculate is 1 higher
                obj.machine_value = highest_machine_value + 1
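                # e.g. if existing machine values for this field are 0, 1 and 5,
                # the newly saved field choice gets machine value 6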
obj.save()
class LanguageAdmin(TranslationAdmin):
pass
class LemmaIdglossAdmin(VersionAdmin):
pass
class LemmaIdglossTranslationAdmin(VersionAdmin):
pass
admin.site.register(Dialect, DialectAdmin)
admin.site.register(SignLanguage, SignLanguageAdmin)
admin.site.register(Gloss, GlossAdmin)
admin.site.register(Morpheme, GlossAdmin)
admin.site.register(Keyword, KeywordAdmin)
admin.site.register(FieldChoice,FieldChoiceAdmin)
admin.site.register(MorphologyDefinition)
admin.site.register(SimultaneousMorphologyDefinition)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Handshape, HandshapeAdmin)
admin.site.register(GlossRevision,GlossRevisionAdmin)
admin.site.register(UserProfile)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Dataset, DatasetAdmin)
admin.site.register(LemmaIdgloss, LemmaIdglossAdmin)
admin.site.register(LemmaIdglossTranslation, LemmaIdglossTranslationAdmin)
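# --- Illustrative sketch (not part of the original admin module) -------------
# FieldChoiceAdmin.has_delete_permission above repeats one pattern per model:
# OR together a Q object for every field that can hold the machine value, then
# count the objects that still use it. A hedged, generic version of that
# pattern is sketched below; the model class and field names are whatever the
# caller supplies.
from functools import reduce
from operator import or_
from django.db.models import Q
def count_field_choice_usage(model_cls, field_names, machine_value):
    """Count objects of model_cls whose listed fields hold machine_value."""
    query = reduce(or_, (Q(**{name: machine_value}) for name in field_names))
    return model_cls.objects.filter(query).count()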
|
|
import six
from adminsortable2.admin import SortableInlineAdminMixin
from django.contrib import admin
from django.db.models import Count
from django.utils.safestring import mark_safe
from icekit.admin_tools.polymorphic import \
ChildModelPluginPolymorphicParentModelAdmin
import models
from icekit.content_collections.admin import TitleSlugAdmin
from icekit.plugins.base import BaseChildModelPlugin, PluginMount
from icekit.admin import ICEkitContentsAdmin
from icekit.templatetags.icekit_tags import grammatical_join
from icekit.admin_tools.mixins import ThumbnailAdminMixin, FluentLayoutsMixin
from icekit.admin_tools.utils import admin_link, admin_url
from polymorphic.admin import PolymorphicChildModelAdmin, PolymorphicChildModelFilter
from glamkit_collections.models import Country
class WorkCreatorsInlineForCreators(admin.TabularInline, ThumbnailAdminMixin):
model = models.WorkCreator
raw_id_fields = ('work',)
extra = 1
exclude = ('order',) # doing this prevents sorting from being edited in this context.
readonly_fields = (
'link',
)
def link(self, inst):
thumb_html = self.thumbnail(inst.work)
if thumb_html:
return '<a href="{0}">{1}</a>'.format(
admin_url(inst.work),
thumb_html,
)
else:
return admin_link(inst.work)
link.allow_tags = True
def get_queryset(self, request):
return super(WorkCreatorsInlineForCreators, self) \
.get_queryset(request) \
.filter(work__publishing_is_draft=True,
creator__publishing_is_draft=True)
class WorkCreatorsInlineForWorks(SortableInlineAdminMixin, WorkCreatorsInlineForCreators):
exclude = None # re-enable sorting
raw_id_fields = ('creator',)
def link(self, inst):
# NB skip ...ForCreators.thumbnail()
thumb_html = self.thumbnail(inst.creator)
if thumb_html:
return '<a href="{0}">{1}</a>'.format(
admin_url(inst.creator),
thumb_html,
)
else:
return admin_link(inst.creator)
link.allow_tags = True
def get_queryset(self, request):
return super(WorkCreatorsInlineForWorks, self) \
.get_queryset(request) \
.filter(work__publishing_is_draft=True,
creator__publishing_is_draft=True)
class WorkOriginsInline(SortableInlineAdminMixin, admin.TabularInline):
model = models.WorkOrigin
raw_id_fields = ('geographic_location', )
extra = 0
class WorkImageInline(
# Some super-weirdness means that this inline doesn't appear on
# docker-cloud staging if SortableInlineAdminMixin is enabled.
# Giving up for now. TODO: reinstate, or choose a different sorting ui lib
# SortableInlineAdminMixin,
admin.TabularInline, ThumbnailAdminMixin
):
model = models.WorkImage
raw_id_fields = ('image', 'work')
extra = 1
readonly_fields = (
'thumbnail',
)
def thumbnail(self, inst):
return '<a href="{0}">{1}</a>{2}'.format(
admin_url(inst.image),
super(WorkImageInline, self).thumbnail(inst),
inst.caption,
)
thumbnail.allow_tags = True
def get_thumbnail_source(self, inst):
return inst.image.image
class CreatorChildAdmin(
PolymorphicChildModelAdmin,
ICEkitContentsAdmin,
FluentLayoutsMixin,
):
base_model = models.CreatorBase
save_on_top = True
raw_id_fields = ('portrait', )
exclude = ('layout', 'alt_slug',)
prepopulated_fields = {"slug": ("name_display",)}
inlines = [WorkCreatorsInlineForCreators] + \
ICEkitContentsAdmin.inlines
readonly_fields = (
'birth_date_earliest',
'birth_date_latest',
'birth_date_sort_ascending',
'birth_date_sort_descending',
'birth_date_edtf',
'death_date_earliest',
'death_date_latest',
'death_date_sort_ascending',
'death_date_sort_descending',
'death_date_edtf',
)
NAME_FIELDSET = ('Name', {
'fields': (
'name_display',
'slug',
'name_sort',
),
})
DATE_FIELDSETS = (
("Dates", {
'fields': (
('birth_date_display',
'death_date_display',),
),
}),
("Advanced date controls", {
'classes': ('collapse',),
'fields': (
('birth_date_earliest',
'birth_date_latest',),
('birth_date_sort_ascending',
'birth_date_sort_descending',),
'birth_date_edtf',
('death_date_earliest',
'death_date_latest',),
('death_date_sort_ascending',
'death_date_sort_descending',),
'death_date_edtf',
),
}),
)
LINKS_FIELDSET = ('Links', {
'fields': (
'website',
'wikipedia_link',
),
})
fieldsets = (
NAME_FIELDSET,
) + DATE_FIELDSETS + (
LINKS_FIELDSET,
("Details", {
'fields': (
'portrait',
'list_image',
'boosted_search_terms',
'admin_notes',
),
}),
)
class WorkChildAdmin(
PolymorphicChildModelAdmin,
ICEkitContentsAdmin,
FluentLayoutsMixin,
TitleSlugAdmin,
):
base_model = models.WorkBase
save_on_top = True
exclude = ('layout', 'alt_slug',)
prepopulated_fields = {"slug": ("accession_number", "title",)}
readonly_fields = (
"date_edtf",
'date_earliest',
'date_latest',
'date_sort_ascending',
'date_sort_descending',
)
inlines = [WorkOriginsInline, WorkCreatorsInlineForWorks, WorkImageInline] + \
ICEkitContentsAdmin.inlines
DATE_FIELDSETS = (
("Date", {
'fields': (
'date_display',
),
}),
("Advanced date controls", {
'classes': ('collapse',),
'fields': (
('date_earliest',
'date_latest',),
('date_sort_ascending',
'date_sort_descending',),
'date_edtf',
),
}),
)
LINKS_FIELDSET = ('Links', {
'fields': (
'website',
'wikipedia_link',
),
})
fieldsets = (
(None, {
'fields': (
'title',
'subtitle',
'slug',
'oneliner',
),
}),
) + DATE_FIELDSETS + (
("Details", {
'fields': (
'credit_line',
'accession_number',
'department',
'list_image',
'boosted_search_terms',
'admin_notes',
),
}),
LINKS_FIELDSET,
)
class WorkChildModelPlugin(six.with_metaclass(
PluginMount, BaseChildModelPlugin)):
"""
Mount point for ``WorkBase`` child model plugins.
"""
model_admin = WorkChildAdmin
class CreatorChildModelPlugin(six.with_metaclass(
PluginMount, BaseChildModelPlugin)):
"""
Mount point for ``CreatorBase`` child model plugins.
"""
model_admin = CreatorChildAdmin
class CreatorBaseAdmin(
ChildModelPluginPolymorphicParentModelAdmin,
ICEkitContentsAdmin,
FluentLayoutsMixin,
ThumbnailAdminMixin,
):
base_model = models.CreatorBase
child_model_plugin_class = CreatorChildModelPlugin
child_model_admin = CreatorChildAdmin
search_fields = (
"name_display",
"name_sort",
"id",
"admin_notes",
)
list_display = ('thumbnail',) + ICEkitContentsAdmin.list_display + (
'works_count',
)
list_display_links = list_display[:2]
list_filter = ICEkitContentsAdmin.list_filter + (
PolymorphicChildModelFilter,
'workcreator__role',
)
def get_queryset(self, request):
return super(CreatorBaseAdmin, self).get_queryset(request)\
.annotate(works_count=Count('works'))
def works_count(self, inst):
return inst.works_count
works_count.admin_order_field = 'works_count'
class CountryFilter(admin.SimpleListFilter):
title = 'Country'
parameter_name = 'country'
def lookups(self, request, model_admin):
return ((c.id, c) for c in Country.objects.filter(
id__in=model_admin.get_queryset(request).values_list(
'origin_locations__country_id', flat=True
).distinct()
))
def queryset(self, request, queryset):
if self.value():
return queryset.filter(origin_locations__geographic_location__country_id=self.value())
else:
return queryset
class WorkBaseAdmin(
ChildModelPluginPolymorphicParentModelAdmin,
ICEkitContentsAdmin,
FluentLayoutsMixin,
ThumbnailAdminMixin,
):
base_model = models.WorkBase
child_model_plugin_class = WorkChildModelPlugin
child_model_admin = WorkChildAdmin
list_display = ('thumbnail',) + ICEkitContentsAdmin.list_display + (
'child_type_name',
'creators_admin_links',
'country_flags',
)
list_display_links = list_display[:2]
search_fields = (
'title',
'slug',
'id',
'admin_notes',
'accession_number',
'credit_line',
)
list_filter = ICEkitContentsAdmin.list_filter + (
PolymorphicChildModelFilter,
'department',
CountryFilter,
)
def get_queryset(self, request):
return super(WorkBaseAdmin, self).get_queryset(request).prefetch_related('origin_locations__country')
def country_flags(self, inst):
result = []
for loc in inst.origin_locations.all():
f = loc.flag()
if f:
result.append(f)
return mark_safe(" ".join(result))
country_flags.short_description = 'Countries'
country_flags.admin_order_field = 'origin_locations'
def creators_admin_links(self, inst):
r = []
od = inst.workcreator_set.filter(
work__publishing_is_draft=True
).creators_grouped_by_role()
for role, creators in od:
if role:
line = "%s: " % role
else:
line = "Creator: "
line += grammatical_join([admin_link(x) for x in creators])
r.append(line)
return "; ".join(r)
creators_admin_links.short_description = "Creators"
creators_admin_links.allow_tags = True
admin.site.register(models.CreatorBase, CreatorBaseAdmin)
admin.site.register(models.WorkBase, WorkBaseAdmin)
admin.site.register(models.Role, TitleSlugAdmin)
admin.site.register(models.WorkImageType, TitleSlugAdmin)
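# A minimal, hypothetical sketch of how the plugin mount points above are meant
# to be used: subclassing WorkChildModelPlugin is enough for PluginMount to
# register the plugin, and WorkBaseAdmin then offers that child model in its
# polymorphic admin. `models.Painting` is an assumed child model of WorkBase,
# not something defined in this module.
#
#   class PaintingPlugin(WorkChildModelPlugin):
#       model = models.Painting
#       model_admin = WorkChildAdmin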
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(test.TestCase):
def _Compare(self, x, rank, fft_length=None, use_placeholder=False):
self._CompareForward(x, rank, fft_length, use_placeholder)
self._CompareBackward(x, rank, fft_length, use_placeholder)
def _CompareForward(self, x, rank, fft_length=None, use_placeholder=False):
x_np = self._npFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfFFT(
x_ph, rank, fft_length, use_gpu=True, feed_dict={x_ph: x})
else:
x_tf = self._tfFFT(x, rank, fft_length, use_gpu=True)
self.assertAllClose(x_np, x_tf, rtol=1e-4, atol=1e-4)
def _CompareBackward(self, x, rank, fft_length=None, use_placeholder=False):
x_np = self._npIFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfIFFT(
x_ph, rank, fft_length, use_gpu=True, feed_dict={x_ph: x})
else:
x_tf = self._tfIFFT(x, rank, fft_length, use_gpu=True)
self.assertAllClose(x_np, x_tf, rtol=1e-4, atol=1e-4)
def _checkGradComplex(self, func, x, y, result_is_complex=True,
use_gpu=False):
with self.test_session(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
# func is a forward or inverse, real or complex, batched or unbatched FFT
# function with a complex input.
z = func(math_ops.complex(inx, iny))
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [list(x.shape), list(y.shape)],
loss, [1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
def _checkGradReal(self, func, x, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
# func is a forward RFFT function (batched or unbatched).
z = func(inx)
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
x_jacob_t, x_jacob_n = test.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
class FFTOpsTest(BaseFFTOpsTest):
def _tfFFT(self, x, rank, fft_length=None, use_gpu=False, feed_dict=None):
# fft_length unused for complex FFTs.
with self.test_session(use_gpu=use_gpu):
return self._tfFFTForRank(rank)(x).eval(feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, use_gpu=False, feed_dict=None):
# fft_length unused for complex FFTs.
with self.test_session(use_gpu=use_gpu):
return self._tfIFFTForRank(rank)(x).eval(feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
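    # np.fft.fft2 computes an n-dimensional FFT over whichever axes it is
    # given (per the numpy docs), so a single axis here is an ordinary 1-D
    # FFT; using fft2 for every rank keeps the three branches uniform.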
if rank == 1:
return np.fft.fft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.ifft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return spectral_ops.fft
elif rank == 2:
return spectral_ops.fft2d
elif rank == 3:
return spectral_ops.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return spectral_ops.ifft
elif rank == 2:
return spectral_ops.ifft2d
elif rank == 3:
return spectral_ops.ifft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testBasic(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np.complex64), rank)
def testBasicPlaceholder(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np.complex64), rank, use_placeholder=True)
def testRandom(self):
np.random.seed(12345)
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(gen((4,) * dims), rank)
def testError(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np.float32) / 10.0
im = np.zeros(shape=(4,) * dims, dtype=np.float32)
self._checkGradComplex(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im, use_gpu=True)
def testGrad_Random(self):
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
self._checkGradComplex(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im, use_gpu=True)
class RFFTOpsTest(BaseFFTOpsTest):
def _CompareBackward(self, x, rank, fft_length=None, use_placeholder=False):
super(RFFTOpsTest, self)._CompareBackward(x, rank, fft_length,
use_placeholder)
def _tfFFT(self, x, rank, fft_length=None, use_gpu=False, feed_dict=None):
with self.test_session(use_gpu=use_gpu):
return self._tfFFTForRank(rank)(x, fft_length).eval(feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, use_gpu=False, feed_dict=None):
with self.test_session(use_gpu=use_gpu):
return self._tfIFFTForRank(rank)(x, fft_length).eval(feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.rfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.irfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return spectral_ops.rfft
elif rank == 2:
return spectral_ops.rfft2d
elif rank == 3:
return spectral_ops.rfft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return spectral_ops.irfft
elif rank == 2:
return spectral_ops.irfft2d
elif rank == 3:
return spectral_ops.irfft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.float32)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testBasic(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._CompareForward(r2c.astype(np.float32), rank, (size,) * rank)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._CompareBackward(c2r.astype(np.complex64), rank, (size,) * rank)
def testBasicPlaceholder(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._CompareForward(r2c.astype(np.float32), rank, (size,) * rank,
use_placeholder=True)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._CompareBackward(c2r.astype(np.complex64), rank, (size,) * rank,
use_placeholder=True)
def testFftLength(self):
if test.is_gpu_available(cuda_only=True):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
# Test truncation (FFT size < dimensions).
fft_length = (size - 2,) * rank
self._CompareForward(r2c.astype(np.float32), rank, fft_length)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._CompareForward(r2c.astype(np.float32), rank, fft_length,
use_placeholder=True)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length,
use_placeholder=True)
# Test padding (FFT size > dimensions).
fft_length = (size + 2,) * rank
self._CompareForward(r2c.astype(np.float32), rank, fft_length)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._CompareForward(r2c.astype(np.float32), rank, fft_length,
use_placeholder=True)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length,
use_placeholder=True)
def testRandom(self):
np.random.seed(12345)
def gen_real(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
ret = re.reshape(shape)
return ret
def gen_complex(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
ret = (re + im * 1j).reshape(shape)
return ret
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
self._CompareForward(gen_real((size,) * dims), rank, (size,) * rank)
complex_dims = (size,) * (dims - 1) + (inner_dim,)
self._CompareBackward(gen_complex(complex_dims), rank, (size,) * rank)
def testError(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfIFFT(x, rank)
for dims in xrange(rank, rank + 2):
x = np.zeros((1,) * rank)
# Test non-rank-1 fft_length produces an error.
fft_length = np.zeros((1, 1)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfIFFT(x, rank, fft_length)
# Test wrong fft_length length.
fft_length = np.zeros((rank + 1,)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfIFFT(x, rank, fft_length)
# Test that calling the kernel directly without padding to fft_length
# produces an error.
rffts_for_rank = {1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],
2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],
3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]}
rfft_fn, irfft_fn = rffts_for_rank[rank]
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least 6 but got: 5"):
x = np.zeros((5,) * rank).astype(np.float32)
fft_length = [6] * rank
with self.test_session():
rfft_fn(x, fft_length).eval()
# TODO(rjryan): Remove when CPU-based IRFFT is supported.
if test.is_gpu_available(cuda_only=True):
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least .* but got: 3"):
x = np.zeros((3,) * rank).astype(np.complex64)
fft_length = [6] * rank
with self.test_session():
irfft_fn(x, fft_length).eval()
def testGrad_Simple(self):
if test.is_gpu_available(cuda_only=True):
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.ones(shape=(size,) * dims, dtype=np.float32)
im = -np.ones(shape=(size,) * dims, dtype=np.float32)
self._checkGradReal(self._tfFFTForRank(rank), re, use_gpu=True)
self._checkGradComplex(
self._tfIFFTForRank(rank),
re,
im,
result_is_complex=False,
use_gpu=True)
def testGrad_Random(self):
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
self._checkGradReal(self._tfFFTForRank(rank), re, use_gpu=True)
self._checkGradComplex(
self._tfIFFTForRank(rank),
re,
im,
result_is_complex=False,
use_gpu=True)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Abstractions over S3's upload/download operations.
This module provides high level abstractions for efficient
uploads/downloads. It handles several things for the user:
* Automatically switching to multipart transfers when
a file is over a specific size threshold
* Uploading/downloading a file in parallel
* Throttling based on max bandwidth
* Progress callbacks to monitor transfers
* Retries. While botocore handles retries for streaming uploads,
it is not possible for it to handle retries for streaming
downloads. This module handles retries for both cases so
you don't need to implement any retry logic yourself.
This module has a reasonable set of defaults. It also allows you
to configure many aspects of the transfer process including:
* Multipart threshold size
* Max parallel downloads
* Max bandwidth
* Socket timeouts
* Retry amounts
There is no support for s3->s3 multipart copies at this
time.
.. _ref_s3transfer_usage:
Usage
=====
The simplest way to use this module is:
.. code-block:: python
client = boto3.client('s3', 'us-west-2')
transfer = S3Transfer(client)
# Upload /tmp/myfile to s3://bucket/key
transfer.upload_file('/tmp/myfile', 'bucket', 'key')
# Download s3://bucket/key to /tmp/myfile
transfer.download_file('bucket', 'key', '/tmp/myfile')
The ``upload_file`` and ``download_file`` methods also accept
``**kwargs``, which will be forwarded through to the corresponding
client operation. Here are a few examples using ``upload_file``::
# Making the object public
transfer.upload_file('/tmp/myfile', 'bucket', 'key',
extra_args={'ACL': 'public-read'})
# Setting metadata
transfer.upload_file('/tmp/myfile', 'bucket', 'key',
extra_args={'Metadata': {'a': 'b', 'c': 'd'}})
# Setting content type
transfer.upload_file('/tmp/myfile.json', 'bucket', 'key',
extra_args={'ContentType': "application/json"})
The ``S3Transfer`` class also supports progress callbacks so you can
provide transfer progress to users. Both the ``upload_file`` and
``download_file`` methods take an optional ``callback`` parameter.
Here's an example of how to print a simple progress percentage
to the user:
.. code-block:: python
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far,
self._size, percentage))
sys.stdout.flush()
transfer = S3Transfer(boto3.client('s3', 'us-west-2'))
# Upload /tmp/myfile to s3://bucket/key and print upload progress.
transfer.upload_file('/tmp/myfile', 'bucket', 'key',
callback=ProgressPercentage('/tmp/myfile'))
You can also provide a TransferConfig object to the S3Transfer
object that gives you more fine grained control over the
transfer. For example:
.. code-block:: python
client = boto3.client('s3', 'us-west-2')
config = TransferConfig(
multipart_threshold=8 * 1024 * 1024,
max_concurrency=10,
num_download_attempts=10,
)
transfer = S3Transfer(client, config)
transfer.upload_file('/tmp/foo', 'bucket', 'key')
"""
import os
import math
import functools
import logging
import socket
import threading
import random
import string
import concurrent.futures
from botocore.compat import six
from botocore.vendored.requests.packages.urllib3.exceptions import \
ReadTimeoutError
from botocore.exceptions import IncompleteReadError
import s3transfer.compat
from s3transfer.exceptions import RetriesExceededError, S3UploadFailedError
__author__ = 'Amazon Web Services'
__version__ = '0.1.11'
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
queue = six.moves.queue
MB = 1024 * 1024
SHUTDOWN_SENTINEL = object()
def random_file_extension(num_digits=8):
return ''.join(random.choice(string.hexdigits) for _ in range(num_digits))
def disable_upload_callbacks(request, operation_name, **kwargs):
if operation_name in ['PutObject', 'UploadPart'] and \
hasattr(request.body, 'disable_callback'):
request.body.disable_callback()
def enable_upload_callbacks(request, operation_name, **kwargs):
if operation_name in ['PutObject', 'UploadPart'] and \
hasattr(request.body, 'enable_callback'):
request.body.enable_callback()
class QueueShutdownError(Exception):
pass
class ReadFileChunk(object):
def __init__(self, fileobj, start_byte, chunk_size, full_file_size,
callback=None, enable_callback=True):
"""
Given a file object shown below:
         |___________________________________________________|
         0          |                 |                 full_file_size
                    |----chunk_size---|
          start_byte
:type fileobj: file
:param fileobj: File like object
:type start_byte: int
:param start_byte: The first byte from which to start reading.
:type chunk_size: int
:param chunk_size: The max chunk size to read. Trying to read
            past the end of the chunk size will behave as if you've
reached the end of the file.
:type full_file_size: int
:param full_file_size: The entire content length associated
with ``fileobj``.
:type callback: function(amount_read)
:param callback: Called whenever data is read from this object.
"""
self._fileobj = fileobj
self._start_byte = start_byte
self._size = self._calculate_file_size(
self._fileobj, requested_size=chunk_size,
start_byte=start_byte, actual_file_size=full_file_size)
self._fileobj.seek(self._start_byte)
self._amount_read = 0
self._callback = callback
self._callback_enabled = enable_callback
@classmethod
def from_filename(cls, filename, start_byte, chunk_size, callback=None,
enable_callback=True):
"""Convenience factory function to create from a filename.
:type start_byte: int
:param start_byte: The first byte from which to start reading.
:type chunk_size: int
:param chunk_size: The max chunk size to read. Trying to read
            past the end of the chunk size will behave as if you've
reached the end of the file.
:type full_file_size: int
:param full_file_size: The entire content length associated
with ``fileobj``.
:type callback: function(amount_read)
:param callback: Called whenever data is read from this object.
:type enable_callback: bool
:param enable_callback: Indicate whether to invoke callback
during read() calls.
:rtype: ``ReadFileChunk``
:return: A new instance of ``ReadFileChunk``
"""
f = open(filename, 'rb')
file_size = os.fstat(f.fileno()).st_size
return cls(f, start_byte, chunk_size, file_size, callback,
enable_callback)
def _calculate_file_size(self, fileobj, requested_size, start_byte,
actual_file_size):
max_chunk_size = actual_file_size - start_byte
return min(max_chunk_size, requested_size)
def read(self, amount=None):
if amount is None:
amount_to_read = self._size - self._amount_read
else:
amount_to_read = min(self._size - self._amount_read, amount)
data = self._fileobj.read(amount_to_read)
self._amount_read += len(data)
if self._callback is not None and self._callback_enabled:
self._callback(len(data))
return data
def enable_callback(self):
self._callback_enabled = True
def disable_callback(self):
self._callback_enabled = False
def seek(self, where):
self._fileobj.seek(self._start_byte + where)
if self._callback is not None and self._callback_enabled:
# To also rewind the callback() for an accurate progress report
self._callback(where - self._amount_read)
self._amount_read = where
def close(self):
self._fileobj.close()
def tell(self):
return self._amount_read
def __len__(self):
# __len__ is defined because requests will try to determine the length
# of the stream to set a content length. In the normal case
# of the file it will just stat the file, but we need to change that
# behavior. By providing a __len__, requests will use that instead
# of stat'ing the file.
return self._size
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def __iter__(self):
# This is a workaround for http://bugs.python.org/issue17575
# Basically httplib will try to iterate over the contents, even
        # if it's a file-like object. This wasn't noticed because we've
# already exhausted the stream so iterating over the file immediately
# stops, which is what we're simulating here.
return iter([])
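# Illustrative sketch (not part of the original module), using a throwaway
# temporary file: ReadFileChunk exposes only the requested byte range and
# reports every read to the optional callback.
def _read_file_chunk_example():
    import tempfile
    progress = []
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"0123456789")
        name = f.name
    try:
        # Expose bytes 2..6 (chunk_size=5) of the 10-byte file.
        with ReadFileChunk.from_filename(name, start_byte=2, chunk_size=5,
                                         callback=progress.append) as chunk:
            assert len(chunk) == 5
            assert chunk.read() == b"23456"  # stops at the chunk boundary
            assert chunk.read() == b""       # then behaves like EOF
        assert sum(progress) == 5
    finally:
        os.remove(name)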
class StreamReaderProgress(object):
"""Wrapper for a read only stream that adds progress callbacks."""
def __init__(self, stream, callback=None):
self._stream = stream
self._callback = callback
def read(self, *args, **kwargs):
value = self._stream.read(*args, **kwargs)
if self._callback is not None:
self._callback(len(value))
return value
class OSUtils(object):
def get_file_size(self, filename):
return os.path.getsize(filename)
def open_file_chunk_reader(self, filename, start_byte, size, callback):
return ReadFileChunk.from_filename(filename, start_byte,
size, callback,
enable_callback=False)
def open(self, filename, mode):
return open(filename, mode)
def remove_file(self, filename):
"""Remove a file, noop if file does not exist."""
# Unlike os.remove, if the file does not exist,
# then this method does nothing.
try:
os.remove(filename)
except OSError:
pass
def rename_file(self, current_filename, new_filename):
s3transfer.compat.rename_file(current_filename, new_filename)
class MultipartUploader(object):
# These are the extra_args that need to be forwarded onto
# subsequent upload_parts.
UPLOAD_PART_ARGS = [
'SSECustomerKey',
'SSECustomerAlgorithm',
'SSECustomerKeyMD5',
'RequestPayer',
]
def __init__(self, client, config, osutil,
executor_cls=concurrent.futures.ThreadPoolExecutor):
self._client = client
self._config = config
self._os = osutil
self._executor_cls = executor_cls
def _extra_upload_part_args(self, extra_args):
# Only the args in UPLOAD_PART_ARGS actually need to be passed
# onto the upload_part calls.
upload_parts_args = {}
for key, value in extra_args.items():
if key in self.UPLOAD_PART_ARGS:
upload_parts_args[key] = value
return upload_parts_args
def upload_file(self, filename, bucket, key, callback, extra_args):
response = self._client.create_multipart_upload(Bucket=bucket,
Key=key, **extra_args)
upload_id = response['UploadId']
try:
parts = self._upload_parts(upload_id, filename, bucket, key,
callback, extra_args)
except Exception as e:
logger.debug("Exception raised while uploading parts, "
"aborting multipart upload.", exc_info=True)
self._client.abort_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id)
raise S3UploadFailedError(
"Failed to upload %s to %s: %s" % (
filename, '/'.join([bucket, key]), e))
self._client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id,
MultipartUpload={'Parts': parts})
def _upload_parts(self, upload_id, filename, bucket, key, callback,
extra_args):
upload_parts_extra_args = self._extra_upload_part_args(extra_args)
parts = []
part_size = self._config.multipart_chunksize
num_parts = int(
math.ceil(self._os.get_file_size(filename) / float(part_size)))
max_workers = self._config.max_concurrency
with self._executor_cls(max_workers=max_workers) as executor:
upload_partial = functools.partial(
self._upload_one_part, filename, bucket, key, upload_id,
part_size, upload_parts_extra_args, callback)
for part in executor.map(upload_partial, range(1, num_parts + 1)):
parts.append(part)
return parts
def _upload_one_part(self, filename, bucket, key,
upload_id, part_size, extra_args,
callback, part_number):
open_chunk_reader = self._os.open_file_chunk_reader
with open_chunk_reader(filename, part_size * (part_number - 1),
part_size, callback) as body:
response = self._client.upload_part(
Bucket=bucket, Key=key,
UploadId=upload_id, PartNumber=part_number, Body=body,
**extra_args)
etag = response['ETag']
return {'ETag': etag, 'PartNumber': part_number}
class ShutdownQueue(queue.Queue):
"""A queue implementation that can be shutdown.
Shutting down a queue means that this class adds a
    trigger_shutdown method that will cause all subsequent
calls to put() to fail with a ``QueueShutdownError``.
It purposefully deviates from queue.Queue, and is *not* meant
    to be a drop-in replacement for ``queue.Queue``.
"""
def _init(self, maxsize):
self._shutdown = False
self._shutdown_lock = threading.Lock()
# queue.Queue is an old style class so we don't use super().
return queue.Queue._init(self, maxsize)
def trigger_shutdown(self):
with self._shutdown_lock:
self._shutdown = True
logger.debug("The IO queue is now shutdown.")
def put(self, item):
# Note: this is not sufficient, it's still possible to deadlock!
# Need to hook into the condition vars used by this class.
with self._shutdown_lock:
if self._shutdown:
raise QueueShutdownError("Cannot put item to queue when "
"queue has been shutdown.")
return queue.Queue.put(self, item)
class MultipartDownloader(object):
def __init__(self, client, config, osutil,
executor_cls=concurrent.futures.ThreadPoolExecutor):
self._client = client
self._config = config
self._os = osutil
self._executor_cls = executor_cls
self._ioqueue = ShutdownQueue(self._config.max_io_queue)
def download_file(self, bucket, key, filename, object_size,
extra_args, callback=None):
with self._executor_cls(max_workers=2) as controller:
            # 1 thread for the future that manages the downloading of parts
# 1 thread for the future that manages IO writes.
download_parts_handler = functools.partial(
self._download_file_as_future,
bucket, key, filename, object_size, callback)
parts_future = controller.submit(download_parts_handler)
io_writes_handler = functools.partial(
self._perform_io_writes, filename)
io_future = controller.submit(io_writes_handler)
results = concurrent.futures.wait(
[parts_future, io_future],
return_when=concurrent.futures.FIRST_EXCEPTION)
self._process_future_results(results)
def _process_future_results(self, futures):
finished, unfinished = futures
for future in finished:
future.result()
def _download_file_as_future(self, bucket, key, filename, object_size,
callback):
part_size = self._config.multipart_chunksize
num_parts = int(math.ceil(object_size / float(part_size)))
max_workers = self._config.max_concurrency
download_partial = functools.partial(
self._download_range, bucket, key, filename,
part_size, num_parts, callback)
try:
with self._executor_cls(max_workers=max_workers) as executor:
list(executor.map(download_partial, range(num_parts)))
finally:
self._ioqueue.put(SHUTDOWN_SENTINEL)
def _calculate_range_param(self, part_size, part_index, num_parts):
start_range = part_index * part_size
if part_index == num_parts - 1:
end_range = ''
else:
end_range = start_range + part_size - 1
range_param = 'bytes=%s-%s' % (start_range, end_range)
return range_param
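    # Worked example of the range computation above, assuming
    # part_size = 8 * MB and num_parts = 3:
    #   part_index 0 -> 'bytes=0-8388607'
    #   part_index 1 -> 'bytes=8388608-16777215'
    #   part_index 2 -> 'bytes=16777216-'  (last part is left open-ended)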
def _download_range(self, bucket, key, filename,
part_size, num_parts, callback, part_index):
try:
range_param = self._calculate_range_param(
part_size, part_index, num_parts)
max_attempts = self._config.num_download_attempts
last_exception = None
for i in range(max_attempts):
try:
logger.debug("Making get_object call.")
response = self._client.get_object(
Bucket=bucket, Key=key, Range=range_param)
streaming_body = StreamReaderProgress(
response['Body'], callback)
buffer_size = 1024 * 16
current_index = part_size * part_index
for chunk in iter(lambda: streaming_body.read(buffer_size),
b''):
self._ioqueue.put((current_index, chunk))
current_index += len(chunk)
return
except (socket.timeout, socket.error,
ReadTimeoutError, IncompleteReadError) as e:
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)
last_exception = e
continue
raise RetriesExceededError(last_exception)
finally:
logger.debug("EXITING _download_range for part: %s", part_index)
def _perform_io_writes(self, filename):
with self._os.open(filename, 'wb') as f:
while True:
task = self._ioqueue.get()
if task is SHUTDOWN_SENTINEL:
logger.debug("Shutdown sentinel received in IO handler, "
"shutting down IO handler.")
return
else:
try:
offset, data = task
f.seek(offset)
f.write(data)
except Exception as e:
logger.debug("Caught exception in IO thread: %s",
e, exc_info=True)
self._ioqueue.trigger_shutdown()
raise
class TransferConfig(object):
def __init__(self,
multipart_threshold=8 * MB,
max_concurrency=10,
multipart_chunksize=8 * MB,
num_download_attempts=5,
max_io_queue=100):
self.multipart_threshold = multipart_threshold
self.max_concurrency = max_concurrency
self.multipart_chunksize = multipart_chunksize
self.num_download_attempts = num_download_attempts
self.max_io_queue = max_io_queue
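# Illustrative sketch (not part of the original module): the full set of
# TransferConfig knobs with their defaults, for reference when tuning.
#   TransferConfig(
#       multipart_threshold=8 * MB,   # use multipart at or above this size
#       max_concurrency=10,           # worker threads per transfer
#       multipart_chunksize=8 * MB,   # size of each part
#       num_download_attempts=5,      # retry budget for streaming downloads
#       max_io_queue=100,             # buffered chunks awaiting disk writes
#   )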
class S3Transfer(object):
ALLOWED_DOWNLOAD_ARGS = [
'VersionId',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'RequestPayer',
]
ALLOWED_UPLOAD_ARGS = [
'ACL',
'CacheControl',
'ContentDisposition',
'ContentEncoding',
'ContentLanguage',
'ContentType',
'Expires',
'GrantFullControl',
'GrantRead',
'GrantReadACP',
        'GrantWriteACP',
'Metadata',
'RequestPayer',
'ServerSideEncryption',
'StorageClass',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'SSEKMSKeyId',
]
def __init__(self, client, config=None, osutil=None):
self._client = client
if config is None:
config = TransferConfig()
self._config = config
if osutil is None:
osutil = OSUtils()
self._osutil = osutil
def upload_file(self, filename, bucket, key,
callback=None, extra_args=None):
"""Upload a file to an S3 object.
Variants have also been injected into S3 client, Bucket and Object.
You don't have to use S3Transfer.upload_file() directly.
"""
if extra_args is None:
extra_args = {}
self._validate_all_known_args(extra_args, self.ALLOWED_UPLOAD_ARGS)
events = self._client.meta.events
events.register_first('request-created.s3',
disable_upload_callbacks,
unique_id='s3upload-callback-disable')
events.register_last('request-created.s3',
enable_upload_callbacks,
unique_id='s3upload-callback-enable')
if self._osutil.get_file_size(filename) >= \
self._config.multipart_threshold:
self._multipart_upload(filename, bucket, key, callback, extra_args)
else:
self._put_object(filename, bucket, key, callback, extra_args)
def _put_object(self, filename, bucket, key, callback, extra_args):
# We're using open_file_chunk_reader so we can take advantage of the
# progress callback functionality.
open_chunk_reader = self._osutil.open_file_chunk_reader
with open_chunk_reader(filename, 0,
self._osutil.get_file_size(filename),
callback=callback) as body:
self._client.put_object(Bucket=bucket, Key=key, Body=body,
**extra_args)
def download_file(self, bucket, key, filename, extra_args=None,
callback=None):
"""Download an S3 object to a file.
Variants have also been injected into S3 client, Bucket and Object.
You don't have to use S3Transfer.download_file() directly.
"""
# This method will issue a ``head_object`` request to determine
# the size of the S3 object. This is used to determine if the
# object is downloaded in parallel.
if extra_args is None:
extra_args = {}
self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS)
object_size = self._object_size(bucket, key, extra_args)
temp_filename = filename + os.extsep + random_file_extension()
try:
self._download_file(bucket, key, temp_filename, object_size,
extra_args, callback)
except Exception:
logger.debug("Exception caught in download_file, removing partial "
"file: %s", temp_filename, exc_info=True)
self._osutil.remove_file(temp_filename)
raise
else:
self._osutil.rename_file(temp_filename, filename)
def _download_file(self, bucket, key, filename, object_size,
extra_args, callback):
if object_size >= self._config.multipart_threshold:
self._ranged_download(bucket, key, filename, object_size,
extra_args, callback)
else:
self._get_object(bucket, key, filename, extra_args, callback)
def _validate_all_known_args(self, actual, allowed):
for kwarg in actual:
if kwarg not in allowed:
raise ValueError(
"Invalid extra_args key '%s', "
"must be one of: %s" % (
kwarg, ', '.join(allowed)))
def _ranged_download(self, bucket, key, filename, object_size,
extra_args, callback):
downloader = MultipartDownloader(self._client, self._config,
self._osutil)
downloader.download_file(bucket, key, filename, object_size,
extra_args, callback)
def _get_object(self, bucket, key, filename, extra_args, callback):
# precondition: num_download_attempts > 0
max_attempts = self._config.num_download_attempts
last_exception = None
for i in range(max_attempts):
try:
return self._do_get_object(bucket, key, filename,
extra_args, callback)
except (socket.timeout, socket.error,
ReadTimeoutError, IncompleteReadError) as e:
# TODO: we need a way to reset the callback if the
# download failed.
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)
last_exception = e
continue
raise RetriesExceededError(last_exception)
def _do_get_object(self, bucket, key, filename, extra_args, callback):
response = self._client.get_object(Bucket=bucket, Key=key,
**extra_args)
streaming_body = StreamReaderProgress(
response['Body'], callback)
with self._osutil.open(filename, 'wb') as f:
for chunk in iter(lambda: streaming_body.read(8192), b''):
f.write(chunk)
def _object_size(self, bucket, key, extra_args):
return self._client.head_object(
Bucket=bucket, Key=key, **extra_args)['ContentLength']
def _multipart_upload(self, filename, bucket, key, callback, extra_args):
uploader = MultipartUploader(self._client, self._config, self._osutil)
uploader.upload_file(filename, bucket, key, callback, extra_args)
|
|
"""See __init__.py for what is considered public here."""
import errno
import itertools
import logging
import os
import shutil
import struct
import sys
import tempfile
import xml.etree.ElementTree as etree
import zipfile
import urllib.request
def safe_open(path, overwrite=False):
"""
Open but do not overwrite by default. Open and overwrite on request.
Takes:
path - path to open
overwrite - allow/disallow to overwrite existing files (boolean)
Special case:
path='-': Return stdout without opening it.
"""
if path == "-":
return sys.stdout.buffer
if overwrite:
return open(path, "wb")
else:
# Open the file only if the open actually creates it,
# that is do not overwrite an existing file.
# http://docs.python.org/2/library/os.html#open-flag-constants
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
return os.fdopen(fd, "wb")
def safe_close(file_):
"""Close anything, but stdout."""
if file_ != sys.stdout.buffer:
file_.close()
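# Illustrative sketch (not part of the original module), taking a hypothetical
# destination path: safe_open() refuses to clobber an existing file unless
# overwrite=True is passed explicitly.
def _safe_open_example(path, data):
    try:
        out = safe_open(path)  # O_CREAT | O_EXCL: fails if path already exists
    except OSError as e:
        if e.errno == errno.EEXIST:
            logging.warning("refusing to overwrite file: {}".format(path))
            return False
        raise
    try:
        out.write(data)
    finally:
        safe_close(out)
    return True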
class NamedFile(object):
"""File-like object with a name attribute."""
def __init__(self, file_, name):
self.file_ = file_
self.name = name
def __getattr__(self, attr):
"""
Delegate attribute lookups to the underlying file in the same manner
as tempfile.NamedTemporaryFile does, but without attribute caching.
"""
return getattr(self.__dict__["file_"], attr)
def hash_file(file_, file_size=None):
"""
Hash an open file.
The hash algorithm and some of the code comes from:
http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes
A multi-file movie's hash is the hash of the first file.
Takes:
file - seekable file-like object
file_size - size of file in bytes
Returns:
hash as a zero-padded, 16-digit, lower case hex string
Raises:
Exception - file too small: < 128 KiB
"""
fmt = "q" # long long
buf_size = struct.calcsize(fmt)
chunk_size = 64 * 1024 # bytes
assert chunk_size % buf_size == 0
def chunk(hash_, seek_args):
file_.seek(*seek_args)
for _ in range(chunk_size // buf_size):
buf = file_.read(buf_size)
hash_ += struct.unpack(fmt, buf)[0]
hash_ &= 0xFFFFFFFFFFFFFFFF # to remain as 64 bit number
return hash_
saved_pos = file_.tell()
try:
if file_size is None:
file_.seek(0, os.SEEK_END)
file_size = file_.tell()
if file_size < 2 * chunk_size:
raise Exception(
"file too small: < {} bytes".format(2 * chunk_size))
hash_ = file_size
hash_ = chunk(hash_, seek_args=(0, os.SEEK_SET))
hash_ = chunk(hash_, seek_args=(-chunk_size, os.SEEK_END))
finally:
file_.seek(saved_pos, os.SEEK_SET)
hex_str = "{:016x}".format(hash_)
logging.info("hash: {}".format(hex_str))
return hex_str
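# Illustrative sketch (not part of the original module): hashing a movie from
# its path. hash_file() only needs a seekable binary file object and raises
# if the file is smaller than 128 KiB.
def _hash_file_example(path):
    with open(path, "rb") as file_:
        return hash_file(file_)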
class UserAgent(object):
"""Communicate with subtitle servers."""
def __init__(self, server, opener=urllib.request.build_opener()):
"""
Takes:
server - FQDN or IP of server
e.g. "www.opensubtitles.org"
opener - urllib(2) opener object
"""
self.server = server
self.opener = opener
# FIXME Which variant of ISO 639 is accepted?
#
# So far I have used the 3-letter codes like 'eng', 'hun'...
def _search_page_url(
self, movie_hash, language, cd_count=1, _fmt="simplexml"):
"""
Construct search page URL.
Takes:
movie_hash - hash of movie (hex string)
cf. movie_hash()
language - preferred language (ISO 639 code string)
cd_count - how many video files make up the movie?
Returns:
search page URL
"""
url = (
"http://"
+ self.server
+ "/en"
+ "/search"
+ "/sublanguageid-{}".format(language)
+ "/moviehash-{}".format(movie_hash)
+ "/subsumcd-{}".format(cd_count)
+ "/{}".format(_fmt)
)
logging.debug("search_page_url: {}".format(url))
return url
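    # For example, with server "www.opensubtitles.org" the method above
    # builds URLs of the form:
    #   http://www.opensubtitles.org/en/search/sublanguageid-eng
    #       /moviehash-<16-digit hex hash>/subsumcd-1/simplexml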
def search(self, movie, language):
"""
Takes:
movie - list of video file paths in "natural order"
language - ISO 639 code of subtitle language
Returns:
list of subtitle archive URLs (ordered as in the search results)
"""
with open(movie[0], "rb") as file_:
movie_hash = hash_file(file_)
cd_count = len(movie)
search_page_url = self._search_page_url(
language=language,
movie_hash=movie_hash,
cd_count=cd_count,
)
# future FIXME use absolute xpath: /search/results/subtitle/download
#
# findall with an absolute xpath is broken in
        # xml.etree.ElementTree 1.3.0. I couldn't find the issue on
# bugs.python.org, but here is the code issuing the warning:
# /usr/lib/python2.7/xml/etree/ElementTree.py:745
search_page_xml = self.opener.open(search_page_url)
tree = etree.parse(search_page_xml)
search_results = [
elem.text for elem in tree.findall("./results/subtitle/download")]
search_page_xml.close()
return search_results
def __repr__(self):
return "{}({!r})".format(self.__class__, self.__dict__)
def __str__(self):
return "{}({!r})".format(self.__class__, self.server)
class SubtitleArchive(object):
"""
Access subtitles in a subtitle archive.
I haven't ever found a specification for opensubtitles.org's subtitle
archive format, so I'm listing my basic assumptions here. -- rubasov
The archive is a valid .zip file with any name.
The .zip contains no directories.
(Though we try to handle the presence of directories gracefully.)
    The .zip contains one or more subtitle file(s).
A subtitle file has one of the extensions listed here (case insensitive):
http://trac.opensubtitles.org/projects/opensubtitles
/wiki/DevReadFirst#Subtitlefilesextensions
All subtitle files in one archive belong to the same movie.
There is exactly one subtitle file for each video file of the movie.
(Think of multi-CD movies.)
There are no other subtitle files in the archive.
All other files in the archive can be ignored.
(e.g. .nfo files)
The "natural order" of the subtitle files is the same as if we have
ordered them by their archived names case insensitively.
"""
def __init__(
self,
url,
opener=urllib.request.build_opener(),
sort_key=str.lower,
extensions=set(
[".srt", ".sub", ".smi", ".txt", ".ssa", ".ass", ".mpl"])):
"""
Should use it as a context manager:
with SubtitleArchive() as archive:
...
Takes:
url - url of the subtitle archive
opener - urllib(2) opener object
sort_key - determines yield order of subtitles
extensions - iterable of valid subtitle extensions
lower case, include leading dot
"""
self.url = url
self.opener = opener
self.sort_key = sort_key
self.extensions = extensions
# We may set these directly for testing purposes.
self.tempfile = None
self.zipfile = None
logging.debug("archive_url: {}".format(self.url))
def __enter__(self):
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
if self.zipfile is not None:
self.zipfile.close()
if self.tempfile is not None:
self.tempfile.close()
            # tempfile is responsible for deleting
            # the NamedTemporaryFile at this point
def _urlopen_via_tempfile(self):
# zipfile needs seekable file-like objects.
# Therefore we download the remote file to a local temporary file.
# See the notes here on why we need a *Named*TemporaryFile:
# http://docs.python.org/2/library/zipfile#zipfile.ZipFile.open
if self.tempfile is None:
dst = tempfile.NamedTemporaryFile()
src = self.opener.open(self.url)
shutil.copyfileobj(src, dst)
src.close()
dst.seek(0, os.SEEK_SET)
self.tempfile = dst
def _open_as_zipfile(self):
if self.zipfile is None:
self._urlopen_via_tempfile()
self.zipfile = zipfile.ZipFile(self.tempfile.name)
def yield_open(self):
"""
Yields:
subtitle_file with an extra name attribute in the order
determined by sort_key.
"""
self._open_as_zipfile()
for name in sorted(self.zipfile.namelist(), key=self.sort_key):
ext = os.path.splitext(name)[1]
if ext.lower() not in self.extensions:
continue
with self.zipfile.open(name) as file_:
yield NamedFile(file_, name)
def extract(self, movie, builder, overwrite=False):
"""
Extract subtitles from archive according to movie and naming scheme.
Takes:
movie - list of video files in "natural order"
builder - FilenameBuilder() object
overwrite - pass down to safe_open
Returns:
number of subtitle files extracted and successfully written
"""
template_counter = itertools.count(1)
count_of_files_written = 0
for template_num, video_path, subtitle_file in zip(
template_counter, movie, self.yield_open()):
dst = builder.build(
video=video_path,
subtitle=subtitle_file.name,
num=template_num,
)
logging.debug("src: {}".format(subtitle_file.name))
logging.debug("dst: {}".format(dst))
try:
dst_file = safe_open(dst, overwrite=overwrite)
except OSError as e:
if e.errno == errno.EEXIST:
logging.warning(
"refusing to overwrite file: {}".format(dst))
else:
raise
else:
shutil.copyfileobj(subtitle_file, dst_file)
count_of_files_written += 1
safe_close(dst_file)
return count_of_files_written
def __repr__(self):
return "{}({!r})".format(self.__class__, self.__dict__)
def __str__(self):
return "{}({!r})".format(self.__class__, self.url)
class FilenameBuilder(object):
"""
Construct filenames for subtitles.
First, I wanted to write subtitles according to the standard subtitle
lookup rules of video players. That is - given the movie path,
replace the movie extension with the subtitle extension.
Later, I ended up with a much more generic templating. For details
see Naming Schemes in the manual of opensub-get.
NOTE We do not protect our user from silly combinations of
templates and input filenames like:
template : {video/dir}{video/base}{subtitle/ext}
filenames : /dir/ + subtitle.srt -> /dir/.srt
"""
def __init__(self, template="{video/dir}{video/base}{subtitle/ext}"):
"""
Takes:
template - string optionally containing template variables
see tpl_dict below for valid template variables
"""
self.template = template
def _split_dir_base_ext(self, path):
"""
Split a file path 3-way.
Example:
path : foo/bar/baz.qux
dir : foo/bar/
base : baz
ext : .qux
The concatenation of dir, base and ext add up to a path
equivalent to the original (roundtrip safety).
"""
if path == "":
raise Exception("invalid path: empty string")
head, tail = os.path.split(path)
# os.path.split stripped trailing slashes,
# we have to add them back for roundtrip safety.
dir_ = os.path.join(head, "")
base, ext = os.path.splitext(tail)
return dir_, base, ext
def build(self, video=None, subtitle=None, num=None):
"""
Takes:
video - path to video file
subtitle - path of subtitle in the archive
num - file number for numbered templating
Returns:
path that can be used to write the subtitle to
"""
v_dir, v_base, v_ext = self._split_dir_base_ext(video)
s_dir, s_base, s_ext = self._split_dir_base_ext(subtitle)
template_dict = {
"num": num,
"video/dir": v_dir,
"video/base": v_base,
"video/ext": v_ext,
"subtitle/dir": s_dir,
"subtitle/base": s_base,
"subtitle/ext": s_ext,
}
# Delete keys whose value is None from the template dictionary.
# This way format() will raise a KeyError when
# it encounters a template variable without a value.
        template_dict = {
            k: v for k, v
            in template_dict.items() if v is not None}
# FIXME python3.2 PendingDeprecationWarning:
# object.__format__ with a non-empty format string is deprecated
#
# I'm completely lost what would be the non-deprecated version.
# -- rubasov
name_built = self.template.format(**template_dict)
return name_built
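# Illustrative sketch (not part of the original module): with the default
# template the built name keeps the video's directory and base name and takes
# the subtitle's extension.
def _filename_builder_example():
    builder = FilenameBuilder()  # "{video/dir}{video/base}{subtitle/ext}"
    dst = builder.build(video="film.mkv", subtitle="film.eng.srt", num=1)
    assert dst == "film.srt"
    return dst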
|
|
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import sys
from mox3 import mox
from oslo_serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import network
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20CreateNetworkJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20CreateNetworkJSON, self).setUp(plurals={'tags': 'tag'})
def _test_create_network(self, **kwargs):
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
resource = kwargs.pop('resource', 'network')
name = kwargs.pop('name', 'myname')
myid = kwargs.pop('myid', 'myid')
args = kwargs.pop('args', [name, ])
position_names = kwargs.pop('position_names', ['name', ])
position_values = kwargs.pop('position_values', [name, ])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
**kwargs)
def test_create_network(self):
# Create net: myname.
self._test_create_network()
def test_create_network_with_unicode(self):
# Create net: u'\u7f51\u7edc'.
self._test_create_network(name=u'\u7f51\u7edc')
def test_create_network_description(self):
        # Create net: --description 'Nice network' myname.
name = 'myname'
args = ['--description', 'Nice network', name]
self._test_create_network(name=name,
args=args,
description='Nice network')
def test_create_network_tenant_underscore(self):
# Create net: --tenant_id tenantid myname.
name = 'myname'
args = ['--tenant_id', 'tenantid', name]
self._test_create_network(name=name, args=args, tenant_id="tenantid")
def test_create_network_tenant_dash(self):
# Test dashed options
# Create net: --tenant_id tenantid myname.
name = 'myname'
args = ['--tenant-id', 'tenantid', name]
self._test_create_network(name=name, args=args, tenant_id="tenantid")
def test_create_network_provider_args(self):
# Create net: with --provider arguments.
# Test --provider attributes before network name
name = 'myname'
args = ['--provider:network_type', 'vlan',
'--provider:physical_network', 'physnet1',
'--provider:segmentation_id', '400', name]
position_names = ['provider:network_type',
'provider:physical_network',
'provider:segmentation_id', 'name']
position_values = ['vlan', 'physnet1', '400', name]
self._test_create_network(name=name,
args=args,
position_names=position_names,
position_values=position_values)
def test_create_network_tags(self):
# Create net: myname --tags a b.
name = 'myname'
args = [name, '--tags', 'a', 'b']
self._test_create_network(name=name, args=args, tags=['a', 'b'])
def test_create_network_state_underscore(self):
# Create net: --admin_state_down myname.
name = 'myname'
args = ['--admin_state_down', name, ]
self._test_create_network(name=name, args=args, admin_state_up=False)
def test_create_network_state_dash(self):
# Test dashed options
name = 'myname'
args = ['--admin-state-down', name, ]
self._test_create_network(name=name, args=args, admin_state_up=False)
def test_create_network_vlan_transparent(self):
# Create net: myname --vlan-transparent True.
name = 'myname'
args = ['--vlan-transparent', 'True', name]
self._test_create_network(name=name,
args=args,
vlan_transparent='True')
def test_create_network_with_qos_policy(self):
# Create net: --qos-policy mypolicy.
name = 'myname'
qos_policy_name = 'mypolicy'
args = [name, '--qos-policy', qos_policy_name]
position_names = ['name', 'qos_policy_id']
position_values = [name, qos_policy_name]
self._test_create_network(name=name,
args=args,
position_names=position_names,
position_values=position_values)
def test_create_network_with_az_hint(self):
# Create net: --availability-zone-hint zone1
# --availability-zone-hint zone2.
name = 'myname'
args = ['--availability-zone-hint', 'zone1',
'--availability-zone-hint', 'zone2', name]
position_names = ['availability_zone_hints', 'name']
position_values = [['zone1', 'zone2'], name]
self._test_create_network(name=name,
args=args,
position_names=position_names,
position_values=position_values)
def test_create_network_with_dns_domain(self):
# Create net: --dns-domain my-domain.org.
name = 'myname'
dns_domain_name = 'my-domain.org.'
args = [name, '--dns-domain', dns_domain_name]
position_names = ['name', 'dns_domain']
position_values = [name, dns_domain_name]
self._test_create_network(name=name,
args=args,
position_names=position_names,
position_values=position_values)
class CLITestV20ListNetworkJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20ListNetworkJSON, self).setUp(plurals={'tags': 'tag'})
def test_list_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(path, query),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEqual('\n', _str)
def _test_list_networks(self, cmd, detail=False, tags=(),
fields_1=(), fields_2=(), page_size=None,
sort_key=(), sort_dir=(), base_args=None,
query=''):
resources = "networks"
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources(resources, cmd, detail, tags,
fields_1, fields_2, page_size=page_size,
sort_key=sort_key, sort_dir=sort_dir,
base_args=base_args, query=query)
def test_list_nets_pagination(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources_with_pagination("networks", cmd)
def test_list_nets_sort(self):
# list nets:
# --sort-key name --sort-key id --sort-dir asc --sort-dir desc
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['asc', 'desc'])
def test_list_nets_sort_with_keys_more_than_dirs(self):
# list nets: --sort-key name --sort-key id --sort-dir desc
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['desc'])
def test_list_nets_sort_with_dirs_more_than_keys(self):
# list nets: --sort-key name --sort-dir desc --sort-dir asc
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name'],
sort_dir=['desc', 'asc'])
def test_list_nets_limit(self):
# list nets: -P.
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, page_size=1000)
def test_list_nets_detail(self):
# list nets: -D.
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, True)
def test_list_nets_tags(self):
# List nets: -- --tags a b.
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=['a', 'b'])
def test_list_nets_tags_with_unicode(self):
# List nets: -- --tags u'\u7f51\u7edc'.
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=[u'\u7f51\u7edc'])
def test_list_nets_detail_tags(self):
# List nets: -D -- --tags a b.
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, detail=True, tags=['a', 'b'])
def _test_list_nets_extend_subnets(self, data, expected):
def setup_list_stub(resources, data, query):
reses = {resources: data}
resstr = self.client.serialize(reses)
resp = (test_cli20.MyResp(200), resstr)
path = getattr(self.client, resources + '_path')
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, 'get_client')
self.mox.StubOutWithMock(self.client.httpclient, 'request')
cmd.get_client().MultipleTimes().AndReturn(self.client)
setup_list_stub('networks', data, '')
filters = ''
for n in data:
for s in n['subnets']:
filters = filters + "&id=%s" % s
setup_list_stub('subnets',
[{'id': 'mysubid1', 'cidr': '192.168.1.0/24'},
{'id': 'mysubid2', 'cidr': '172.16.0.0/24'},
{'id': 'mysubid3', 'cidr': '10.1.1.0/24'}],
query='fields=id&fields=cidr' + filters)
self.mox.ReplayAll()
args = []
cmd_parser = cmd.get_parser('list_networks')
parsed_args = cmd_parser.parse_args(args)
result = cmd.take_action(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_result = [x for x in result[1]]
self.assertEqual(len(expected), len(_result))
for res, exp in zip(_result, expected):
self.assertEqual(len(exp), len(res))
for obsrvd, expctd in zip(res, exp):
self.assertEqual(expctd, obsrvd)
def test_list_nets_extend_subnets(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid2',
'mysubid3']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2',
'mysubid2 172.16.0.0/24\nmysubid3 10.1.1.0/24')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_extend_subnets_no_subnet(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid4']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2', 'mysubid4 ')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_fields(self):
# List nets: --fields a --fields b -- --fields c d.
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_nets_columns(self, cmd, returned_body,
args=('-f', 'json')):
resources = 'networks'
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_columns(cmd, resources, returned_body, args=args)
def test_list_nets_defined_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body,
args=['-f', 'json', '-c', 'id'])
_str = self.fake_stdout.make_string()
returned_networks = jsonutils.loads(_str)
self.assertEqual(1, len(returned_networks))
net = returned_networks[0]
self.assertEqual(1, len(net))
self.assertIn("id", net.keys())
def test_list_nets_with_default_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body)
_str = self.fake_stdout.make_string()
returned_networks = jsonutils.loads(_str)
self.assertEqual(1, len(returned_networks))
net = returned_networks[0]
self.assertEqual(3, len(net))
self.assertEqual(0, len(set(net) ^ set(cmd.list_columns)))
def test_list_external_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "router%3Aexternal=True&id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEqual('\n', _str)
def _test_list_external_nets(self, resources, cmd,
detail=False, tags=(),
fields_1=(), fields_2=()):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
if query:
query += '&router%3Aexternal=True'
else:
query += 'router%3Aexternal=True'
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
def test_list_external_nets_detail(self):
# list external nets: -D.
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd, True)
def test_list_external_nets_tags(self):
# List external nets: -- --tags a b.
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources,
cmd, tags=['a', 'b'])
def test_list_external_nets_detail_tags(self):
# List external nets: -D -- --tags a b.
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
detail=True, tags=['a', 'b'])
def test_list_external_nets_fields(self):
# List external nets: --fields a --fields b -- --fields c d.
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_list_shared_networks(self):
# list nets : --shared False
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, base_args='--shared False'.split(),
query='shared=False')
class CLITestV20UpdateNetworkJSON(test_cli20.CLITestV20Base):
def test_update_network_exception(self):
# Update net: myid.
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def test_update_network(self):
# Update net: myid --name myname --tags a b.
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b', '--description',
'This network takes the scenic route'],
{'name': 'myname', 'tags': ['a', 'b'],
'description': 'This network takes the '
'scenic route'})
def test_update_network_with_unicode(self):
# Update net: myid --name u'\u7f51\u7edc' --tags a b.
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', u'\u7f51\u7edc',
'--tags', 'a', 'b'],
{'name': u'\u7f51\u7edc',
'tags': ['a', 'b'], }
)
def test_update_network_with_qos_policy(self):
# Update net: myid --qos-policy mypolicy.
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--qos-policy', 'mypolicy'],
{'qos_policy_id': 'mypolicy', })
def test_update_network_with_no_qos_policy(self):
# Update net: myid --no-qos-policy.
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--no-qos-policy'],
{'qos_policy_id': None, })
def test_update_network_with_dns_domain(self):
# Update net: myid --dns-domain my-domain.org.
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--dns-domain', 'my-domain.org.'],
{'dns_domain': 'my-domain.org.', })
def test_update_network_with_no_dns_domain(self):
# Update net: myid --no-dns-domain
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--no-dns-domain'],
{'dns_domain': "", })
class CLITestV20ShowNetworkJSON(test_cli20.CLITestV20Base):
def test_show_network(self):
# Show net: --fields id --fields name myid.
resource = 'network'
cmd = network.ShowNetwork(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
class CLITestV20DeleteNetworkJSON(test_cli20.CLITestV20Base):
def test_delete_network(self):
# Delete net: myid.
resource = 'network'
cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_bulk_delete_network(self):
# Delete net: myid1 myid2.
resource = 'network'
cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
myid1 = 'myid1'
myid2 = 'myid2'
args = [myid1, myid2]
self._test_delete_resource(resource, cmd, myid1, args, extra_id=myid2)
def test_bulk_delete_network_fail(self):
# Delete net: myid1 myid2.
resource = 'network'
cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
myid1 = 'myid1'
myid2 = 'myid2'
args = [myid1, myid2]
self.assertRaises(exceptions.NeutronCLIError,
self._test_delete_resource,
resource, cmd, myid1, args, extra_id=myid2,
delete_fail=True)
class CLITestV20ExtendListNetworkJSON(test_cli20.CLITestV20Base):
def _test_extend_list(self, mox_calls):
data = [{'id': 'netid%d' % i, 'name': 'net%d' % i,
'subnets': ['mysubid%d' % i]}
for i in range(10)]
self.mox.StubOutWithMock(self.client.httpclient, "request")
path = getattr(self.client, 'subnets_path')
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
cmd.get_client().MultipleTimes().AndReturn(self.client)
mox_calls(path, data)
self.mox.ReplayAll()
known_args, _vs = cmd.get_parser('create_subnets').parse_known_args()
cmd.extend_list(data, known_args)
self.mox.VerifyAll()
def _build_test_data(self, data):
subnet_ids = []
response = []
filters = ""
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets'])
for subnet_id in n['subnets']:
filters = "%s&id=%s" % (filters, subnet_id)
response.append({'id': subnet_id,
'cidr': '192.168.0.0/16'})
resp_str = self.client.serialize({'subnets': response})
resp = (test_cli20.MyResp(200), resp_str)
return filters, resp
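    # A small illustration (hypothetical IDs, mirroring the data built in
    # _test_extend_list): for two networks whose 'subnets' are ['sub-a'] and
    # ['sub-b'], |filters| becomes '&id=sub-a&id=sub-b' and the mocked
    # response lists both subnets with cidr '192.168.0.0/16'.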
def test_extend_list(self):
def mox_calls(path, data):
filters, response = self._build_test_data(data)
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(
path, 'fields=id&fields=cidr' + filters), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
def test_extend_list_exceed_max_uri_len(self):
def mox_calls(path, data):
sub_data_lists = [data[:len(data) - 1], data[len(data) - 1:]]
filters, response = self._build_test_data(data)
            # 1 char of excess URI length will cause a split into 2 requests
self.mox.StubOutWithMock(self.client.httpclient,
"_check_uri_length")
self.client.httpclient._check_uri_length(mox.IgnoreArg()).AndRaise(
exceptions.RequestURITooLong(excess=1))
for data in sub_data_lists:
filters, response = self._build_test_data(data)
self.client.httpclient._check_uri_length(
mox.IgnoreArg()).AndReturn(None)
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(
path, 'fields=id&fields=cidr%s' % filters),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
|
|
"""Support for KNX/IP lights."""
from __future__ import annotations
from typing import Any, Callable, Iterable
from xknx.devices import Light as XknxLight
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.color as color_util
from .const import DOMAIN
from .knx_entity import KnxEntity
from .schema import LightSchema
DEFAULT_COLOR = (0.0, 0.0)
DEFAULT_BRIGHTNESS = 255
DEFAULT_WHITE_VALUE = 255
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: Callable[[Iterable[Entity]], None],
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up lights for KNX platform."""
entities = []
for device in hass.data[DOMAIN].xknx.devices:
if isinstance(device, XknxLight):
entities.append(KNXLight(device))
async_add_entities(entities)
class KNXLight(KnxEntity, LightEntity):
"""Representation of a KNX light."""
def __init__(self, device: XknxLight) -> None:
"""Initialize of KNX light."""
self._device: XknxLight
super().__init__(device)
self._min_kelvin = device.min_kelvin or LightSchema.DEFAULT_MIN_KELVIN
self._max_kelvin = device.max_kelvin or LightSchema.DEFAULT_MAX_KELVIN
self._min_mireds = color_util.color_temperature_kelvin_to_mired(
self._max_kelvin
)
self._max_mireds = color_util.color_temperature_kelvin_to_mired(
self._min_kelvin
)
@property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
if self._device.supports_brightness:
return self._device.current_brightness
hsv_color = self._hsv_color
if self._device.supports_color and hsv_color:
return round(hsv_color[-1] / 100 * 255)
return None
@property
def hs_color(self) -> tuple[float, float] | None:
"""Return the HS color value."""
rgb: tuple[int, int, int] | None = None
if self._device.supports_rgbw or self._device.supports_color:
rgb, _ = self._device.current_color
return color_util.color_RGB_to_hs(*rgb) if rgb else None
@property
def _hsv_color(self) -> tuple[float, float, float] | None:
"""Return the HSV color value."""
rgb: tuple[int, int, int] | None = None
if self._device.supports_rgbw or self._device.supports_color:
rgb, _ = self._device.current_color
return color_util.color_RGB_to_hsv(*rgb) if rgb else None
@property
def white_value(self) -> int | None:
"""Return the white value."""
white: int | None = None
if self._device.supports_rgbw:
_, white = self._device.current_color
return white
@property
def color_temp(self) -> int | None:
"""Return the color temperature in mireds."""
if self._device.supports_color_temperature:
kelvin = self._device.current_color_temperature
# Avoid division by zero if actuator reported 0 Kelvin (e.g., uninitialized DALI-Gateway)
if kelvin is not None and kelvin > 0:
return color_util.color_temperature_kelvin_to_mired(kelvin)
if self._device.supports_tunable_white:
relative_ct = self._device.current_tunable_white
if relative_ct is not None:
                # KNX devices typically use Kelvin, so we use it as the base
                # for deriving the color temperature from the relative value
return color_util.color_temperature_kelvin_to_mired(
self._min_kelvin
+ ((relative_ct / 255) * (self._max_kelvin - self._min_kelvin))
)
return None
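        # For example (assuming an illustrative 2700-6000 K range): a relative
        # tunable-white value of 255 maps to 6000 K, roughly 166 mireds, while
        # 0 maps to 2700 K, roughly 370 mireds.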
@property
def min_mireds(self) -> int:
"""Return the coldest color temp this light supports in mireds."""
return self._min_mireds
@property
def max_mireds(self) -> int:
"""Return the warmest color temp this light supports in mireds."""
return self._max_mireds
@property
def effect_list(self) -> list[str] | None:
"""Return the list of supported effects."""
return None
@property
def effect(self) -> str | None:
"""Return the current effect."""
return None
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return bool(self._device.state)
@property
def supported_features(self) -> int:
"""Flag supported features."""
flags = 0
if self._device.supports_brightness:
flags |= SUPPORT_BRIGHTNESS
if self._device.supports_color:
flags |= SUPPORT_COLOR | SUPPORT_BRIGHTNESS
if self._device.supports_rgbw:
flags |= SUPPORT_COLOR | SUPPORT_WHITE_VALUE
if (
self._device.supports_color_temperature
or self._device.supports_tunable_white
):
flags |= SUPPORT_COLOR_TEMP
return flags
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
white_value = kwargs.get(ATTR_WHITE_VALUE, self.white_value)
mireds = kwargs.get(ATTR_COLOR_TEMP, self.color_temp)
update_brightness = ATTR_BRIGHTNESS in kwargs
update_color = ATTR_HS_COLOR in kwargs
update_white_value = ATTR_WHITE_VALUE in kwargs
update_color_temp = ATTR_COLOR_TEMP in kwargs
# avoid conflicting changes and weird effects
if not (
self.is_on
or update_brightness
or update_color
or update_white_value
or update_color_temp
):
await self._device.set_on()
if self._device.supports_brightness and (
update_brightness and not update_color
):
# if we don't need to update the color, try updating brightness
# directly if supported; don't do it if color also has to be
# changed, as RGB color implicitly sets the brightness as well
await self._device.set_brightness(brightness)
elif (self._device.supports_rgbw or self._device.supports_color) and (
update_brightness or update_color or update_white_value
):
# change RGB color, white value (if supported), and brightness
            # if brightness or hs_color was not yet set, fall back to the
            # default values when calculating the RGB value
if brightness is None:
brightness = DEFAULT_BRIGHTNESS
if hs_color is None:
hs_color = DEFAULT_COLOR
if white_value is None and self._device.supports_rgbw:
white_value = DEFAULT_WHITE_VALUE
hsv_color = hs_color + (brightness * 100 / 255,)
rgb = color_util.color_hsv_to_RGB(*hsv_color)
await self._device.set_color(rgb, white_value)
if update_color_temp:
kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds))
kelvin = min(self._max_kelvin, max(self._min_kelvin, kelvin))
if self._device.supports_color_temperature:
await self._device.set_color_temperature(kelvin)
elif self._device.supports_tunable_white:
relative_ct = int(
255
* (kelvin - self._min_kelvin)
/ (self._max_kelvin - self._min_kelvin)
)
await self._device.set_tunable_white(relative_ct)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
await self._device.set_off()
|
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
from __future__ import print_function
import common
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import threading
import time
import uuid
from symbolizer import BuildIdsPaths, RunSymbolizer
FAR = common.GetHostToolPathFromPlatform('far')
# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5
def _AttachKernelLogReader(target):
"""Attaches a kernel log reader as a long-running SSH task."""
logging.info('Attaching kernel logger.')
return target.RunCommandPiped(['log_listener',
'--since_now',
'--hide_metadata',
'--tag',
'klog',
],
stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
class MergedInputStream(object):
"""Merges a number of input streams into a UTF-8 encoded UNIX pipe on a
dedicated thread. Terminates when the file descriptor of the primary stream
(the first in the sequence) is closed."""
def __init__(self, streams):
assert len(streams) > 0
self._streams = streams
self._output_stream = None
self._thread = None
def Start(self):
"""Returns a pipe to the merged output stream."""
read_pipe, write_pipe = os.pipe()
self._output_stream = os.fdopen(write_pipe, 'wb', 0)
self._thread = threading.Thread(target=self._Run)
self._thread.start()
return os.fdopen(read_pipe, 'r')
def _Run(self):
streams_by_fd = {}
primary_fd = self._streams[0].fileno()
for s in self._streams:
streams_by_fd[s.fileno()] = s
# Set when the primary FD is closed. Input from other FDs will continue to
# be processed until select() runs dry.
flush = False
# The lifetime of the MergedInputStream is bound to the lifetime of
# |primary_fd|.
while primary_fd:
# When not flushing: block until data is read or an exception occurs.
rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)
if len(rlist) == 0 and flush:
break
for fileno in xlist:
del streams_by_fd[fileno]
if fileno == primary_fd:
primary_fd = None
for fileno in rlist:
line = streams_by_fd[fileno].readline()
if line:
self._output_stream.write(line)
else:
del streams_by_fd[fileno]
if fileno == primary_fd:
primary_fd = None
# Flush the streams by executing nonblocking reads from the input file
# descriptors until no more data is available, or all the streams are
# closed.
while streams_by_fd:
rlist, _, _ = select.select(streams_by_fd, [], [], 0)
if not rlist:
break
for fileno in rlist:
line = streams_by_fd[fileno].readline()
if line:
self._output_stream.write(line)
else:
del streams_by_fd[fileno]
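# A minimal usage sketch (hypothetical Popen objects |proc_a| and |proc_b|,
# both created with stdout=subprocess.PIPE); the merged pipe yields lines
# until the primary stream (proc_a.stdout) is closed:
#
#   merged = MergedInputStream([proc_a.stdout, proc_b.stdout]).Start()
#   for line in merged:
#     print(line.rstrip())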
def _GetComponentUri(package_name):
return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (package_name,
package_name)
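# For illustration (hypothetical package name): _GetComponentUri('my_tests')
# returns 'fuchsia-pkg://fuchsia.com/my_tests#meta/my_tests.cmx'.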
class RunTestPackageArgs:
"""RunTestPackage() configuration arguments structure.
code_coverage: If set, the test package will be run via 'runtests', and the
      output will be saved to the /tmp folder on the device.
test_realm_label: Specifies the realm name that run-test-component should use.
This must be specified if a filter file is to be set, or a results summary
file fetched after the test suite has run.
use_run_test_component: If True then the test package will be run hermetically
via 'run-test-component', rather than using 'run'.
"""
def __init__(self):
self.code_coverage = False
self.test_realm_label = None
self.use_run_test_component = False
@staticmethod
def FromCommonArgs(args):
run_test_package_args = RunTestPackageArgs()
run_test_package_args.code_coverage = args.code_coverage
return run_test_package_args
def _DrainStreamToStdout(stream, quit_event):
"""Outputs the contents of |stream| until |quit_event| is set."""
while not quit_event.is_set():
rlist, _, _ = select.select([stream], [], [], 0.1)
if rlist:
line = rlist[0].readline()
if not line:
return
print(line.rstrip())
def _SymbolizeStream(input_fd, ids_txt_files):
"""Returns a Popen object for a symbolizer process invocation.
input_fd: The data to symbolize.
ids_txt_files: A list of ids.txt files which contain symbol data."""
return RunSymbolizer(input_fd, subprocess.PIPE, ids_txt_files)
def RunTestPackage(output_dir, target, package_paths, package_name,
package_args, args):
"""Installs the Fuchsia package at |package_path| on the target,
executes it with |package_args|, and symbolizes its output.
output_dir: The path containing the build output files.
target: The deployment Target object that will run the package.
package_paths: The paths to the .far packages to be installed.
package_name: The name of the primary package to run.
package_args: The arguments which will be passed to the Fuchsia process.
args: RunTestPackageArgs instance configuring how the package will be run.
Returns the exit code of the remote package process."""
kernel_logger = _AttachKernelLogReader(target)
try:
# Spin up a thread to asynchronously dump the system log to stdout
    # for easier diagnosis of early, pre-execution failures.
log_output_quit_event = multiprocessing.Event()
log_output_thread = threading.Thread(target=lambda: _DrainStreamToStdout(
kernel_logger.stdout, log_output_quit_event))
log_output_thread.daemon = True
log_output_thread.start()
with target.GetPkgRepo():
start_time = time.time()
target.InstallPackage(package_paths)
logging.info('Test installed in {:.2f} seconds.'.format(time.time() -
start_time))
log_output_quit_event.set()
log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
logging.info('Running application.')
# TODO(crbug.com/1156768): Deprecate runtests.
if args.code_coverage:
# runtests requires specifying an output directory and a double dash
# before the argument list.
command = ['runtests', '-o', '/tmp', _GetComponentUri(package_name)]
if args.test_realm_label:
command += ['--realm-label', args.test_realm_label]
command += ['--']
elif args.use_run_test_component:
command = ['run-test-component']
if args.test_realm_label:
command += ['--realm-label=%s' % args.test_realm_label]
command.append(_GetComponentUri(package_name))
command.append('--')
else:
command = ['run', _GetComponentUri(package_name)]
command.extend(package_args)
process = target.RunCommandPiped(command,
stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Symbolize klog and systemlog as separate streams. The symbolizer
      # protocol is stateful, so commingled raw stack dumps can yield
# unsymbolizable garbage data.
ids_txt_paths = BuildIdsPaths(package_paths)
with _SymbolizeStream(process.stdout, ids_txt_paths) as \
symbolized_stdout, \
_SymbolizeStream(kernel_logger.stdout, ids_txt_paths) as \
symbolized_klog:
output_stream = MergedInputStream([symbolized_stdout.stdout,
symbolized_klog.stdout]).Start()
for next_line in output_stream:
print(next_line.rstrip())
symbolized_stdout.wait() # Should return instantly.
symbolized_klog.kill() # klog is never-ending and must be killed.
process.wait()
if process.returncode == 0:
logging.info('Process exited normally with status code 0.')
else:
# The test runner returns an error status code if *any* tests fail,
# so we should proceed anyway.
logging.warning('Process exited with status code %d.' %
process.returncode)
finally:
logging.info('Terminating kernel log reader.')
log_output_quit_event.set()
log_output_thread.join()
kernel_logger.kill()
return process.returncode
|
|
#!/usr/bin/env python
# Author: csiu
# Created: 2015-02-01
import argparse
from ConfigParser import SafeConfigParser
import sys
import os
import math
import re
import linecache
from utils import lmean, random_string, line_count, all_same, ensure_dir, get_value_from_keycolonvalue_list
usage = """Feature extraction:
- CpG content
- Conservation
- TATA affinity
EXAMPLE:
python2.7 features.py -i ../test/test.gff -o ../test_outdir
"""
def _CpG_content(seq):
L = len(seq)
CpG = seq.count('CG')+seq.count('cg')+seq.count('Cg')+seq.count('cG')
C = seq.count('C')+seq.count('c')
G = seq.count('G')+seq.count('g')
content = (float(CpG)/float(L)) / pow(float(C+G)/float(2*L),2)
return content
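# Worked example (hypothetical sequence, not part of the pipeline): for
# seq = 'CGCGATAT' we get CpG=2, C=2, G=2, L=8, so the observed/expected
# ratio is (2/8) / ((4/16)**2) = 0.25 / 0.0625 = 4.0.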
def _conservation(chromosomes, d_split, d_cons, phastcons_dir):
[os.makedirs(d) for d in [d_cons] if not os.path.exists(d)]
for c in chromosomes:
gff = os.path.join(d_split, "tss_filtered_all_"+c+".gff")
wig = os.path.join(phastcons_dir, "track_000.chr"+c+".wib")
out = os.path.join(d_cons, "conservation_all_"+c+".txt")
d = os.path.dirname(os.path.realpath(__file__))
extract_phastcons2_pl = os.path.join(d, "external/extract_phastcons2.pl")
os.system(extract_phastcons2_pl+" "+wig+" "+gff+" > "+out)
def _average_conservation(f_cons,f_aver_cons):
with open(f_aver_cons, 'w') as out:
with open(f_cons) as f:
for l in f:
l = l.split(',')
info = l[0].split(';')
start_pos = get_value_from_keycolonvalue_list('start', info)
stop_pos = get_value_from_keycolonvalue_list('stop', info)
try:
av_conservation = lmean([float(i) for i in l[1:]])
except:
av_conservation = 0.0
out.write('\t'.join([start_pos, stop_pos, str(av_conservation)]) +'\n')
### =====================================================================================
def prep_work(in_fa, in_bed, out_fa):
## bed to fasta format
cmd = 'fastaFromBed -fi '+in_fa+' -bed '+in_bed+' -fo '+out_fa
print cmd
os.system(cmd)
## sort input
sorted_gff = os.path.join(os.path.dirname(out_fa), os.path.basename(in_bed)+'.sorted.tmp')
cmd = "sort -k4,5 -n "+in_bed+" > "+sorted_gff
os.system(cmd)
return sorted_gff
def cpg_content(f_fasta, out_cpg):
## f_fasta = filtered fasta file containing sequence of interest
f_o = open(out_cpg, "w")
f = open(f_fasta).read()
f_list = f.split('>')
for item in f_list[1:]:
item_list = item.split('\n')
seq = "".join(([i for i in item_list[1:]]))
chromosome = item_list[0].split(':')[0]; #print chromosome
start_pos = item_list[0].split(':')[1].split('-')[0]; #print start_pos
stop_pos = item_list[0].split(':')[1].split('-')[1]; #print stop_pos
try:
CpG_value = _CpG_content(seq)
except:
CpG_value=0.0
print >> f_o, "\t".join((chromosome, str(int(start_pos)+1), stop_pos, str(CpG_value)))
f_o.close()
## sort CpG
sorted_cpg = out_cpg + '.sorted.tmp'
cmd = "sort -k2,3 -n "+out_cpg+" > "+sorted_cpg
os.system(cmd)
return sorted_cpg
def conservation_score(f_chromsizes, d_phastcons, in_gff, out_avcons):
tmp = random_string(12)
d_split = out_avcons + 'gff_by_chromosome_'+tmp
d_cons = out_avcons + 'conservation_'+tmp
[os.makedirs(d) for d in [d_split, d_cons] if not os.path.exists(d)]
f_cons = out_avcons + 'conservation.txt'
f_aver_cons = out_avcons
## get chromosomes
chromosomes = []
with open(f_chromsizes) as f:
for l in f:
c = l.split('\t')[0]
if (('random' not in c) and ('chrM' not in c) and ('chrUn' not in c)):
chromosomes.append(c[3:])
## separate infile by chromosome
for c in chromosomes:
f_out = os.path.join(d_split, 'tss_filtered_all_'+c+'.gff')
with open(f_out, 'w') as out:
with open(in_gff) as f:
for line in f:
chrom = line.split('\t')[0]
if (chrom == c):
out.write(line)
## calculate conservation per chromosome
_conservation(chromosomes, d_split, d_cons, d_phastcons)
## merge chromosomes
os.system("cat "+d_cons+"/conservation_all_*txt > "+f_cons)
## get average conservation
_average_conservation(f_cons, f_aver_cons)
## cleanup
is_same = []
for c in chromosomes:
n_gff = line_count(os.path.join(d_split, 'tss_filtered_all_'+c+'.gff'))
n_con = line_count(os.path.join(d_cons, 'conservation_all_'+c+'.txt'))
is_same.append(n_gff == n_con)
if all_same(is_same):
os.system('rm -r %s %s %s' % (d_split, d_cons, f_cons))
else:
not_equal = [chromosomes[i] for i,v in enumerate(is_same) if not v]
sys.exit('Error: Total number of positions does not match for chr: ' + ' '.join(not_equal))
## sort average conservation
sorted_avcons = out_avcons + '.sorted.tmp'
cmd = "sort -k1,2 -n "+out_avcons+" > "+sorted_avcons
os.system(cmd)
return sorted_avcons
def tata_affinity(TRAP, f_psemmatrix, f_fasta, out_tata):
cmd_trap = TRAP+" -s "+f_fasta+" --psem "+f_psemmatrix+" -g 0.5"+" -o "+out_tata
print cmd_trap
os.system(cmd_trap)
## sort TATA affinity
sorted_tata = out_tata + '.sorted.tmp'
tmp_tata = sorted_tata + random_string(12)
with open(tmp_tata, 'w') as out:
with open(out_tata) as f:
for l in f:
if not l.startswith("#"):
l = l.split('\t')
chr = l[0].split(':')[0]
start = l[0].split(':')[1].split('-')[0]
stop = l[0].split(':')[1].split('-')[1]
l[0] = '\t'.join([chr, start, stop])
out.write('\t'.join(l))
cmd = "sort -k2,3 -n "+tmp_tata+" > "+sorted_tata
os.system(cmd)
os.system("rm "+tmp_tata)
return sorted_tata
def build_features_matrix(sorted_gff, sorted_cpg, sorted_avcons, sorted_tata, f_out):
## check that all in files contain same number of data lines
n_g = line_count(sorted_gff)
n_c = line_count(sorted_cpg)
n_a = line_count(sorted_avcons)
n_t = line_count(sorted_tata)
if not all_same([n_g, n_c, n_a, n_t]):
        sys.exit('Error: line counts of feature files are not all equal: %s,%s,%s,%s' %
                 (n_g, n_c, n_a, n_t))
## create matrix
lcount = 0
with open(f_out, 'w') as out:
with open(sorted_gff) as f:
for l in f:
lcount += 1
l = l.strip().split('\t')
c = l[0]
region_up = l[3] #500bp upstream of start; not used
region_down = l[4] #500bp downstream of start; not used
count = l[5]
strand = l[6]
info = l[8].split(';')
#dist_score = '?'
peak_start = get_value_from_keycolonvalue_list('start', info)
peak_stop = get_value_from_keycolonvalue_list('stop', info)
CpG_value = linecache.getline(sorted_cpg,lcount).strip().split('\t')[3]
try:
conservation = linecache.getline(sorted_avcons,lcount).strip().split('\t')[2]
except:
conservation = '0'
affinity = linecache.getline(sorted_tata,lcount).strip().split('\t')[7]
features = ';'.join(['cpg:'+CpG_value, 'cons:'+conservation, 'tata:'+affinity])
new_info = ';'.join(['region_start:'+region_up, 'region_stop:'+region_down])
line = '\t'.join([c, l[1], l[2],
peak_start, peak_stop, count, strand,
features, new_info])
out.write(line + '\n')
### =====================================================================================
def main(infile, outdir,
f_fasta, f_chromsizes, d_phastcons, TRAP, f_psemmatrix,
fo_outfile):
[os.makedirs(d) for d in [outdir] if not os.path.exists(d)]
    id_infile = re.sub(r'\.gff$', '', os.path.basename(infile))
fo_filtered_fasta = os.path.join(outdir, id_infile+'.fa')
fo_cpg = os.path.join(outdir, id_infile+'.cpg')
fo_avcons = os.path.join(outdir, id_infile+'.avgcons')
fo_tata = os.path.join(outdir, id_infile+'.tata')
sorted_gff = prep_work(f_fasta, infile, fo_filtered_fasta)
sorted_cpg = cpg_content(fo_filtered_fasta, fo_cpg)
sorted_avcons = conservation_score(f_chromsizes, d_phastcons, infile, fo_avcons)
sorted_tata = tata_affinity(TRAP, f_psemmatrix, fo_filtered_fasta, fo_tata)
build_features_matrix(sorted_gff, sorted_cpg, sorted_avcons, sorted_tata, fo_outfile)
if __name__ == '__main__':
cparser = SafeConfigParser()
cparser.read('config.ini')
parser = argparse.ArgumentParser(description=usage,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--infile', dest='infile',
required=True,
help='path to input file')
parser.add_argument('-o', '--outdir', dest='outdir',
default=".",
help='''specify path to output directory.
Default is the current working directory.''')
parser.add_argument('-f', '--fo_outfile', dest='fo_outfile',
help='''specify path to final output file.''')
## For prep work
parser.add_argument('--f_fasta', dest='f_fasta',
default=cparser.get('genome','fasta'),
help='''Path to (hg19) fasta file''')
## For conservation score
parser.add_argument('--f_chromsizes', dest='f_chromsizes',
default=cparser.get('genome','chromsizes'),
help='''Path to (hg19) chrom sizes files.
Format: <chr>\t<chr.size>''')
parser.add_argument('--d_phastcons', dest='d_phastcons',
default=cparser.get('cons','phastcons'),
help='''Path to directory containing "track_000.chr<#>.wib" files for Phastcons''')
## For TATA affinity
parser.add_argument('--TRAP', dest='TRAP',
default=cparser.get('tata','trap'),
help='''Path to TRAP ("ANNOTATE_v3") binary''')
parser.add_argument('--f_psemmatrix', dest='f_psemmatrix',
default=cparser.get('tata','psem'),
help='''Path to TATA_box_jaspar.psem''')
## get at the arguments
args = parser.parse_args()
    if args.fo_outfile is None:
fo_outfile = os.path.join(args.outdir, 'output.gff')
else:
ensure_dir(args.fo_outfile)
fo_outfile = args.fo_outfile
## do something..
main(args.infile, args.outdir,
args.f_fasta, args.f_chromsizes, args.d_phastcons, args.TRAP, args.f_psemmatrix,
fo_outfile)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import buildbot_common
import build_version
import getos
from buildbot_common import ErrorExit
from easy_template import RunTemplateFileIfChanged
from build_paths import SDK_RESOURCE_DIR
def Trace(msg):
if Trace.verbose:
sys.stderr.write(str(msg) + '\n')
Trace.verbose = False
def IsExample(desc):
dest = desc['DEST']
return dest.startswith(('examples', 'tests', 'getting_started'))
def GenerateSourceCopyList(desc):
sources = []
# Some examples use their own Makefile/sources/etc.
if 'TARGETS' not in desc:
# Only copy the DATA files.
return desc.get('DATA', [])
# Add sources for each target
for target in desc['TARGETS']:
sources.extend(target['SOURCES'])
# And HTML and data files
sources.extend(desc.get('DATA', []))
if IsExample(desc):
sources.append('common.js')
if not desc.get('NO_PACKAGE_FILES'):
sources.extend(['icon128.png', 'background.js'])
return sources
def GetSourcesDict(sources):
source_map = {}
for key in ['.c', '.cc']:
source_list = [fname for fname in sources if fname.endswith(key)]
if source_list:
source_map[key] = source_list
else:
source_map[key] = []
return source_map
def GetProjectObjects(source_dict):
object_list = []
for key in ['.c', '.cc']:
for src in source_dict[key]:
object_list.append(os.path.splitext(src)[0])
return object_list
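# For illustration (hypothetical file names):
#   GetSourcesDict(['hello.cc', 'util.c', 'index.html'])
#       -> {'.c': ['util.c'], '.cc': ['hello.cc']}
#   GetProjectObjects({'.c': ['util.c'], '.cc': ['hello.cc']})
#       -> ['util', 'hello']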
def GetPlatforms(plat_list, plat_filter, first_toolchain):
platforms = []
for plat in plat_list:
if plat in plat_filter:
platforms.append(plat)
if first_toolchain:
return [platforms[0]]
return platforms
def ErrorMsgFunc(text):
sys.stderr.write(text + '\n')
def AddMakeBat(pepperdir, makepath):
"""Create a simple batch file to execute Make.
Creates a simple batch file named make.bat for the Windows platform at the
given path, pointing to the Make executable in the SDK."""
makepath = os.path.abspath(makepath)
if not makepath.startswith(pepperdir):
ErrorExit('Make.bat not relative to Pepper directory: ' + makepath)
makeexe = os.path.abspath(os.path.join(pepperdir, 'tools'))
relpath = os.path.relpath(makeexe, makepath)
fp = open(os.path.join(makepath, 'make.bat'), 'wb')
outpath = os.path.join(relpath, 'make.exe')
  # Since make.bat is only used on Windows, use the Windows path style
outpath = outpath.replace(os.path.sep, '\\')
fp.write('@%s %%*\n' % outpath)
fp.close()
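# For illustration (hypothetical layout): with makepath at
# <pepperdir>/examples/api/foo, the generated make.bat contains the single
# line '@..\..\..\tools\make.exe %*'.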
def FindFile(name, srcroot, srcdirs):
checks = []
for srcdir in srcdirs:
srcfile = os.path.join(srcroot, srcdir, name)
srcfile = os.path.abspath(srcfile)
if os.path.exists(srcfile):
return srcfile
else:
checks.append(srcfile)
ErrorMsgFunc('%s not found in:\n\t%s' % (name, '\n\t'.join(checks)))
return None
def IsNexe(desc):
for target in desc['TARGETS']:
if target['TYPE'] == 'main':
return True
return False
def ProcessHTML(srcroot, dstroot, desc, toolchains, configs, first_toolchain):
name = desc['NAME']
nmf = desc['TARGETS'][0]['NAME']
outdir = os.path.join(dstroot, desc['DEST'], name)
srcpath = os.path.join(srcroot, 'index.html')
dstpath = os.path.join(outdir, 'index.html')
tools = GetPlatforms(toolchains, desc['TOOLS'], first_toolchain)
path = "{tc}/{config}"
replace = {
'title': desc['TITLE'],
'attrs':
'data-name="%s" data-tools="%s" data-configs="%s" data-path="%s"' % (
nmf, ' '.join(tools), ' '.join(configs), path),
}
RunTemplateFileIfChanged(srcpath, dstpath, replace)
def GenerateManifest(srcroot, dstroot, desc):
outdir = os.path.join(dstroot, desc['DEST'], desc['NAME'])
srcpath = os.path.join(SDK_RESOURCE_DIR, 'manifest.json.template')
dstpath = os.path.join(outdir, 'manifest.json')
permissions = desc.get('PERMISSIONS', [])
socket_permissions = desc.get('SOCKET_PERMISSIONS', [])
combined_permissions = list(permissions)
if socket_permissions:
combined_permissions.append({'socket': socket_permissions})
pretty_permissions = json.dumps(combined_permissions,
sort_keys=True, indent=4)
replace = {
'name': desc['TITLE'],
'description': '%s Example' % desc['TITLE'],
'key': True,
'channel': None,
'permissions': pretty_permissions,
'multi_platform': desc.get('MULTI_PLATFORM', False),
'version': build_version.ChromeVersionNoTrunk(),
'min_chrome_version': desc.get('MIN_CHROME_VERSION')
}
RunTemplateFileIfChanged(srcpath, dstpath, replace)
def FindAndCopyFiles(src_files, root, search_dirs, dst_dir):
buildbot_common.MakeDir(dst_dir)
for src_name in src_files:
src_file = FindFile(src_name, root, search_dirs)
if not src_file:
ErrorExit('Failed to find: ' + src_name)
dst_file = os.path.join(dst_dir, src_name)
if os.path.exists(dst_file):
if os.stat(src_file).st_mtime <= os.stat(dst_file).st_mtime:
Trace('Skipping "%s", destination "%s" is newer.' % (
src_file, dst_file))
continue
dst_path = os.path.dirname(dst_file)
if not os.path.exists(dst_path):
buildbot_common.MakeDir(dst_path)
buildbot_common.CopyFile(src_file, dst_file)
def ModifyDescInPlace(desc):
"""Perform post-load processing on .dsc file data.
Currently this consists of:
- Add -Wall to CXXFLAGS
- Synthesize SEL_LDR_LIBS and SEL_LDR_DEPS by stripping
down LIBS and DEPS (removing certain ppapi-only libs).
"""
ppapi_only_libs = ['ppapi_simple']
for target in desc['TARGETS']:
target.setdefault('CXXFLAGS', [])
target['CXXFLAGS'].insert(0, '-Wall')
def filter_out(key):
value = target.get(key, [])
if type(value) == dict:
value = dict(value)
for key in value.keys():
value[key] = [v for v in value[key] if v not in ppapi_only_libs]
else:
value = [v for v in value if v not in ppapi_only_libs]
return value
target['SEL_LDR_LIBS'] = filter_out('LIBS')
target['SEL_LDR_DEPS'] = filter_out('DEPS')
def ProcessProject(pepperdir, srcroot, dstroot, desc, toolchains, configs=None,
first_toolchain=False):
if not configs:
configs = ['Debug', 'Release']
name = desc['NAME']
out_dir = os.path.join(dstroot, desc['DEST'], name)
buildbot_common.MakeDir(out_dir)
srcdirs = desc.get('SEARCH', ['.', SDK_RESOURCE_DIR])
# Copy sources to example directory
sources = GenerateSourceCopyList(desc)
FindAndCopyFiles(sources, srcroot, srcdirs, out_dir)
# Copy public headers to the include directory.
for headers_set in desc.get('HEADERS', []):
headers = headers_set['FILES']
header_out_dir = os.path.join(dstroot, headers_set['DEST'])
FindAndCopyFiles(headers, srcroot, srcdirs, header_out_dir)
make_path = os.path.join(out_dir, 'Makefile')
outdir = os.path.dirname(os.path.abspath(make_path))
if getos.GetPlatform() == 'win':
AddMakeBat(pepperdir, outdir)
# If this project has no TARGETS, then we don't need to generate anything.
if 'TARGETS' not in desc:
return (name, desc['DEST'])
if IsNexe(desc):
template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.example.template')
else:
template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.library.template')
# Ensure the order of |tools| is the same as toolchains; that way if
# first_toolchain is set, it will choose based on the order of |toolchains|.
tools = [tool for tool in toolchains if tool in desc['TOOLS']]
if first_toolchain:
tools = [tools[0]]
ModifyDescInPlace(desc)
template_dict = {
'desc': desc,
'rel_sdk': '/'.join(['..'] * (len(desc['DEST'].split('/')) + 1)),
'pre': desc.get('PRE', ''),
'post': desc.get('POST', ''),
'tools': tools,
'sel_ldr': desc.get('SEL_LDR'),
'targets': desc['TARGETS'],
'multi_platform': desc.get('MULTI_PLATFORM', False),
}
RunTemplateFileIfChanged(template, make_path, template_dict)
if IsExample(desc):
ProcessHTML(srcroot, dstroot, desc, toolchains, configs,
first_toolchain)
if not desc.get('NO_PACKAGE_FILES'):
GenerateManifest(srcroot, dstroot, desc)
return (name, desc['DEST'])
def GenerateMasterMakefile(pepperdir, out_path, targets, deps):
"""Generate a Master Makefile that builds all examples.
Args:
pepperdir: NACL_SDK_ROOT
out_path: Root for output such that out_path+NAME = full path
    targets: List of target names
    deps: List of dependencies (passed through to the Makefile template)
  """
in_path = os.path.join(SDK_RESOURCE_DIR, 'Makefile.index.template')
out_path = os.path.join(out_path, 'Makefile')
rel_path = os.path.relpath(pepperdir, os.path.dirname(out_path))
template_dict = {
'projects': targets,
'deps' : deps,
'rel_sdk' : rel_path,
}
RunTemplateFileIfChanged(in_path, out_path, template_dict)
outdir = os.path.dirname(os.path.abspath(out_path))
if getos.GetPlatform() == 'win':
AddMakeBat(pepperdir, outdir)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import dialects
from sqlalchemy.ext import compiler
from sqlalchemy import types as sqla_types
from nova.db import types
from nova.objects import keypair
LOG = logging.getLogger(__name__)
def Inet():
return sa.String(length=43).with_variant(
dialects.postgresql.INET(), 'postgresql',
)
def InetSmall():
return sa.String(length=39).with_variant(
dialects.postgresql.INET(), 'postgresql',
)
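# The string lengths above are presumably chosen so that 39 characters fit a
# full textual IPv6 address and 43 characters also fit a '/nnn' prefix
# length; on PostgreSQL the native INET type is used instead.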
# We explicitly name many of our foreign keys for MySQL so they match Havana
@compiler.compiles(sa.ForeignKeyConstraint, 'postgresql')
def process(element, compiler, **kw):
element.name = None
return compiler.visit_foreign_key_constraint(element, **kw)
def _create_shadow_tables(migrate_engine):
meta = sa.MetaData()
meta.reflect(migrate_engine)
table_names = list(meta.tables.keys())
# NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
# is sqlalchemy-migrate which requires this. We'll remove these migrations
# when dropping SQLAlchemy < 2.x support
meta.bind = migrate_engine
for table_name in table_names:
# Skip tables that are not soft-deletable
if table_name in (
'tags',
'resource_providers',
'inventories',
'allocations',
'resource_provider_aggregates',
'console_auth_tokens',
):
continue
table = sa.Table(table_name, meta, autoload_with=migrate_engine)
columns = []
for column in table.columns:
column_copy = None
# NOTE(boris-42): BigInteger is not supported by sqlite, so after
# copy it will have NullType. The other types that are used in Nova
# are supported by sqlite
if isinstance(column.type, sqla_types.NullType):
column_copy = sa.Column(
column.name, sa.BigInteger(), default=0,
)
if table_name == 'instances' and column.name == 'locked_by':
enum = sa.Enum(
'owner', 'admin', name='shadow_instances0locked_by',
)
column_copy = sa.Column(column.name, enum)
# TODO(stephenfin): Fix these various bugs in a follow-up
# 244_increase_user_id_length_volume_usage_cache; this
# alteration should apply to shadow tables also
if table_name == 'volume_usage_cache' and column.name == 'user_id':
# nullable should be True
column_copy = sa.Column('user_id', sa.String(36))
# 247_nullable_mismatch; these alterations should apply to shadow
# tables also
if table_name == 'quota_usages' and column.name == 'resources':
# nullable should be False
column_copy = sa.Column('resource', sa.String(length=255))
if table_name == 'pci_devices':
if column.name == 'deleted':
# nullable should be True
column_copy = sa.Column(
'deleted', sa.Integer, default=0, nullable=False,
)
if column.name == 'product_id':
# nullable should be False
column_copy = sa.Column('product_id', sa.String(4))
if column.name == 'vendor_id':
# nullable should be False
column_copy = sa.Column('vendor_id', sa.String(4))
if column.name == 'dev_type':
# nullable should be False
column_copy = sa.Column('dev_type', sa.String(8))
# 280_add_nullable_false_to_keypairs_name; this should apply to the
# shadow table also
if table_name == 'key_pairs' and column.name == 'name':
# nullable should be False
column_copy = sa.Column('name', sa.String(length=255))
# NOTE(stephenfin): By default, 'sqlalchemy.Enum' will issue a
# 'CREATE TYPE' command on PostgreSQL, even if the type already
# exists. We work around this by using the PostgreSQL-specific
# 'sqlalchemy.dialects.postgresql.ENUM' type and setting
# 'create_type' to 'False'. See [1] for more information.
#
# [1] https://stackoverflow.com/a/28894354/613428
if migrate_engine.name == 'postgresql':
if table_name == 'key_pairs' and column.name == 'type':
enum = dialects.postgresql.ENUM(
'ssh', 'x509', name='keypair_types', create_type=False)
column_copy = sa.Column(
column.name, enum, nullable=False,
server_default=keypair.KEYPAIR_TYPE_SSH)
elif (
table_name == 'migrations' and
column.name == 'migration_type'
):
enum = dialects.postgresql.ENUM(
'migration', 'resize', 'live-migration', 'evacuation',
name='migration_type', create_type=False)
column_copy = sa.Column(column.name, enum, nullable=True)
if column_copy is None:
column_copy = column.copy()
columns.append(column_copy)
shadow_table = sa.Table(
'shadow_' + table_name, meta, *columns, mysql_engine='InnoDB',
)
try:
shadow_table.create()
except Exception:
LOG.info(repr(shadow_table))
LOG.exception('Exception while creating table.')
raise
# TODO(stephenfin): Fix these various bugs in a follow-up
# 252_add_instance_extra_table; we don't create indexes for shadow tables
# in general and these should be removed
table = sa.Table(
'shadow_instance_extra', meta, autoload_with=migrate_engine,
)
idx = sa.Index('shadow_instance_extra_idx', table.c.instance_uuid)
idx.create(migrate_engine)
    # 373_migration_uuid; we shouldn't create indexes for shadow tables
table = sa.Table('shadow_migrations', meta, autoload_with=migrate_engine)
idx = sa.Index('shadow_migrations_uuid', table.c.uuid, unique=True)
idx.create(migrate_engine)
def upgrade(migrate_engine):
meta = sa.MetaData()
# NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
# is sqlalchemy-migrate which requires this. We'll remove these migrations
# when dropping SQLAlchemy < 2.x support
meta.bind = migrate_engine
agent_builds = sa.Table('agent_builds', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('hypervisor', sa.String(length=255)),
sa.Column('os', sa.String(length=255)),
sa.Column('architecture', sa.String(length=255)),
sa.Column('version', sa.String(length=255)),
sa.Column('url', sa.String(length=255)),
sa.Column('md5hash', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index(
'agent_builds_hypervisor_os_arch_idx',
'hypervisor', 'os', 'architecture'),
UniqueConstraint(
'hypervisor', 'os', 'architecture', 'deleted',
name='uniq_agent_builds0hypervisor0os0architecture0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = sa.Table('aggregate_hosts', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('host', sa.String(length=255)),
sa.Column(
'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
nullable=False),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'host', 'aggregate_id', 'deleted',
name='uniq_aggregate_hosts0host0aggregate_id0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = sa.Table('aggregate_metadata', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
nullable=False),
sa.Column('key', sa.String(length=255), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.Column('deleted', sa.Integer),
sa.Index('aggregate_metadata_key_idx', 'key'),
sa.Index('aggregate_metadata_value_idx', 'value'),
UniqueConstraint(
'aggregate_id', 'key', 'deleted',
name='uniq_aggregate_metadata0aggregate_id0key0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = sa.Table('aggregates', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('name', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Column('uuid', sa.String(36)),
sa.Index('aggregate_uuid_idx', 'uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
allocations = sa.Table('allocations', meta,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('resource_provider_id', sa.Integer, nullable=False),
sa.Column('consumer_id', sa.String(36), nullable=False),
sa.Column('resource_class_id', sa.Integer, nullable=False),
sa.Column('used', sa.Integer, nullable=False),
sa.Index(
'allocations_resource_provider_class_used_idx',
'resource_provider_id', 'resource_class_id', 'used'),
sa.Index('allocations_consumer_id_idx', 'consumer_id'),
sa.Index('allocations_resource_class_id_idx', 'resource_class_id'),
mysql_engine='InnoDB',
mysql_charset='latin1',
)
block_device_mapping = sa.Table('block_device_mapping', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('device_name', sa.String(length=255), nullable=True),
sa.Column('delete_on_termination', sa.Boolean),
sa.Column('snapshot_id', sa.String(length=36), nullable=True),
sa.Column('volume_id', sa.String(length=36), nullable=True),
sa.Column('volume_size', sa.Integer),
sa.Column('no_device', sa.Boolean),
sa.Column('connection_info', types.MediumText()),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid',
name='block_device_mapping_instance_uuid_fkey')),
sa.Column('deleted', sa.Integer),
sa.Column('source_type', sa.String(length=255), nullable=True),
sa.Column('destination_type', sa.String(length=255), nullable=True),
sa.Column('guest_format', sa.String(length=255), nullable=True),
sa.Column('device_type', sa.String(length=255), nullable=True),
sa.Column('disk_bus', sa.String(length=255), nullable=True),
sa.Column('boot_index', sa.Integer),
sa.Column('image_id', sa.String(length=36), nullable=True),
sa.Column('tag', sa.String(255)),
sa.Column('attachment_id', sa.String(36), nullable=True),
sa.Column('uuid', sa.String(36), nullable=True),
sa.Column('volume_type', sa.String(255), nullable=True),
sa.Index('snapshot_id', 'snapshot_id'),
sa.Index('volume_id', 'volume_id'),
sa.Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
sa.Index(
'block_device_mapping_instance_uuid_device_name_idx',
'instance_uuid', 'device_name'),
sa.Index(
'block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
UniqueConstraint('uuid', name='uniq_block_device_mapping0uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = sa.Table('bw_usage_cache', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('start_period', sa.DateTime, nullable=False),
sa.Column('last_refreshed', sa.DateTime),
sa.Column('bw_in', sa.BigInteger),
sa.Column('bw_out', sa.BigInteger),
sa.Column('mac', sa.String(length=255)),
sa.Column('uuid', sa.String(length=36)),
sa.Column('last_ctr_in', sa.BigInteger()),
sa.Column('last_ctr_out', sa.BigInteger()),
sa.Column('deleted', sa.Integer),
sa.Index(
'bw_usage_cache_uuid_start_period_idx',
'uuid', 'start_period'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = sa.Table('cells', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('api_url', sa.String(length=255)),
sa.Column('weight_offset', sa.Float),
sa.Column('weight_scale', sa.Float),
sa.Column('name', sa.String(length=255)),
sa.Column('is_parent', sa.Boolean),
sa.Column('deleted', sa.Integer),
sa.Column('transport_url', sa.String(length=255), nullable=False),
UniqueConstraint(
'name', 'deleted',
name='uniq_cells0name0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = sa.Table('certificates', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('user_id', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Column('file_name', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index(
'certificates_project_id_deleted_idx',
'project_id', 'deleted'),
sa.Index('certificates_user_id_deleted_idx', 'user_id', 'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = sa.Table('compute_nodes', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('service_id', sa.Integer, nullable=True),
sa.Column('vcpus', sa.Integer, nullable=False),
sa.Column('memory_mb', sa.Integer, nullable=False),
sa.Column('local_gb', sa.Integer, nullable=False),
sa.Column('vcpus_used', sa.Integer, nullable=False),
sa.Column('memory_mb_used', sa.Integer, nullable=False),
sa.Column('local_gb_used', sa.Integer, nullable=False),
sa.Column('hypervisor_type', types.MediumText(), nullable=False),
sa.Column('hypervisor_version', sa.Integer, nullable=False),
sa.Column('cpu_info', types.MediumText(), nullable=False),
sa.Column('disk_available_least', sa.Integer),
sa.Column('free_ram_mb', sa.Integer),
sa.Column('free_disk_gb', sa.Integer),
sa.Column('current_workload', sa.Integer),
sa.Column('running_vms', sa.Integer),
sa.Column('hypervisor_hostname', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Column('host_ip', InetSmall()),
sa.Column('supported_instances', sa.Text),
sa.Column('pci_stats', sa.Text, nullable=True),
sa.Column('metrics', sa.Text, nullable=True),
sa.Column('extra_resources', sa.Text, nullable=True),
sa.Column('stats', sa.Text, default='{}'),
sa.Column('numa_topology', sa.Text, nullable=True),
sa.Column('host', sa.String(255), nullable=True),
sa.Column('ram_allocation_ratio', sa.Float, nullable=True),
sa.Column('cpu_allocation_ratio', sa.Float, nullable=True),
sa.Column('uuid', sa.String(36), nullable=True),
sa.Column('disk_allocation_ratio', sa.Float, nullable=True),
sa.Column('mapped', sa.Integer, default=0, nullable=True),
sa.Index('compute_nodes_uuid_idx', 'uuid', unique=True),
UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
name='uniq_compute_nodes0host0hypervisor_hostname0deleted',
),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_auth_tokens = sa.Table('console_auth_tokens', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('token_hash', sa.String(255), nullable=False),
sa.Column('console_type', sa.String(255), nullable=False),
sa.Column('host', sa.String(255), nullable=False),
sa.Column('port', sa.Integer, nullable=False),
sa.Column('internal_access_path', sa.String(255)),
sa.Column('instance_uuid', sa.String(36), nullable=False),
sa.Column('expires', sa.Integer, nullable=False),
sa.Column('access_url_base', sa.String(255), nullable=True),
sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
sa.Index(
'console_auth_tokens_token_hash_instance_uuid_idx',
'token_hash', 'instance_uuid'),
UniqueConstraint(
'token_hash', name='uniq_console_auth_tokens0token_hash'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
console_pools = sa.Table('console_pools', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('address', InetSmall()),
sa.Column('username', sa.String(length=255)),
sa.Column('password', sa.String(length=255)),
sa.Column('console_type', sa.String(length=255)),
sa.Column('public_hostname', sa.String(length=255)),
sa.Column('host', sa.String(length=255)),
sa.Column('compute_host', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'host', 'console_type', 'compute_host', 'deleted',
name='uniq_console_pools0host0console_type0compute_host0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
consoles = sa.Table('consoles', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('instance_name', sa.String(length=255)),
sa.Column('password', sa.String(length=255)),
sa.Column('port', sa.Integer),
sa.Column('pool_id', sa.Integer, sa.ForeignKey('console_pools.id')),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='consoles_instance_uuid_fkey')),
sa.Column('deleted', sa.Integer),
sa.Index('consoles_instance_uuid_idx', 'instance_uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = sa.Table('dns_domains', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Boolean),
sa.Column(
'domain', sa.String(length=255), primary_key=True, nullable=False),
sa.Column('scope', sa.String(length=255)),
sa.Column('availability_zone', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
sa.Index('dns_domains_project_id_idx', 'project_id'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = sa.Table('fixed_ips', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('address', InetSmall()),
sa.Column('network_id', sa.Integer),
sa.Column('allocated', sa.Boolean),
sa.Column('leased', sa.Boolean),
sa.Column('reserved', sa.Boolean),
sa.Column('virtual_interface_id', sa.Integer),
sa.Column('host', sa.String(length=255)),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='fixed_ips_instance_uuid_fkey'),
),
sa.Column('deleted', sa.Integer),
sa.Index('network_id', 'network_id'),
sa.Index('address', 'address'),
sa.Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
sa.Index(
'fixed_ips_virtual_interface_id_fkey',
'virtual_interface_id'),
sa.Index('fixed_ips_host_idx', 'host'),
sa.Index(
'fixed_ips_network_id_host_deleted_idx', 'network_id',
'host', 'deleted'),
sa.Index(
'fixed_ips_address_reserved_network_id_deleted_idx',
'address', 'reserved',
'network_id', 'deleted'),
sa.Index(
'fixed_ips_deleted_allocated_idx',
'address', 'deleted', 'allocated'),
sa.Index(
'fixed_ips_deleted_allocated_updated_at_idx',
'deleted', 'allocated', 'updated_at'),
UniqueConstraint(
'address', 'deleted',
name='uniq_fixed_ips0address0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = sa.Table('floating_ips', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('address', InetSmall()),
sa.Column('fixed_ip_id', sa.Integer),
sa.Column('project_id', sa.String(length=255)),
sa.Column('host', sa.String(length=255)),
sa.Column('auto_assigned', sa.Boolean),
sa.Column('pool', sa.String(length=255)),
sa.Column('interface', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index('fixed_ip_id', 'fixed_ip_id'),
sa.Index('floating_ips_host_idx', 'host'),
sa.Index('floating_ips_project_id_idx', 'project_id'),
sa.Index(
'floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
'pool', 'deleted', 'fixed_ip_id', 'project_id'),
UniqueConstraint(
'address', 'deleted',
name='uniq_floating_ips0address0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = sa.Table('instance_faults', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='fk_instance_faults_instance_uuid')),
sa.Column('code', sa.Integer, nullable=False),
sa.Column('message', sa.String(length=255)),
sa.Column('details', types.MediumText()),
sa.Column('host', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index('instance_faults_host_idx', 'host'),
sa.Index(
'instance_faults_instance_uuid_deleted_created_at_idx',
'instance_uuid', 'deleted', 'created_at'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = sa.Table('instance_id_mappings', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(36), nullable=False),
sa.Column('deleted', sa.Integer),
sa.Index('ix_instance_id_mappings_uuid', 'uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = sa.Table('instance_info_caches', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('network_info', types.MediumText()),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid',
name='instance_info_caches_instance_uuid_fkey'),
nullable=False),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'instance_uuid',
name='uniq_instance_info_caches0instance_uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
groups = sa.Table('instance_groups', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('user_id', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255)),
UniqueConstraint(
'uuid', 'deleted',
name='uniq_instance_groups0uuid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_policy = sa.Table('instance_group_policy', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('policy', sa.String(length=255)),
sa.Column(
'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
nullable=False),
sa.Index('instance_group_policy_policy_idx', 'policy'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_member = sa.Table('instance_group_member', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('instance_id', sa.String(length=255)),
sa.Column(
'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
nullable=False),
sa.Index(
'instance_group_member_instance_idx',
'instance_id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_metadata = sa.Table('instance_metadata', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('key', sa.String(length=255)),
sa.Column('value', sa.String(length=255)),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='instance_metadata_instance_uuid_fkey'),
nullable=True),
sa.Column('deleted', sa.Integer),
sa.Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = sa.Table('instance_system_metadata', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='instance_system_metadata_ibfk_1'),
nullable=False),
sa.Column('key', sa.String(length=255), nullable=False),
sa.Column('value', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index('instance_uuid', 'instance_uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_type_extra_specs = sa.Table('instance_type_extra_specs', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'instance_type_id', sa.Integer, sa.ForeignKey('instance_types.id'),
nullable=False),
sa.Column('key', sa.String(length=255)),
sa.Column('value', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index(
'instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
UniqueConstraint(
'instance_type_id', 'key', 'deleted',
name='uniq_instance_type_extra_specs0instance_type_id0key0deleted'
),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_type_projects = sa.Table('instance_type_projects', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'instance_type_id', sa.Integer,
sa.ForeignKey(
'instance_types.id', name='instance_type_projects_ibfk_1'),
nullable=False),
sa.Column('project_id', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'instance_type_id', 'project_id', 'deleted',
name='uniq_instance_type_projects0instance_type_id0project_id'
'0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_types = sa.Table('instance_types', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('name', sa.String(length=255)),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('memory_mb', sa.Integer, nullable=False),
sa.Column('vcpus', sa.Integer, nullable=False),
sa.Column('swap', sa.Integer, nullable=False),
sa.Column('vcpu_weight', sa.Integer),
sa.Column('flavorid', sa.String(length=255)),
sa.Column('rxtx_factor', sa.Float),
sa.Column('root_gb', sa.Integer),
sa.Column('ephemeral_gb', sa.Integer),
sa.Column('disabled', sa.Boolean),
sa.Column('is_public', sa.Boolean),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'name', 'deleted',
name='uniq_instance_types0name0deleted'),
UniqueConstraint(
'flavorid', 'deleted',
name='uniq_instance_types0flavorid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances = sa.Table('instances', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('internal_id', sa.Integer),
sa.Column('user_id', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Column('image_ref', sa.String(length=255)),
sa.Column('kernel_id', sa.String(length=255)),
sa.Column('ramdisk_id', sa.String(length=255)),
sa.Column('launch_index', sa.Integer),
sa.Column('key_name', sa.String(length=255)),
sa.Column('key_data', types.MediumText()),
sa.Column('power_state', sa.Integer),
sa.Column('vm_state', sa.String(length=255)),
sa.Column('memory_mb', sa.Integer),
sa.Column('vcpus', sa.Integer),
sa.Column('hostname', sa.String(length=255)),
sa.Column('host', sa.String(length=255)),
sa.Column('user_data', types.MediumText()),
sa.Column('reservation_id', sa.String(length=255)),
sa.Column('launched_at', sa.DateTime),
sa.Column('terminated_at', sa.DateTime),
sa.Column('display_name', sa.String(length=255)),
sa.Column('display_description', sa.String(length=255)),
sa.Column('availability_zone', sa.String(length=255)),
sa.Column('locked', sa.Boolean),
sa.Column('os_type', sa.String(length=255)),
sa.Column('launched_on', types.MediumText()),
sa.Column('instance_type_id', sa.Integer),
sa.Column('vm_mode', sa.String(length=255)),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('architecture', sa.String(length=255)),
sa.Column('root_device_name', sa.String(length=255)),
sa.Column('access_ip_v4', InetSmall()),
sa.Column('access_ip_v6', InetSmall()),
sa.Column('config_drive', sa.String(length=255)),
sa.Column('task_state', sa.String(length=255)),
sa.Column('default_ephemeral_device', sa.String(length=255)),
sa.Column('default_swap_device', sa.String(length=255)),
sa.Column('progress', sa.Integer),
sa.Column('auto_disk_config', sa.Boolean),
sa.Column('shutdown_terminate', sa.Boolean),
sa.Column('disable_terminate', sa.Boolean),
sa.Column('root_gb', sa.Integer),
sa.Column('ephemeral_gb', sa.Integer),
sa.Column('cell_name', sa.String(length=255)),
sa.Column('node', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Column(
'locked_by',
sa.Enum('owner', 'admin', name='instances0locked_by')),
sa.Column('cleaned', sa.Integer, default=0),
sa.Column('ephemeral_key_uuid', sa.String(36)),
# NOTE(danms): This column originally included default=False. We
# discovered in bug #1862205 that this will attempt to rewrite
# the entire instances table with that value, which can time out
# for large data sets (and does not even abort).
# NOTE(stephenfin): This was originally added by sqlalchemy-migrate
# which did not generate the constraints
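    # (Added note, not in the original migration: create_constraint=False
    # simply tells SQLAlchemy not to emit the CHECK constraint it would
    # otherwise generate for Boolean columns on backends without a native
    # BOOLEAN type, so the column is created as a plain integer/tinyint.)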
sa.Column('hidden', sa.Boolean(create_constraint=False)),
sa.Index('uuid', 'uuid', unique=True),
sa.Index('instances_reservation_id_idx', 'reservation_id'),
sa.Index(
'instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
sa.Index(
'instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
sa.Index('instances_uuid_deleted_idx', 'uuid', 'deleted'),
sa.Index('instances_host_node_deleted_idx', 'host', 'node', 'deleted'),
sa.Index(
'instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
sa.Index('instances_project_id_deleted_idx', 'project_id', 'deleted'),
sa.Index('instances_deleted_created_at_idx', 'deleted', 'created_at'),
sa.Index('instances_project_id_idx', 'project_id'),
sa.Index(
'instances_updated_at_project_id_idx',
'updated_at', 'project_id'),
UniqueConstraint('uuid', name='uniq_instances0uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = sa.Table('instance_actions', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('action', sa.String(length=255)),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='fk_instance_actions_instance_uuid')),
sa.Column('request_id', sa.String(length=255)),
sa.Column('user_id', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Column('start_time', sa.DateTime),
sa.Column('finish_time', sa.DateTime),
sa.Column('message', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Index('instance_uuid_idx', 'instance_uuid'),
sa.Index('request_id_idx', 'request_id'),
sa.Index(
'instance_actions_instance_uuid_updated_at_idx',
'instance_uuid', 'updated_at'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = sa.Table('instance_actions_events', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('event', sa.String(length=255)),
sa.Column(
'action_id', sa.Integer, sa.ForeignKey('instance_actions.id')),
sa.Column('start_time', sa.DateTime),
sa.Column('finish_time', sa.DateTime),
sa.Column('result', sa.String(length=255)),
sa.Column('traceback', sa.Text),
sa.Column('deleted', sa.Integer),
sa.Column('host', sa.String(255)),
sa.Column('details', sa.Text),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_extra = sa.Table('instance_extra', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='instance_extra_instance_uuid_fkey'),
nullable=False),
sa.Column('numa_topology', sa.Text, nullable=True),
sa.Column('pci_requests', sa.Text, nullable=True),
sa.Column('flavor', sa.Text, nullable=True),
sa.Column('vcpu_model', sa.Text, nullable=True),
sa.Column('migration_context', sa.Text, nullable=True),
sa.Column('keypairs', sa.Text, nullable=True),
sa.Column('device_metadata', sa.Text, nullable=True),
sa.Column('trusted_certs', sa.Text, nullable=True),
sa.Column('vpmems', sa.Text, nullable=True),
sa.Column('resources', sa.Text, nullable=True),
sa.Index('instance_extra_idx', 'instance_uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
inventories = sa.Table('inventories', meta,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('resource_provider_id', sa.Integer, nullable=False),
sa.Column('resource_class_id', sa.Integer, nullable=False),
sa.Column('total', sa.Integer, nullable=False),
sa.Column('reserved', sa.Integer, nullable=False),
sa.Column('min_unit', sa.Integer, nullable=False),
sa.Column('max_unit', sa.Integer, nullable=False),
sa.Column('step_size', sa.Integer, nullable=False),
sa.Column('allocation_ratio', sa.Float, nullable=False),
sa.Index(
'inventories_resource_provider_id_idx', 'resource_provider_id'),
sa.Index(
'inventories_resource_class_id_idx', 'resource_class_id'),
sa.Index(
'inventories_resource_provider_resource_class_idx',
'resource_provider_id', 'resource_class_id'),
UniqueConstraint(
'resource_provider_id', 'resource_class_id',
name='uniq_inventories0resource_provider_resource_class'),
mysql_engine='InnoDB',
mysql_charset='latin1',
)
key_pairs = sa.Table('key_pairs', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('user_id', sa.String(length=255)),
sa.Column('fingerprint', sa.String(length=255)),
sa.Column('public_key', types.MediumText()),
sa.Column('deleted', sa.Integer),
sa.Column(
'type', sa.Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH),
UniqueConstraint(
'user_id', 'name', 'deleted',
name='uniq_key_pairs0user_id0name0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = sa.Table('migrations', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('source_compute', sa.String(length=255)),
sa.Column('dest_compute', sa.String(length=255)),
sa.Column('dest_host', sa.String(length=255)),
sa.Column('status', sa.String(length=255)),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid', name='fk_migrations_instance_uuid')),
sa.Column('old_instance_type_id', sa.Integer),
sa.Column('new_instance_type_id', sa.Integer),
sa.Column('source_node', sa.String(length=255)),
sa.Column('dest_node', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
sa.Column(
'migration_type',
sa.Enum(
'migration', 'resize', 'live-migration', 'evacuation',
name='migration_type'),
nullable=True),
# NOTE(stephenfin): This was originally added by sqlalchemy-migrate
# which did not generate the constraints
sa.Column(
'hidden', sa.Boolean(create_constraint=False), default=False),
sa.Column('memory_total', sa.BigInteger, nullable=True),
sa.Column('memory_processed', sa.BigInteger, nullable=True),
sa.Column('memory_remaining', sa.BigInteger, nullable=True),
sa.Column('disk_total', sa.BigInteger, nullable=True),
sa.Column('disk_processed', sa.BigInteger, nullable=True),
sa.Column('disk_remaining', sa.BigInteger, nullable=True),
sa.Column('uuid', sa.String(36)),
# NOTE(stephenfin): This was originally added by sqlalchemy-migrate
# which did not generate the constraints
sa.Column(
'cross_cell_move', sa.Boolean(create_constraint=False),
default=False),
sa.Column('user_id', sa.String(255), nullable=True),
sa.Column('project_id', sa.String(255), nullable=True),
sa.Index('migrations_uuid', 'uuid', unique=True),
sa.Index(
'migrations_instance_uuid_and_status_idx',
'deleted', 'instance_uuid', 'status'),
sa.Index('migrations_updated_at_idx', 'updated_at'),
# mysql-specific index by leftmost 100 chars. (mysql gets angry if the
# index key length is too long.)
sa.Index(
'migrations_by_host_nodes_and_status_idx',
'deleted', 'source_compute', 'dest_compute', 'source_node',
'dest_node', 'status',
mysql_length={
'source_compute': 100,
'dest_compute': 100,
'source_node': 100,
'dest_node': 100,
}),
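    # (Illustrative note, added here: with mysql_length the resulting MySQL
    # DDL is roughly
    #   KEY migrations_by_host_nodes_and_status_idx
    #     (deleted, source_compute(100), dest_compute(100),
    #      source_node(100), dest_node(100), status)
    # i.e. only a 100-character prefix of each string column is indexed,
    # keeping the key under MySQL's index-length limit.)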
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = sa.Table('networks', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('injected', sa.Boolean),
sa.Column('cidr', Inet()),
sa.Column('netmask', InetSmall()),
sa.Column('bridge', sa.String(length=255)),
sa.Column('gateway', InetSmall()),
sa.Column('broadcast', InetSmall()),
sa.Column('dns1', InetSmall()),
sa.Column('vlan', sa.Integer),
sa.Column('vpn_public_address', InetSmall()),
sa.Column('vpn_public_port', sa.Integer),
sa.Column('vpn_private_address', InetSmall()),
sa.Column('dhcp_start', InetSmall()),
sa.Column('project_id', sa.String(length=255)),
sa.Column('host', sa.String(length=255)),
sa.Column('cidr_v6', Inet()),
sa.Column('gateway_v6', InetSmall()),
sa.Column('label', sa.String(length=255)),
sa.Column('netmask_v6', InetSmall()),
sa.Column('bridge_interface', sa.String(length=255)),
sa.Column('multi_host', sa.Boolean),
sa.Column('dns2', InetSmall()),
sa.Column('uuid', sa.String(length=36)),
sa.Column('priority', sa.Integer),
sa.Column('rxtx_base', sa.Integer),
sa.Column('deleted', sa.Integer),
sa.Column('mtu', sa.Integer),
sa.Column('dhcp_server', types.IPAddress),
# NOTE(stephenfin): These were originally added by sqlalchemy-migrate
# which did not generate the constraints
sa.Column(
'enable_dhcp', sa.Boolean(create_constraint=False), default=True),
sa.Column(
'share_address', sa.Boolean(create_constraint=False),
default=False),
sa.Index('networks_host_idx', 'host'),
sa.Index('networks_cidr_v6_idx', 'cidr_v6'),
sa.Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
sa.Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
sa.Index(
'networks_uuid_project_id_deleted_idx',
'uuid', 'project_id', 'deleted'),
sa.Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
UniqueConstraint('vlan', 'deleted', name='uniq_networks0vlan0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
pci_devices = sa.Table('pci_devices', meta,
sa.Column('created_at', sa.DateTime(timezone=False)),
sa.Column('updated_at', sa.DateTime(timezone=False)),
sa.Column('deleted_at', sa.DateTime(timezone=False)),
sa.Column('deleted', sa.Integer, default=0, nullable=True),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column(
'compute_node_id', sa.Integer,
sa.ForeignKey(
'compute_nodes.id', name='pci_devices_compute_node_id_fkey'),
nullable=False),
sa.Column('address', sa.String(12), nullable=False),
sa.Column('product_id', sa.String(4), nullable=False),
sa.Column('vendor_id', sa.String(4), nullable=False),
sa.Column('dev_type', sa.String(8), nullable=False),
sa.Column('dev_id', sa.String(255)),
sa.Column('label', sa.String(255), nullable=False),
sa.Column('status', sa.String(36), nullable=False),
sa.Column('extra_info', sa.Text, nullable=True),
sa.Column('instance_uuid', sa.String(36), nullable=True),
sa.Column('request_id', sa.String(36), nullable=True),
sa.Column('numa_node', sa.Integer, default=None),
sa.Column('parent_addr', sa.String(12), nullable=True),
sa.Column('uuid', sa.String(36)),
sa.Index(
'ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
sa.Index(
'ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
sa.Index(
'ix_pci_devices_compute_node_id_parent_addr_deleted',
'compute_node_id', 'parent_addr', 'deleted'),
UniqueConstraint(
'compute_node_id', 'address', 'deleted',
name='uniq_pci_devices0compute_node_id0address0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8')
provider_fw_rules = sa.Table('provider_fw_rules', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('protocol', sa.String(length=5)),
sa.Column('from_port', sa.Integer),
sa.Column('to_port', sa.Integer),
sa.Column('cidr', Inet()),
sa.Column('deleted', sa.Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = sa.Table('quota_classes', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('class_name', sa.String(length=255)),
sa.Column('resource', sa.String(length=255)),
sa.Column('hard_limit', sa.Integer),
sa.Column('deleted', sa.Integer),
sa.Index('ix_quota_classes_class_name', 'class_name'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = sa.Table('quota_usages', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('project_id', sa.String(length=255)),
sa.Column('resource', sa.String(length=255), nullable=False),
sa.Column('in_use', sa.Integer, nullable=False),
sa.Column('reserved', sa.Integer, nullable=False),
sa.Column('until_refresh', sa.Integer),
sa.Column('deleted', sa.Integer),
sa.Column('user_id', sa.String(length=255)),
sa.Index('ix_quota_usages_project_id', 'project_id'),
sa.Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = sa.Table('quotas', meta,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('project_id', sa.String(length=255)),
sa.Column('resource', sa.String(length=255), nullable=False),
sa.Column('hard_limit', sa.Integer),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'project_id', 'resource', 'deleted',
name='uniq_quotas0project_id0resource0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
project_user_quotas = sa.Table('project_user_quotas', meta,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('user_id', sa.String(length=255), nullable=False),
sa.Column('project_id', sa.String(length=255), nullable=False),
sa.Column('resource', sa.String(length=255), nullable=False),
sa.Column('hard_limit', sa.Integer, nullable=True),
sa.Index(
'project_user_quotas_project_id_deleted_idx',
'project_id', 'deleted'),
sa.Index(
'project_user_quotas_user_id_deleted_idx',
'user_id', 'deleted'),
UniqueConstraint(
'user_id', 'project_id', 'resource', 'deleted',
name='uniq_project_user_quotas0user_id0project_id0resource0'
'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = sa.Table('reservations', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column(
'usage_id', sa.Integer,
sa.ForeignKey('quota_usages.id', name='reservations_ibfk_1'),
nullable=False),
sa.Column('project_id', sa.String(length=255)),
sa.Column('resource', sa.String(length=255)),
sa.Column('delta', sa.Integer, nullable=False),
sa.Column('expire', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('user_id', sa.String(length=255)),
sa.Index('ix_reservations_project_id', 'project_id'),
sa.Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
sa.Index('reservations_uuid_idx', 'uuid'),
sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
resource_providers = sa.Table('resource_providers', meta,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(36), nullable=False),
sa.Column('name', sa.Unicode(200), nullable=True),
sa.Column('generation', sa.Integer, default=0),
sa.Column('can_host', sa.Integer, default=0),
UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
UniqueConstraint('name', name='uniq_resource_providers0name'),
sa.Index('resource_providers_name_idx', 'name'),
sa.Index('resource_providers_uuid_idx', 'uuid'),
mysql_engine='InnoDB',
mysql_charset='latin1',
)
resource_provider_aggregates = sa.Table(
'resource_provider_aggregates', meta,
sa.Column(
'resource_provider_id', sa.Integer, primary_key=True,
nullable=False),
sa.Column(
'aggregate_id', sa.Integer, primary_key=True, nullable=False),
sa.Index(
'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
mysql_engine='InnoDB',
mysql_charset='latin1',
)
s3_images = sa.Table('s3_images', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('deleted', sa.Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = sa.Table(
'security_group_instance_association', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'security_group_id', sa.Integer,
sa.ForeignKey(
'security_groups.id',
name='security_group_instance_association_ibfk_1'),
),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid',
name='security_group_instance_association_instance_uuid_fkey'),
),
sa.Column('deleted', sa.Integer),
sa.Index(
'security_group_instance_association_instance_uuid_idx',
'instance_uuid'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = sa.Table('security_group_rules', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'parent_group_id', sa.Integer,
sa.ForeignKey('security_groups.id')),
sa.Column('protocol', sa.String(length=255)),
sa.Column('from_port', sa.Integer),
sa.Column('to_port', sa.Integer),
sa.Column('cidr', Inet()),
sa.Column('group_id', sa.Integer, sa.ForeignKey('security_groups.id')),
sa.Column('deleted', sa.Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = sa.Table('security_groups', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('name', sa.String(length=255)),
sa.Column('description', sa.String(length=255)),
sa.Column('user_id', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Column('deleted', sa.Integer),
UniqueConstraint(
'project_id', 'name', 'deleted',
name='uniq_security_groups0project_id0name0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = sa.Table(
'security_group_default_rules', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('deleted', sa.Integer, default=0),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('protocol', sa.String(length=5)),
sa.Column('from_port', sa.Integer),
sa.Column('to_port', sa.Integer),
sa.Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = sa.Table('services', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('host', sa.String(length=255)),
sa.Column('binary', sa.String(length=255)),
sa.Column('topic', sa.String(length=255)),
sa.Column('report_count', sa.Integer, nullable=False),
sa.Column('disabled', sa.Boolean),
sa.Column('deleted', sa.Integer),
sa.Column('disabled_reason', sa.String(length=255)),
sa.Column('last_seen_up', sa.DateTime, nullable=True),
# NOTE(stephenfin): This was originally added by sqlalchemy-migrate
# which did not generate the constraints
sa.Column(
'forced_down', sa.Boolean(create_constraint=False), default=False),
sa.Column('version', sa.Integer, default=0),
sa.Column('uuid', sa.String(36), nullable=True),
sa.Index('services_uuid_idx', 'uuid', unique=True),
UniqueConstraint(
'host', 'topic', 'deleted',
name='uniq_services0host0topic0deleted'),
UniqueConstraint(
'host', 'binary', 'deleted',
name='uniq_services0host0binary0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = sa.Table('snapshot_id_mappings', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('deleted', sa.Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = sa.Table('snapshots', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column(
'id', sa.String(length=36), primary_key=True, nullable=False),
sa.Column('volume_id', sa.String(length=36), nullable=False),
sa.Column('user_id', sa.String(length=255)),
sa.Column('project_id', sa.String(length=255)),
sa.Column('status', sa.String(length=255)),
sa.Column('progress', sa.String(length=255)),
sa.Column('volume_size', sa.Integer),
sa.Column('scheduled_at', sa.DateTime),
sa.Column('display_name', sa.String(length=255)),
sa.Column('display_description', sa.String(length=255)),
sa.Column('deleted', sa.String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
tags = sa.Table('tags', meta,
sa.Column(
'resource_id', sa.String(36), primary_key=True, nullable=False),
sa.Column('tag', sa.Unicode(80), primary_key=True, nullable=False),
sa.Index('tags_tag_idx', 'tag'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
task_log = sa.Table('task_log', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('task_name', sa.String(length=255), nullable=False),
sa.Column('state', sa.String(length=255), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('period_beginning', sa.DateTime, nullable=False),
sa.Column('period_ending', sa.DateTime, nullable=False),
sa.Column('message', sa.String(length=255), nullable=False),
sa.Column('task_items', sa.Integer),
sa.Column('errors', sa.Integer),
sa.Column('deleted', sa.Integer),
sa.Index('ix_task_log_period_beginning', 'period_beginning'),
sa.Index('ix_task_log_host', 'host'),
sa.Index('ix_task_log_period_ending', 'period_ending'),
UniqueConstraint(
'task_name', 'host', 'period_beginning', 'period_ending',
name='uniq_task_log0task_name0host0period_beginning0period_ending',
),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = sa.Table('virtual_interfaces', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('address', sa.String(length=255)),
sa.Column('network_id', sa.Integer),
sa.Column('uuid', sa.String(length=36)),
sa.Column(
'instance_uuid', sa.String(length=36),
sa.ForeignKey(
'instances.uuid',
name='virtual_interfaces_instance_uuid_fkey'),
nullable=True),
sa.Column('deleted', sa.Integer),
sa.Column('tag', sa.String(255)),
sa.Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
sa.Index('virtual_interfaces_network_id_idx', 'network_id'),
sa.Index('virtual_interfaces_uuid_idx', 'uuid'),
UniqueConstraint(
'address', 'deleted',
name='uniq_virtual_interfaces0address0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = sa.Table('volume_id_mappings', meta,
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('deleted_at', sa.DateTime),
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('deleted', sa.Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = sa.Table('volume_usage_cache', meta,
sa.Column('created_at', sa.DateTime(timezone=False)),
sa.Column('updated_at', sa.DateTime(timezone=False)),
sa.Column('deleted_at', sa.DateTime(timezone=False)),
sa.Column('id', sa.Integer(), primary_key=True, nullable=False),
sa.Column('volume_id', sa.String(36), nullable=False),
sa.Column('tot_last_refreshed', sa.DateTime(timezone=False)),
sa.Column('tot_reads', sa.BigInteger(), default=0),
sa.Column('tot_read_bytes', sa.BigInteger(), default=0),
sa.Column('tot_writes', sa.BigInteger(), default=0),
sa.Column('tot_write_bytes', sa.BigInteger(), default=0),
sa.Column('curr_last_refreshed', sa.DateTime(timezone=False)),
sa.Column('curr_reads', sa.BigInteger(), default=0),
sa.Column('curr_read_bytes', sa.BigInteger(), default=0),
sa.Column('curr_writes', sa.BigInteger(), default=0),
sa.Column('curr_write_bytes', sa.BigInteger(), default=0),
sa.Column('deleted', sa.Integer),
sa.Column('instance_uuid', sa.String(length=36)),
sa.Column('project_id', sa.String(length=36)),
sa.Column('user_id', sa.String(length=64)),
sa.Column('availability_zone', sa.String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# create all tables
tables = [instances, aggregates, console_auth_tokens,
console_pools, instance_types,
security_groups, snapshots,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events, instance_extra,
groups, group_policy, group_member,
key_pairs, migrations, networks,
pci_devices, provider_fw_rules, quota_classes, quota_usages,
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, tags, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache,
resource_providers, inventories, allocations,
resource_provider_aggregates]
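    # (Added note: the ordering above matters -- tables that are foreign-key
    # targets, e.g. instances, instance_types and security_groups, are listed
    # before the tables that reference them, so the FK targets already exist
    # when the child tables are created.)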
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception('Exception while creating table.')
raise
# MySQL specific indexes
if migrate_engine.name == 'mysql':
# NOTE(stephenfin): For some reason, we have to put this within the if
# statement to avoid it being evaluated for the sqlite case. Even
# though we don't call create except in the MySQL case... Failure to do
# this will result in the following ugly error message:
#
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such
# index: instance_type_id
#
# Yeah, I don't get it either...
mysql_specific_indexes = [
sa.Index(
'instance_type_id',
instance_type_projects.c.instance_type_id),
sa.Index('usage_id', reservations.c.usage_id),
sa.Index(
'security_group_id',
security_group_instance_association.c.security_group_id),
]
for index in mysql_specific_indexes:
index.create(migrate_engine)
if migrate_engine.name == 'mysql':
# In Folsom we explicitly converted migrate_version to UTF8.
with migrate_engine.connect() as conn:
conn.exec_driver_sql(
'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8'
)
# Set default DB charset to UTF8.
conn.exec_driver_sql(
'ALTER DATABASE `%s` DEFAULT CHARACTER SET utf8' % (
migrate_engine.url.database,
)
)
# NOTE(cdent): The resource_providers table is defined as latin1 to
# be more efficient. Now we need the name column to be UTF8. We
# modify it here otherwise the declarative handling in sqlalchemy
# gets confused.
conn.exec_driver_sql(
'ALTER TABLE resource_providers MODIFY name '
'VARCHAR(200) CHARACTER SET utf8'
)
_create_shadow_tables(migrate_engine)
# TODO(stephenfin): Fix these various bugs in a follow-up
# 298_mysql_extra_specs_binary_collation; we should update the shadow table
# also
if migrate_engine.name == 'mysql':
with migrate_engine.connect() as conn:
# Use binary collation for extra specs table
conn.exec_driver_sql(
'ALTER TABLE instance_type_extra_specs '
'CONVERT TO CHARACTER SET utf8 '
'COLLATE utf8_bin'
)
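    # (Added note: CONVERT TO ... COLLATE utf8_bin makes key/value comparisons
    # on instance_type_extra_specs byte-wise and case-sensitive; presumably
    # the intent is to keep extra-spec keys that differ only in case from
    # colliding under the case-insensitive default collation.)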
|
|
#!/usr/bin/env python
import urlparse
import urllib2
import BaseHTTPServer
import unittest
import hashlib
from test import test_support
mimetools = test_support.import_module('mimetools', deprecated=True)
threading = test_support.import_module('threading')
# Loopback http server infrastructure
class LoopbackHttpServer(BaseHTTPServer.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
BaseHTTPServer.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(1.0)
def get_request(self):
"""BaseHTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(('127.0.0.1', 0),
request_handler)
#print "Serving HTTP on %s port %s" % (self.httpd.server_name,
# self.httpd.server_port)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
# Set the stop flag.
self._stop = True
self.join()
def run(self):
self.ready.set()
while not self._stop:
self.httpd.handle_request()
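# (Added note: the 1.0s timeout set on the listening socket in
# LoopbackHttpServer is what makes this thread stoppable --
# handle_request() returns periodically instead of blocking forever, so
# the loop above re-checks self._stop.)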
# Authentication infrastructure
class DigestAuthHandler:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num)).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str).hexdigest()
return response == auth_dict["response"]
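    # (Summary, added for clarity: for qop="auth" the check above follows
    # RFC 2617:
    #   HA1      = MD5(username ":" realm ":" password)
    #   HA2      = MD5(method ":" uri)
    #   response = MD5(HA1 ":" nonce ":" nc ":" cnonce ":" qop ":" HA2)
    # and the request is accepted only when the client's "response" field
    # matches the value computed server-side.)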
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write("Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if 'Proxy-Authorization' not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers['Proxy-Authorization']
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if not auth_dict.get("nonce") in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib2 uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
# Proxy test infrastructure
class FakeProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""This is a 'fake proxy' that makes it look like the entire
internet has gone down due to a sudden zombie invasion. Its main
utility is in providing us with authentication support for
testing.
"""
def __init__(self, digest_auth_handler, *args, **kwargs):
# This has to be set before calling our parent's __init__(), which will
# try to call do_GET().
self.digest_auth_handler = digest_auth_handler
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Uncomment the next line for debugging.
#sys.stderr.write(format % args)
pass
def do_GET(self):
(scm, netloc, path, params, query, fragment) = urlparse.urlparse(
self.path, 'http')
self.short_path = path
if self.digest_auth_handler.handle_request(self):
self.send_response(200, "OK")
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write("You've reached %s!<BR>" % self.path)
self.wfile.write("Our apologies, but our server is down due to "
"a sudden zombie invasion.")
# Test cases
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
def tearDown(self):
test_support.threading_cleanup(*self._threads)
class ProxyAuthTests(BaseTestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib2.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib2.ProxyDigestAuthHandler()
self.opener = urllib2.build_opener(handler, self.proxy_digest_handler)
def tearDown(self):
self.server.stop()
super(ProxyAuthTests, self).tearDown()
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib2.HTTPError,
self.opener.open,
self.URL)
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib2.HTTPError,
self.opener.open,
self.URL)
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
result = self.opener.open(self.URL)
while result.read():
pass
result.close()
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib2.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
result = None
if result:
while result.read():
pass
result.close()
def GetRequestHandler(responses):
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
if body:
self.wfile.write(body)
def do_POST(self):
content_length = self.headers['Content-Length']
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % self.port)
if body:
self.send_header('Content-type', 'text/plain')
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
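# (Added note: GetRequestHandler builds a fresh handler class per test run;
# each incoming request pops the next (code, headers, body) tuple from the
# shared 'responses' list, so a test queues its canned replies in order,
# e.g. responses = [(302, [('Location', '...')], ''), (200, [], 'body')].)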
class TestUrlopen(BaseTestCase):
"""Tests urllib2.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
"""
def start_server(self, responses):
handler = GetRequestHandler(responses)
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
port = self.server.port
handler.port = port
return handler
def test_redirection(self):
expected_response = 'We got here...'
responses = [
(302, [('Location', 'http://localhost:%s/somewhere_else')], ''),
(200, [], expected_response)
]
handler = self.start_server(responses)
try:
f = urllib2.urlopen('http://localhost:%s/' % handler.port)
data = f.read()
f.close()
self.assertEquals(data, expected_response)
self.assertEquals(handler.requests, ['/', '/somewhere_else'])
finally:
self.server.stop()
def test_404(self):
expected_response = 'Bad bad bad...'
handler = self.start_server([(404, [], expected_response)])
try:
try:
urllib2.urlopen('http://localhost:%s/weeble' % handler.port)
except urllib2.URLError, f:
pass
else:
self.fail('404 should raise URLError')
data = f.read()
f.close()
self.assertEquals(data, expected_response)
self.assertEquals(handler.requests, ['/weeble'])
finally:
self.server.stop()
def test_200(self):
expected_response = 'pycon 2008...'
handler = self.start_server([(200, [], expected_response)])
try:
f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port)
data = f.read()
f.close()
self.assertEquals(data, expected_response)
self.assertEquals(handler.requests, ['/bizarre'])
finally:
self.server.stop()
def test_200_with_parameters(self):
expected_response = 'pycon 2008...'
handler = self.start_server([(200, [], expected_response)])
try:
f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port, 'get=with_feeling')
data = f.read()
f.close()
self.assertEquals(data, expected_response)
self.assertEquals(handler.requests, ['/bizarre', 'get=with_feeling'])
finally:
self.server.stop()
def test_sending_headers(self):
handler = self.start_server([(200, [], "we don't care")])
try:
req = urllib2.Request("http://localhost:%s/" % handler.port,
headers={'Range': 'bytes=20-39'})
urllib2.urlopen(req)
self.assertEqual(handler.headers_received['Range'], 'bytes=20-39')
finally:
self.server.stop()
def test_basic(self):
handler = self.start_server([(200, [], "we don't care")])
try:
open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
for attr in ("read", "close", "info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
finally:
self.server.stop()
def test_info(self):
handler = self.start_server([(200, [], "we don't care")])
try:
open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
info_obj = open_url.info()
self.assertIsInstance(info_obj, mimetools.Message,
"object returned by 'info' is not an "
"instance of mimetools.Message")
self.assertEqual(info_obj.getsubtype(), "plain")
finally:
self.server.stop()
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server([(200, [], "we don't care")])
try:
open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
url = open_url.geturl()
self.assertEqual(url, "http://localhost:%s" % handler.port)
finally:
self.server.stop()
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
self.assertRaises(IOError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
# domain name requests in an attempt to boost traffic
# to their own sites, finding a domain name to use
# for this test is difficult. RFC2606 leads one to
# believe that '.invalid' should work, but experience
# seemed to indicate otherwise. Single character
# TLDs are likely to remain invalid, so this seems to
# be the best choice. The trailing '.' prevents a
# related problem: The normal DNS resolver appends
# the domain names from the search path if there is
# no '.' at the end, and if one of those domains
# implements a '*' rule a result is returned.
# However, none of this will prevent the test from
# failing if the ISP hijacks all invalid domain
# requests. The real solution would be to be able to
# parameterize the framework with a mock resolver.
urllib2.urlopen, "http://sadflkjsasf.i.nvali.d./")
def test_main():
# We will NOT depend on the network resource flag
# (Lib/test/regrtest.py -u network) since all tests here are only
# localhost. However, if this is a bad rationale, then uncomment
# the next line.
#test_support.requires("network")
test_support.run_unittest(ProxyAuthTests, TestUrlopen)
if __name__ == "__main__":
test_main()
|
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import httplib
import mock
import stubout
import webtest
from google.apputils import basetest
from simian import settings
from simian.mac import admin
from simian.mac import models
from simian.mac.admin import applesus
from simian.mac.admin import main as gae_main
from simian.mac.admin import xsrf
from simian.mac.common import auth
from tests.simian.mac.common import test
class ApplesusModuleTest(test.AppengineTest):
def setUp(self):
super(ApplesusModuleTest, self).setUp()
self.testapp = webtest.TestApp(gae_main.app)
@mock.patch.object(auth, 'IsAdminUser', return_value=False)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
def testPostAccessDenied(self, *_):
self.testapp.post('/admin/applesus', status=httplib.FORBIDDEN)
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
@mock.patch.object(applesus.applesus, 'GenerateAppleSUSCatalogs')
def testPostCatalogGeneration(self, generate_catalog_mock, *_):
self.testapp.post('/admin/applesus', {
'regenerate-catalogs': 1,
'tracks': 'stable',
})
generate_catalog_mock.assert_called_once_with(tracks=['stable'], delay=1)
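  # (Added note: stacked mock.patch.object decorators are applied bottom-up,
  # so the mock for the decorator nearest the method -- GenerateAppleSUSCatalogs
  # above -- is passed as the first argument; the remaining mocks are absorbed
  # by *_.)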
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
def testChangeProduct(self, *_):
settings.EMAIL_ON_EVERY_CHANGE = True
product_id = 'pid'
models.AppleSUSProduct(key_name=product_id, product_id=product_id).put()
self.testapp.post('/admin/applesus/product/' + product_id, {
'enabled': 1,
'track': 'stable',
}, status=httplib.OK)
product = models.AppleSUSProduct.get_by_key_name(product_id)
self.assertEqual(['stable'], product.tracks)
self.RunAllDeferredTasks()
mail_stub = self.testbed.get_stub('mail')
messages = mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual('admin@example.com', messages[0].to)
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
def testChangeProductManualOverride(self, *_):
product_id = 'pid'
models.AppleSUSProduct(
key_name=product_id, product_id=product_id, manual_override=False).put()
self.testapp.post('/admin/applesus/product/' + product_id, {
'manual_override': 1,
}, status=httplib.OK)
product = models.AppleSUSProduct.get_by_key_name(product_id)
self.assertTrue(product.manual_override)
self.RunAllDeferredTasks()
mail_stub = self.testbed.get_stub('mail')
messages = mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual(1, len(models.AdminAppleSUSProductLog.all().fetch(None)))
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
def testChangeProductUnattended(self, *_):
product_id = 'pid'
models.AppleSUSProduct(
key_name=product_id, product_id=product_id, unattended=False).put()
self.testapp.post('/admin/applesus/product/' + product_id, {
'unattended': 1,
}, status=httplib.OK)
product = models.AppleSUSProduct.get_by_key_name(product_id)
self.assertTrue(product.unattended)
self.RunAllDeferredTasks()
mail_stub = self.testbed.get_stub('mail')
messages = mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual(1, len(models.AdminAppleSUSProductLog.all().fetch(None)))
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
def testChangeProductForceInstallToday(self, *_):
product_id = 'pid'
models.AppleSUSProduct(
key_name=product_id, product_id=product_id, manual_override=False).put()
force_install_after_date = datetime.datetime.now()
self.testapp.post('/admin/applesus/product/' + product_id, {
'force_install_after_date': datetime.datetime.strftime(
force_install_after_date, '%Y-%m-%d %H:%M'),
}, status=httplib.BAD_REQUEST)
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
def testChangeProductForceInstall(self, *_):
product_id = 'pid'
models.AppleSUSProduct(
key_name=product_id, product_id=product_id, manual_override=False).put()
force_install_after_date = (datetime.datetime.now() +
datetime.timedelta(days=4))
self.testapp.post('/admin/applesus/product/' + product_id, {
'force_install_after_date': datetime.datetime.strftime(
force_install_after_date, '%Y-%m-%d %H:%M'),
}, status=httplib.OK)
product = models.AppleSUSProduct.get_by_key_name(product_id)
self.assertEquals(
datetime.datetime.strftime(
force_install_after_date, '%Y-%m-%dT%H:%M:00Z'),
product.force_install_after_date_str)
self.RunAllDeferredTasks()
mail_stub = self.testbed.get_stub('mail')
messages = mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual(1, len(models.AdminAppleSUSProductLog.all().fetch(None)))
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(admin.AdminHandler, 'Render')
def testDisplayMain(self, render_mock, _):
product_id = 'pid'
models.AppleSUSProduct(
key_name=product_id, product_id=product_id, tracks=['unstable']).put()
self.testapp.get('/admin/applesus/', status=httplib.OK)
args = test.GetArgFromCallHistory(render_mock, arg_index=1)
self.assertEqual(1, len(args['products']))
self.assertLess(
args['products'][0].stable_promote_date,
(datetime.datetime.now() + datetime.timedelta(days=60)).date())
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(admin.AdminHandler, 'Render')
def testDisplayUpdateLogs(self, render_mock, _):
product_id = 'pid'
p = models.AppleSUSProduct(
key_name=product_id, product_id=product_id, tracks=['unstable'])
p.put()
models.AdminAppleSUSProductLog.Log(p, 'action description')
self.testapp.get(
'/admin/applesus/logs', {'product_id': product_id}, status=httplib.OK)
args = test.GetArgFromCallHistory(render_mock, arg_index=1)
self.assertEqual(1, len(args['logs']))
self.assertEqual(product_id, args['product_id'])
if __name__ == '__main__':
basetest.main()
|
|
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SNS
:configuration: This module accepts explicit sns credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from the AWS API and no further
configuration is necessary. More information is available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sns.keyid: GKTADJGHEIQSXMKKRBJ08H
sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sns.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
# Import third party libs
try:
#pylint: disable=unused-import
import boto
import boto.sns
#pylint: enable=unused-import
logging.getLogger('boto').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
'''
Only load if boto libraries exist.
'''
if not HAS_BOTO:
return False
__utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__)
return True
def get_all_topics(region=None, key=None, keyid=None, profile=None):
'''
Returns all SNS topics (a mapping of short topic name to ARN).
CLI example::
salt myminion boto_sns.get_all_topics
'''
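# Topic ARNs, keyed by short topic name, are cached in __context__ so repeated
# calls within the same run don't hit the AWS API again.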
cache_key = _cache_get_key()
try:
return __context__[cache_key]
except KeyError:
pass
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
__context__[cache_key] = {}
# TODO: support >100 SNS topics (via NextToken)
topics = conn.get_all_topics()
for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']:
short_name = t['TopicArn'].split(':')[-1]
__context__[cache_key][short_name] = t['TopicArn']
return __context__[cache_key]
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if an SNS topic exists.
CLI example::
salt myminion boto_sns.exists mytopic region=us-east-1
'''
topics = get_all_topics(region=region, key=key, keyid=keyid,
profile=profile)
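# A full ARN is matched against the cached ARN values; a bare topic name is
# matched against the cached short names.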
if name.startswith('arn:aws:sns:'):
return name in list(topics.values())
else:
return name in list(topics.keys())
def create(name, region=None, key=None, keyid=None, profile=None):
'''
Create an SNS topic.
CLI example to create a topic::
salt myminion boto_sns.create mytopic region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.create_topic(name)
log.info('Created SNS topic {0}'.format(name))
_invalidate_cache()
return True
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SNS topic.
CLI example to delete a topic::
salt myminion boto_sns.delete mytopic region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_topic(get_arn(name, region, key, keyid, profile))
log.info('Deleted SNS topic {0}'.format(name))
_invalidate_cache()
return True
def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None):
'''
Get list of all subscriptions to a specific topic.
CLI example to list subscriptions for a topic::
salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
'''
cache_key = _subscriptions_cache_key(name)
try:
return __context__[cache_key]
except KeyError:
pass
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile))
__context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']
return __context__[cache_key]
def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None):
'''
Subscribe to a Topic.
CLI example to subscribe to a topic::
salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint)
log.info('Subscribe {0} {1} to {2} topic'.format(protocol, endpoint, topic))
try:
del __context__[_subscriptions_cache_key(topic)]
except KeyError:
pass
return True
def get_arn(name, region=None, key=None, keyid=None, profile=None):
'''
Returns the full ARN for a given topic name.
CLI example::
salt myminion boto_sns.get_arn mytopic
'''
if name.startswith('arn:aws:sns:'):
return name
account_id = __salt__['boto_iam.get_account_id'](
region=region, key=key, keyid=keyid, profile=profile
)
return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile),
account_id, name)
def _get_region(region=None, profile=None):
if profile and 'region' in profile:
return profile['region']
if not region and __salt__['config.option']('sns.region'):
region = __salt__['config.option']('sns.region')
if not region:
region = 'us-east-1'
return region
def _subscriptions_cache_key(name):
return '{0}_{1}_subscriptions'.format(_cache_get_key(), name)
def _invalidate_cache():
try:
del __context__[_cache_get_key()]
except KeyError:
pass
def _cache_get_key():
return 'boto_sns.topics_cache'
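# Illustrative usage (not part of the original module): the credential profile
# described in the module docstring can also be passed by name on the CLI,
# assuming a 'myprofile' entry exists in pillar or the minion config, e.g.
#
#   salt myminion boto_sns.create mytopic profile=myprofile
#   salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint profile=myprofile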
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Locale dependent formatting and parsing of dates and times.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_TIME``,
* ``LC_ALL``, and
* ``LANG``
"""
from __future__ import division
from datetime import date, datetime, time, timedelta
import re
from babel.core import default_locale, get_global, Locale
from babel.util import UTC
__all__ = ['format_date', 'format_datetime', 'format_time', 'format_timedelta',
'get_timezone_name', 'parse_date', 'parse_datetime', 'parse_time']
__docformat__ = 'restructuredtext en'
LC_TIME = default_locale('LC_TIME')
# Aliases for use in scopes where the modules are shadowed by local variables
date_ = date
datetime_ = datetime
time_ = time
def get_period_names(locale=LC_TIME):
"""Return the names for day periods (AM/PM) used by the locale.
>>> get_period_names(locale='en_US')['am']
u'AM'
:param locale: the `Locale` object, or a locale string
:return: the dictionary of period names
:rtype: `dict`
"""
return Locale.parse(locale).periods
def get_day_names(width='wide', context='format', locale=LC_TIME):
"""Return the day names used by the locale for the specified format.
>>> get_day_names('wide', locale='en_US')[1]
u'Tuesday'
>>> get_day_names('abbreviated', locale='es')[1]
u'mar'
>>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1]
u'D'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
:return: the dictionary of day names
:rtype: `dict`
"""
return Locale.parse(locale).days[context][width]
def get_month_names(width='wide', context='format', locale=LC_TIME):
"""Return the month names used by the locale for the specified format.
>>> get_month_names('wide', locale='en_US')[1]
u'January'
>>> get_month_names('abbreviated', locale='es')[1]
u'ene'
>>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1]
u'J'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
:return: the dictionary of month names
:rtype: `dict`
"""
return Locale.parse(locale).months[context][width]
def get_quarter_names(width='wide', context='format', locale=LC_TIME):
"""Return the quarter names used by the locale for the specified format.
>>> get_quarter_names('wide', locale='en_US')[1]
u'1st quarter'
>>> get_quarter_names('abbreviated', locale='de_DE')[1]
u'Q1'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
:return: the dictionary of quarter names
:rtype: `dict`
"""
return Locale.parse(locale).quarters[context][width]
def get_era_names(width='wide', locale=LC_TIME):
"""Return the era names used by the locale for the specified format.
>>> get_era_names('wide', locale='en_US')[1]
u'Anno Domini'
>>> get_era_names('abbreviated', locale='de_DE')[1]
u'n. Chr.'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param locale: the `Locale` object, or a locale string
:return: the dictionary of era names
:rtype: `dict`
"""
return Locale.parse(locale).eras[width]
def get_date_format(format='medium', locale=LC_TIME):
"""Return the date formatting patterns used by the locale for the specified
format.
>>> get_date_format(locale='en_US')
<DateTimePattern u'MMM d, y'>
>>> get_date_format('full', locale='de_DE')
<DateTimePattern u'EEEE, d. MMMM y'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
:return: the date format pattern
:rtype: `DateTimePattern`
"""
return Locale.parse(locale).date_formats[format]
def get_datetime_format(format='medium', locale=LC_TIME):
"""Return the datetime formatting patterns used by the locale for the
specified format.
>>> get_datetime_format(locale='en_US')
u'{1} {0}'
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
:return: the datetime format pattern
:rtype: `unicode`
"""
patterns = Locale.parse(locale).datetime_formats
if format not in patterns:
format = None
return patterns[format]
def get_time_format(format='medium', locale=LC_TIME):
"""Return the time formatting patterns used by the locale for the specified
format.
>>> get_time_format(locale='en_US')
<DateTimePattern u'h:mm:ss a'>
>>> get_time_format('full', locale='de_DE')
<DateTimePattern u'HH:mm:ss zzzz'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
:return: the time format pattern
:rtype: `DateTimePattern`
"""
return Locale.parse(locale).time_formats[format]
def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME):
"""Return the timezone associated with the given `datetime` object formatted
as string indicating the offset from GMT.
>>> dt = datetime(2007, 4, 1, 15, 30)
>>> get_timezone_gmt(dt, locale='en')
u'GMT+00:00'
>>> from pytz import timezone
>>> tz = timezone('America/Los_Angeles')
>>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz)
>>> get_timezone_gmt(dt, locale='en')
u'GMT-08:00'
>>> get_timezone_gmt(dt, 'short', locale='en')
u'-0800'
The long format depends on the locale; in France, for example, the acronym
UTC is used instead of GMT:
>>> get_timezone_gmt(dt, 'long', locale='fr_FR')
u'UTC-08:00'
:param datetime: the ``datetime`` object; if `None`, the current date and
time in UTC is used
:param width: either "long" or "short"
:param locale: the `Locale` object, or a locale string
:return: the GMT offset representation of the timezone
:rtype: `unicode`
:since: version 0.9
"""
if datetime is None:
datetime = datetime_.utcnow()
elif isinstance(datetime, (int, long)):
datetime = datetime_.utcfromtimestamp(datetime).time()
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
locale = Locale.parse(locale)
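# The offset is rendered as signed hours and minutes; the long form wraps them
# in the locale's GMT pattern, while the short form is the bare numeric offset.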
offset = datetime.tzinfo.utcoffset(datetime)
seconds = offset.days * 24 * 60 * 60 + offset.seconds
hours, seconds = divmod(seconds, 3600)
if width == 'short':
pattern = u'%+03d%02d'
else:
pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
return pattern % (hours, seconds // 60)
def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME):
"""Return a representation of the given timezone using "location format".
The result depends on both the local display name of the country and the
city associated with the time zone:
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> get_timezone_location(tz, locale='de_DE')
u"Kanada (St. John's)"
>>> tz = timezone('America/Mexico_City')
>>> get_timezone_location(tz, locale='de_DE')
u'Mexiko (Mexiko-Stadt)'
If the timezone is associated with a country that uses only a single
timezone, just the localized country name is returned:
>>> tz = timezone('Europe/Berlin')
>>> get_timezone_location(tz, locale='de_DE')
u'Deutschland'
:param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
the timezone; if `None`, the current date and time in
UTC is assumed
:param locale: the `Locale` object, or a locale string
:return: the localized timezone name using location format
:rtype: `unicode`
:since: version 0.9
"""
if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)):
dt = None
tzinfo = UTC
elif isinstance(dt_or_tzinfo, (datetime, time)):
dt = dt_or_tzinfo
if dt.tzinfo is not None:
tzinfo = dt.tzinfo
else:
tzinfo = UTC
else:
dt = None
tzinfo = dt_or_tzinfo
locale = Locale.parse(locale)
if hasattr(tzinfo, 'zone'):
zone = tzinfo.zone
else:
zone = tzinfo.tzname(dt or datetime.utcnow())
# Get the canonical time-zone code
zone = get_global('zone_aliases').get(zone, zone)
info = locale.time_zones.get(zone, {})
# Otherwise, if there is only one timezone for the country, return the
# localized country name
region_format = locale.zone_formats['region']
territory = get_global('zone_territories').get(zone)
if territory not in locale.territories:
territory = 'ZZ' # invalid/unknown
territory_name = locale.territories[territory]
if territory and len(get_global('territory_zones').get(territory, [])) == 1:
return region_format % (territory_name)
# Otherwise, include the city in the output
fallback_format = locale.zone_formats['fallback']
if 'city' in info:
city_name = info['city']
else:
metazone = get_global('meta_zones').get(zone)
metazone_info = locale.meta_zones.get(metazone, {})
if 'city' in metazone_info:
city_name = metazone_info['city']
elif '/' in zone:
city_name = zone.split('/', 1)[1].replace('_', ' ')
else:
city_name = zone.replace('_', ' ')
return region_format % (fallback_format % {
'0': city_name,
'1': territory_name
})
def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False,
locale=LC_TIME):
r"""Return the localized display name for the given timezone. The timezone
may be specified using a ``datetime`` or `tzinfo` object.
>>> from pytz import timezone
>>> dt = time(15, 30, tzinfo=timezone('America/Los_Angeles'))
>>> get_timezone_name(dt, locale='en_US')
u'Pacific Standard Time'
>>> get_timezone_name(dt, width='short', locale='en_US')
u'PST'
If this function gets passed only a `tzinfo` object and no concrete
`datetime`, the returned display name is independent of daylight savings
time. This can be used, for example, for selecting timezones, or to set the
time of events that recur across DST changes:
>>> tz = timezone('America/Los_Angeles')
>>> get_timezone_name(tz, locale='en_US')
u'Pacific Time'
>>> get_timezone_name(tz, 'short', locale='en_US')
u'PT'
If no localized display name for the timezone is available, and the timezone
is associated with a country that uses only a single timezone, the name of
that country is returned, formatted according to the locale:
>>> tz = timezone('Europe/Berlin')
>>> get_timezone_name(tz, locale='de_DE')
u'Deutschland'
>>> get_timezone_name(tz, locale='pt_BR')
u'Hor\xe1rio Alemanha'
On the other hand, if the country uses multiple timezones, the city is also
included in the representation:
>>> tz = timezone('America/St_Johns')
>>> get_timezone_name(tz, locale='de_DE')
u"Kanada (St. John's)"
The `uncommon` parameter can be set to `True` to enable the use of timezone
representations that are not commonly used by the requested locale. For
example, while in French the central European timezone is usually
abbreviated as "HEC", in Canadian French, this abbreviation is not in
common use, so a generic name would be chosen by default:
>>> tz = timezone('Europe/Paris')
>>> get_timezone_name(tz, 'short', locale='fr_CA')
u'France'
>>> get_timezone_name(tz, 'short', uncommon=True, locale='fr_CA')
u'HEC'
:param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
the timezone; if a ``tzinfo`` object is used, the
resulting display name will be generic, i.e.
independent of daylight savings time; if `None`, the
current date in UTC is assumed
:param width: either "long" or "short"
:param uncommon: whether even uncommon timezone abbreviations should be used
:param locale: the `Locale` object, or a locale string
:return: the timezone display name
:rtype: `unicode`
:since: version 0.9
:see: `LDML Appendix J: Time Zone Display Names
<http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_
"""
if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)):
dt = None
tzinfo = UTC
elif isinstance(dt_or_tzinfo, (datetime, time)):
dt = dt_or_tzinfo
if dt.tzinfo is not None:
tzinfo = dt.tzinfo
else:
tzinfo = UTC
else:
dt = None
tzinfo = dt_or_tzinfo
locale = Locale.parse(locale)
if hasattr(tzinfo, 'zone'):
zone = tzinfo.zone
else:
zone = tzinfo.tzname(dt)
# Get the canonical time-zone code
zone = get_global('zone_aliases').get(zone, zone)
info = locale.time_zones.get(zone, {})
# Try explicitly translated zone names first
if width in info:
if dt is None:
field = 'generic'
else:
dst = tzinfo.dst(dt)
if dst is None:
field = 'generic'
elif dst == 0:
field = 'standard'
else:
field = 'daylight'
if field in info[width]:
return info[width][field]
metazone = get_global('meta_zones').get(zone)
if metazone:
metazone_info = locale.meta_zones.get(metazone, {})
if width in metazone_info and (uncommon or metazone_info.get('common')):
if dt is None:
field = 'generic'
else:
field = tzinfo.dst(dt) and 'daylight' or 'standard'
if field in metazone_info[width]:
return metazone_info[width][field]
# If we have a concrete datetime, we assume that the result can't be
# independent of daylight savings time, so we return the GMT offset
if dt is not None:
return get_timezone_gmt(dt, width=width, locale=locale)
return get_timezone_location(dt_or_tzinfo, locale=locale)
def format_date(date=None, format='medium', locale=LC_TIME):
"""Return a date formatted according to the given pattern.
>>> d = date(2007, 04, 01)
>>> format_date(d, locale='en_US')
u'Apr 1, 2007'
>>> format_date(d, format='full', locale='de_DE')
u'Sonntag, 1. April 2007'
If you don't want to use the locale default formats, you can specify a
custom date pattern:
>>> format_date(d, "EEE, MMM d, ''yy", locale='en')
u"Sun, Apr 1, '07"
:param date: the ``date`` or ``datetime`` object; if `None`, the current
date is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param locale: a `Locale` object or a locale identifier
:rtype: `unicode`
:note: If the pattern contains time fields, an `AttributeError` will be
raised when trying to apply the formatting. This is also true if
the value of ``date`` parameter is actually a ``datetime`` object,
as this function automatically converts that to a ``date``.
"""
if date is None:
date = date_.today()
elif isinstance(date, datetime):
date = date.date()
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
format = get_date_format(format, locale=locale)
pattern = parse_pattern(format)
return pattern.apply(date, locale)
def format_datetime(datetime=None, format='medium', tzinfo=None,
locale=LC_TIME):
r"""Return a date formatted according to the given pattern.
>>> dt = datetime(2007, 04, 01, 15, 30)
>>> format_datetime(dt, locale='en_US')
u'Apr 1, 2007 3:30:00 PM'
For any pattern requiring the display of the time-zone, the third-party
``pytz`` package is needed to explicitly specify the time-zone:
>>> from pytz import timezone
>>> format_datetime(dt, 'full', tzinfo=timezone('Europe/Paris'),
... locale='fr_FR')
u'dimanche 1 avril 2007 17:30:00 Heure avanc\xe9e de l\u2019Europe centrale'
>>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
... tzinfo=timezone('US/Eastern'), locale='en')
u'2007.04.01 AD at 11:30:00 EDT'
:param datetime: the `datetime` object; if `None`, the current date and
time is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param tzinfo: the timezone to apply to the time for display
:param locale: a `Locale` object or a locale identifier
:rtype: `unicode`
"""
if datetime is None:
datetime = datetime_.utcnow()
elif isinstance(datetime, (int, long, float)):
datetime = datetime_.utcfromtimestamp(datetime)
elif isinstance(datetime, time):
datetime = datetime_.combine(date.today(), datetime)
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
if tzinfo is not None:
datetime = datetime.astimezone(tzinfo)
if hasattr(tzinfo, 'normalize'): # pytz
datetime = tzinfo.normalize(datetime)
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
return get_datetime_format(format, locale=locale) \
.replace('{0}', format_time(datetime, format, tzinfo=None,
locale=locale)) \
.replace('{1}', format_date(datetime, format, locale=locale))
else:
return parse_pattern(format).apply(datetime, locale)
def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME):
r"""Return a time formatted according to the given pattern.
>>> t = time(15, 30)
>>> format_time(t, locale='en_US')
u'3:30:00 PM'
>>> format_time(t, format='short', locale='de_DE')
u'15:30'
If you don't want to use the locale default formats, you can specify a
custom time pattern:
>>> format_time(t, "hh 'o''clock' a", locale='en')
u"03 o'clock PM"
For any pattern requiring the display of the time-zone, the third-party
``pytz`` package is needed to explicitly specify the time-zone:
>>> from pytz import timezone
>>> t = datetime(2007, 4, 1, 15, 30)
>>> tzinfo = timezone('Europe/Paris')
>>> t = tzinfo.localize(t)
>>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
u'15:30:00 Heure avanc\xe9e de l\u2019Europe centrale'
>>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=timezone('US/Eastern'),
... locale='en')
u"09 o'clock AM, Eastern Daylight Time"
As that example shows, when this function gets passed a
``datetime.datetime`` value, the actual time in the formatted string is
adjusted to the timezone specified by the `tzinfo` parameter. If the
``datetime`` is "naive" (i.e. it has no associated timezone information),
it is assumed to be in UTC.
These timezone calculations are **not** performed if the value is of type
``datetime.time``, as without date information there's no way to determine
what a given time would translate to in a different timezone without
information about whether daylight savings time is in effect or not. This
means that time values are left as-is, and the value of the `tzinfo`
parameter is only used to display the timezone name if needed:
>>> t = time(15, 30)
>>> format_time(t, format='full', tzinfo=timezone('Europe/Paris'),
... locale='fr_FR')
u'15:30:00 Heure normale de l\u2019Europe centrale'
>>> format_time(t, format='full', tzinfo=timezone('US/Eastern'),
... locale='en_US')
u'3:30:00 PM Eastern Standard Time'
:param time: the ``time`` or ``datetime`` object; if `None`, the current
time in UTC is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param tzinfo: the time-zone to apply to the time for display
:param locale: a `Locale` object or a locale identifier
:rtype: `unicode`
:note: If the pattern contains date fields, an `AttributeError` will be
raised when trying to apply the formatting. This is also true if
the value of ``time`` parameter is actually a ``datetime`` object,
as this function automatically converts that to a ``time``.
"""
if time is None:
time = datetime.utcnow()
elif isinstance(time, (int, long, float)):
time = datetime.utcfromtimestamp(time)
if time.tzinfo is None:
time = time.replace(tzinfo=UTC)
if isinstance(time, datetime):
if tzinfo is not None:
time = time.astimezone(tzinfo)
if hasattr(tzinfo, 'normalize'): # pytz
time = tzinfo.normalize(time)
time = time.timetz()
elif tzinfo is not None:
time = time.replace(tzinfo=tzinfo)
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
format = get_time_format(format, locale=locale)
return parse_pattern(format).apply(time, locale)
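# Approximate number of seconds per unit, from largest to smallest; months and
# years use the rough calendar approximations of 30 and 365 days.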
TIMEDELTA_UNITS = (
('year', 3600 * 24 * 365),
('month', 3600 * 24 * 30),
('week', 3600 * 24 * 7),
('day', 3600 * 24),
('hour', 3600),
('minute', 60),
('second', 1)
)
def format_timedelta(delta, granularity='second', threshold=.85, locale=LC_TIME):
"""Return a time delta according to the rules of the given locale.
>>> format_timedelta(timedelta(weeks=12), locale='en_US')
u'3 mths'
>>> format_timedelta(timedelta(seconds=1), locale='es')
u'1 s'
The granularity parameter can be provided to alter the lowest unit
presented, which defaults to a second.
>>> format_timedelta(timedelta(hours=3), granularity='day',
... locale='en_US')
u'1 day'
The threshold parameter can be used to determine at which value the
presentation switches to the next higher unit. A higher threshold factor
means the presentation will switch later. For example:
>>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US')
u'1 day'
>>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US')
u'23 hrs'
:param delta: a ``timedelta`` object representing the time difference to
format, or the delta in seconds as an `int` value
:param granularity: determines the smallest unit that should be displayed,
the value can be one of "year", "month", "week", "day",
"hour", "minute" or "second"
:param threshold: factor that determines at which point the presentation
switches to the next higher unit
:param locale: a `Locale` object or a locale identifier
:rtype: `unicode`
"""
if isinstance(delta, timedelta):
seconds = int((delta.days * 86400) + delta.seconds)
else:
seconds = delta
locale = Locale.parse(locale)
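# Walk the units from largest to smallest and use the first one whose value
# reaches the threshold, stopping at the requested granularity at the latest.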
for unit, secs_per_unit in TIMEDELTA_UNITS:
value = abs(seconds) / secs_per_unit
if value >= threshold or unit == granularity:
if unit == granularity and value > 0:
value = max(1, value)
value = int(round(value))
plural_form = locale.plural_form(value)
pattern = locale._data['unit_patterns'][unit][plural_form]
return pattern.replace('{0}', str(value))
return u''
def parse_date(string, locale=LC_TIME):
"""Parse a date from a string.
This function uses the date format for the locale as a hint to determine
the order in which the date fields appear in the string.
>>> parse_date('4/1/04', locale='en_US')
datetime.date(2004, 4, 1)
>>> parse_date('01.04.2004', locale='de_DE')
datetime.date(2004, 4, 1)
:param string: the string containing the date
:param locale: a `Locale` object or a locale identifier
:return: the parsed date
:rtype: `date`
"""
# TODO: try ISO format first?
format = get_date_format(locale=locale).pattern.lower()
year_idx = format.index('y')
month_idx = format.index('m')
if month_idx < 0:
month_idx = format.index('l')
day_idx = format.index('d')
indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')]
indexes.sort()
indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
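# 'indexes' now maps 'Y', 'M' and 'D' to their relative order in the locale's
# date pattern, so the numbers found in the string below can be assigned to
# the right fields.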
# FIXME: this currently only supports numbers, but should also support month
# names, both in the requested locale, and english
numbers = re.findall('(\d+)', string)
year = numbers[indexes['Y']]
if len(year) == 2:
year = 2000 + int(year)
else:
year = int(year)
month = int(numbers[indexes['M']])
day = int(numbers[indexes['D']])
if month > 12:
month, day = day, month
return date(year, month, day)
def parse_datetime(string, locale=LC_TIME):
"""Parse a date and time from a string.
This function uses the date and time formats for the locale as a hint to
determine the order in which the time fields appear in the string.
:param string: the string containing the date and time
:param locale: a `Locale` object or a locale identifier
:return: the parsed date/time
:rtype: `datetime`
"""
raise NotImplementedError
def parse_time(string, locale=LC_TIME):
"""Parse a time from a string.
This function uses the time format for the locale as a hint to determine
the order in which the time fields appear in the string.
>>> parse_time('15:30:00', locale='en_US')
datetime.time(15, 30)
:param string: the string containing the time
:param locale: a `Locale` object or a locale identifier
:return: the parsed time
:rtype: `time`
"""
# TODO: try ISO format first?
format = get_time_format(locale=locale).pattern.lower()
hour_idx = format.index('h')
if hour_idx < 0:
hour_idx = format.index('k')
min_idx = format.index('m')
sec_idx = format.index('s')
indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')]
indexes.sort()
indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
# FIXME: support 12 hour clock, and 0-based hour specification
# and seconds should be optional, maybe minutes too
# oh, and time-zones, of course
numbers = re.findall('(\d+)', string)
hour = int(numbers[indexes['H']])
minute = int(numbers[indexes['M']])
second = int(numbers[indexes['S']])
return time(hour, minute, second)
class DateTimePattern(object):
def __init__(self, pattern, format):
self.pattern = pattern
self.format = format
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.pattern)
def __unicode__(self):
return self.pattern
def __mod__(self, other):
if type(other) is not DateTimeFormat:
return NotImplemented
return self.format % other
def apply(self, datetime, locale):
return self % DateTimeFormat(datetime, locale)
class DateTimeFormat(object):
def __init__(self, value, locale):
assert isinstance(value, (date, datetime, time))
if isinstance(value, (datetime, time)) and value.tzinfo is None:
value = value.replace(tzinfo=UTC)
self.value = value
self.locale = Locale.parse(locale)
def __getitem__(self, name):
char = name[0]
num = len(name)
if char == 'G':
return self.format_era(char, num)
elif char in ('y', 'Y', 'u'):
return self.format_year(char, num)
elif char in ('Q', 'q'):
return self.format_quarter(char, num)
elif char in ('M', 'L'):
return self.format_month(char, num)
elif char in ('w', 'W'):
return self.format_week(char, num)
elif char == 'd':
return self.format(self.value.day, num)
elif char == 'D':
return self.format_day_of_year(num)
elif char == 'F':
return self.format_day_of_week_in_month()
elif char in ('E', 'e', 'c'):
return self.format_weekday(char, num)
elif char == 'a':
return self.format_period(char)
elif char == 'h':
if self.value.hour % 12 == 0:
return self.format(12, num)
else:
return self.format(self.value.hour % 12, num)
elif char == 'H':
return self.format(self.value.hour, num)
elif char == 'K':
return self.format(self.value.hour % 12, num)
elif char == 'k':
if self.value.hour == 0:
return self.format(24, num)
else:
return self.format(self.value.hour, num)
elif char == 'm':
return self.format(self.value.minute, num)
elif char == 's':
return self.format(self.value.second, num)
elif char == 'S':
return self.format_frac_seconds(num)
elif char == 'A':
return self.format_milliseconds_in_day(num)
elif char in ('z', 'Z', 'v', 'V'):
return self.format_timezone(char, num)
else:
raise KeyError('Unsupported date/time field %r' % char)
def format_era(self, char, num):
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
era = int(self.value.year >= 0)
return get_era_names(width, self.locale)[era]
def format_year(self, char, num):
value = self.value.year
if char.isupper():
week = self.get_week_number(self.get_day_of_year())
if week == 0:
value -= 1
year = self.format(value, num)
if num == 2:
year = year[-2:]
return year
def format_quarter(self, char, num):
quarter = (self.value.month - 1) // 3 + 1
if num <= 2:
return ('%%0%dd' % num) % quarter
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'Q': 'format', 'q': 'stand-alone'}[char]
return get_quarter_names(width, context, self.locale)[quarter]
def format_month(self, char, num):
if num <= 2:
return ('%%0%dd' % num) % self.value.month
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'M': 'format', 'L': 'stand-alone'}[char]
return get_month_names(width, context, self.locale)[self.value.month]
def format_week(self, char, num):
if char.islower(): # week of year
day_of_year = self.get_day_of_year()
week = self.get_week_number(day_of_year)
if week == 0:
date = self.value - timedelta(days=day_of_year)
week = self.get_week_number(self.get_day_of_year(date),
date.weekday())
return self.format(week, num)
else: # week of month
week = self.get_week_number(self.value.day)
if week == 0:
date = self.value - timedelta(days=self.value.day)
week = self.get_week_number(date.day, date.weekday())
pass
return '%d' % week
def format_weekday(self, char, num):
if num < 3:
if char.islower():
value = 7 - self.locale.first_week_day + self.value.weekday()
return self.format(value % 7 + 1, num)
num = 3
weekday = self.value.weekday()
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num]
return get_day_names(width, context, self.locale)[weekday]
def format_day_of_year(self, num):
return self.format(self.get_day_of_year(), num)
def format_day_of_week_in_month(self):
return '%d' % ((self.value.day - 1) // 7 + 1)
def format_period(self, char):
period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)]
return get_period_names(locale=self.locale)[period]
def format_frac_seconds(self, num):
value = str(self.value.microsecond)
return self.format(round(float('.%s' % value), num) * 10**num, num)
def format_milliseconds_in_day(self, num):
msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
self.value.minute * 60000 + self.value.hour * 3600000
return self.format(msecs, num)
def format_timezone(self, char, num):
width = {3: 'short', 4: 'long'}[max(3, num)]
if char == 'z':
return get_timezone_name(self.value, width, locale=self.locale)
elif char == 'Z':
return get_timezone_gmt(self.value, width, locale=self.locale)
elif char == 'v':
return get_timezone_name(self.value.tzinfo, width,
locale=self.locale)
elif char == 'V':
if num == 1:
return get_timezone_name(self.value.tzinfo, width,
uncommon=True, locale=self.locale)
return get_timezone_location(self.value.tzinfo, locale=self.locale)
def format(self, value, length):
return ('%%0%dd' % length) % value
def get_day_of_year(self, date=None):
if date is None:
date = self.value
return (date - date.replace(month=1, day=1)).days + 1
def get_week_number(self, day_of_period, day_of_week=None):
"""Return the number of the week of a day within a period. This may be
the week number in a year or the week number in a month.
Usually this will return a value equal to or greater than 1, but if the
first week of the period is so short that it actually counts as the last
week of the previous period, this function will return 0.
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
>>> format.get_week_number(6)
1
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
>>> format.get_week_number(6)
2
:param day_of_period: the number of the day in the period (usually
either the day of month or the day of year)
:param day_of_week: the week day; if omitted, the week day of the
current date is assumed
"""
if day_of_week is None:
day_of_week = self.value.weekday()
first_day = (day_of_week - self.locale.first_week_day -
day_of_period + 1) % 7
if first_day < 0:
first_day += 7
week_number = (day_of_period + first_day - 1) // 7
if 7 - first_day >= self.locale.min_week_days:
week_number += 1
return week_number
PATTERN_CHARS = {
'G': [1, 2, 3, 4, 5], # era
'y': None, 'Y': None, 'u': None, # year
'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter
'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month
'w': [1, 2], 'W': [1], # week
'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day
'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day
'a': [1], # period
'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour
'm': [1, 2], # minute
's': [1, 2], 'S': None, 'A': None, # second
'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone
}
def parse_pattern(pattern):
"""Parse date, time, and datetime format patterns.
>>> parse_pattern("MMMMd").format
u'%(MMMM)s%(d)s'
>>> parse_pattern("MMM d, yyyy").format
u'%(MMM)s %(d)s, %(yyyy)s'
Pattern can contain literal strings in single quotes:
>>> parse_pattern("H:mm' Uhr 'z").format
u'%(H)s:%(mm)s Uhr %(z)s'
An actual single quote can be used by using two adjacent single quote
characters:
>>> parse_pattern("hh' o''clock'").format
u"%(hh)s o'clock"
:param pattern: the formatting pattern to parse
"""
if type(pattern) is DateTimePattern:
return pattern
result = []
quotebuf = None
charbuf = []
fieldchar = ['']
fieldnum = [0]
def append_chars():
result.append(''.join(charbuf).replace('%', '%%'))
del charbuf[:]
def append_field():
limit = PATTERN_CHARS[fieldchar[0]]
if limit and fieldnum[0] not in limit:
raise ValueError('Invalid length for field: %r'
% (fieldchar[0] * fieldnum[0]))
result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0]))
fieldchar[0] = ''
fieldnum[0] = 0
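# Two adjacent single quotes encode a literal quote; they are mapped to NUL
# while scanning so the quote state machine ignores them, and restored at the
# end.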
for idx, char in enumerate(pattern.replace("''", '\0')):
if quotebuf is None:
if char == "'": # quote started
if fieldchar[0]:
append_field()
elif charbuf:
append_chars()
quotebuf = []
elif char in PATTERN_CHARS:
if charbuf:
append_chars()
if char == fieldchar[0]:
fieldnum[0] += 1
else:
if fieldchar[0]:
append_field()
fieldchar[0] = char
fieldnum[0] = 1
else:
if fieldchar[0]:
append_field()
charbuf.append(char)
elif quotebuf is not None:
if char == "'": # end of quote
charbuf.extend(quotebuf)
quotebuf = None
else: # inside quote
quotebuf.append(char)
if fieldchar[0]:
append_field()
elif charbuf:
append_chars()
return DateTimePattern(pattern, u''.join(result).replace('\0', "'"))
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
FileInstall: Polls for changes on files in a directory and notifies listeners
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import os
import threading
import zlib
# Pelix
from pelix.ipopo.decorators import (
ComponentFactory,
Provides,
Requires,
Validate,
Invalidate,
Instantiate,
BindField,
UnbindField,
UpdateField,
Property,
)
import pelix.services as services
import pelix.threadpool
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(services.SERVICE_FILEINSTALL)
@Requires(
"_listeners",
services.SERVICE_FILEINSTALL_LISTENERS,
aggregate=True,
optional=True,
)
@Property("_poll_time", "poll.time", 1)
@Instantiate("pelix-services-file-install")
class FileInstall(object):
"""
Polls folders to look for files modifications
"""
def __init__(self):
"""
Sets up members
"""
# Listeners (injected)
self._listeners = []
# Folder -> [listeners] (computed)
self._folder_listeners = {}
# Polling delta time (1 second by default)
self._poll_time = 1
# Lock
self.__lock = threading.RLock()
# Single thread task pool to notify listeners
self.__pool = pelix.threadpool.ThreadPool(
1, logname="FileInstallNotifier"
)
# 1 thread per watched folder (folder -> Thread)
self.__threads = {}
# Thread stoppers (folder -> Event)
self.__stoppers = {}
# Validation flag
self.__validated = False
@Validate
def _validate(self, _):
"""
Component validated
"""
with self.__lock:
# Start the task pool
self.__pool.start()
# Update the flag
self.__validated = True
@Invalidate
def _invalidate(self, _):
"""
Component invalidated
"""
with self.__lock:
# Update the flag
self.__validated = False
# Stop all threads
for event in set(self.__stoppers.values()):
event.set()
# Wait for them
for thread in set(self.__threads.values()):
thread.join()
# Stop the task pool
self.__pool.stop()
# Clean up
self.__stoppers.clear()
self.__threads.clear()
@BindField("_listeners")
def _bind_listener(self, _, svc, svc_ref):
"""
A new listener is bound
"""
with self.__lock:
folder = svc_ref.get_property(services.PROP_FILEINSTALL_FOLDER)
if folder:
# Register the listener for this service
self.add_listener(folder, svc)
@UpdateField("_listeners")
def _update_field(self, _, svc, svc_ref, old_props):
"""
A bound listener has been updated
"""
with self.__lock:
old_folder = old_props.get(services.PROP_FILEINSTALL_FOLDER)
new_folder = svc_ref.get_property(services.PROP_FILEINSTALL_FOLDER)
if old_folder != new_folder:
# Folder changed
self.remove_listener(old_folder, svc)
self.add_listener(new_folder, svc)
@UnbindField("_listeners")
def _unbind_listener(self, _, svc, svc_ref):
"""
A listener is gone
"""
with self.__lock:
folder = svc_ref.get_property(services.PROP_FILEINSTALL_FOLDER)
if folder:
# Remove the listener
self.remove_listener(folder, svc)
def add_listener(self, folder, listener):
"""
Manual registration of a folder listener
:param folder: Path to the folder to watch
:param listener: Listener to register
:return: True if the listener has been registered
"""
with self.__lock:
# Simply add the listener
if folder:
try:
listeners = self._folder_listeners[folder]
except KeyError:
# Unknown folder
listeners = self._folder_listeners[folder] = set()
# Start a new thread
event = self.__stoppers[folder] = threading.Event()
thread = threading.Thread(
target=self.__watch,
args=(folder, event),
name="FileInstall-{0}".format(folder),
)
thread.daemon = True
self.__threads[folder] = thread
thread.start()
listeners.add(listener)
return True
return False
def remove_listener(self, folder, listener):
"""
Manual unregistration of a folder listener.
:param folder: Path to the folder the listener watched
:param listener: Listener to unregister
:raise ValueError: The listener wasn't watching this folder
"""
with self.__lock:
# Remove the listener
listeners = self._folder_listeners[folder]
listeners.remove(listener)
if not listeners:
try:
# Stop the corresponding thread
self.__stoppers.pop(folder).set()
except KeyError:
# Component invalidated
pass
else:
# Wait for the thread to stop
self.__threads.pop(folder).join()
# No more listener for this folder
del self._folder_listeners[folder]
def __notify(self, folder, added, updated, deleted):
"""
Notifies listeners that files in a folder have been modified
:param folder: Folder where changes occurred
:param added: Names of added files
:param updated: Names of modified files
:param deleted: Names of removed files
"""
with self.__lock:
try:
# Get a copy of the listeners for this folder
listeners = self._folder_listeners[folder].copy()
except KeyError:
# No (more) listeners: do nothing
return
for listener in listeners:
try:
listener.folder_change(folder, added, updated, deleted)
except Exception as ex:
_logger.exception("Error notifying a folder listener: %s", ex)
@staticmethod
def __get_checksum(filepath):
"""
Returns the checksum (Adler32) of the given file
:param filepath: Path to the file
:return: The checksum (int) of the given file
:raise OSError: File not accessible
:raise IOError: File not readable
"""
# Don't forget to open the file in binary mode
with open(filepath, "rb") as filep:
# Return the checksum of the given file
return zlib.adler32(filep.read())
def __get_file_info(self, folder, filename):
"""
Returns the (mtime, checksum) tuple for the given file
:param folder: Path to the parent folder
:param filename: Base name of the file
:return: A tuple containing file information
:raise OSError: File not accessible
:raise IOError: File not readable
"""
filepath = os.path.join(folder, filename)
return os.path.getmtime(filepath), self.__get_checksum(filepath)
def __check_different(self, folder, filename, file_info, updated):
"""
Checks if the given file has changed since the previous check
:param folder: Path to the parent folder
:param filename: Base name of the file
:param file_info: Current information about the file
:param updated: Set of updated files, where the file name might be
added
:return: The (updated) file information tuple
:raise OSError: File not accessible
:raise IOError: File not readable
"""
# Compute the file path
filepath = os.path.join(folder, filename)
# Get the previous modification time
previous_mtime = file_info[0]
# Get the new modification time
mtime = os.path.getmtime(filepath)
if previous_mtime == mtime:
# No modification (no need to compute the checksum)
return file_info
# Get the previous checksum
previous_checksum = file_info[1]
# Compute the new one
checksum = self.__get_checksum(filepath)
if previous_checksum == checksum:
# No real modification, update file info
return mtime, checksum
# File modified
updated.add(filename)
return mtime, checksum
def __watch(self, folder, stopper):
"""
Loop that looks for changes in the given folder
:param folder: Folder to watch
:param stopper: An Event object that will stop the loop once set
"""
# File name -> (modification time, checksum)
previous_info = {}
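# Event.wait() doubles as the poll delay: it returns True as soon as the
# stopper event is set, which ends the loop.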
while not stopper.wait(self._poll_time) and not stopper.is_set():
if not os.path.exists(folder):
# Nothing to do yet
continue
# Look for files
filenames = {
filename
for filename in os.listdir(folder)
if os.path.isfile(os.path.join(folder, filename))
}
# Prepare the sets
added = set()
updated = set()
deleted = set(previous_info.keys()).difference(filenames)
# Compute differences
for filename in filenames:
try:
# Get previous information
file_info = previous_info[filename]
except KeyError:
# Unknown file: added one
added.add(filename)
previous_info[filename] = self.__get_file_info(
folder, filename
)
else:
try:
# Known file name
new_info = self.__check_different(
folder, filename, file_info, updated
)
# Store new information
previous_info[filename] = new_info
except (IOError, OSError):
# Error reading file, do nothing
pass
# Remove information about deleted files
for filename in deleted:
del previous_info[filename]
if added or updated or deleted:
# Something changed: notify listeners
self.__pool.enqueue(
self.__notify, folder, added, updated, deleted
)
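# Illustrative listener sketch (not part of the original module): a minimal
# component that FileInstall would notify about changes in a watched folder.
# It only relies on the constants and the folder_change() signature used above;
# the folder path '/tmp/watched' and the component names are assumptions.
#
#   @ComponentFactory()
#   @Provides(services.SERVICE_FILEINSTALL_LISTENERS)
#   @Property("_folder", services.PROP_FILEINSTALL_FOLDER, "/tmp/watched")
#   @Instantiate("sample-folder-listener")
#   class SampleFolderListener(object):
#       def folder_change(self, folder, added, updated, deleted):
#           _logger.info("%s: added=%s updated=%s deleted=%s",
#                        folder, added, updated, deleted)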
|
|
import csv
import json
import cStringIO
import time
import pystache
from flask import make_response, request
from flask_login import current_user
from flask_restful import abort
import xlsxwriter
from redash import models, settings, utils
from redash.tasks import QueryTask, record_event
from redash.permissions import require_permission, not_view_only, has_access, require_access, view_only
from redash.handlers.base import BaseResource, get_object_or_404
from redash.utils import collect_query_parameters, collect_parameters_from_request
from redash.tasks.queries import enqueue_query
def error_response(message):
return {'job': {'status': 4, 'error': message}}, 400
def run_query(data_source, parameter_values, query_text, query_id, max_age=0):
query_parameters = set(collect_query_parameters(query_text))
missing_params = set(query_parameters) - set(parameter_values.keys())
if missing_params:
return error_response('Missing parameter value for: {}'.format(", ".join(missing_params)))
if data_source.paused:
if data_source.pause_reason:
message = '{} is paused ({}). Please try later.'.format(data_source.name, data_source.pause_reason)
else:
message = '{} is paused. Please try later.'.format(data_source.name)
return error_response(message)
if query_parameters:
query_text = pystache.render(query_text, parameter_values)
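# max_age == 0 forces a fresh execution; otherwise a cached result is reused
# when one matching the max_age constraint exists.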
if max_age == 0:
query_result = None
else:
query_result = models.QueryResult.get_latest(data_source, query_text, max_age)
if query_result:
return {'query_result': query_result.to_dict()}
else:
job = enqueue_query(query_text, data_source, current_user.id, metadata={"Username": current_user.email, "Query ID": query_id})
return {'job': job.to_dict()}
class QueryResultListResource(BaseResource):
@require_permission('execute_query')
def post(self):
"""
Execute a query (or retrieve recent results).
:qparam string query: The query text to execute
:qparam number query_id: The query object to update with the result (optional)
:qparam number max_age: If query results less than `max_age` seconds old are available, return them, otherwise execute the query; if omitted, always execute
:qparam number data_source_id: ID of data source to query
"""
params = request.get_json(force=True)
parameter_values = collect_parameters_from_request(request.args)
query = params['query']
max_age = int(params.get('max_age', -1))
query_id = params.get('query_id', 'adhoc')
data_source = models.DataSource.get_by_id_and_org(params.get('data_source_id'), self.current_org)
if not has_access(data_source.groups, self.current_user, not_view_only):
return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403
self.record_event({
'action': 'execute_query',
'timestamp': int(time.time()),
'object_id': data_source.id,
'object_type': 'data_source',
'query': query
})
return run_query(data_source, parameter_values, query, query_id, max_age)
ONE_YEAR = 60 * 60 * 24 * 365.25
class QueryResultResource(BaseResource):
@staticmethod
def add_cors_headers(headers):
if 'Origin' in request.headers:
origin = request.headers['Origin']
if set(['*', origin]) & settings.ACCESS_CONTROL_ALLOW_ORIGIN:
headers['Access-Control-Allow-Origin'] = origin
headers['Access-Control-Allow-Credentials'] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()
@require_permission('view_query')
def options(self, query_id=None, query_result_id=None, filetype='json'):
headers = {}
self.add_cors_headers(headers)
if settings.ACCESS_CONTROL_REQUEST_METHOD:
headers['Access-Control-Request-Method'] = settings.ACCESS_CONTROL_REQUEST_METHOD
if settings.ACCESS_CONTROL_ALLOW_HEADERS:
headers['Access-Control-Allow-Headers'] = settings.ACCESS_CONTROL_ALLOW_HEADERS
return make_response("", 200, headers)
@require_permission('view_query')
def get(self, query_id=None, query_result_id=None, filetype='json'):
"""
Retrieve query results.
:param number query_id: The ID of the query whose results should be fetched
:param number query_result_id: the ID of the query result to fetch
:param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. Defaults to 'json'.
:<json number id: Query result ID
:<json string query: Query that produced this result
:<json string query_hash: Hash code for query text
:<json object data: Query output
:<json number data_source_id: ID of data source that produced this result
:<json number runtime: Length of execution time in seconds
:<json string retrieved_at: Query retrieval date/time, in ISO format
"""
# TODO:
# This method handles two cases: retrieving result by id & retrieving result by query id.
# They need to be split, as they have different logic (for example, retrieving by query id
# should check for query parameters and shouldn't cache the result).
should_cache = query_result_id is not None
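# Only results fetched by an explicit result id get the long-lived
# Cache-Control header added below; results resolved via a query id can change
# whenever the query is re-run.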
if query_result_id is None and query_id is not None:
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
if query:
query_result_id = query.latest_query_data_id
if query_result_id:
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
else:
query_result = None
if query_result:
require_access(query_result.data_source.groups, self.current_user, view_only)
if isinstance(self.current_user, models.ApiUser):
event = {
'user_id': None,
'org_id': self.current_org.id,
'action': 'api_get',
'timestamp': int(time.time()),
'api_key': self.current_user.name,
'file_type': filetype,
'user_agent': request.user_agent.string,
'ip': request.remote_addr
}
if query_id:
event['object_type'] = 'query'
event['object_id'] = query_id
else:
event['object_type'] = 'query_result'
event['object_id'] = query_result_id
record_event.delay(event)
if filetype == 'json':
response = self.make_json_response(query_result)
elif filetype == 'xlsx':
response = self.make_excel_response(query_result)
else:
response = self.make_csv_response(query_result)
if len(settings.ACCESS_CONTROL_ALLOW_ORIGIN) > 0:
self.add_cors_headers(response.headers)
if should_cache:
response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)
return response
else:
abort(404, message='No cached result found for this query.')
def make_json_response(self, query_result):
data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
headers = {'Content-Type': "application/json"}
return make_response(data, 200, headers)
@staticmethod
def make_csv_response(query_result):
s = cStringIO.StringIO()
query_data = json.loads(query_result.data)
writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']])
writer.writer = utils.UnicodeWriter(s)
writer.writeheader()
for row in query_data['rows']:
writer.writerow(row)
headers = {'Content-Type': "text/csv; charset=UTF-8"}
return make_response(s.getvalue(), 200, headers)
@staticmethod
def make_excel_response(query_result):
s = cStringIO.StringIO()
query_data = json.loads(query_result.data)
book = xlsxwriter.Workbook(s)
sheet = book.add_worksheet("result")
column_names = []
for (c, col) in enumerate(query_data['columns']):
sheet.write(0, c, col['name'])
column_names.append(col['name'])
for (r, row) in enumerate(query_data['rows']):
for (c, name) in enumerate(column_names):
sheet.write(r + 1, c, row.get(name))
book.close()
headers = {'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
return make_response(s.getvalue(), 200, headers)
class JobResource(BaseResource):
def get(self, job_id):
"""
Retrieve info about a running query job.
"""
job = QueryTask(job_id=job_id)
return {'job': job.to_dict()}
def delete(self, job_id):
"""
Cancel a query job in progress.
"""
job = QueryTask(job_id=job_id)
job.cancel()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalDiagPlusLowRankTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testDiagBroadcastBothBatchAndEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [1], event_shape: []
identity_multiplier = np.array([5.])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 5, 0],
[0, 4 + 5]],
[[5 + 5, 0],
[0, 6 + 5]]]),
dist.scale.to_dense().eval())
def testDiagBroadcastBothBatchAndEvent2(self):
# This test differs from `testDiagBroadcastBothBatchAndEvent` in that it
# broadcasts batch_shape's from both the `scale_diag` and
# `scale_identity_multiplier` args.
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3, 1], event_shape: []
identity_multiplier = np.array([[5.], [4], [3]])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllEqual(
[3, 3, 2, 2],
dist.scale.to_dense().get_shape())
def testDiagBroadcastOnlyEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 4, 0],
[0, 4 + 4]],
[[5 + 3, 0],
[0, 6 + 3]]]), # shape: [3, 2, 2]
dist.scale.to_dense().eval())
def testDiagBroadcastMultiplierAndLoc(self):
# batch_shape: [], event_shape: [3]
loc = np.array([1., 0, -1])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[5, 0, 0],
[0, 5, 0],
[0, 0, 5]],
[[4, 0, 0],
[0, 4, 0],
[0, 0, 4]],
[[3, 0, 0],
[0, 3, 0],
[0, 0, 3]]]),
dist.scale.to_dense().eval())
def testMean(self):
mu = [-1.0, 1.0]
diag_large = [1.0, 5.0]
v = [[2.0], [3.0]]
diag_small = [3.0]
with self.test_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testSample(self):
    # TODO(jvdillon): This test should be the basis of a new test fixture
    # which is applied to every distribution. When we make this fixture,
    # we'll also separate the analytical- and sample-based tests, as well as
    # the tests for each function. For now, we group things so we can recycle
    # one batch of samples (thus saving resources).
mu = np.array([-1., 1, 0.5], dtype=np.float32)
diag_large = np.array([1., 0.5, 0.75], dtype=np.float32)
diag_small = np.array([-1.1, 1.2], dtype=np.float32)
v = np.array([[0.7, 0.8],
[0.9, 1],
[0.5, 0.6]], dtype=np.float32) # shape: [k, r] = [3, 2]
true_mean = mu
true_scale = np.diag(diag_large) + np.matmul(np.matmul(
v, np.diag(diag_small)), v.T)
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
true_det_covariance = np.linalg.det(true_covariance)
true_log_det_covariance = np.log(true_det_covariance)
with self.test_session() as sess:
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
# The following distributions will test the KL divergence calculation.
mvn_identity = ds.MultivariateNormalDiag(
loc=np.array([1., 2, 0.25], dtype=np.float32),
validate_args=True)
mvn_scaled = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_identity_multiplier=2.2,
validate_args=True)
mvn_diag = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_diag=np.array([0.5, 1.5, 1.], dtype=np.float32),
validate_args=True)
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([1., 2, -1], dtype=np.float32),
scale_tril=np.array([[6., 0, 0],
[2, 5, 0],
[1, 3, 4]], dtype=np.float32) / 10.,
validate_args=True)
scale = dist.scale.to_dense()
n = int(30e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
sample_kl_identity = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity = ds.kl(dist, mvn_identity)
sample_kl_scaled = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled = ds.kl(dist, mvn_scaled)
sample_kl_diag = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag = ds.kl(dist, mvn_diag)
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl(dist, mvn_chol)
n = int(10e3)
baseline = ds.MultivariateNormalDiag(
loc=np.array([-1., 0.25, 1.25], dtype=np.float32),
scale_diag=np.array([1.5, 0.5, 1.], dtype=np.float32),
validate_args=True)
samps = baseline.sample(n, seed=0)
sample_kl_identity_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity_diag_baseline = ds.kl(baseline, mvn_identity)
sample_kl_scaled_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled_diag_baseline = ds.kl(baseline, mvn_scaled)
sample_kl_diag_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag_diag_baseline = ds.kl(baseline, mvn_diag)
sample_kl_chol_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol_diag_baseline = ds.kl(baseline, mvn_chol)
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
analytical_log_det_covariance_,
analytical_det_covariance_,
scale_,
sample_kl_identity_, analytical_kl_identity_,
sample_kl_scaled_, analytical_kl_scaled_,
sample_kl_diag_, analytical_kl_diag_,
sample_kl_chol_, analytical_kl_chol_,
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
sample_kl_scaled_diag_baseline_, analytical_kl_scaled_diag_baseline_,
sample_kl_diag_diag_baseline_, analytical_kl_diag_diag_baseline_,
sample_kl_chol_diag_baseline_, analytical_kl_chol_diag_baseline_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
dist.log_det_covariance(),
dist.det_covariance(),
scale,
sample_kl_identity, analytical_kl_identity,
sample_kl_scaled, analytical_kl_scaled,
sample_kl_diag, analytical_kl_diag,
sample_kl_chol, analytical_kl_chol,
sample_kl_identity_diag_baseline,
analytical_kl_identity_diag_baseline,
sample_kl_scaled_diag_baseline, analytical_kl_scaled_diag_baseline,
sample_kl_diag_diag_baseline, analytical_kl_diag_diag_baseline,
sample_kl_chol_diag_baseline, analytical_kl_chol_diag_baseline,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
sample_det_covariance_ = np.linalg.det(sample_covariance_)
sample_log_det_covariance_ = np.log(sample_det_covariance_)
logging.vlog(2, "true_mean:\n{} ".format(true_mean))
logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))
logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
logging.vlog(2, "analytical_covariance:\n{}".format(
analytical_covariance_))
logging.vlog(2, "true_variance:\n{}".format(true_variance))
logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))
logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))
logging.vlog(2, "true_log_det_covariance:\n{}".format(
true_log_det_covariance))
logging.vlog(2, "sample_log_det_covariance:\n{}".format(
sample_log_det_covariance_))
logging.vlog(2, "analytical_log_det_covariance:\n{}".format(
analytical_log_det_covariance_))
logging.vlog(2, "true_det_covariance:\n{}".format(
true_det_covariance))
logging.vlog(2, "sample_det_covariance:\n{}".format(
sample_det_covariance_))
logging.vlog(2, "analytical_det_covariance:\n{}".format(
analytical_det_covariance_))
logging.vlog(2, "true_scale:\n{}".format(true_scale))
logging.vlog(2, "scale:\n{}".format(scale_))
logging.vlog(2, "kl_identity: analytical:{} sample:{}".format(
analytical_kl_identity_, sample_kl_identity_))
logging.vlog(2, "kl_scaled: analytical:{} sample:{}".format(
analytical_kl_scaled_, sample_kl_scaled_))
logging.vlog(2, "kl_diag: analytical:{} sample:{}".format(
analytical_kl_diag_, sample_kl_diag_))
logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
logging.vlog(
2, "kl_identity_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_identity_diag_baseline_,
sample_kl_identity_diag_baseline_))
logging.vlog(
2, "kl_scaled_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_scaled_diag_baseline_,
sample_kl_scaled_diag_baseline_))
logging.vlog(2, "kl_diag_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_diag_diag_baseline_,
sample_kl_diag_diag_baseline_))
logging.vlog(2, "kl_chol_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_chol_diag_baseline_,
sample_kl_chol_diag_baseline_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.02)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.02)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_log_det_covariance, sample_log_det_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_log_det_covariance,
analytical_log_det_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_det_covariance, sample_det_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_det_covariance, analytical_det_covariance_,
atol=0., rtol=1e-5)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_identity_, analytical_kl_identity_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_scaled_, analytical_kl_scaled_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_diag_, analytical_kl_diag_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_scaled_diag_baseline_,
analytical_kl_scaled_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_diag_diag_baseline_,
analytical_kl_diag_diag_baseline_,
atol=0., rtol=0.04)
self.assertAllClose(
sample_kl_chol_diag_baseline_,
analytical_kl_chol_diag_baseline_,
atol=0., rtol=0.02)
def testImplicitLargeDiag(self):
mu = np.array([[1., 2, 3],
[11, 22, 33]]) # shape: [b, k] = [2, 3]
u = np.array([[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1, 0.25],
[1.5, 1.25]]]) # shape: [b, k, r] = [2, 3, 2]
m = np.array([[0.1, 0.2],
[0.4, 0.5]]) # shape: [b, r] = [2, 2]
scale = np.stack([
np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
np.transpose(u[0])),
np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
np.transpose(u[1])),
])
cov = np.stack([np.matmul(scale[0], scale[0].T),
np.matmul(scale[1], scale[1].T)])
logging.vlog(2, "expected_cov:\n{}".format(cov))
with self.test_session():
mvn = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=u,
scale_perturb_diag=m)
self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
|
|
from copy import deepcopy
from elasticsearch_dsl import search, query, F, Q, DocType
def test_search_starts_with_empty_query():
s = search.Search()
assert s.query._proxied == query.MatchAll()
def test_search_query_combines_query():
s = search.Search()
s2 = s.query('match', f=42)
assert s2.query._proxied == query.Match(f=42)
assert s.query._proxied == query.MatchAll()
s3 = s2.query('match', f=43)
assert s2.query._proxied == query.Match(f=42)
assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)])
def test_query_can_be_assigned_to():
s = search.Search()
q = Q('match', title='python')
s.query = q
assert s.query._proxied is q
def test_query_can_be_wrapped():
s = search.Search().query('match', title='python')
s.query = Q('function_score', query=s.query, field_value_factor={'field': 'rating'})
assert {
'query': {
'function_score': {
'functions': [{'field_value_factor': {'field': 'rating'}}],
'query': {'match': {'title': 'python'}}
}
}
    } == s.to_dict()
def test_filter_can_be_overridden():
s = search.Search().filter('term', tag='python')
s.filter = ~F(s.filter)
assert {
"query": {
"filtered": {
"query": {"match_all": {}},
"filter": {"bool": {"must_not": [{"term": {"tag": "python"}}]}}
}
}
} == s.to_dict()
def test_using():
o = object()
o2 = object()
s = search.Search(using=o)
assert s._using is o
s2 = s.using(o2)
assert s._using is o
assert s2._using is o2
def test_methods_are_proxied_to_the_query():
s = search.Search()
assert s.query.to_dict() == {'match_all': {}}
def test_query_always_returns_search():
s = search.Search()
assert isinstance(s.query('match', f=42), search.Search)
def test_aggs_get_copied_on_change():
s = search.Search()
s.aggs.bucket('per_tag', 'terms', field='f').metric('max_score', 'max', field='score')
s2 = s.query('match_all')
s2.aggs.bucket('per_month', 'date_histogram', field='date', interval='month')
s3 = s2.query('match_all')
s3.aggs['per_month'].metric('max_score', 'max', field='score')
s4 = s3._clone()
s4.aggs.metric('max_score', 'max', field='score')
d = {
'query': {'match_all': {}},
'aggs': {
'per_tag': {
'terms': {'field': 'f'},
'aggs': {'max_score': {'max': {'field': 'score'}}}
}
}
}
assert d == s.to_dict()
d['aggs']['per_month'] = {"date_histogram": {'field': 'date', 'interval': 'month'}}
assert d == s2.to_dict()
d['aggs']['per_month']['aggs'] = {"max_score": {"max": {"field": 'score'}}}
assert d == s3.to_dict()
d['aggs']['max_score'] = {"max": {"field": 'score'}}
assert d == s4.to_dict()
def test_search_index():
s = search.Search(index='i')
assert s._index == ['i']
s = s.index('i2')
assert s._index == ['i', 'i2']
s = s.index()
assert s._index is None
s = search.Search(index=('i', 'i2'))
assert s._index == ['i', 'i2']
s = search.Search(index=['i', 'i2'])
assert s._index == ['i', 'i2']
s = search.Search()
s = s.index('i', 'i2')
assert s._index == ['i', 'i2']
s2 = s.index('i3')
assert s._index == ['i', 'i2']
assert s2._index == ['i', 'i2', 'i3']
def test_search_doc_type():
s = search.Search(doc_type='i')
assert s._doc_type == ['i']
s = s.doc_type('i2')
assert s._doc_type == ['i', 'i2']
s = s.doc_type()
assert s._doc_type == []
s = search.Search(doc_type=('i', 'i2'))
assert s._doc_type == ['i', 'i2']
s = search.Search(doc_type=['i', 'i2'])
assert s._doc_type == ['i', 'i2']
s = search.Search()
s = s.doc_type('i', 'i2')
assert s._doc_type == ['i', 'i2']
s2 = s.doc_type('i3')
assert s._doc_type == ['i', 'i2']
assert s2._doc_type == ['i', 'i2', 'i3']
def test_doc_type_can_be_document_class():
class MyDocType(DocType):
pass
s = search.Search(doc_type=MyDocType)
assert s._doc_type == ['my_doc_type']
assert s._doc_type_map == {'my_doc_type': MyDocType.from_es}
s = search.Search().doc_type(MyDocType)
assert s._doc_type == ['my_doc_type']
assert s._doc_type_map == {'my_doc_type': MyDocType.from_es}
def test_sort():
s = search.Search()
s = s.sort('fielda', '-fieldb')
assert ['fielda', {'fieldb': {'order': 'desc'}}] == s._sort
assert {'query': {'match_all': {}}, 'sort': ['fielda', {'fieldb': {'order': 'desc'}}]} == s.to_dict()
s = s.sort()
assert [] == s._sort
assert search.Search().to_dict() == s.to_dict()
def test_slice():
s = search.Search()
assert {'query': {'match_all': {}}, 'from': 3, 'size': 7} == s[3:10].to_dict()
assert {'query': {'match_all': {}}, 'from': 0, 'size': 5} == s[:5].to_dict()
assert {'query': {'match_all': {}}, 'from': 3, 'size': 10} == s[3:].to_dict()
assert {'query': {'match_all': {}}, 'from': 0, 'size': 0} == s[0:0].to_dict()
def test_index():
s = search.Search()
assert {'query': {'match_all': {}}, 'from': 3, 'size': 1} == s[3].to_dict()
def test_search_to_dict():
s = search.Search()
assert {"query": {"match_all": {}}} == s.to_dict()
s = s.query('match', f=42)
assert {"query": {"match": {'f': 42}}} == s.to_dict()
assert {"query": {"match": {'f': 42}}, "size": 10} == s.to_dict(size=10)
s.aggs.bucket('per_tag', 'terms', field='f').metric('max_score', 'max', field='score')
d = {
'aggs': {
'per_tag': {
'terms': {'field': 'f'},
'aggs': {'max_score': {'max': {'field': 'score'}}}
}
},
'query': {'match': {'f': 42}}
}
assert d == s.to_dict()
s = search.Search(extra={"size": 5})
assert {"query": {"match_all": {}}, "size": 5} == s.to_dict()
s = s.extra(from_=42)
assert {"query": {"match_all": {}}, "size": 5, "from": 42} == s.to_dict()
def test_complex_example():
s = search.Search()
s = s.query('match', title='python') \
.query(~Q('match', title='ruby')) \
.filter(F('term', category='meetup') | F('term', category='conference')) \
.post_filter('terms', tags=['prague', 'czech']) \
.script_fields(more_attendees="doc['attendees'].value + 42")
s.aggs.bucket('per_country', 'terms', field='country')\
.metric('avg_attendees', 'avg', field='attendees')
s.query.minimum_should_match = 2
s = s.highlight_options(order='score').highlight('title', 'body', fragment_size=50)
assert {
'query': {
'filtered': {
'filter': {
'bool': {
'should': [
{'term': {'category': 'meetup'}},
{'term': {'category': 'conference'}}
]
}
},
'query': {
'bool': {
                        'must': [{'match': {'title': 'python'}}],
'must_not': [{'match': {'title': 'ruby'}}],
'minimum_should_match': 2
}
}
}
},
'post_filter': {
'terms': {'tags': ['prague', 'czech']}
},
'aggs': {
'per_country': {
'terms': {'field': 'country'},
'aggs': {
'avg_attendees': {'avg': {'field': 'attendees'}}
}
}
},
"highlight": {
'order': 'score',
'fields': {
'title': {'fragment_size': 50},
'body': {'fragment_size': 50}
}
},
'script_fields': {
'more_attendees': {'script': "doc['attendees'].value + 42"}
}
} == s.to_dict()
def test_reverse():
d = {
'query': {
'filtered': {
'filter': {
'bool': {
'should': [
{'term': {'category': 'meetup'}},
{'term': {'category': 'conference'}}
]
}
},
'query': {
'bool': {
                        'must': [{'match': {'title': 'python'}}],
'must_not': [{'match': {'title': 'ruby'}}],
'minimum_should_match': 2
}
}
}
},
'post_filter': {
'bool': {'must': [{'terms': {'tags': ['prague', 'czech']}}]}
},
'aggs': {
'per_country': {
'terms': {'field': 'country'},
'aggs': {
'avg_attendees': {'avg': {'field': 'attendees'}}
}
}
},
"sort": [
"title",
{"category": {"order": "desc"}},
"_score"
],
"fields": [
"category",
"title"
],
"size": 5,
"highlight": {
'order': 'score',
'fields': {
'title': {'fragment_size': 50}
}
},
"suggest": {
"my-title-suggestions-1" : {
"text" : "devloping distibutd saerch engies",
"term" : {
"size" : 3,
"field" : "title"
}
}
},
'script_fields': {
'more_attendees': {'script': "doc['attendees'].value + 42"}
}
}
d2 = deepcopy(d)
s = search.Search.from_dict(d)
# make sure we haven't modified anything in place
assert d == d2
assert {"size": 5} == s._extra
assert d == s.to_dict()
def test_from_dict_doesnt_need_query():
s = search.Search.from_dict({"size": 5})
assert {
"query": {"match_all": {}},
"size": 5
} == s.to_dict()
def test_params_being_passed_to_search(mock_client):
s = search.Search('mock')
s = s.params(routing='42')
s.execute()
mock_client.search.assert_called_once_with(
doc_type=[],
index=None,
body={'query': {'match_all': {}}},
routing='42'
)
def test_fields():
assert {
'query': {
'match_all': {}
},
'fields': ['title']
} == search.Search().fields(['title']).to_dict()
assert {
'query': {
'match_all': {}
},
'fields': ['id', 'title']
} == search.Search().fields(['id', 'title']).to_dict()
assert {
'query': {
'match_all': {}
},
'fields': []
} == search.Search().fields([]).to_dict()
assert {
'query': {
'match_all': {}
}
} == search.Search().fields().to_dict()
assert {
'query': {
'match_all': {}
}
} == search.Search().fields(None).to_dict()
def test_fields_on_clone():
assert {
'query': {
'filtered': {
'filter': {'term': {'title': 'python'}},
'query': {'match_all': {}}
}
},
'fields': ['title']
} == search.Search().fields(['title']).filter('term', title='python').to_dict()
def test_partial_fields():
assert {
'query': {
'match_all': {}
},
} == search.Search().partial_fields().to_dict()
assert {
'query': {
'match_all': {}
},
'partial_fields': {
'foo': {
'include': ['foo.bar.*'],
'exclude': ['foo.one']
}
}
} == search.Search().partial_fields(foo={
'include': ['foo.bar.*'],
'exclude': ['foo.one']
}).to_dict()
assert {
'query': {
'match_all': {}
},
'partial_fields': {
'foo': {
'include': ['foo.bar.*'],
'exclude': ['foo.one'],
},
'bar': {
'include': ['bar.bar.*'],
}
}
} == search.Search().partial_fields(foo={
'include': ['foo.bar.*'],
'exclude': ['foo.one']
}, bar={
'include': ['bar.bar.*']
}).to_dict()
assert {
'query': {
'match_all': {}
},
'partial_fields': {
'bar': {
'include': ['bar.*'],
}
}
} == search.Search().partial_fields(foo={
'include': ['foo.bar.*']
}).partial_fields(bar={
'include': ['bar.*']
}).to_dict()
def test_partial_fields_on_clone():
assert {
'query': {
'filtered': {
'filter': {
'term': {
'title': 'python',
}
},
'query': {
'match_all': {},
}
}
},
'partial_fields': {
'foo': {
'include': ['foo.bar.*'],
'exclude': ['foo.one']
}
}
} == search.Search().partial_fields(foo={
'include': ['foo.bar.*'],
'exclude': ['foo.one']
}).filter('term', title='python').to_dict()
def test_suggest_accepts_global_text():
s = search.Search.from_dict({
"query": {"match_all": {}},
"suggest" : {
"text" : "the amsterdma meetpu",
"my-suggest-1" : {
"term" : {"field" : "title"}
},
"my-suggest-2" : {
"text": "other",
"term" : {"field" : "body"}
}
}
})
assert {
'query': {'match_all': {}},
'suggest': {
'my-suggest-1': {
'term': {'field': 'title'},
'text': 'the amsterdma meetpu'
},
'my-suggest-2': {
'term': {'field': 'body'},
'text': 'other'}
}
} == s.to_dict()
def test_suggest():
s = search.Search()
s = s.suggest('my_suggestion', 'pyhton', term={'field': 'title'})
assert {
'query': {'match_all': {}},
'suggest': {
'my_suggestion': {
'term': {'field': 'title'},
'text': 'pyhton'
}
}
} == s.to_dict()
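# Hedged usage sketch (not one of the tests above): chaining methods on a
# Search returns clones, as the cloning tests verify, so the original
# object is never mutated.
def _demo_chaining():
    s = search.Search(index='blog')
    s2 = s.query('match', title='python').filter('term', published=True)
    # `s` still serializes to a bare match_all; `s2` carries the additions.
    return s.to_dict(), s2.to_dict()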
|
|
import re
from datetime import timedelta
from enum import Enum
from typing import Union, Tuple, List
from django.apps import apps
from django.core import exceptions
from django.conf import settings
from django.db.models import F, Count, Min, Max, Sum, Value, Avg, ExpressionWrapper, DurationField, FloatField, Model
from django.db.models import functions as func
from data_interrogator import exceptions as di_exceptions
from data_interrogator.db import GroupConcat, DateDiff, ForceDate, SumIf
# Utility functions
math_infix_symbols = {
'-': lambda a, b: a - b,
'+': lambda a, b: a + b,
'/': lambda a, b: a / b,
'*': lambda a, b: a * b,
}
# Large unit multipliers to filter across
BIG_MULTIPLIERS = {
'day': 1,
'week': 7,
'fortnight': 14,
'month': 30, # close enough
'year': 365,
'decade': 10 * 365,
}
# Small unit multipliers to filter across
LITTLE_MULTIPLIERS = {
'second': 1,
'minute': 60,
'hour': 60 * 60,
'microfortnight': 1.2, # sure why not?
}
def get_base_model(app_label: str, model: str) -> Model:
"""Get the actual base model, from the """
return apps.get_model(app_label.lower(), model.lower())
def normalise_field(text) -> str:
"""Replace the UI access with the backend Django access"""
return text.strip().replace('(', '::').replace(')', '').replace(".", "__")
def normalise_math(expression):
"""Normalise math from UI """
if not any(s in expression for s in math_infix_symbols.keys()):
# we're aggregating some mathy things, these are tricky
return F(normalise_field(expression))
    math_operator_re = r'[-+/*]'
a, b = [v.strip() for v in re.split(math_operator_re, expression, 1)]
first_operator = re.findall(math_operator_re, expression)[0]
if first_operator == "-" and a.endswith('date') and b.endswith('date'):
expr = ExpressionWrapper(
DateDiff(
ForceDate(F(a)),
ForceDate(F(b))
), output_field=DurationField()
)
else:
expr = ExpressionWrapper(
math_infix_symbols[first_operator](F(a), F(b)),
output_field=FloatField()
)
return expr
def clean_filter(text: str) -> Union[str, Tuple[str, str, str]]:
"""Return the (cleaned) filter for replacement"""
maps = [('<>', 'ne'), ('<=', 'lte'), ('<', 'lt'), ('>=', 'gte'), ('>', 'gt'), ('=', '')]
for interrogator_filter, django_filter in maps:
candidate = text.split(interrogator_filter)
if len(candidate) == 2:
if interrogator_filter == "=":
return candidate[0], django_filter, candidate[1]
return candidate[0], '__%s' % django_filter, candidate[1]
return text
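# Hedged demonstration of the helpers above (not part of the original
# module). Each assertion follows directly from the string transforms
# defined in this file.
def _demo_normalisation():
    assert normalise_field('sum(order.total)') == 'sum::order__total'
    assert clean_filter('total>=10') == ('total', '__gte', '10')
    assert clean_filter('name=bob') == ('name', '', 'bob')
    # Duration filters like '3 week' use the multiplier tables:
    assert BIG_MULTIPLIERS['week'] * 3 == 21  # -> timedelta(days=21)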
class Allowable(Enum):
ALL_APPS = 1
    ALL_MODELS = 2
ALL_FIELDS = 3
class Interrogator:
available_aggregations = {
"min": Min,
"max": Max,
"sum": Sum,
'avg': Avg,
"count": Count,
"substr": func.Substr,
"group": GroupConcat,
"concat": func.Concat,
"sumif": SumIf,
}
errors = []
report_models = Allowable.ALL_MODELS
# both of these are lists of either:
# ('app_label',)
# ('app_label', 'model_name')
# Not this yet: ('app_label', 'model_name', ['list of field names'])
allowed = Allowable.ALL_MODELS
excluded = []
def __init__(self, report_models=None, allowed=None, excluded=None):
if report_models is not None:
self.report_models = report_models
if allowed is not None:
self.allowed = allowed
if excluded is not None:
self.excluded = excluded
# Clean up rules if they aren't lower cased.
fixed_excluded = []
for rule in self.excluded:
if len(rule) == 1:
rule = (rule[0].lower(),)
if len(rule) == 2:
rule = (rule[0].lower(), rule[1].lower())
if len(rule) == 3:
rule = (rule[0].lower(), rule[1].lower(), rule[2])
fixed_excluded.append(rule)
self.excluded = fixed_excluded
if self.allowed != Allowable.ALL_MODELS:
            self.allowed_apps = [
                i if isinstance(i, str) else i[0]
                for i in allowed
                if isinstance(i, str) or (isinstance(i, tuple) and len(i) == 1)
            ]
if self.allowed != Allowable.ALL_APPS:
                self.allowed_models = [
                    i[:2] for i in allowed
                    if isinstance(i, tuple) and len(i) == 2
                ]
else:
self.allowed_models = Allowable.ALL_MODELS
def is_hidden_field(self, field) -> bool:
"""Returns whether a field begins with an underscore and so is hidden"""
if hasattr(settings, 'INTERROGATOR_INCLUDED_HIDDEN_FIELDS') and field.name in settings.INTERROGATOR_INCLUDED_HIDDEN_FIELDS:
return False
return field.name.startswith('_')
def get_model_queryset(self):
return self.base_model.objects.all()
def process_annotation_concat(self, column):
pass
def process_annotation(self, column):
pass
def is_allowed_model(self, model):
pass
    def verify_column(self, column):
        """Walk the dunder path, raising IndexError if any segment is not a field."""
        model = self.base_model
args = column.split('__')
for a in args:
model = [f for f in model._meta.get_fields() if f.name == a][0].related_model
def get_field_by_name(self, model, field_name):
return model._meta.get_field(field_name)
def is_excluded_field(self, field_path, base_model=None) -> bool:
"""
Accepts dundered path from model
TODO: currently we're not doing per field permission checks, add this later
"""
return False
def is_excluded_model(self, model_class) -> bool:
"""Returns whether a model should be excluded"""
app_label = model_class._meta.app_label
model_name = model_class._meta.model_name
# Special case to include content type
if model_name == 'contenttype':
return False
if app_label in self.excluded or (app_label, model_name) in self.excluded:
return True
if self.allowed == Allowable.ALL_MODELS:
return False
excluded = not (app_label in self.allowed or ((app_label, model_name) in self.allowed))
return excluded
def has_forbidden_join(self, column) -> bool:
"""Return whether a forbidden join exists in the query"""
checking_model = self.base_model
joins = column.split('__')
        for relation in joins:
if checking_model:
try:
attr = self.get_field_by_name(checking_model, relation)
if attr.related_model:
if self.is_excluded_model(attr.related_model):
# Despite the join/field being named differently, this column is forbidden!
return True
checking_model = attr.related_model
except exceptions.FieldDoesNotExist:
pass
return False
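    # Example (illustrative, not from the original code): with
    # excluded = [('auth', 'user')], a column such as
    # 'order__created_by__username' is forbidden because the 'created_by'
    # join reaches the excluded auth.User model.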
def get_base_annotations(self):
return {}
def get_annotation(self, column):
agg, field = column.split('::', 1)
if agg == 'sumif':
try:
field, cond = field.split(',', 1)
            except ValueError:
                raise di_exceptions.InvalidAnnotationError("SUMIF must have a condition")
field = normalise_math(field)
conditions = {}
for condition in cond.split(','):
condition_key, condition_val = condition.split('=', 1)
conditions[normalise_field(condition_key)] = normalise_field(condition_val)
annotation = self.available_aggregations[agg](field=field, **conditions)
elif agg == 'join':
fields = []
for f in field.split(','):
if f.startswith(('"', "'")):
                    # it's a string literal
fields.append(Value(f.strip('"').strip("'")))
else:
fields.append(f)
annotation = self.available_aggregations[agg](*fields)
elif agg == "substr":
            field, i, j = (field.split(',') + [None, None])[0:3]  # pad so one or two args still unpack
annotation = self.available_aggregations[agg](field, i, j)
else:
field = normalise_math(field)
annotation = self.available_aggregations[agg](field, distinct=False)
return annotation
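    # Illustrative annotation strings accepted by get_annotation (examples
    # only, not from the original module):
    #   'count::id'                -> Count(F('id'), distinct=False)
    #   'group::tags__name'        -> GroupConcat(F('tags__name'), distinct=False)
    #   'sumif::total,status=paid' -> SumIf(field=F('total'), status='paid')
    #   'join::"#",code'           -> func.Concat(Value('#'), 'code')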
def validate_report_model(self, base_model):
        app_label, model = [part.lower() for part in base_model.split(':', 1)]
        base_model = apps.get_model(app_label, model)
extra_data = {}
if (app_label, model) in self.excluded or base_model in self.excluded:
self.base_model = None
raise di_exceptions.ModelNotAllowedException(model=base_model)
if self.report_models == Allowable.ALL_MODELS:
return base_model, extra_data
for opts in self.report_models:
if opts[:2] == (app_label, model):
return base_model, extra_data
self.base_model = None
raise di_exceptions.ModelNotAllowedException()
def check_for_forbidden_column(self, column) -> List[str]:
"""Check if column is forbidden for whatever reason, and return the value of it"""
errors: List[str] = []
# Check if the column has permission
if self.has_forbidden_join(column):
errors.append(
"Joining tables with the column [{}] is forbidden, this column is removed from the output.".format(
column))
# Check aggregation includes a forbidden column
if '::' in column:
check_col = column.split('::', 1)[-1]
if self.has_forbidden_join(check_col):
errors.append(
"Aggregating tables using the column [{}] is forbidden, this column is removed from the output.".format(
column))
return errors
def generate_filters(self, filters, annotations, expression_columns):
errors = []
annotation_filters = {}
_filters = {}
excludes = {}
filters_all = {}
for index, expression in enumerate(filters):
field, exp, val = clean_filter(normalise_field(expression))
if self.has_forbidden_join(field):
errors.append(
f"Filtering with the column [{field}] is forbidden, this filter is removed from the output."
)
continue
key = '%s%s' % (field.strip(), exp)
val = val.strip()
if val.startswith('~'):
val = F(val[1:])
elif key.endswith('date'):
                val = (val + '-01-01')[:10]  # If we are filtering by a date, make sure it's 'date-like'
elif key.endswith('__isnull'):
if val.lower() in ['false', 'f', '0']:
val = False
else:
val = bool(val)
if '::' in field:
# We've got an annotated filter
agg, f = field.split('::', 1)
field = 'f%s%s' % (index, field)
key = 'f%s%s' % (index, key)
annotations[field] = self.available_aggregations[agg](f, distinct=True)
annotation_filters[key] = val
elif key in annotations.keys():
annotation_filters[key] = val
elif key.split('__')[0] in expression_columns:
k = key.split('__')[0]
                if ('date' in k and key.endswith('date')) or 'date' in str(annotations[k]):
val, period = (val.rsplit(' ', 1) + ['days'])[0:2]
# this line is complicated, just in case there is no period or space
period = period.rstrip('s') # remove plurals
kwargs = {}
if BIG_MULTIPLIERS.get(period, None):
kwargs['days'] = int(val) * BIG_MULTIPLIERS[period]
elif LITTLE_MULTIPLIERS.get(period, None):
kwargs['seconds'] = int(val) * LITTLE_MULTIPLIERS[period]
annotation_filters[key] = timedelta(**kwargs)
else:
annotation_filters[key] = val
            elif key.endswith('__all'):
                key = key[:-len('__all')]  # strip the suffix; rstrip('_all') would eat trailing a/l characters
                val = val.split(',')
                filters_all[key] = val
else:
exclude = key.endswith('!')
if exclude:
key = key[:-1]
                if key.endswith('__in'):
                    val = val.split(',')
if exclude:
excludes[key] = val
else:
_filters[key] = val
# _filters.update(**annotation_filters)
return filters_all, _filters, annotation_filters, annotations, expression_columns, excludes
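    # Illustrative filter expressions handled by generate_filters (examples
    # only, not from the original module):
    #   'created_date=2020'  -> {'created_date': '2020-01-01'}
    #   'total>=10'          -> {'total__gte': '10'}
    #   'tags__in=a,b'       -> {'tags__in': ['a', 'b']}
    #   'status!=closed'     -> excluded via {'status': 'closed'}
    #   'name=~nickname'     -> {'name': F('nickname')}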
def get_model_restriction(self, model):
return {}
    def get_model_restriction_filters(self, column) -> dict:
        """Collect the restriction filters of every related model joined along the column path"""
checking_model = self.base_model
restriction_filters = {}
joins = column.split('__')
for i, relation in enumerate(joins):
try:
attr = self.get_field_by_name(checking_model, relation)
if attr.related_model:
if restriction := self.get_model_restriction(attr.related_model):
for k, v in restriction.items():
joined_rest = "__".join(joins[:i+1]) + "__" + k
restriction_filters[joined_rest] = v
checking_model = attr.related_model
except exceptions.FieldDoesNotExist:
pass
return restriction_filters
def generate_queryset(self, base_model, columns=None, filters=None, order_by=None, limit=None, offset=0):
errors = []
annotation_filters = {}
self.base_model, base_model_data = self.validate_report_model(base_model)
wrap_sheets = base_model_data.get('wrap_sheets', {})
annotations = self.get_base_annotations()
expression_columns = []
output_columns = []
query_columns = []
model_restriction_filters = {}
model_restriction_filters.update(self.get_model_restriction(self.base_model))
        # Build the requested columns and annotations
for column in columns:
var_name = None
if column == "":
# If the field is empty, don't do anything
continue
if ':=' in column:
var_name, column = column.split(':=', 1)
# Map names in UI to django functions
column = normalise_field(column)
if var_name is None:
var_name = column
# Check if the column has permission
column_permission_errors = self.check_for_forbidden_column(column)
if column_permission_errors:
# If there are permission errors, add to error list, and don't continue
errors.extend(column_permission_errors)
continue
# Build columns
if column.startswith(tuple([a + '::' for a in self.available_aggregations.keys()])):
annotations[var_name] = self.get_annotation(column)
elif any(s in column for s in math_infix_symbols.keys()):
                annotations[var_name] = normalise_math(column)  # module-level helper, not a method
expression_columns.append(var_name)
else:
if column in wrap_sheets.keys():
cols = wrap_sheets.get(column).get('columns', [])
query_columns = query_columns + cols
else:
if var_name == column:
query_columns.append(var_name)
else:
annotations[var_name] = F(column)
model_restriction_filters.update(self.get_model_restriction_filters(column))
output_columns.append(var_name)
rows = self.get_model_queryset()
# Generate filters
filters_all, _filters, annotation_filters, annotations, expression_columns, excludes = self.generate_filters(
filters=filters,
annotations=annotations,
expression_columns=expression_columns
)
rows = rows.filter(**_filters)
for key, val in filters_all.items():
for v in val:
rows = rows.filter(**{key: v})
rows = rows.exclude(**excludes)
if model_restriction_filters:
rows = rows.filter(**model_restriction_filters)
rows = rows.values(*query_columns)
if annotations:
rows = rows.annotate(**annotations)
rows = rows.filter(**annotation_filters)
if order_by:
ordering = map(normalise_field, order_by)
rows = rows.order_by(*ordering)
        if limit:
            lim = abs(int(limit))
            rows = rows[offset:offset + lim]  # treat `limit` as a row count starting at `offset`
return rows, errors, output_columns, base_model_data
def interrogate(self, base_model, columns=None, filters=None, order_by=None, limit=None, offset=0):
if order_by is None: order_by = []
if filters is None: filters = []
if columns is None: columns = []
errors = []
base_model_data = {}
output_columns = []
count = 0
rows = []
try:
rows, errors, output_columns, base_model_data = self.generate_queryset(
base_model, columns, filters, order_by, limit, offset
)
if errors:
rows = rows.none()
            rows = list(rows)  # Force a database hit so query errors surface here
_rows = []
for row in rows:
if row not in _rows:
_rows.append(row)
rows = _rows
count = len(rows)
except di_exceptions.InvalidAnnotationError as e:
errors.append(e)
        except ValueError as e:
            rows = []
            try:
                limit_value = int(limit)
            except (TypeError, ValueError):
                limit_value = None
            if limit_value is None:
                errors.append("Limit must be a number")
            elif limit_value < 1:
                errors.append("Limit must be a number greater than zero")
            else:
                errors.append("Something went wrong - %s" % e)
except IndexError as e:
rows = []
errors.append("No rows returned for your query, try broadening your search.")
except exceptions.FieldError as e:
rows = []
if str(e).startswith('Cannot resolve keyword'):
field = str(e).split("'")[1]
errors.append("The requested field '%s' was not found in the database." % field)
else:
errors.append("An error was found with your query:\n%s" % e)
except Exception as e:
rows = []
errors.append("Something went wrong - %s" % e)
return {
'rows': rows, 'count': count, 'columns': output_columns, 'errors': errors,
'base_model': base_model_data,
# 'query': query # DEBUG Tool
}
class PivotInterrogator(Interrogator):
def __init__(self, aggregators, **kwargs):
super().__init__(**kwargs)
self.aggregators = aggregators
def get_base_annotations(self):
aggs = {
x: self.get_annotation(normalise_field(x)) for x in self.aggregators
if not self.has_forbidden_join(column=x)
}
aggs.update({"cell": Count(1)})
return aggs
def pivot(self):
# Only accept the first two valid columns
self.columns = [normalise_field(c) for c in self.columns if not self.has_forbidden_join(column=c)][:2]
data = self.interrogate()
out_rows = {}
col_head = self.base_model.objects.values(self.columns[0]).order_by(self.columns[0]).distinct()
x, y = self.columns[:2]
from collections import OrderedDict
default = OrderedDict([(c[x], {'count': 0}) for c in col_head])
for r in data['rows']:
this_row = out_rows.get(r[y], default.copy())
this_row[r[x]] = {'count': r['cell'],
'aggs': [(k, v) for k, v in r.items() if k not in ['cell', x, y]]
}
out_rows[r[y]] = this_row
return {
'rows': out_rows, 'col_head': col_head, 'errors': data['errors'],
'base_model': data['base_model'], 'headers': data['headers']
}
|
|
"""The test for binary_sensor device automation."""
from datetime import timedelta
import pytest
from unittest.mock import patch
from homeassistant.components.binary_sensor import DOMAIN, DEVICE_CLASSES
from homeassistant.components.binary_sensor.device_condition import ENTITY_CONDITIONS
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.helpers import device_registry
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
async_get_device_automations,
async_get_device_automation_capabilities,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a binary_sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for condition in ENTITY_CONDITIONS[device_class]
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a binary_sensor condition."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
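# For reference (hedged, derived from the tests below): a binary_sensor
# device condition with the optional "for" duration looks like this inside
# an automation config:
#
#     {
#         "condition": "device",
#         "domain": DOMAIN,
#         "device_id": "<device id>",
#         "entity_id": "binary_sensor.battery",
#         "type": "is_bat_low",
#         "for": {"seconds": 5},
#     }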
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_bat_low",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_on event - test_event1"
hass.states.async_set(sensor1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_off event - test_event2"
async def test_if_fires_on_for_condition(hass, calls):
"""Test for firing if condition is on with delay."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=10)
point3 = point2 + timedelta(seconds=10)
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point1
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "event.event_type")
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
# Time travel 10 secs into the future
mock_utcnow.return_value = point2
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
# Time travel 20 secs into the future
mock_utcnow.return_value = point3
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_off event - test_event1"
|
|
import struct
import synapse.axon as s_axon
import synapse.common as s_common
import synapse.neuron as s_neuron
import synapse.lib.iq as s_iq
import synapse.lib.cell as s_cell
import synapse.lib.crypto.vault as s_vault
from synapse.tests.common import *
logger = logging.getLogger(__name__)
# This causes blocks which are not homogeneous when sliced in kibibyte lengths
bbuf = b'0123456' * 4585
nullhash = hashlib.sha256(b'').digest()
bbufhash = hashlib.sha256(bbuf).digest()
asdfhash = hashlib.sha256(b'asdfasdf').digest()
hehahash = hashlib.sha256(b'hehehaha').digest()
ohmyhash = hashlib.sha256(b'ohmyohmy').digest()
qwerhash = hashlib.sha256(b'qwerqwer').digest()
def u64(x):
return struct.pack('>Q', x)
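# Hedged illustration (not one of the original tests): blob rows are keyed
# by a 32-byte buid concatenated with the big-endian 64-bit block index
# packed by u64(), so blocks sort -- and therefore load -- in order no
# matter what order they were saved in.
def demo_blob_key(blob_buid, offset):
    assert len(blob_buid) == 32
    return blob_buid + u64(offset)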
class AxonTest(SynTest):
def test_axon_blob(self):
with self.getTestDir() as dirn:
path0 = os.path.join(dirn, 'blob0')
with s_axon.BlobStor(path0, mapsize=s_iq.TEST_MAP_SIZE) as bst0:
tbuid = b'\x56' * 32
blobs = (
(tbuid + u64(0), b'asdf'),
(tbuid + u64(1), b'qwer'),
(tbuid + u64(2), b'hehe'),
(tbuid + u64(3), b'haha'),
)
bst0.save(blobs)
retn = b''.join(bst0.load(tbuid))
self.eq(retn, b'asdfqwerhehehaha')
                # Order doesn't matter since the chunks are indexed
buid2 = b'\x01' * 32
blobs = (
(buid2 + u64(3), b'sale'),
(buid2 + u64(1), b'b33f'),
(buid2 + u64(0), b'dead'),
(buid2 + u64(2), b'f0re'),
)
# We do not have bytes for buid2 yet
bl = []
for byts in bst0.load(buid2):
bl.append(byts)
self.eq(bl, [])
bst0.save(blobs)
retn = b''.join(bst0.load(buid2))
self.eq(retn, b'deadb33ff0resale')
# We can store and retrieve an empty string
buid3 = b'\x02' * 32
blobs = (
(buid3 + u64(0), b''),
)
bst0.save(blobs)
bl = []
for byts in bst0.load(buid3):
bl.append(byts)
self.eq(bl, [b''])
retn = b''.join(bl)
self.eq(retn, b'')
path1 = os.path.join(dirn, 'blob1')
with s_axon.BlobStor(path1, mapsize=s_iq.TEST_MAP_SIZE) as bst1:
bst1.addCloneRows(bst0.clone(0))
retn = b''.join(bst1.load(tbuid))
self.eq(retn, b'asdfqwerhehehaha')
retn = b''.join(bst1.load(buid2))
self.eq(retn, b'deadb33ff0resale')
retn = b''.join(bst0.load(buid3))
self.eq(retn, b'')
bst1.addCloneRows([]) # Empty addCloneRows call for coverage
def test_axon_blob_stat(self):
with self.getTestDir() as dirn:
path0 = os.path.join(dirn, 'blob0')
with s_axon.BlobStor(path0, mapsize=s_iq.TEST_MAP_SIZE) as bst0:
tbuid = b'\x56' * 32
blobs = (
(tbuid + u64(0), os.urandom(1000)),
(tbuid + u64(1), b'qwer'),
(tbuid + u64(2), b'hehe'),
(tbuid + u64(3), b'haha'),
) # 4 blocks, size 1000 + 4 + 4 + 4 = 1012 bytes
stats = bst0.stat()
self.eq(stats, {})
bst0.save(blobs[0:1])
stats = bst0.stat()
self.eq(stats, {'bytes': 1000, 'blocks': 1})
bst0.save(blobs[1:])
stats = bst0.stat()
self.eq(stats, {'bytes': 1012, 'blocks': 4})
def test_axon_blob_metrics(self):
with self.getTestDir() as dirn:
path0 = os.path.join(dirn, 'blob0')
with s_axon.BlobStor(path0, mapsize=s_iq.TEST_MAP_SIZE) as bst0:
tbuid = b'\x56' * 32
blobs = (
(tbuid + u64(0), os.urandom(1000)),
(tbuid + u64(1), b'qwer'),
(tbuid + u64(2), b'hehe'),
(tbuid + u64(3), b'haha'),
) # 4 blocks, size 1000 + 4 + 4 + 4 = 1012 bytes
metrics = sorted(list(bst0.metrics()))
self.eq(metrics, [])
bst0.save(blobs[0:1])
metrics = []
for item in bst0.metrics():
item[1].pop('time')
metrics.append(item[1])
tooks = [m.pop('took') for m in metrics] # remove took since it may vary
self.eq(metrics, [{'size': 1000, 'blocks': 1}])
self.len(1, tooks)
# These are time based and cannot be promised to be a particular value
for took in tooks:
self.lt(took, 10000)
bst0.save(blobs[1:])
metrics = []
for item in bst0.metrics():
item[1].pop('time')
metrics.append(item[1])
tooks = [m.pop('took') for m in metrics] # remove took since it may vary
self.eq(metrics, [{'size': 1000, 'blocks': 1}, {'blocks': 3, 'size': 12}])
self.len(2, tooks)
# These are time based and cannot be promised to be a particular value
for took in tooks:
self.lt(took, 10000)
def test_axon_cell(self):
# implement as many tests as possible in this one
# since it *has* to use a neuron to work correctly
# put all the things that need fini() into a BusRef...
with self.getTestDir() as dirn:
with s_eventbus.BusRef() as bref:
# neur00 ############################################
# Set port to zero to allow a port to be automatically assigned during testing
conf = {'host': 'localhost', 'bind': '127.0.0.1', 'port': 0}
path = s_common.gendir(dirn, 'neuron')
logger.debug('Bringing Neuron online')
neur = s_neuron.Neuron(path, conf)
bref.put('neur00', neur)
root = neur.getCellAuth()
addr = neur.getCellAddr()
nport = addr[1] # Save the port for later use
# blob00 ############################################
path = s_common.gendir(dirn, 'blob00')
authblob00 = neur.genCellAuth('blob00')
s_msgpack.dumpfile(authblob00, os.path.join(path, 'cell.auth'))
logger.debug('Bringing blob00 online')
conf = {'host': 'localhost', 'bind': '127.0.0.1', 'blob:mapsize': s_iq.TEST_MAP_SIZE}
blob00 = s_axon.BlobCell(path, conf)
bref.put('blob00', blob00)
self.true(blob00.cellpool.neurwait(timeout=3))
user = s_cell.CellUser(root)
blob00sess = user.open(blob00.getCellAddr(), timeout=3)
bref.put('blob00sess', blob00sess)
mesg = ('blob:stat', {})
ok, retn = blob00sess.call(mesg, timeout=3)
self.true(ok)
self.eq(retn, {}) # Nothing there yet
# blob01 ############################################
path = s_common.gendir(dirn, 'blob01')
authblob01 = neur.genCellAuth('blob01')
s_msgpack.dumpfile(authblob01, os.path.join(path, 'cell.auth'))
blob01conf = dict(conf)
blob01conf['blob:cloneof'] = 'blob00@localhost'
logger.debug('Bringing blob01 online')
blob01 = s_axon.BlobCell(path, blob01conf)
bref.put('blob01', blob01)
self.true(blob01.cellpool.neurwait(timeout=3))
blob01sess = user.open(blob01.getCellAddr(), timeout=3)
bref.put('blob01sess', blob01sess)
blob01wait = blob01.waiter(1, 'blob:clone:rows')
# axon00 ############################################
path = s_common.gendir(dirn, 'axon00')
authaxon00 = neur.genCellAuth('axon00')
s_msgpack.dumpfile(authaxon00, os.path.join(path, 'cell.auth'))
axonconf = {
'host': 'localhost',
'bind': '127.0.0.1',
'axon:blobs': ('blob00@localhost',),
'axon:mapsize': s_iq.TEST_MAP_SIZE,
}
logger.debug('Bringing axon00 online')
axon00 = s_axon.AxonCell(path, axonconf)
bref.put('axon00', axon00)
self.true(axon00.cellpool.neurwait(timeout=3))
#####################################################
sess = user.open(axon00.getCellAddr(), timeout=3)
bref.put('sess', sess)
# wait for the axon to have blob00
ready = False
for i in range(30):
if axon00.blobs.items():
ready = True
break
time.sleep(0.1)
self.true(ready)
axon = s_axon.AxonClient(sess)
blob = s_axon.BlobClient(blob00sess)
blob01c = s_axon.BlobClient(blob01sess)
self.eq((), tuple(axon.metrics()))
self.eq((), tuple(blob.metrics()))
self.len(1, axon.wants([asdfhash]))
# Asking for bytes prior to the bytes being present raises
self.genraises(RetnErr, axon.bytes, asdfhash, timeout=3)
self.eq(1, axon.save([b'asdfasdf'], timeout=3))
self.eq((), tuple(axon.metrics(offs=999999999)))
self.eq((), tuple(blob.metrics(offs=99999999, timeout=3)))
metrics = list(blob.metrics(timeout=3))
self.len(1, metrics)
self.eq(8, metrics[0][1].get('size'))
self.eq(1, metrics[0][1].get('blocks'))
self.len(0, axon.wants([asdfhash], timeout=3))
self.eq(b'asdfasdf', b''.join(axon.bytes(asdfhash, timeout=3)))
stat = axon.stat(timeout=3)
self.eq(1, stat.get('files'))
self.eq(8, stat.get('bytes'))
# Save it again - we should have no change in metrics/storage
self.eq(0, axon.save([b'asdfasdf'], timeout=3))
metrics = list(blob.metrics(timeout=3))
self.len(1, metrics)
self.eq(8, metrics[0][1].get('size'))
self.eq(1, metrics[0][1].get('blocks'))
stat = axon.stat(timeout=3)
self.eq(1, stat.get('files'))
self.eq(8, stat.get('bytes'))
# FIXME - What is the behavior we want here?
# Currently, we duplicate the uploaded bytes with a new buid.
# self.eq(asdfhash, axon.upload([b'asdf', b'asdf'], timeout=3))
# metrics = list(blob.metrics(timeout=3))
# self.len(1, metrics)
# self.eq(8, metrics[0][1].get('size'))
# self.eq(1, metrics[0][1].get('blocks'))
# stat = axon.stat(timeout=3)
# self.eq(1, stat.get('files'))
# self.eq(8, stat.get('bytes'))
# lets see if the bytes made it to the blob clone...
self.nn(blob01wait.wait(timeout=10))
newp = os.urandom(32)
def loop():
s_common.spin(axon.bytes(newp))
self.raises(s_exc.RetnErr, loop)
blob01wait = blob01.waiter(1, 'blob:clone:rows')
self.eq(qwerhash, axon.upload([b'qwer', b'qwer'], timeout=3))
self.len(0, axon.wants([qwerhash]))
self.eq(b'qwerqwer', b''.join(axon.bytes(qwerhash, timeout=3)))
self.nn(blob01wait.wait(3))
retn = list(axon.metrics(0, timeout=3))
self.eq(retn[0][1].get('size'), 8)
self.eq(retn[0][1].get('cell'), 'blob00@localhost')
# Try uploading a large file
logger.debug('Large file test')
# Monkeypatch axon to a smaller blocksize
s_axon.blocksize = s_const.kibibyte
self.raises(RetnErr, axon.locs, bbufhash, timeout=3)
genr = s_common.chunks(bbuf, s_axon.blocksize)
# It is possible that we may need multiple events captured
# to avoid a timing issue
blob01wait = blob01.waiter(2, 'blob:clone:rows')
self.eq(bbufhash, axon.upload(genr, timeout=3))
self.eq((), axon.wants([bbufhash], timeout=3))
# Then retrieve it
size = 0
gots = []
testhash = hashlib.sha256()
for byts in axon.bytes(bbufhash, timeout=3):
size += len(byts)
gots.append(byts)
testhash.update(byts)
self.eq(bbufhash, testhash.digest())
try:
self.eq(size, len(bbuf))
self.eq(bbufhash, testhash.digest())
except Exception as e:
for byts in gots:
print(repr(byts))
print('SIZE: %d/%d' % (size, len(bbuf)))
raise
blob01wait.wait(3)
self.ne(blob01wait.events, [])
locs = axon.locs(bbufhash, timeout=3)
self.len(1, locs)
self.isin('blob00', locs[0][0])
# Use the buid to retrieve the large file from blob01
tbuid = locs[0][1]
testhash = hashlib.sha256()
for byts in blob01c.bytes(tbuid, timeout=3):
testhash.update(byts)
self.eq(bbufhash, testhash.digest())
                # Try storing an empty file
logger.debug('Nullfile test')
axon.save([b''])
self.eq((), tuple(axon.wants([nullhash])))
# Then retrieve it
parts = []
for part in axon.bytes(nullhash):
parts.append(part)
self.eq([b''], parts)
logger.debug('Shutdown / restart blob01 test')
bref.pop('blob01')
blob01.fini()
self.true(blob01.isfini)
axon.save([b'hehehaha'], timeout=3)
self.eq((), axon.wants([hehahash], timeout=3))
# Now bring blob01 back online
logger.debug('Bringing blob01 back online')
blob01 = s_axon.BlobCell(path, blob01conf)
bref.put('blob01', blob01)
self.true(blob01.cellpool.neurwait(timeout=3))
blob01wait = blob01.waiter(1, 'blob:clone:rows')
# Cloning should start up shortly
self.nn(blob01wait.wait(10))
# Ask a blobclient for data for a random buid
newp = buid()
parts = []
for part in blob.bytes(newp):
parts.append(part)
self.eq(parts, [])
# Let everything get shut down by the busref fini
logger.debug('Bringing everything back up')
with s_eventbus.BusRef() as bref:
# neur00 ############################################
conf = {'host': 'localhost', 'bind': '127.0.0.1', 'port': nport}
path = s_common.gendir(dirn, 'neuron')
logger.debug('Bringing Neuron Back online')
neur = s_neuron.Neuron(path, conf)
bref.put('neur00', neur)
root = neur.getCellAuth()
# blob00 ############################################
path = s_common.gendir(dirn, 'blob00')
logger.debug('Bringing blob00 back online')
conf = {'host': 'localhost', 'bind': '127.0.0.1', 'blob:mapsize': s_iq.TEST_MAP_SIZE}
blob00 = s_axon.BlobCell(path, conf)
bref.put('blob00', blob00)
self.true(blob00.cellpool.neurwait(timeout=3))
user = s_cell.CellUser(root)
blob00sess = user.open(blob00.getCellAddr(), timeout=3)
bref.put('blob00sess', blob00sess)
# blob01 ############################################
path = s_common.gendir(dirn, 'blob01')
blob01conf = dict(conf)
blob01conf['blob:cloneof'] = 'blob00@localhost'
logger.debug('Bringing blob01 back online')
blob01 = s_axon.BlobCell(path, blob01conf)
bref.put('blob01', blob01)
self.true(blob01.cellpool.neurwait(timeout=3))
blob01wait = blob01.waiter(1, 'blob:clone:rows')
# axon00 ############################################
path = s_common.gendir(dirn, 'axon00')
authaxon00 = neur.genCellAuth('axon00')
s_msgpack.dumpfile(authaxon00, os.path.join(path, 'cell.auth'))
axonconf = {
'host': 'localhost',
'bind': '127.0.0.1',
'axon:blobs': ('blob00@localhost',),
'axon:mapsize': s_iq.TEST_MAP_SIZE
}
logger.debug('Bringing axon00 online')
axon00 = s_axon.AxonCell(path, axonconf)
bref.put('axon00', axon00)
self.true(axon00.cellpool.neurwait(timeout=3))
#####################################################
sess = user.open(axon00.getCellAddr(), timeout=3)
bref.put('sess', sess)
# wait for the axon to have blob00
ready = False
for i in range(30):
if axon00.blobs.items():
ready = True
break
time.sleep(0.1)
self.true(ready)
axon = s_axon.AxonClient(sess)
# Try retrieving a large file
testhash = hashlib.sha256()
for byts in axon.bytes(bbufhash, timeout=3):
testhash.update(byts)
self.eq(bbufhash, testhash.digest())
                # Try saving a new file and an existing file to the cluster and ensure it is replicated
                self.eq((ohmyhash,), axon.wants((ohmyhash, hehahash, nullhash), 3))
                self.eq(1, axon.save([b'ohmyohmy', b'']))
self.nn(blob01wait.wait(10))
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast tokenization class for LayoutLMv2. It overrides two methods of the slow tokenizer class, namely
_batch_encode_plus and _encode_plus, in which the Rust tokenizer is used.
"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import normalizers
from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
from ...tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
BatchEncoding,
EncodedInput,
PreTokenizedInput,
TextInput,
TextInputPair,
TruncationStrategy,
)
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_layoutlmv2 import LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv2Tokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv2-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
}
class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original LayoutLMv2).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LayoutLMv2Tokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_label=-100,
only_label_first_subword=True,
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
cls_token_box=cls_token_box,
sep_token_box=sep_token_box,
pad_token_box=pad_token_box,
pad_token_label=pad_token_label,
only_label_first_subword=only_label_first_subword,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
):
pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
pre_tok_state["lowercase"] = do_lower_case
pre_tok_state["strip_accents"] = strip_accents
self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
self.do_lower_case = do_lower_case
# additional properties
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
boxes: Union[List[List[int]], List[List[List[int]]]] = None,
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
                # Lists are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
# in case text + text_pair are provided, text = questions, text_pair = words
if not _is_valid_text_input(text):
raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
if not isinstance(text_pair, (list, tuple)):
raise ValueError(
"Words must be of type `List[str]` (single pretokenized example), "
"or `List[List[str]]` (batch of pretokenized examples)."
)
else:
# in case only text is provided => must be words
if not isinstance(text, (list, tuple)):
raise ValueError(
"Words must be of type `List[str]` (single pretokenized example), "
"or `List[List[str]]` (batch of pretokenized examples)."
)
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
assert boxes is not None, "You must provide corresponding bounding boxes"
if is_batched:
assert len(words) == len(boxes), "You must provide words and boxes for an equal amount of examples"
for words_example, boxes_example in zip(words, boxes):
assert len(words_example) == len(
boxes_example
), "You must provide as many words as there are bounding boxes"
else:
assert len(words) == len(boxes), "You must provide as many words as there are bounding boxes"
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
)
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
],
is_pair: bool = None,
boxes: Optional[List[List[List[int]]]] = None,
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(
batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
)
return encodings[0].tokens
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
        Tokenize and prepare for the model a sequence or a pair of sequences.

        .. warning:: This method is deprecated, `__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
boxes=boxes,
text_pair=text_pair,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
],
is_pair: bool = None,
boxes: Optional[List[List[List[int]]]] = None,
word_labels: Optional[List[List[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
# Set the truncation and padding strategy and restore the initial configuration
self.set_truncation_and_padding(
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
)
if is_pair:
batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
)
# Convert encoding to dict
# `Tokens` has type: Tuple[
# List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
# List[EncodingFast]
# ]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens_and_encodings = [
self._convert_encoding(
encoding=encoding,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=True
if word_labels is not None
else return_offsets_mapping, # we use offsets to create the labels
return_length=return_length,
verbose=verbose,
)
for encoding in encodings
]
# Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
# From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
        # (we say ~ because the number of overflows varies with the example in the batch)
#
# To match each overflowing sample with the original sample in the batch
# we add an overflow_to_sample_mapping array (see below)
sanitized_tokens = {}
for key in tokens_and_encodings[0][0].keys():
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks["input_ids"])
sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
for input_ids in sanitized_tokens["input_ids"]:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
# create the token boxes
token_boxes = []
for batch_index in range(len(sanitized_tokens["input_ids"])):
if return_overflowing_tokens:
original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
else:
original_index = batch_index
token_boxes_example = []
for id, sequence_id, word_id in zip(
sanitized_tokens["input_ids"][batch_index],
sanitized_encodings[batch_index].sequence_ids,
sanitized_encodings[batch_index].word_ids,
):
if word_id is not None:
if is_pair and sequence_id == 0:
token_boxes_example.append(self.pad_token_box)
else:
token_boxes_example.append(boxes[original_index][word_id])
else:
if id == self.cls_token_id:
token_boxes_example.append(self.cls_token_box)
elif id == self.sep_token_id:
token_boxes_example.append(self.sep_token_box)
elif id == self.pad_token_id:
token_boxes_example.append(self.pad_token_box)
else:
raise ValueError("Id not recognized")
token_boxes.append(token_boxes_example)
sanitized_tokens["bbox"] = token_boxes
# optionally, create the labels
if word_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens["input_ids"])):
if return_overflowing_tokens:
original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
else:
original_index = batch_index
labels_example = []
for id, offset, word_id in zip(
sanitized_tokens["input_ids"][batch_index],
sanitized_tokens["offset_mapping"][batch_index],
sanitized_encodings[batch_index].word_ids,
):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0:
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
else:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens["labels"] = labels
# finally, remove offsets if the user didn't want them
if not return_offsets_mapping:
del sanitized_tokens["offset_mapping"]
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
# make it a batched input
# 2 options:
        # 1) only text, in which case text must be a list of str
        # 2) text + text_pair, in which case text = str and text_pair a list of str
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus(
batched_input,
is_pair=bool(text_pair is not None),
boxes=batched_boxes,
word_labels=batched_word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
        # If return_tensors is None, we can remove the leading batch axis
# Overflowing tokens are returned as a batch of output so we keep them in this case
if return_tensors is None and not return_overflowing_tokens:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
return batched_output
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
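            # e.g. max_length=10 with pad_to_multiple_of=8 is rounded up to ((10 // 8) + 1) * 8 = 16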
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
|
|
"""
Test SBProcess APIs, including ReadMemory(), WriteMemory(), and others.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbutil import get_stopped_thread, state_type_to_str
class ProcessAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number(
"main.cpp",
"// Set break point at this line and check variable 'my_char'.")
@add_test_categories(['pyapi'])
def test_read_memory(self):
"""Test Python SBProcess.ReadMemory() API."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
error = lldb.SBError()
self.assertFalse(val.TypeIsPointerType())
content = process.ReadMemory(
val.AddressOf().GetValueAsUnsigned(), 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'x'",
exe=False,
startstr=b'x')
# Read (char *)my_char_ptr.
val = frame.FindValue("my_char_ptr", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
cstring = process.ReadCStringFromMemory(
val.GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='Does it work?')
# Get the SBValue for the global variable 'my_cstring'.
val = frame.FindValue("my_cstring", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 256 to read at most 256 bytes
# from the address, and expect to get a Python string as the result
# object!
self.assertFalse(val.TypeIsPointerType())
cstring = process.ReadCStringFromMemory(
val.AddressOf().GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='lldb.SBProcess.ReadCStringFromMemory() works!')
# Get the SBValue for the global variable 'my_uint32'.
val = frame.FindValue("my_uint32", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 4 to read 4 bytes
# from the address, and expect to get an int as the result!
self.assertFalse(val.TypeIsPointerType())
my_uint32 = process.ReadUnsignedFromMemory(
val.AddressOf().GetValueAsUnsigned(), 4, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("uint32 read is:", my_uint32)
if my_uint32 != 12345:
self.fail(
"Result from SBProcess.ReadUnsignedFromMemory() does not match our expected output")
@add_test_categories(['pyapi'])
def test_write_memory(self):
"""Test Python SBProcess.WriteMemory() API."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# If the variable does not have a load address, there's no sense
# continuing.
if not val.GetLocation().startswith("0x"):
return
# OK, let's get the hex location of the variable.
location = int(val.GetLocation(), 16)
        # The program logic sets the 'my_char' variable's memory content to 'x'.
# But we want to use the WriteMemory() API to assign 'a' to the
# variable.
# Now use WriteMemory() API to write 'a' into the global variable.
error = lldb.SBError()
result = process.WriteMemory(location, 'a', error)
if not error.Success() or result != 1:
self.fail("SBProcess.WriteMemory() failed")
# Read from the memory location. This time it should be 'a'.
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
content = process.ReadMemory(location, 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'a'",
exe=False,
startstr=b'a')
@add_test_categories(['pyapi'])
def test_access_my_int(self):
"""Test access 'my_int' using Python SBProcess.GetByteOrder() and other APIs."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_int'.
val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# If the variable does not have a load address, there's no sense
# continuing.
if not val.GetLocation().startswith("0x"):
return
# OK, let's get the hex location of the variable.
location = int(val.GetLocation(), 16)
        # Note that the canonical form of the bytearray is little endian.
from lldbsuite.test.lldbutil import int_to_bytearray, bytearray_to_int
byteSize = val.GetByteSize()
bytes = int_to_bytearray(256, byteSize)
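        # e.g. for a 4-byte int, int_to_bytearray(256, 4) yields the little-endian bytes [0x00, 0x01, 0x00, 0x00]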
byteOrder = process.GetByteOrder()
if byteOrder == lldb.eByteOrderBig:
bytes.reverse()
elif byteOrder == lldb.eByteOrderLittle:
pass
else:
# Neither big endian nor little endian? Return for now.
# Add more logic here if we want to handle other types.
return
        # The program logic gives the 'my_int' variable int type and an initial value of 0.
# But we want to use the WriteMemory() API to assign 256 to the
# variable.
# Now use WriteMemory() API to write 256 into the global variable.
error = lldb.SBError()
result = process.WriteMemory(location, bytes, error)
if not error.Success() or result != byteSize:
self.fail("SBProcess.WriteMemory() failed")
# Make sure that the val we got originally updates itself to notice the
# change:
self.expect(
val.GetValue(),
"SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
exe=False,
startstr='256')
# And for grins, get the SBValue for the global variable 'my_int'
# again, to make sure that also tracks the new value:
val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
self.expect(
val.GetValue(),
"SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
exe=False,
startstr='256')
# Now read the memory content. The bytearray should have (byte)1 as
# the second element.
content = process.ReadMemory(location, byteSize, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
# The bytearray_to_int utility function expects a little endian
# bytearray.
if byteOrder == lldb.eByteOrderBig:
content = bytearray(content, 'ascii')
content.reverse()
new_value = bytearray_to_int(content, byteSize)
if new_value != 256:
self.fail("Memory content read from 'my_int' does not match (int)256")
# Dump the memory content....
if self.TraceOn():
for i in content:
print("byte:", i)
@add_test_categories(['pyapi'])
def test_remote_launch(self):
"""Test SBProcess.RemoteLaunch() API with a process not in eStateConnected, and it should fail."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if self.TraceOn():
print("process state:", state_type_to_str(process.GetState()))
self.assertTrue(process.GetState() != lldb.eStateConnected)
error = lldb.SBError()
success = process.RemoteLaunch(
None, None, None, None, None, None, 0, False, error)
self.assertTrue(
not success,
"RemoteLaunch() should fail for process state != eStateConnected")
@add_test_categories(['pyapi'])
def test_get_num_supported_hardware_watchpoints(self):
"""Test SBProcess.GetNumSupportedHardwareWatchpoints() API with a process."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
error = lldb.SBError()
num = process.GetNumSupportedHardwareWatchpoints(error)
if self.TraceOn() and error.Success():
print("Number of supported hardware watchpoints: %d" % num)
@add_test_categories(['pyapi'])
@no_debug_info_test
def test_get_process_info(self):
"""Test SBProcess::GetProcessInfo() API with a locally launched process."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process and stop at the entry point.
launch_info = lldb.SBLaunchInfo(None)
launch_info.SetWorkingDirectory(self.get_process_working_directory())
launch_flags = launch_info.GetLaunchFlags()
launch_flags |= lldb.eLaunchFlagStopAtEntry
launch_info.SetLaunchFlags(launch_flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
if not error.Success():
self.fail("Failed to launch process")
# Verify basic process info can be retrieved successfully
process_info = process.GetProcessInfo()
self.assertTrue(process_info.IsValid())
file_spec = process_info.GetExecutableFile()
self.assertTrue(file_spec.IsValid())
process_name = process_info.GetName()
self.assertIsNotNone(process_name, "Process has a name")
self.assertGreater(len(process_name), 0, "Process name isn't blank")
self.assertEqual(file_spec.GetFilename(), "a.out")
self.assertNotEqual(
process_info.GetProcessID(), lldb.LLDB_INVALID_PROCESS_ID,
"Process ID is valid")
# Additional process info varies by platform, so just check that
# whatever info was retrieved is consistent and nothing blows up.
if process_info.UserIDIsValid():
self.assertNotEqual(
process_info.GetUserID(), lldb.UINT32_MAX,
"Process user ID is valid")
else:
self.assertEqual(
process_info.GetUserID(), lldb.UINT32_MAX,
"Process user ID is invalid")
if process_info.GroupIDIsValid():
self.assertNotEqual(
process_info.GetGroupID(), lldb.UINT32_MAX,
"Process group ID is valid")
else:
self.assertEqual(
process_info.GetGroupID(), lldb.UINT32_MAX,
"Process group ID is invalid")
if process_info.EffectiveUserIDIsValid():
self.assertNotEqual(
process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
"Process effective user ID is valid")
else:
self.assertEqual(
process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
"Process effective user ID is invalid")
if process_info.EffectiveGroupIDIsValid():
self.assertNotEqual(
process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
"Process effective group ID is valid")
else:
self.assertEqual(
process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
"Process effective group ID is invalid")
process_info.GetParentProcessID()
|
|
#!/usr/bin/python
#
# Siglent power supply-based battery charger for lead acid batteries.
#
# Implementation Plan
# (* indicates latest released version)
#
# Rev Goal
#*0.10 Initial release, emulates BQ24450 dual state charger, but
# with many additional features.
# 0.20 Retrofit planned PS interface API
# 0.30 Four state charger option for AGM batteries
#
# (C) Peter Soper (pete@soper.us) MIT License
# September, 2016
#
#usage: leadacid-charger.py [-h] [-b BULK_TIME_LIMIT] [-c CURRENT_LIMIT]
# [-d DEBUG_LEVEL] [-f] [-i ITAPER] [-n] [-o OUTPUT]
# [-r RATED_CAPACITY] [-t TIME_LIMIT] [-u USB_PATH]
#
#Siglent SPD3303S-based Lead Acid Charger.
#
#optional arguments:
# -h, --help show this help message and exit
# -b BULK_TIME_LIMIT, --bulk_time_limit BULK_TIME_LIMIT
# Bulk Charge time limit in seconds, default 18000
# -c CURRENT_LIMIT, --current_limit CURRENT_LIMIT
# Bulk current limit in amps, default 0.7
# -d DEBUG_LEVEL, --debug_level DEBUG_LEVEL
# Debug level, default 0
# -f, --float Skip straight to float state
# -i ITAPER, --itaper ITAPER
#                        Minimum taper current for bulk=>float transition,
# default 0.14
# -n, --no_float No float state
# -o OUTPUT, --output OUTPUT
# Log output pathname, default /b/batt/logs/spd3303s-
# charger.log
# -r RATED_CAPACITY, --rated_capacity RATED_CAPACITY
# Battery rated capacity, default 7.0
# -t TIME_LIMIT, --time_limit TIME_LIMIT
# Charging time limit in seconds, default 43200
# -u USB_PATH, --usb_path USB_PATH
# USB (instrument) device pathname, default /dev/usbtmc1
import argparse, os, sys, time, datetime
VERSION_STRING = '0.14'
FLOAT_EQUALITY_PERCENT = 1 # "close enough" % between two float values
RATED_CAPACITY = 7.0 # Battery amp hours
rated_capacity = RATED_CAPACITY # Can be overridden
CURRENT_LIMIT = RATED_CAPACITY * 0.2 # Current limit during bulk state
current_limit = CURRENT_LIMIT # Can be overridden
ITAPER = RATED_CAPACITY * 0.02 # Min current to reach before bulk=>float
itaper = ITAPER # Can be overridden
FLOAT_CURRENT = ITAPER * 2.0
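# Worked values for the default 7.0 Ah battery above:
#   CURRENT_LIMIT = 0.2 * 7.0  = 1.4  A  (bulk state limit)
#   ITAPER        = 0.02 * 7.0 = 0.14 A  (bulk => float handoff current)
#   FLOAT_CURRENT = 2 * 0.14   = 0.28 A  (float state limit)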
SAMPLE_DELAY = 10.0
PS_CHARGER_CHANNEL = 'CH1' # Which PS channel used as charger
debug_level = 0 # Debug/verbosity level
LOOP_COUNT = 2 # Debug mode creates artificially short states
TRICKLE_THRESHOLD = 10.5 # Battery voltage to get out of trickle state
TRICKLE_CURRENT = 0.025 # Charge current in trickle state
# was 14.45 until 9/6
BULK_VOLTAGE = 14.4 # Bulk charge voltage
FLOAT_VOLTAGE = 13.6 # Float charge voltage
FLOAT_ONLY_CURRENT_LIMIT = CURRENT_LIMIT  # Max current when -f option used
FLOAT_ONLY_CURRENT_LIMIT_DURATION = 10800 # Max time max current allowed w -f
DEFAULT_CHARGING_TIME_LIMIT = 3600*7 # Charging time limit
DEFAULT_BULK_TIME_LIMIT = 3600*5 # Bulk state time limit
DEFAULT_MAX_PERCENT_RISE = 10 # Percent current rise allowed during bulk state
max_percent_rise = DEFAULT_MAX_PERCENT_RISE
DEFAULT_MAX_CURRENT_RISE = 20 # Milliampere current rise allowed during bulk state
max_current_rise = DEFAULT_MAX_CURRENT_RISE
PS_WRITE_DELAY = 0.25 # Seconds to pause after write to PS
charging_time_limit = DEFAULT_CHARGING_TIME_LIMIT # Max time for a charge
bulk_time_limit = DEFAULT_BULK_TIME_LIMIT # Max time spent in bulk
DEFAULT_USB_PATH = '/dev/usbtmc1' # USBTMC interface for PS
DEFAULT_LOG_PATH = 'spd3303s-charger.log' # Log output path
out = None # Logging output file
pwr = None # Python Serial object for power supply
start_time = int(time.time()) # Unix Epoch seconds since Jan 1, 1970
coulombs = 0.0 # Cumulative amp-seconds
wattseconds = 0.0 # Cumulative
class SPDException(Exception):
"Exception base class"
class SPDWriteException(SPDException):
"Failed to complete an SCPI command write to the power supply"
def float_equals(v1, v2):
diff = abs(v1 - v2)
if v1 > v2:
temp = v1
else:
temp = v2
return ((diff / temp) * 100.0) < FLOAT_EQUALITY_PERCENT
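# Example: float_equals(13.6, 13.55) -> 0.05 / 13.6 * 100 ~= 0.37%, under 1%, so True;
#          float_equals(1.4, 1.3)    -> 0.1 / 1.4 * 100  ~= 7.1%,  so False.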
def elapsed_time():
"Return seconds since execution start"
return int(time.time()) - start_time
def myprint(s, console_only = False):
"Print a string and flush output"
print s
sys.stdout.flush()
if not console_only:
try:
out.write('{0:s}\n'.format(s))
except KeyboardInterrupt:
finish('Keyboard Interrupt')
except OSError:
finish('tried to write to log but got OSError')
except Exception as e:
finish('tried to write to log but got unknown error')
def write_inst(s, no_exception_handling = False):
"Write SCPI command to device. A write error is currently fatal."
global pwr
if debug_level > 0:
myprint('write_inst: ' + s, True)
if no_exception_handling:
os.write(pwr, s)
return
try:
os.write(pwr, s)
except KeyboardInterrupt:
finish('Keyboard Interrupt')
except OSError:
finish('tried to write ' + s + ' but got a write error')
except Exception as e:
finish('tried to write ' + s + ' but got unknown error')
def read_inst(cmd):
"Write SCPI command to device, retrying as needed to compensate for Siglent"
global pwr
if debug_level > 0:
myprint('read_inst: ' + cmd, True)
try:
s = os.read(pwr, 300)
except KeyboardInterrupt:
finish('Keyboard Interrupt')
except OSError:
myprint('read error: repeating query: ' + cmd)
write_inst(cmd)
time.sleep(PS_WRITE_DELAY)
s = read_inst(cmd)
except Exception as e:
        finish('tried to read response for ' + cmd + ' but got unknown error')
if debug_level > 0:
myprint('read_inst returning: ' + s, True)
return s
def ask(cmd):
"Write SCPI query and return response value"
write_inst(cmd)
time.sleep(PS_WRITE_DELAY)
return read_inst(cmd)
def get_current():
"Return PS output current"
return float(ask('MEAS:CURR? ' + PS_CHARGER_CHANNEL))
def get_voltage():
"Return PS output voltage"
return float(ask('MEAS:VOLT? ' + PS_CHARGER_CHANNEL))
def get_current_limit():
"Return PS current limit"
return float(ask(PS_CHARGER_CHANNEL + ':CURR?'))
def get_voltage_limit():
"Return PS voltage limit"
return float(ask(PS_CHARGER_CHANNEL + ':VOLT?'))
def set_current_limit(current):
"Change current limit"
while True:
try:
write_inst(PS_CHARGER_CHANNEL + ':CURR '+ str(current))
time.sleep(PS_WRITE_DELAY)
if float_equals(get_current_limit(), current):
return
except KeyboardInterrupt:
finish('Keyboard Interrupt')
except:
supply_off()
raise SPDWriteException
def set_voltage_limit(voltage):
"Change voltage limit"
while True:
try:
write_inst(PS_CHARGER_CHANNEL + ':VOLT ' + str(voltage))
time.sleep(PS_WRITE_DELAY)
if float_equals(get_voltage_limit(), voltage):
return
except KeyboardInterrupt:
finish('Keyboard Interrupt')
except:
supply_off()
raise SPDWriteException
def supply_on():
"Turn on the supply channel"
write_inst('OUTP ' + PS_CHARGER_CHANNEL + ', ON')
time.sleep(1)
myprint('Supply on', True)
def supply_off():
"Turn off the supply channel"
global coulombs, wattseconds
write_inst('OUTP ' + PS_CHARGER_CHANNEL + ', OFF', True)
time.sleep(1)
myprint('Supply off', True)
myprint('Total coulombs: ' + str(coulombs) +
' Total wattseconds: ' + str(wattseconds))
coulombs = wattseconds = 0.0
def finish(msg):
"Print optional message, turn off power supply channel, close paths, quit"
if msg != '':
print(msg)
supply_off()
os.close(pwr)
out.close()
quit()
def log(interval_start, return_current = True):
global coulombs, wattseconds
current = get_current()
voltage = get_voltage()
interval = time.time() - interval_start
interval_coulombs = current * interval
coulombs += interval_coulombs
interval_wattseconds = current * voltage * interval
wattseconds += interval_wattseconds
now = datetime.datetime.now().isoformat(sep=' ')
myprint('{0:s} {1:7.3f} {2:7.3f} {3:6.2f} {4:6.2f} {5:s}'.format(str(elapsed_time()),
voltage, current, coulombs / 3600.0, wattseconds / 3600.0, now))
if return_current:
return current
else:
return voltage
# State 1: trickle. While battery voltage lower than 10.5 volts apply
# 25 milliamperes
def trickle_state():
"Charger Trickle State"
if debug_level > 0:
myprint("Trickle", True)
set_current_limit(TRICKLE_CURRENT)
set_voltage_limit(BULK_VOLTAGE)
supply_on()
current = get_current()
voltage = get_voltage()
if debug_level > 1:
myprint('current: ' + str(current) + ' voltage: ' + str(voltage), True)
while voltage < TRICKLE_THRESHOLD:
interval_start = time.time()
if elapsed_time() >= charging_time_limit:
finish('charging time limit reached')
time.sleep(SAMPLE_DELAY)
voltage = log(interval_start, False)
# State 2: Bulk charge until current drops to Itaper.
# Watch out for current switching from falling back to rising. Detect this and
# stop the charger if detected.
def bulk_state():
"Bulk Charge State."
if debug_level > 0:
myprint("Bulk", True)
relative_start_bulk = int(time.time()-start_time)
set_current_limit(current_limit)
set_voltage_limit(BULK_VOLTAGE)
time.sleep(1.0)
current = get_current()
minimum_current = current
voltage = get_voltage()
myprint('current: ' + str(current) + ' voltage: ' + str(voltage), True)
if debug_level > 1:
myprint('current: ' + str(current) + ' voltage: ' + str(voltage), True)
if debug_level > 0:
myprint('Starting voltage ' + str(voltage), True)
myprint('Starting current ' + str(current))
while current > itaper:
interval_start = time.time()
current = get_current()
relative_time = elapsed_time()
if relative_time >= charging_time_limit:
finish('charging time limit reached')
if relative_time >= bulk_time_limit:
finish('bulk charging time limit reached')
# Don't tolerate increase in current
if current < minimum_current:
minimum_current = current
elif current > minimum_current:
if (((current - minimum_current) / minimum_current) *
100.0) > max_percent_rise:
finish('Bulk charge aborted due to % current rise')
if (current - minimum_current) > max_current_rise:
finish('Bulk charge aborted due to current rise')
time.sleep(SAMPLE_DELAY)
current = log(interval_start, True)
# State 3: Float charge
def float_state(is_just_float):
"Float Charge State."
if debug_level > 0:
myprint("Float", True)
set_voltage_limit(FLOAT_VOLTAGE)
set_current_limit(FLOAT_CURRENT)
if is_just_float:
set_current_limit(FLOAT_ONLY_CURRENT_LIMIT)
supply_on()
while True:
interval_start = time.time()
relative_time = elapsed_time()
if relative_time >= charging_time_limit:
finish('charging time limit reached')
time.sleep(SAMPLE_DELAY)
# If in float state following bulk, set limit at max current
# Don't remember what this is. Looks useless
#if not is_just_float:
# if (current >= FLOAT_ONLY_CURRENT_LIMIT):
# limit_duration += 1
# if limit_duration >= FLOAT_ONLY_CURRENT_LIMIT_DURATION:
# finish(
# 'Float current limit for ' +
# str(FLOAT_ONLY_CURRENT_LIMIT_DURATION) + ' seconds')
log(interval_start)
if __name__ == '__main__':
"Handle command line arguments(options) and run charging states in order"
parser = argparse.ArgumentParser(
description='Siglent SPD3303S-based Lead Acid Charger ' + 'rev ' +
VERSION_STRING)
parser.add_argument(
'-b', '--bulk_time_limit', default=str(DEFAULT_BULK_TIME_LIMIT),
help='Bulk Charge time limit in seconds, default ' +
str(DEFAULT_BULK_TIME_LIMIT))
parser.add_argument(
'-c', '--current_limit', default=str(CURRENT_LIMIT),
help='Bulk current limit in amps, default ' +
str(CURRENT_LIMIT))
parser.add_argument(
'-d', '--debug_level', default='0', help='Debug level, default 0')
parser.add_argument(
'-f', '--float', help='Skip straight to float state',
action='store_true')
parser.add_argument(
'-i', '--itaper', default=str(ITAPER),
        help='Minimum taper current for bulk=>float transition, default ' +
str(ITAPER))
parser.add_argument(
'-m', '--max_current_rise', default=DEFAULT_MAX_CURRENT_RISE, help=
'Max allowed bulk current rise, default ' + str(DEFAULT_MAX_CURRENT_RISE))
parser.add_argument(
'-n', '--no_float', help='No float state', action='store_true')
parser.add_argument(
'-o', '--output', default=DEFAULT_LOG_PATH, help=
'Log output pathname, default ' + str(DEFAULT_LOG_PATH))
parser.add_argument(
'-p', '--max_percent_rise', default=DEFAULT_MAX_PERCENT_RISE, help=
        'Max allowed bulk current rise in percent, default ' + str(DEFAULT_MAX_PERCENT_RISE))
parser.add_argument(
'-r', '--rated_capacity', default=RATED_CAPACITY,
help='Battery rated capacity, default ' + str(RATED_CAPACITY))
parser.add_argument(
'-t', '--time_limit', default=str(DEFAULT_CHARGING_TIME_LIMIT),
help='Charging time limit in seconds, default ' +
str(DEFAULT_CHARGING_TIME_LIMIT))
parser.add_argument(
'-u', '--usb_path', default=DEFAULT_USB_PATH, help=
'USB (instrument) device pathname, default ' + str(DEFAULT_USB_PATH))
args = parser.parse_args()
debug_level = int(args.debug_level)
bulk_time_limit = int(args.bulk_time_limit)
charging_time_limit = int(args.time_limit)
max_current_rise = int(args.max_current_rise)
max_percent_rise = int(args.max_percent_rise)
    if not float_equals(float(args.rated_capacity), RATED_CAPACITY):
rated_capacity = float(args.rated_capacity)
current_limit = rated_capacity * 0.1
itaper = rated_capacity * 0.02
if not float_equals(float(args.current_limit), CURRENT_LIMIT):
current_limit = float(args.current_limit)
if not float_equals(float(args.itaper), ITAPER):
itaper = float(args.itaper)
myprint('open: ' + args.usb_path + ' ' + args.output, True)
pwr = os.open(args.usb_path, os.O_RDWR)
out = open(args.output, 'a', 0)
myprint('SPD-3303S Programmed Lead Acid Charger', True)
myprint('USB Path: ' + args.usb_path, True)
myprint('LOG Path: ' + args.output, True)
    myprint('Rated battery capacity: ' + str(rated_capacity) + ' Ah', True)
myprint('Bulk current limit: ' + str(current_limit) + ' A', True)
    myprint('Bulk current taper cutoff: ' + str(itaper) + ' A', True)
myprint('Trickle current: ' + str(TRICKLE_CURRENT) + ' A', True)
myprint('Trickle threshold: ' + str(TRICKLE_THRESHOLD) + ' V', True)
myprint('Bulk charge voltage: ' + str(BULK_VOLTAGE) + ' V', True)
myprint('Float voltage: ' + str(FLOAT_VOLTAGE) + ' V', True)
if not args.float:
trickle_state()
bulk_state()
if not args.no_float:
float_state(False)
else:
float_state(True)
finish('')
|
|
import csv
import datetime
import logging
import math
import os
import re
import pandas as pd
from django.db import connections
from django.conf import settings
from md.models import (
FAILURE_TO_REMAIN_PURPOSE, INVESTIGATION_PURPOSE, PURPOSE_BY_INDEX,
PURPOSE_CHOICES, UNKNOWN_PURPOSE
)
from tsdata.sql import drop_constraints_and_indexes
from tsdata.utils import (call, download_and_unzip_data, flush_memcached,
get_csv_path, get_datafile_path, line_count)
logger = logging.getLogger(__name__)
STOP_REASON_CSV = 'md/data/STOP_REASON-normalization.csv'
PURPOSE_BY_STOP_REASON = dict()
AGENCY_MAPPING_CSV = 'md/data/MD_agencies.csv'
AGENCY_NAME_BY_CODE = dict()
TIME_OF_STOP_re = re.compile(r'(\d?\d):(\d\d)( [AP]M)?$')
DEFAULT_TIME_OF_STOP = '00:00'
GENDER_MALE_re = re.compile(r'^(M|male|MALE|m +)$')
GENDER_FEMALE_re = re.compile(r'^(F|female|w|F +)$')
SEIZED_CONTRABAND_re = re.compile(r'^(Contraband.*|paraphernalia.*|Both)$')
ETHNICITY_WHITE_re = re.compile(r'^(WHITE|W|W.)$')
ETHNICITY_BLACK_re = re.compile(r'^(BLACK|BLK)$')
ETHNICITY_TO_CODE = {
'HISPANIC': 'H',
'ASIAN': 'A',
'NATIVE AMERICAN': 'I',
'UNKNOWN': 'U',
'OTHER': 'U'
}
# Helpers for cleaning raw STOP_REASON:
# used to remove blanks and paragraph
STOP_REASON_cleanup_a_re = re.compile(r'^ *(\d\d?) *- *(\d+)\.?\d?\d?-? *[A-Za-z]?\d*[A-Za-z]? *(\(.*\))? *$') # noqa
# used to remove extraneous characters from two-digit codes
STOP_REASON_cleanup_b_re = re.compile(r'^ *(\d\d)\*?\-?`? *$')
# used to remove extraneous characters from three-digit codes
STOP_REASON_cleanup_c_re = re.compile(r'^ *(\d\d\d)($|\-|\.)')
DOB_re = re.compile(r'^(\d\d?)/(\d\d?)/(\d\d?)$')
MD_COLUMNS_TO_DROP = (
'WHATSEARCHED', 'STOPOUTCOME', 'CRIME_CHARGED',
'REGISTRATION_STATE', 'RESIDENCE_STATE', 'MD_COUNTY',
)
MD_FIRST_YEAR_TO_KEEP = 2013
def load_MD_agency_mappings():
"""
Read a CSV file that maps agency codes (as used in raw stop data) to
agency names and optional census GEOID values.
When the proper agency name hasn't been determined, it has the same value as
the agency code.
"""
AGENCY_NAME_BY_CODE.clear()
with open(AGENCY_MAPPING_CSV, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
next(reader) # skip headings
line_number = 1
for code, name, _ in reader:
line_number += 1
if code in AGENCY_NAME_BY_CODE:
raise ValueError('Line %d of %s has duplicated agency code "%s"' % (
line_number, AGENCY_MAPPING_CSV, code,
))
AGENCY_NAME_BY_CODE[code] = name
def load_STOP_REASON_normalization_rules():
"""
Read a CSV file that contains a column of STOP_REASON values for each of
the md.models.PURPOSE_CHOICES. The CSV file is from an Excel workbook
provided by SCSJ with minimal editing.
Sanity check that the headings in the CSV roughly match PURPOSE_CHOICES.
Output: Fill in global dictionary PURPOSE_BY_STOP_REASON.
"""
# expressions used to clean stop reasons as they appear in
# STOP_REASON_CSV
twodigit_code_re = re.compile(r'^(\d\d)\*? *$')
complex_code_re = re.compile(r'^(\d\d?-\d\d\d\d?)(\(|\.|-| |$)')
threedigit_code_re = re.compile(r'^(\d\d\d)$')
# match a small number of odd codes in column L ("Unknown") of STOP_REASON_CSV
known_weird_re = re.compile(r'^(06-b1b|11-140210) *$')
blank_re = re.compile(r'^ *$')
def clean_cell(s, line_number):
"""
Extract a code from a cell of the CSV, removing extraneous text.
E.g.,
"64*" => "64"
"22-412 - Seat belts required" => "22-412"
"13-106(d2)" => "13-106"
"10-309 (c)" => "13-309"
"401" => "401"
"""
m = twodigit_code_re.match(s)
if m:
return m.group(1)
m = complex_code_re.match(s)
if m:
return m.group(1)
m = threedigit_code_re.match(s)
if m:
return m.group(1)
m = known_weird_re.match(s)
if m:
# XXX One of the weird codes is lower-case in the spreadsheet
# but upper-case in the raw data.
return m.group(1).upper()
raise ValueError('Line %d of %s has bad cell value "%s"' % (
line_number, STOP_REASON_CSV, s
))
with open(STOP_REASON_CSV, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
next(reader)
headings = next(reader)
# Ensure that the headings in STOP_REASON_CSV roughly match the strings
# in PURPOSE_CHOICES. (Ignore case and extra trailing text in the CSV.)
if len(headings) != len(PURPOSE_CHOICES):
raise ValueError('PURPOSE_CHOICES out of sync with headings in %s' % STOP_REASON_CSV)
for i, heading in enumerate(headings):
if not heading.lower().startswith(PURPOSE_BY_INDEX[i].lower()):
raise ValueError('PURPOSE_CHOICES[%d] out of sync with heading in %s' % (
i, STOP_REASON_CSV
))
line_number = 2
for row in reader:
line_number += 1
for i, val in enumerate(row):
if not blank_re.match(val):
val = clean_cell(val, line_number)
# Does STOP_REASON_CSV have the same stop reason in multiple
# columns?
if val in PURPOSE_BY_STOP_REASON and PURPOSE_BY_STOP_REASON[val] != i:
raise ValueError('"%s" is in columns %d and %d' % (
val, PURPOSE_BY_STOP_REASON[val], i
))
PURPOSE_BY_STOP_REASON[val] = i
def fix_ETHNICITY(s):
if ETHNICITY_WHITE_re.match(s):
return 'W'
elif ETHNICITY_BLACK_re.match(s):
return 'B'
else:
code = ETHNICITY_TO_CODE.get(s)
if code:
return code
logger.info('Bad ethnicity: "%s"', s)
return 'U'
def fix_GENDER(s):
if GENDER_MALE_re.match(s):
return 'M'
elif GENDER_FEMALE_re.match(s):
return 'F'
else:
logger.info('Bad gender: "%s"', s)
return 'U'
def fix_SEIZED(s):
if SEIZED_CONTRABAND_re.match(s):
return 'Y'
else:
return 'N'
def fix_STOP_REASON(s):
"""
    Take a raw STOP_REASON value and clean it up, simplifying it enough
    to be looked up in STOP_REASON_CSV.
"""
m = STOP_REASON_cleanup_a_re.match(s)
if m:
return m.group(1) + '-' + m.group(2)
m = STOP_REASON_cleanup_b_re.match(s) or STOP_REASON_cleanup_c_re.match(s)
if m:
return m.group(1)
else:
return s
def purpose_from_STOP_REASON(s):
normalized = PURPOSE_BY_STOP_REASON.get(s)
if normalized is None:
if s not in ('', '-'):
logger.info('Bad STOP_REASON: "%s"', s)
normalized = UNKNOWN_PURPOSE
return normalized
def fix_TIME_OF_STOP(s):
s = s.strip()
m = TIME_OF_STOP_re.match(s)
if not m:
logger.info('Bad time of stop: "%s"', s)
return DEFAULT_TIME_OF_STOP
hour = int(m.group(1))
minute = int(m.group(2))
if not 0 <= hour < 24 or not 0 <= minute < 60:
logger.info('Bad time of stop: "%s"', s)
return DEFAULT_TIME_OF_STOP
return s
def fix_AGENCY(s):
name = AGENCY_NAME_BY_CODE.get(s)
if not name:
logger.error('Agency code "%s" not in %s', s, AGENCY_MAPPING_CSV)
name = s
return name
def compute_AGE(row):
dob = row['DOB']
stop_date = row['date']
m = DOB_re.match(dob)
if not m:
age = 0
else:
dob_year = int(m.group(3))
stop_date_year = stop_date.year
stop_date_year_of_century = stop_date_year % 100
stop_date_century = int(math.floor(stop_date_year / 100.0)) * 100
if dob_year >= stop_date_year_of_century:
dob_year += (stop_date_century - 100)
else:
dob_year += stop_date_century
dob = datetime.datetime(dob_year, int(m.group(1)), int(m.group(2)))
delta = stop_date - dob
age = int(delta.days / 365.25)
return age
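# Illustrative sketch (not part of the importer): how compute_AGE() resolves the
# two-digit DOB year to a century. For a stop dated 2014-06-01, a DOB of
# "4/15/95" gives dob_year=95 >= 14 (the stop's year of century), so the previous
# century is used and the DOB becomes 1995-04-15 (age ~19); a DOB of "4/15/02"
# gives dob_year=2 < 14, so the current century is used, i.e. 2002-04-15
# (age ~12). DOB values that do not match DOB_re fall back to age 0.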
def load_xls(xls_path):
logger.info('Loading {} into pandas'.format(xls_path))
xl = pd.ExcelFile(xls_path)
stops = pd.DataFrame()
columns = None
for sheet_num in range(len(xl.sheet_names)):
logger.info('Reading sheet "{}"'.format(xl.sheet_names[sheet_num]))
sheet = xl.parse(sheet_num, keep_default_na=False, na_values=[])
if sheet_num == 0:
columns = sheet.columns
else:
sheet.columns = columns
stops = stops.append(sheet, ignore_index=True)
return stops
def add_date_column(stops):
blank = pd.DataFrame({'blank': ' '}, index=range(len(stops['STOPDATE'])))
stops['date'] = pd.to_datetime(
stops['STOPDATE'].map(str) +
blank['blank'].map(str) +
stops['TIME_OF_STOP'].map(str)
)
def skip_initial_years(stops):
beginning = datetime.date(year=MD_FIRST_YEAR_TO_KEEP, month=1, day=1)
return stops.drop(stops[stops.date < beginning].index)
def process_time_of_stop(stops):
stops['TIME_OF_STOP'] = stops['TIME_OF_STOP'].apply(fix_TIME_OF_STOP)
add_date_column(stops)
return skip_initial_years(stops)
def add_age_column(stops):
stops['computed_AGE'] = stops.apply(compute_AGE, axis=1)
def add_purpose_column(stops):
load_STOP_REASON_normalization_rules()
stops['purpose'] = stops['STOP_REASON'].apply(purpose_from_STOP_REASON)
def drop_stops_by_purpose(stops):
rows_to_drop = stops[
(stops.purpose == INVESTIGATION_PURPOSE) | (stops.purpose == FAILURE_TO_REMAIN_PURPOSE)
].index
return stops.drop(rows_to_drop)
def fix_AGENCY_column(stops):
load_MD_agency_mappings()
stops['AGENCY'] = stops['AGENCY'].apply(fix_AGENCY)
def process_raw_data(stops, to_drop=MD_COLUMNS_TO_DROP):
# Drop some columns
stops.drop(list(to_drop), axis=1, inplace=True)
# Date manipulation first, to cut out some of the rows before other
# cleanup occurs
stops = process_time_of_stop(stops)
# Fix other data
stops['GENDER'] = stops['GENDER'].apply(fix_GENDER)
stops['SEIZED'] = stops['SEIZED'].apply(fix_SEIZED)
stops['ETHNICITY'] = stops['ETHNICITY'].apply(fix_ETHNICITY)
stops['STOP_REASON'] = stops['STOP_REASON'].apply(fix_STOP_REASON)
fix_AGENCY_column(stops)
# Add age, purpose, and index columns
add_age_column(stops)
add_purpose_column(stops)
stops = drop_stops_by_purpose(stops)
stops['index'] = range(1, len(stops) + 1) # adds column at end
# move the index column to the front
stops = stops[stops.columns.tolist()[-1:] + stops.columns.tolist()[:-1]]
return stops
def xls_to_csv(xls_path, csv_path):
assert not os.path.exists(csv_path)
stops = load_xls(xls_path)
stops = process_raw_data(stops)
logger.info("Converting {} > {}".format(xls_path, csv_path))
stops.to_csv(csv_path, index=False)
return stops
def run(url, destination=None, download=True):
"""Download MD data, extract, convert to CSV, and load into PostgreSQL"""
logger.info('*** MD Data Import Started ***')
destination = download_and_unzip_data(url, destination)
# Convert to CSV
xls_path = get_datafile_path(url, destination)
csv_path = get_csv_path(url, destination)
if not os.path.exists(csv_path):
xls_to_csv(xls_path, csv_path)
else:
logger.info("{} exists, skipping XLS->CSV conversion".format(csv_path))
csv_count = line_count(csv_path)
logger.debug('Rows: {}'.format(csv_count))
# drop constraints/indexes
drop_constraints_and_indexes(connections['traffic_stops_md'].cursor())
# use COPY to load CSV files as quickly as possible
copy_from(csv_path)
# Clear the query cache
flush_memcached()
def copy_from(csv_path):
"""Execute copy.sql to COPY csv data files into PostgreSQL database"""
sql_file = os.path.join(os.path.dirname(__file__), 'copy.sql')
md_csv_path = os.path.join(
os.path.dirname(__file__),
os.path.basename(AGENCY_MAPPING_CSV)
)
cmd = ['psql',
'-v', 'data_file={}'.format(csv_path),
'-v', 'md_time_zone={}'.format(settings.MD_TIME_ZONE),
'-v', 'md_csv_table={}'.format(md_csv_path),
'-f', sql_file,
settings.DATABASES['traffic_stops_md']['NAME']]
if settings.DATABASE_ETL_USER:
cmd.append(settings.DATABASE_ETL_USER)
call(cmd)
# https://gist.github.com/mangecoeur/1fbd63d4758c2ba0c470
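# Illustrative sketch only (hypothetical paths and settings): with
# settings.MD_TIME_ZONE = 'US/Eastern' and a downloaded CSV at /tmp/md/MD_data.csv,
# copy_from() above builds roughly
#   psql -v data_file=/tmp/md/MD_data.csv -v md_time_zone=US/Eastern \
#        -v md_csv_table=<this package>/MD_agencies.csv -f <this package>/copy.sql \
#        <traffic_stops_md database name> [<ETL user, when DATABASE_ETL_USER is set>]
# The exact values come from the Django settings and the download destination.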
|
|
# -*- coding: utf-8 -*-
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
import waffle
from django_statsd.clients import statsd
from post_request_task.task import (
_discard_tasks,
_start_queuing_tasks,
_send_tasks_and_stop_queuing,
_stop_queuing_tasks,
)
import olympia.core.logger
from olympia import amo
from olympia.amo.decorators import use_primary_db
from olympia.files.utils import lock
from olympia.lib.crypto.signing import SigningError
from olympia.reviewers.models import (
AutoApprovalNotEnoughFilesError,
AutoApprovalNoValidationResultError,
AutoApprovalSummary,
clear_reviewing_cache,
set_reviewing_cache,
)
from olympia.reviewers.utils import ReviewHelper
from olympia.scanners.models import ScannerResult
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.reviewers.auto_approve')
LOCK_NAME = 'auto-approve' # Name of the lock() used.
class Command(BaseCommand):
help = 'Auto-approve add-on versions based on predefined criteria'
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Fetch version candidates and perform all checks but do not '
'actually approve anything.',
)
def fetch_candidates(self):
"""Return a queryset with the Version instances that should be
considered for auto approval."""
return (
Version.objects.auto_approvable()
.order_by('nomination', 'created')
.distinct()
)
@use_primary_db
def handle(self, *args, **options):
"""Command entry point."""
self.dry_run = options.get('dry_run', False)
self.successful_verdict = (
amo.WOULD_HAVE_BEEN_AUTO_APPROVED if self.dry_run else amo.AUTO_APPROVED
)
self.stats = Counter()
# Get a lock before doing anything, we don't want to have multiple
# instances of the command running in parallel.
with lock(settings.TMP_PATH, LOCK_NAME) as lock_attained:
if lock_attained:
qs = self.fetch_candidates()
self.stats['total'] = len(qs)
for version in qs:
self.process(version)
self.log_final_summary(self.stats)
else:
# We didn't get the lock...
log.error('auto-approve lock present, aborting.')
def process(self, version):
"""Process a single version, figuring out if it should be auto-approved
and calling the approval code if necessary."""
already_locked = AutoApprovalSummary.check_is_locked(version)
if not already_locked:
# Lock the addon for ourselves if possible. Even though
            # AutoApprovalSummary.create_summary_for_version() will call
            # check_is_locked() again later when calculating the verdict,
# we have to do it now to prevent overwriting an existing lock with
# our own.
set_reviewing_cache(version.addon.pk, settings.TASK_USER_ID)
# Discard any existing celery tasks that may have been queued before:
# If there are any left at this point, it means the transaction from
# the previous loop iteration was not committed and we shouldn't
# trigger the corresponding tasks.
_discard_tasks()
# Queue celery tasks for this version, avoiding triggering them too
# soon...
_start_queuing_tasks()
try:
with transaction.atomic():
# ...and release the queued tasks to celery once transaction
# is committed.
transaction.on_commit(_send_tasks_and_stop_queuing)
log.info(
'Processing %s version %s...',
str(version.addon.name),
str(version.version),
)
if waffle.switch_is_active('run-action-in-auto-approve'):
# We want to execute `run_action()` only once.
summary_exists = AutoApprovalSummary.objects.filter(
version=version
).exists()
if summary_exists:
log.info(
'Not running run_action() because it has '
'already been executed'
)
else:
ScannerResult.run_action(version)
summary, info = AutoApprovalSummary.create_summary_for_version(
version, dry_run=self.dry_run
)
self.stats.update({k: int(v) for k, v in info.items()})
if summary.verdict == self.successful_verdict:
if summary.verdict == amo.AUTO_APPROVED:
self.approve(version)
self.stats['auto_approved'] += 1
verdict_string = summary.get_verdict_display()
else:
verdict_string = '%s (%s)' % (
summary.get_verdict_display(),
', '.join(summary.verdict_info_prettifier(info)),
)
log.info(
'Auto Approval for %s version %s: %s',
str(version.addon.name),
str(version.version),
verdict_string,
)
# At this point, any exception should have rolled back the transaction,
        # so even if we did create/update an AutoApprovalSummary instance,
        # it should have been rolled back too. This ensures that, for instance, a
# signing error doesn't leave the version and its autoapprovalsummary
# in conflicting states.
except (AutoApprovalNotEnoughFilesError, AutoApprovalNoValidationResultError):
log.info(
'Version %s was skipped either because it had no '
'files or because it had no validation attached.',
version,
)
self.stats['error'] += 1
except SigningError:
statsd.incr('reviewers.auto_approve.approve.failure')
log.info('Version %s was skipped because of a signing error', version)
self.stats['error'] += 1
finally:
# Always clear our own lock no matter what happens (but only ours).
if not already_locked:
clear_reviewing_cache(version.addon.pk)
            # Stop the post-request task queue before moving on (useful in tests to
            # leave a fresh state for the next test). Note that we don't want to
# send or clear queued tasks (they may belong to a transaction that
# has been rolled back, or they may not have been processed by the
# on commit handler yet).
_stop_queuing_tasks()
@statsd.timer('reviewers.auto_approve.approve')
def approve(self, version):
"""Do the approval itself, caling ReviewHelper to change the status,
sign the files, send the e-mail, etc."""
# Note: this should automatically use the TASK_USER_ID user.
helper = ReviewHelper(addon=version.addon, version=version)
if version.channel == amo.RELEASE_CHANNEL_LISTED:
helper.handler.data = {
# The comment is not translated on purpose, to behave like
# regular human approval does.
'comments': 'This version has been screened and approved for the '
'public. Keep in mind that other reviewers may look into '
'this version in the future and determine that it '
'requires changes or should be taken down.'
'\r\n\r\nThank you!'
}
else:
helper.handler.data = {'comments': 'automatic validation'}
helper.handler.approve_latest_version()
statsd.incr('reviewers.auto_approve.approve.success')
def log_final_summary(self, stats):
"""Log a summary of what happened."""
log.info('There were %d webextensions add-ons in the queue.', stats['total'])
if stats['error']:
log.info(
'%d versions were skipped because they had no files or had '
'no validation attached to their files, or signing failed on '
'their files.',
stats['error'],
)
if self.dry_run:
log.info(
'%d versions were marked as would have been approved.',
stats['auto_approved'],
)
else:
log.info('%d versions were approved.', stats['auto_approved'])
|
|
"""
DESCRIPTORS.TXTGREY: textural descriptors from grey-scale images.
@author: vlad
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
__version__ = 0.05
__author__ = 'Vlad Popovici'
__all__ = ['GaborDescriptor', 'LBPDescriptor', 'GLCMDescriptor', 'HOGDescriptor',
'HistDescriptor', 'HaarLikeDescriptor', 'MFSDescriptor', 'StatsDescriptor']
# from abc import ABCMeta, abstractmethod
import numpy as np
from numpy import dot
from scipy.stats.mstats import kurtosis, skew
from matplotlib.cbook import flatten
from future.utils import bytes_to_native_str as nstr
from scipy import ndimage as nd
from scipy.linalg import norm
from scipy.stats import entropy
from scipy.signal import convolve2d
from skimage.filters import gabor_kernel
from skimage.util import img_as_float
from skimage.feature.texture import greycoprops, greycomatrix, \
local_binary_pattern
from skimage.exposure import rescale_intensity
from skimage.feature import hog
# from skimage.transform import integral_image
from .basic import *
class GaborDescriptor(LocalDescriptor):
"""
Computes Gabor descriptors from an image. These descriptors are the means
and variances of the filter responses obtained by convolving an image with
a bank of Gabor filters.
"""
name = nstr(b'gabor')
def __init__(self, theta=np.array([0.0, np.pi / 4.0, np.pi / 2.0, 3.0 * np.pi / 4.0],
dtype=np.double),
freq=np.array([3.0 / 4.0, 3.0 / 8.0, 3.0 / 16.0], dtype=np.double),
sigma=np.array([1.0, 2 * np.sqrt(2.0)], dtype=np.double),
normalized=True):
"""
Initialize the Gabor kernels (only real part).
Args:
theta: numpy.ndarray (vector)
Contains the orientations of the filter; defaults to [0, pi/4, pi/2, 3*pi/4].
freq: numpy.ndarray (vector)
The frequencies of the Gabor filter; defaults to [3/4, 3/8, 3/16].
sigma: numpy.ndarray (vector)
The sigma parameter for the Gaussian smoothing filter; defaults to [1, 2*sqrt(2)].
normalized: bool
If true, the kernels are normalized
"""
self.kernels_ = [np.real(gabor_kernel(frequency=f, theta=t, sigma_x=s,
sigma_y=s))
for f in freq for s in sigma for t in theta]
if normalized:
for k, krn in enumerate(self.kernels_):
self.kernels_[k] = krn / np.sqrt((krn ** 2).sum())
return
def compute(self, image):
"""
Compute the Gabor descriptors on the given image.
Args:
image: numpy.ndarray (.ndim=2)
Grey scale image.
Returns:
numpy.ndarray (vector) containing the Gabor descriptors (means followed
by the variances of the filter responses)
"""
        nk = len(self.kernels_)
        ft = np.zeros(2 * nk, dtype=np.double)
        try:
            image = img_as_float(image)
            for k, krn in enumerate(self.kernels_):
                flt = nd.convolve(image, krn, mode='wrap')
                ft[k] = flt.mean()
                ft[k + nk] = flt.var()
        except Exception as e:
            # keep the zero-filled feature vector instead of failing with NameError
            print("Error in GaborDescriptor.compute(): {}".format(e))
        return ft
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Compute the distance between two sets of Gabor features. Possible distance types
are:
-euclidean
-cosine distance: this is not a proper distance!
"""
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm:
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
## end class GaborDescriptors
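# Minimal usage sketch (illustrative; `img` and `img2` are assumed 2D grey-scale
# numpy arrays, not defined here):
#   gd = GaborDescriptor()           # default bank: 4 orientations x 3 freqs x 2 sigmas = 24 kernels
#   ft1, ft2 = gd.compute(img), gd.compute(img2)   # each has 2*24 values: 24 means, then 24 variances
#   d = GaborDescriptor.dist(ft1, ft2, method='euclidean')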
class GLCMDescriptor(LocalDescriptor):
"""
Grey Level Co-occurrence Matrix: the image is decomposed into a number of
non-overlapping regions, and the GLCM features are computed on each of these
regions.
"""
name = nstr(b'glcm')
def __init__(self, wsize, dist=0.0, theta=0.0, levels=256, which=None,
symmetric=True, normed=True):
"""
Initialize GLCM.
Args:
wsize: uint
window size: the image is decomposed into small non-overlapping regions of size
<wsize x wsize> from which the GLCMs are computed. If the last region in a row or
the last row in an image are smaller than the required size, then they are not
used in computing the features.
dist: uint
pair distance
theta: float
pair angle
levels: uint
number of grey levels
which: string
which features to be computed from the GLCM. See the help for
skimage.feature.texture.greycoprops for details
symmetric: bool
consider symmetric pairs?
normed: bool
normalize the co-occurrence matrix, before computing the features?
"""
self.wsize_ = wsize
self.dist_ = dist
self.theta_ = theta
self.levels_ = levels
if which is None:
which = ['dissimilarity', 'correlation']
self.which_feats_ = [w.lower() for w in which]
self.symmetric_ = symmetric
self.normed_ = normed
return
def compute(self, image):
"""
Compute the GLCM features.
"""
assert (image.ndim == 2)
w, h = image.shape
nw = int(w / self.wsize_)
nh = int(h / self.wsize_)
nf = len(self.which_feats_)
ft = np.zeros((nf, nw * nh)) # features will be on rows
k = 0
for x in np.arange(0, nw):
for y in np.arange(0, nh):
x0, y0 = x * self.wsize_, y * self.wsize_
x1, y1 = x0 + self.wsize_, y0 + self.wsize_
glcm = greycomatrix(image[y0:y1, x0:x1],
self.dist_, self.theta_, self.levels_,
self.symmetric_, self.normed_)
ft[:, k] = np.array([greycoprops(glcm, f)[0, 0] for f in self.which_feats_])
k += 1
res = {}
k = 0
for f in self.which_feats_:
res[f] = ft[k, :]
k += 1
return res
@staticmethod
def dist(ft1, ft2, method='bh'):
"""
Computes the distance between two sets of GLCM features. The features are
assumed to have been computed using the same parameters. The distance is
based on comparing the distributions of these features.
Args:
ft1, ft2: dict
each dictionary contains for each feature a vector of values computed
from the images
method: string
the method used for computing the distance between the histograms of features:
'kl' - Kullback-Leibler divergence (symmetrized by 0.5*(KL(p,q)+KL(q,p))
'js' - Jensen-Shannon divergence: 0.5*(KL(p,m)+KL(q,m)) where m=(p+q)/2
                'bh' - Bhattacharyya distance: -log(sum_i sqrt(p_i*q_i))
'ma' - Matusita distance: sqrt(sum_i (sqrt(p_i)-sqrt(q_i))**2)
Returns:
dict
a dictionary with distances computed between pairs of features
"""
# distance methods
dm = {'kl': lambda x_, y_: 0.5 * (entropy(x_, y_) + entropy(y_, x_)),
'js': lambda x_, y_: 0.5 * (entropy(x_, 0.5 * (x_ + y_)) + entropy(y_, 0.5 * (x_ + y_))),
'bh': lambda x_, y_: -np.log(np.sum(np.sqrt(x_ * y_))),
'ma': lambda x_, y_: np.sqrt(np.sum((np.sqrt(x_) - np.sqrt(y_)) ** 2))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
res = {}
for k in ft1.keys():
if k in ft2.keys():
# build the histograms:
mn = min(ft1[k].min(), ft2[k].min())
mx = max(ft1[k].max(), ft2[k].max())
h1, _ = np.histogram(ft1[k], normed=True, bins=10, range=(mn, mx))
h2, _ = np.histogram(ft2[k], normed=True, bins=10, range=(mn, mx))
res[k] = dm[method](h1, h2)
return res
# end class GLCMDescriptors
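# Worked example (illustrative only) of the 'bh' distance used by
# GLCMDescriptor.dist() above (and reused by the LBP and histogram descriptors
# below): for two normalized histograms p = [0.5, 0.5] and q = [0.9, 0.1],
#   BH(p, q) = -log(sqrt(0.5*0.9) + sqrt(0.5*0.1)) = -log(0.894) ~= 0.11
# while BH(p, p) = -log(0.5 + 0.5) = 0, so identical distributions score 0 and
# the value grows as the histograms diverge. The 'kl', 'js' and 'ma' variants
# behave analogously on the per-feature histograms built in dist().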
class LBPDescriptor(LocalDescriptor):
"""
Local Binary Pattern for texture description. A LBP descriptor set is a
histogram of LBPs computed from the image.
"""
name = nstr(b'lbp')
def __init__(self, radius=3, npoints=None, method='uniform'):
"""
Initialize a LBP descriptor set. See skimage.feature.texture.local_binary_pattern
for details on the meaning of parameters.
Args:
radius: int
defaults to 3
npoints: int
defaults to None. If None, npoints is set to 8*radius
method: string
defaults to 'uniform'. Could be 'uniform', 'ror', 'var', 'nri_uniform'
"""
self.radius_ = radius
self.npoints_ = radius * 8 if npoints is None else npoints
self.method_ = method.lower()
self.nhbins_ = self.npoints_ + 2
return
def compute(self, image):
"""
Compute the LBP features. These features are returned as histograms of
LBPs.
"""
        hist = np.zeros(self.nhbins_, dtype=np.double)
        try:
            lbp = local_binary_pattern(image, self.npoints_, self.radius_, self.method_)
            hist, _ = np.histogram(lbp, normed=True, bins=self.nhbins_, range=(0, self.nhbins_))
        except Exception as e:
            # keep the zero-filled histogram instead of failing with NameError
            print("Error in LBPDescriptor.compute(): {}".format(e))
        return hist
@staticmethod
def dist(ft1, ft2, method='bh'):
"""
Computes the distance between two sets of LBP features. The features are
assumed to have been computed using the same parameters. The features
are represented as histograms of LBPs.
Args:
ft1, ft2: numpy.ndarray (vector)
histograms of LBPs as returned by compute()
method: string
the method used for computing the distance between the two sets of features:
'kl' - Kullback-Leibler divergence (symmetrized by 0.5*(KL(p,q)+KL(q,p))
'js' - Jensen-Shannon divergence: 0.5*(KL(p,m)+KL(q,m)) where m=(p+q)/2
                'bh' - Bhattacharyya distance: -log(sum_i sqrt(p_i*q_i))
'ma' - Matusita distance: sqrt(sum_i (sqrt(p_i)-sqrt(q_i))**2)
"""
# distance methods
dm = {'kl': lambda x_, y_: 0.5 * (entropy(x_, y_) + entropy(y_, x_)),
'js': lambda x_, y_: 0.5 * (entropy(x_, 0.5 * (x_ + y_)) + entropy(y_, 0.5 * (x_ + y_))),
'bh': lambda x_, y_: -np.log(np.sum(np.sqrt(x_ * y_))),
'ma': lambda x_, y_: np.sqrt(np.sum((np.sqrt(x_) - np.sqrt(y_)) ** 2))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end class LBPDescriptors
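# Minimal usage sketch (illustrative; `img` and `img2` are assumed 2D grey-scale
# numpy arrays, not defined here):
#   lbp = LBPDescriptor(radius=3)            # npoints defaults to 8*radius = 24
#   h1, h2 = lbp.compute(img), lbp.compute(img2)   # histograms with npoints + 2 = 26 bins
#   d = LBPDescriptor.dist(h1, h2, method='bh')
# The "+ 2" matches the 'uniform' LBP encoding, which yields npoints + 1 uniform
# pattern labels plus one extra label for all non-uniform patterns.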
# MFSDescriptors - Multi-Fractal Dimensions
class MFSDescriptor(LocalDescriptor):
"""
Multi-Fractal Dimensions for texture description.
Adapted from IMFRACTAL project at https://github.com/rbaravalle/imfractal
"""
name = nstr(b'fract')
def __init__(self, _nlevels_avg=1, _wsize=15, _niter=1):
"""
        Initialize an MFSDescriptor object.
Arguments:
_nlevels_avg: number of levels to be averaged in density computation (uint)
=1: no averaging
_wsize: size of the window for computing descriptors (uint)
_niter: number of iterations
"""
self.nlevels_avg = _nlevels_avg
self.wsize = _wsize
self.niter = _niter
return
def compute(self, im):
"""
Computes MFS over the given image.
Arguments:
im: image (grey-scale) (numpy.ndarray)
Returns:
a vector of descriptors (numpy.array)
"""
## TODO: this needs much polishing to get it run faster!
assert (im.ndim == 2)
# Using [0..255] to denote the intensity profile of the image
grayscale_box = [0, 255]
# Preprocessing: default intensity value of image ranges from 0 to 255
if abs(im).max() < 1:
im = rescale_intensity(im, out_range=(0, 255))
#######################
### Estimating density function of the image
### by solving least squares for D in the equation
### log10(bw) = D*log10(c) + b
r = 1.0 / max(im.shape)
c = np.log10(r * np.arange(start=1, stop=self.nlevels_avg + 1))
bw = np.zeros((self.nlevels_avg, im.shape[0], im.shape[1]), dtype=np.float32)
bw[0, :, :] = im + 1
def _gauss_krn(size):
""" Returns a normalized 2D gauss kernel array for convolutions """
if size <= 3:
sigma = 1.5
else:
sigma = size / 2.0
y, x = np.mgrid[-(size - 1.0) / 2.0:(size - 1.0) / 2.0 + 1, -(size - 1.0) / 2.0:(size - 1.0) / 2.0 + 1]
s2 = 2.0 * sigma ** 2
g = np.exp(-(x ** 2 + y ** 2) / s2)
return g / g.sum()
k = 1
if self.nlevels_avg > 1:
bw[1, :, :] = convolve2d(bw[0, :, :], _gauss_krn(k + 1), mode="full")[1:, 1:] * ((k + 1) ** 2)
for k in np.arange(2, self.nlevels_avg):
temp = convolve2d(bw[0, :, :], _gauss_krn(k + 1), mode="full") * ((k + 1) ** 2)
if k == 4:
bw[k] = temp[k - 1 - 1:temp.shape[0] - (k / 2), k - 1 - 1:temp.shape[1] - (k / 2)]
else:
bw[k] = temp[k - 1:temp.shape[0] - (1), k - 1:temp.shape[1] - (1)]
bw = np.log10(bw)
n1 = np.sum(c ** 2)
n2 = bw[0] * c[0]
for k in np.arange(1, self.nlevels_avg):
n2 += bw[k] * c[k]
sum3 = np.sum(bw, axis=0)
if self.nlevels_avg > 1:
D = (n2 * self.nlevels_avg - c.sum() * sum3) / (n1 * self.nlevels_avg - c.sum() ** 2)
min_D, max_D = 1.0, 4.0
D = grayscale_box[1] * (D - min_D) / (max_D - min_D) + grayscale_box[0]
else:
D = im
D = D[self.nlevels_avg - 1:D.shape[0] - self.nlevels_avg + 1,
self.nlevels_avg - 1:D.shape[1] - self.nlevels_avg + 1]
IM = np.zeros(D.shape)
gap = np.ceil((grayscale_box[1] - grayscale_box[0]) / np.float32(self.wsize))
center = np.zeros(self.wsize)
for k in np.arange(1, self.wsize + 1):
bin_min = (k - 1) * gap
bin_max = k * gap - 1
center[k - 1] = round((bin_min + bin_max) / 2.0)
D = ((D <= bin_max) & (D >= bin_min)).choose(D, center[k - 1])
D = ((D >= bin_max)).choose(D, 0)
D = ((D < 0)).choose(D, 0)
IM = D
# Constructing the filter for approximating log fitting
r = max(IM.shape)
c = np.zeros(self.niter)
        c[0] = 1
        for k in range(1, self.niter):
            c[k] = c[k - 1] / (k + 1)
        c = c / sum(c)
# Construct level sets
        Idx_IM = np.zeros(IM.shape)
for k in range(0, self.wsize):
IM = (IM == center[k]).choose(IM, k + 1)
Idx_IM = IM
IM = np.zeros(IM.shape)
# Estimate MFS by box-counting
num = np.zeros(self.niter)
MFS = np.zeros(self.wsize)
for k in range(1, self.wsize + 1):
IM = np.zeros(IM.shape)
IM = (Idx_IM == k).choose(Idx_IM, 255 + k)
IM = (IM < 255 + k).choose(IM, 0)
IM = (IM > 0).choose(IM, 1)
temp = max(IM.sum(), 1)
            num[0] = np.log10(temp) / np.log10(r)
for j in range(2, self.niter + 1):
mask = np.ones((j, j))
bw = convolve2d(IM, mask, mode="full")[1:, 1:]
indx = np.arange(0, IM.shape[0], j)
indy = np.arange(0, IM.shape[1], j)
bw = bw[np.ix_(indx, indy)]
idx = (bw > 0).sum()
temp = max(idx, 1)
num[j - 1] = np.log10(temp) / np.log10(r / j)
MFS[k - 1] = sum(c * num)
return MFS
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Compute the distance between two sets of multifractal dimension features.
Possible distance types are:
-Euclidean
-cosine distance: this is not a proper distance!
"""
assert (ft1.ndim == ft2.ndim == 1)
assert (ft1.size == ft2.size)
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end class MFSDescriptors
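# Minimal usage sketch (illustrative; `img` and `other_img` are assumed 2D
# grey-scale numpy arrays, not defined here):
#   mfs = MFSDescriptor(_nlevels_avg=1, _wsize=15, _niter=1)
#   f = mfs.compute(img)             # vector of _wsize values, one box-counting
#                                    # dimension per intensity level set
#   d = MFSDescriptor.dist(f, mfs.compute(other_img), method='euclidean')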
class HOGDescriptor(LocalDescriptor):
"""
Provides local descriptors in terms of histograms of oriented gradients.
"""
name = nstr(b'hog')
def __init__(self, _norient=9, _ppc=(128, 128), _cpb=(4, 4)):
"""
        Initialize a HOGDescriptor object. For details see the HOG
        descriptor in the scikit-image package:
        skimage.feature.hog
:param _norient: uint
number of orientations of the gradients
:param _ppc: uint
pixels per cell
:param _cpb: uint
cells per block
"""
self.norient = _norient
self.ppc = _ppc
self.cpb = _cpb
return
def compute(self, image):
"""
Computes HOG on a given image.
:param image: numpy.ndarray
:return: numpy.ndarray
a vector of features
"""
        r = hog(image, orientations=self.norient, pixels_per_cell=self.ppc,
                cells_per_block=self.cpb, visualise=False, normalise=False)
return r
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Compute the distance between two sets of HOG features. Possible distance types
are:
-Euclidean
-cosine distance: this is not a proper distance!
"""
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end HOGDescriptors
class HistDescriptor(LocalDescriptor):
"""
Provides local descriptors in terms of histograms of grey levels.
"""
name = nstr(b'hist')
def __init__(self, _interval=(0, 1), _nbins=10):
"""
        Initialize a HistDescriptor object: a simple histogram of
grey-levels
:param _interval: tuple
the minimum and maximum values to be accounted for
:param _nbins: uint
number of bins in the histogram
"""
self.interval = _interval
self.nbins = _nbins
return
def compute(self, image):
"""
Computes the histogram on a given image.
:param image: numpy.ndarray
:return: numpy.ndarray
a vector of frequencies
"""
if image.ndim != 2:
raise ValueError("Only grey-level images are supported")
h, _ = np.histogram(image, normed=True, bins=self.nbins, range=self.interval)
return h
@staticmethod
def dist(ft1, ft2, method='bh'):
"""
Computes the distance between two sets of histogram features.
Args:
ft1, ft2: numpy.ndarray (vector)
histograms as returned by compute()
method: string
the method used for computing the distance between the two sets of features:
'kl' - Kullback-Leibler divergence (symmetrized by 0.5*(KL(p,q)+KL(q,p))
'js' - Jensen-Shannon divergence: 0.5*(KL(p,m)+KL(q,m)) where m=(p+q)/2
                'bh' - Bhattacharyya distance: -log(sum_i sqrt(p_i*q_i))
'ma' - Matusita distance: sqrt(sum_i (sqrt(p_i)-sqrt(q_i))**2)
"""
# distance methods
dm = {'kl': lambda x_, y_: 0.5 * (entropy(x_, y_) + entropy(y_, x_)),
'js': lambda x_, y_: 0.5 * (entropy(x_, 0.5 * (x_ + y_)) + entropy(y_, 0.5 * (x_ + y_))),
'bh': lambda x_, y_: -np.log(np.sum(np.sqrt(x_ * y_))),
'ma': lambda x_, y_: np.sqrt(np.sum((np.sqrt(x_) - np.sqrt(y_)) ** 2))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end HistDescriptors
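# Minimal usage sketch (illustrative): the default _interval=(0, 1) matches float
# images such as those returned by skimage.util.img_as_float; for 8-bit integer
# images the range would be passed explicitly, e.g.
#   hd = HistDescriptor(_interval=(0, 255), _nbins=32)
#   d = HistDescriptor.dist(hd.compute(img_a), hd.compute(img_b), method='js')
# (`img_a` and `img_b` are assumed 2D grey-scale numpy arrays, not defined here.)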
# Haar-like descriptors
class HaarLikeDescriptor(LocalDescriptor):
"""
    Provides local descriptors in terms of responses to a series of Haar-like
features [1]_.
The coding is inspired by HaarLikeFeature class from SimpleCV (www.simplecv.org).
.. [1] http://en.wikipedia.org/wiki/Haar-like_features
"""
name = nstr(b'haar')
def __init__(self, _haars, _norm=True):
"""
        Initialize a HaarLikeDescriptor object.
:param _haars: list
a list of feature descriptors. A feature descriptor is a list of points (row, column) in a normalized
coordinate system ((0,0) -> (1,1)) describing the "positive" (black) patches from a Haar-like
feature. All the patches not specified in this list are considered "negative" (white).
The value corresponding to such a feature is the (weighted) sum of pixel intensities covered by
"positive" patches from which the (weighted) sum of pixel intensities covered by "negative" patches
is subtracted.
See some examples at:
- http://www.codeproject.com/Articles/27125/Ultra-Rapid-Object-Detection-in-Computer-Vision-Ap
- http://en.wikipedia.org/wiki/Haar-like_features
Examples of Haar-like features coding:
- a Haar-like feature in which the left side is "positive" (*) and the right side "negative" (.):
+-------+-------+
|*******|.......|
|*******|.......|
|*******|.......|
|*******|.......|
|*******|.......|
|*******|.......|
+-------+-------+
            The corresponding coding is: [[(0.0, 0.0), (1.0, 0.5)]] (points are (row, column)).
- a Haar-like feature with diagonal "positive" (*) patches:
+-------+-------+
|*******|.......|
|*******|.......|
|*******|.......|
+-------+-------+
|.......|*******|
|.......|*******|
|.......|*******|
+-------+-------+
The corresponding coding is: [[(0.0, 0.0), (0.5, 0.5)], [(0.5, 0.5), (1.0, 1.0)]].
:param _norm: boolean
Should the features be normalized? (scale-independent?) Default: True
"""
self.haars = _haars
self.nfeats = len(_haars)
self.norm = _norm
# Check that all coordinates are between 0 and 1:
if any([_p < 0.0 or _p > 1.0 for _p in flatten(_haars)]):
raise ValueError("Improper Haar feature specification.")
return
def compute(self, image):
"""
Computes the Haar-like descriptors on an INTEGRAL image.
:param image: numpy.ndarray
This must be the integral image, as computed by skimage.transform.integral_image(),
for example. This format does not contain the first row and column of 0s.
        Note: normalization is controlled by the `_norm` flag passed to __init__;
            when enabled, each value is divided by the effective number of pixels
            in the image.
:return: numpy.ndarray
a vector of feature values (one per Haar-like feature)
"""
if image.ndim != 2:
raise ValueError("Only grey-level images are supported")
h, w = image.shape
h -= 1
w -= 1
nrm_fact = h * w if self.norm else 1.0
f = np.zeros(self.nfeats, dtype=np.float)
i = 0
S0 = image[h, w] + image[0, 0] - image[h, 0] - image[0, w] # integral over the image
for hr in self.haars: # for each Haar-like feature
            S = 0  # will contain the sum of positive patches in the feature
for p in hr: # for each patch in the current feature
                a, b = p  # (row, col) corners of the patch, in normalized coordinates
                row_a = np.int(np.floor(a[0] * h))
                col_a = np.int(np.floor(a[1] * w))
                row_b = np.int(np.floor(b[0] * h))
                col_b = np.int(np.floor(b[1] * w))
S += image[row_b, col_b] + image[row_a, col_a] - image[row_b, col_a] - image[row_a, col_b]
# The final value of the Haar-like feature is the sum of positive patches minus
# the sum of negative patches. Since everything that is not specified as positive
# patch is considered negative, the sum of the negative patches is the total sum
# in the image (corner bottom-right in the integral image) minus the sum of positive
# ones. Hence, the value of the Haar-like feature is 2*S - S0
f[i] = (2.0 * S - S0) / nrm_fact
i += 1
return f
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Computes the distance between two Haar-like feature vectors.
:param ft1: a vector of features
:type ft1: numpy.array (1xn)
:param ft2: a vector of features
:type ft2: numpy.array (1xn)
:param method: the method for computing the distance
:type method: string
:return: a distance
:rtype: float
"""
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
@staticmethod
def haars1():
"""
Generates a list of Haar-like feature specifications.
:return:
:rtype:
"""
h = [
[[(0.0, 0.0), (0.5, 0.5)], [(0.5, 0.5), (1.0, 1.0)]], # diagonal blocks
[[(0.0, 0.0), (1.0, 0.5)]], # vertical edge
[[(0.0, 0.0), (0.5, 1.0)]], # horizontal edge
[[(0.0, 0.33), (1.0, 0.67)]], # vertical central band
[[(0.33, 0.0), (0.67, 1.0)]], # horizontal central band
[[(0.25, 0.25), (0.75, 0.75)]]
]
return h
# end HaarLikeDescriptor
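# Worked example (illustrative only) of the 2*S - S0 arithmetic in
# HaarLikeDescriptor.compute(): for an 8x8 all-ones image the skimage-style
# integral image has ii[i, j] = (i+1)*(j+1), so after the h/w decrements
# S0 = ii[7,7] + ii[0,0] - ii[7,0] - ii[0,7] = 64 + 1 - 8 - 8 = 49. For the
# "vertical edge" feature [[(0.0, 0.0), (1.0, 0.5)]] the positive patch maps to
# rows 0..7, cols 0..3, giving S = 32 + 1 - 8 - 4 = 21 and a normalized value of
# (2*21 - 49)/49 ~= -0.14. On a flat image the value is near zero (the residual
# comes from the floor() split of the 7 usable columns); a brighter left half
# pushes the value positive and a brighter right half pushes it negative.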
# Summary statistics descriptor
class StatsDescriptor(LocalDescriptor):
"""
A very simple local descriptor based on the first moments
statistics.
"""
name = nstr(b'stats')
def __init__(self, stats=None):
self._statsfn = {
'mean': lambda x_: x_.mean(),
'std': lambda x_: x_.std(),
'kurtosis': lambda x_: kurtosis(x_, axis=None, fisher=True),
'skewness': lambda x_: skew(x_, axis=None, bias=True)
}
if stats is None:
self.stats = ['mean', 'std']
else:
self.stats = [s.lower() for s in stats]
for s in self.stats:
if s not in self._statsfn:
raise ValueError('Unknown summary statistic')
def compute(self, image):
return np.array([self._statsfn[s](image) for s in self.stats])
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Computes the distance between two Stats feature vectors.
:param ft1: a vector of features
:type ft1: numpy.array (1xn)
:param ft2: a vector of features
:type ft2: numpy.array (1xn)
:param method: the method for computing the distance
:type method: string
:return: a distance
:rtype: float
"""
return norm(ft1 - ft2)
# end StatsDescriptor
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Provides tests related with allowed TLS protocol version restrictions.
"""
import os
import ssl
import sys
import re
import time
from subprocess import Popen, PIPE
from qpid_dispatch.management.client import Node
from system_test import TestCase, main_module, Qdrouterd, DIR, SkipIfNeeded
from system_test import unittest
from proton import SASL, Url, SSLDomain, SSLUnavailable
from proton.utils import BlockingConnection
from distutils.version import StrictVersion
import proton
import cproton
class RouterTestSslBase(TestCase):
"""
Base class to help with SSL related testing.
"""
# If unable to determine which protocol versions are allowed system wide
DISABLE_SSL_TESTING = False
DISABLE_REASON = "Unable to determine MinProtocol"
@staticmethod
def ssl_file(name):
"""
Returns fully qualified ssl certificate file name
:param name:
:return:
"""
return os.path.join(DIR, 'ssl_certs', name)
@classmethod
def create_sasl_files(cls):
"""
Creates the SASL DB
:return:
"""
# Create a sasl database.
pipe = Popen(['saslpasswd2', '-c', '-p', '-f', 'qdrouterd.sasldb',
'-u', 'domain.com', 'test'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
result = pipe.communicate('password')
assert pipe.returncode == 0, \
"saslpasswd2 exit status %s, output:\n%s" % (pipe.returncode, result)
# Create a SASL configuration file.
with open('tests-mech-PLAIN.conf', 'w') as sasl_conf:
sasl_conf.write("""
pwcheck_method: auxprop
auxprop_plugin: sasldb
sasldb_path: qdrouterd.sasldb
mech_list: ANONYMOUS DIGEST-MD5 EXTERNAL PLAIN
# The following line stops spurious 'sql_select option missing' errors when cyrus-sql-sasl plugin is installed
sql_select: dummy select
""")
class RouterTestSslClient(RouterTestSslBase):
"""
Starts a router with multiple listeners, all of them using an sslProfile.
Then it runs multiple tests to validate that only the allowed protocol versions
are being accepted through the related listener.
"""
# Listener ports for each TLS protocol definition
PORT_TLS1 = 0
PORT_TLS11 = 0
PORT_TLS12 = 0
PORT_TLS13 = 0
PORT_TLS1_TLS11 = 0
PORT_TLS1_TLS12 = 0
PORT_TLS11_TLS12 = 0
PORT_TLS_ALL = 0
PORT_TLS_SASL = 0
PORT_SSL3 = 0
TIMEOUT = 3
    # If using OpenSSL 1.1 or greater, TLSv1.2 is always allowed
OPENSSL_OUT_VER = None
try:
OPENSSL_VER_1_1_GT = ssl.OPENSSL_VERSION_INFO[:2] >= (1, 1)
except AttributeError:
OPENSSL_VER_1_1_GT = False
# If still False, try getting it from "openssl version" (command output)
    # The version from ssl.OPENSSL_VERSION_INFO reflects the OpenSSL version that
    # Python was compiled with, not the one installed on the system.
if not OPENSSL_VER_1_1_GT:
print("Python libraries SSL Version < 1.1")
try:
p = Popen(['openssl', 'version'], stdout=PIPE, universal_newlines=True)
openssl_out = p.communicate()[0]
m = re.search(r'[0-9]+\.[0-9]+\.[0-9]+', openssl_out)
OPENSSL_OUT_VER = m.group(0)
OPENSSL_VER_1_1_GT = StrictVersion(OPENSSL_OUT_VER) >= StrictVersion('1.1')
print("OpenSSL Version found = %s" % OPENSSL_OUT_VER)
except:
pass
# Following variables define TLS versions allowed by openssl
OPENSSL_MIN_VER = 0
OPENSSL_MAX_VER = 9999
OPENSSL_ALLOW_TLSV1 = True
OPENSSL_ALLOW_TLSV1_1 = True
OPENSSL_ALLOW_TLSV1_2 = True
OPENSSL_ALLOW_TLSV1_3 = False
# Test if OpenSSL has TLSv1_3
OPENSSL_HAS_TLSV1_3 = False
if OPENSSL_VER_1_1_GT:
try:
ssl.TLSVersion.TLSv1_3
OPENSSL_HAS_TLSV1_3 = True
except:
pass
# Test if Proton supports TLSv1_3
try:
dummydomain = SSLDomain(SSLDomain.MODE_CLIENT)
PROTON_HAS_TLSV1_3 = cproton.PN_OK == cproton.pn_ssl_domain_set_protocols(dummydomain._domain, "TLSv1.3")
print("TLSV1_3? Proton has: %s, OpenSSL has: %s" % (PROTON_HAS_TLSV1_3, OPENSSL_HAS_TLSV1_3))
except SSLUnavailable:
PROTON_HAS_TLSV1_3 = False
# When using OpenSSL >= 1.1 and python >= 3.7, we can retrieve OpenSSL min and max protocols
if OPENSSL_VER_1_1_GT:
if sys.version_info >= (3, 7):
if OPENSSL_HAS_TLSV1_3 and not PROTON_HAS_TLSV1_3:
# If OpenSSL has 1.3 but proton won't let us turn it on and off then
# this test fails because v1.3 runs unexpectedly.
RouterTestSslBase.DISABLE_SSL_TESTING = True
RouterTestSslBase.DISABLE_REASON = "Proton version does not support TLSv1.3 but OpenSSL does"
else:
OPENSSL_CTX = ssl.create_default_context()
OPENSSL_MIN_VER = OPENSSL_CTX.minimum_version
OPENSSL_MAX_VER = OPENSSL_CTX.maximum_version if OPENSSL_CTX.maximum_version > 0 else 9999
OPENSSL_ALLOW_TLSV1 = OPENSSL_MIN_VER <= ssl.TLSVersion.TLSv1 <= OPENSSL_MAX_VER
OPENSSL_ALLOW_TLSV1_1 = OPENSSL_MIN_VER <= ssl.TLSVersion.TLSv1_1 <= OPENSSL_MAX_VER
OPENSSL_ALLOW_TLSV1_2 = OPENSSL_MIN_VER <= ssl.TLSVersion.TLSv1_2 <= OPENSSL_MAX_VER
OPENSSL_ALLOW_TLSV1_3 = OPENSSL_HAS_TLSV1_3 and PROTON_HAS_TLSV1_3 \
and OPENSSL_MIN_VER <= ssl.TLSVersion.TLSv1_3 <= OPENSSL_MAX_VER
else:
# At this point we are not able to precisely determine what are the minimum and maximum
# TLS versions allowed in the system, so tests will be disabled
RouterTestSslBase.DISABLE_SSL_TESTING = True
RouterTestSslBase.DISABLE_REASON = "OpenSSL >= 1.1 but Python < 3.7 - Unable to determine MinProtocol"
else:
if OPENSSL_HAS_TLSV1_3 and not PROTON_HAS_TLSV1_3:
# If OpenSSL has 1.3 but proton won't let us turn it on and off then
# this test fails because v1.3 runs unexpectedly.
RouterTestSslBase.DISABLE_SSL_TESTING = True
RouterTestSslBase.DISABLE_REASON = "Proton version does not support TLSv1.3 but OpenSSL does"
@classmethod
def setUpClass(cls):
"""
Prepares a single router with multiple listeners, each one associated with a particular
sslProfile and each sslProfile has its own specific set of allowed protocols.
"""
super(RouterTestSslClient, cls).setUpClass()
cls.routers = []
if SASL.extended():
router = ('router', {'id': 'QDR.A',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()})
# Generate authentication DB
super(RouterTestSslClient, cls).create_sasl_files()
else:
router = ('router', {'id': 'QDR.A',
'mode': 'interior'})
# Saving listener ports for each TLS definition
cls.PORT_TLS1 = cls.tester.get_port()
cls.PORT_TLS11 = cls.tester.get_port()
cls.PORT_TLS12 = cls.tester.get_port()
cls.PORT_TLS13 = cls.tester.get_port()
cls.PORT_TLS1_TLS11 = cls.tester.get_port()
cls.PORT_TLS1_TLS12 = cls.tester.get_port()
cls.PORT_TLS11_TLS12 = cls.tester.get_port()
cls.PORT_TLS_ALL = cls.tester.get_port()
cls.PORT_TLS_SASL = cls.tester.get_port()
cls.PORT_SSL3 = cls.tester.get_port()
conf = [
router,
# TLSv1 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS1,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls1'}),
# TLSv1.1 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS11,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls11'}),
# TLSv1.2 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS12,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls12'}),
# TLSv1 and TLSv1.1 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS1_TLS11,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls1-tls11'}),
# TLSv1 and TLSv1.2 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS1_TLS12,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls1-tls12'}),
# TLSv1.1 and TLSv1.2 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS11_TLS12,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls11-tls12'}),
# All TLS versions
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS_ALL,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls-all'}),
# Invalid protocol version
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_SSL3,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-ssl3'})
]
# Adding SASL listener only when SASL is available
if SASL.extended():
conf += [
# TLS 1 and 1.2 with SASL PLAIN authentication for proton client validation
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS_SASL,
'authenticatePeer': 'yes', 'saslMechanisms': 'PLAIN',
'requireSsl': 'yes', 'requireEncryption': 'yes',
'sslProfile': 'ssl-profile-tls1-tls12'})
]
# Adding SSL profiles
conf += [
# SSL Profile for TLSv1
('sslProfile', {'name': 'ssl-profile-tls1',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1',
'password': 'server-password'}),
# SSL Profile for TLSv1.1
('sslProfile', {'name': 'ssl-profile-tls11',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.1',
'password': 'server-password'}),
# SSL Profile for TLSv1.2
('sslProfile', {'name': 'ssl-profile-tls12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.2',
'password': 'server-password'}),
# SSL Profile for TLSv1 and TLSv1.1
('sslProfile', {'name': 'ssl-profile-tls1-tls11',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1 TLSv1.1',
'password': 'server-password'}),
# SSL Profile for TLSv1 and TLSv1.2
('sslProfile', {'name': 'ssl-profile-tls1-tls12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1 TLSv1.2',
'password': 'server-password'}),
# SSL Profile for TLSv1.1 and TLSv1.2
('sslProfile', {'name': 'ssl-profile-tls11-tls12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.1 TLSv1.2',
'password': 'server-password'}),
# SSL Profile for all TLS versions (protocols element not defined)
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'password': 'server-password'}),
# SSL Profile for invalid protocol version SSLv23
('sslProfile', {'name': 'ssl-profile-ssl3',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'SSLv23',
'password': 'server-password'})
]
if cls.OPENSSL_ALLOW_TLSV1_3:
conf += [
# TLSv1.3 only
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_TLS13,
'authenticatePeer': 'no',
'sslProfile': 'ssl-profile-tls13'}),
# SSL Profile for TLSv1.3
('sslProfile', {'name': 'ssl-profile-tls13',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'protocols': 'TLSv1.3',
'password': 'server-password'})
]
config = Qdrouterd.Config(conf)
cls.routers.append(cls.tester.qdrouterd("A", config, wait=False))
cls.routers[0].wait_ports()
def get_allowed_protocols(self, listener_port):
"""
        Loops through TLSv1, TLSv1.1 and TLSv1.2 (plus TLSv1.3 when allowed by
        OpenSSL) and attempts to connect to the listener_port using each version.
        The result is a boolean list, in order: TLSv1 [0], TLSv1.1 [1],
        TLSv1.2 [2] and TLSv1.3 [3] (the last is always False when TLSv1.3 is not allowed).
:param listener_port:
:return:
"""
results = []
for proto in ['TLSv1', 'TLSv1.1', 'TLSv1.2']:
results.append(self.is_proto_allowed(listener_port, proto))
if self.OPENSSL_ALLOW_TLSV1_3:
results.append(self.is_proto_allowed(listener_port, 'TLSv1.3'))
else:
results.append(False)
return results
def is_proto_allowed(self, listener_port, tls_protocol):
"""
Opens a simple proton client connection to the provided TCP port using
a specific TLS protocol version and returns True in case connection
was established and accepted or False otherwise.
:param listener_port: TCP port number
        :param tls_protocol: protocol version string, e.g. TLSv1, TLSv1.1, TLSv1.2 or TLSv1.3
:return:
"""
# Management address to connect using the given TLS protocol
url = Url("amqps://0.0.0.0:%d/$management" % listener_port)
# Preparing SSLDomain (client cert) and SASL authentication info
domain = SSLDomain(SSLDomain.MODE_CLIENT)
# Enforcing given TLS protocol
cproton.pn_ssl_domain_set_protocols(domain._domain, tls_protocol)
# Try opening the secure and authenticated connection
try:
connection = BlockingConnection(url, sasl_enabled=False, ssl_domain=domain, timeout=self.TIMEOUT)
except proton.Timeout:
return False
except proton.ConnectionException:
return False
except:
return False
# TLS version provided was accepted
connection.close()
return True
def is_ssl_sasl_client_accepted(self, listener_port, tls_protocol):
"""
Attempts to connect a proton client to the management address
on the given listener_port using the specific tls_protocol provided.
        Returns True if the connection was established and accepted, False otherwise.
:param listener_port:
:param tls_protocol:
:return:
"""
# Management address to connect using the given TLS protocol
url = Url("amqps://0.0.0.0:%d/$management" % listener_port)
# Preparing SSLDomain (client cert) and SASL authentication info
domain = SSLDomain(SSLDomain.MODE_CLIENT)
domain.set_credentials(self.ssl_file('client-certificate.pem'),
self.ssl_file('client-private-key.pem'),
'client-password')
# Enforcing given TLS protocol
cproton.pn_ssl_domain_set_protocols(domain._domain, tls_protocol)
# Try opening the secure and authenticated connection
try:
connection = BlockingConnection(url,
sasl_enabled=True,
ssl_domain=domain,
allowed_mechs='PLAIN',
user='test@domain.com',
password='password')
except proton.ConnectionException:
return False
# TLS version provided was accepted
connection.close()
return True
def get_expected_tls_result(self, expected_results):
"""
        Expects a list with four boolean elements, representing
        TLSv1, TLSv1.1, TLSv1.2 and TLSv1.3 (in that order).
        When using OpenSSL >= 1.1.x, allowance of a given TLS version is
        based on the MinProtocol / MaxProtocol definitions.
        Note also that TLSv1.2 is allowed even when not specified in a
        listener when using OpenSSL >= 1.1.x.
:param expected_results:
:return:
"""
(tlsv1, tlsv1_1, tlsv1_2, tlsv1_3) = expected_results
return [self.OPENSSL_ALLOW_TLSV1 and tlsv1,
self.OPENSSL_ALLOW_TLSV1_1 and tlsv1_1,
self.OPENSSL_ALLOW_TLSV1_2 and tlsv1_2,
self.OPENSSL_ALLOW_TLSV1_3 and tlsv1_3]
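    # Illustrative example (not part of the test suite): if the system-wide
    # OpenSSL policy only allows TLSv1.2 (e.g. MinProtocol=TLSv1.2 and no
    # TLSv1.3 support), then
    #   get_expected_tls_result([True, True, True, False]) == [False, False, True, False]
    # i.e. the per-listener expectation is masked by what OpenSSL itself allows.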
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls1_only(self):
"""
Expects TLSv1 only is allowed
"""
self.assertEqual(self.get_expected_tls_result([True, False, False, False]),
self.get_allowed_protocols(self.PORT_TLS1))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls11_only(self):
"""
Expects TLSv1.1 only is allowed
"""
self.assertEqual(self.get_expected_tls_result([False, True, False, False]),
self.get_allowed_protocols(self.PORT_TLS11))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls12_only(self):
"""
Expects TLSv1.2 only is allowed
"""
self.assertEqual(self.get_expected_tls_result([False, False, True, False]),
self.get_allowed_protocols(self.PORT_TLS12))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls13_only(self):
"""
Expects TLSv1.3 only is allowed
"""
self.assertEqual(self.get_expected_tls_result([False, False, False, True]),
self.get_allowed_protocols(self.PORT_TLS13))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls1_tls11_only(self):
"""
Expects TLSv1 and TLSv1.1 only are allowed
"""
self.assertEqual(self.get_expected_tls_result([True, True, False, False]),
self.get_allowed_protocols(self.PORT_TLS1_TLS11))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls1_tls12_only(self):
"""
Expects TLSv1 and TLSv1.2 only are allowed
"""
self.assertEqual(self.get_expected_tls_result([True, False, True, False]),
self.get_allowed_protocols(self.PORT_TLS1_TLS12))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls11_tls12_only(self):
"""
Expects TLSv1.1 and TLSv1.2 only are allowed
"""
self.assertEqual(self.get_expected_tls_result([False, True, True, False]),
self.get_allowed_protocols(self.PORT_TLS11_TLS12))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_tls_all(self):
"""
Expects all supported versions: TLSv1, TLSv1.1, TLSv1.2 and TLSv1.3 to be allowed
"""
self.assertEqual(self.get_expected_tls_result([True, True, True, True]),
self.get_allowed_protocols(self.PORT_TLS_ALL))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING, RouterTestSslBase.DISABLE_REASON)
def test_ssl_invalid(self):
"""
Expects connection is rejected as SSL is no longer supported
"""
self.assertEqual(False, self.is_proto_allowed(self.PORT_SSL3, 'SSLv3'))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING or not SASL.extended(),
"Cyrus library not available. skipping test")
def test_ssl_sasl_client_valid(self):
"""
Attempts to connect a Proton client using a valid SASL authentication info
and forcing the TLS protocol version, which should be accepted by the listener.
:return:
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
exp_tls_results = self.get_expected_tls_result([True, False, True, False])
self.assertEqual(exp_tls_results[0], self.is_ssl_sasl_client_accepted(self.PORT_TLS_SASL, "TLSv1"))
self.assertEqual(exp_tls_results[2], self.is_ssl_sasl_client_accepted(self.PORT_TLS_SASL, "TLSv1.2"))
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING or not SASL.extended(),
"Cyrus library not available. skipping test")
def test_ssl_sasl_client_invalid(self):
"""
Attempts to connect a Proton client using a valid SASL authentication info
and forcing the TLS protocol version, which should be rejected by the listener.
:return:
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
exp_tls_results = self.get_expected_tls_result([True, False, True, False])
self.assertEqual(exp_tls_results[1], self.is_ssl_sasl_client_accepted(self.PORT_TLS_SASL, "TLSv1.1"))
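# Illustrative sketch only (not used by the tests above): the same kind of per-version
# probe can be done with Python's standard ssl module instead of Proton, by pinning the
# handshake to a single TLS version. Host, port, timeout and the helper name below are
# assumptions for illustration; requires Python >= 3.7 for ssl.TLSVersion.
def probe_tls_version(host, port, version=ssl.TLSVersion.TLSv1_2, timeout=5.0):
    """Return True if a handshake pinned to `version` completes against host:port."""
    import socket
    import ssl
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    # Pin both ends of the allowed range to the single version being probed
    context.minimum_version = version
    context.maximum_version = version
    try:
        with socket.create_connection((host, port), timeout=timeout) as sock:
            with context.wrap_socket(sock, server_hostname=host):
                return True
    except (ssl.SSLError, OSError):
        return False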
class RouterTestSslInterRouter(RouterTestSslBase):
"""
Starts 5 routers with several listeners and connectors and validates that
communication between them works as expected.
"""
# Listener ports for each TLS protocol definition
PORT_NO_SSL = 0
PORT_TLS_ALL = 0
PORT_TLS12 = 0
PORT_TLS1_TLS12 = 0
@classmethod
def setUpClass(cls):
"""
Prepares 5 routers to form a network. One of them (QDR.A) provides listeners with
multiple sslProfiles, and the other four each try to connect to a respective listener.
Routers A to D are expected to end up connected in the network, while E will not
succeed due to an SSL handshake failure (the allowed TLS protocol versions won't match).
"""
super(RouterTestSslInterRouter, cls).setUpClass()
if not SASL.extended():
return
os.environ["ENV_SASL_PASSWORD"] = "password"
# Generate authentication DB
super(RouterTestSslInterRouter, cls).create_sasl_files()
# Routers expected to be connected
cls.connected_tls_sasl_routers = ['QDR.A', 'QDR.B', 'QDR.C', 'QDR.D']
# Generated router list
cls.routers = []
# Saving listener ports for each TLS definition
cls.PORT_NO_SSL = cls.tester.get_port()
cls.PORT_TLS_ALL = cls.tester.get_port()
cls.PORT_TLS12 = cls.tester.get_port()
cls.PORT_TLS1_TLS12 = cls.tester.get_port()
config_a = Qdrouterd.Config([
('router', {'id': 'QDR.A',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()}),
# No auth and no SSL
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_NO_SSL}),
# All TLS versions
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS_ALL,
'authenticatePeer': 'yes', 'saslMechanisms': 'PLAIN',
'requireEncryption': 'yes', 'requireSsl': 'yes',
'sslProfile': 'ssl-profile-tls-all'}),
# TLSv1.2 only
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS12,
'authenticatePeer': 'yes', 'saslMechanisms': 'PLAIN',
'requireEncryption': 'yes', 'requireSsl': 'yes',
'sslProfile': 'ssl-profile-tls12'}),
# TLSv1 and TLSv1.2 only
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS1_TLS12,
'authenticatePeer': 'yes', 'saslMechanisms': 'PLAIN',
'requireEncryption': 'yes', 'requireSsl': 'yes',
'sslProfile': 'ssl-profile-tls1-tls12'}),
# SSL Profile for all TLS versions (protocols element not defined)
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'password': 'server-password'}),
# SSL Profile for TLSv1.2
('sslProfile', {'name': 'ssl-profile-tls12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.2',
'password': 'server-password'}),
# SSL Profile for TLSv1 and TLSv1.2
('sslProfile', {'name': 'ssl-profile-tls1-tls12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1 TLSv1.2',
'password': 'server-password'})
])
# Router B will connect to listener that allows all protocols
config_b = Qdrouterd.Config([
('router', {'id': 'QDR.B',
'mode': 'interior'}),
# Connector to All TLS versions allowed listener
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS_ALL,
'verifyHostname': 'no', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'pass:password',
'sslProfile': 'ssl-profile-tls-all'}),
# SSL Profile for all TLS versions (protocols element not defined)
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('client-certificate.pem'),
'privateKeyFile': cls.ssl_file('client-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'password': 'client-password'})
])
# Router C will connect to listener that allows TLSv1.2 only
config_c = Qdrouterd.Config([
('router', {'id': 'QDR.C',
'mode': 'interior'}),
# Connector to listener that allows TLSv1.2 only
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS12,
'verifyHostname': 'no', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'env:ENV_SASL_PASSWORD',
'sslProfile': 'ssl-profile-tls12'}),
# SSL Profile for TLSv1.2
('sslProfile', {'name': 'ssl-profile-tls12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('client-certificate.pem'),
'privateKeyFile': cls.ssl_file('client-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.2',
'password': 'client-password'})
])
# Router D will connect to the listener that allows TLSv1 and TLSv1.2 only, using TLSv1
config_d = Qdrouterd.Config([
('router', {'id': 'QDR.D',
'mode': 'interior'}),
# Connector to listener that allows TLSv1 and TLSv1.2 only
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS1_TLS12,
'verifyHostname': 'no', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'pass:password',
'sslProfile': 'ssl-profile-tls1'}),
# SSL Profile for TLSv1
('sslProfile', {'name': 'ssl-profile-tls1',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('client-certificate.pem'),
'privateKeyFile': cls.ssl_file('client-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1',
'password': 'client-password'})
])
# Router E will try to connect to the listener that allows TLSv1 and TLSv1.2 only, using TLSv1.1
config_e = Qdrouterd.Config([
('router', {'id': 'QDR.E',
'mode': 'interior'}),
# Connector to listener that allows TLSv1 and TLSv1.2 only
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS1_TLS12,
'verifyHostname': 'no', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'password',
'sslProfile': 'ssl-profile-tls11'}),
# SSL Profile for TLSv1.1
('sslProfile', {'name': 'ssl-profile-tls11',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('client-certificate.pem'),
'privateKeyFile': cls.ssl_file('client-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.1',
'password': 'client-password'})
])
cls.routers.append(cls.tester.qdrouterd("A", config_a, wait=False))
cls.routers.append(cls.tester.qdrouterd("B", config_b, wait=False))
cls.routers.append(cls.tester.qdrouterd("C", config_c, wait=False))
cls.routers.append(cls.tester.qdrouterd("D", config_d, wait=False))
cls.routers.append(cls.tester.qdrouterd("E", config_e, wait=False))
# Wait until the listener is running and all expected connectors are connected
cls.routers[0].wait_ports()
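# connected_tls_sasl_routers[0] is QDR.A itself, so only wait for B, C and D to show
# up as connected nodes on router A.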
for router in cls.connected_tls_sasl_routers[1:]:
cls.routers[0].wait_router_connected(router)
def get_router_nodes(self):
"""
Retrieves connected router nodes.
:return:
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
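# Query the $management address over the plain (non-SSL) listener, so this works
# regardless of how the TLS inter-router connections were negotiated.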
url = Url("amqp://0.0.0.0:%d/$management" % self.PORT_NO_SSL)
node = Node.connect(url)
response = node.query(type="org.apache.qpid.dispatch.router.node", attribute_names=["id"])
router_nodes = []
for resp in response.get_dicts():
router_nodes.append(resp['id'])
node.close()
return router_nodes
@SkipIfNeeded(RouterTestSslBase.DISABLE_SSL_TESTING or not SASL.extended(),
"Cyrus library not available. skipping test")
def test_connected_tls_sasl_routers(self):
"""
Validates if all expected routers are connected in the network
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
router_nodes = self.get_router_nodes()
self.assertTrue(router_nodes)
for node in router_nodes:
self.assertIn(node, self.connected_tls_sasl_routers)
# Routers A and B are always expected (no TLS version restriction)
expected_nodes = len(self.connected_tls_sasl_routers)
# Router C only if TLSv1.2 is allowed
if not RouterTestSslClient.OPENSSL_ALLOW_TLSV1_2:
expected_nodes -= 1
# Router D only if TLSv1.1 is allowed
if not RouterTestSslClient.OPENSSL_ALLOW_TLSV1_1:
expected_nodes -= 1
self.assertEqual(len(router_nodes), expected_nodes)
class RouterTestSslInterRouterWithInvalidPathToCA(RouterTestSslBase):
"""
DISPATCH-1762
Starts 2 routers:
Router A: two listeners serve a normal, good certificate.
Router B: two connectors configured with an invalid CA file path in their sslProfile
- one sets verifyHostname true, the other false.
Test proves:
Router B must not connect to A with a mis-configured CA file path, regardless of
the verifyHostname setting.
"""
# Listener ports for each TLS protocol definition
PORT_NO_SSL = 0
PORT_TLS_ALL = 0
@classmethod
def setUpClass(cls):
"""
Prepares 2 routers to form a network.
"""
super(RouterTestSslInterRouterWithInvalidPathToCA, cls).setUpClass()
if not SASL.extended():
return
os.environ["ENV_SASL_PASSWORD"] = "password"
# Generate authentication DB
super(RouterTestSslInterRouterWithInvalidPathToCA, cls).create_sasl_files()
# Routers expected to be connected
cls.connected_tls_sasl_routers = []
# Generated router list
cls.routers = []
# Saving listener ports for each TLS definition
cls.PORT_NO_SSL = cls.tester.get_port()
cls.PORT_TLS_ALL_1 = cls.tester.get_port()
cls.PORT_TLS_ALL_2 = cls.tester.get_port()
# Configured connector host
cls.CONNECTOR_HOST = "localhost"
config_a = Qdrouterd.Config([
('router', {'id': 'QDR.A',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()}),
# No auth and no SSL for management access
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_NO_SSL}),
# All TLS versions and normal, good sslProfile config
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS_ALL_1,
'saslMechanisms': 'PLAIN',
'requireEncryption': 'yes', 'requireSsl': 'yes',
'sslProfile': 'ssl-profile-tls-all'}),
# All TLS versions and normal, good sslProfile config
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS_ALL_2,
'saslMechanisms': 'PLAIN',
'requireEncryption': 'yes', 'requireSsl': 'yes',
'sslProfile': 'ssl-profile-tls-all'}),
# SSL Profile for all TLS versions (protocols element not defined)
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'password': 'server-password'})
])
# Router B has two connectors to listeners that allow all protocols; one verifies the
# hostname and the other does not. The sslProfile has a bad caCertFile name, so this
# router should not connect on either connector.
config_b = Qdrouterd.Config([
('router', {'id': 'QDR.B',
'mode': 'interior'}),
# Connector to All TLS versions allowed listener
('connector', {'name': 'connector1',
'host': cls.CONNECTOR_HOST, 'role': 'inter-router',
'port': cls.PORT_TLS_ALL_1,
'verifyHostname': 'no', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'pass:password',
'sslProfile': 'ssl-profile-tls-all'}),
# Connector to All TLS versions allowed listener
('connector', {'name': 'connector2',
'host': cls.CONNECTOR_HOST, 'role': 'inter-router',
'port': cls.PORT_TLS_ALL_2,
'verifyHostname': 'yes', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'pass:password',
'sslProfile': 'ssl-profile-tls-all'}),
# SSL Profile with an invalid caCertFile file path. The correct file path here would allow this
# router to connect. The objective is to trigger a specific failure in the SSL
# setup chain of calls to pn_ssl_domain_* functions.
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('ca-certificate-INVALID-FILENAME.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS'})
])
cls.routers.append(cls.tester.qdrouterd("A", config_a, wait=False))
cls.routers.append(cls.tester.qdrouterd("B", config_b, wait=False))
# Wait until A is running
cls.routers[0].wait_ports()
# Can't wait until B is connected because it's not supposed to connect.
def get_router_nodes(self):
"""
Retrieves connected router nodes from QDR.A
:return: list of connected router id's
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
url = Url("amqp://0.0.0.0:%d/$management" % self.PORT_NO_SSL)
node = Node.connect(url)
response = node.query(type="org.apache.qpid.dispatch.router.node", attribute_names=["id"])
router_nodes = []
for resp in response.get_dicts():
router_nodes.append(resp['id'])
node.close()
return router_nodes
def test_invalid_ca_path(self):
"""
Prove sslProfile with invalid path to CA prevents the router from joining the network
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
# Poll for a while until the connector error shows up in router B's log
pattern = " SERVER (error) SSL CA configuration failed"
host_port_1 = self.CONNECTOR_HOST + ":" + str(self.PORT_TLS_ALL_1)
host_port_2 = self.CONNECTOR_HOST + ":" + str(self.PORT_TLS_ALL_2)
sleep_time = 0.1 # seconds
poll_duration = 60.0 # seconds
verified = False
for tries in range(int(poll_duration / sleep_time)):
logfile = os.path.join(self.routers[1].outdir, self.routers[1].logfile)
if os.path.exists(logfile):
with open(logfile, 'r') as router_log:
log_lines = router_log.read().split("\n")
e1_lines = [s for s in log_lines if pattern in s and host_port_1 in s]
e2_lines = [s for s in log_lines if pattern in s and host_port_2 in s]
verified = len(e1_lines) > 0 and len(e2_lines) > 0
if verified:
break
time.sleep(sleep_time)
self.assertTrue(verified, "Log line containing '%s' not seen for both connectors in QDR.B log" % pattern)
verified = False
pattern1 = "Connection to %s failed:" % host_port_1
pattern2 = "Connection to %s failed:" % host_port_2
for tries in range(int(poll_duration / sleep_time)):
logfile = os.path.join(self.routers[1].outdir, self.routers[1].logfile)
if os.path.exists(logfile):
with open(logfile, 'r') as router_log:
log_lines = router_log.read().split("\n")
e1_lines = [s for s in log_lines if pattern1 in s]
e2_lines = [s for s in log_lines if pattern2 in s]
verified = len(e1_lines) > 0 and len(e2_lines) > 0
if verified:
break
time.sleep(sleep_time)
self.assertTrue(verified, "Log line containing '%s' or '%s' not seen in QDR.B log" % (pattern1, pattern2))
# Show that router A does not have router B in its network
router_nodes = self.get_router_nodes()
self.assertTrue(router_nodes)
node = "QDR.B"
self.assertNotIn(node, router_nodes, msg=("%s should not be connected" % node))
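# Illustrative sketch only (not part of the test suite): the log-polling pattern used
# twice in test_invalid_ca_path above, factored into a standalone helper. The helper
# name is hypothetical; os and time are already imported at module level in this file.
def wait_for_log_patterns(logfile, patterns, sleep_time=0.1, poll_duration=60.0):
    """Poll logfile until every pattern appears on some line, or the poll times out."""
    for _ in range(int(poll_duration / sleep_time)):
        if os.path.exists(logfile):
            with open(logfile, 'r') as router_log:
                log_lines = router_log.read().split("\n")
            # Succeed only when every requested pattern has been seen at least once
            if all(any(p in line for line in log_lines) for p in patterns):
                return True
        time.sleep(sleep_time)
    return False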
class RouterTestSslInterRouterWithoutHostnameVerificationAndMismatchedCA(RouterTestSslBase):
"""
DISPATCH-1762
Starts 2 routers:
Router A listener serves a normal, good certificate.
Router B connector is configured with a CA cert that does not sign the server cert, and verifyHostname is false.
Test proves:
Router B must not connect to A.
"""
# Listener ports for each TLS protocol definition
PORT_NO_SSL = 0
PORT_TLS_ALL = 0
@classmethod
def setUpClass(cls):
"""
Prepares 2 routers to form a network.
"""
super(RouterTestSslInterRouterWithoutHostnameVerificationAndMismatchedCA, cls).setUpClass()
if not SASL.extended():
return
os.environ["ENV_SASL_PASSWORD"] = "password"
# Generate authentication DB
super(RouterTestSslInterRouterWithoutHostnameVerificationAndMismatchedCA, cls).create_sasl_files()
# Routers expected to be connected
cls.connected_tls_sasl_routers = []
# Generated router list
cls.routers = []
# Saving listener ports for each TLS definition
cls.PORT_NO_SSL = cls.tester.get_port()
cls.PORT_TLS_ALL = cls.tester.get_port()
config_a = Qdrouterd.Config([
('router', {'id': 'QDR.A',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()}),
# No auth and no SSL for management access
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.PORT_NO_SSL}),
# All TLS versions and normal, good sslProfile config
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': cls.PORT_TLS_ALL,
'saslMechanisms': 'PLAIN',
'requireEncryption': 'yes', 'requireSsl': 'yes',
'sslProfile': 'ssl-profile-tls-all'}),
# SSL Profile for all TLS versions (protocols element not defined)
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'password': 'server-password'})
])
# Router B has a connector to listener that allows all protocols but will not verify hostname.
# The sslProfile has a caCertFile that does not sign the server cert, so this router should not connect.
config_b = Qdrouterd.Config([
('router', {'id': 'QDR.B',
'mode': 'interior'}),
# Connector to All TLS versions allowed listener
('connector', {'host': 'localhost', 'role': 'inter-router', 'port': cls.PORT_TLS_ALL,
'verifyHostname': 'no', 'saslMechanisms': 'PLAIN',
'saslUsername': 'test@domain.com', 'saslPassword': 'pass:password',
'sslProfile': 'ssl-profile-tls-all'}),
# SSL Profile with a caCertFile that does not sign the server cert. The correct CA certificate here would allow this
# router to connect. The objective is to trigger a certificate verification failure while hostname verification is off.
('sslProfile', {'name': 'ssl-profile-tls-all',
'caCertFile': cls.ssl_file('bad-ca-certificate.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:' \
'DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS'})
])
cls.routers.append(cls.tester.qdrouterd("A", config_a, wait=False))
cls.routers.append(cls.tester.qdrouterd("B", config_b, wait=False))
# Wait until A is running
cls.routers[0].wait_ports()
# Can't wait until B is connected because it's not supposed to connect.
def get_router_nodes(self):
"""
Retrieves connected router nodes from QDR.A
:return: list of connected router id's
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
url = Url("amqp://0.0.0.0:%d/$management" % self.PORT_NO_SSL)
node = Node.connect(url)
response = node.query(type="org.apache.qpid.dispatch.router.node", attribute_names=["id"])
router_nodes = []
for resp in response.get_dicts():
router_nodes.append(resp['id'])
node.close()
return router_nodes
def test_mismatched_ca_and_no_hostname_verification(self):
"""
Prove that improperly configured ssl-enabled connector prevents the router
from joining the network
"""
if not SASL.extended():
self.skipTest("Cyrus library not available. skipping test")
# Poll for a while until the connector error shows up in router B's log
pattern = "Connection to localhost:%s failed:" % self.PORT_TLS_ALL
sleep_time = 0.1 # seconds
poll_duration = 60.0 # seconds
verified = False
for tries in range(int(poll_duration / sleep_time)):
logfile = os.path.join(self.routers[1].outdir, self.routers[1].logfile)
if os.path.exists(logfile):
with open(logfile, 'r') as router_log:
log_lines = router_log.read().split("\n")
e_lines = [s for s in log_lines if pattern in s]
verified = len(e_lines) > 0
if verified:
break
time.sleep(sleep_time)
self.assertTrue(verified, "Log line containing '%s' not seen in QDR.B log" % pattern)
# Show that router A does not have router B in its network
router_nodes = self.get_router_nodes()
self.assertTrue(router_nodes)
node = "QDR.B"
self.assertNotIn(node, router_nodes, msg=("%s should not be connected" % node))
if __name__ == '__main__':
unittest.main(main_module())
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.state_exclusive'
db.add_column('website_question', 'state_exclusive',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.state_exclusive'
db.delete_column('website_question', 'state_exclusive')
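# South's frozen ORM snapshot: the 'models' dict below captures the model state at this
# point in the migration history; self.gf(...) resolves field classes by dotted path, and
# orm['app.Model'] lookups in forwards()/backwards() resolve against this snapshot.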
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to create data for use in unit tests."""
import string
from typing import Any, Dict, List, Tuple
from google.cloud import bigquery
import constants
BATCH_NUMBER = 1
DUMMY_MERCHANT_ID = '1234567'
MULTIPLE_ITEM_COUNT = 2
SINGLE_ITEM_COUNT = 1
ROW_SCHEMA = {
'google_merchant_id': 0,
'item_id': 1,
'title': 2,
'description': 3,
'google_product_category': 4,
'product_types': 5,
'link': 6,
'image_link': 7,
'additional_image_link': 8,
'condition': 9,
'availability': 10,
'price': 11,
'brand': 12,
'gtin': 13,
'mpn': 14,
'shipping': 15,
'loyalty_points': 16,
'ads_redirect': 17,
'color': 18,
'size': 19,
'custom_label_0': 20,
'custom_label_1': 21,
'custom_label_2': 22,
'custom_label_3': 23,
'custom_label_4': 24,
'identifier_exists': 25
}
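# ROW_SCHEMA maps column names to their position in the value tuple handed to
# bigquery.Row below. A minimal sketch of how the mapping is consumed (the
# literal values are illustrative only):
#
#   row = bigquery.Row(('1234567', 'sku-1') + (None,) * 24, ROW_SCHEMA)
#   row.get('item_id')  # -> 'sku-1'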
def generate_test_data(
method: constants.Method,
num_rows=1,
remove_merchant_id=False
) -> Tuple[List[bigquery.Row], constants.Batch, constants.BatchIdToItemId, Dict[
str, Any]]:
"""Generates a tuple containing a triplet of matching row, batch, response.
Args:
method: The API method for this batch (insert, delete)
num_rows: The number of rows to generate
remove_merchant_id: If true, set merchant_id to None
Returns:
A tuple containing row_data, batch, and response. The row_data represents
data pulled directly from BigQuery in as Row objects. The batch data is a
Content API request batch corresponding to the Row data but in JSON object
form. The response is a JSON object which represents the expected response
from the Content API call.
"""
rows = []
batch = {'entries': []}
batch_id_to_item_id = dict()
response = {u'kind': u'content#productsCustomBatchResponse', u'entries': []}
for batch_id in range(0, num_rows):
merchant_id, item, api_item = generate_item_dict_api_pair()
if remove_merchant_id:
merchant_id = None
rows.append(
bigquery.Row(
(merchant_id, item['item_id'], item['title'], item['description'],
item['google_product_category'], item['product_types'],
item['link'], item['image_link'], item['additional_image_link'],
item['condition'], item['availability'], item['price'],
item['brand'], item['gtin'], item['mpn'], item['shipping'],
item['loyalty_points'], item['ads_redirect'], item['color'],
item['size'], item['custom_label_0'], item['custom_label_1'],
item['custom_label_2'], item['custom_label_3'],
item['custom_label_4'], item['identifier_exists']), ROW_SCHEMA))
batch_id_to_item_id[batch_id] = item['item_id']
if method == constants.Method.INSERT:
batch['entries'].append({
'batchId': batch_id,
'merchantId': str(merchant_id),
'method': method.value,
'product': api_item
})
response['entries'].append({
u'batchId': batch_id,
u'kind': u'content#productsCustomBatchResponseEntry',
u'product': {
u'color':
item['color'],
u'offerId':
item['item_id'],
u'gtin':
item['gtin'],
u'googleProductCategory':
item['google_product_category'],
u'availability':
item['availability'],
u'targetCountry':
constants.TARGET_COUNTRY,
u'title':
item['title'],
u'item_id':
'{}:{}:{}:{}'.format(api_item['channel'],
api_item['contentLanguage'],
api_item['targetCountry'],
api_item['offerId']),
u'customLabel1':
item['custom_label_1'],
u'price': {
u'currency': constants.TARGET_CURRENCY,
u'value': item['price']
},
u'channel':
api_item['channel'],
u'description':
item['description'],
u'contentLanguage':
api_item['contentLanguage'],
u'mpn':
item['mpn'],
u'brand':
item['brand'],
u'link':
item['link'],
u'adsRedirect':
item['ads_redirect'],
u'customLabel4':
item['custom_label_4'],
u'customLabel3':
item['custom_label_3'],
u'customLabel2':
item['custom_label_2'],
u'condition':
item['condition'],
u'customLabel0':
item['custom_label_0'],
u'kind':
u'content#product',
u'identifierExists':
item['identifier_exists'],
u'imageLink':
item['image_link'],
u'productTypes': [item['product_types']]
}
})
elif method == constants.Method.DELETE:
json_snippet = {
'batchId':
batch_id,
'merchantId':
str(merchant_id),
'method':
method.value,
'productId':
'{}:{}:{}:{}'.format(api_item['channel'],
api_item['contentLanguage'],
api_item['targetCountry'],
api_item['offerId'])
}
batch['entries'].append(json_snippet)
response['entries'].append(json_snippet)
return rows, batch, batch_id_to_item_id, response
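# Usage sketch (the assertions only illustrate the shape of the generated data
# and are not part of the production helpers):
#
#   rows, batch, batch_id_to_item_id, response = generate_test_data(
#       constants.Method.INSERT, num_rows=MULTIPLE_ITEM_COUNT)
#   assert len(rows) == len(batch['entries']) == len(response['entries'])
#   assert batch_id_to_item_id[0] == rows[0].get('item_id')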
def generate_item_dict_api_pair(
**kwargs: Dict[str,
Any]) -> Tuple[str, constants.Product, constants.Product]:
"""Generate a pair of data objects for testing.
Generate a pair of objects here that can be used to compare. Initially
generate a base item that has all default values, then overwrite this with any
passed arguments, into this function, then generate an API formatted object
that is the complement to the base item.
Args:
**kwargs: A dictionary of parameters that will be used to overwrite any
default values.
Returns:
A tuple of (merchant_id, item, api_formatted_item): the merchant id, the
dict-format item of a Row from BigQuery, and the expected item that should
be returned by the API mapping method.
"""
merchant_id = DUMMY_MERCHANT_ID
item = {
'google_merchant_id': merchant_id,
'item_id': 'test id',
'title': 'test title',
'description': 'test description',
'google_product_category': 'Test > Google > Product > Category',
'product_types': 'Test > Product > Type',
'link': 'https://test.example.co.jp/products/1/',
'image_link': 'https://test.example.co.jp/products/1/image.jpg',
'additional_image_link': None,
'condition': 'new',
'availability': 'in stock',
'price': '100',
'brand': 'Test Brand',
'gtin': '12345678901234',
'mpn': 'ABC1234',
'shipping': None,
'loyalty_points': None,
'ads_redirect': 'https://redir.ex.co.jp/product/1/',
'color': 'Blue',
'size': 'M',
'custom_label_0': None,
'custom_label_1': None,
'custom_label_2': None,
'custom_label_3': None,
'custom_label_4': None,
'identifier_exists': True
}
if kwargs:
for key, value in kwargs.items():
item[key] = value
api_formatted_item = {
'offerId': item['item_id'],
'title': item['title'],
'description': item['description'],
'googleProductCategory': item['google_product_category'],
'productTypes': [item['product_types']],
'link': item['link'],
'imageLink': item['image_link'],
'additionalImageLinks': [],
'condition': item['condition'],
'availability': item['availability'],
'price': {
'currency': constants.TARGET_CURRENCY,
'value': ''.join(c for c in item['price'] if c in string.digits)
},
'brand': item['brand'],
'gtin': item['gtin'],
'mpn': item['mpn'],
'shipping': [],
'loyaltyPoints': {},
'adsRedirect': item['ads_redirect'],
'color': item['color'],
'sizes': [item['size']],
'customLabel0': item['custom_label_0'] if item['custom_label_0'] else '',
'customLabel1': item['custom_label_1'] if item['custom_label_1'] else '',
'customLabel2': item['custom_label_2'] if item['custom_label_2'] else '',
'customLabel3': item['custom_label_3'] if item['custom_label_3'] else '',
'customLabel4': item['custom_label_4'] if item['custom_label_4'] else '',
'identifierExists': item['identifier_exists'],
'contentLanguage': constants.CONTENT_LANGUAGE,
'targetCountry': constants.TARGET_COUNTRY,
'channel': constants.CHANNEL
}
empty_fields = [k for k in api_formatted_item if not api_formatted_item[k]]
for empty_field in empty_fields:
del api_formatted_item[empty_field]
return merchant_id, item, api_formatted_item
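# Example (sketch): keyword overrides replace base-item defaults before the API
# mapping is built, and falsy API fields are stripped at the end.
#
#   merchant_id, item, api_item = generate_item_dict_api_pair(color='Red')
#   assert api_item['color'] == 'Red'
#   assert 'additionalImageLinks' not in api_item  # empty list was removed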
def _generate_insert_response_with_errors(num_rows=1) -> Dict[str, Any]:
"""Generates a Content API insert response with errors.
Args:
num_rows: The number of rows to generate.
Returns:
A Content API response with errors (missing currency field).
"""
response = {u'kind': u'content#productsCustomBatchResponse', u'entries': []}
for batch_id in range(0, num_rows):
response['entries'].append({
u'kind': u'content#productsCustomBatchResponseEntry',
u'batchId': batch_id,
u'errors': {
u'errors': [{
'domain': 'global',
'reason': 'required',
'message': '[price.currency] Required parameter: price.currency'
}, {
'domain': 'content.ContentErrorDomain',
'reason': 'not_inserted',
'message': 'The item could not be inserted.'
}],
'code': 400,
'message': '[price.currency] Required parameter: price.currency'
}
})
return response
def _generate_delete_response_with_errors(num_rows=1) -> Dict[str, Any]:
"""Generates a Content API delete response with errors.
Args:
num_rows: The number of rows to generate.
Returns:
A Content API response with errors (item not found).
"""
response = {u'kind': u'content#productsCustomBatchResponse', u'entries': []}
for batch_id in range(0, num_rows):
response['entries'].append({
'kind': 'content#productsCustomBatchResponseEntry',
'batchId': batch_id,
'errors': {
'errors': [{
'domain': 'global',
'reason': 'notFound',
'message': 'item not found'
}],
'code': 404,
'message': 'item not found'
}
})
return response
def _generate_response_with_invalid_kind_value(num_rows=1) -> Dict[str, Any]:
"""Generates a Content API response with an invalid kind value.
Args:
num_rows: The number of rows to generate.
Returns:
A Content API response with an invalid kind value.
"""
response = {u'kind': u'content#invalid', u'entries': []}
for _ in range(0, num_rows):
response['entries'].append({'kind': 'content#invalid'})
return response
|
|
""" pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import pickle
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle.pydevd_constants import get_frame, get_thread_id, xrange
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import to_string
SENTINEL_VALUE = []
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
if frame_id == "*":
return get_frame()  # any frame is specified with "*"; checked before the int() conversion below
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much more than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
errMsg = '''find_frame: frame not found.
Looking for thread_id:%s, frame_id:%s
Current thread_id:%s, available frames:
%s\n
''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
sys.stderr.write(errMsg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
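# Usage sketch ('some_frame' is a hypothetical frame object obtained elsewhere):
# frame_id is normally the CPython id() of a live frame on the given thread,
# passed as a string, and "*" selects the current topmost frame:
#
#   frame = find_frame(get_thread_id(threading.currentThread()), str(id(some_frame)))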
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for i, attr in enumerate(attrList):
attrList[i] = attr.replace("@_@TAB_CHAR@_@", '\t')  # str.replace returns a new string, so store it back
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to be evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
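# Sketch of the attrs format consumed above: scope names come from the debugger
# protocol and attribute chains are tab-separated (thread_id, frame_id, my_obj
# and some_attr are illustrative only):
#
#   value = getVariable(thread_id, frame_id, 'FRAME', 'my_obj\tsome_attr')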
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: the type name of the variable and a dictionary of the variable's fields
"""
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: the value of the resolved attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
attr_list = attrs.split('\t')
for k in attr_list:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
_type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
Executes code_or_file and then looks up operation_fn_name in the resulting namespace, calling it with the given variable.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
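# Name-mangling sketch: if evaluating "self.__secret" inside a method of class
# Foo fails, the fallback above retries attribute access as "self._Foo__secret"
# (the names here are purely illustrative).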
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = 100
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)))
return xml
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise Exception("%s has more than 2 dimensions." % slice)
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = min(len(array), MAXIMUM_ARRAY_SIZE)  # clamp so the reslice check below can actually trigger
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = min(len(array), MAXIMUM_ARRAY_SIZE)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = min(array.shape[-2], MAXIMUM_ARRAY_SIZE)
cols = min(array.shape[-1], MAXIMUM_ARRAY_SIZE)
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in "biufc":
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
xml = slice_to_xml(name, num_rows, num_cols, "", "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in "biufc":
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in "biufc" else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
def col_to_format(c):
return format if dtypes[c] == 'f' and format else array_default_format(dtypes[c])
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, lambda r: (("%" + col_to_format(c)) % (df.iat[r, c] if dim > 1 else df.iat[r])
for c in range(cols)))
return xml
def array_data_to_xml(rows, cols, get_row):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % to_string(row)
for value in get_row(row):
xml += var_to_xml(value, '')
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, format, type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = get_label(df.axes[1].values[col]) if dim > 1 else str(col)
bounds = col_bounds[col]
col_format = "%" + col_to_format(col)
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(col), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
TYPE_TO_XML_CONVERTERS = {"ndarray": array_to_xml, "DataFrame": dataframe_to_xml, "Series": dataframe_to_xml}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
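# Usage sketch (assumes numpy is available in the debugged process; the output
# shown is abbreviated):
#
#   import numpy as np
#   xml = table_like_struct_to_xml(np.eye(3), 'm', 0, 0, -1, -1, '%')
#   # -> '<xml><array slice="m" rows="3" cols="3" ... /><arraydata rows="3" cols="3"/>...</xml>'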
|