#!/usr/bin/env python
import codecs
from collections import OrderedDict
from decimal import Decimal
import warnings
import agate
from babel.numbers import format_decimal
import six
from csvkit.cli import CSVKitUtility, parse_column_identifiers
NoneType = type(None)
OPERATIONS = OrderedDict([
('type', {
'aggregation': None,
'label': 'Type of data: '
}),
('nulls', {
'aggregation': agate.HasNulls,
'label': 'Contains null values: '
}),
('unique', {
'aggregation': None,
'label': 'Unique values: '
}),
('min', {
'aggregation': agate.Min,
'label': 'Smallest value: '
}),
('max', {
'aggregation': agate.Max,
'label': 'Largest value: '
}),
('sum', {
'aggregation': agate.Sum,
'label': 'Sum: '
}),
('mean', {
'aggregation': agate.Mean,
'label': 'Mean: '
}),
('median', {
'aggregation': agate.Median,
'label': 'Median: '
}),
('stdev', {
'aggregation': agate.StDev,
'label': 'StDev: '
}),
('len', {
'aggregation': agate.MaxLength,
'label': 'Longest value: '
}),
('freq', {
'aggregation': None,
'label': 'Most common values: '
})
])
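# --- Illustration (not part of the utility) ----------------------------------
# How an OPERATIONS entry with an 'aggregation' class is applied: build a
# tiny agate table and aggregate one column, mirroring the
# table.aggregate(op(column_id)) call in print_one()/calculate_stats().
# The rows and the 'price' column are invented for this sketch.
def _operations_demo():
    table = agate.Table([(1,), (2,), (3,)], ['price'], [agate.Number()])
    mean = table.aggregate(agate.Mean('price'))           # Decimal('2')
    has_nulls = table.aggregate(agate.HasNulls('price'))  # False
    return mean, has_nulls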
class CSVStat(CSVKitUtility):
description = 'Print descriptive statistics for each column in a CSV file.'
override_flags = ['L', 'blanks', 'date-format', 'datetime-format']
def add_arguments(self):
self.argparser.add_argument('--csv', dest='csv_output', action='store_true',
help='Output results as a CSV, rather than text.')
self.argparser.add_argument('-n', '--names', dest='names_only', action='store_true',
help='Display column names and indices from the input CSV and exit.')
self.argparser.add_argument('-c', '--columns', dest='columns',
help='A comma separated list of column indices or names to be examined. Defaults to all columns.')
self.argparser.add_argument('--type', dest='type_only', action='store_true',
help='Only output data type.')
self.argparser.add_argument('--nulls', dest='nulls_only', action='store_true',
                                    help='Only output whether columns contain nulls.')
self.argparser.add_argument('--unique', dest='unique_only', action='store_true',
help='Only output counts of unique values.')
self.argparser.add_argument('--min', dest='min_only', action='store_true',
help='Only output smallest values.')
self.argparser.add_argument('--max', dest='max_only', action='store_true',
help='Only output largest values.')
self.argparser.add_argument('--sum', dest='sum_only', action='store_true',
help='Only output sums.')
self.argparser.add_argument('--mean', dest='mean_only', action='store_true',
help='Only output means.')
self.argparser.add_argument('--median', dest='median_only', action='store_true',
help='Only output medians.')
self.argparser.add_argument('--stdev', dest='stdev_only', action='store_true',
help='Only output standard deviations.')
self.argparser.add_argument('--len', dest='len_only', action='store_true',
help='Only output the length of the longest values.')
self.argparser.add_argument('--freq', dest='freq_only', action='store_true',
help='Only output lists of frequent values.')
self.argparser.add_argument('--freq-count', dest='freq_count', type=int,
help='The maximum number of frequent values to display.')
self.argparser.add_argument('--count', dest='count_only', action='store_true',
help='Only output total row count.')
self.argparser.add_argument('-y', '--snifflimit', dest='sniff_limit', type=int,
help='Limit CSV dialect sniffing to the specified number of bytes. Specify "0" to disable sniffing entirely.')
def main(self):
if self.args.names_only:
self.print_column_names()
return
operations = [op for op in OPERATIONS.keys() if getattr(self.args, op + '_only')]
if len(operations) > 1:
self.argparser.error('Only one operation argument may be specified (--mean, --median, etc).')
if operations and self.args.csv_output:
self.argparser.error('You may not specify --csv and an operation (--mean, --median, etc) at the same time.')
if operations and self.args.count_only:
self.argparser.error('You may not specify --count and an operation (--mean, --median, etc) at the same time.')
if six.PY2:
self.output_file = codecs.getwriter('utf-8')(self.output_file)
if self.args.count_only:
count = len(list(agate.csv.reader(self.input_file)))
if not self.args.no_header_row:
count -= 1
self.output_file.write('Row count: %i\n' % count)
return
table = agate.Table.from_csv(
self.input_file,
skip_lines=self.args.skip_lines,
sniff_limit=self.args.sniff_limit,
**self.reader_kwargs
)
column_ids = parse_column_identifiers(
self.args.columns,
table.column_names,
self.get_column_offset()
)
kwargs = {}
if self.args.freq_count:
kwargs['freq_count'] = self.args.freq_count
# Output a single stat
if operations:
if len(column_ids) == 1:
self.print_one(table, column_ids[0], operations[0], label=False, **kwargs)
else:
for column_id in column_ids:
self.print_one(table, column_id, operations[0], **kwargs)
else:
stats = {}
for column_id in column_ids:
stats[column_id] = self.calculate_stats(table, column_id, **kwargs)
# Output as CSV
if self.args.csv_output:
self.print_csv(table, column_ids, stats)
# Output all stats
else:
self.print_stats(table, column_ids, stats)
def print_one(self, table, column_id, operation, label=True, **kwargs):
"""
Print data for a single statistic.
"""
column_name = table.column_names[column_id]
op_name = operation
getter = globals().get('get_%s' % op_name, None)
with warnings.catch_warnings():
warnings.simplefilter('ignore', agate.NullCalculationWarning)
try:
if getter:
stat = getter(table, column_id, **kwargs)
else:
op = OPERATIONS[op_name]['aggregation']
stat = table.aggregate(op(column_id))
if isinstance(stat, Decimal):
stat = format_decimal(stat, locale=agate.config.get_option('default_locale'))
            except Exception:
stat = None
# Formatting
if op_name == 'freq':
stat = ', '.join([(u'"%s": %s' % (six.text_type(row[column_name]), row['Count'])) for row in stat])
stat = u'{ %s }' % stat
if label:
self.output_file.write(u'%3i. %s: %s\n' % (column_id + 1, column_name, stat))
else:
self.output_file.write(u'%s\n' % stat)
def calculate_stats(self, table, column_id, **kwargs):
"""
Calculate stats for all valid operations.
"""
stats = {}
for op_name, op_data in OPERATIONS.items():
getter = globals().get('get_%s' % op_name, None)
with warnings.catch_warnings():
warnings.simplefilter('ignore', agate.NullCalculationWarning)
try:
if getter:
stats[op_name] = getter(table, column_id, **kwargs)
else:
op = op_data['aggregation']
v = table.aggregate(op(column_id))
if isinstance(v, Decimal):
v = format_decimal(v)
stats[op_name] = v
                except Exception:
stats[op_name] = None
return stats
def print_stats(self, table, column_ids, stats):
"""
Print data for all statistics.
"""
label_column_width = max([len(op_data['label']) for op_data in OPERATIONS.values()])
for column_id in column_ids:
column_name = table.column_names[column_id]
column = table.columns[column_id]
column_stats = stats[column_id]
self.output_file.write(('%3i. "%s"\n\n' % (column_id + 1, column_name)))
for op_name, op_data in OPERATIONS.items():
if column_stats[op_name] is None:
continue
label = u'{label:{label_column_width}}'.format(**{
'label_column_width': label_column_width,
'label': op_data['label']
})
if op_name == 'freq':
for i, row in enumerate(column_stats['freq']):
if i == 0:
self.output_file.write('\t{} '.format(label))
else:
self.output_file.write(u'\t{label:{label_column_width}} '.format(**{
'label_column_width': label_column_width,
'label': ''
}))
if isinstance(column.data_type, agate.Number):
v = row[column_name]
if isinstance(v, Decimal):
v = format_decimal(v)
else:
v = six.text_type(row[column_name])
self.output_file.write(u'{} ({}x)\n'.format(v, row['Count']))
continue
v = column_stats[op_name]
if op_name == 'nulls' and v:
v = '%s (excluded from calculations)' % v
elif op_name == 'len':
v = '%s characters' % v
self.output_file.write(u'\t{} {}\n'.format(label, v))
self.output_file.write('\n')
self.output_file.write('Row count: %s\n' % len(table.rows))
def print_csv(self, table, column_ids, stats):
"""
Print data for all statistics as a csv table.
"""
writer = agate.csv.writer(self.output_file)
header = ['column_id', 'column_name'] + [op_name for op_name in OPERATIONS.keys()]
writer.writerow(header)
for column_id in column_ids:
column_name = table.column_names[column_id]
column_stats = stats[column_id]
output_row = [column_id + 1, column_name]
for op_name, op_data in OPERATIONS.items():
if column_stats[op_name] is None:
output_row.append(None)
continue
if op_name == 'freq':
value = ', '.join([six.text_type(row[column_name]) for row in column_stats['freq']])
else:
value = column_stats[op_name]
output_row.append(value)
writer.writerow(output_row)
def get_type(table, column_id, **kwargs):
return '%s' % table.columns[column_id].data_type.__class__.__name__
def get_unique(table, column_id, **kwargs):
return len(table.columns[column_id].values_distinct())
def get_freq(table, column_id, freq_count=5, **kwargs):
return table.pivot(column_id).order_by('Count', reverse=True).limit(freq_count)
def launch_new_instance():
utility = CSVStat()
utility.run()
if __name__ == '__main__':
launch_new_instance()
# ==============================================================================
#! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
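# Sketch of the counterpart check on the sklearn/__init__.py side
# (paraphrased from memory; the exact guard in scikit-learn may differ):
#
#     try:
#         __SKLEARN_SETUP__
#     except NameError:
#         __SKLEARN_SETUP__ = False
#
#     if not __SKLEARN_SETUP__:
#         ...  # import the compiled submodules as usual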
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
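# Why parse_version rather than plain string comparison: lexicographic
# ordering gets multi-digit components wrong. Illustration (values invented):
#
#     >>> '1.10' < '1.6.1'                                # True  (wrong)
#     >>> parse_version('1.10') < parse_version('1.6.1')  # False (correct)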
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # These actions must succeed without NumPy, for example when pip is
        # used to install scikit-learn while NumPy is not yet present on the
        # system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
# ==============================================================================
import numpy
import theano
from theano import tensor
from blocks.bricks import Initializable
from blocks.bricks.recurrent import recurrent, BaseRecurrent
from blocks.bricks.base import lazy, application
from blocks.graph import ComputationGraph
from blocks_contrib.utils import diff_abs, l2_norm_cost
floatX = theano.config.floatX
def RMSPropStep(cost, states, accum_1, accum_2):
rho = .9
lr = .001
momentum = .9
epsilon = 1e-8
grads = tensor.grad(cost, states)
new_accum_1 = rho * accum_1 + (1 - rho) * grads**2
new_accum_2 = momentum * accum_2 - lr * grads / tensor.sqrt(new_accum_1 + epsilon)
new_states = states + momentum * new_accum_2 - lr * (grads /
tensor.sqrt(new_accum_1 + epsilon))
return new_states, new_accum_1, new_accum_2
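# --- Illustration (not part of the bricks) ------------------------------------
# A NumPy transcription of RMSPropStep above, to make the update rule easy to
# read outside Theano. Note that momentum is applied on top of the RMS-scaled
# gradient, a Nesterov-flavoured variant of plain RMSProp.
def rmsprop_step_np(grads, states, accum_1, accum_2,
                    rho=.9, lr=.001, momentum=.9, epsilon=1e-8):
    new_accum_1 = rho * accum_1 + (1 - rho) * grads ** 2
    scaled = lr * grads / numpy.sqrt(new_accum_1 + epsilon)
    new_accum_2 = momentum * accum_2 - scaled
    new_states = states + momentum * new_accum_2 - scaled
    return new_states, new_accum_1, new_accum_2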
class SparseFilter(BaseRecurrent, Initializable):
def __init__(self, mlp, *args, **kwargs):
super(SparseFilter, self).__init__(*args, **kwargs)
self.mlp = mlp
self.children = [mlp, ]
@application
def initial_state(self, state_name, batch_size, *args, **kwargs):
if state_name in self.apply.states:
dim = self.get_dim(state_name)
return tensor.zeros((batch_size, dim))
if state_name == 'gamma':
dim = self.get_dim('gamma')
return .1*tensor.ones((batch_size, dim))
return super(SparseFilter, self).initial_state(state_name,
batch_size, *args, **kwargs)
def get_dim(self, name):
if name in (self.apply.states +
self.apply.outputs[1:]) + ['prior', 'gamma', 'outputs']:
return self.mlp.input_dim
elif name == 'inputs':
return self.mlp.output_dim
return super(SparseFilter, self).get_dim(name)
@recurrent(sequences=[], states=['states', 'accum_1', 'accum_2'],
outputs=['outputs', 'states',
'accum_1', 'accum_2'],
contexts=['inputs', 'prior', 'gamma'])
def apply(self, inputs=None,
states=None, accum_1=None,
accum_2=None, gamma=.1, prior=None):
""" The outputs of this function are the reconstructed/filtered
        version of the input and the coding coefficients.
The `states` are the coding coefficients.
This recurrent method is the estimation process involved in
filtering.
"""
if prior is not None:
cost = .01 * diff_abs(states - prior).sum()
else:
cost = 0
outputs = self.mlp.apply(states)
# TODO accept `blocks.bricks.cost` as input to be used as reconstruction cost
rec_error = tensor.sqr(inputs - outputs).sum()
l1_norm = (gamma * diff_abs(states)).sum()
cost += rec_error + l1_norm
new_states, new_accum_1, new_accum_2 = RMSPropStep(cost, states,
accum_1, accum_2)
results = [outputs, new_states, new_accum_1, new_accum_2]
return results
@application
def cost(self, inputs, n_steps, batch_size, gamma=.1, prior=None):
z = self.apply(inputs=inputs, gamma=gamma, prior=prior, n_steps=n_steps,
batch_size=batch_size)[1][-1]
# z = theano.gradient.disconnected_grad(z)
# x_hat = tensor.dot(z, self.W)
x_hat = self.mlp.apply(z)
cost = tensor.sqr(inputs - x_hat).sum()
weights_normalization = l2_norm_cost(self.mlp, ComputationGraph([cost]), .01)
cost += weights_normalization
return cost, z, x_hat
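# --- Illustration (not part of the bricks) ------------------------------------
# What SparseFilter.apply unrolls, written as a plain NumPy loop: gradient
# descent on ||x - W z||^2 + gamma * |z|, with |.| smoothed as
# sqrt(z**2 + eps) (what diff_abs presumably computes). W, x and the step
# size are invented; the brick itself uses the RMSProp step above.
def _sparse_inference_demo(n_steps=50, gamma=.1, lr=.01, eps=1e-6):
    rng = numpy.random.RandomState(0)
    W = rng.randn(16, 64)        # decoder: 16 codes -> 64 input dimensions
    x = rng.randn(1, 64)         # a single input example
    z = numpy.zeros((1, 16))     # coding coefficients (the "states")
    for _ in range(n_steps):
        grads = 2. * (z.dot(W) - x).dot(W.T) + gamma * z / numpy.sqrt(z ** 2 + eps)
        z -= lr * grads          # plain gradient step for brevity
    return z, z.dot(W)           # codes and reconstruction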
class VarianceComponent(SparseFilter):
def __init__(self, mlp, *args, **kwargs):
super(VarianceComponent, self).__init__(mlp, *args, **kwargs)
def get_dim(self, name):
if name == 'prev_code':
return self.mlp.input_dim
if name == 'prior':
            return self.mlp.output_dim
return super(VarianceComponent, self).get_dim(name)
@application
def initial_state(self, state_name, batch_size, *args, **kwargs):
if state_name in self.apply.states + ['prior', 'prev_code']:
dim = self.get_dim('states')
return tensor.zeros((batch_size, dim))
if state_name == 'prev_rec':
            dim = self.get_dim('inputs')
return tensor.zeros((batch_size, dim))
return super(VarianceComponent, self).initial_state(state_name,
batch_size, *args, **kwargs)
@recurrent(sequences=[], states=['states', 'accum_1', 'accum_2'],
outputs=['outputs', 'states',
'accum_1', 'accum_2'],
contexts=['prior', 'prev_rec', 'prev_code'])
def apply(self, states=None, accum_1=None,
accum_2=None, batch_size=None, prior=None,
prev_rec=None, prev_code=None):
""" The outputs of this function are the higher order
variance components.
The `states` are the coding coefficients.
This recurrent method is the estimation process involved in
filtering.
"""
if prior is not None:
cost = .01 * diff_abs(states - prior).sum()
else:
cost = 0
# uW = self.mlp.apply(states)
# outputs = .05 * (1 + tensor.exp(-uW))
# outputs = .1 * tensor.nnet.sigmoid(uW)
outputs = self.get_sparseness(states)
prev_l1 = (outputs * diff_abs(prev_code)).sum()
# rec_error = tensor.sqr(inputs - prev_rec).sum()
l1_norm = diff_abs(states).sum()
cost += prev_l1 + .1 * l1_norm
new_states, new_accum_1, new_accum_2 = RMSPropStep(cost, states,
accum_1, accum_2)
results = [outputs, new_states, new_accum_1, new_accum_2]
return results
@application
def get_sparseness(self, u):
uW = self.mlp.apply(u)
return .05 * (1 + tensor.exp(-uW))
@application
def cost(self, prev_code, prior=None):
u = self.apply(batch_size=self.batch_size, prev_code=prev_code,
n_steps=self.n_steps, prior=prior)[1][-1]
u = theano.gradient.disconnected_grad(u)
# uW = self.mlp.apply(u)
# outputs = .05 * (1 + tensor.exp(-uW))
outputs = self.get_sparseness(u)
# outputs = .1 * tensor.nnet.sigmoid(uW)
        # VarianceComponent defines no standalone `W`; penalise the MLP
        # weights with the same l2_norm_cost helper used elsewhere here.
        final_cost = (outputs * prev_code).sum()
        final_cost += l2_norm_cost(self.mlp, ComputationGraph([final_cost]), .01)
return final_cost, u, outputs
class TemporalSparseFilter(BaseRecurrent, Initializable):
def __init__(self, proto, transition, n_steps, batch_size, *args, **kwargs):
super(TemporalSparseFilter, self).__init__(*args, **kwargs)
self.proto = proto
self.n_steps = n_steps
self.batch_size = batch_size
self.transition = transition
self.children = [proto, proto.mlp, transition]
@application
def initial_state(self, state_name, batch_size, *args, **kwargs):
return self.proto.initial_state(state_name,
batch_size, *args, **kwargs)
def get_dim(self, name):
return self.proto.get_dim(name)
@recurrent(sequences=['inputs'], states=['states'],
outputs=['outputs', 'states'],
contexts=[])
def apply(self, inputs=None, states=None, **kwargs):
""" The outputs of this function are the reconstructed/filtered
        version of the input and the coding coefficients.
The `states` are the coding coefficients.
This recurrent method is the estimation process involved in
filtering.
"""
prior = theano.gradient.disconnected_grad(states)
        prior = self.transition.apply(prior)
results = self.proto.apply(inputs=inputs, prior=prior,
n_steps=self.n_steps, batch_size=self.batch_size, **kwargs)
return results[0][-1], results[1][-1]
@application
def cost(self, inputs, **kwargs):
x_hat, z = self.apply(inputs=inputs, **kwargs)
z = theano.gradient.disconnected_grad(z)
prev = self.transition.apply(z)
innovation_error = .01 * diff_abs(z[1:] - prev[:-1]).sum()
x_hat = self.proto.mlp.apply(z)
main_cost = tensor.sqr(inputs - x_hat).sum() + innovation_error
cg = ComputationGraph([main_cost])
weights_normalization = l2_norm_cost(self.proto.mlp, cg, .01)
weights_normalization += l2_norm_cost(self.transition, cg, .01)
costs = main_cost + weights_normalization
return costs, z, x_hat
class TemporalVarComp(BaseRecurrent, Initializable):
def __init__(self, slayer, stransition, clayer, n_steps, batch_size, *args, **kwargs):
        '''
        Parameters
        ----------
        slayer: `SparseFilter`
            states layer, does sparse coding
        stransition: `bricks.MLP`
            transition function of the sparse coding
        clayer: `VarianceComponent`
            causes layer, does variance component learning
        '''
super(TemporalVarComp, self).__init__(*args, **kwargs)
self.slayer = slayer
self.clayer = clayer
self.n_steps = n_steps
self.batch_size = batch_size
self.stransition = stransition
self.children = [slayer, slayer.mlp, clayer, clayer.mlp, stransition]
@application
def initial_state(self, state_name, batch_size, *args, **kwargs):
if state_name == 'sstates':
return self.slayer.initial_state('states',
batch_size, *args, **kwargs)
if state_name == 'cstates':
return self.clayer.initial_state('states',
batch_size, *args, **kwargs)
def get_dim(self, name):
if name == 'sstates':
return self.slayer.get_dim('states')
elif name == 'cstates':
return self.clayer.get_dim('states')
@recurrent(sequences=['inputs'], states=['sstates', 'cstates'],
outputs=['soutputs', 'sstates', 'coutputs', 'cstates'],
contexts=[])
def apply(self, inputs=None, sstates=None, cstates=None, **kwargs):
""" The outputs of this function are the reconstructed/filtered
        version of the input and the coding coefficients.
The `states` are the coding coefficients.
This recurrent method is the estimation process involved in
filtering.
"""
sprior = theano.gradient.disconnected_grad(sstates)
        sprior = self.stransition.apply(sprior)
cprior = theano.gradient.disconnected_grad(cstates)
gamma = self.clayer.get_sparseness(cprior)
sparse_code = self.slayer.apply(inputs=inputs, prior=sprior, gamma=gamma,
n_steps=self.n_steps, batch_size=self.batch_size)
variance_code = self.clayer.apply(prior=cprior, prev_code=sparse_code[1][-1],
n_steps=self.n_steps, batch_size=self.batch_size, **kwargs)
return sparse_code[0][-1], sparse_code[1][-1], variance_code[0][-1], variance_code[1][-1]
@application
def cost(self, inputs, **kwargs):
x_hat, z, gammas, u = self.apply(inputs=inputs, **kwargs)
z = theano.gradient.disconnected_grad(z)
        u = theano.gradient.disconnected_grad(u)
prev = self.stransition.apply(z)
innovation_error = .01 * diff_abs(z[1:] - prev[:-1]).sum()
x_hat = self.slayer.mlp.apply(z)
sparseness = (self.clayer.get_sparseness(u) * diff_abs(z)).sum()
main_cost = tensor.sqr(inputs - x_hat).sum() + innovation_error + sparseness
cg = ComputationGraph([main_cost])
weights_normalization = l2_norm_cost(self.slayer.mlp, cg, .01)
weights_normalization += l2_norm_cost(self.stransition, cg, .01)
weights_normalization += l2_norm_cost(self.clayer.mlp, cg, .01)
costs = main_cost + weights_normalization
return costs, z, x_hat, u
class VariationalSparseFilter(SparseFilter):
@lazy(allocation=['dim', 'input_dim', 'batch_size', 'n_steps'])
def __init__(self, mlp, *args, **kwargs):
super(VariationalSparseFilter, self).__init__(*args, **kwargs)
self.mlp = mlp
self.children = [self.mlp, ]
@recurrent(sequences=['noise'], states=['states_mean', 'accum_1_m', 'accum_2_m',
'states_log_sigma', 'accum_1_ls', 'accum_2_ls'],
outputs=['outputs', 'codes', 'states_mean',
'accum_1_m', 'accum_2_m', 'states_log_sigma', 'accum_1_ls', 'accum_2_ls'],
contexts=['inputs', 'prior', 'gamma'])
def apply(self, noise=None, inputs=None,
states_mean=None, states_log_sigma=None, accum_1_m=None,
accum_2_m=None, accum_1_ls=None, accum_2_ls=None, gamma=.1, prior=None):
""" The outputs of this function are the reconstructed/filtered
        version of the input and the coding coefficients.
The `states` are the coding coefficients.
This recurrent method is the estimation process involved in
filtering.
"""
sigma = tensor.exp(states_log_sigma)
z = states_mean + noise * sigma
if prior is not None:
tstates = tensor.dot(z, tensor.eye(self.dim))
cost = .01 * diff_abs(tstates - prior).sum()
else:
cost = 0
tinputs = tensor.dot(inputs, tensor.eye(self.input_dim))
outputs = self.mlp.apply(z) # tensor.dot(z, self.W)
rec_error = tensor.sqr(tinputs - outputs).sum()
l1_mean = diff_abs(states_mean)
l1_sigma = diff_abs(sigma - 1)
l1_norm = (gamma * (l1_mean + l1_sigma)).sum()
cost += rec_error + l1_norm
new_means_stuff = RMSPropStep(cost, states_mean, accum_1_m, accum_2_m)
new_log_sigma_stuff = RMSPropStep(cost, states_log_sigma, accum_1_ls, accum_2_ls)
results = (outputs, z) + new_means_stuff + new_log_sigma_stuff
return results
@application
def initial_state(self, state_name, batch_size, *args, **kwargs):
if state_name in self.apply.states:
dim = self.dim
# zeros = numpy.zeros((self.batch_size, dim))
# return theano.shared(zeros.astype(floatX))
return tensor.zeros((batch_size, dim))
if state_name == 'gamma':
dim = self.get_dim('gamma')
gammas = .1*numpy.ones((self.batch_size, dim))
return theano.shared(gammas.astype(floatX))
return super(VariationalSparseFilter, self).initial_state(state_name,
batch_size, *args, **kwargs)
@application
def cost(self, inputs, noise, gamma=.1, prior=None):
z = self.apply(noise=noise, inputs=inputs, gamma=gamma, prior=prior)[1][-1]
z = theano.gradient.disconnected_grad(z)
x_hat = self.mlp.apply(z) # tensor.dot(z, self.W)
cost = tensor.sqr(inputs - x_hat).sum() # + .01*tensor.sqr(self.W).sum()
cost += l2_norm_cost(self.mlp, ComputationGraph([cost]), .01)
return cost, z, x_hat
# ==============================================================================
from __future__ import print_function
from collections import namedtuple
import contextlib
import itertools
import math
import sys
from numba.compiler import compile_isolated, Flags
from numba import jit, types
import numba.unittest_support as unittest
from numba import testing
from .support import TestCase, MemoryLeakMixin
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
Point = namedtuple('Point', ('a', 'b'))
def noop(x):
pass
def unbox_usecase(x):
"""
Expect a list of numbers
"""
res = 0
for v in x:
res += v
return res
def unbox_usecase2(x):
"""
Expect a list of tuples
"""
res = 0
for v in x:
res += len(v)
return res
def unbox_usecase3(x):
"""
Expect a (number, list of numbers) tuple.
"""
a, b = x
res = a
for v in b:
res += v
return res
def unbox_usecase4(x):
"""
Expect a (number, list of tuples) tuple.
"""
a, b = x
res = a
for v in b:
res += len(v)
return res
def create_list(x, y, z):
return [x, y, z]
def create_nested_list(x, y, z, a, b, c):
return [[x, y, z], [a, b, c]]
def list_comprehension1():
return sum([x**2 for x in range(10)])
def list_comprehension2():
return sum([x for x in range(10) if x % 2 == 0])
def list_comprehension3():
return sum([math.pow(x, 2) for x in range(10)])
def list_comprehension4():
return sum([x * y for x in range(10) for y in range(10)])
def list_comprehension5():
return [x * 2 for x in range(10)]
def list_comprehension6():
return [[x for x in range(y)] for y in range(3)]
def list_constructor(n):
return list(range(n))
def list_append(n):
l = []
l.append(42)
for i in range(n):
l.append(i)
return l
def list_append_heterogenous(n):
l = []
l.append(42.0)
for i in range(n):
l.append(i)
return l
def list_extend(n):
l = []
# A non-list iterable and a list
l.extend(range(n))
l.extend(l[:-1])
l.extend(range(n, 0, -1))
return l
def list_extend_heterogenous(n):
l = []
# Extend with various iterables, including lists, with different types
l.extend(range(n))
l.extend(l[:-1])
l.extend((5, 42))
l.extend([123.0])
return l
def list_pop0(n):
l = list(range(n))
res = 0
while len(l) > 0:
res += len(l) * l.pop()
return res
def list_pop1(n, i):
l = list(range(n))
x = l.pop(i)
return x, l
def list_len(n):
l = list(range(n))
return len(l)
def list_getitem(n):
l = list(range(n))
res = 0
# Positive indices
for i in range(len(l)):
res += i * l[i]
# Negative indices
for i in range(-len(l), 0):
res -= i * l[i]
return res
def list_setitem(n):
l = list(range(n))
res = 0
# Positive indices
for i in range(len(l)):
l[i] = i * l[i]
# Negative indices
for i in range(-len(l), 0):
l[i] = i * l[i]
for i in range(len(l)):
res += l[i]
return res
def list_getslice2(n, start, stop):
l = list(range(n))
return l[start:stop]
def list_getslice3(n, start, stop, step):
l = list(range(n))
return l[start:stop:step]
def list_setslice2(n, n_source, start, stop):
# Generic setslice with size change
l = list(range(n))
v = list(range(100, 100 + n_source))
l[start:stop] = v
return l
def list_setslice3(n, start, stop, step):
l = list(range(n))
v = l[start:stop:step]
for i in range(len(v)):
v[i] += 100
l[start:stop:step] = v
return l
def list_setslice3_arbitrary(n, n_src, start, stop, step):
l = list(range(n))
l[start:stop:step] = list(range(100, 100 + n_src))
return l
def list_delslice0(n):
l = list(range(n))
del l[:]
return l
def list_delslice1(n, start, stop):
l = list(range(n))
del l[start:]
del l[:stop]
return l
def list_delslice2(n, start, stop):
l = list(range(n))
del l[start:stop]
return l
def list_clear(n):
l = list(range(n))
l.clear()
return l
def list_copy(n):
l = list(range(n))
ll = l.copy()
l.append(42)
return l, ll
def list_iteration(n):
l = list(range(n))
res = 0
for i, v in enumerate(l):
res += i * v
return res
def list_contains(n):
l = list(range(n))
return (0 in l, 1 in l, n - 1 in l, n in l)
def list_index1(n, v):
l = list(range(n, 0, -1))
return l.index(v)
def list_index2(n, v, start):
l = list(range(n, 0, -1))
return l.index(v, start)
def list_index3(n, v, start, stop):
l = list(range(n, 0, -1))
return l.index(v, start, stop)
def list_remove(n, v):
l = list(range(n - 1, -1, -1))
l.remove(v)
return l
def list_insert(n, pos, v):
l = list(range(0, n))
l.insert(pos, v)
return l
def list_count(n, v):
l = []
for x in range(n):
l.append(x & 3)
return l.count(v)
def list_reverse(n):
l = list(range(n))
l.reverse()
return l
def list_add(m, n):
a = list(range(0, m))
b = list(range(100, 100 + n))
res = a + b
res.append(42) # check result is a copy
return a, b, res
def list_add_heterogenous():
a = [1]
b = [2.0]
c = a + b
d = b + a
# check result is a copy
a.append(3)
b.append(4.0)
return a, b, c, d
def list_add_inplace(m, n):
a = list(range(0, m))
b = list(range(100, 100 + n))
a += b
return a, b
def list_add_inplace_heterogenous():
a = [1]
b = [2.0]
a += b
b += a
return a, b
def list_mul(n, v):
a = list(range(n))
return a * v
def list_mul_inplace(n, v):
a = list(range(n))
a *= v
return a
def list_bool(n):
a = list(range(n))
return bool(a), (True if a else False)
def eq_usecase(a, b):
return list(a) == list(b)
def ne_usecase(a, b):
return list(a) != list(b)
def gt_usecase(a, b):
return list(a) > list(b)
def ge_usecase(a, b):
return list(a) >= list(b)
def lt_usecase(a, b):
return list(a) < list(b)
def le_usecase(a, b):
return list(a) <= list(b)
def identity_usecase(n):
a = list(range(n))
b = a
c = a[:]
return (a is b), (a is not b), (a is c), (a is not c)
def bool_list_usecase():
# Exercise getitem, setitem, iteration with bool values (issue #1373)
l = [False]
l[0] = True
x = False
for v in l:
x = x ^ v
return l, x
def reflect_simple(l, ll):
x = l.pop()
y = l.pop()
l[0] = 42.
l.extend(ll)
return l, x, y
def reflect_conditional(l, ll):
# `l` may or may not actually reflect a Python list
if ll[0]:
l = [11., 22., 33., 44.]
x = l.pop()
y = l.pop()
l[0] = 42.
l.extend(ll)
return l, x, y
def reflect_exception(l):
l.append(42)
raise ZeroDivisionError
def reflect_dual(l, ll):
l.append(ll.pop())
return l is ll
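# --- Illustration (not part of the test suite) --------------------------------
# How the use cases above are exercised outside the harness: wrap the plain
# Python function with jit(nopython=True) and check it matches, e.g.:
#
#     fast_append = jit(nopython=True)(list_append)
#     assert fast_append(3) == list_append(3)   # [42, 0, 1, 2]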
class TestLists(MemoryLeakMixin, TestCase):
def test_create_list(self):
pyfunc = create_list
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3), pyfunc(1, 2, 3))
def test_create_nested_list(self):
pyfunc = create_nested_list
with self.assertTypingError():
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3, 4, 5, 6), pyfunc(1, 2, 3, 4, 5, 6))
@testing.allow_interpreter_mode
def test_list_comprehension(self):
list_tests = [list_comprehension1,
list_comprehension2,
list_comprehension3,
list_comprehension4,
list_comprehension5,
list_comprehension6]
for test in list_tests:
pyfunc = test
cr = compile_isolated(pyfunc, ())
cfunc = cr.entry_point
self.assertEqual(cfunc(), pyfunc())
def check_unary_with_size(self, pyfunc, precise=True):
cfunc = jit(nopython=True)(pyfunc)
# Use various sizes, to stress the allocation algorithm
for n in [0, 3, 16, 70, 400]:
eq = self.assertPreciseEqual if precise else self.assertEqual
eq(cfunc(n), pyfunc(n))
def test_constructor(self):
self.check_unary_with_size(list_constructor)
def test_append(self):
self.check_unary_with_size(list_append)
def test_append_heterogenous(self):
self.check_unary_with_size(list_append_heterogenous, precise=False)
def test_extend(self):
self.check_unary_with_size(list_extend)
def test_extend_heterogenous(self):
self.check_unary_with_size(list_extend_heterogenous, precise=False)
def test_pop0(self):
self.check_unary_with_size(list_pop0)
def test_pop1(self):
pyfunc = list_pop1
cfunc = jit(nopython=True)(pyfunc)
for n in [5, 40]:
for i in [0, 1, n - 2, n - 1, -1, -2, -n + 3, -n + 1]:
expected = pyfunc(n, i)
self.assertPreciseEqual(cfunc(n, i), expected)
def test_pop_errors(self):
# XXX References are leaked when an exception is raised
self.disable_leak_check()
cfunc = jit(nopython=True)(list_pop1)
with self.assertRaises(IndexError) as cm:
cfunc(0, 5)
self.assertEqual(str(cm.exception), "pop from empty list")
with self.assertRaises(IndexError) as cm:
cfunc(1, 5)
self.assertEqual(str(cm.exception), "pop index out of range")
def test_insert(self):
pyfunc = list_insert
cfunc = jit(nopython=True)(pyfunc)
for n in [5, 40]:
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for i in indices:
expected = pyfunc(n, i, 42)
self.assertPreciseEqual(cfunc(n, i, 42), expected)
def test_len(self):
self.check_unary_with_size(list_len)
def test_getitem(self):
self.check_unary_with_size(list_getitem)
def test_setitem(self):
self.check_unary_with_size(list_setitem)
def check_slicing2(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
sizes = [5, 40]
for n in sizes:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
for start, stop in itertools.product(indices, indices):
expected = pyfunc(n, start, stop)
self.assertPreciseEqual(cfunc(n, start, stop), expected)
def test_getslice2(self):
self.check_slicing2(list_getslice2)
def test_setslice2(self):
pyfunc = list_setslice2
cfunc = jit(nopython=True)(pyfunc)
sizes = [5, 40]
for n, n_src in itertools.product(sizes, sizes):
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
for start, stop in itertools.product(indices, indices):
expected = pyfunc(n, n_src, start, stop)
self.assertPreciseEqual(cfunc(n, n_src, start, stop), expected)
def test_getslice3(self):
pyfunc = list_getslice3
cfunc = jit(nopython=True)(pyfunc)
for n in [10]:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
steps = [4, 1, -1, 2, -3]
for start, stop, step in itertools.product(indices, indices, steps):
expected = pyfunc(n, start, stop, step)
self.assertPreciseEqual(cfunc(n, start, stop, step), expected)
def test_setslice3(self):
pyfunc = list_setslice3
cfunc = jit(nopython=True)(pyfunc)
for n in [10]:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
steps = [4, 1, -1, 2, -3]
for start, stop, step in itertools.product(indices, indices, steps):
expected = pyfunc(n, start, stop, step)
self.assertPreciseEqual(cfunc(n, start, stop, step), expected)
def test_setslice3_resize(self):
# XXX References are leaked when an exception is raised
self.disable_leak_check()
pyfunc = list_setslice3_arbitrary
cfunc = jit(nopython=True)(pyfunc)
# step == 1 => can resize
cfunc(5, 10, 0, 2, 1)
# step != 1 => cannot resize
with self.assertRaises(ValueError) as cm:
cfunc(5, 100, 0, 3, 2)
self.assertIn("cannot resize", str(cm.exception))
def test_delslice0(self):
self.check_unary_with_size(list_delslice0)
def test_delslice1(self):
self.check_slicing2(list_delslice1)
def test_delslice2(self):
self.check_slicing2(list_delslice2)
def test_invalid_slice(self):
self.disable_leak_check()
pyfunc = list_getslice3
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(ValueError) as cm:
cfunc(10, 1, 2, 0)
self.assertEqual(str(cm.exception), "slice step cannot be zero")
def test_iteration(self):
self.check_unary_with_size(list_iteration)
def test_reverse(self):
self.check_unary_with_size(list_reverse)
def test_contains(self):
self.check_unary_with_size(list_contains)
def check_index_result(self, pyfunc, cfunc, args):
try:
expected = pyfunc(*args)
except ValueError:
with self.assertRaises(ValueError):
cfunc(*args)
else:
self.assertPreciseEqual(cfunc(*args), expected)
def test_index1(self):
self.disable_leak_check()
pyfunc = list_index1
cfunc = jit(nopython=True)(pyfunc)
for v in (0, 1, 5, 10, 99999999):
self.check_index_result(pyfunc, cfunc, (16, v))
def test_index2(self):
self.disable_leak_check()
pyfunc = list_index2
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 10, 99999999):
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for start in indices:
self.check_index_result(pyfunc, cfunc, (16, v, start))
def test_index3(self):
self.disable_leak_check()
pyfunc = list_index3
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 10, 99999999):
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for start, stop in itertools.product(indices, indices):
self.check_index_result(pyfunc, cfunc, (16, v, start, stop))
def test_remove(self):
pyfunc = list_remove
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 15):
expected = pyfunc(n, v)
self.assertPreciseEqual(cfunc(n, v), expected)
def test_remove_error(self):
self.disable_leak_check()
pyfunc = list_remove
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(ValueError) as cm:
cfunc(10, 42)
self.assertEqual(str(cm.exception), "list.remove(x): x not in list")
def test_count(self):
pyfunc = list_count
cfunc = jit(nopython=True)(pyfunc)
for v in range(5):
self.assertPreciseEqual(cfunc(18, v), pyfunc(18, v))
@unittest.skipUnless(sys.version_info >= (3, 3),
"list.clear() needs Python 3.3+")
def test_clear(self):
self.check_unary_with_size(list_clear)
@unittest.skipUnless(sys.version_info >= (3, 3),
"list.copy() needs Python 3.3+")
def test_copy(self):
self.check_unary_with_size(list_copy)
def check_add(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
sizes = [0, 3, 50, 300]
for m, n in itertools.product(sizes, sizes):
expected = pyfunc(m, n)
self.assertPreciseEqual(cfunc(m, n), expected)
def test_add(self):
self.check_add(list_add)
def test_add_heterogenous(self):
pyfunc = list_add_heterogenous
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
self.assertEqual(cfunc(), expected)
def test_add_inplace(self):
self.check_add(list_add_inplace)
def test_add_inplace_heterogenous(self):
pyfunc = list_add_inplace_heterogenous
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
self.assertEqual(cfunc(), expected)
def check_mul(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
for n in [0, 3, 50, 300]:
for v in [1, 2, 3, 0, -1, -42]:
expected = pyfunc(n, v)
self.assertPreciseEqual(cfunc(n, v), expected)
def test_mul(self):
self.check_mul(list_mul)
def test_mul_inplace(self):
self.check_mul(list_mul_inplace)
@unittest.skipUnless(sys.maxsize >= 2**32,
"need a 64-bit system to test for MemoryError")
def test_mul_error(self):
self.disable_leak_check()
pyfunc = list_mul
cfunc = jit(nopython=True)(pyfunc)
# Fail in malloc()
with self.assertRaises(MemoryError):
cfunc(1, 2**58)
# Overflow size computation when multiplying by item size
with self.assertRaises(MemoryError):
cfunc(1, 2**62)
def test_bool(self):
pyfunc = list_bool
cfunc = jit(nopython=True)(pyfunc)
for n in [0, 1, 3]:
expected = pyfunc(n)
self.assertPreciseEqual(cfunc(n), expected)
def test_list_passing(self):
# Check one can pass a list from a Numba function to another
@jit(nopython=True)
def inner(lst):
return len(lst), lst[-1]
@jit(nopython=True)
def outer(n):
l = list(range(n))
return inner(l)
self.assertPreciseEqual(outer(5), (5, 4))
def _test_compare(self, pyfunc):
def eq(args):
self.assertIs(cfunc(*args), pyfunc(*args),
"mismatch for arguments %s" % (args,))
cfunc = jit(nopython=True)(pyfunc)
eq(((1, 2), (1, 2)))
eq(((1, 2, 3), (1, 2)))
eq(((1, 2), (1, 2, 3)))
eq(((1, 2, 4), (1, 2, 3)))
eq(((1.0, 2.0, 3.0), (1, 2, 3)))
eq(((1.0, 2.0, 3.5), (1, 2, 3)))
def test_eq(self):
self._test_compare(eq_usecase)
def test_ne(self):
self._test_compare(ne_usecase)
def test_le(self):
self._test_compare(le_usecase)
def test_lt(self):
self._test_compare(lt_usecase)
def test_ge(self):
self._test_compare(ge_usecase)
def test_gt(self):
self._test_compare(gt_usecase)
def test_identity(self):
pyfunc = identity_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(3), pyfunc(3))
def test_bool_list(self):
# Check lists of bools compile and run successfully
pyfunc = bool_list_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), pyfunc())
class TestUnboxing(MemoryLeakMixin, TestCase):
"""
Test unboxing of Python lists into native Numba lists.
"""
@contextlib.contextmanager
def assert_type_error(self, msg):
with self.assertRaises(TypeError) as raises:
yield
if msg is not None:
self.assertIn(msg, str(raises.exception))
def check_unary(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
expected = pyfunc(arg)
got = cfunc(arg)
self.assertPreciseEqual(got, expected)
return check
def test_numbers(self):
check = self.check_unary(unbox_usecase)
check([1, 2])
check([1j, 2.5])
def test_tuples(self):
check = self.check_unary(unbox_usecase2)
check([(1, 2), (3, 4)])
check([(1, 2j), (3, 4j)])
check([(), (), ()])
def test_list_inside_tuple(self):
check = self.check_unary(unbox_usecase3)
check((1, [2, 3, 4]))
def test_list_of_tuples_inside_tuple(self):
check = self.check_unary(unbox_usecase4)
check((1, [(2,), (3,)]))
def test_errors(self):
        # See #1545: error checking should ensure the list is homogeneous
msg = "can't convert complex to int"
pyfunc = noop
cfunc = jit(nopython=True)(pyfunc)
with self.assert_type_error(msg):
cfunc([1, 2j])
# Same when the list is nested in a tuple or namedtuple
with self.assert_type_error(msg):
cfunc((1, [1, 2j]))
with self.assert_type_error(msg):
cfunc(Point(1, [1, 2j]))
class TestListReflection(MemoryLeakMixin, TestCase):
"""
Test reflection of native Numba lists on Python list objects.
"""
def check_reflection(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
samples = [([1., 2., 3., 4.], [0.]),
([1., 2., 3., 4.], [5., 6., 7., 8., 9.]),
]
for dest, src in samples:
expected = list(dest)
got = list(dest)
pyres = pyfunc(expected, src)
with self.assertRefCount(got, src):
cres = cfunc(got, src)
self.assertPreciseEqual(cres, pyres)
self.assertPreciseEqual(expected, got)
self.assertEqual(pyres[0] is expected, cres[0] is got)
del pyres, cres
def test_reflect_simple(self):
self.check_reflection(reflect_simple)
def test_reflect_conditional(self):
self.check_reflection(reflect_conditional)
def test_reflect_exception(self):
"""
When the function exits with an exception, lists should still be
reflected.
"""
pyfunc = reflect_exception
cfunc = jit(nopython=True)(pyfunc)
l = [1, 2, 3]
with self.assertRefCount(l):
with self.assertRaises(ZeroDivisionError):
cfunc(l)
self.assertPreciseEqual(l, [1, 2, 3, 42])
def test_reflect_same_list(self):
"""
When the same list object is reflected twice, behaviour should
be consistent.
"""
pyfunc = reflect_dual
cfunc = jit(nopython=True)(pyfunc)
pylist = [1, 2, 3]
clist = pylist[:]
expected = pyfunc(pylist, pylist)
got = cfunc(clist, clist)
self.assertPreciseEqual(expected, got)
self.assertPreciseEqual(pylist, clist)
self.assertPreciseEqual(sys.getrefcount(pylist), sys.getrefcount(clist))
if __name__ == '__main__':
unittest.main()
# ==============================================================================
import logging
import os
import salt.modules.cmdmod as cmdmod
import salt.modules.pkg_resource as pkg_resource
import salt.modules.rpm_lowpkg as rpm
import salt.modules.yumpkg as yumpkg
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltInvocationError
from tests.support.mock import MagicMock, Mock, call, patch
import pytest
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def list_repos_var():
return {
"base": {
"file": "/etc/yum.repos.d/CentOS-Base.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"mirrorlist": "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra",
"name": "CentOS-$releasever - Base",
},
"base-source": {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"enabled": "0",
"file": "/etc/yum.repos.d/CentOS-Sources.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
},
"updates": {
"file": "/etc/yum.repos.d/CentOS-Base.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"mirrorlist": "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra",
"name": "CentOS-$releasever - Updates",
},
"updates-source": {
"baseurl": "http://vault.centos.org/centos/$releasever/updates/Source/",
"enabled": "0",
"file": "/etc/yum.repos.d/CentOS-Sources.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Updates Sources",
},
}
@pytest.fixture
def configure_loader_modules():
return {
yumpkg: {
"__context__": {"yum_bin": "yum"},
"__grains__": {
"osarch": "x86_64",
"os": "CentOS",
"os_family": "RedHat",
"osmajorrelease": 7,
},
},
pkg_resource: {},
}
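# --- Illustration (not part of the suite) -------------------------------------
# The tests below lean on mock.patch.dict to inject loader dunders such as
# __salt__ for the duration of a with-block. Inside a test that uses the
# configure_loader_modules fixture above, the pattern boils down to:
#
#     fake_run = MagicMock(return_value="mocked output")
#     with patch.dict(yumpkg.__salt__, {"cmd.run": fake_run}):
#         assert yumpkg.__salt__["cmd.run"]() == "mocked output"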
def test_list_pkgs():
"""
Test packages listing.
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471",
"alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475",
"gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477",
"rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477",
"pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478",
"yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479",
"lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479",
"qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480",
"ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480",
"shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481",
"util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484",
"openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
):
pkgs = yumpkg.list_pkgs(versions_as_list=True)
for pkg_name, pkg_version in {
"python-urlgrabber": "3.10-8.el7",
"alsa-lib": "1.1.1-1.el7",
"gnupg2": "2.0.22-4.el7",
"rpm-python": "4.11.3-21.el7",
"pygpgme": "0.3-9.el7",
"yum": "3.4.3-150.el7.centos",
"lzo": "2.06-8.el7",
"qrencode-libs": "3.4.1-3.el7",
"ustr": "1.0.4-16.el7",
"shadow-utils": "2:4.1.5.1-24.el7",
"util-linux": "2.23.2-33.el7",
"openssh": "6.6.1p1-33.el7_3",
"virt-what": "1.13-8.el7",
}.items():
assert pkgs.get(pkg_name) is not None
assert pkgs[pkg_name] == [pkg_version]
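# --- Illustration (not part of the suite) -------------------------------------
# How one line of the mocked rpm output above decomposes on the "_|-"
# separator; the field labels are my own reading of the query format, not
# Salt's API:
#
#     line = "shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481"
#     name, epoch, version, release, arch, _, itime = line.split("_|-")
#     # -> "shadow-utils", epoch "2", version "4.1.5.1", release "24.el7",
#     #    arch "x86_64", install time "1487838481"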
def test_list_pkgs_no_context():
"""
Test packages listing.
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471",
"alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475",
"gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477",
"rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477",
"pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478",
"yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479",
"lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479",
"qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480",
"ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480",
"shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481",
"util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484",
"openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
), patch.object(
yumpkg, "_list_pkgs_from_context"
) as list_pkgs_context_mock:
pkgs = yumpkg.list_pkgs(versions_as_list=True, use_context=False)
list_pkgs_context_mock.assert_not_called()
list_pkgs_context_mock.reset_mock()
pkgs = yumpkg.list_pkgs(versions_as_list=True, use_context=False)
list_pkgs_context_mock.assert_not_called()
list_pkgs_context_mock.reset_mock()
def test_list_pkgs_with_attr():
"""
Test packages listing with the attr parameter
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471",
"alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475",
"gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477",
"rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477",
"pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478",
"yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479",
"lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479",
"qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480",
"ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480",
"shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481",
"util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484",
"openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
):
pkgs = yumpkg.list_pkgs(
attr=["epoch", "release", "arch", "install_date_time_t"]
)
for pkg_name, pkg_attr in {
"python-urlgrabber": {
"version": "3.10",
"release": "8.el7",
"arch": "noarch",
"install_date_time_t": 1487838471,
"epoch": None,
},
"alsa-lib": {
"version": "1.1.1",
"release": "1.el7",
"arch": "x86_64",
"install_date_time_t": 1487838475,
"epoch": None,
},
"gnupg2": {
"version": "2.0.22",
"release": "4.el7",
"arch": "x86_64",
"install_date_time_t": 1487838477,
"epoch": None,
},
"rpm-python": {
"version": "4.11.3",
"release": "21.el7",
"arch": "x86_64",
"install_date_time_t": 1487838477,
"epoch": None,
},
"pygpgme": {
"version": "0.3",
"release": "9.el7",
"arch": "x86_64",
"install_date_time_t": 1487838478,
"epoch": None,
},
"yum": {
"version": "3.4.3",
"release": "150.el7.centos",
"arch": "noarch",
"install_date_time_t": 1487838479,
"epoch": None,
},
"lzo": {
"version": "2.06",
"release": "8.el7",
"arch": "x86_64",
"install_date_time_t": 1487838479,
"epoch": None,
},
"qrencode-libs": {
"version": "3.4.1",
"release": "3.el7",
"arch": "x86_64",
"install_date_time_t": 1487838480,
"epoch": None,
},
"ustr": {
"version": "1.0.4",
"release": "16.el7",
"arch": "x86_64",
"install_date_time_t": 1487838480,
"epoch": None,
},
"shadow-utils": {
"epoch": "2",
"version": "4.1.5.1",
"release": "24.el7",
"arch": "x86_64",
"install_date_time_t": 1487838481,
},
"util-linux": {
"version": "2.23.2",
"release": "33.el7",
"arch": "x86_64",
"install_date_time_t": 1487838484,
"epoch": None,
},
"openssh": {
"version": "6.6.1p1",
"release": "33.el7_3",
"arch": "x86_64",
"install_date_time_t": 1487838485,
"epoch": None,
},
"virt-what": {
"version": "1.13",
"release": "8.el7",
"install_date_time_t": 1487838486,
"arch": "x86_64",
"epoch": None,
},
}.items():
assert pkgs.get(pkg_name) is not None
assert pkgs[pkg_name] == [pkg_attr]
def test_list_pkgs_with_attr_multiple_versions():
"""
Test packages listing with the attr parameter reporting multiple version installed
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"glibc_|-(none)_|-2.12_|-1.212.el6_|-i686_|-(none)_|-1542394210"
"glibc_|-(none)_|-2.12_|-1.212.el6_|-x86_64_|-(none)_|-1542394204",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
"virt-what_|-(none)_|-1.10_|-2.el7_|-x86_64_|-(none)_|-1387838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
):
pkgs = yumpkg.list_pkgs(
attr=["epoch", "release", "arch", "install_date_time_t"]
)
expected_pkg_list = {
"glibc": [
{
"version": "2.12",
"release": "1.212.el6",
"install_date_time_t": 1542394210,
"arch": "i686",
"epoch": None,
},
{
"version": "2.12",
"release": "1.212.el6",
"install_date_time_t": 1542394204,
"arch": "x86_64",
"epoch": None,
},
],
"virt-what": [
{
"version": "1.10",
"release": "2.el7",
"install_date_time_t": 1387838486,
"arch": "x86_64",
"epoch": None,
},
{
"version": "1.13",
"release": "8.el7",
"install_date_time_t": 1487838486,
"arch": "x86_64",
"epoch": None,
},
],
}
for pkgname, pkginfo in pkgs.items():
assert pkginfo == expected_pkg_list[pkgname]
assert len(pkginfo) == len(expected_pkg_list[pkgname])
def test_list_patches():
"""
Test patches listing.
:return:
"""
yum_out = [
"i my-fake-patch-not-installed-1234 recommended "
" spacewalk-usix-2.7.5.2-2.2.noarch",
" my-fake-patch-not-installed-1234 recommended "
" spacewalksd-5.0.26.2-21.2.x86_64",
"i my-fake-patch-not-installed-1234 recommended "
" suseRegisterInfo-3.1.1-18.2.x86_64",
"i my-fake-patch-installed-1234 recommended "
" my-package-one-1.1-0.1.x86_64",
"i my-fake-patch-installed-1234 recommended "
" my-package-two-1.1-0.1.x86_64",
]
expected_patches = {
"my-fake-patch-not-installed-1234": {
"installed": False,
"summary": [
"spacewalk-usix-2.7.5.2-2.2.noarch",
"spacewalksd-5.0.26.2-21.2.x86_64",
"suseRegisterInfo-3.1.1-18.2.x86_64",
],
},
"my-fake-patch-installed-1234": {
"installed": True,
"summary": [
"my-package-one-1.1-0.1.x86_64",
"my-package-two-1.1-0.1.x86_64",
],
},
}
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))},
):
patches = yumpkg.list_patches()
assert patches["my-fake-patch-not-installed-1234"]["installed"] is False
assert len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3
for _patch in expected_patches["my-fake-patch-not-installed-1234"]["summary"]:
assert _patch in patches["my-fake-patch-not-installed-1234"]["summary"]
assert patches["my-fake-patch-installed-1234"]["installed"] is True
assert len(patches["my-fake-patch-installed-1234"]["summary"]) == 2
for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]:
assert _patch in patches["my-fake-patch-installed-1234"]["summary"]
def test_latest_version_with_options():
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.latest_version("foo", refresh=False, fromrepo="good", branch="foo")
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"list",
"available",
"foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.latest_version(
"foo",
refresh=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"available",
"foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
# without fromrepo, but within a systemd scope
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch("salt.utils.systemd.has_scope", MagicMock(return_value=True)):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=True)},
):
yumpkg.latest_version(
"foo",
refresh=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
cmd.assert_called_once_with(
[
"systemd-run",
"--scope",
"yum",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"available",
"foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
def test_list_repo_pkgs_with_options(list_repos_var):
"""
Test list_repo_pkgs with and without fromrepo
NOTE: mock_calls records calls in chronological order: index 0
is the first call made and the last index is the most recent.
"""
really_old_yum = MagicMock(return_value="3.2.0")
older_yum = MagicMock(return_value="3.4.0")
newer_yum = MagicMock(return_value="3.4.5")
list_repos_mock = MagicMock(return_value=list_repos_var)
kwargs = {
"output_loglevel": "trace",
"ignore_retcode": True,
"python_shell": False,
"env": {},
}
with patch.object(yumpkg, "list_repos", list_repos_mock):
# Test with really old yum. The fromrepo argument has no effect on
# the yum commands we'd run.
with patch.dict(yumpkg.__salt__, {"cmd.run": really_old_yum}):
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs("foo")
# We should have called cmd.run_all twice
assert len(cmd.mock_calls) == 2
# Check args from the second call ("list available")
assert cmd.mock_calls[1][1] == (
["yum", "--quiet", "list", "available"],
)
# Check kwargs from the second call
assert cmd.mock_calls[1][2] == kwargs
# Check args from the first call ("list installed")
assert cmd.mock_calls[0][1] == (
["yum", "--quiet", "list", "installed"],
)
# Check kwargs from the first call
assert cmd.mock_calls[0][2] == kwargs
# Test with a slightly newer yum. The fromrepo argument still has no
# effect on the yum commands we'd run, but --showduplicates is added.
with patch.dict(yumpkg.__salt__, {"cmd.run": older_yum}):
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs("foo")
# We should have called cmd.run_all twice
assert len(cmd.mock_calls) == 2
# Check args from the second call ("list available")
assert cmd.mock_calls[1][1] == (
["yum", "--quiet", "--showduplicates", "list", "available"],
)
# Check kwargs from the second call
assert cmd.mock_calls[1][2] == kwargs
# Check args from the first call ("list installed")
assert cmd.mock_calls[0][1] == (
["yum", "--quiet", "--showduplicates", "list", "installed"],
)
# Check kwargs from the first call
assert cmd.mock_calls[0][2] == kwargs
# Test with newer yum. We should run one yum command per repo, so
# fromrepo would limit how many calls we make.
with patch.dict(yumpkg.__salt__, {"cmd.run": newer_yum}):
# When fromrepo is used, we would only run one yum command, for
# that specific repo.
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs("foo", fromrepo="base")
# We should have called cmd.run_all once
assert len(cmd.mock_calls) == 1
# Check args
assert cmd.mock_calls[0][1] == (
[
"yum",
"--quiet",
"--showduplicates",
"repository-packages",
"base",
"list",
"foo",
],
)
# Check kwargs
assert cmd.mock_calls[0][2] == kwargs
# Test enabling base-source and disabling updates. We should
# get two calls, one for each enabled repo. Because dict
# iteration order can vary, different Python versions may make the
# calls in different orders. That is OK; it just means we have to
# check both the first and second mock call both times.
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs(
"foo", enablerepo="base-source", disablerepo="updates"
)
# We should have called cmd.run_all twice
assert len(cmd.mock_calls) == 2
for repo in ("base", "base-source"):
for index in (0, 1):
try:
# Check args
assert cmd.mock_calls[index][1] == (
[
"yum",
"--quiet",
"--showduplicates",
"repository-packages",
repo,
"list",
"foo",
],
)
# Check kwargs
assert cmd.mock_calls[index][2] == kwargs
break
except AssertionError:
continue
else:
pytest.fail("repo '{}' not checked".format(repo))
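# NOTE: the for/else above relies on Python's loop-else semantics:
# pytest.fail is only reached when no break occurred, i.e. when a
# repo matched neither of the two recorded mock calls.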
def test_list_upgrades_dnf():
"""
The subcommand should be "upgrades" with dnf
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(refresh=False, fromrepo="good", branch="foo")
cmd.assert_called_once_with(
[
"dnf",
"--quiet",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"list",
"upgrades",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(
refresh=False, enablerepo="good", disablerepo="bad", branch="foo"
)
cmd.assert_called_once_with(
[
"dnf",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"upgrades",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
def test_list_upgrades_yum():
"""
The subcommand should be "updates" with yum
"""
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(refresh=False, fromrepo="good", branch="foo")
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"list",
"updates",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(
refresh=False, enablerepo="good", disablerepo="bad", branch="foo"
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"updates",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
def test_refresh_db_with_options():
with patch("salt.utils.pkg.clear_rtag", Mock()):
# With check_update=True we will do a cmd.run to run the clean_cmd, and
# then a separate cmd.retcode to check for updates.
# with fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(check_update=True, fromrepo="good", branch="foo")
assert yum_call.call_count == 2
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"check-update",
"--setopt=autocheck_running_kernel=false",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
],
output_loglevel="trace",
env={},
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(
check_update=True,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
assert yum_call.call_count == 2
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"check-update",
"--setopt=autocheck_running_kernel=false",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
],
output_loglevel="trace",
env={},
ignore_retcode=True,
python_shell=False,
)
# With check_update=False we will just do a cmd.run for the clean_cmd
# with fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(check_update=False, fromrepo="good", branch="foo")
assert yum_call.call_count == 1
yum_call.assert_called_once_with(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(
check_update=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
assert yum_call.call_count == 1
yum_call.assert_called_once_with(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
def test_install_with_options():
parse_targets = MagicMock(return_value=({"foo": None}, "repository"))
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"pkg_resource.parse_targets": parse_targets}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.install(
refresh=False,
fromrepo="good",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"-y",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"install",
"foo",
],
env={},
output_loglevel="trace",
python_shell=False,
ignore_retcode=False,
redirect_stderr=True,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.install(
refresh=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"-y",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"install",
"foo",
],
env={},
output_loglevel="trace",
python_shell=False,
ignore_retcode=False,
redirect_stderr=True,
)
def test_remove_with_epoch():
"""
Tests that we properly identify a version containing an epoch for
deinstallation.
You can only deinstall pkgs without the epoch if no arch is provided:
.. code-block:: bash
yum remove PackageKit-yum-1.1.10-2.el7.centos
"""
name = "foo"
installed = "8:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [installed] if kwargs.get("versions_as_list", False) else installed
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: installed}, "repository")
),
}
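# installed[2:] drops the "8:" epoch prefix, leaving "3.8.12-4.n.el7",
# so full_pkg_string below becomes "foo-3.8.12-4.n.el7".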
full_pkg_string = "-".join((name, installed[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
expected = ["yum", "-y", "remove", full_pkg_string]
yumpkg.remove(name)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
def test_remove_with_epoch_and_arch_info():
"""
Tests that we properly identify a version containing an epoch and arch
deinstallation.
You can deinstall pkgs with or without the epoch in combination with the
arch. Here we test for the absence of the epoch but the presence of the arch:
.. code-block:: bash
yum remove PackageKit-yum-1.1.10-2.el7.centos.x86_64
"""
arch = "x86_64"
name = "foo"
name_and_arch = name + "." + arch
installed = "8:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name_and_arch: [installed]
if kwargs.get("versions_as_list", False)
else installed
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name_and_arch: installed}, "repository")
),
}
full_pkg_string = "-".join((name, installed[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
expected = ["yum", "-y", "remove", full_pkg_string + "." + arch]
yumpkg.remove(name)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
def test_remove_with_wildcard():
"""
Tests that we properly resolve a wildcard package name (whose installed
version contains an epoch) for deinstallation.
Wildcards can be passed positionally or via the pkgs argument:
.. code-block:: bash
yum remove foo*
yum remove pkgs='[{"foo*": "8:3.8.12-4.n.el7"}]'
"""
name = "foobarpkg"
installed = "8:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [installed] if kwargs.get("versions_as_list", False) else installed
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: installed}, "repository")
),
}
full_pkg_string = "-".join((name, installed[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
expected = ["yum", "-y", "remove", full_pkg_string]
yumpkg.remove("foo*")
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
expected = ["yum", "-y", "remove", full_pkg_string]
yumpkg.remove(pkgs=[{"foo*": "8:3.8.12-4.n.el7"}])
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
def test_install_with_epoch():
"""
Tests that we properly identify a version containing an epoch as an
upgrade instead of a downgrade.
"""
name = "foo"
old = "8:3.8.12-6.n.el7"
new = "9:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [old] if kwargs.get("versions_as_list", False) else old
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: new}, "repository")
),
}
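# new[2:] drops the "9:" epoch prefix, so the yum/dnf command is
# expected to target "foo-3.8.12-4.n.el7".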
full_pkg_string = "-".join((name, new[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
# Test yum
expected = ["yum", "-y", "install", full_pkg_string]
with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}
):
yumpkg.install("foo", version=new)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
# Test dnf
expected = [
"dnf",
"-y",
"--best",
"--allowerasing",
"install",
full_pkg_string,
]
yumpkg.__context__.pop("yum_bin")
cmd_mock.reset_mock()
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 27}
):
yumpkg.install("foo", version=new)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
@pytest.mark.skipif(not salt.utils.platform.is_linux(), reason="Only run on Linux")
def test_install_error_reporting():
"""
Tests that we properly report yum/dnf errors.
"""
name = "foo"
old = "8:3.8.12-6.n.el7"
new = "9:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [old] if kwargs.get("versions_as_list", False) else old
}
)
salt_mock = {
"cmd.run_all": cmdmod.run_all,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: new}, "repository")
),
}
full_pkg_string = "-".join((name, new[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock), patch.object(
yumpkg, "_yum", MagicMock(return_value="cat")
):
expected = {
"changes": {},
"errors": [
"cat: invalid option -- 'y'\nTry 'cat --help' for more information."
],
}
with pytest.raises(CommandExecutionError) as exc_info:
yumpkg.install("foo", version=new)
assert exc_info.value.info == expected, exc_info.value.info
def test_remove_not_installed():
"""
Tests that no exception is raised when removing a package that is not installed
"""
name = "foo"
list_pkgs_mock = MagicMock(return_value={})
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: None}, "repository")
),
}
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
# Test yum
with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}
):
yumpkg.remove(name)
cmd_mock.assert_not_called()
# Test dnf
yumpkg.__context__.pop("yum_bin")
cmd_mock.reset_mock()
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 27}
):
yumpkg.remove(name)
cmd_mock.assert_not_called()
def test_upgrade_with_options():
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.upgrade(
refresh=False,
fromrepo="good",
exclude="kernel*",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"-y",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"--exclude=kernel*",
"upgrade",
],
env={},
output_loglevel="trace",
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.upgrade(
refresh=False,
enablerepo="good",
disablerepo="bad",
exclude="kernel*",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"-y",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"--exclude=kernel*",
"upgrade",
],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_info_installed_with_all_versions():
"""
Test returning information for all installed versions of the named package(s).
:return:
"""
run_out = {
"virgo-dummy": [
{
"build_date": "2015-07-09T10:55:19Z",
"vendor": "openSUSE Build Service",
"description": (
"This is the Virgo dummy package used for testing SUSE Manager"
),
"license": "GPL-2.0",
"build_host": "sheep05",
"url": "http://www.suse.com",
"build_date_time_t": 1436432119,
"relocations": "(not relocatable)",
"source_rpm": "virgo-dummy-1.0-1.1.src.rpm",
"install_date": "2016-02-23T16:31:57Z",
"install_date_time_t": 1456241517,
"summary": "Virgo dummy package",
"version": "1.0",
"signature": (
"DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9"
),
"release": "1.1",
"group": "Applications/System",
"arch": "i686",
"size": "17992",
},
{
"build_date": "2015-07-09T10:15:19Z",
"vendor": "openSUSE Build Service",
"description": (
"This is the Virgo dummy package used for testing SUSE Manager"
),
"license": "GPL-2.0",
"build_host": "sheep05",
"url": "http://www.suse.com",
"build_date_time_t": 1436432119,
"relocations": "(not relocatable)",
"source_rpm": "virgo-dummy-1.0-1.1.src.rpm",
"install_date": "2016-02-23T16:31:57Z",
"install_date_time_t": 14562415127,
"summary": "Virgo dummy package",
"version": "1.0",
"signature": (
"DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9"
),
"release": "1.1",
"group": "Applications/System",
"arch": "x86_64",
"size": "13124",
},
],
"libopenssl1_0_0": [
{
"build_date": "2015-11-04T23:20:34Z",
"vendor": "SUSE LLC <https://www.suse.com/>",
"description": "The OpenSSL Project is a collaborative effort.",
"license": "OpenSSL",
"build_host": "sheep11",
"url": "https://www.openssl.org/",
"build_date_time_t": 1446675634,
"relocations": "(not relocatable)",
"source_rpm": "openssl-1.0.1i-34.1.src.rpm",
"install_date": "2016-02-23T16:31:35Z",
"install_date_time_t": 1456241495,
"summary": "Secure Sockets and Transport Layer Security",
"version": "1.0.1i",
"signature": (
"RSA/SHA256, Wed Nov 4 22:21:34 2015, Key ID 70af9e8139db7c82"
),
"release": "34.1",
"group": "Productivity/Networking/Security",
"packager": "https://www.suse.com/",
"arch": "x86_64",
"size": "2576912",
}
],
}
with patch.dict(yumpkg.__salt__, {"lowpkg.info": MagicMock(return_value=run_out)}):
installed = yumpkg.info_installed(all_versions=True)
# Test the overall number of packages returned
assert len(installed) == 2
# Test multiple versions for the same package
for pkg_name, pkg_info_list in installed.items():
assert len(pkg_info_list) == (2 if pkg_name == "virgo-dummy" else 1)
for info in pkg_info_list:
assert info["arch"] in ("x86_64", "i686")
def test_pkg_hold_yum():
"""
Tests that we properly identify versionlock plugin when using yum
for RHEL/CentOS 7 and Fedora < 22
"""
# Test RHEL/CentOS 7
list_pkgs_mock = {
"yum-plugin-versionlock": "0:1.0.0-0.n.el7",
"yum-versionlock": "0:1.0.0-0.n.el7",
}
cmd = MagicMock(return_value={"retcode": 0})
with patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(yumpkg, "list_holds", MagicMock(return_value=[])), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["yum", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
# Test Fedora 20
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 20}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["yum", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_pkg_hold_tdnf():
"""
Tests that we raise a SaltInvocationError if we try to use
hold-related functions on Photon OS.
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "tdnf"}):
with pytest.raises(SaltInvocationError):
yumpkg.hold("foo")
def test_pkg_hold_dnf():
"""
Tests that we properly identify versionlock plugin when using dnf
for RHEL/CentOS 8 and Fedora >= 22
"""
# Test RHEL/CentOS 8
list_pkgs_mock = {
"python2-dnf-plugin-versionlock": "0:1.0.0-0.n.el8",
"python3-dnf-plugin-versionlock": "0:1.0.0-0.n.el8",
}
yumpkg.__context__.pop("yum_bin")
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"osmajorrelease": 8}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["dnf", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
# Test Fedora 26+
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 26}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["dnf", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
# Test Fedora 22-25
list_pkgs_mock = {
"python-dnf-plugins-extras-versionlock": "0:1.0.0-0.n.el8",
"python3-dnf-plugins-extras-versionlock": "0:1.0.0-0.n.el8",
}
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 25}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["dnf", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
@pytest.mark.skipif(not yumpkg.HAS_YUM, reason="Could not import yum")
def test_yum_base_error():
with patch("yum.YumBase") as mock_yum_yumbase:
mock_yum_yumbase.side_effect = CommandExecutionError
with pytest.raises(CommandExecutionError):
yumpkg._get_yum_config()
def test_group_info():
"""
Test yumpkg.group_info parsing
"""
expected = {
"conditional": [],
"default": ["qgnomeplatform", "xdg-desktop-portal-gtk"],
"description": (
"GNOME is a highly intuitive and user friendly desktop environment."
),
"group": "GNOME",
"id": "gnome-desktop",
"mandatory": [
"NetworkManager-libreswan-gnome",
"PackageKit-command-not-found",
"PackageKit-gtk3-module",
"abrt-desktop",
"at-spi2-atk",
"at-spi2-core",
"avahi",
"baobab",
"caribou",
"caribou-gtk2-module",
"caribou-gtk3-module",
"cheese",
"chrome-gnome-shell",
"compat-cheese314",
"control-center",
"dconf",
"empathy",
"eog",
"evince",
"evince-nautilus",
"file-roller",
"file-roller-nautilus",
"firewall-config",
"firstboot",
"fprintd-pam",
"gdm",
"gedit",
"glib-networking",
"gnome-bluetooth",
"gnome-boxes",
"gnome-calculator",
"gnome-classic-session",
"gnome-clocks",
"gnome-color-manager",
"gnome-contacts",
"gnome-dictionary",
"gnome-disk-utility",
"gnome-font-viewer",
"gnome-getting-started-docs",
"gnome-icon-theme",
"gnome-icon-theme-extras",
"gnome-icon-theme-symbolic",
"gnome-initial-setup",
"gnome-packagekit",
"gnome-packagekit-updater",
"gnome-screenshot",
"gnome-session",
"gnome-session-xsession",
"gnome-settings-daemon",
"gnome-shell",
"gnome-software",
"gnome-system-log",
"gnome-system-monitor",
"gnome-terminal",
"gnome-terminal-nautilus",
"gnome-themes-standard",
"gnome-tweak-tool",
"gnome-user-docs",
"gnome-weather",
"gucharmap",
"gvfs-afc",
"gvfs-afp",
"gvfs-archive",
"gvfs-fuse",
"gvfs-goa",
"gvfs-gphoto2",
"gvfs-mtp",
"gvfs-smb",
"initial-setup-gui",
"libcanberra-gtk2",
"libcanberra-gtk3",
"libproxy-mozjs",
"librsvg2",
"libsane-hpaio",
"metacity",
"mousetweaks",
"nautilus",
"nautilus-sendto",
"nm-connection-editor",
"orca",
"redhat-access-gui",
"sane-backends-drivers-scanners",
"seahorse",
"setroubleshoot",
"sushi",
"totem",
"totem-nautilus",
"vinagre",
"vino",
"xdg-user-dirs-gtk",
"yelp",
],
"optional": [
"",
"alacarte",
"dconf-editor",
"dvgrab",
"fonts-tweak-tool",
"gconf-editor",
"gedit-plugins",
"gnote",
"libappindicator-gtk3",
"seahorse-nautilus",
"seahorse-sharing",
"vim-X11",
"xguest",
],
"type": "package group",
}
cmd_out = """Group: GNOME
Group-Id: gnome-desktop
Description: GNOME is a highly intuitive and user friendly desktop environment.
Mandatory Packages:
=NetworkManager-libreswan-gnome
=PackageKit-command-not-found
=PackageKit-gtk3-module
abrt-desktop
=at-spi2-atk
=at-spi2-core
=avahi
=baobab
-caribou
-caribou-gtk2-module
-caribou-gtk3-module
=cheese
=chrome-gnome-shell
=compat-cheese314
=control-center
=dconf
=empathy
=eog
=evince
=evince-nautilus
=file-roller
=file-roller-nautilus
=firewall-config
=firstboot
fprintd-pam
=gdm
=gedit
=glib-networking
=gnome-bluetooth
=gnome-boxes
=gnome-calculator
=gnome-classic-session
=gnome-clocks
=gnome-color-manager
=gnome-contacts
=gnome-dictionary
=gnome-disk-utility
=gnome-font-viewer
=gnome-getting-started-docs
=gnome-icon-theme
=gnome-icon-theme-extras
=gnome-icon-theme-symbolic
=gnome-initial-setup
=gnome-packagekit
=gnome-packagekit-updater
=gnome-screenshot
=gnome-session
=gnome-session-xsession
=gnome-settings-daemon
=gnome-shell
=gnome-software
=gnome-system-log
=gnome-system-monitor
=gnome-terminal
=gnome-terminal-nautilus
=gnome-themes-standard
=gnome-tweak-tool
=gnome-user-docs
=gnome-weather
=gucharmap
=gvfs-afc
=gvfs-afp
=gvfs-archive
=gvfs-fuse
=gvfs-goa
=gvfs-gphoto2
=gvfs-mtp
=gvfs-smb
initial-setup-gui
=libcanberra-gtk2
=libcanberra-gtk3
=libproxy-mozjs
=librsvg2
=libsane-hpaio
=metacity
=mousetweaks
=nautilus
=nautilus-sendto
=nm-connection-editor
=orca
-redhat-access-gui
=sane-backends-drivers-scanners
=seahorse
=setroubleshoot
=sushi
=totem
=totem-nautilus
=vinagre
=vino
=xdg-user-dirs-gtk
=yelp
Default Packages:
=qgnomeplatform
=xdg-desktop-portal-gtk
Optional Packages:
alacarte
dconf-editor
dvgrab
fonts-tweak-tool
gconf-editor
gedit-plugins
gnote
libappindicator-gtk3
seahorse-nautilus
seahorse-sharing
vim-X11
xguest
"""
with patch.dict(
yumpkg.__salt__, {"cmd.run_stdout": MagicMock(return_value=cmd_out)}
):
info = yumpkg.group_info("@gnome-desktop")
assert info == expected
def test_get_repo_with_existent_repo(list_repos_var):
"""
Test get_repo with an existing repository
Expected return is a populated dictionary
"""
repo = "base-source"
kwargs = {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
"enabled": True,
}
parse_repo_file_return = (
"",
{
"base-source": {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
"enabled": "1",
}
},
)
expected = {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
"enabled": "1",
}
patch_list_repos = patch.object(
yumpkg, "list_repos", autospec=True, return_value=list_repos_var
)
patch_parse_repo_file = patch.object(
yumpkg,
"_parse_repo_file",
autospec=True,
return_value=parse_repo_file_return,
)
with patch_list_repos, patch_parse_repo_file:
ret = yumpkg.get_repo(repo, **kwargs)
assert ret == expected, ret
def test_get_repo_with_non_existent_repo(list_repos_var):
"""
Test get_repo with a non-existent repository
Expected return is an empty dictionary
"""
repo = "non-existent-repository"
kwargs = {
"baseurl": "http://fake.centos.org/centos/$releasever/os/Non-Existent/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Non-Existent Repository",
"enabled": True,
}
expected = {}
patch_list_repos = patch.object(
yumpkg, "list_repos", autospec=True, return_value=list_repos_var
)
with patch_list_repos:
ret = yumpkg.get_repo(repo, **kwargs)
assert ret == expected, ret
def test_pkg_update_dnf():
"""
Tests that the proper CLI options are added when obsoletes=False
"""
name = "foo"
old = "1.2.2-1.fc31"
new = "1.2.3-1.fc31"
cmd_mock = MagicMock(return_value={"retcode": 0})
list_pkgs_mock = MagicMock(side_effect=[{name: old}, {name: new}])
parse_targets_mock = MagicMock(return_value=({"foo": None}, "repository"))
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd_mock, "pkg_resource.parse_targets": parse_targets_mock},
), patch.object(yumpkg, "refresh_db", MagicMock()), patch.object(
yumpkg, "list_pkgs", list_pkgs_mock
), patch.object(
yumpkg, "_yum", MagicMock(return_value="dnf")
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
ret = yumpkg.update(name, setopt="obsoletes=0,plugins=0")
expected = {name: {"old": old, "new": new}}
assert ret == expected, ret
cmd_mock.assert_called_once_with(
[
"dnf",
"--quiet",
"-y",
"--setopt",
"plugins=0",
"--setopt",
"obsoletes=False",
"upgrade",
"foo",
],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_call_yum_default():
"""
Call default Yum/Dnf.
:return:
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "fake-yum"}):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)},
):
yumpkg._call_yum(["-y", "--do-something"]) # pylint: disable=W0106
yumpkg.__salt__["cmd.run_all"].assert_called_once_with(
["fake-yum", "-y", "--do-something"],
env={},
output_loglevel="trace",
python_shell=False,
)
@patch("salt.utils.systemd.has_scope", MagicMock(return_value=True))
def test_call_yum_in_scope():
"""
Call Yum/Dnf within a systemd scope.
:return:
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "fake-yum"}):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=True)},
):
yumpkg._call_yum(["-y", "--do-something"]) # pylint: disable=W0106
yumpkg.__salt__["cmd.run_all"].assert_called_once_with(
["systemd-run", "--scope", "fake-yum", "-y", "--do-something"],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_call_yum_with_kwargs():
"""
Call Yum/Dnf with optional keyword arguments.
:return:
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "fake-yum"}):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)},
):
yumpkg._call_yum(
["-y", "--do-something"],
python_shell=True,
output_loglevel="quiet",
ignore_retcode=False,
username="Darth Vader",
) # pylint: disable=W0106
yumpkg.__salt__["cmd.run_all"].assert_called_once_with(
["fake-yum", "-y", "--do-something"],
env={},
ignore_retcode=False,
output_loglevel="quiet",
python_shell=True,
username="Darth Vader",
)
@pytest.mark.skipif(not salt.utils.systemd.booted(), reason="Requires systemd")
def test_services_need_restart():
"""
Test that dnf needs-restarting output is parsed and
salt.utils.systemd.pid_to_service is called as expected.
"""
expected = ["firewalld", "salt-minion"]
dnf_mock = Mock(
return_value="123 : /usr/bin/firewalld\n456 : /usr/bin/salt-minion\n"
)
systemd_mock = Mock(side_effect=["firewalld", "salt-minion"])
with patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")):
with patch.dict(yumpkg.__salt__, {"cmd.run_stdout": dnf_mock}), patch(
"salt.utils.systemd.pid_to_service", systemd_mock
):
assert sorted(yumpkg.services_need_restart()) == expected
systemd_mock.assert_has_calls([call("123"), call("456")])
def test_services_need_restart_requires_systemd():
"""Test that yumpkg.services_need_restart raises an error if systemd is unavailable."""
with patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")):
with patch("salt.utils.systemd.booted", Mock(return_value=False)):
pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
def test_services_need_restart_requires_dnf():
"""Test that yumpkg.services_need_restart raises an error if DNF is unavailable."""
with patch("salt.modules.yumpkg._yum", Mock(return_value="yum")):
pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
def test_61003_pkg_should_not_fail_when_target_not_in_old_pkgs():
patch_list_pkgs = patch(
"salt.modules.yumpkg.list_pkgs", return_value={}, autospec=True
)
patch_salt = patch.dict(
yumpkg.__salt__,
{
"pkg_resource.parse_targets": Mock(
return_value=[
{
"fnord-this-is-not-actually-a-package": "fnord-this-is-not-actually-a-package-1.2.3"
}
]
)
},
)
with patch_list_pkgs, patch_salt:
# During 3004rc1 we discovered that if list_pkgs was missing
# packages that were returned by parse_targets, yumpkg.remove would
# catch on fire. This ensures that won't go undetected again.
yumpkg.remove()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
def unchanged_shape(op):
"""Shape function for ops that output a tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
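# Illustrative use (names here are hypothetical): a shape function that
# insists on rank-2 inputs would be built and applied as
#   shape_fn = unchanged_shape_with_rank(2)
#   shape_fn(op)  # -> [op.inputs[0].get_shape().with_rank(2)]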
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
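# Worked example of the attr handling above: with a = [2, 3], b = [4, 3]
# and transpose_b=True, output_rows = a_shape[0] = 2, output_cols =
# b_shape[0] = 4, and the inner dimensions a_shape[1] == b_shape[1] == 3
# must be compatible, giving an output shape of [2, 4].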
def get_conv_output_size(input_size, filter_size, strides, padding_type):
"""Returns the spatial size of a n-d convolution/pooling output."""
input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
strides = [int(x) for x in strides]
if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
return input_size
if any(x is not None and y is not None and x > y for x, y in
zip(filter_size, input_size)):
raise ValueError("Filter must not be larger than the input: "
"Filter: %r Input: %r" % (filter_size, input_size))
if padding_type == b"VALID":
def _valid(in_dim, k_dim, s_dim):
if in_dim is not None and k_dim is not None:
return (in_dim - k_dim + s_dim) // s_dim
else:
return None
output_size = [
_valid(in_dim, k_dim, s_dim)
for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
]
elif padding_type == b"SAME":
def _same(in_dim, s_dim):
if in_dim is not None:
return (in_dim + s_dim - 1) // s_dim
else:
return None
output_size = [_same(in_dim, s_dim)
for in_dim, s_dim in zip(input_size, strides)]
else:
raise ValueError("Invalid padding: %r" % padding_type)
return tuple(output_size)
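# Worked 1-D example of the padding arithmetic above, assuming all
# dimensions are known:
#   VALID: in=10, k=3, s=2 -> (10 - 3 + 2) // 2 = 4
#   SAME:  in=10,      s=2 -> (10 + 2 - 1) // 2 = 5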
def get2d_conv_output_size(input_height, input_width, filter_height,
filter_width, row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
return get_conv_output_size((input_height, input_width),
(filter_height, filter_width),
(row_stride, col_stride), padding_type)
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
if data_format == b"NCHW":
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth_out]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
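# Example of the shape math above (NHWC): a [32, 28, 28, 3] input with a
# [5, 5, 3, 64] filter, strides [1, 2, 2, 1] and SAME padding gives
# out_rows = out_cols = (28 + 2 - 1) // 2 = 14, so the output shape is
# [32, 14, 14, 64].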
def depthwise_conv2d_native_shape(op):
"""Shape function for a DepthwiseConv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depthwise_multiplier]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
on the value of the op's "padding" and "strides" attrs.
Args:
op: A DepthwiseConv2dNative Operation.
Returns:
A list containing the Shape of the DepthwiseConv2DNative output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3] * filter_shape[2]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
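# Note the depth computation above: a [5, 5, 3, 2] filter applied to a
# depth-3 input produces depth_out = 2 * 3 = 6 output channels.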
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
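# Example of the depthwise branch above: a depth-16 input pooled with
# ksize_d == stride_d == 4 (and ksize_r == ksize_c == 1) keeps the
# spatial dimensions and yields an output depth of 16 // 4 = 4.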
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
def _broadcast_shape_helper(shape_x, shape_y):
"""Helper functions for is_broadcast_compatible and broadcast_shape.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
Returns None if the shapes are not broadcast compatible,
a list of the broadcast dimensions otherwise.
"""
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
return None
return return_dims
def is_broadcast_compatible(shape_x, shape_y):
"""Returns True if `shape_x` and `shape_y` are broadcast compatible.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
True if a shape exists that both `shape_x` and `shape_y` can be broadcasted
to. False otherwise.
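
  For example, `TensorShape([5, 1, 3])` and `TensorShape([4, 3])` are
  broadcast compatible, while `TensorShape([2, 3])` and `TensorShape([4, 3])`
  are not.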
"""
if shape_x.ndims is None or shape_y.ndims is None:
return False
return _broadcast_shape_helper(shape_x, shape_y) is not None
def broadcast_shape(shape_x, shape_y):
"""Returns the broadcasted shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
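
  For example, broadcasting `TensorShape([5, 1, 3])` with `TensorShape([4, 3])`
  yields `TensorShape([5, 4, 3])`.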
"""
if shape_x.ndims is None or shape_y.ndims is None:
return tensor_shape.unknown_shape()
return_dims = _broadcast_shape_helper(shape_x, shape_y)
if return_dims is None:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return tensor_shape.TensorShape(return_dims)
def call_cpp_shape_fn(op, require_shape_fn=True):
"""A shape function that delegates to the registered C++ shape function.
Args:
op: the node in the graph for which to compute output shapes.
require_shape_fn: If true, and the C++ shape function is not registered
in the current binary then an exception is raised; otherwise, if the
C++ shape function is not registered then unknown_shape is used.
Returns:
A dictionary with the following keys:
shapes: A TensorShape list of the output shapes of the op, as computed
using the C++ shape inference function registered for the op.
      handle_data: A list with one entry per output: the
        CppShapeInferenceResult.HandleData proto for outputs that carry
        handle data, and None for the rest.
Raises:
ValueError: If the C++ shape function returned an error (e.g. because the
shapes of the inputs are of the wrong rank or otherwise incompatible
according to the shape function).
RuntimeError: If the C++ shape function is not registered and
<require_shape_fn> is True.
"""
if op.type == "Const":
# To avoid serializing large constants, we special-case constant
# here, even though it has a C++ shape function. When Python
# calls the C / C-API directly, we should be able to remove this.
return {
"shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
"handle_data": [None]
}
input_tensors_needed = []
input_tensors_as_shapes_needed = []
while True:
res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed,
require_shape_fn)
if not isinstance(res, dict):
# Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
return res
# See if we need to evaluate some inputs.
if not res["inputs_needed"]:
return res
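    # The C++ shape function may request the concrete values of some inputs
    # (for example, the shape argument of Reshape). Record the newly requested
    # inputs and re-run shape inference until no new requests appear.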
p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
p = p.FromString(res["inputs_needed"])
changed = False
for idx in p.input_tensors_needed:
if idx not in input_tensors_needed:
input_tensors_needed.append(idx)
changed = True
for idx in p.input_tensors_as_shapes_needed:
if idx not in input_tensors_as_shapes_needed:
input_tensors_as_shapes_needed.append(idx)
changed = True
if not changed:
return res
def _call_cpp_shape_fn_impl(
op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):
"""Core implementation of call_cpp_shape_fn."""
graph_def_version = op.graph.graph_def_versions.producer
node_def_str = op.node_def.SerializeToString()
def tensor_to_inference_result(t):
r = cpp_shape_inference_pb2.CppShapeInferenceResult()
r.shape.CopyFrom(t.get_shape().as_proto())
# pylint: disable=protected-access
if t._handle_data is not None:
r.handle_data.CopyFrom(t._handle_data)
# pylint: enable=protected-access
return r.SerializeToString()
input_shapes = [tensor_to_inference_result(i) for i in op.inputs]
input_tensors = [None for i in input_shapes]
for idx in input_tensors_needed:
v = tensor_util.constant_value(op.inputs[idx])
if v is not None:
input_tensors[idx] = np.asarray(v)
serialized_unknown_shape = (
tensor_shape.TensorShape(None).as_proto().SerializeToString())
arr = [serialized_unknown_shape for i in input_shapes]
for idx in input_tensors_as_shapes_needed:
s = tensor_util.constant_value_as_shape(op.inputs[idx])
if s is not None:
arr[idx] = s.as_proto().SerializeToString()
input_tensors_as_shapes = arr
missing_shape_fn = False
try:
with errors.raise_exception_on_not_ok_status() as status:
output = pywrap_tensorflow.RunCppShapeInference(
graph_def_version, node_def_str, input_shapes, input_tensors,
input_tensors_as_shapes, status)
except errors.InvalidArgumentError as err:
if err.message.startswith("No shape inference function exists for op"):
missing_shape_fn = True
else:
raise ValueError(err.message)
if missing_shape_fn:
if require_shape_fn:
raise RuntimeError(
"No C++ shape function registered for standard op: %s" % op.type)
return unknown_shape(op)
output_shapes = output[:-1]
# Convert TensorShapeProto values in output_shapes.
result_protos = [
cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
for s in output_shapes
]
result = [r.shape for r in result_protos]
result_handle_data = [
r.handle_data if r.handle_data.is_set else None for r in result_protos
]
return {
"shapes": result,
"handle_data": result_handle_data,
"inputs_needed": output[-1]
}
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================
General 3D Object
=====================
TODO
"""
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame, pygame.image
from pygame.locals import *
from Display3D import Display3D
from Util3D import *
from Intersect3D import *
import Axon
textures = [0,0]
class PygameWrapperPlane(Axon.Component.component):
Inboxes = {
"inbox": "used to handle events",
"control": "ignored",
# 3D control
# "translation" : "receive 3D movement Vectors here"
# "rotation": "receive 3D rotation Vectors here",
# "scaling": "receive 3D scaling Vectors here",
# "rel_translation" : "receive 3D movement Vectors here"
# "rel_rotation": "receive 3D rotation Vectors here",
# "rel_scaling": "receive 3D scaling Vectors here",
"control3d": "receive Control3D commands here",
"wrapcallback": "receive wrap data after WRAPREQUEST",
"eventrequests": "receive event requests from wrapped component here",
}
Outboxes = {
"outbox": "not used",
"display_signal" : "Outbox used for communicating to the display surface",
# 3D status
"position" : "send position status when updated",
"rotation": "send rotation status when updated",
"scaling": "send scaling status when updated",
"wrapped_eventsfeedback": "Used to send events to wrapped pygame comp",
}
def __init__(self, **argd):
super(PygameWrapperPlane, self).__init__()
self.size = argd.get("size", Vector(2,2,2))
self.pos = argd.get("pos",Vector(0,0,-15))
self.rot = Vector(0.0,0.0,0.0)
self.scaling = argd.get("scaling",Vector(1,1,1))
self.transform = Transform()
self.oldrot = Vector()
self.oldpos = Vector()
self.oldscaling = Vector()
self.name = argd.get("name", "nameless")
self.tex = argd.get("tex", None)
self.grabbed = 0
self.texname = 0
self.tex_w = 0
self.tex_h = 0
self.width = 0
self.height= 0
self.wrapped_comp = argd.get("wrap")
self.events_wanted = {}
# vertices for intersection test
self.vertices = []
self.transformedVertices = []
# similar to Pygame component registration
self.disprequest = { "3DDISPLAYREQUEST" : True,
# "callback" : (self,"callback"),
"events" : (self, "inbox"),
# "size": self.size,
# "pos": self.pos,
"object": self }
    # Ray intersection test.
    # Returns the distance from the origin o along direction d to the point of
    # intersection, or 0 if no intersection occurs.
    # Algorithm from "Real-Time Rendering".
def intersectRay(self, o, d):
self.transformedVertices = [self.transform.transformVector(v) for v in self.vertices]
t = Intersect3D.ray_Polygon(o, d, self.transformedVertices)
return t
def applyTransforms(self):
# generate new transformation matrix if needed
if self.oldscaling != self.scaling or self.oldrot != self.rot or self.oldpos != self.pos:
self.transform.reset()
self.transform.applyScaling(self.scaling)
self.transform.applyRotation(self.rot)
self.transform.applyTranslation(self.pos)
if self.oldscaling != self.scaling:
self.send(self.scaling, "scaling")
self.oldscaling = self.scaling.copy()
if self.oldrot != self.rot:
self.send(self.rot, "rotation")
self.oldrot = self.rot.copy()
if self.oldpos != self.pos:
self.send(self.pos, "position")
self.oldpos = self.pos.copy()
def draw(self):
glMatrixMode(GL_MODELVIEW)
        # set texture
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.texname)
# set generated matrix
glPushMatrix()
glLoadMatrixf(self.transform.getMatrix())
w = self.width/200.0
h = self.height/200.0
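        # Half-extents of the textured quad; the wrapped surface is mapped at
        # 100 pixels per 3D unit.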
# print "size", self.width, self.height
# print "draw", w,h
# draw faces
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
glBegin(GL_QUADS)
glColor3f(0,1,0)
glTexCoord2f(0.0, 1.0-self.tex_h); glVertex3f(-w, -h, 0.0)
glTexCoord2f(self.tex_w, 1.0-self.tex_h); glVertex3f( w, -h, 0.0)
glTexCoord2f(self.tex_w, 1.0); glVertex3f( w, h, 0.0)
glTexCoord2f(0.0, 1.0); glVertex3f(-w, h, 0.0)
glEnd()
glPopMatrix()
glDisable(GL_TEXTURE_2D)
def handleEventRequests(self):
while self.dataReady("eventrequests"):
message = self.recv("eventrequests")
if message.get("ADDLISTENEVENT", None) is not None:
self.events_wanted[message["ADDLISTENEVENT"]] = True
elif message.get("REMOVELISTENEVENT", None) is not None:
self.events_wanted[message["REMOVELISTENEVENT"]] = False
def handleEvents(self):
while self.dataReady("inbox"):
for event in self.recv("inbox"):
# If movementMode is True, translate input to movement commands
if event.type == pygame.MOUSEBUTTONUP:
if event.button in [1,3]:
self.grabbed = 0
if event.movementMode:
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button in [1,3] and self.intersectRay(Vector(0,0,0), event.dir) > 0:
self.grabbed = event.button
if event.button == 4 and self.intersectRay(Vector(0,0,0), event.dir) > 0:
self.pos.z -= 1
if event.button == 5 and self.intersectRay(Vector(0,0,0), event.dir) > 0:
self.pos.z += 1
if event.type == pygame.MOUSEMOTION:
if self.grabbed == 1:
self.rot.y += float(event.rel[0])
self.rot.x += float(event.rel[1])
self.rot %= 360
if self.grabbed == 3:
self.pos.x += float(event.rel[0])/10.0
self.pos.y -= float(event.rel[1])/10.0
# If movementMode is False, forward events to wrapped component
else:
wanted = False
                    # test if the event is wanted by the wrapped component
try: wanted = self.events_wanted[event.type]
except KeyError: pass
if wanted:
# print "Event forwarded to ", self.wrapped_comp.name
# test if ray intersects plane
                        t = self.intersectRay(Vector(0,0,0), event.dir)
# if an intersection was detected, map position on plane
if t != 0:
p = event.dir*t
Ap = p-self.transformedVertices[0]
AB = self.transformedVertices[1]-self.transformedVertices[0]
AD = self.transformedVertices[3]-self.transformedVertices[0]
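                            # Project Ap onto the edges AB and AD to express the
                            # hit point in normalised (0..1) plane coordinates.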
x = Ap.dot(AB)/(AB.length()**2)
y = Ap.dot(AD)/(AD.length()**2)
event.pos = (x*self.width,y*self.height)
self.send([event], "wrapped_eventsfeedback")
# self.send("2D: (%2.2f, %2.2f); " % (x*self.width, y*self.height), "outbox")
def handleMovementCommands(self):
while self.dataReady("control3d"):
cmd = self.recv("control3d")
if cmd.type == Control3D.POSITION:
self.pos = cmd.amount
if cmd.type == Control3D.REL_POSITION:
self.pos += cmd.amount
if cmd.type == Control3D.ROTATION:
self.rot = cmd.amount
if cmd.type == Control3D.REL_ROTATION:
self.rot = (self.rot+cmd.amount)%360
if cmd.type == Control3D.SCALING:
self.scaling = cmd.amount
if cmd.type == Control3D.REL_SCALING:
self.scaling += cmd.amount
def waitBox(self,boxname):
"""Generator. yields 1 until data ready on the named inbox."""
waiting = True
while waiting:
if self.dataReady(boxname): return
else: yield 1
def main(self):
displayservice = Display3D.getDisplayService()
self.link((self,"display_signal"), displayservice)
        self.send(self.disprequest, "display_signal")
while 1:
try:
self.wraprequest = { "WRAPPERREQUEST" : True,
"wrapcallback" : (self, "wrapcallback"),
"eventrequests" : (self, "eventrequests"),
"surface": self.wrapped_comp.display }
self.send( self.wraprequest, "display_signal")
break
except AttributeError:
yield 1
for _ in self.waitBox("wrapcallback"): yield 1
b = self.recv("wrapcallback")
self.texname = b.texname
self.tex_w = b.tex_w
self.tex_h = b.tex_h
self.width = float(b.width)
self.height = float(b.height)
        # prepare vertices for intersection test
x = self.width/200.0
y = self.height/200.0
self.vertices = [ Vector(-x, y, 0.0), Vector(x, y, 0.0), Vector(x, -y, 0.0), Vector(-x, -y, 0.0) ]
# setup event communications
if b.eventservice is not None:
self.link((self, "wrapped_eventsfeedback"), b.eventservice)
while 1:
yield 1
self.handleEventRequests()
self.handleEvents()
self.handleMovementCommands()
self.applyTransforms()
self.draw()
# Later it might be a good idea to provide a set of drawing functions
# so the component developer does not need to know about OpenGL.
# This way OpenGL could later easily be replaced by another mechanism
# for drawing, e.g. TOGRA.
if __name__=='__main__':
class Bunch: pass
class CubeRotator(Axon.Component.component):
def main(self):
while 1:
yield 1
self.send( Control3D(Control3D.REL_ROTATION, Vector(0.1, 0.1, 0.1)), "outbox")
class CubeMover(Axon.Component.component):
def main(self):
x,y,z = 3.0, 3.0, -20.0
dx = -0.03
dy = -0.03
dz = -0.03
while 1:
yield 1
self.send( Control3D(Control3D.POSITION, Vector(x, y, z)), "outbox")
                x += dx
                y += dy
                z += dz
if abs(x)>5: dx = -dx
if abs(y)>5: dy = -dy
if abs(z+20)>10: dz = -dz
# print x, y, abs(x), abs(y)
import random
class CubeBuzzer(Axon.Component.component):
def main(self):
r = 1.00
f = 0.01
while 1:
yield 1
if r>1.0: f -= 0.001
else: f += 0.001
r += f
self.send( Control3D(Control3D.SCALING, Vector(r, r, r)), "outbox")
text = """\
All objects in this scene can be moved when the CTRL
key is pressed. Otherwise all interaction gets translated
to pygame events. Try it on the button.
The size of these 2 Ticker components is (350,250).
The wrapped button is now fully functional (assigned to SPACE).
Bottom left there is a Magna Doodle (tm) component. You can draw
green lines on it by using your left mouse button. Use the right mouse
button to erase your artwork.
"""
class datasource(Axon.Component.component):
def main(self):
for x in text.split():
self.send(x,"outbox")
yield 1
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.Util.Graphline import Graphline
from Kamaelia.UI.Pygame.Ticker import Ticker
from Kamaelia.UI.Pygame.Button import Button
from SimpleCube import *
    import sys
sys.path.append("../pygame/")
from MagnaDoodle import *
sys.path.append("../../MPS/Systems/Paint/")
from Paint import *
Display3D.getDisplayService()[0].overridePygameDisplay()
TEXT = datasource().activate()
TICKER1 = Ticker(position = (400, 300), render_left = 0, render_right=350, render_top=0, render_bottom=250).activate()
TICKER1WRAPPER = PygameWrapperPlane(wrap=TICKER1, pos=Vector(-2, 1,-10), name="1st Wrapper Plane").activate()
TICKER2 = Ticker(position = (400, 300), render_left = 0, render_right=350, render_top=0, render_bottom=250).activate()
TICKER2WRAPPER = PygameWrapperPlane(wrap=TICKER2, pos=Vector(2, 1,-10), name="2nd Wrapper Plane").activate()
BUTTON = Button(caption="This button...",msg="...can be moved AND activated!", key=pygame.K_SPACE).activate()
    BUTTONWRAPPER = PygameWrapperPlane(wrap=BUTTON, pos=Vector(0, 1.5,-5), name="Button Wrapper Plane").activate()
MAGNADOODLE = MagnaDoodle(size=(255,255)).activate()
MAGNADOODLEWRAPPER = PygameWrapperPlane(wrap=MAGNADOODLE, pos=Vector(-2, -2,-10), name="Magna Doodle Wrapper Plane").activate()
ECHO = consoleEchoer().activate()
CUBE = SimpleCube(pos = Vector(2,-2,-10)).activate()
CUBEROTATOR = CubeRotator().activate()
TICKER1WRAPPER.link((TICKER1WRAPPER, "outbox"), (TICKER2, "inbox"))
TICKER2WRAPPER.link((TICKER2WRAPPER, "outbox"), (TICKER2, "inbox"))
BUTTON.link((BUTTON, "outbox"), (TICKER2, "inbox"))
TEXT.link((TEXT, "outbox"), (TICKER1, "inbox"))
CUBEROTATOR.link((CUBEROTATOR,"outbox"), (CUBE, "control3d"))
Axon.Scheduler.scheduler.run.runThreads()
|
|
"""DHCPv4 options part4"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import misc
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_malformed_values_pool():
misc.test_setup()
srv_control.config_srv_subnet('256.0.2.0/24', '256.0.2.1-256.0.2.10')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
    # A 127.0.0.1/24 subnet with a 127.0.0.1-127.0.0.1 pool must also be
    # rejected during configuration.
    misc.test_setup()
    srv_control.config_srv_subnet('127.0.0.1/24', '127.0.0.1-127.0.0.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_malformed_values_ip_forwarding():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('ip-forwarding', '2')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('ip-forwarding', '1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_malformed_values_subnet_mask():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('subnet-mask', '255.255.266.0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('subnet-mask', '255.255.255.0')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_malformed_values_time_offset():
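    # time-offset is a signed 32-bit integer (-2147483648 to 2147483647).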
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('time-offset', '-2147483649')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('time-offset', '-2147483648')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('time-offset', '2147483647')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('time-offset', '2147483648')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('time-offset', '50')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('time-offset', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_malformed_values_boot_size():
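    # boot-size is an unsigned 16-bit integer, so -1 and 65536 are out of range.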
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('boot-size', '65536')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('boot-size', '-1')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('boot-size', '655')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_policy_filter():
    # Only pairs of addresses are allowed.
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('policy-filter', '199.199.199.1')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('policy-filter', '199.199.199.1,50.50.50.1,60.60.60.5')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('policy-filter', '199.199.199.1,50.50.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_max_dgram_reassembly():
# Unsigned integer (0 to 65535) minimum value: 576
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('max-dgram-reassembly', '-1')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('max-dgram-reassembly', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('max-dgram-reassembly', '575')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('max-dgram-reassembly', '65536')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('max-dgram-reassembly', '576')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('max-dgram-reassembly', '65535')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_default_ip_ttl():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-ip-ttl', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-ip-ttl', '1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-ip-ttl', '255')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-ip-ttl', '256')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_path_mtu_aging_timeout():
# Unsigned integer (0 to 65535) minimum: 68
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('path-mtu-aging-timeout', '67')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('path-mtu-aging-timeout', '-1')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('path-mtu-aging-timeout', '65536')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('path-mtu-aging-timeout', '65535')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('path-mtu-aging-timeout', '68')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_static_routes():
    # Pairs of addresses are required; 0.0.0.0 is forbidden.
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('static-routes', '199.199.199.1')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('static-routes', '199.199.199.1,70.70.70.5,80.80.80.80')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('static-routes', '199.199.199.1,0.0.0.0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('static-routes', '199.199.199.1,70.70.70.5,80.80.80.80,10.10.10.5')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_malformed_values_arp_cache_timeout():
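    # arp-cache-timeout is an unsigned 32-bit integer (0 to 4294967295).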
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('arp-cache-timeout', '-1')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('arp-cache-timeout', '4294967296')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('arp-cache-timeout', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('arp-cache-timeout', '4294967295')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_default_tcp_ttl():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-tcp-ttl', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-tcp-ttl', '256')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-tcp-ttl', '255')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('default-tcp-ttl', '1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_dhcp_option_overload():
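    # Legal dhcp-option-overload values are 1, 2 and 3.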
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-option-overload', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-option-overload', '4')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-option-overload', '1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-option-overload', '2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-option-overload', '3')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
@pytest.mark.disabled
def test_v4_options_malformed_values_dhcp_max_message_size():
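    # dhcp-max-message-size is an unsigned 16-bit integer with minimum 576.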
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-max-message-size', '0')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-max-message-size', '575')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-max-message-size', '576')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.start_srv('DHCP', 'stopped')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-max-message-size', '65536')
srv_control.build_and_send_config_files()
srv_control.start_srv_during_process('DHCP', 'configuration')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
srv_control.config_srv_opt('dhcp-max-message-size', '65535')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for davidson.py."""
import logging
import unittest
import numpy
import numpy.linalg
import scipy.linalg
import scipy.sparse
import scipy.sparse.linalg
from openfermion.ops.operators import QubitOperator
from openfermion.linalg.davidson import (Davidson, DavidsonOptions,
QubitDavidson, SparseDavidson,
append_random_vectors, orthonormalize)
def generate_matrix(dimension):
"""Generates matrix with shape (dimension, dimension)."""
numpy.random.seed(dimension)
rand = numpy.array(numpy.random.rand(dimension, dimension))
numpy.random.seed(dimension * 2)
diag = numpy.array(range(dimension)) + numpy.random.rand(dimension)
# Makes sure matrix is hermitian, which is symmetric when real.
matrix = rand + rand.conj().T + numpy.diag(diag)
return matrix
def generate_sparse_matrix(dimension, diagonal_factor=30):
"""Generates a hermitian sparse matrix with specified dimension."""
numpy.random.seed(dimension)
diagonal = sorted(numpy.array(numpy.random.rand(dimension)))
numpy.random.seed(dimension - 1)
off_diagonal = numpy.array(numpy.random.rand(dimension - 1))
# Makes sure matrix is hermitian, which is symmetric when real.
matrix = numpy.diag(diagonal) * diagonal_factor
    for row in range(dimension - 1):
col = row + 1
matrix[row, col] = off_diagonal[row]
matrix[col, row] = off_diagonal[row]
return matrix
def get_difference(linear_operator, eigen_values, eigen_vectors):
"""Get difference of M * v - lambda v."""
return numpy.max(
numpy.abs(
linear_operator.dot(eigen_vectors) - eigen_vectors * eigen_values))
class DavidsonOptionsTest(unittest.TestCase):
""""Tests for DavidsonOptions class."""
def setUp(self):
"""Sets up all variables needed for DavidsonOptions class."""
self.max_subspace = 10
self.max_iterations = 100
self.eps = 1e-7
self.davidson_options = DavidsonOptions(self.max_subspace,
self.max_iterations, self.eps)
def test_init(self):
"""Tests vars in __init__()."""
self.assertEqual(self.davidson_options.max_subspace, self.max_subspace)
self.assertEqual(self.davidson_options.max_iterations,
self.max_iterations)
self.assertAlmostEqual(self.davidson_options.eps, self.eps, places=8)
self.assertFalse(self.davidson_options.real_only)
def test_set_dimension_small(self):
"""Tests set_dimension() with a small dimension."""
dimension = 6
self.davidson_options.set_dimension(dimension)
self.assertEqual(self.davidson_options.max_subspace, dimension + 1)
def test_set_dimension_large(self):
"""Tests set_dimension() with a large dimension not affecting
max_subspace."""
self.davidson_options.set_dimension(60)
self.assertEqual(self.davidson_options.max_subspace, self.max_subspace)
def test_invalid_max_subspace(self):
"""Test for invalid max_subspace."""
with self.assertRaises(ValueError):
DavidsonOptions(max_subspace=1)
def test_invalid_max_iterations(self):
"""Test for invalid max_iterations."""
with self.assertRaises(ValueError):
DavidsonOptions(max_iterations=0)
def test_invalid_eps(self):
"""Test for invalid eps."""
with self.assertRaises(ValueError):
DavidsonOptions(eps=-1e-6)
def test_invalid_dimension(self):
"""Test for invalid dimension."""
with self.assertRaises(ValueError):
self.davidson_options.set_dimension(0)
class DavidsonTest(unittest.TestCase):
""""Tests for Davidson class with a real matrix."""
def setUp(self):
"""Sets up all variables needed for Davidson class."""
dimension = 10
matrix = generate_matrix(dimension)
def mat_vec(vec):
"""Trivial matvec with a numpy matrix."""
return numpy.dot(matrix, vec)
self.linear_operator = scipy.sparse.linalg.LinearOperator(
(dimension, dimension), matvec=mat_vec)
self.diagonal = numpy.diag(matrix)
self.davidson = Davidson(linear_operator=self.linear_operator,
linear_operator_diagonal=self.diagonal)
self.matrix = matrix
self.initial_guess = numpy.eye(self.matrix.shape[0], 10)
self.eigen_values = numpy.array([
1.15675714,
1.59132505,
2.62268014,
4.44533793,
5.3722743,
5.54393114,
7.73652405,
8.50089897,
9.4229309,
15.54405993,
])
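        # These reference eigenvalues were precomputed with numpy.linalg.eigh
        # for the seeded matrix above (cross-checked in test_with_built_in).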
def test_init(self):
"""Test for __init__()."""
davidson = self.davidson
self.assertAlmostEqual(
numpy.max(numpy.abs(self.matrix - self.matrix.T)), 0)
self.assertTrue(davidson.linear_operator)
self.assertTrue(
numpy.allclose(davidson.linear_operator_diagonal, self.diagonal))
# Options default values except max_subspace.
self.assertEqual(davidson.options.max_subspace, 11)
self.assertAlmostEqual(davidson.options.eps, 1e-6, places=8)
self.assertFalse(davidson.options.real_only)
def test_with_built_in(self):
"""Compare with eigenvalues from built-in functions."""
eigen_values, _ = numpy.linalg.eig(self.matrix)
eigen_values = sorted(eigen_values)
self.assertTrue(numpy.allclose(eigen_values, self.eigen_values))
# Checks for eigh() function.
eigen_values, eigen_vectors = numpy.linalg.eigh(self.matrix)
self.assertAlmostEqual(
get_difference(self.davidson.linear_operator, eigen_values,
eigen_vectors), 0)
def test_lowest_invalid_operator(self):
"""Test for get_lowest_n() with invalid linear operator."""
with self.assertRaises(ValueError):
Davidson(None, numpy.eye(self.matrix.shape[0], 8))
def test_lowest_zero_n(self):
"""Test for get_lowest_n() with invalid n_lowest."""
with self.assertRaises(ValueError):
self.davidson.get_lowest_n(0)
def test_lowest_invalid_shape(self):
"""Test for get_lowest_n() with invalid dimension for initial guess."""
with self.assertRaises(ValueError):
self.davidson.get_lowest_n(
1, numpy.ones((self.matrix.shape[0] * 2, 1), dtype=complex))
def test_get_lowest_n_trivial_guess(self):
"""Test for get_lowest_n() with trivial initial guess."""
with self.assertRaises(ValueError):
self.davidson.get_lowest_n(
1, numpy.zeros((self.matrix.shape[0], 1), dtype=complex))
def test_get_lowest_fail(self):
"""Test for get_lowest_n() with n_lowest = 1."""
n_lowest = 1
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=2)
        self.assertFalse(success)
self.assertTrue(numpy.allclose(eigen_values, numpy.array([1.41556103])))
def test_get_lowest_with_default(self):
"""Test for get_lowest_n() with default n_lowest = 1."""
numpy.random.seed(len(self.eigen_values))
success, eigen_values, _ = self.davidson.get_lowest_n()
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, self.eigen_values[:1]))
def test_get_lowest_one(self):
"""Test for get_lowest_n() with n_lowest = 1."""
n_lowest = 1
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=10)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
def test_get_lowest_two(self):
"""Test for get_lowest_n() with n_lowest = 2.
See the iteration results (eigenvalues and max error) below:
[1.87267714 4.06259537] 3.8646520980719212
[1.28812931 2.50316266] 1.548676934730246
[1.16659255 1.82600658] 0.584638880856119
[1.15840263 1.65254981] 0.4016803134102507
[1.15675714 1.59132505] 0
"""
n_lowest = 2
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, eigen_vectors = self.davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=5)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
self.assertTrue(
numpy.allclose(self.davidson.linear_operator * eigen_vectors,
eigen_vectors * eigen_values))
def test_get_lowest_two_subspace(self):
"""Test for get_lowest_n() with n_lowest = 2.
See the iteration results (eigenvalues and max error) below:
[1.87267714 4.06259537] 3.8646520980719212
[1.28812931 2.50316266] 1.548676934730246
[1.16659255 1.82600658] 0.584638880856119
[1.15947254 1.69773006] 0.5077687725257688
[1.1572995 1.61393264] 0.3318982487563453
"""
self.davidson.options.max_subspace = 8
expected_eigen_values = numpy.array([1.1572995, 1.61393264])
n_lowest = 2
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, eigen_vectors = self.davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=5)
        self.assertFalse(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertFalse(
numpy.allclose(self.davidson.linear_operator * eigen_vectors,
eigen_vectors * eigen_values))
def test_get_lowest_six(self):
"""Test for get_lowest_n() with n_lowest = 6."""
n_lowest = 6
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=2)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
def test_get_lowest_all(self):
"""Test for get_lowest_n() with n_lowest = 10."""
n_lowest = 10
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=1)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
class QubitDavidsonTest(unittest.TestCase):
""""Tests for QubitDavidson class with a QubitOperator."""
def setUp(self):
"""Sets up all variables needed for QubitDavidson class."""
self.coefficient = 2
self.n_qubits = 12
def test_get_lowest_n(self):
"""Test for get_lowest_n()."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator.zero()
for i in range(min(self.n_qubits, 4)):
numpy.random.seed(dimension + i)
qubit_operator += QubitOperator(((i, 'Z'),),
numpy.random.rand(1)[0])
qubit_operator *= self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
n_lowest = 6
numpy.random.seed(dimension)
initial_guess = numpy.random.rand(dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=20)
expected_eigen_values = -3.80376934 * numpy.ones(n_lowest)
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertAlmostEqual(
get_difference(davidson.linear_operator, eigen_values,
eigen_vectors), 0)
def test_get_lowest_zzx(self):
"""Test for get_lowest_n() for one term only within 10 iterations.
Also the number of starting vectors is smaller than n_lowest."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator('Z0 Z1 X2') * self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
n_lowest = 6
numpy.random.seed(dimension)
initial_guess = numpy.random.rand(dimension, n_lowest // 2)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=10)
        # Half of the eigenvalues are -1 and the other half +1; scaled by the
        # coefficient, the lowest eigenvalues are all -coefficient.
expected_eigen_values = -self.coefficient * numpy.ones(n_lowest)
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertAlmostEqual(
get_difference(davidson.linear_operator, eigen_values,
eigen_vectors), 0)
def test_get_lowest_xyz(self):
"""Test for get_lowest_n() for one term only within 10 iterations."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator('X0 Y1 Z3') * self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
n_lowest = 6
# Guess vectors have both real and imaginary parts.
numpy.random.seed(dimension)
initial_guess = 1.0j * numpy.random.rand(dimension, n_lowest)
numpy.random.seed(dimension * 2)
initial_guess += numpy.random.rand(dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=10)
        # Half of the eigenvalues are -1 and the other half +1; scaled by the
        # coefficient, the lowest eigenvalues are all -coefficient.
expected_eigen_values = -self.coefficient * numpy.ones(n_lowest)
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertAlmostEqual(
get_difference(davidson.linear_operator, eigen_values,
eigen_vectors), 0)
def test_get_lowest_z_real(self):
"""Test for get_lowest_n() for z with real eigenvectors only."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator('Z3') * self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
davidson.options.real_only = True
n_lowest = 6
# Guess vectors have both real and imaginary parts.
numpy.random.seed(dimension)
initial_guess = 1.0j * numpy.random.rand(dimension, n_lowest)
numpy.random.seed(dimension * 2)
initial_guess += numpy.random.rand(dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=10)
        # Half of the eigenvalues are -1 and the other half +1; scaled by the
        # coefficient, the lowest eigenvalues are all -coefficient.
expected_eigen_values = -self.coefficient * numpy.ones(n_lowest)
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertAlmostEqual(
get_difference(davidson.linear_operator, eigen_values,
eigen_vectors), 0)
# Real components only.
self.assertTrue(numpy.allclose(numpy.real(eigen_vectors),
eigen_vectors))
def test_get_lowest_y_real_fail(self):
"""Test for get_lowest_n() for y with real eigenvectors only."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator('Y3') * self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
davidson.options.max_subspace = 11
davidson.options.real_only = True
n_lowest = 6
# Guess vectors have both real and imaginary parts.
numpy.random.seed(dimension)
initial_guess = 1.0j * numpy.random.rand(dimension, n_lowest)
numpy.random.seed(dimension * 2)
initial_guess += numpy.random.rand(dimension, n_lowest)
success, _, eigen_vectors = davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=10)
self.assertFalse(success)
        # The eigenvectors are not purely real.
self.assertFalse(
numpy.allclose(numpy.real(eigen_vectors), eigen_vectors))
def test_get_lowest_y_real(self):
"""Test for get_lowest_n() for y with real eigenvectors only."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator('Y3') * self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
davidson.options.real_only = True
n_lowest = 6
# Guess vectors have both real and imaginary parts.
numpy.random.seed(dimension)
initial_guess = 1.0j * numpy.random.rand(dimension, n_lowest)
numpy.random.seed(dimension * 2)
initial_guess += numpy.random.rand(dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=10)
        # Half of the eigenvalues are -1 and the other half +1; scaled by the
        # coefficient, the lowest eigenvalues are all -coefficient.
expected_eigen_values = -self.coefficient * numpy.ones(n_lowest)
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertAlmostEqual(
get_difference(davidson.linear_operator, eigen_values,
eigen_vectors), 0)
        # The eigenvectors are not purely real.
self.assertFalse(
numpy.allclose(numpy.real(eigen_vectors), eigen_vectors))
def test_get_lowest_y_complex(self):
"""Test for get_lowest_n() for y with complex eigenvectors."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator('Y3') * self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
davidson.options.real_only = True
n_lowest = 6
# Guess vectors have both real and imaginary parts.
numpy.random.seed(dimension)
initial_guess = 1.0j * numpy.random.rand(dimension, n_lowest)
numpy.random.seed(dimension * 2)
initial_guess += numpy.random.rand(dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=10)
        # Half of the eigenvalues are -1 and the other half +1; scaled by the
        # coefficient, the lowest eigenvalues are all -coefficient.
expected_eigen_values = -self.coefficient * numpy.ones(n_lowest)
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertAlmostEqual(
get_difference(davidson.linear_operator, eigen_values,
eigen_vectors), 0)
class SparseDavidsonTest(unittest.TestCase):
""""Tests for SparseDavidson class with sparse matrices."""
def setUp(self):
"""Sets up all variables needed for SparseDavidson class."""
logging.basicConfig(level=logging.INFO)
self.dimension = 1000
self.sparse_matrix = generate_sparse_matrix(self.dimension)
self.davidson_options = DavidsonOptions(max_subspace=100,
max_iterations=50,
real_only=True)
# Checks for built-in eigh() function.
self.eigen_values, self.eigen_vectors = numpy.linalg.eigh(
self.sparse_matrix)
self.assertAlmostEqual(
get_difference(self.sparse_matrix, self.eigen_values,
self.eigen_vectors), 0)
# Makes sure eigenvalues are sorted.
self.eigen_values = sorted(self.eigen_values)
    def test_hermitian(self):
        """Tests that the matrix used is Hermitian."""
self.assertTrue(
numpy.allclose(self.sparse_matrix,
self.sparse_matrix.conj().T))
def test_get_lowest_n_coo(self):
"""Test for get_lowest_n() as a coo_matrix."""
davidson = SparseDavidson(scipy.sparse.coo_matrix(self.sparse_matrix),
self.davidson_options)
n_lowest = 2
initial_guess = numpy.eye(self.dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess)
expected_eigen_values = self.eigen_values[:n_lowest]
self.assertTrue(success)
self.assertLess(
numpy.max(numpy.abs(eigen_values - expected_eigen_values)),
self.davidson_options.eps)
self.assertLess(
get_difference(self.sparse_matrix, eigen_values, eigen_vectors),
self.davidson_options.eps)
# Real components only.
self.assertTrue(numpy.allclose(numpy.real(eigen_vectors),
eigen_vectors))
def test_get_lowest_n_coo_complex(self):
"""Test for get_lowest_n() as a coo_matrix with real_only=False."""
self.davidson_options.real_only = False
davidson = SparseDavidson(scipy.sparse.coo_matrix(self.sparse_matrix),
self.davidson_options)
n_lowest = 2
initial_guess = numpy.eye(self.dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=30)
expected_eigen_values = self.eigen_values[:n_lowest]
self.assertTrue(success)
self.assertLess(
numpy.max(numpy.abs(eigen_values - expected_eigen_values)),
self.davidson_options.eps)
self.assertLess(
get_difference(self.sparse_matrix, eigen_values, eigen_vectors),
self.davidson_options.eps)
        # The matrix is real, so the eigenvectors should still be purely real
        # even with real_only=False.
self.assertTrue(numpy.allclose(numpy.real(eigen_vectors),
eigen_vectors))
def test_get_lowest_n(self):
"""Test for get_lowest_n() as a other sparse formats."""
n_lowest = 2
expected_eigen_values = self.eigen_values[:n_lowest]
initial_guess = numpy.eye(self.dimension, n_lowest)
for run_matrix in [
scipy.sparse.bsr_matrix(self.sparse_matrix),
scipy.sparse.csc_matrix(self.sparse_matrix),
scipy.sparse.csr_matrix(self.sparse_matrix),
scipy.sparse.dia_matrix(self.sparse_matrix),
scipy.sparse.dok_matrix(self.sparse_matrix),
scipy.sparse.lil_matrix(self.sparse_matrix),
]:
davidson = SparseDavidson(run_matrix, self.davidson_options)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess)
self.assertTrue(success)
self.assertLess(
numpy.max(numpy.abs(eigen_values - expected_eigen_values)),
self.davidson_options.eps)
self.assertLess(
get_difference(self.sparse_matrix, eigen_values, eigen_vectors),
self.davidson_options.eps)
# Real components only.
self.assertTrue(
numpy.allclose(numpy.real(eigen_vectors), eigen_vectors))
class DavidsonUtilityTest(unittest.TestCase):
""""Tests for utility functions."""
def test_append_random_vectors_0(self):
"""Test append_random_vectors() with too few columns."""
vectors = numpy.zeros((10, 2), dtype=complex)
self.assertTrue(
numpy.allclose(append_random_vectors(vectors, 0), vectors))
def test_append_random_vectors(self):
"""Test append_random_vectors()."""
row = 10
col = 2
add = 1
vectors = numpy.eye(row, col)
new_vectors = append_random_vectors(vectors, add)
# Identical for the first col columns.
self.assertTrue(numpy.allclose(new_vectors[:, :col], vectors))
# Orthonormal.
self.assertTrue(
numpy.allclose(numpy.dot(new_vectors.conj().T, new_vectors),
numpy.eye(col + add, col + add)))
def test_append_random_vectors_real(self):
"""Test append_random_vectors()."""
row = 10
col = 2
add = 1
vectors = numpy.eye(row, col)
new_vectors = append_random_vectors(vectors, add, real_only=True)
# Identical for the first col columns.
self.assertTrue(numpy.allclose(new_vectors[:, :col], vectors))
# Orthonormal.
self.assertTrue(
numpy.allclose(numpy.dot(new_vectors.conj().T, new_vectors),
numpy.eye(col + add, col + add)))
# Real.
self.assertTrue(numpy.allclose(numpy.real(new_vectors), new_vectors))
def test_append_vectors_big_col(self):
"""Test append_random_vectors() with too many failed trial."""
row = 10
vectors = numpy.eye(row, row)
new_vectors = append_random_vectors(vectors, 1)
self.assertTrue(numpy.allclose(new_vectors, vectors))
def test_orthonormalize(self):
"""Test for orthonormalization with removing non-independent vectors."""
sqrt_half = numpy.sqrt(0.5)
expected_array = numpy.array([
[sqrt_half, sqrt_half, 0],
[sqrt_half, -sqrt_half, 0],
[0, 0, 1],
])
array = numpy.array([[1, 1, 10, 1], [1, -1, 10, 1], [0, 0, 2, 1]],
dtype=float)
array[:, 0] *= sqrt_half
array = orthonormalize(array, 1)
self.assertTrue(numpy.allclose(array, expected_array))
def test_orthonormalize_complex(self):
"""Test for orthonormalization with complex matrix."""
sqrt_half = numpy.sqrt(0.5)
expected_array = numpy.array([
[sqrt_half * 1.0j, sqrt_half * 1.0j, 0],
[sqrt_half * 1.0j, -sqrt_half * 1.0j, 0],
[0, 0, 1],
],
dtype=complex)
array = numpy.array([[1.j, 1.j, 10], [1.j, -1.j, 10], [0, 0, 2]],
dtype=complex)
array[:, 0] *= sqrt_half
array = orthonormalize(array, 1)
self.assertTrue(numpy.allclose(array, expected_array))
|
|
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management.base import CommandError
from django.test.utils import override_settings
import mock
from daguerre.adjustments import Fit
from daguerre.management.commands._daguerre_clean import Command as Clean
from daguerre.management.commands._daguerre_preadjust import (NO_ADJUSTMENTS,
BAD_STRUCTURE, Command as Preadjust)
from daguerre.management.commands.daguerre import Command as Daguerre
from daguerre.models import AdjustedImage, Area
from daguerre.tests.base import BaseTestCase
class CleanTestCase(BaseTestCase):
def test_old_adjustments(self):
"""
_old_adjustments should return AdjustedImages whose storage_path
no longer exists.
"""
nonexistant = 'daguerre/test/nonexistant.png'
if default_storage.exists(nonexistant):
default_storage.delete(nonexistant)
adjusted = self.create_image('100x100.png')
adjusted1 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=nonexistant,
adjusted=adjusted)
adjusted2 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=adjusted,
adjusted=adjusted)
clean = Clean()
self.assertEqual(list(clean._old_adjustments()), [adjusted1])
default_storage.delete(adjusted)
def test_old_areas(self):
"""
_old_areas should return Areas whose storage_path no longer exists.
"""
nonexistant = 'daguerre/test/nonexistant.png'
if default_storage.exists(nonexistant):
default_storage.delete(nonexistant)
storage_path = self.create_image('100x100.png')
kwargs = {
'x1': 0,
'x2': 10,
'y1': 0,
'y2': 10
}
area1 = Area.objects.create(storage_path=nonexistant,
**kwargs)
area2 = Area.objects.create(storage_path=storage_path,
**kwargs)
clean = Clean()
self.assertEqual(list(clean._old_areas()), [area1])
default_storage.delete(storage_path)
def test_missing_adjustments(self):
"""
_missing_adjustments should return AdjustedImages whose adjusted
no longer exists.
"""
nonexistant = 'daguerre/test/nonexistant.png'
if default_storage.exists(nonexistant):
default_storage.delete(nonexistant)
storage_path = self.create_image('100x100.png')
adjusted1 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=storage_path,
adjusted=nonexistant)
adjusted2 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=storage_path,
adjusted=storage_path)
clean = Clean()
self.assertEqual(list(clean._missing_adjustments()), [adjusted1])
default_storage.delete(storage_path)
def test_duplicate_adjustments(self):
path1 = self.create_image('100x100.png')
path2 = self.create_image('100x100.png')
adjusted1 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=path1,
adjusted=path1)
adjusted2 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=path1,
adjusted=path1)
adjusted3 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=path2,
adjusted=path1)
clean = Clean()
duplicates = clean._duplicate_adjustments()
self.assertNotIn(adjusted3, duplicates)
self.assertTrue(list(duplicates) == [adjusted1] or
list(duplicates) == [adjusted2])
def test_orphaned_files(self):
clean = Clean()
walk_ret = (
('daguerre', ['test'], []),
('daguerre/test', [], ['fake1.png', 'fake2.png', 'fake3.png'])
)
AdjustedImage.objects.create(requested='fit|50|50',
storage_path='whatever.png',
adjusted='daguerre/test/fake2.png')
with mock.patch.object(clean, '_walk', return_value=walk_ret) as walk:
self.assertEqual(clean._orphaned_files(),
['daguerre/test/fake1.png',
'daguerre/test/fake3.png'])
walk.assert_called_once_with('daguerre', topdown=False)
class PreadjustTestCase(BaseTestCase):
@override_settings()
def test_get_helpers__no_setting(self):
try:
del settings.DAGUERRE_PREADJUSTMENTS
except AttributeError:
pass
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
NO_ADJUSTMENTS,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('model', [Fit(width=50)], None),))
def test_get_helpers__bad_string(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('app.model', [Fit(width=50)], None),))
def test_get_helpers__bad_model(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(1, 2, 3))
def test_get_helpers__not_tuples(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('daguerre.adjustedimage', [], 'storage_path'),))
def test_get_helpers__no_adjustments(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('daguerre.adjustedimage', [Fit(width=50)], 'storage_path'),))
def test_get_helpers__good_string(self):
preadjust = Preadjust()
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
(AdjustedImage, [Fit(width=50)], 'storage_path'),))
def test_get_helpers__model(self):
preadjust = Preadjust()
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
def test_get_helpers__queryset(self):
preadjust = Preadjust()
qs = AdjustedImage.objects.all()
dp = ((qs, [Fit(width=50)], 'storage_path'),)
with override_settings(DAGUERRE_PREADJUSTMENTS=dp):
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
self.assertTrue(qs._result_cache is None)
def test_get_helpers__iterable(self):
preadjust = Preadjust()
storage_path = self.create_image('100x100.png')
adjusted = AdjustedImage.objects.create(storage_path=storage_path,
adjusted=storage_path)
def _iter():
yield adjusted
dp = ((_iter(), [Fit(width=50)], 'storage_path'),)
with override_settings(DAGUERRE_PREADJUSTMENTS=dp):
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
class DaguerreTestCase(BaseTestCase):
def test_find_commands(self):
daguerre_command = Daguerre()
self.assertEqual(daguerre_command._find_commands(), {
'clean': '_daguerre_clean',
'preadjust': '_daguerre_preadjust'
})
|
|
# winservice.py
from os.path import splitext, abspath
from sys import modules
import win32serviceutil
import win32service
import win32event
import win32api
class Service(win32serviceutil.ServiceFramework):
_svc_name_ = '_unNamed'
_svc_display_name_ = '_Service Template'
def __init__(self, *args):
win32serviceutil.ServiceFramework.__init__(self, *args)
self.log('init')
self.stop_event = win32event.CreateEvent(None, 0, 0, None)
def log(self, msg):
import servicemanager
servicemanager.LogInfoMsg(str(msg))
def sleep(self, sec):
win32api.Sleep(sec*1000, True)
def SvcDoRun(self):
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
try:
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.log('start')
self.start()
self.log('wait')
win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)
self.log('done')
except Exception as x:
self.log('Exception : %s' % x)
self.SvcStop()
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.log('stopping')
self.stop()
self.log('stopped')
win32event.SetEvent(self.stop_event)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
# to be overridden
def start(self):
pass
# to be overridden
def stop(self):
pass
def instart(cls, name, display_name=None, stay_alive=True):
''' Install and Start (auto) a Service
    cls : the class (derived from Service) that implements the Service
name : Service name
display_name : the name displayed in the service manager
stay_alive : Service will stop on logout if False
'''
cls._svc_name_ = name
cls._svc_display_name_ = display_name or name
try:
module_path=modules[cls.__module__].__file__
except AttributeError:
        # the module may be frozen (e.g. by py2exe)
from sys import executable
module_path=executable
module_file = splitext(abspath(module_path))[0]
cls._svc_reg_class_ = '%s.%s' % (module_file, cls.__name__)
if stay_alive:
win32api.SetConsoleCtrlHandler(lambda x: True, True)
try:
win32serviceutil.InstallService(
cls._svc_reg_class_,
cls._svc_name_,
cls._svc_display_name_,
startType = win32service.SERVICE_AUTO_START
)
print 'Install ok'
win32serviceutil.StartService(
cls._svc_name_
)
print 'Start ok'
except Exception as x:
print str(x)
#
#
#
#
##### TEST MODULE
#
#
#
#
# winservice_test.py
from winservice import Service, instart
class Test(Service):
def start(self):
self.runflag=True
while self.runflag:
self.sleep(10)
self.log("I'm alive ...")
def stop(self):
self.runflag=False
self.log("I'm done")
instart(Test, 'aTest', 'Python Service Test')
################################################################################
# http://stackoverflow.com/questions/32404/
################################################################################
import pythoncom
import win32serviceutil
import win32service
import win32event
import servicemanager
import socket
class AppServerSvc (win32serviceutil.ServiceFramework):
_svc_name_ = "TestService"
_svc_display_name_ = "Test Service"
def __init__(self,args):
win32serviceutil.ServiceFramework.__init__(self,args)
self.hWaitStop = win32event.CreateEvent(None,0,0,None)
socket.setdefaulttimeout(60)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_,''))
self.main()
def main(self):
pass
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(AppServerSvc)
################################################################################
# http://ryrobes.com/python/running-python-scripts-as-a-windows-service/
################################################################################
class aservice(win32serviceutil.ServiceFramework):
_svc_name_ = "MyServiceShortName"
    _svc_display_name_ = "My Service Long Fancy Name!"
    _svc_description_ = "This is what my crazy little service does - aka a DESCRIPTION! Whoa!"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
import servicemanager
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ''))
#self.timeout = 640000 #640 seconds / 10 minutes (value is in milliseconds)
self.timeout = 120000 #120 seconds / 2 minutes
        # This is how long the service will wait to run / refresh
        # itself (see the script below)
while 1:
# Wait for service stop signal, if I timeout, loop again
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
# Check to see if self.hWaitStop happened
if rc == win32event.WAIT_OBJECT_0:
# Stop signal encountered
servicemanager.LogInfoMsg("SomeShortNameVersion - STOPPED!") #For Event Log
break
else:
#Ok, here's the real money shot right here.
#[actual service code between rests]
try:
file_path = "C:\whereever\my_REAL_py_work_to_be_done.py"
execfile(file_path) #Execute the script
inc_file_path2 = "C:\whereever\MORE_REAL_py_work_to_be_done.py"
execfile(inc_file_path2) #Execute the script
except:
pass
#[actual service code between rests]
def ctrlHandler(ctrlType):
return True
if __name__ == '__main__':
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
win32serviceutil.HandleCommandLine(aservice)
|
|
from __future__ import division, absolute_import, print_function
__all__ = ['ravel_multi_index',
'unravel_index',
'mgrid',
'ogrid',
'r_', 'c_', 's_',
'index_exp', 'ix_',
'ndenumerate', 'ndindex',
'fill_diagonal', 'diag_indices', 'diag_indices_from']
import sys
import numpy.core.numeric as _nx
from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod,
arange )
from numpy.core.numerictypes import find_common_type
import math
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.lib._compiled_base import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
makemat = matrix.matrix
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
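    Boolean sequences are also accepted; they are converted to the
    corresponding integer indices via ``nonzero`` before the mesh is built:
    >>> ixgrid = np.ix_([True, True], [2, 4])
    >>> a[ixgrid]
    array([[2, 4],
           [7, 9]])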
"""
out = []
nd = len(args)
baseshape = [1]*nd
for k in range(nd):
new = _nx.asarray(args[k])
if (new.ndim != 1):
raise ValueError("Cross index must be 1 dimensional")
if issubclass(new.dtype.type, _nx.bool_):
new = new.nonzero()[0]
baseshape[k] = len(new)
new = new.reshape(tuple(baseshape))
out.append(new)
baseshape[k] = 1
return tuple(out)
class nd_grid(object):
"""
Construct a multi-dimensional "meshgrid".
``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then the
integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
If instantiated with an argument of ``sparse=True``, the mesh-grid is
    open (or not fleshed out) so that only one dimension of each returned
argument is greater than 1.
Parameters
----------
sparse : bool, optional
Whether the grid is sparse or not. Default is False.
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
`mgrid` and `ogrid`::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
Examples
--------
>>> mgrid = np.lib.index_tricks.nd_grid()
>>> mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
"""
def __init__(self, sparse=False):
self.sparse = sparse
def __getitem__(self, key):
try:
size = []
typ = int
for k in range(len(key)):
step = key[k].step
start = key[k].start
if start is None: start=0
if step is None: step=1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
size.append(int(math.ceil((key[k].stop - start)/(step*1.0))))
if isinstance(step, float) or \
isinstance(start, float) or \
isinstance(key[k].stop, float):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
step = key[k].step
start = key[k].start
if start is None: start=0
if step is None: step=1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
step = (key[k].stop - start)/float(step-1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
nn[k] = nn[k][slobj]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
step = key.step
stop = key.stop
start = key.start
if start is None: start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
stop = key.stop+step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
def __getslice__(self, i, j):
return _nx.arange(i, j)
def __len__(self):
return 0
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
mgrid.__doc__ = None # set in numpy.add_newdocs
ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
"""
Translates slice objects to concatenation along an axis.
For detailed documentation on usage, see `r_`.
"""
def _retval(self, res):
if self.matrix:
oldndim = res.ndim
res = makemat(res)
if oldndim == 1 and self.col:
res = res.T
self.axis = self._axis
self.matrix = self._matrix
self.col = 0
return res
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self._axis = axis
self._matrix = matrix
self.axis = axis
self.matrix = matrix
self.col = 0
self.trans1d = trans1d
self.ndmin = ndmin
def __getitem__(self, key):
trans1d = self.trans1d
ndmin = self.ndmin
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None: start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the "
"first entry.")
key0 = key[0]
if key0 in 'rc':
self.matrix = True
self.col = (key0 == 'c')
continue
if ',' in key0:
vec = key0.split(',')
try:
self.axis, ndmin = \
[int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True,
ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin-tempobj.ndim
if (trans1d < 0):
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + \
defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
        # Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = _nx.concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
def __getslice__(self, i, j):
res = _nx.arange(i, j)
return self._retval(res)
def __len__(self):
return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly. There are two use cases.
1. If the index expression contains comma separated arrays, then stack
them along their first axis.
2. If the index expression contains slice notation or scalars then create
a 1-D array with a range indicated by the slice notation.
If slice notation is used, the syntax ``start:stop:step`` is equivalent
to ``np.arange(start, stop, step)`` inside of the brackets. However, if
``step`` is an imaginary number (i.e. 100j) then its integer portion is
interpreted as a number-of-points desired and the start and stop are
inclusive. In other words ``start:stop:stepj`` is interpreted as
``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
After expansion of slice notation, all comma separated sequences are
concatenated together.
Optional character strings placed as the first element of the index
expression can be used to change the output. The strings 'r' or 'c' result
in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
(column) matrix is produced. If the result is 2-D then both provide the
same matrix result.
A string integer specifies which axis to stack multiple comma separated
arrays along. A string of two comma-separated integers allows indication
of the minimum number of dimensions to force each entry into as the
second integer (the axis to concatenate along is still the first integer).
A string with three comma-separated integers allows specification of the
axis to concatenate along, the minimum number of dimensions to force the
entries to, and which axis should contain the start of the arrays which
are less than the specified number of dimensions. In other words the third
integer allows you to specify where the 1's should be placed in the shape
of the arrays that have their shapes upgraded. By default, they are placed
in the front of the shape tuple. The third argument allows you to specify
where the start of the array should be instead. Thus, a third argument of
'0' would place the 1's at the end of the array shape. Negative integers
specify where in the new shape tuple the last dimension of upgraded arrays
should be placed, so the default is '-1'.
Parameters
----------
Not a function, so takes no parameters
Returns
-------
A concatenated ndarray or matrix.
See Also
--------
concatenate : Join a sequence of arrays together.
c_ : Translates slice objects to concatenation along the second axis.
Examples
--------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
String integers specify the axis to concatenate along or the minimum
number of dimensions to force entries into.
>>> a = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.r_['-1', a, a] # concatenate along last axis
array([[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5]])
>>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
array([[1, 2, 3],
[4, 5, 6]])
>>> np.r_['0,2,0', [1,2,3], [4,5,6]]
array([[1],
[2],
[3],
[4],
[5],
[6]])
>>> np.r_['1,2,0', [1,2,3], [4,5,6]]
array([[1, 4],
[2, 5],
[3, 6]])
Using 'r' or 'c' as a first string argument creates a matrix.
>>> np.r_['r',[1,2,3], [4,5,6]]
matrix([[1, 2, 3, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, 0)
r_ = RClass()
class CClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the second axis.
This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
For detailed documentation, see `r_`.
Examples
--------
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
array([[1, 2, 3, 0, 0, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
c_ = CClass()
class ndenumerate(object):
"""
Multidimensional index iterator.
Return an iterator yielding pairs of array coordinates and values.
Parameters
----------
a : ndarray
Input array.
See Also
--------
ndindex, flatiter
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
    ...     print(index, x)
(0, 0) 1
(0, 1) 2
(1, 0) 3
(1, 1) 4
"""
def __init__(self, arr):
self.iter = asarray(arr).flat
def __next__(self):
"""
Standard iterator method, returns the index tuple and array value.
Returns
-------
coords : tuple of ints
The indices of the current iteration.
val : scalar
The array element of the current iteration.
"""
return self.iter.coords, next(self.iter)
def __iter__(self):
return self
next = __next__
class ndindex(object):
"""
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
the N-dimensional index of the array. At each iteration a tuple
of indices is returned, the last dimension is iterated over first.
Parameters
----------
`*args` : ints
The size of each dimension of the array.
See Also
--------
ndenumerate, flatiter
Examples
--------
>>> for index in np.ndindex(3, 2, 1):
    ...     print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
"""
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index'], order='C')
def __iter__(self):
return self
def ndincr(self):
"""
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
"""
next(self)
def __next__(self):
"""
Standard iterator method, updates the index and returns the index tuple.
Returns
-------
val : tuple of ints
Returns a tuple containing the indices of the current iteration.
"""
next(self._it)
return self._it.multi_index
next = __next__
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
"""
A nicer way to build up index tuples for arrays.
.. note::
Use one of the two predefined instances `index_exp` or `s_`
rather than directly using `IndexExpression`.
For any index combination, including slicing and axis insertion,
``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
array `a`. However, ``np.index_exp[indices]`` can be used anywhere
in Python code and returns a tuple of slice objects that can be
used in the construction of complex index expressions.
Parameters
----------
maketuple : bool
If True, always returns a tuple.
See Also
--------
index_exp : Predefined instance that always returns a tuple:
`index_exp = IndexExpression(maketuple=True)`.
s_ : Predefined instance without tuple conversion:
`s_ = IndexExpression(maketuple=False)`.
Notes
-----
You can do all this with `slice()` plus a few special objects,
but there's a lot to remember and this version is simpler because
it uses the standard array indexing syntax.
Examples
--------
>>> np.s_[2::2]
slice(2, None, 2)
>>> np.index_exp[2::2]
(slice(2, None, 2),)
>>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
array([2, 4])
"""
def __init__(self, maketuple):
self.maketuple = maketuple
def __getitem__(self, item):
if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
        For tall matrices in NumPy versions up to 1.6.2, the
        diagonal "wrapped" after N columns. You can have this behavior
        with this option. This affects only tall matrices.
See also
--------
diag_indices, diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
This functionality can be obtained via `diag_indices`, but internally
this version uses a much faster implementation that never constructs the
indices and uses simple slicing.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
    The wrap option affects only tall matrices:
    >>> # tall matrices, no wrap
    >>> a = np.zeros((5, 3), int)
    >>> np.fill_diagonal(a, 4)
    >>> a
    array([[4, 0, 0],
           [0, 4, 0],
           [0, 0, 4],
           [0, 0, 0],
           [0, 0, 0]])
    >>> # tall matrices, wrap
    >>> a = np.zeros((5, 3), int)
    >>> np.fill_diagonal(a, 4, wrap=True)
    >>> a
    array([[4, 0, 0],
           [0, 4, 0],
           [0, 0, 4],
           [0, 0, 0],
           [4, 0, 0]])
    >>> # wide matrices (wrap has no effect)
    >>> a = np.zeros((3, 5), int)
    >>> np.fill_diagonal(a, 4)
    >>> a
    array([[4, 0, 0, 0, 0],
           [0, 4, 0, 0, 0],
           [0, 0, 4, 0, 0]])
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
end = None
if a.ndim == 2:
# Explicit, fast formula for the common case. For 2-d arrays, we
# accept rectangular ones.
step = a.shape[1] + 1
        # This prevents the diagonal from wrapping in tall matrices.
if not wrap:
end = a.shape[1] * a.shape[1]
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(a.shape)==0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
(n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
for ``i = [0..n-1]``.
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned
indices can be used.
ndim : int, optional
The number of dimensions.
See also
--------
diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = np.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Now, we create indices to manipulate a 3-D array:
>>> d3 = np.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = np.zeros((2, 2, 2), dtype=np.int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]])
"""
idx = arange(n)
return (idx,) * ndim
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Notes
-----
.. versionadded:: 1.4.0
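    Examples
    --------
    A small illustration; the returned tuple indexes the main diagonal:
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[np.diag_indices_from(a)]
    array([ 0,  5, 10, 15])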
"""
    if arr.ndim < 2:
        raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
|
|
import json
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import toggle_enabled
from django_prbac.exceptions import PermissionDenied
from django_prbac.utils import ensure_request_has_privilege
from corehq import privileges
from corehq.apps.export.exceptions import BadExportConfiguration
from corehq.apps.reports.standard import export
from corehq.apps.reports.models import FormExportSchema, HQGroupExportConfiguration, CaseExportSchema
from corehq.apps.reports.standard.export import DeidExportReport
from couchexport.models import ExportTable, ExportSchema, ExportColumn, display_column_types, SplitColumn
from django.utils.translation import ugettext as _
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.commtrack.models import StockExportColumn
from corehq.apps.domain.models import Domain
USERNAME_TRANSFORM = 'corehq.apps.export.transforms.user_id_to_username'
OWNERNAME_TRANSFORM = 'corehq.apps.export.transforms.owner_id_to_display'
CASENAME_TRANSFORM = 'corehq.apps.export.transforms.case_id_to_case_name'
FORM_CASE_ID_PATH = 'form.case.@case_id'
class AbstractProperty(object):
def __get__(self, instance, owner):
raise NotImplementedError()
class DEID(object):
options = (
('', ''),
(_('Sensitive ID'), 'couchexport.deid.deid_ID'),
(_('Sensitive Date'), 'couchexport.deid.deid_date'),
)
json_options = [{'label': label, 'value': value}
for label, value in options]
class ColumnTypesOptions(object):
json_options = [
{'label': meta.label, 'value': value}
for value, meta in display_column_types.items() if meta.label
]
class CustomExportHelper(object):
ExportSchemaClass = AbstractProperty()
ExportReport = AbstractProperty()
export_title = AbstractProperty()
allow_deid = False
allow_repeats = True
export_type = 'form'
@property
def default_order(self):
return {}
def update_custom_params(self):
if len(self.custom_export.tables) > 0:
if self.export_stock:
self.custom_export.tables[0].columns.append(
StockExportColumn(domain=self.domain, index='_id')
)
def format_config_for_javascript(self, table_configuration):
return table_configuration
def has_stock_column(self):
return any(
col.doc_type == 'StockExportColumn'
for col in self.custom_export.tables[0].columns
) if self.custom_export.tables else False
def __init__(self, request, domain, export_id=None, minimal=False):
self.request = request
self.domain = domain
self.presave = False
self.transform_dates = False
self.creating_new_export = not bool(export_id)
self.minimal = minimal
if export_id:
self.custom_export = self.ExportSchemaClass.get(export_id)
# also update the schema to include potential new stuff
self.custom_export.update_schema()
# enable configuring saved exports from this page
saved_group = HQGroupExportConfiguration.get_for_domain(self.domain)
self.presave = export_id in saved_group.custom_export_ids
self.export_stock = self.has_stock_column()
try:
assert self.custom_export.doc_type == 'SavedExportSchema', 'bad export doc type'
assert self.custom_export.type == self.export_type, 'wrong export type specified'
assert self.custom_export.index[0] == domain, 'bad export doc domain'
            except AssertionError as e:
raise BadExportConfiguration(str(e))
else:
self.custom_export = self.ExportSchemaClass(type=self.export_type)
self.export_stock = False
@property
@memoized
def post_data(self):
return json.loads(self.request.body)
def update_custom_export(self):
"""
Updates custom_export object from the request
and saves to the db
"""
post_data = self.post_data
custom_export_json = post_data['custom_export']
SAFE_KEYS = ('default_format', 'is_safe', 'name', 'schema_id', 'transform_dates')
for key in SAFE_KEYS:
self.custom_export[key] = custom_export_json[key]
# update the custom export index (to stay in sync)
schema_id = self.custom_export.schema_id
schema = ExportSchema.get(schema_id)
self.custom_export.index = schema.index
self.presave = post_data['presave']
self.export_stock = post_data['export_stock']
self.custom_export.tables = [
ExportTable.wrap(table)
for table in custom_export_json['tables']
]
table_dict = dict((t.index, t) for t in self.custom_export.tables)
for table in self.custom_export.tables:
if table.index in table_dict:
table_dict[table.index].columns = table.columns
else:
self.custom_export.tables.append(
ExportTable(
index=table.index,
display=self.custom_export.name,
columns=table.columns
)
)
self.update_custom_params()
self.custom_export.custom_validate()
self.custom_export.save()
if self.presave:
HQGroupExportConfiguration.add_custom_export(self.domain, self.custom_export.get_id)
else:
HQGroupExportConfiguration.remove_custom_export(self.domain, self.custom_export.get_id)
return self.custom_export.get_id
def get_context(self):
table_configuration = self.format_config_for_javascript(self.custom_export.table_configuration)
if self.minimal:
table_configuration = filter(lambda t: t['selected'], table_configuration)
return {
'custom_export': self.custom_export,
'default_order': self.default_order,
'deid_options': DEID.json_options,
'column_type_options': ColumnTypesOptions.json_options,
'presave': self.presave,
'export_stock': self.export_stock,
'DeidExportReport_name': DeidExportReport.name,
'table_configuration': table_configuration,
'domain': self.domain,
'commtrack_domain': Domain.get_by_name(self.domain).commtrack_enabled,
'minimal': self.minimal,
'helper': {
'back_url': self.ExportReport.get_url(domain=self.domain),
'export_title': self.export_title,
'slug': self.ExportReport.slug,
'allow_deid': self.allow_deid,
'allow_repeats': self.allow_repeats
}
}
class FormCustomExportHelper(CustomExportHelper):
ExportSchemaClass = FormExportSchema
ExportReport = export.ExcelExportReport
allow_repeats = True
default_questions = [FORM_CASE_ID_PATH, "form.meta.timeEnd", "_id", "id", "form.meta.username"]
questions_to_show = default_questions + ["form.meta.timeStart", "received_on", "form.meta.location.#text"]
@property
def export_title(self):
return _('Export Submissions to Excel')
def __init__(self, request, domain, export_id=None, minimal=False):
super(FormCustomExportHelper, self).__init__(request, domain, export_id, minimal)
if not self.custom_export.app_id:
self.custom_export.app_id = request.GET.get('app_id')
@property
def allow_deid(self):
try:
ensure_request_has_privilege(self.request, privileges.DEIDENTIFIED_DATA)
return True
except PermissionDenied:
return False
def update_custom_params(self):
p = self.post_data['custom_export']
e = self.custom_export
e.include_errors = p['include_errors']
e.split_multiselects = p['split_multiselects']
e.app_id = p['app_id']
super(FormCustomExportHelper, self).update_custom_params()
@property
@memoized
def default_order(self):
return self.custom_export.get_default_order()
def update_table_conf_with_questions(self, table_conf):
column_conf = table_conf[0].get("column_configuration", [])
current_questions = set(self.custom_export.question_order)
remaining_questions = current_questions.copy()
def is_special_type(q):
return any([q.startswith('form.#'), q.startswith('form.@'), q.startswith('form.case.'),
q.startswith('form.meta.'), q.startswith('form.subcase_')])
def generate_additional_columns(requires_case):
ret = []
case_name_col = CustomColumn(slug='case_name', index=FORM_CASE_ID_PATH, display='info.case_name',
transform=CASENAME_TRANSFORM, show=True, selected=True)
if not requires_case:
case_name_col.show, case_name_col.selected, case_name_col.tag = False, False, 'deleted'
matches = filter(case_name_col.match, column_conf)
if matches:
# hack/annoying - also might have to re-add the case id column which can get
# overwritten by case name if only that is set.
case_id_cols = filter(lambda col: col['index'] == FORM_CASE_ID_PATH, column_conf)
if len(case_id_cols) <= 1:
ret.append(ExportColumn(
index=FORM_CASE_ID_PATH,
display='info.case_id',
show=True,
).to_config_format(selected=False))
for match in matches:
case_name_col.format_for_javascript(match)
elif filter(lambda col: col["index"] == case_name_col.index, column_conf):
ret.append(case_name_col.default_column())
return ret
question_schema = self.custom_export.question_schema.question_schema
def update_multi_select_column(question, col):
if question in question_schema and not question_schema[question].repeat_context:
if self.creating_new_export:
col["options"] = question_schema[question].options
col["allOptions"] = question_schema[question].options
col["doc_type"] = SplitColumn.__name__
else:
current_options = set(col.get("options", []))
col["allOptions"] = list(set(question_schema[question].options) | current_options)
for col in column_conf:
question = col["index"]
if question in remaining_questions:
remaining_questions.discard(question)
col["show"] = True
if question.startswith("form.") and not is_special_type(question) and question not in current_questions:
col["tag"] = "deleted"
col["show"] = False
if question in self.questions_to_show:
col["show"] = True
if self.creating_new_export and (question in self.default_questions or question in current_questions):
col["selected"] = True
update_multi_select_column(question, col)
requires_case = self.custom_export.uses_cases()
case_cols = filter(lambda col: col["index"] == FORM_CASE_ID_PATH, column_conf)
if not requires_case:
for col in case_cols:
if col['index'] == FORM_CASE_ID_PATH:
col['tag'], col['show'], col['selected'] = 'deleted', False, False
col['allOptions'] = []
elif not case_cols:
column_conf.append({
'index': FORM_CASE_ID_PATH,
'show': True,
'is_sensitive': False,
'selected': True,
'transform': None,
'tag': None,
'display': '',
'doc_type': None,
'allOptions': None,
'options': []
})
# This adds [info] location.#text to the standard list of columns to export, even if no forms have been
# submitted with location data yet.
if (self.custom_export.app
and not self.custom_export.app.is_remote_app()
and self.custom_export.app.auto_gps_capture):
loc_present = False
for col in column_conf:
if col['index'] == 'form.meta.location.#text':
loc_present = True
if not loc_present:
column_conf.append({
'index': 'form.meta.location.#text',
'show': True,
'is_sensitive': False,
'selected': False,
'transform': None,
'tag': None,
'display': '',
'doc_type': None,
'allOptions': None,
'options': []
})
column_conf.extend(generate_additional_columns(requires_case))
def get_remainder_column(question):
col = ExportColumn(
index=question,
display='',
show=True,
).to_config_format(selected=self.creating_new_export)
update_multi_select_column(question, col)
return col
column_conf.extend([
get_remainder_column(q)
for q in remaining_questions
])
# show all questions in repeat groups by default
for conf in table_conf:
if conf["index"].startswith('#.form.'):
for col in conf.get("column_configuration", []):
col["show"] = True
table_conf[0]["column_configuration"] = column_conf
return table_conf
def get_context(self):
ctxt = super(FormCustomExportHelper, self).get_context()
self.update_table_conf_with_questions(ctxt["table_configuration"])
return ctxt
class CustomColumn(object):
def __init__(self, slug, index, display, transform, is_sensitive=False, tag=None, show=False, selected=False):
self.slug = slug
self.index = index
self.display = display
self.transform = transform
self.is_sensitive = is_sensitive
self.tag = tag
self.show = show
self.selected = selected
def match(self, col):
return col['index'] == self.index and col['transform'] == self.transform
def format_for_javascript(self, col):
# this is js --> js conversion so the name is pretty bad
# couch --> javascript UI code
col['special'] = self.slug
def default_column(self):
# this is kinda hacky - mirrors ExportColumn.to_config_format to add custom columns
# to the existing export UI
return {
'index': self.index,
'selected': self.selected,
'display': self.display,
'transform': self.transform,
"is_sensitive": self.is_sensitive,
'tag': self.tag,
'special': self.slug,
'show': self.show,
'doc_type': None,
'allOptions': None,
'options': []
}
class CaseCustomExportHelper(CustomExportHelper):
ExportSchemaClass = CaseExportSchema
ExportReport = export.CaseExportReport
export_type = 'case'
default_properties = ["_id", "closed", "closed_on", "modified_on", "opened_on", "info.owner_name", "id"]
properties_to_show = ["identifier", "referenced_id", "referenced_type", "id", "doc_type"]
default_transformed_properties = ["info.closed_by_username", "info.last_modified_by_username",
"info.opened_by_username", "info.owner_name"]
meta_properties = ["_id", "closed", "closed_by", "closed_on", "domain", "computed_modified_on_",
"server_modified_on", "modified_on", "opened_by", "opened_on", "owner_id",
"user_id", "type", "version", "external_id"]
server_properties = ["_rev", "doc_type", "-deletion_id", "initial_processing_complete"]
row_properties = ["id"]
@property
def export_title(self):
return _('Export Cases and Users')
def format_config_for_javascript(self, table_configuration):
custom_columns = [
CustomColumn(slug='last_modified_by_username', index='user_id',
display='info.last_modified_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='opened_by_username', index='opened_by',
display='info.opened_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='closed_by_username', index='closed_by',
display='info.closed_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='owner_name', index='owner_id', display='info.owner_name',
transform=OWNERNAME_TRANSFORM),
]
main_table_columns = table_configuration[0]['column_configuration']
for custom in custom_columns:
matches = filter(custom.match, main_table_columns)
if not matches:
main_table_columns.append(custom.default_column())
else:
for match in matches:
custom.format_for_javascript(match)
return table_configuration
def update_table_conf(self, table_conf):
column_conf = table_conf[0].get("column_configuration", [])
current_properties = set(self.custom_export.case_properties)
remaining_properties = current_properties.copy()
def is_special_type(p):
return any([p in self.meta_properties, p in self.server_properties, p in self.row_properties])
def update_multi_select_column(col):
if self.creating_new_export:
col["options"] = []
col["allOptions"] = []
else:
current_options = col.get("options", [])
col["allOptions"] = current_options
return col
for col in column_conf:
prop = col["index"]
display = col.get('display') or prop
if prop in remaining_properties:
remaining_properties.discard(prop)
col["show"] = True
if not is_special_type(prop) and prop not in current_properties:
col["tag"] = "deleted"
col["show"] = False
if prop in self.default_properties + list(current_properties) or \
display in self.default_transformed_properties:
col["show"] = True
if self.creating_new_export:
col["selected"] = True
update_multi_select_column(col)
column_conf.extend([
update_multi_select_column(ExportColumn(
index=prop,
display='',
show=True,
).to_config_format(selected=self.creating_new_export))
for prop in filter(lambda prop: not prop.startswith("parent/"), remaining_properties)
])
table_conf[0]["column_configuration"] = column_conf
for table in table_conf:
for col in table.get("column_configuration", []):
if col["index"] in self.properties_to_show:
col["show"] = True
# Show most of the Case History rows by default
dont_show_cols = {"sync_log_id"}
for table in table_conf:
if table.get("index", "") == "#.actions.#":
for col in table.get("column_configuration", []):
index = col.get("index", "")
if index not in dont_show_cols:
col["show"] = True
else:
dont_show_cols.discard(index)
break
return table_conf
def get_context(self):
ctxt = super(CaseCustomExportHelper, self).get_context()
self.update_table_conf(ctxt["table_configuration"])
return ctxt
def make_custom_export_helper(request, export_type, domain=None, export_id=None):
export_type = export_type or request.GET.get('request_type', 'form')
minimal = bool(request.GET.get('minimal', False))
return {
'form': FormCustomExportHelper,
'case': CaseCustomExportHelper,
}[export_type](request, domain, export_id=export_id, minimal=minimal)
|
|
#!/usr/bin/env python
import sys
#sys.path.insert(0, '/usr/local/lib/python2.7/dist-packages/bintrees')
import CacheStats
from bintrees import AVLTree
CACHE_ATIME = 0
CACHE_SIZE = 1
CACHE_OBJ_ID = 2
CACHE_NEXT_TIME = 3
class BeladyCache():
def __init__(self, cache_size, min_obj_size, max_obj_size):
self._max_size = cache_size
self._used_size = 0
# dictionary: obj_id -> object with last and next caching time
self._cached_objects = {}
# AVL tree: next_time -> object with last and next caching time
self._tree = AVLTree()
self._oldest_obj_id = None
self._freshest_obj_id = None
self.stats = CacheStats.CacheStats("Belady", cache_size)
self.daily_stats = CacheStats.DailyCacheStats(cache_size)
def get_cache_stats_total(self):
return self.stats.to_dict()
def get_cache_stats_day(self):
self.daily_stats.cache_used = self._used_size
s = self.daily_stats.to_dict()
self.daily_stats.reset()
return s
def get_num_cached_objects(self):
return len(self._cached_objects)
def is_cached(self, obj_id):
return obj_id in self._cached_objects
def is_remembered(self, obj_id):
return self.is_cached(obj_id)
def get_free_cache_bytes(self):
return self._max_size - self._used_size
def update_obj_size(self, obj_id, size, delta):
if obj_id in self._cached_objects:
# update size of object in cache
self._cached_objects[obj_id][CACHE_SIZE] = size
# update size of object in tree
            next_time = self._cached_objects[obj_id][CACHE_NEXT_TIME]  # inefficient: two dict lookups
self._tree[next_time][CACHE_SIZE] = size
# update size used in cache
self._used_size += delta
        # TODO: Shouldn't we also check whether the cache size is now exceeded?
def _evict_bytes(self, bytes, xtime):
if self.stats.first_eviction_ts == 0:
self.stats.first_eviction_ts = xtime
# remove objects from cache
evicted_bytes = 0
while evicted_bytes < bytes:
# remove object with largest next_line_number from tree
(next_line_number, obj) = self._tree.pop_max()
# remove same object from cache
evicted_bytes += self._remove_cached(obj[CACHE_OBJ_ID])
# update stats
self.stats.cached_objects_current -= 1
self.stats.evicted_objects += 1
self.daily_stats.evicted_objects += 1
def remove_cached(self, obj_id):
if self.is_cached(obj_id):
self.stats.deleted_objects += 1
self.stats.cached_objects_current -= 1
self.daily_stats.deleted_objects += 1
return self._remove_cached(obj_id)
return None
def _remove_cached(self, obj_id):
if obj_id in self._cached_objects:
# remove object from cache
obj = self._cached_objects.pop(obj_id)
# remove object from tree
next_line_number = obj[CACHE_NEXT_TIME]
self._tree.discard(next_line_number)
# adapt size
self._used_size -= obj[CACHE_SIZE]
return obj[CACHE_SIZE]
return 0
def cache_object(self, obj_id, size, xtime, next_line_number, force=True, is_new=False):
# do not cache object if next_line_number == -1
if next_line_number == -1:
return
# add object to cache
self._cached_objects[obj_id] = [xtime, size, obj_id, next_line_number]
# add new object to tree
self._tree[next_line_number] = [xtime, size, obj_id, next_line_number]
# update size
self._used_size += size
# remove other objects from cache if necessary
if self._used_size > self._max_size:
bytes = self._used_size - self._max_size
self._evict_bytes(bytes, next_line_number)
# check whether cache is large enough
if self._used_size > self._max_size:
# remove new object
self._cached_objects.pop(obj_id)
self._tree.discard(next_line_number)
raise Exception("Error, cannot cache file. Size to large: %s %d" % (obj_id, size))
# update stats
self.stats.cached_objects_current += 1
self.stats.cached_objects_total += 1
self.stats.cached_bytes_written += size
self.daily_stats.cached_objects += 1
self.daily_stats.cached_bytes_written += size
def get_cached(self, obj_id, xtime, next_line):
# GET
if obj_id in self._cached_objects:
# remove object from cache
size = self._remove_cached(obj_id)
# add object with new time to cache
self.cache_object(obj_id, size, xtime, next_line)
# update stats
self.stats.cache_hits += 1
self.stats.cached_bytes_read += size
self.daily_stats.cache_hits += 1
self.daily_stats.cached_bytes_read += size
return True
# update stats
self.stats.cache_misses += 1
self.daily_stats.cache_misses += 1
return False
def rename(self, from_obj_id, to_obj_id):
# Belady cache stores from_obj only if to_obj is accessed (GET), possibly after a RENAME chain
# Belady cache does not store to_obj because it will be overwritten by this function
if self.is_cached(to_obj_id):
raise Exception("Error in rename(...): File cached that is not needed.")
if self.is_cached(from_obj_id):
# retrieve object and store it under new ID
obj = self._cached_objects.pop(from_obj_id)
self._cached_objects[to_obj_id] = obj
# update ID of object in tree
next_line_number = obj[CACHE_NEXT_TIME]
self._tree[next_line_number][CACHE_OBJ_ID] = to_obj_id
def main(argv=None):
    # NOTE: min/max object size are unused by BeladyCache.__init__;
    # the zeros below are placeholder values.
    cache = BeladyCache(168884986026393600, 0, 0)
"""
cache.cache_object('a', 1000, 10)
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
cache.cache_object('b', 2000, 20)
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
cache.cache_object('c', 2000, 30)
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
cache.cache_object('d', 3000, 40)
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
print (json.dumps(cache._cached_objects, indent=2))
cache._remove_cached('a')
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
cache._remove_cached('d')
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
cache.cache_object('e', 3000, 80)
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
cache.get_cached('b', 90)
print (json.dumps(cache._cached_objects, indent=2))
print ("oldest_obj_id", cache._oldest_obj_id)
print ("freshest_obj_id", cache._freshest_obj_id)
print ("====================================")
# test renaming.
## create 3 objects. rename the freshest, rename the oldest, rename the middle
c2 = LRUCache(10000)
c2.cache_object('old', 100, 1)
c2.cache_object('middle', 100, 2)
c2.cache_object('fresh', 100, 3)
assert(c2._freshest_obj_id == "fresh")
assert(c2._oldest_obj_id == "old")
c2.rename("old", "new_old")
assert(c2._oldest_obj_id == "new_old")
c2.rename("fresh", "new_fresh")
assert(c2._freshest_obj_id == "new_fresh")
c2.rename("middle", "new_middle")
assert(c2._cached_objects["new_middle"][CACHE_FRESHER_ID] == "new_fresh")
assert(c2._cached_objects["new_middle"][CACHE_OLDER_ID] == "new_old")
"""
if __name__ == "__main__":
sys.exit(main())
|
|
import sys
import unittest
from mock import (
patch,
Mock,
)
from bind.zoneparser import ZoneParser
# from bind.zone import Zone
class TestZoneParser(unittest.TestCase):
ez = []
def setUp(self):
with open('contrib/tests/fixtures/db.orangebox.com') as f:
self.ez = f.readlines()
self.ezp = list(self.ez)
def test_init_loads_keys_from_zone_class(self):
zp = ZoneParser('example.com')
self.assertIn('CNAME', zp.implemented_records)
self.assertIn('AAAA', zp.implemented_records)
self.assertIn('A', zp.implemented_records)
self.assertIn('SOA', zp.implemented_records)
self.assertIn('NS', zp.implemented_records)
self.assertIn('NAPTR', zp.implemented_records)
self.assertIn('SRV', zp.implemented_records)
@patch('builtins.open' if sys.version_info > (3,) else '__builtin__.open')
def test_from_file_exception(self, mopen):
mopen.return_value.__enter__ = lambda s: s
mopen.return_value.__exit__ = Mock()
mopen.return_value.readlines = Mock(side_effect=OSError('Intentional'))
zp = ZoneParser('foo.com')
zp.normalize_contents = Mock()
zp.normalize_contents.return_value = self.ez
self.assertEqual(zp.from_file(), [])
@patch('bind.zoneparser.ZoneParser.a_from_array')
@patch('bind.zoneparser.ZoneParser.ns_from_array')
@patch('bind.zoneparser.ZoneParser.soa_from_array')
def test_array_to_zone(self, soam, nsm, am):
zp = ZoneParser('orangebox.com')
zp.contents = self.ez
zp.array_to_zone()
soam.assert_called_with(self.ez[5].split())
nsm.assert_called_with(self.ez[6].split())
am.assert_called_with(self.ez[7].split())
def test_array_to_zone_with_data(self):
data = """sprout 300 IN A 10.0.5.1
sprout 300 IN NAPTR 1 1 "S" "SIP+D2T" "" _sip._tcp.sprout
_sip._tcp.sprout 300 IN SRV 0 0 5054 sprout-0
sprout-0 300 IN A 10.0.5.1""".split('\n')
zp = ZoneParser('orangebox.com')
zp.array_to_zone(data)
cont = zp.zone.contents
self.assertEqual(cont['A'], [{'alias': 'sprout',
'addr': '10.0.5.1',
'ttl': '300'},
{'alias': 'sprout-0',
'addr': '10.0.5.1',
'ttl': '300'}])
self.assertEqual(cont['NAPTR'], [{'alias': 'sprout',
'order': '1',
'pref': '1',
'params': '"SIP+D2T"',
'regexp': '""',
'flag': '"S"',
'replace': '_sip._tcp.sprout',
'ttl': '300'}])
self.assertEqual(cont['SRV'], [{'alias': '_sip._tcp.sprout',
'port': '5054',
'ttl': '300',
'priority': '0',
'target': 'sprout-0',
'weight': '0'}])
def test_soa_from_array(self):
zp = ZoneParser('orangebox.com')
zp.soa_from_array(self.ez[5].split())
self.assertEqual(zp.zone.contents['SOA'],
[{'addr': 'ns.orangebox.com.',
'owner': 'root.orangebox.com.',
'expiry': '3w',
'minimum': '15m',
'refresh': '12h',
'serial': '16640992',
'update-retry': '15m'}])
def test_cname_from_array(self):
zp = ZoneParser('orangebox.com')
zp.cname_from_array(self.ez[8].split())
self.assertEqual(zp.zone.contents['CNAME'], [{'alias': 'mail',
'addr': 'gmail.com.'}])
def test_a_from_array(self):
zp = ZoneParser('orangebox.com')
zp.a_from_array(self.ez[7].split())
self.assertEqual(zp.zone.contents['A'], [{'addr': '10.0.10.55',
'alias': 'ns'}])
def test_a_from_array_with_ttl(self):
zp = ZoneParser('orangebox.com')
zp.a_from_array(['ns', '300', 'IN', 'A', '10.0.10.55'])
self.assertEqual(zp.zone.contents['A'], [{'addr': '10.0.10.55',
'alias': 'ns',
'ttl': '300'}])
def test_ns_from_array(self):
zp = ZoneParser('orangebox.com')
zp.ns_from_array(['@', 'IN', 'NS', '10.0.10.55'])
self.assertEqual(zp.zone.contents['NS'], [{'addr': '10.0.10.55',
'alias': '@'}])
def test_ns_from_array_with_ttl(self):
zp = ZoneParser('orangebox.com')
zp.ns_from_array(['@', '300', 'IN', 'NS', '10.0.10.55'])
self.assertEqual(zp.zone.contents['NS'], [{'addr': '10.0.10.55',
'alias': '@',
'ttl': '300'}])
def test_naptr_from_array(self):
zp = ZoneParser('example.com')
zcontents = '@ 3200 IN NAPTR 1 1 "S" "SIP+D2T" "" _sip._tcp'.split(' ')
zp.naptr_from_array(zcontents)
self.assertEqual(zp.zone.contents['NAPTR'], [{'alias': '@',
'order': '1',
'pref': '1',
'flag': '"S"',
'params': '"SIP+D2T"',
'regexp': '""',
'ttl': '3200',
'replace': '_sip._tcp'}])
def test_srv_from_array(self):
zp = ZoneParser('example.com')
zcontents = '_sip._udp 3200 IN SRV 0 0 5060 bono-0'.split(' ')
zp.srv_from_array(zcontents)
self.assertEqual(zp.zone.contents['SRV'], [{'alias': '_sip._udp',
'priority': '0',
'weight': '0',
'port': '5060',
'target': 'bono-0',
'ttl': '3200'}])
def test_bono_a_from_array(self):
zp = ZoneParser('offline.cw-ngv.com')
zp.a_from_array(u'@ 300 IN A 54.73.45.41'.split(' '))
self.assertEqual(zp.zone.contents['A'], [{'ttl': '300',
'addr': '54.73.45.41',
'alias': '@'}])
def test_ellis_a_from_array(self):
zp = ZoneParser('offline.cw-ngv.com')
zp.a_from_array(u'ellis-0 300 IN A 54.73.45.41'.split(' '))
self.assertEqual(zp.zone.contents['A'], [{'ttl': '300',
'addr': '54.73.45.41',
'alias': 'ellis-0'}])
@patch('bind.zone.Zone.to_file')
@patch('os.remove')
def test_save(self, osrm, fwm):
zp = ZoneParser('example.com')
zp.passes_validation = Mock()
zp.passes_validation.return_value = True
zp.add_to_local_zones = Mock()
zp.save()
osrm.assert_called_with('/etc/bind/db.example.com.proposed')
fwm.assert_called_with('/etc/bind/db.example.com')
zp.add_to_local_zones.assert_called_once()
def test_find_type(self):
zp = ZoneParser('example.com')
self.assertEqual(zp.find_type(['foo', 'bar', 'baz', 'CNAME']), 3)
self.assertEqual(zp.find_type(['foo', 'bar', 'baz']), -1)
def test_dict_to_zone(self):
zp = ZoneParser('example.com')
zp.update_ns = Mock()
zp.update_soa = Mock()
zp.update_cname = Mock()
zp.update_a = Mock()
zp.dict_to_zone({'rr': 'NS'})
zp.update_ns.assert_called_with({'rr': 'NS'})
zp.dict_to_zone({'rr': 'SOA'})
zp.update_soa.assert_called_with({'rr': 'SOA'})
zp.dict_to_zone({'rr': 'CNAME'})
zp.update_cname.assert_called_with({'rr': 'CNAME'})
zp.dict_to_zone({'rr': 'A'})
zp.update_a.assert_called_with({'rr': 'A'})
zp.dict_to_zone({'rr': 'NOPE'})
@patch('builtins.open' if sys.version_info > (3,) else '__builtin__.open')
def test_read_local_zones(self, mopen):
seed_zone = """zone "255.in-addr.arpa" {
type master;
file "/etc/bind/db.255";
};"""
mopen.return_value.__enter__ = lambda s: s
mopen.return_value.__exit__ = Mock()
mopen.return_value.readlines.return_value = seed_zone.split('\n')
zp = ZoneParser('example.com')
self.assertEqual(zp.read_local_zones(), seed_zone.split('\n'))
@patch('builtins.open' if sys.version_info > (3,) else '__builtin__.open')
def test_write_local_zones(self, mopen):
seed_zone = """zone "255.in-addr.arpa" {
type master;
file "/etc/bind/db.255";
};"""
mopen.return_value.__enter__ = lambda s: s
mopen.return_value.__exit__ = Mock()
mopen.return_value.write = Mock()
        made_config = seed_zone.split('\n')
        made_config.append('hello')
zp = ZoneParser('example.com')
zp.write_local_zones(made_config)
# mopen.return_value.write.assert_called_with(made_config)
def test_exists_in_local_zones(self):
seed_zone = """zone "example.com" {
type master;
file "/etc/bind/db.example.com";
};"""
zp = ZoneParser('example.com')
self.assertEqual(zp.exists_in_local_zones(seed_zone.split('\n')), 0)
zp = ZoneParser('nope.com')
self.assertEqual(zp.exists_in_local_zones(seed_zone.split('\n')), -1)
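# A minimal entry point (illustrative; the suite may normally be run via a
# project-level test runner) so the module can also be executed directly:
if __name__ == '__main__':
    unittest.main()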
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
else:
a = x.astype(np_type)
b = y.astype(np_type)
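        # The NumPy reference below applies the adjoint explicitly, while
        # matrix_solve is handed the original matrix plus the adjoint flag;
        # both paths should solve the same linear system.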
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
for use_placeholder in False, True:
with self.test_session(use_gpu=True) as sess:
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
def testNonSquareMatrix(self):
    # Attempting to solve a non-square matrix should raise an error.
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
linalg_ops.matrix_solve(matrix, matrix)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session(use_gpu=True):
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve(matrix, rhs)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session(use_gpu=True):
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_solve(matrix, matrix).eval()
def testConcurrent(self):
with self.test_session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in False, True:
lhs1 = random_ops.random_normal([3, 3], seed=42)
lhs2 = random_ops.random_normal([3, 3], seed=42)
rhs1 = random_ops.random_normal([3, 3], seed=42)
rhs2 = random_ops.random_normal([3, 3], seed=42)
s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
all_ops += [s1, s2]
val = sess.run(all_ops)
self.assertAllEqual(val[0], val[1])
self.assertAllEqual(val[2], val[3])
class MatrixSolveBenchmark(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
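  # Shapes with a leading batch dimension, e.g. (513, 4, 4), exercise the
  # batched solve path; the 2-D shapes benchmark single square systems.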
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
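    # ones/(2n) plus the identity yields a strictly diagonally dominant,
    # hence well-conditioned and invertible, test matrix.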
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
|
|
"""
Bias Calibration
================
This simple script can be used to study the number of bias frames required to meet the VIS calibration requirements.
The following requirements related to the bias calibration have been taken from the GDPRD.
R-GDP-CAL-052:
The contribution of the residuals of VIS bias subtraction to the *error on the determination of each ellipticity
component* of the local PSF shall not exceed 3x10^-5 (one sigma).
R-GDP-CAL-062:
The contribution of the residuals of VIS bias subtraction to the *relative error* \sigma(R2)/R2 on the determination of
the local PSF R2 shall not exceed 1x10^-4 (one sigma).
:requires: PyFITS
:requires: NumPy
:requires: matplotlib
:requires: VISsim-Python
:version: 0.95
:author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
"""
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import pyfits as pf
import numpy as np
import math, datetime, cPickle, sys
from scipy import interpolate
from analysis import shape
from support import logger as lg
from support import surfaceFitting as sf
from support import bleedingtest as write
from support import files as fileIO
def testBiasCalibrationDelta(log, numdata=2066, floor=995, xsize=2048, ysize=2066, order=3, biases=15, surfaces=100,
file='psf1x.fits', psfs=500, psfscale=1.e3, debug=False, plots=False):
"""
Derive the PSF ellipticities for a given number of random surfaces with random PSF positions
and a given number of biases median combined and compare to the nominal PSF ellipticity.
This function can be used to derive the error (delta) in determining ellipticity and size given
a reference PSF.
    Choices that need to be made and that affect the results:
#. bias surface that is assumed (amplitude, complexity, etc.)
#. whether the order of the polynomial surface to be fitted is known or not
#. size of the Gaussian weighting function when calculating the ellipticity components
There are also other choices such as the number of PSFs and scaling and the random numbers generated for
the surface that also affect the results, however, to a lesser degree.
Generates a set of plots that can be used to inspect the simulation.
"""
log.info('Processing file %s' % file)
#read in data without noise or bias level and scale it to 20k electrons
data = pf.getdata(file)
data /= np.max(data)
data *= psfscale
#derive the reference value from the scaled data
sh = shape.shapeMeasurement(data.copy(), log)
results = sh.measureRefinedEllipticity()
#sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
    #rescale to not lose numerical accuracy
reference1 = results['e1']
reference2 = results['e2']
refR2 = results['R2']
reference = results['ellipticity']
    print 'Reference Ellipticities and R2:'
print reference1, reference2, reference, refR2
#generate a random quadrant surface representing BIAS without noise
#modify zclean if a different order surface is needed
x = np.random.random(numdata)
y = np.random.random(numdata)
xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), xsize),
np.linspace(y.min(), y.max(), ysize))
zclean = yy - xx + 0.78*xx**2 + 15.0*yy**2 - 1.75*xx*yy + 10.0*xx**3 + 0.3*yy**3 + floor
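    #note that zclean is a 3rd order polynomial surface, consistent with the
    #default order=3 used when fitting the surface back below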
#random positions for the PSFs, these positions are the lower corners
xpositions = np.random.random_integers(0, zclean.shape[1] - data.shape[1], psfs)
ypositions = np.random.random_integers(0, zclean.shape[0] - data.shape[0], psfs)
if plots:
# generate 2D plot
im = plt.imshow(zclean, extent=(0, ysize, xsize, 0))
        plt.scatter(xpositions + data.shape[1]/2, ypositions + data.shape[0]/2)
c1 = plt.colorbar(im)
c1.set_label('BIAS [ADUs]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('NoNoise2D.png')
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, zclean, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel('BIAS [ADUs]')
plt.savefig('NoNoise.png')
plt.close()
out = {}
#number of biases to median combine
for a in xrange(biases):
print 'Number of Biases: %i / %i' % (a+1, biases)
#data storage
de1 = []
de2 = []
de = []
R2 = []
R2abs = []
#number of random readnoised surfaces to loop over
for b in xrange(surfaces):
print 'Surface: %i / %i' % (b+1, surfaces)
#add readout noise based on a+1 median combined biases
z = addReadoutNoise(zclean.copy(), number=a+1)
if plots:
# generate 2D plot
im = plt.imshow(z, extent=(0, ysize, xsize, 0))
c1 = plt.colorbar(im)
c1.set_label('BIAS [ADUs]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('Readnoised%i%i.png' % (a+1, b+1))
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, z, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel('BIAS [ADUs]')
plt.savefig('Readnoised3D%i%i.png' % (a+1, b+1))
plt.close()
# Fit 2d polynomial to the noised data
m = sf.polyfit2d(xx.ravel(), yy.ravel(), z.ravel(), order=order)
# Evaluate it on a rectangular grid
fitted = sf.polyval2d(xx, yy, m)
if plots:
# generate 2D plot
im = plt.imshow(fitted, extent=(0, ysize, xsize, 0))
c1 = plt.colorbar(im)
c1.set_label('BIAS [ADUs]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('Fitted2D%i.png' % (a+1))
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, fitted, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel('BIAS [ADUs]')
plt.savefig('Fitted3D%i.png' % (a+1))
plt.close()
#subtract the no noise surface from the fit
fitted -= zclean
if plots:
# generate 2D plot
im = plt.imshow(fitted, extent=(0, ysize, xsize, 0))
c1 = plt.colorbar(im)
c1.set_label(r'$\Delta$BIAS [ADUs]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('Residual2D%i%i.png' % (a+1, b+1))
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, fitted, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel(r'$\Delta$BIAS [ADUs]')
ax.set_zlim(-0.01, 0.01)
plt.savefig('Residual3D%i%i.png' % (a+1, b+1))
plt.close()
#loop over the PSFs
for xpos, ypos in zip(xpositions, ypositions):
#measure e and R2 from the postage stamp image
small = fitted[ypos:ypos+data.shape[0], xpos:xpos+data.shape[1]].copy()
small += data.copy()
sh = shape.shapeMeasurement(small.copy(), log)
results = sh.measureRefinedEllipticity()
#save delta values
de1.append(results['e1'] - reference1)
de2.append(results['e2'] - reference2)
de.append(results['ellipticity'] - reference)
R2.append((results['R2'] - refR2) / refR2)
R2abs.append((results['R2'] - refR2))
if debug:
print xpos, ypos
write.writeFITSfile(small/data, 'testResidualDelta.fits')
print 'DEBUG mode -- exiting now'
import sys; sys.exit()
if plots:
plotDeltaEs(de1, de2, de, 'MultipleBiases%i.pdf' % (a+1), title='%i Biases median combined' % (a+1))
out[a+1] = [de1, de2, de, R2, R2abs]
return out
def testBiasCalibrationSigma(log, numdata=2066, floor=3500, xsize=2048, ysize=2066, order=3, biases=15, surfaces=100,
file='psf1x.fits', psfs=500, psfscale=1e5, gain=3.1,
debug=False, plots=True):
"""
Derive the PSF ellipticities for a given number of random surfaces with random PSF positions
and a given number of biases median combined.
    This function is used to derive the actual values so that the knowledge (variance) can be studied.
    Choices that need to be made and that affect the results:
#. bias surface that is assumed (amplitude, complexity, etc.)
#. whether the order of the polynomial surface to be fitted is known or not
#. size of the Gaussian weighting function when calculating the ellipticity components
There are also other choices such as the number of PSFs and scaling and the random numbers generated for
the surface that also affect the results, however, to a lesser degree.
Generates a set of plots that can be used to inspect the simulation.
"""
log.info('Processing file %s' % file)
#read in data without noise or bias level and renormalize it
data = pf.getdata(file)
data /= np.max(data)
data = data * psfscale / gain
#generate a random quadrant surface representing BIAS without noise
#modify zclean if a different order surface is needed
x = np.random.random(numdata)
y = np.random.random(numdata)
xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), xsize),
np.linspace(y.min(), y.max(), ysize))
zclean = (yy - xx + 0.78*xx**2 + 15.0*yy**2 - 1.75*xx*yy + 10.0*xx**3 + 0.3*yy**3 + floor) / gain
#random positions for the PSFs, these positions are the lower corners
xpositions = np.random.random_integers(0, zclean.shape[1] - data.shape[1], psfs)
ypositions = np.random.random_integers(0, zclean.shape[0] - data.shape[0], psfs)
# generate 2D plot
if plots:
im = plt.imshow(zclean*gain, extent=(0, ysize, xsize, 0))
plt.scatter(xpositions + (data.shape[1]/2), ypositions + (data.shape[0]/2), color='white')
c1 = plt.colorbar(im)
c1.set_label('BIAS [electrons]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('NoNoise2D.png')
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, zclean*gain, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel('BIAS [electrons]')
plt.savefig('NoNoise.png')
plt.close()
out = {}
#number of biases to median combine
for a in xrange(biases):
print 'Number of Biases: %i / %i' % (a+1, biases)
#data storage
de1 = []
de2 = []
de = []
R2 = []
#number of random readnoised surfaces to loop over
for b in xrange(surfaces):
print 'Number of Random Realisations: %i / %i' % (b+1, surfaces)
#add readout noise based on a+1 median combined bias
#this surface needs to be integer, because it resembles a recorded one
z = addReadoutNoise(zclean.copy(), number=a+1)
if plots:
# generate 2D plot
im = plt.imshow(z*gain, extent=(0, ysize, xsize, 0))
c1 = plt.colorbar(im)
c1.set_label('BIAS [electrons]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('Readnoised%i%i.png' % (a+1, b+1))
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, z*gain, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel('BIAS [electrons]')
plt.savefig('Readnoised3D%i%i.png' % (a+1, b+1))
plt.close()
# Fit 2d polynomial to the noised data
m = sf.polyfit2d(xx.ravel(), yy.ravel(), z.ravel(), order=order)
# Evaluate it on a rectangular grid
fitted = sf.polyval2d(xx, yy, m)
if plots:
# generate 2D plot
im = plt.imshow(fitted*gain, extent=(0, ysize, xsize, 0))
c1 = plt.colorbar(im)
c1.set_label('BIAS [electrons]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('Fitted2D%i.png' % (a+1))
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, fitted*gain, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel('BIAS [electrons]')
plt.savefig('Fitted3D%i.png' % (a+1))
plt.close()
#subtract the no noise surface from the fit, adjust for integer conversion done earlier
fitted -= zclean.copy()
if plots:
# generate 2D plot
im = plt.imshow(fitted*gain, extent=(0, ysize, xsize, 0))
c1 = plt.colorbar(im)
c1.set_label(r'$\Delta$BIAS [electrons]')
plt.xlim(0, ysize)
plt.ylim(0, xsize)
plt.xlabel('Y [pixels]')
plt.ylabel('X [pixels]')
plt.savefig('Residual2D%i%i.png' % (a+1, b+1))
plt.close()
#and 3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xx*xsize, yy*ysize, fitted*gain, rstride=100, cstride=100, alpha=0.6, cmap=cm.jet)
ax.set_xlabel('X [pixels]')
ax.set_ylabel('Y [pixels]')
ax.set_zlabel(r'$\Delta$BIAS [electrons]')
ax.set_zlim(-0.1, 0.1)
plt.savefig('Residual3D%i%i.png' % (a+1, b+1))
plt.close()
#loop over the PSFs
for xpos, ypos in zip(xpositions, ypositions):
#measure e and R2 from the postage stamp image
small = fitted[ypos:ypos+data.shape[0], xpos:xpos+data.shape[1]].copy()
#print np.sum(small), np.average(small), np.median(small), small.shape
small += data.copy()
sh = shape.shapeMeasurement(small.copy(), log)
results = sh.measureRefinedEllipticity()
#save values
de1.append(results['e1'])
de2.append(results['e2'])
de.append(results['ellipticity'])
R2.append(results['R2'])
if debug:
print xpos, ypos
write.writeFITSfile(small/data, 'testResidualSigma.fits')
print 'DEBUG mode -- exiting now'
import sys; sys.exit()
if plots:
plotEs(de1, de2, de, 'MBiases%i.png' % (a+1), title='%i Biases median combined' % (a+1))
out[a+1] = [de1, de2, de, R2]
return out
def plotDeltaEs(deltae1, deltae2, deltae, output, title='', ymax=8, req=3):
"""
Generates a simple plot showing the errors in the ellipticity components.
"""
deltae1 = np.asarray(deltae1)
deltae2 = np.asarray(deltae2)
deltae = np.asarray(deltae)
#plot histograms
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(title)
bins = np.arange(-6, 6.1, 0.1)
ax.hist(deltae, bins=bins, label=r'$e$', alpha=0.3, normed=False)
ax.hist(deltae1, bins=bins, label=r'$e_{1}$', alpha=0.2, normed=False)
ax.hist(deltae2, bins=bins, label=r'$e_{2}$', alpha=0.1, normed=False)
ax.axvline(x=req, c='g', ls='--', label='Requirement')
ax.axvline(x=-req, c='g', ls='--')
ax.set_xlim(-6, 6)
ax.set_xlabel(r'$\Delta e_{i}\ , \ \ \ i \in [1,2] \ \ \ \ [10^{-5}]$')
    ax.set_ylabel('Number of Samples')
plt.legend(shadow=True, fancybox=True)
plt.savefig('hist' + output)
plt.close()
#make scatter plots
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(title)
ax.plot(deltae, 'mD', label=r'$e$')
ax.plot(deltae2, 'ys', label=r'$e_{2}$')
ax.plot(deltae1, 'bo', label=r'$e_{1}$')
ax.fill_between(np.arange(len(deltae1)), np.ones(len(deltae1))*req, ymax, facecolor='red', alpha=0.08)
ax.fill_between(np.arange(len(deltae1)), -np.ones(len(deltae1))*req, -ymax, facecolor='red', alpha=0.08)
ax.axhline(y=req, c='g', ls='--', label='Requirement')
ax.axhline(y=-req, c='g', ls='--')
ax.set_ylim(-ymax, ymax)
ax.set_xlabel('Number of Iterations')
ax.set_ylabel(r'$\Delta e_{i}\ , \ \ \ i \in [1,2] \ \ \ \ [10^{-5}]$')
plt.text(0.5, 0.1,
r'Average error in $e_{1}=$ %f and $e_{2}=$ %f' % (np.mean(deltae1), np.mean(deltae2)),
ha='center',
va='center',
transform=ax.transAxes)
plt.legend(shadow=True, fancybox=True, numpoints=1, ncol=2)
plt.savefig(output)
plt.close()
def plotEs(deltae1, deltae2, deltae, output, title=''):
"""
Generates a simple plot showing the ellipticity components.
"""
deltae1 = np.asarray(deltae1)
deltae2 = np.asarray(deltae2)
deltae = np.asarray(deltae)
#plot histograms
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(title)
bins = 10
# bins = np.arange(0.0, 0.2, 0.02)
ax.hist(deltae, bins=bins, label=r'$e$', alpha=0.3, normed=False)
ax.hist(deltae1, bins=bins, label=r'$e_{1}$', alpha=0.2, normed=False)
ax.hist(deltae2, bins=bins, label=r'$e_{2}$', alpha=0.1, normed=False)
#ax.axvline(x=req, c='g', ls='--', label='Requirement')
#ax.axvline(x=-req, c='g', ls='--')
#ax.set_xlim(-6, 6)
ax.set_xlabel(r'$e_{i}\ , \ \ \ i \in [1,2] \ \ \ \ [10^{-5}]$')
    ax.set_ylabel('Number of Samples')
plt.legend(shadow=True, fancybox=True)
plt.savefig('hist2' + output)
plt.close()
#make scatter plots
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(title)
ax.plot(deltae, 'mD', label=r'$e$')
ax.plot(deltae2, 'ys', label=r'$e_{2}$')
ax.plot(deltae1, 'bo', label=r'$e_{1}$')
#ax.fill_between(np.arange(len(deltae1)), np.ones(len(deltae1))*req, ymax, facecolor='red', alpha=0.08)
#ax.fill_between(np.arange(len(deltae1)), -np.ones(len(deltae1))*req, -ymax, facecolor='red', alpha=0.08)
#ax.set_ylim(0.0, 0.2)
ax.set_xlabel('Number of Iterations')
ax.set_ylabel(r'$e_{i}\ , \ \ \ i \in [1,2] \ \ \ \ [10^{-5}]$')
#plt.text(0.5, 0.1,
# r'Average error in $e_{1}=$ %f and $e_{2}=$ %f' % (np.mean(deltae1), np.mean(deltae2)),
# ha='center',
# va='center',
# transform=ax.transAxes)
plt.legend(shadow=True, fancybox=True, numpoints=1)#, ncol=2)
plt.savefig(output)
plt.close()
def plotNumberOfFramesDelta(results, timeStamp=False):
"""
Creates a simple plot to combine and show the results for errors (delta).
:param results: results to be plotted
:type results: dict
:param timeStamp: whether to include a time stamp in the output image
:type timeStamp: bool
"""
txt = '%s' % datetime.datetime.isoformat(datetime.datetime.now())
print '\nDelta results:'
#loop over the number of bias frames combined
for key in results:
fig = plt.figure()
ax = fig.add_subplot(111)
if key == 1:
plt.title(r'VIS Bias Calibration (%i exposure): $\delta e$' % key)
else:
plt.title(r'VIS Bias Calibration (%i exposures): $\delta e$' % key)
de1 = np.asarray(results[key][0])
de2 = np.asarray(results[key][1])
de = np.asarray(results[key][2])
avg1 = np.mean(de1)**2
avg2 = np.mean(de2)**2
avg = np.mean(de)**2
#write down the values
print key, avg, avg1, avg2
plt.text(0.08, 0.9, r'$\left< \delta e_{1} \right>^{2} = %e$' %avg1, fontsize=10, transform=ax.transAxes)
plt.text(0.08, 0.85, r'$\left< \delta e_{2}\right>^{2} = %e$' %avg2, fontsize=10, transform=ax.transAxes)
plt.text(0.08, 0.8, r'$\left< \delta | \bar{e} |\right>^{2} = %e$' %avg, fontsize=10, transform=ax.transAxes)
ax.hist(de, bins=10, color='y', alpha=0.2, label=r'$\delta | \bar{e} |$', normed=True, log=True)
ax.hist(de1, bins=10, color='b', alpha=0.5, label=r'$\delta e_{1}$', normed=True, log=True)
ax.hist(de2, bins=10, color='g', alpha=0.3, label=r'$\delta e_{2}$', normed=True, log=True)
ax.axvline(x=0, ls=':', c='k')
ax.set_ylabel('Probability Density')
ax.set_xlabel(r'$\delta e_{i}\ , \ \ \ i \in [1,2]$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=2.0, ncol=2)
plt.savefig('BiasCalibrationEDelta%i.pdf' % key)
plt.close()
#same for R2s
for key in results:
fig = plt.figure()
if key == 1:
plt.title(r'VIS Bias Calibration (%i exposure): $\frac{\delta R^{2}}{R_{ref}^{2}}$' % key)
else:
plt.title(r'VIS Bias Calibration (%i exposures): $\frac{\delta R^{2}}{R_{ref}^{2}}$' % key)
ax = fig.add_subplot(111)
dR2 = np.asarray(results[key][3])
avg = np.mean(dR2)**2
ax.hist(dR2, bins=20, color='y', label=r'$\frac{\delta R^{2}}{R_{ref}^{2}}$', normed=True, log=True)
print key, avg
plt.text(0.1, 0.9, r'$\left<\frac{\delta R^{2}}{R^{2}_{ref}}\right>^{2} = %e$' %avg, fontsize=10, transform=ax.transAxes)
ax.axvline(x=0, ls=':', c='k')
ax.set_ylabel('Probability Density')
ax.set_xlabel(r'$\frac{\delta R^{2}}{R_{ref}^{2}}$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8)
plt.savefig('BiasCalibrationDeltaSize%i.pdf' % key)
plt.close()
def plotNumberOfFramesSigma(results, reqe=3e-5, reqr2=1e-4, shift=0.1, timeStamp=False):
"""
Creates a simple plot to combine and show the results.
:param results: results to be plotted
:type results: dict
    :param reqe: the ellipticity requirement
    :type reqe: float
    :param reqr2: the R2 requirement
    :type reqr2: float
    :param shift: the amount to shift the e results on the abscissa (for clarity)
:type shift: float
:param timeStamp: whether to include a time stamp in the output image
:type timeStamp: bool
"""
print '\nSigma results:'
txt = '%s' % datetime.datetime.isoformat(datetime.datetime.now())
fig = plt.figure()
plt.title(r'VIS Bias Calibration: $\sigma (e)$')
ax = fig.add_subplot(111)
x = 1
#loop over the number of bias frames combined
for key in results:
e1 = np.asarray(results[key][0])
e2 = np.asarray(results[key][1])
e = np.asarray(results[key][2])
std1 = np.std(e1)
std2 = np.std(e2)
std = np.std(e)
ax.scatter(key-shift, std, c='m', marker='*')
ax.scatter(key, std1, c='b', marker='o')
ax.scatter(key, std2, c='y', marker='s')
x += 1
print key, std, std1, std2
ax.scatter(key-shift, std, c='m', marker='*', label=r'$\sigma (e)$')
ax.scatter(key, std1, c='b', marker='o', label=r'$\sigma (e_{1})$')
ax.scatter(key, std2, c='y', marker='s', label=r'$\sigma (e_{2})$')
ax.fill_between(np.arange(x+1), np.ones(x+1)*reqe, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=reqe, c='g', ls='--', label='Requirement')
ax.set_yscale('log')
ax.set_ylim(1e-8, 1e-4)
ax.set_xlim(0, x)
ax.set_xlabel('Number of Bias Frames Median Combined')
ax.set_ylabel(r'$\sigma (e_{i})\ , \ \ \ i \in [1,2]$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=2.0, ncol=2)
plt.savefig('BiasCalibrationsigmaE.pdf')
plt.close()
#same for R2s
fig = plt.figure()
plt.title(r'VIS Bias Calibration: $\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
ax = fig.add_subplot(111)
ax.axhline(y=0, c='k', ls=':')
x = 1
#loop over the number of bias frames combined
for key in results:
dR2 = np.asarray(results[key][3])
std = np.std(dR2) / np.mean(dR2)
std2 = np.sqrt(np.var(dR2) / np.mean(dR2)**2)
print key, std, std2
ax.scatter(key, std, c='m', marker='*', s=35, zorder=10)
#ax.scatter(key, var, c='b', marker='s', s=35, zorder=10)
x += 1
#for the legend
ax.scatter(key, std, c='m', marker='*', label=r'$\frac{\sigma(R^{2})}{R_{ref}^{2}}$')
#ax.scatter(key, var, c='b', marker='s', label=r'$\frac{\sigma^{2}(R^{2})}{R_{ref}^{4}}$')
#show the requirement
ax.fill_between(np.arange(x+1), np.ones(x+1)*reqr2, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=reqr2, c='g', ls='--', label='Requirement')
ax.set_yscale('log')
ax.set_ylim(1e-5, 1e-3)
ax.set_xlim(0, x)
ax.set_xlabel('Number of Bias Frames Median Combined')
ax.set_ylabel(r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8)
plt.savefig('BiasCalibrationSigmaR2.pdf')
plt.close()
def addReadoutNoise(data, readnoise=4.5, gain=3.1, number=1):
"""
    Add readout noise to the input data. The noise that is added is the median
    of `number` independently drawn readout-noise realisations.
:param data: input data to which the readout noise will be added to [ADUs]
:type data: ndarray
:param readnoise: standard deviation of the read out noise [electrons]
:type readnoise: float
:param gain: the gain factor that is used to convert electrons to ADUs
:type gain: float
:param number: number of read outs to median combine before adding to the data [default=1]
:type number: int
:return: data + read out noise
:rtype: ndarray [same as input data]
"""
shape = data.shape
biases = (np.random.normal(loc=0.0, scale=readnoise, size=(number, shape[0], shape[1]))/gain).astype(np.int)
if number > 1:
bias = np.median(biases, axis=0, overwrite_input=True)
elif number < 1:
sys.exit('ERROR - number of bias frames to create cannot be less than 1')
else:
bias = biases[0]
return data + bias
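def _demoReadoutNoise():
    """
    A minimal sanity-check sketch (not part of the analysis): median combining
    `number` read-outs should shrink the noise scatter roughly as 1/sqrt(number),
    up to quantisation effects from the integer ADU conversion.
    """
    flat = np.zeros((200, 200))
    single = addReadoutNoise(flat.copy(), number=1)
    combined = addReadoutNoise(flat.copy(), number=25)
    print 'std(1 frame) = %.3f ADU, std(25 frames median combined) = %.3f ADU' % \
          (np.std(single), np.std(combined))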
def findTolerableErrorPiston(log, file='data/psf12x.fits', oversample=12.0, samples=12,
psfs=4000, sigma=0.36, iterations=5, debug=False):
"""
Calculate ellipticity and size for PSFs of different scaling when there is a residual
bias offset.
:param sigma: 1sigma radius of the Gaussian weighting function for shape measurements
:type sigma: float
"""
#read in PSF and renormalize it
data = pf.getdata(file)
data /= np.max(data)
if debug:
write.writeFITSfile(data, 'normalizedPSF.fits')
write.writeFITSfile(data.copy()*1e4, 'normalizedPSF2.fits')
#PSF scalings for the peak pixel, in electrons
scales = np.random.random_integers(2000, 2100, psfs)
#set the scale for shape measurement
    settings = dict(sampling=1.0/oversample, iterations=iterations, sigma=sigma)
#residual from a perfectly flat surface, pistons are in electrons
pistons = np.logspace(-5, 1, samples)
tot = pistons.size
res = {}
for i, piston in enumerate(pistons):
        print 'Piston: %i / %i' % (i+1, tot)
R2 = []
e1 = []
e2 = []
e = []
#loop over the PSFs
for scale in scales:
            #piston = normally distributed around the mean = error and scale = readout noise
#ps = np.random.normal(loc=piston, scale=4.5, size=data.shape)
#tmp = data.copy() * scale + ps
#make a copy of the PSF and scale it with the given scaling
#and then add a random piston which is <= the error
tmp = data.copy() * scale + piston
#measure e and R2 from the postage stamp image
sh = shape.shapeMeasurement(tmp.copy(), log, **settings)
results = sh.measureRefinedEllipticity()
#save values
e1.append(results['e1'])
e2.append(results['e2'])
e.append(results['ellipticity'])
R2.append(results['R2'])
out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
res[piston] = out
return res
def pistonKnowledge(log, file='data/psf2x.fits', oversample=2.0, psfs=1000, sigma=0.36, iterations=4, debug=False):
"""
"""
#read in PSF and renormalize it
data = pf.getdata(file)
data /= np.max(data)
data *= 2000.
if debug:
write.writeFITSfile(data, 'normalizedPSF.fits')
#set the scale for shape measurement
    settings = dict(sampling=1.0/oversample, iterations=iterations, sigma=sigma)
#residual from a perfectly flat surface, pistons are in electrons
pistons = np.logspace(-5, 1, 10)
tot = pistons.size
res = {}
for i, piston in enumerate(pistons):
        print 'Piston: %i / %i' % (i+1, tot)
R2 = []
e1 = []
e2 = []
e = []
pss = np.random.random(psfs) * piston
#loop over the PSFs
for ps in pss:
#make a copy of the PSF and scale it with the given scaling
#and then add a random piston which is <= the error
tmp = data.copy() + ps
#measure e and R2 from the postage stamp image
sh = shape.shapeMeasurement(tmp, log, **settings)
results = sh.measureRefinedEllipticity()
#save values
e1.append(results['e1'])
e2.append(results['e2'])
e.append(results['ellipticity'])
R2.append(results['R2'])
out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        #key the results by the realised scatter of the added pistons rather
        #than by the nominal upper limit of the draw
        res[np.std(pss)] = out
return res
def findTolerableErrorSlope(log, file='data/psf12x.fits', oversample=12.0, samples=12,
psfs=4000, sigma=0.36, iterations=5, pixels=60):
"""
Calculate ellipticity and size for PSFs of different scaling when there is a residual
bias slope.
:param sigma: 1sigma radius of the Gaussian weighting function for shape measurements
:type sigma: float
"""
#read in PSF and renormalize it
data = pf.getdata(file)
data /= np.max(data)
#no slope surface
noslope = np.ones(data.shape)
dx = (np.arange(noslope.shape[1]) - noslope.shape[1]/2.) / (pixels * oversample)
#PSF scalings in electrons
scales = np.random.random_integers(2000, 2100, psfs)
#set the scale for shape measurement
    settings = dict(sampling=1.0/oversample, iterations=iterations, sigma=sigma)
#the slope is in electrons
slopes = np.logspace(-4, 0.5, samples)
tot = slopes.size
res = {}
for i, slope in enumerate(slopes):
        print 'Slope: %i / %i' % (i+1, tot)
R2 = []
e1 = []
e2 = []
e = []
#now always in "x" direction
slopeSurface = noslope.copy() * dx.copy() * slope
if i == 7:
write.writeFITSfile(slopeSurface, 'slopeSurface.fits')
#loop over the PSFs
for scale in scales:
#make a copy of the PSF and scale it with the given scaling
#and then add the surface with the slope
tmp = data.copy() * scale + slopeSurface
#measure e and R2 from the postage stamp image
sh = shape.shapeMeasurement(tmp.copy(), log, **settings)
results = sh.measureRefinedEllipticity()
#save values
e1.append(results['e1'])
e2.append(results['e2'])
e.append(results['ellipticity'])
R2.append(results['R2'])
out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
res[slope] = out
return res
def plotTolerableErrorR2(res, title, output, req=1e-4):
fig = plt.figure()
plt.title(title)
ax = fig.add_subplot(111)
#loop over the number of bias frames combined
vals = []
for key in res.keys():
dR2 = res[key]['R2']
normed = np.std(dR2) / np.mean(dR2)
ax.scatter(key, normed, c='m', marker='*', s=35)
vals.append(normed)
print key, normed
#for the legend
ax.scatter(key, normed, c='m', marker='*', label=r'$\frac{\sigma(R^{2})}{R_{ref}^{2}}$')
#show the requirement
ks = np.asarray(res.keys())
ran = np.linspace(ks.min()*0.99, ks.max()*1.01)
ax.fill_between(ran, np.ones(ran.size)*req, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=req, c='g', ls='--', label='Requirement')
#find the crossing
srt = np.argsort(ks)
values = np.asarray(vals)
f = interpolate.interp1d(ks[srt], values[srt])
x = np.logspace(np.log10(ks.min()), np.log10(ks.max()), 100)
vals = f(x)
ax.plot(x, vals, ':', c='0.2', zorder=10)
msk = vals < req
maxn = np.max(x[msk])
plt.text(1e-2, 8e-5, r'Error must be $\leq %.2e$' % maxn, fontsize=11, ha='center', va='center')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(1e-6, 1e-2)
ax.set_xlim(ks.min()*0.99, ks.max()*1.01)
ax.set_xlabel('Error in the Bias Map')
ax.set_ylabel(r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='upper left')
plt.savefig(output)
plt.close()
def plotTolerableErrorE(res, title, output, req=3e-5):
fig = plt.figure()
plt.title(title)
ax = fig.add_subplot(111)
#loop over the number of bias frames combined
vals = []
for key in res.keys():
e1 = np.std(res[key]['e1'])
        e2 = np.std(res[key]['e2'])
e = np.std(res[key]['e'])
vals.append(e)
ax.scatter(key, e1, c='m', marker='*', s=35)
ax.scatter(key, e2, c='y', marker='s', s=35)
ax.scatter(key, e, c='r', marker='o', s=35)
print key, e, e1, e2
#for the legend
ax.scatter(key, e1, c='m', marker='*', label=r'$\sigma(e_{1})$')
ax.scatter(key, e2, c='y', marker='s', label=r'$\sigma(e_{2})$')
ax.scatter(key, e, c='r', marker='o', label=r'$\sigma(e)$')
#show the requirement
ks = np.asarray(res.keys())
ran = np.linspace(ks.min()*0.99, ks.max()*1.01)
ax.fill_between(ran, np.ones(ran.size)*req, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=req, c='g', ls='--', label='Requirement')
#find the crossing
srt = np.argsort(ks)
values = np.asarray(vals)
f = interpolate.interp1d(ks[srt], values[srt])
x = np.logspace(np.log10(ks.min()), np.log10(ks.max()), 100)
vals = f(x)
ax.plot(x, vals, ':', c='0.2', zorder=10)
msk = vals < req
maxn = np.max(x[msk])
plt.text(1e-2, 2e-5, r'Error for $e$ must be $\leq %.2e$' % maxn, fontsize=11, ha='center', va='center')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(1e-6, 1e-2)
ax.set_xlim(ks.min()*0.99, ks.max()*1.01)
ax.set_xlabel('Error in the Bias Map')
ax.set_ylabel(r'$\sigma (e_{i})\ , \ \ \ i \in [1,2]$')
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='upper left')
plt.savefig(output)
plt.close()
def simpleAnalytical(offset=1500, size=(50, 50), readnoise=4.5, gain=3.1, req=0.6):
"""
    A simple Monte Carlo check of whether a pixel area of the given size is
    large enough to recover the bias offset to within the required number of
    electrons, given the readout noise and the gain of the system.
:param offset: the offset level in electrons [default = 1500]
:type offset: int
:param size: area describing the number of pixels available [default = (50, 50)]
:type size: tuple
:param readnoise: readout noise of the full detection chain [default = 4.5]
:type readnoise: float
:param gain: gain of the detection system [default = 3.1]
:type gain: float
    :param req: required level to reach in electrons [default = 0.6]
:type req: float
:return: none
"""
stars = 2000
mc = 50
fail = 0
for a in range(mc):
for x in range(stars):
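            #simulate the pixel area: Gaussian read noise around the offset,
            #converted to ADUs and quantised, then converted back to electrons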
data = np.round((((np.random.normal(loc=0, scale=readnoise, size=size)) + offset) / gain)).astype(np.int)
derived = data * gain - offset
if np.mean(derived) > req:
print 'Not enough pixels to derive the floor level to %.2f electron level' % req
print np.mean(derived), np.median(derived), np.std(derived)
fail += 1
print 'Failed %i times' % fail
print np.mean(derived), np.median(derived), np.std(derived)
if __name__ == '__main__':
run = False
plot = False
error = False
debug = False
simpleAnalytical()
#start the script
log = lg.setUpLogger('biasCalibration.log')
log.info('Testing bias level calibration...')
if error:
if debug:
resPiston = findTolerableErrorPiston(log, file='data/psf1x.fits', oversample=1.0, iterations=4, psfs=500,
samples=8)
resSlope = findTolerableErrorSlope(log, file='data/psf1x.fits', oversample=1.0, iterations=4, psfs=500,
samples=8)
else:
resPiston = findTolerableErrorPiston(log)
resSlope = findTolerableErrorSlope(log)
fileIO.cPickleDumpDictionary(resPiston, 'piston.pk')
plotTolerableErrorE(resPiston, r'VIS Bias Calibration: Piston', output='BiasCalibrationTolerableErrorEPiston.pdf')
plotTolerableErrorR2(resPiston, r'VIS Bias Calibration: Piston', output='BiasCalibrationTolerableErrorR2Piston.pdf')
fileIO.cPickleDumpDictionary(resSlope, 'slope.pk')
plotTolerableErrorE(resSlope, r'VIS Bias Calibration: Tilt', output='BiasCalibrationTolerableErrorESlope.pdf')
plotTolerableErrorR2(resSlope, r'VIS Bias Calibration: Tilt', output='BiasCalibrationTolerableErrorR2Slope.pdf')
if run:
print '\nSigma run:'
resultsSigma = testBiasCalibrationSigma(log, biases=10, psfs=5000, surfaces=100,
file='psf1xhighe.fits', plots=False)
fileIO.cPickleDumpDictionary(resultsSigma, 'biasResultsSigma.pk')
print '\nDelta run:'
resultsDelta = testBiasCalibrationDelta(log, biases=2, psfs=5000, surfaces=100,
plots=False, file='psf1xhighe.fits')
fileIO.cPickleDumpDictionary(resultsDelta, 'biasResultsDelta.pk')
if plot:
if not run:
resultsDelta = cPickle.load(open('biasResultsDelta.pk'))
resultsSigma = cPickle.load(open('biasResultsSigma.pk'))
plotNumberOfFramesSigma(resultsSigma)
plotNumberOfFramesDelta(resultsDelta)
log.info('Run finished...\n\n\n')
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class BasicOperations(object):
"""BasicOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_valid(
self, custom_headers={}, raw=False, **operation_config):
"""
Get complex type {id: 2, name: 'abc', color: 'YELLOW'}
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Basic
<fixtures.acceptancetestsbodycomplex.models.Basic>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/basic/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Basic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_valid(
self, complex_body, custom_headers={}, raw=False, **operation_config):
"""
Please put {id: 2, name: 'abc', color: 'Magenta'}
:param complex_body: Please put {id: 2, name: 'abc', color: 'Magenta'}
:type complex_body: :class:`Basic
<fixtures.acceptancetestsbodycomplex.models.Basic>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/basic/valid'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(complex_body, 'Basic')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_invalid(
self, custom_headers={}, raw=False, **operation_config):
"""
Get a basic complex type that is invalid for the local strong type
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Basic
<fixtures.acceptancetestsbodycomplex.models.Basic>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/basic/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Basic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_empty(
self, custom_headers={}, raw=False, **operation_config):
"""
Get a basic complex type that is empty
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Basic
<fixtures.acceptancetestsbodycomplex.models.Basic>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/basic/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Basic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_null(
self, custom_headers={}, raw=False, **operation_config):
"""
Get a basic complex type whose properties are null
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Basic
<fixtures.acceptancetestsbodycomplex.models.Basic>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/basic/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Basic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_not_provided(
self, custom_headers={}, raw=False, **operation_config):
"""
Get a basic complex type while the server doesn't provide a response
payload
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Basic
<fixtures.acceptancetestsbodycomplex.models.Basic>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/complex/basic/notprovided'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Basic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
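# A minimal usage sketch (illustrative; assumes the generated
# AutoRestComplexTestService client from the same fixtures package):
#
#     from fixtures.acceptancetestsbodycomplex import AutoRestComplexTestService
#     client = AutoRestComplexTestService(base_url='http://localhost:3000')
#     basic = client.basic.get_valid()   # -> models.Basic(id=2, name='abc', ...)
#     basic.color = 'Magenta'
#     client.basic.put_valid(basic)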
|
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import json
import datetime
from jinja2.utils import urlize
from google.appengine.ext import db
from poker.firebase import send_firebase_message
__all__ = [
'Game',
'Participant',
'Story',
'Round',
'Estimate',
]
class Game(db.Model):
DECK_CHOICES = (
(1 , ('1', '2', '3', '5', '8', '13', '21', '100', '?', 'Coffee')),
(2 , ('0', '1/2' , '1', '2', '3', '5', '8', '13', '20', '40', '60', '100', '?', 'oo')),
(3 , ('0', '1', '2', '3', '5', '8', '13', '21', '44', '?', 'oo')),
)
name = db.StringProperty(required = True)
deck = db.IntegerProperty(required = True, choices = [deck[0] for deck in DECK_CHOICES])
completed = db.BooleanProperty(default = False)
user = db.UserProperty(required = True)
current_story_id = db.IntegerProperty()
created = db.DateTimeProperty(auto_now_add = True)
def get_deck(self):
for deck in self.DECK_CHOICES:
if self.deck == deck[0]:
return deck[1]
return ()
def get_participants(self):
return Participant.all().ancestor(self).order("created")
def get_stories(self):
return Story.all().ancestor(self).order("created")
def get_url(self):
game_url = '/game/' + str(self.key().id())
return game_url
def get_current_story(self):
if not self.current_story_id:
return None
return Story.get_by_id(self.current_story_id, self)
def get_participant_messages(self):
messages = []
for participant in self.get_participants():
message = participant.get_message()
messages.append(message)
return messages
def get_story_messages(self):
messages = []
for story in self.get_stories():
message = story.get_message()
messages.append(message)
return messages
def get_current_story_message(self):
current_story = self.get_current_story()
if not current_story:
return None
return current_story.get_message()
def get_message(self):
message = {
'id': self.key().id(),
'name': self.name,
'deck': self.get_deck(),
'completed': self.completed,
'user': self.user.user_id(),
'current_story': self.get_current_story_message(),
'url': self.get_url(),
'participants': self.get_participant_messages(),
'stories': self.get_story_messages(),
}
return message
def send_update(self, force = True, user = None):
message = self.get_message()
message = json.dumps(message)
participants = self.get_participants()
for participant in participants:
if user and participant.user == user:
force = True
participant.send_update(message, force)
def get_user_estimates(self, user):
estimates = {}
if not user:
return estimates
for story in self.get_stories():
for round in story.get_rounds():
estimate = round.get_estimate(user)
round_id = round.key().id()
if estimate:
card = estimate.card
estimates[round_id] = card
return estimates
def delete(self, **kwargs):
participants = self.get_participants()
for participant in participants:
participant.send_update(None, True)
db.delete(Participant.all(keys_only = True).ancestor(self))
stories = self.get_stories()
for story in stories:
story.delete()
super(Game, self).delete(**kwargs)
class Participant(db.Model):
user = db.UserProperty(required = True)
name = db.StringProperty()
photo = db.StringProperty()
created = db.DateTimeProperty(auto_now_add = True)
observer = db.BooleanProperty(required = True, default = False)
last_update = db.DateTimeProperty(auto_now_add = True)
def get_url(self):
game_url = self.parent().get_url()
participant_url = game_url + '/participant/' + self.key().name()
return participant_url
def get_name(self):
if self.name:
return self.name
else:
return self.user.nickname()
def get_message(self):
message = {
'user': self.user.user_id(),
'name': self.get_name(),
'photo': self.photo,
'observer': self.observer,
'url': self.get_url()
}
return message
def send_update(self, message, force):
if force or self.need_update():
self.last_update = datetime.datetime.now()
self.put()
send_firebase_message(self.key().name(), message)
def need_update(self):
return datetime.datetime.now() - self.last_update > datetime.timedelta(seconds = 1)
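# The throttle above means unforced updates reach Firebase at most once per
# second per participant; pass force=True (as Game.delete does) to bypass it.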
class Story(db.Model):
SKIPPED = -1
name = db.StringProperty(required = True)
estimate = db.IntegerProperty()
created = db.DateTimeProperty(auto_now_add = True)
def get_rounds(self):
return Round.all().ancestor(self).order("created")
def get_estimate(self):
game = self.parent()
deck = game.get_deck()
card = self.estimate
if card == self.SKIPPED:
return card
if card is None:
return None
try:
estimate = deck[card]
except IndexError:
return None
return estimate
def get_name_display(self):
return urlize(self.name, 80)
def get_url(self):
game_url = self.parent().get_url()
story_url = game_url + '/story/' + str(self.key().id())
return story_url
def is_current(self):
game = self.parent()
is_current = game.current_story_id == self.key().id()
return is_current
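# new_round completes any open rounds and clears the story estimate, so at
# most one round per story is ever active.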
def new_round(self):
rounds = self.get_rounds()
for round in rounds:
round.completed = True
round.put()
round = Round(
parent = self
)
round.put()
self.estimate = None
self.put()
return round
def get_round_messages(self):
messages = []
if not self.is_current():
return messages
for round in self.get_rounds():
message = round.get_message()
messages.append(message)
return messages
def get_message(self):
message = {
'id': self.key().id(),
'name': self.get_name_display(),
'estimate': self.get_estimate(),
'url': self.get_url(),
'is_current': self.is_current(),
'rounds': self.get_round_messages(),
}
return message
def delete(self, **kwargs):
rounds = self.get_rounds()
for round in rounds:
round.delete()
super(Story, self).delete(**kwargs)
class Round(db.Model):
completed = db.BooleanProperty(default = False)
created = db.DateTimeProperty(auto_now_add = True)
def get_estimates(self):
return Estimate.all().ancestor(self).order("created")
def get_url(self):
story_url = self.parent().get_url()
round_url = story_url + '/round/' + str(self.key().id())
return round_url
def get_estimate(self, user):
if not user:
return None
estimate_key = str(self.key().id()) + str(user.user_id())
estimate = Estimate.get_by_key_name(estimate_key, self)
return estimate
def get_estimate_messages(self):
messages = []
for estimate in self.get_estimates():
message = estimate.get_message()
messages.append(message)
return messages
def get_message(self):
message = {
'id': self.key().id(),
'completed': self.completed,
'url': self.get_url(),
'estimates': self.get_estimate_messages(),
}
return message
def delete(self, **kwargs):
db.delete(Estimate.all(keys_only = True).ancestor(self))
super(Round, self).delete(**kwargs)
class Estimate(db.Model):
user = db.UserProperty(required = True)
card = db.IntegerProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
def get_message(self):
message = {
'user': self.user.user_id(),
'name': self.user.nickname(),
'card': self.get_card(),
}
return message
def get_card(self):
round = self.parent()
if not round.completed:
return None
story = round.parent()
game = story.parent()
deck = game.get_deck()
card = self.card
try:
estimate = deck[card]
except IndexError:
return None
return estimate
|
|
import glob
import logging
import os
import re
import sys
from pprint import pprint, pformat
import uuid
import sloelib
class SloePluginOarstack(sloelib.SloeBasePlugIn):
EVENT_DATA = dict(
mays2011=dict(
year=2011,
capture_device="Canon XF100 camera",
capture_info="720p/50fps",
genspec_name_ytf="youtube720,I=50p,S=1",
genspec_name_ytnf="youtube720,I=50p,S=4",
location="Cambridge, UK",
event_title="May Bumps 2011",
tags="Rowing (Sport),Cambridge,May Bumps 2011",
title="May Bumps 2011",
wed=dict(
title="Wednesday",
subevent_title="Wednesday",
event_time="Wednesday 15th June 2011"
),
thurs=dict(
title="Thursday",
subevent_title="Thursday",
event_time="Thursday 16th June 2011"
),
fri=dict(
title="Friday",
subevent_title="Friday",
event_time="Friday 17th June 2011"
),
sat=dict(
title="Saturday",
subevent_title="Saturday",
event_time="Saturday 18th June 2011"
)
),
mays2018=dict(
year=2018,
capture_device="Panasonic GH5S camera",
capture_info="1080p/60fps",
genspec_name_ytf="youtube,I=60p,S=1",
genspec_name_ytnf="youtube,I=60p,S=4",
location="Cambridge, UK",
event_title="May Bumps 2018",
tags="Rowing (Sport),Cambridge,May Bumps 2018",
title="May Bumps 2018",
wed=dict(
title="Wednesday",
subevent_title="Wednesday",
event_time="Wednesday 13th June 2018"
),
thurs=dict(
title="Thursday",
subevent_title="Thursday",
event_time="Thursday 14th June 2018"
)
),
mays2019=dict(
year=2019,
capture_device="Sony a6400 camera",
capture_info="1080p/120fps",
genspec_name_ytf="youtube,I=120p,S=1",
genspec_name_ytnf="youtube,I=120p,S=8",
location="Cambridge, UK",
event_title="May Bumps 2019",
tags="Rowing (Sport),Cambridge,May Bumps 2019",
title="May Bumps 2019",
wed=dict(
title="Wednesday",
subevent_title="Wednesday",
event_time="Wednesday 12th June 2019"
),
thurs=dict(
title="Thursday",
subevent_title="Thursday",
event_time="Thursday 13th June 2019"
)
),
townbumps2019=dict(
year=2019,
capture_device="Sony a6400 camera",
capture_info="1080p/120fps",
genspec_name_ytf="youtube,I=120p,S=1",
genspec_name_ytnf="youtube,I=120p,S=8",
location="Cambridge, UK",
event_title="Town Bumps 2019",
tags="Rowing (Sport),Cambridge,Town Bumps 2019",
title="Town Bumps 2019",
wed=dict(
title="Wednesday",
subevent_title="Wednesday",
event_time="Wednesday 12th June 2019"
),
thurs=dict(
title="Thursday",
subevent_title="Thursday",
event_time="Thursday 13th June 2019"
)
),
townbumps2021=dict(
year=2021,
capture_device="Sony a7R III camera",
capture_info="1080p/120fps",
genspec_name_ytf="youtube,I=120p,S=1",
genspec_name_ytnf="youtube,I=120p,S=8",
location="Cambridge, UK",
event_title="Town Bumps 2021",
tags="Rowing (Sport),Cambridge,Town Bumps 2021",
title="Town Bumps 2021",
mon=dict(
title="Monday",
subevent_title="Monday",
event_time="Monday 19th July 2021"
),
tues=dict(
title="Tuesday",
subevent_title="Tuesday",
event_time="Tuesday 20th July 2021"
),
wed=dict(
title="Wednesday",
subevent_title="Wednesday",
event_time="Wednesday 21st July 2021"
),
thurs=dict(
title="Thursday",
subevent_title="Thursday",
event_time="Thursday 22nd July 2021"
),
fri=dict(
title="Friday",
subevent_title="Friday",
event_time="Friday 23rd July 2021"
)
)
)
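# Sketch of how EVENT_DATA is consumed: for params ('mays2019', 'wed', ...),
# command_makemays below reads event-level fields from EVENT_DATA['mays2019']
# and day-level fields (title, subevent_title, event_time) from
# EVENT_DATA['mays2019']['wed'].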
def command_makemays(self, params, options):
pprint(params)
event_dir, day_dir, div_dir = params
event_data = self.EVENT_DATA[event_dir]
current_year = event_data['year']
div_code = "%s%s" % (div_dir[3].upper(), div_dir[4])
root_dir = sloelib.SloeConfig.inst().get_global("treeroot")
primary_div_path = os.path.join(root_dir, 'primary', 'derived', event_dir, day_dir, div_dir)
primary_event_path = os.path.join(root_dir, 'primary', 'derived', event_dir)
primary_day_path = os.path.join(root_dir, 'primary', 'derived', event_dir, day_dir)
final_div_path = os.path.join(root_dir, 'final', 'derived', event_dir, day_dir, div_dir)
final_event_path = os.path.join(root_dir, 'final', 'derived', event_dir)
final_day_path = os.path.join(root_dir, 'final', 'derived', event_dir, day_dir)
if not os.path.exists(primary_div_path):
os.makedirs(primary_div_path)
if not os.path.exists(final_div_path):
os.makedirs(final_div_path)
# Primary albums
if not glob.glob(os.path.join(primary_event_path, '*ALBUM*ini')):
# Create the primary top level event album
primary_event_album = sloelib.SloeAlbum()
primary_event_album.create_new(event_dir, primary_event_path)
primary_event_album.set_values(
name=event_dir,
capture_device=event_data['capture_device'],
capture_info=event_data['capture_info'],
location=event_data['location'],
event_title=event_data['event_title'],
tags=event_data['tags'],
title=event_data['title']
)
primary_event_album.save_to_file()
if not glob.glob(os.path.join(primary_day_path, '*ALBUM*ini')):
# Create the primary day level event album
primary_day_album = sloelib.SloeAlbum()
primary_day_album.create_new(day_dir, primary_day_path)
primary_day_album.set_values(
name=day_dir,
event_time=event_data[day_dir]['event_time'],
subevent_title=event_data[day_dir]['subevent_title'],
title=event_data[day_dir]['title']
)
primary_day_album.save_to_file()
if not glob.glob(os.path.join(primary_div_path, '*ALBUM*ini')):
# Create the primary division level event album
primary_div_album = sloelib.SloeAlbum()
primary_div_album.create_new(div_dir, primary_div_path)
primary_div_album.set_values(
title="division %s" % div_code,
subevent_title="division %s" % div_code
)
primary_div_album.save_to_file()
# Final albums
if not glob.glob(os.path.join(final_event_path, '*ALBUM*ini')):
# Create the final top level event album
final_event_album = sloelib.SloeAlbum()
final_event_album.create_new(event_dir, final_event_path)
final_event_album.set_values(
source_album_uuid=primary_event_album.uuid,
title=event_data['title']
)
final_event_album.save_to_file()
if not glob.glob(os.path.join(final_day_path, '*ALBUM*ini')):
# Create the final day album
final_day_album = sloelib.SloeAlbum()
final_day_album.create_new(day_dir, final_day_path)
final_day_album.set_values(
title=event_data[day_dir]['title'],
source_album_uuid = primary_day_album.uuid
)
final_day_album.save_to_file()
if not glob.glob(os.path.join(final_div_path, '*ALBUM*ini')):
# Create the final division album
final_div_album = sloelib.SloeAlbum()
final_div_album.create_new(div_dir, final_div_path)
final_div_album.set_values(
name=div_dir,
title="division %s" % div_code,
source_album_uuid = primary_div_album.uuid
)
final_div_album.save_to_file()
# Primary OutputSpecs
if not glob.glob(os.path.join(primary_event_path, '*OUTPUTSPEC*ini')):
# Create the event level output specs
event_outputspec_ytf = sloelib.SloeOutputSpec()
event_outputspec_ytf.create_new()
event_outputspec_ytf.set_values(
name="%s-ytf" % event_dir,
genspec_name=event_data['genspec_name_ytf'],
glob_include="*",
output_path="final/derived/{subtree}/{basename}{suffix}{ext}",
priority=1000
)
event_outputspec_ytf.add_filepath_info(os.path.join(primary_event_path, 'dummy'))
event_outputspec_ytf.save_to_file()
event_outputspec_ytnf = sloelib.SloeOutputSpec()
event_outputspec_ytnf.create_new()
event_outputspec_ytnf.set_values(
name="%s-yt8" % event_dir,
genspec_name=event_data['genspec_name_ytnf'],
glob_include="*",
output_path="final/derived/{subtree}/{basename}{suffix}{ext}",
priority=500
)
event_outputspec_ytnf.add_filepath_info(os.path.join(primary_event_path, 'dummy'))
event_outputspec_ytnf.save_to_file()
# Final TransferSpecs
if not glob.glob(os.path.join(final_event_path, '*TRANSFERSPEC*ini')):
div_transferspec = sloelib.SloeTransferSpec()
div_transferspec.create_new()
div_transferspec.set_values(
name="%s-yt-div" % event_dir,
priority="1000",
transfer_type="youtube",
selectors="final/*/%s/*/div*" % event_dir,
# Category 17 is Sport
youtube_category="17",
youtube_description='#{ join(" ", origintree.event_title ) } #{ join(" ", origintree.subevent_title ) }, Crew #{ join(" ", originitem.name) } (#{ closest(origintree.event_time) }, #{ closest(origintree.location) })\n\nAnalysis: http://analysis.oarstack.com/yt/#{localitem.uuid}\n\nCapture: #{ closest(origintree.capture_device) } #{ closest(origintree.capture_info) }\nThis render: #{ join(". ", genspec.output_description, genspec.output_note) }.\n\nContact: info@oarstack.com quoting reference #{remoteitem.uuid}.',
youtube_privacy="public",
youtube_tags='#{ join(",", origintree.tags) },Slow Motion,yt:quality=high',
youtube_title='#{ originitem.name }, #{ join(" ", origintree.event_title, genspec.output_short_description) } [#{ closest(origintree.sitetag) }]'
)
div_transferspec.add_filepath_info(os.path.join(final_event_path, 'dummy'))
div_transferspec.save_to_file()
if not glob.glob(os.path.join(final_div_path, '*ALBUM*ini')):
raise sloelib.SloeError("Final album missing from %s" % final_div_path)
def make_playlist(subname, title, selector, short_speed, long_speed, tags):
playlist = sloelib.SloePlaylist()
playlist.create_new("+%s-%s" % (div_dir, subname), title, "1000", final_div_path)
playlist.set_values(
title=title,
transfer_type="youtube",
youtube_description="#{ join(\" \", origintree.event_title ) } #{ join(\" \", origintree.subevent_title ) } %s(#{ closest(origintree.event_time) }, #{ closest(origintree.location) })\n\nContact: info@oarstack.com" % long_speed,
youtube_privacy="public",
youtube_tags="#{ join(\",\", origintree.tags) }%s" % tags,
youtube_title="#{ join(\" \", origintree.event_title, origintree.subevent_title ) } %s [#{ closest(origintree.sitetag) }]" % short_speed
)
if selector is not None:
playlist.set_values(selector_genspec_name=selector)
pprint(playlist)
playlist.save_to_file()
if not glob.glob(os.path.join(final_div_path, '*PLAYLIST*ini')):
make_playlist("all", "Cambridge May Bumps %d division %s" % (current_year, div_code), None, "normal and slow motion", "alternating normal speed and slow motion ", ",Slow Motion")
if current_year == 2020:
# 240fps era
make_playlist("ytf", "Cambridge May Bumps %d division %s normal speed" % (current_year, div_code), "youtube,I=240p,S=1", "normal speed", "", "")
make_playlist("yt16", "Cambridge May Bumps %d division %s slow motion" % (current_year, div_code), "youtube,I=240p,S=16", "slow motion", "slow motion ", ",Slow Motion")
elif current_year in (2017, 2019, 2021):
# 120fps era
make_playlist("ytf", "Cambridge Town Bumps %d division %s normal speed" % (current_year, div_code), "youtube,I=120p,S=1", "normal speed", "", "")
make_playlist("yt8", "Cambridge Town Bumps %d division %s slow motion" % (current_year, div_code), "youtube,I=120p,S=8", "slow motion", "slow motion ", ",Slow Motion")
elif current_year in (2011, 2012):
# 720p/50fps
make_playlist("ytf", "Cambridge May Bumps %d division %s normal speed" % (current_year, div_code), "youtube720,I=50p,S=1", "normal speed", "", "")
make_playlist("ytq", "Cambridge May Bumps %d division %s slow motion" % (current_year, div_code), "youtube720,I=50p,S=4", "slow motion", "slow motion ", ",Slow Motion")
else:
# 60fps era
make_playlist("ytf", "Cambridge May Bumps %d division %s normal speed" % (current_year, div_code), "youtube,I=60p,S=1", "normal speed", "", "")
make_playlist("ytq", "Cambridge May Bumps %d division %s slow motion" % (current_year, div_code), "youtube,I=60p,S=4", "slow motion", "slow motion ", ",Slow Motion")
def command_makeevent(self, params, options):
pprint(params)
event_name, event_dir, div_dir = params
div_code = div_dir
root_dir = sloelib.SloeConfig.inst().get_global("treeroot")
primary_path = os.path.join(root_dir, 'primary', 'derived', event_dir, div_dir)
final_path = os.path.join(root_dir, 'final', 'derived', event_dir, div_dir)
if not os.path.exists(os.path.dirname(primary_path)):
raise sloelib.SloeError("Primary path '%s' missing" % os.path.dirname(primary_path))
if not os.path.exists(os.path.dirname(final_path)):
raise sloelib.SloeError("Final path '%s' missing" % os.path.dirname(final_path))
if os.path.exists(primary_path):
raise sloelib.SloeError("Dir %s already exists" % primary_path)
else:
os.mkdir(primary_path)
if os.path.exists(final_path):
raise sloelib.SloeError("Dir %s already exists" % final_path)
else:
os.mkdir(final_path)
primary_album = sloelib.SloeAlbum()
primary_album.create_new(div_dir, primary_path)
primary_album.set_values(
title="division %s" % div_code,
subevent_title="division %s" % div_code
)
primary_album.save_to_file()
final_album = sloelib.SloeAlbum()
final_album.create_new(div_dir, final_path)
final_album.set_values(
title="division %s" % div_code,
source_album_uuid = primary_album.uuid
)
final_album.save_to_file()
def make_playlist(subname, title, selector, short_speed, long_speed, tags):
playlist = sloelib.SloePlaylist()
playlist.create_new("+%s-%s" % (div_dir, subname), title, "1000", final_path)
playlist.set_values(
title=title,
transfer_type="youtube",
youtube_description="#{ join(\" \", origintree.event_title ) } #{ join(\" \", origintree.subevent_title ) } %s(#{ closest(origintree.event_time) }, #{ closest(origintree.location) })\n\nContact: info@oarstack.com" % long_speed,
youtube_privacy="public",
youtube_tags="#{ join(\",\", origintree.tags) }%s" % tags,
youtube_title="#{ join(\" \", origintree.event_title, origintree.subevent_title ) } %s [#{ closest(origintree.sitetag) }]" % short_speed
)
if selector is not None:
playlist.set_values(selector_genspec_name=selector)
pprint(playlist)
playlist.save_to_file()
make_playlist("all", "%s division %s" % (event_name, div_code), None, "normal and slow motion", "alternating normal speed and slow motion ", ",Slow Motion")
make_playlist("ytf", "%s division %s normal speed" % (event_name, div_code), "youtube,I=60p,S=1", "normal speed", "", "")
make_playlist("ytq", "%s division %s slow motion" % (event_name, div_code), "youtube,I=60p,S=4", "slow motion", "slow motion ", ",Slow Motion")
def command_oarstacklist(self, params, options):
tree = sloelib.SloeTree.inst()
tree.load()
for subtree, album, items in sloelib.SloeTreeUtil.walk_items(tree.root_album):
indent = ""
if sloelib.SloeTreeUtil.object_matches_selector(album, params):
try:
for remoteplaylist in album.remoteplaylists:
ids = sloelib.SloeUtil.extract_common_id(remoteplaylist.common_id)
playlist = sloelib.SloeTreeNode.get_object_by_uuid(ids['P'])
title = playlist.title
if title[-1] in "0123456789":
title += " normal and slow motion"
print '<a title="%s" href="https://www.youtube.com/playlist?list=%s">%s</a>' % (title, remoteplaylist.remote_id, title)
except KeyError, e:
logging.error("Missing attribute for %s (%s)" % (album.get("name", "<Unknown>"), str(e)))
raise e
SloePluginOarstack("oarstack")
|
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import datetime
import locale
import os
import sys
import re
import random
import struct
import traceback
import argparse
import subprocess as sp
import unicodedata
from os.path import dirname, join
from doge import wow
ROOT = join(dirname(__file__), 'static')
DEFAULT_DOGE = 'doge.txt'
class Doge(object):
def __init__(self, tty, ns):
self.tty = tty
self.ns = ns
self.doge_path = join(ROOT, ns.doge_path or DEFAULT_DOGE)
if ns.frequency:
# such frequency based
self.words = \
wow.FrequencyBasedDogeDeque(*wow.WORD_LIST, step=ns.step)
else:
self.words = wow.DogeDeque(*wow.WORD_LIST)
def setup(self):
# Setup seasonal data
self.setup_seasonal()
if self.tty.pretty:
# stdout is a tty, load Shibe and calculate how wide he is
doge = self.load_doge()
max_doge = max(map(clean_len, doge)) + 15
else:
# stdout is being piped and we should not load Shibe
doge = []
max_doge = 15
if self.tty.width < max_doge:
# Shibe won't fit, so abort.
sys.stderr.write('wow, such small terminal\n')
sys.stderr.write('no doge under {0} column\n'.format(max_doge))
sys.exit(1)
# Check for prompt height so that we can fill the screen minus how high
# the prompt will be when done.
prompt = os.environ.get('PS1', '').split('\n')
line_count = len(prompt) + 1
# Create a list filled with empty lines and Shibe at the bottom.
fill = range(self.tty.height - len(doge) - line_count)
self.lines = ['\n' for x in fill]
self.lines += doge
# Try to fetch data fed thru stdin
had_stdin = self.get_stdin_data()
# Get some system data, but only if there was nothing in stdin
if not had_stdin:
self.get_real_data()
# Apply the text around Shibe
self.apply_text()
def setup_seasonal(self):
"""
Check if there's some seasonal holiday going on, setup appropriate
Shibe picture and load holiday words.
Note: if there are two or more holidays defined for a certain date,
the first one takes precedence.
"""
# If we've specified a season, just run that one
if self.ns.season:
return self.load_season(self.ns.season)
# If we've specified another doge or no doge at all, it does not make
# sense to use seasons.
if self.ns.doge_path is not None and not self.ns.no_shibe:
return
now = datetime.datetime.now()
for season, data in wow.SEASONS.items():
start, end = data['dates']
start_dt = datetime.datetime(now.year, start[0], start[1])
# Be sane if the holiday season spans over New Year's day.
end_dt = datetime.datetime(
now.year + (start[0] > end[0] and 1 or 0), end[0], end[1])
if start_dt <= now <= end_dt:
# Wow, much holiday!
return self.load_season(season)
def load_season(self, season_key):
if season_key == 'none':
return
season = wow.SEASONS[season_key]
self.doge_path = join(ROOT, season['pic'])
self.words.extend(season['words'])
def apply_text(self):
"""
Apply text around doge
"""
# Calculate a random sampling of lines that are to have text applied
# onto them. Return value is a sorted list of line index integers.
linelen = len(self.lines)
affected = sorted(random.sample(range(linelen), int(linelen / 3.5)))
for i, target in enumerate(affected, start=1):
line = self.lines[target]
line = re.sub('\n', ' ', line)
word = self.words.get()
# If first or last line, or a random selection, use standalone wow.
if i == 1 or i == len(affected) or random.choice(range(20)) == 0:
word = 'wow'
# Generate a new DogeMessage, possibly based on a word.
self.lines[target] = DogeMessage(self, line, word).generate()
def load_doge(self):
"""
Return pretty ASCII Shibe.
wow
"""
if self.ns.no_shibe:
return ['']
with open(self.doge_path) as f:
if sys.version_info < (3, 0):
if locale.getpreferredencoding() == 'UTF-8':
doge_lines = [l.decode('utf-8') for l in f.xreadlines()]
else:
# encode to printable characters, leaving a space in place
# of untranslatable characters, resulting in a slightly
# blockier doge on non-UTF8 terminals
doge_lines = [
l.decode('utf-8')
.encode(locale.getpreferredencoding(), 'replace')
.replace('?', ' ')
for l in f.xreadlines()
]
else:
doge_lines = [l for l in f.readlines()]
return doge_lines
def get_real_data(self):
"""
Grab actual data from the system
"""
ret = []
username = os.environ.get('USER')
if username:
ret.append(username)
editor = os.environ.get('EDITOR')
if editor:
editor = editor.split('/')[-1]
ret.append(editor)
# OS, hostname and... architecture (because lel)
if hasattr(os, 'uname'):
uname = os.uname()
ret.append(uname[0])
ret.append(uname[1])
ret.append(uname[4])
# Grab actual files from $HOME.
files = os.listdir(os.environ.get('HOME'))
if files:
ret.append(random.choice(files))
# Grab some processes
ret += self.get_processes()[:2]
# Prepare the returned data. First, lowercase it.
# If there is unicode data being returned from any of the above
# Python 2 needs to decode the UTF bytes to not crash. See issue #45.
func = str.lower
if sys.version_info < (3,):
func = lambda x: str.lower(x).decode('utf-8')
self.words.extend(map(func, ret))
def filter_words(self, words, stopwords, min_length):
return [word for word in words if
len(word) >= min_length and word not in stopwords]
def get_stdin_data(self):
"""
Get words from stdin.
"""
if self.tty.in_is_tty:
# No pipez found
return False
if sys.version_info < (3, 0):
stdin_lines = (l.decode('utf-8') for l in sys.stdin.xreadlines())
else:
stdin_lines = (l for l in sys.stdin.readlines())
rx_word = re.compile(r"\w+", re.UNICODE)
# If we have stdin data, we should remove everything else!
self.words.clear()
word_list = [match.group(0)
for line in stdin_lines
for match in rx_word.finditer(line.lower())]
if self.ns.filter_stopwords:
word_list = self.filter_words(
word_list, stopwords=wow.STOPWORDS,
min_length=self.ns.min_length)
self.words.extend(word_list)
return True
def get_processes(self):
"""
Grab a shuffled list of all currently running process names
"""
procs = set()
try:
# POSIX ps, so it should work in most environments where doge would run
p = sp.Popen(['ps', '-A', '-o', 'comm='], stdout=sp.PIPE)
output, error = p.communicate()
if sys.version_info > (3, 0):
output = output.decode('utf-8')
for comm in output.split('\n'):
name = comm.split('/')[-1]
# Filter short and weird ones
if name and len(name) >= 2 and ':' not in name:
procs.add(name)
finally:
# Either it executed properly or no ps was found.
proc_list = list(procs)
random.shuffle(proc_list)
return proc_list
def print_doge(self):
for line in self.lines:
if sys.version_info < (3, 0):
line = line.encode('utf8')
sys.stdout.write(line)
sys.stdout.flush()
class DogeMessage(object):
"""
A randomly placed and randomly colored message
"""
def __init__(self, doge, occupied, word):
self.doge = doge
self.tty = doge.tty
self.occupied = occupied
self.word = word
def generate(self):
if self.word == 'wow':
# Standalone wow. Don't apply any prefixes or suffixes.
msg = self.word
else:
# Add a prefix.
msg = u'{0} {1}'.format(wow.PREFIXES.get(), self.word)
# Seldomly add a suffix as well.
if random.choice(range(15)) == 0:
msg += u' {0}'.format(wow.SUFFIXES.get())
# Calculate the maximum possible spacer
interval = self.tty.width - onscreen_len(msg)
interval -= clean_len(self.occupied)
if interval < 1:
# The interval is too low, so the message can not be shown without
# spilling over to the subsequent line, borking the setup.
# Return the doge slice that was in this row if there was one,
# and a line break, effectively disabling the row.
return self.occupied + "\n"
# Apply spacing
msg = u'{0}{1}'.format(' ' * random.choice(range(interval)), msg)
if self.tty.pretty:
# Apply pretty ANSI color coding.
msg = u'\x1b[1m\x1b[38;5;{0}m{1}\x1b[39m\x1b[0m'.format(
wow.COLORS.get(), msg
)
# Line ends are pretty cool guys, add one of those.
return u'{0}{1}\n'.format(self.occupied, msg)
class TTYHandler(object):
def setup(self):
self.height, self.width = self.get_tty_size()
self.in_is_tty = sys.stdin.isatty()
self.out_is_tty = sys.stdout.isatty()
self.pretty = self.out_is_tty
if sys.platform == 'win32' and os.getenv('TERM') == 'xterm':
self.pretty = True
def _tty_size_windows(self, handle):
try:
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(handle)
buf = create_string_buffer(22)
if windll.kernel32.GetConsoleScreenBufferInfo(h, buf):
left, top, right, bottom = struct.unpack('4H', buf.raw[10:18])
return right - left + 1, bottom - top + 1
except:
pass
def _tty_size_linux(self, fd):
try:
import fcntl
import termios
return struct.unpack(
'hh',
fcntl.ioctl(
fd, termios.TIOCGWINSZ, struct.pack('hh', 0, 0)
)
)
except:
return
def get_tty_size(self):
"""
Get the current terminal size without using a subprocess
http://stackoverflow.com/questions/566746
I have no clue what-so-fucking ever over how this works or why it
returns the size of the terminal in both cells and pixels. But hey, it
does.
"""
if sys.platform == 'win32':
# stdin, stdout, stderr = -10, -11, -12
ret = self._tty_size_windows(-10)
ret = ret or self._tty_size_windows(-11)
ret = ret or self._tty_size_windows(-12)
else:
# stdin, stdout, stderr = 0, 1, 2
ret = self._tty_size_linux(0)
ret = ret or self._tty_size_linux(1)
ret = ret or self._tty_size_linux(2)
return ret or (25, 80)
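# Example (sketch): on an 80x24 POSIX terminal the ioctl branch typically
# returns (24, 80), i.e. (rows, columns); (25, 80) is the fallback when the
# size cannot be read from stdin, stdout or stderr.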
def clean_len(s):
"""
Calculate the length of a string without its color codes
"""
s = re.sub(r'\x1b\[[0-9;]*m', '', s)
return len(s)
def onscreen_len(s):
"""
Calculate the length of a unicode string on screen,
accounting for double-width characters
"""
if sys.version_info < (3, 0) and isinstance(s, str):
return len(s)
length = 0
for ch in s:
length += 2 if unicodedata.east_asian_width(ch) == 'W' else 1
return length
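# Example: onscreen_len(u'doge') == 4, while u'ドージ' counts as 6 because each
# fullwidth katakana character has east_asian_width 'W'.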
def setup_arguments():
parser = argparse.ArgumentParser('doge')
parser.add_argument(
'--shibe',
help='wow shibe file',
dest='doge_path',
choices=os.listdir(ROOT)
)
parser.add_argument(
'--no-shibe',
action="store_true",
help="wow no doge show :("
)
parser.add_argument(
'--season',
help='wow shibe season congrate',
choices=sorted(wow.SEASONS.keys()) + ['none']
)
parser.add_argument(
'-f', '--frequency',
help='such frequency based',
action='store_true'
)
parser.add_argument(
'--step',
help='beautiful step', # how much to step
# between ranks in FrequencyBasedDogeDeque
type=int,
default=2,
)
parser.add_argument(
'--min_length',
help='pretty minimum', # minimum length of a word
type=int,
default=1,
)
parser.add_argument(
'-s', '--filter_stopwords',
help='many words lol',
action='store_true'
)
parser.add_argument(
'-mh', '--max-height',
help='such max height',
type=int,
)
parser.add_argument(
'-mw', '--max-width',
help='such max width',
type=int,
)
return parser
def main():
tty = TTYHandler()
tty.setup()
parser = setup_arguments()
ns = parser.parse_args()
if ns.max_height:
tty.height = ns.max_height
if ns.max_width:
tty.width = ns.max_width
try:
shibe = Doge(tty, ns)
shibe.setup()
shibe.print_doge()
except (UnicodeEncodeError, UnicodeDecodeError):
# Some kind of unicode error happened. This is usually because the
# users system does not have a proper locale set up. Try to be helpful
# and figure out what could have gone wrong.
traceback.print_exc()
print()
lang = os.environ.get('LANG')
if not lang:
print('wow error: broken $LANG, so fail')
return 3
if not lang.endswith('UTF-8'):
print(
"wow error: locale '{0}' is not UTF-8. ".format(lang) +
"doge needs UTF-8 to print Shibe. Please set your system to "
"use a UTF-8 locale."
)
return 2
print(
"wow error: Unknown unicode error. Please report at "
"https://github.com/thiderman/doge/issues and include output from "
"/usr/bin/locale"
)
return 1
# wow very main
if __name__ == "__main__":
sys.exit(main())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
from ..describe import Description
from ..rest import Resource, RestException, loadmodel
from girder.utility import ziputil
from girder.constants import AccessType
from girder.api import access
class Item(Resource):
"""API endpoint for items"""
def __init__(self):
self.resourceName = 'item'
self.route('DELETE', (':id',), self.deleteItem)
self.route('GET', (), self.find)
self.route('GET', (':id',), self.getItem)
self.route('GET', (':id', 'files'), self.getFiles)
self.route('GET', (':id', 'download'), self.download)
self.route('GET', (':id', 'rootpath'), self.rootpath)
self.route('POST', (), self.createItem)
self.route('PUT', (':id',), self.updateItem)
self.route('POST', (':id', 'copy'), self.copyItem)
self.route('PUT', (':id', 'metadata'), self.setMetadata)
@access.public
def find(self, params):
"""
Get a list of items with given search parameters. Currently accepted
search modes are:
1. Searching by folderId.
2. Searching with full text search.
To search with full text search, pass the "text" parameter. To search
by parent, (i.e. list child items in a folder) pass folderId. You can
also pass limit, offset, sort, and sortdir parameters.
:param text: Pass this to perform a full-text search of items.
:param folderId: Get child items of a particular folder.
:param limit: The result set size limit, default=50.
:param offset: Offset into the results, default=0.
:param sort: The field to sort by, default=lowerName.
:param sortdir: 1 for ascending, -1 for descending, default=1.
"""
limit, offset, sort = self.getPagingParameters(params, 'lowerName')
user = self.getCurrentUser()
if 'folderId' in params:
folder = self.model('folder').load(id=params['folderId'], user=user,
level=AccessType.READ, exc=True)
filters = {}
if 'text' in params:
filters['$text'] = {
'$search': params['text']
}
return [self.model('item').filter(item, user) for item in
self.model('folder').childItems(
folder=folder, limit=limit, offset=offset, sort=sort,
filters=filters)]
elif 'text' in params:
return [self.model('item').filter(item, user) for item in
self.model('item').textSearch(
params['text'], user=user, limit=limit, offset=offset,
sort=sort)]
else:
raise RestException('Invalid search mode.')
find.description = (
Description('Search for an item by certain properties.')
.responseClass('Item')
.param('folderId', "Pass this to list all items in a folder.",
required=False)
.param('text', "Pass this to perform a full text search for items.",
required=False)
.param('limit', "Result set size limit.", default=50,
required=False, dataType='int')
.param('offset', "Offset into result set.", default=0, required=False,
dataType='int')
.param('sort', "Field to sort the item list by.", default='lowerName',
required=False)
.param('sortdir', "1 for ascending, -1 for descending", default=1,
required=False, dataType='int')
.errorResponse()
.errorResponse('Read access was denied on the parent folder.', 403))
@access.public
@loadmodel(model='item', level=AccessType.READ)
def getItem(self, item, params):
return self.model('item').filter(item, self.getCurrentUser())
getItem.description = (
Description('Get an item by ID.')
.responseClass('Item')
.param('id', 'The ID of the item.', paramType='path')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403))
@access.user
def createItem(self, params):
"""
Create a new item.
:param folderId: The _id of the parent folder.
:type folderId: str
:param name: The name of the item to create.
:param description: Item description.
"""
self.requireParams(('name', 'folderId'), params)
user = self.getCurrentUser()
name = params['name'].strip()
description = params.get('description', '').strip()
folder = self.model('folder').load(id=params['folderId'], user=user,
level=AccessType.WRITE, exc=True)
item = self.model('item').createItem(
folder=folder, name=name, creator=user, description=description)
return self.model('item').filter(item, user=user)
createItem.description = (
Description('Create a new item.')
.responseClass('Item')
.param('folderId', 'The ID of the parent folder.')
.param('name', 'Name for the item.')
.param('description', "Description for the item.", required=False)
.errorResponse()
.errorResponse('Write access was denied on the parent folder.', 403))
@access.user
@loadmodel(model='item', level=AccessType.WRITE)
def updateItem(self, item, params):
user = self.getCurrentUser()
item['name'] = params.get('name', item['name']).strip()
item['description'] = params.get(
'description', item['description']).strip()
self.model('item').updateItem(item)
if 'folderId' in params:
folder = self.model('folder').load(
params['folderId'], user=user, level=AccessType.WRITE, exc=True)
if folder['_id'] != item['folderId']:
self.model('item').move(item, folder)
return self.model('item').filter(item, user=user)
updateItem.description = (
Description('Edit an item or move it to another folder.')
.responseClass('Item')
.param('id', 'The ID of the item.', paramType='path')
.param('name', 'Name for the item.', required=False)
.param('description', 'Description for the item.', required=False)
.param('folderId', 'Pass this to move the item to a new folder.',
required=False)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the item or folder.', 403))
@access.user
@loadmodel(model='item', level=AccessType.WRITE)
def setMetadata(self, item, params):
metadata = self.getBodyJson()
# Make sure we let user know if we can't accept a metadata key
for k in metadata:
if not len(k):
raise RestException('Key names must be at least one character '
'long.')
if '.' in k or k[0] == '$':
raise RestException(u'The key name {} must not contain a '
'period or begin with a dollar sign.'
.format(k))
return self.model('item').setMetadata(item, metadata)
setMetadata.description = (
Description('Set metadata fields on an item.')
.responseClass('Item')
.notes('Set metadata fields to null in order to delete them.')
.param('id', 'The ID of the item.', paramType='path')
.param('body', 'A JSON object containing the metadata keys to add',
paramType='body')
.errorResponse('ID was invalid.')
.errorResponse('Invalid JSON passed in request body.')
.errorResponse('Metadata key name was invalid.')
.errorResponse('Write access was denied for the item.', 403))
def _downloadMultifileItem(self, item, user):
cherrypy.response.headers['Content-Type'] = 'application/zip'
cherrypy.response.headers['Content-Disposition'] =\
u'attachment; filename="{}{}"'.format(item['name'], '.zip')
def stream():
zip = ziputil.ZipGenerator(item['name'])
for (path, file) in self.model('item').fileList(item,
subpath=False):
for data in zip.addFile(file, path):
yield data
yield zip.footer()
return stream
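# The inner generator function is returned uninvoked so the response body can
# be streamed zip-chunk by zip-chunk instead of buffering the whole archive
# in memory.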
@access.public
@loadmodel(model='item', level=AccessType.READ)
def getFiles(self, item, params):
"""Get a page of files in an item."""
limit, offset, sort = self.getPagingParameters(params, 'name')
return list(self.model('item').childFiles(item=item, limit=limit,
offset=offset, sort=sort))
getFiles.description = (
Description('Get the files within an item.')
.responseClass('File')
.param('id', 'The ID of the item.', paramType='path')
.param('limit', "Result set size limit.", required=False, default=50,
dataType='int')
.param('offset', "Offset into result set.", required=False, default=0,
dataType='int')
.param('sort', "Field to sort the result list by.", default='name',
required=False)
.param('sortdir', "1 for ascending, -1 for descending", default=1,
required=False, dataType='int')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403))
@access.public
@loadmodel(model='item', level=AccessType.READ)
def download(self, item, params):
"""
Defers to the underlying assetstore adapter to stream the file out.
"""
offset = int(params.get('offset', 0))
user = self.getCurrentUser()
files = list(self.model('item').childFiles(item=item, limit=2))
format = params.get('format', '')
if format not in (None, '', 'zip'):
raise RestException('Unsupported format.')
if len(files) == 1 and format != 'zip':
return self.model('file').download(files[0], offset)
else:
return self._downloadMultifileItem(item, user)
download.cookieAuth = True
download.description = (
Description('Download the contents of an item.')
.param('id', 'The ID of the item.', paramType='path')
.param('format', 'If unspecified, items with one file are downloaded '
'as that file, and other items are downloaded as a zip '
'archive. If \'zip\', a zip archive is always sent.',
required=False)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403))
@access.user
@loadmodel(model='item', level=AccessType.WRITE)
def deleteItem(self, item, params):
"""
Delete an item and its contents.
"""
self.model('item').remove(item)
return {'message': u'Deleted item {}.'.format(item['name'])}
deleteItem.description = (
Description('Delete an item by ID.')
.param('id', 'The ID of the item.', paramType='path')
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the item.', 403))
@access.public
@loadmodel(model='item', level=AccessType.READ)
def rootpath(self, item, params):
"""
Get the path to the root of the item's parent hierarchy.
"""
return self.model('item').parentsToRoot(item, self.getCurrentUser())
rootpath.description = (
Description('Get the path to the root of the item\'s hierarchy.')
.param('id', 'The ID of the item.', paramType='path')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403))
@access.user
@loadmodel(model='item', level=AccessType.READ)
def copyItem(self, item, params):
"""
Copy an existing item to a new item.
:param folderId: The _id of the parent folder for the new item.
:type folderId: str
:param name: The name of the item to create.
:param description: Item description.
"""
user = self.getCurrentUser()
name = params.get('name', None)
folderId = params.get('folderId', item['folderId'])
folder = self.model('folder').load(id=folderId, user=user,
level=AccessType.WRITE, exc=True)
description = params.get('description', None)
return self.model('item').copyItem(item, creator=user, name=name,
folder=folder,
description=description)
copyItem.description = (
Description('Copy an item.')
.responseClass('Item')
.param('id', 'The ID of the original item.', paramType='path')
.param('folderId', 'The ID of the parent folder.', required=False)
.param('name', 'Name for the new item.', required=False)
.param('description', "Description for the new item.", required=False)
.errorResponse()
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied on the original item.', 403)
.errorResponse('Write access was denied on the parent folder.', 403))
|
|
# -*- coding: utf-8 -*-
import feedparser
import tweepy
import time
from datetime import datetime
import random
from threading import Timer
from flask import current_app, flash
from flask_login import current_user
from . import db
from . import infos_tweet
from .models import Feed, Article
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# THREADING TEST
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
class RepeatedTimer(object):
"""
Run function (arg or not) every interval seconds
http://stackoverflow.com/questions/3393612/
run-certain-code-every-n-seconds
"""
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
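# Example usage (sketch; `tick` is a hypothetical callback, not part of this
# module):
#
# def tick():
# print 'tick'
#
# rt = RepeatedTimer(5.0, tick) # starts immediately, fires every 5 s
# ...
# rt.stop() # cancel the pending timer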
class RssFlux(object):
"""
Collect articles from an RSS feed and tweet them on a schedule.
Activate collection with activate_get (deactivate with desactivate_get).
Activate tweeting with activate_tweet (deactivate with desactivate_tweet).
Attributes:
> refresh : polling interval for get_articles (610 sec. by default)
> frequency : interval between tweet_articles runs (600 sec. by default)
"""
def __init__(self, idflux):
"""Connection init."""
self.app = current_app._get_current_object()
self.idflux = idflux
flux_info = Feed.query.filter_by(id=self.idflux).first()
self.name = flux_info.name
self.url = flux_info.url
self.collect_actif = flux_info.collect_actif
self.Tweet_actif = flux_info.Tweet_actif
# refresh rate for collecting articles (28800.0 would be every 8h)
self.refresh = 610.0 # every ~10 min
# self.frequency = (24/flux_info.frequency) * 3600
self.frequency = 600.0 # every ~10 min
if flux_info.hashtag:
self.hashtag = flux_info.hashtag
else:
self.hashtag = ''
self.rt = None
self.rt2 = None
# thread name
# self.name_Thread = '{0} {1}'.format('thread', idflux)
# print self.name_Thread
def get_articles(self):
"""Get every self.refresh all new artle of feed and insert bdd."""
# repeat in a thread every self.refresh the get_articles function
# self.name_Thread = threading.Timer(self.refresh, self.get_articles).start()
# Timer(self.refresh, self.get_articles).start()
rss = self.url
feeds = feedparser.parse(rss)
with self.app.app_context():
db.session.expunge_all()
# titles of all articles already stored for this feed
title_articles = [element.title for element in
Article.query.filter(Article.feed_id == self.idflux)]
# title/link pairs from the latest feed entries not stored yet
feedss = [(entry['title'], entry['link'])
for entry in feeds.entries[:10]
if entry['title'] not in title_articles]
# add the new entries to the database
for elem in feedss:
article = Article(title=elem[0],
url=elem[1],
feed_id=self.idflux)
db.session.add(article)
db.session.commit()
print "SCRAPP ARTICLE EFFECTUE"
def tweet_articles(self):
"""Format and tweet articles from bdd for self.flux."""
with self.app.app_context():
articles_to_tweet = Article.query.\
filter(Article.feed_id == self.idflux).\
filter(Article.tweeted == 0).all()
# check for articles to tweet
if articles_to_tweet:
auth = tweepy.OAuthHandler(infos_tweet.Key_consumer, infos_tweet.Consumer_secret)
auth.set_access_token(infos_tweet.Access_token, infos_tweet.Access_token_secret)
api = tweepy.API(auth)
try:
for tweets in articles_to_tweet:
# TITLE // LINK -> tweet_content
title = tweets.title[:100]
link_article = tweets.url
# FEED name for VIA -> tweet_content
name_feed = Feed.query.\
filter(Feed.id == tweets.feed_id).first()
via_article = name_feed.name.split()[0]
tweet_content = "%s // %s - via %s" %\
(title, link_article, via_article)
# mark as tweeted before sending so a retry cannot double-post
tweets.tweeted = 1
tweets.date_tweeted = datetime.utcnow()
db.session.commit()
# send it
api.update_status(tweet_content)
# wait ~10 minutes plus a little jitter between tweets
time.sleep(600 + random.randint(30, 60))
print "Tweet ID " + str(tweets.id) + " : sent"
# back off when the Twitter rate limit is hit
except tweepy.RateLimitError:
print "Rate limit reached, backing off"
time.sleep(16 * 60)
else:
# nothing to send
flash('No tweets to send')
print 'No tweets to send'
def activate_get(self):
"""Activate periodic article collection."""
print self.collect_actif
if not self.collect_actif:
print "enter activate_get"
self.rt2 = RepeatedTimer(self.refresh, self.get_articles)
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.collect_actif = True
db.session.commit()
print self.rt2
else:
print 'Collection already enabled'
def desactivate_get(self):
"""Deactivate periodic article collection."""
if self.rt2:
self.rt2.stop()
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.collect_actif = False
db.session.commit()
else:
print 'Collection already disabled'
def activate_tweet(self):
"""Activate periodic tweeting of collected articles."""
print "State TWEET (Tweet_actif) : "
print self.Tweet_actif
if not self.Tweet_actif:
print "enter activate_tweet"
self.rt = RepeatedTimer(self.frequency, self.tweet_articles)
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.Tweet_actif = True
db.session.commit()
print self.rt
else:
print 'Tweeting already enabled'
def desactivate_tweet(self):
"""Deactivate periodic tweeting of collected articles."""
if self.rt:
self.rt.stop()
# update Feed
flux_info = Feed.query.filter_by(id=self.idflux).first()
flux_info.Tweet_actif = False
db.session.commit()
else:
print 'Tweeting already disabled'
def state(self):
"""Report which actions (tweet_articles / get_articles) are running."""
tweeting = self.rt is not None and self.rt.is_running
collecting = self.rt2 is not None and self.rt2.is_running
if tweeting and collecting:
return self.name + " : collecting and tweeting active."
elif tweeting:
return self.name + " : tweeting is active."
elif collecting:
return self.name + " : collecting is active."
else:
print 'No actions'
def print_info(self):
self.attrs = vars(self)
print ', '.join("%s: %s" % item for item in self.attrs.items())
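# Example usage (sketch; assumes a Feed row with id=1 exists and that this
# runs where a Flask application context is available; `flux` is a
# hypothetical name):
#
# flux = RssFlux(1)
# flux.activate_get() # poll the RSS feed every self.refresh seconds
# flux.activate_tweet() # tweet unsent articles every self.frequency seconds
# print flux.state()
# flux.desactivate_tweet()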
if __name__ == '__main__':
pass
|
|
#!/usr/bin/env python
import os
import re
import yaml
import threading
import itertools
import sys
import rospy
import rospkg
import roslaunch
from launchtree_config import LaunchtreeConfig, LaunchtreeArg, LaunchtreeRemap, LaunchtreeParam, LaunchtreeRosparam
from launchtree_widget import LaunchtreeEntryItem, LaunchtreeWidget
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, Signal
from python_qt_binding.QtWidgets import QFileDialog, QWidget, QTreeWidgetItem, QLabel, QLineEdit, QWidgetItem
from python_qt_binding.QtWidgets import QInputDialog, QWizard, QWizardPage, QGridLayout, QComboBox, QSizePolicy, QPushButton
from python_qt_binding.QtGui import QIcon, QColor
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from editor_tree import EditorTree, YamlStruct, EditorNode
from wizards import YamlPage, XmlPage, AddWizard
from config_wizard import ConfigWizard, ConfigPage
#widget that stores information about a specific XML or YAML tag
class PropertyWidget(QWidget):
def __init__(self, name, value, update_path, obj, isXml):
super(PropertyWidget, self).__init__()
self.setObjectName('PropertyWidget')
self.name = name
self.value = value
self.update_path = update_path
self.obj = obj
self.isXml = isXml
# ui file load
self._rp = rospkg.RosPack()
self._rp_package_list = self._rp.list()
res_folder = os.path.join(self._rp.get_path('rqt_launch_editor'), 'resource')
ui_file = os.path.join(res_folder, 'editor_item_widget.ui')
loadUi(ui_file, self)
#make widget
self._label_name.setText(name)
self._lineEdit_arg.setText(value)
#change value in underlying native data structure
def update(self):
self.value = self._lineEdit_arg.text()
if self.isXml:
self.obj.attrib[self.name] = self.value
else:
self.update_path(self.value)
def changed(self):
return str(self._lineEdit_arg.text()) != str(self.value)
#widget that creates the overall UI of the application
#extends the UI and some visual elements from LaunchtreeWidget
class EditorWidget(LaunchtreeWidget):
def __init__(self, context):
super(EditorWidget, self).__init__(context)
self.setObjectName('EditorWidget')
self.curr_entry = None
self.gridLayout_2.setAlignment(Qt.AlignTop)
#set signals
self.apply.clicked.connect(self.apply_changes)
self._add_button.clicked.connect(self.add_dialog)
self._del_button.clicked.connect(self.delete_item)
self.save_as.clicked.connect(self.configure)
#write changes to file
def apply_changes(self):
if hasattr(self, 'editor_tree'):
layout = self.gridLayout_2
for i in range(layout.count()):
item = layout.itemAt(i)
if isinstance(item, QWidgetItem):
widg = item.widget()
if isinstance(widg, PropertyWidget):
if widg.changed():
widg.update()
self.editor_tree.apply_changes()
#helper function for tree widget icon
def get_icon(self, prop_widg):
elt = prop_widg.instance.obj
if type(elt).__name__ == 'Element':
tag = elt.tag
if tag == 'node':
return self._icon_node
elif tag == 'param':
return self._icon_param
elif tag == 'rosparam':
return self._icon_rosparam
elif tag == 'arg':
return self._icon_arg
elif tag == 'remap':
return self._icon_remap
else:
return self._icon_default
elif isinstance(elt, YamlStruct):
return self._icon_param
elif isinstance(elt, dict):
return self._icon_rosparam
else:
return self._icon_default
#generate tree widget
def display_config_tree(self, xml_tree):
filename = os.path.join(
self._rp.get_path(self.package_select.currentText()),
self.launchfile_select.currentText()
)
self.editor_tree = EditorTree(filename)
def _display_config_tree(root):
#create widget
i = LaunchtreeEntryItem()
i.instance = root
if type(i.instance.obj).__name__ == 'Element':
i.setText(0, root.name + ' (' + str(i.instance.obj.line_num) + ')')
else:
i.setText(0, root.name)
i.setIcon(0, self.get_icon(i))
# recursively add children to tree
for child in root.children:
i.addChild(_display_config_tree(child))
return i
return [_display_config_tree(self.editor_tree.getroot())]
#extend launchtree method to clear property pane
def load_launchfile(self):
super(EditorWidget, self).load_launchfile()
self.clear_prop_pane()
return
#delete all PropertyWidgets in property pane
def clear_prop_pane(self):
if not hasattr(self, 'curr_entry'):
return
if self.curr_entry is not None:
del_items = []
layout = self.gridLayout_2
for i in range(layout.count()):
item = layout.itemAt(i)
if isinstance(item, QWidgetItem):
widg = item.widget()
if isinstance(widg, PropertyWidget):
del_items.append(widg)
if widg.changed():
widg.update()
for widg in del_items:
layout.removeWidget(widg)
widg.setParent(None)
widg.deleteLater()
widg.hide()
del widg
#extending QTreeWidget method
#executed when a new tree item is selected
def launch_entry_changed(self, current, previous):
if current is None:
return
self.curr_entry = current
# log changes to data structure if applicable
if previous is not None:
prev_data = previous.instance.obj
del_items = []
layout = self.gridLayout_2
for i in range(layout.count()):
item = layout.itemAt(i)
if isinstance(item, QWidgetItem):
widg = item.widget()
if isinstance(widg, PropertyWidget):
del_items.append(widg)
if widg.changed():
widg.update()
for widg in del_items:
layout.removeWidget(widg)
widg.setParent(None)
widg.deleteLater()
widg.hide()
del widg
data = current.instance.obj
#generate new property widgets for selected element
size_pol = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
if isinstance(data, YamlStruct):
n = "Value: "
v = str(data.value)
prop_widg = PropertyWidget(n, v, lambda t: data.update(t), data, False)
prop_widg.setSizePolicy(size_pol)
self.gridLayout_2.addWidget(prop_widg)
elif type(data).__name__ == "Element":
for key, instance in data.attrib.items():
n = str(key)
v = str(instance)
prop_widg = PropertyWidget(n, v, lambda t: data.set(n, t), data, True)
prop_widg.setSizePolicy(size_pol)
self.gridLayout_2.addWidget(prop_widg)
# generate interface to add a new element
def add_dialog(self):
#can be None
if self.curr_entry is not None and not isinstance(self.curr_entry.instance.obj, YamlStruct):
self.wizard = AddWizard(self.curr_entry)
else:
return
#gather data on submit
self.wizard.setWindowTitle("Add New Entry")
self.wizard.setWindowModality(Qt.ApplicationModal)
self.wizard.show()
self.wizard.accepted.connect(self.add_action)
# called when add_wizard is submitted, changes data structures
def add_action(self):
self.editor_tree.add_to_tree(self.wizard.node, self.curr_entry.instance)
i = LaunchtreeEntryItem()
i.instance = self.wizard.node
i.setText(0, self.wizard.node.name)
icon = self.get_icon(i)
i.setIcon(0, icon)
#self.curr_entry.insertChild(0, i)
self.curr_entry.addChild(i)
#deal with possible repetition
matches = self.get_repeated(self.curr_entry)
if matches != []:
for parent in matches:
if parent is self.curr_entry:
continue
x = LaunchtreeEntryItem()
x.instance = self.wizard.node
x.setText(0, self.wizard.node.name)
x.setIcon(0, icon)
parent.addChild(x)
#delete selected XML or YAML item from widget
def delete_item(self):
curr = self.curr_entry
parent = curr.parent()
matches = self.get_repeated(curr)
#deleting launch tag creates invalid launch XML
if hasattr(curr.instance.obj, "tag"):
if curr.instance.obj.tag == "launch":
print "-----Launch tag cannot be deleted-----"
return
if parent is not None:
parent.removeChild(curr)
self.editor_tree.delete_item(curr.instance, parent.instance)
if matches != []:
for x in matches:
x_parent = x.parent()
x_parent.removeChild(x)
del x
else:
self.editor_tree.delete_item(curr.instance, None)
del curr
return
#configure files to write changes to
def configure(self):
if not hasattr(self, 'editor_tree'):
return
self.config_wizard = ConfigWizard(self.editor_tree.file_map)
#self.verticalLayout_11.addWidget(self.config_wizard)
self.config_wizard.setWindowTitle("Configure File Setup")
self.config_wizard.setWindowModality(Qt.ApplicationModal)
self.config_wizard.resize(850, 100)
self.config_wizard.show()
#filename = QFileDialog.getSaveFileName(self)
#returns a list of widgets that map to same EditorNode object
def get_repeated(self, widg):
txt = widg.text(0)
matches = self.launch_view.findItems(txt, Qt.MatchExactly | Qt.MatchRecursive, 0)
if matches == []:
return []
else:
return_lst = []
for i in matches:
if i is widg:
continue
elif i.instance.obj is widg.instance.obj:
return_lst.append(i)
return return_lst
|
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
from nnabla.testing import assert_allclose
def test_manip():
v = nn.Variable([2, 3, 4])
assert v.shape == (2, 3, 4)
with pytest.raises(Exception):
v.reset_shape([1, 2])
v.reset_shape([1, 2], force=True)
assert v.shape == (1, 2)
@pytest.mark.parametrize("need_grad", [True, False])
def test_from_array(need_grad):
data = np.random.randint(0, 10, size=(2, 3, 4))
grad = np.random.randint(0, 10, size=(2, 3, 4))
v1 = nn.Variable.from_numpy_array(data, need_grad=need_grad)
assert np.all(v1.d == data)
assert v1.d.dtype == data.dtype
assert v1.need_grad == need_grad
v2 = nn.Variable.from_numpy_array(data, grad, need_grad)
assert np.all(v2.d == data)
assert v2.d.dtype == data.dtype
assert np.all(v2.g == grad)
assert v2.g.dtype == grad.dtype
assert v2.need_grad == need_grad
def test_data_grad_reference():
v = nn.Variable([2, 3, 4])
assert v.d.dtype == np.float32
assert v.g.dtype == np.float32
def test_dtype_conversion():
v = nn.Variable([2, 3, 4])
    a = v.data.cast(np.int32)
    a[...] = 2
    assert (v.data.dtype == np.int32)
assert np.all(a == 2)
b = v.data.cast(np.float32)
assert b.dtype == np.float32
assert b is not a
assert np.all(b == 2)
b[...] = np.random.randn(*b.shape) * 10
c = v.data.cast(np.int32)
assert np.all(c == b.astype(np.int32))
def test_data_grad():
v = nn.Variable([2, 3, 4])
v.d[...] = np.random.randn(*v.shape)
assert v.d is not v.g
assert not np.all(v.d == v.g)
def test_get_unlinked_variable():
v = nn.Variable([2, 3, 4], need_grad=True)
grad = np.random.randn(*v.shape).astype(np.float32)
v.g = grad
v.d = np.random.randn(*v.shape)
import nnabla.functions as F
with nn.context_scope(nn.Context()), nn.auto_forward():
v2 = F.identity(v)
v2_u = v2.get_unlinked_variable()
assert v2_u.need_grad
v3 = F.identity(v2_u)
v2_u.grad.zero()
v2_g = v2_u.g.copy()
v3.backward(clear_buffer=False)
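    # v2_u shares its data/grad arrays with v2 (only the graph link is cut),
    # so the gradient added by v3.backward() is visible through both handles,
    # which is what the assertions below verify.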
assert type(v2_u) == type(v2)
assert np.all(v.g == grad)
assert np.all(v2_u.g == v2.g)
assert np.all(v2_u.g == v2_g + 1)
# Check need_grad option
assert v2.get_unlinked_variable(need_grad=True).need_grad
assert not v2.get_unlinked_variable(need_grad=False).need_grad
def test_reshape():
v = nn.Variable([2, 3, 4], need_grad=True)
grad = np.random.randn(*v.shape).astype(np.float32)
v.g = grad
v.d = np.random.randn(*v.shape)
import nnabla.functions as F
with nn.context_scope(nn.Context()), nn.auto_forward():
v2 = F.identity(v)
v2_s = v2.reshape((3, 4, 2))
v3 = F.identity(v2_s)
v3.backward(clear_buffer=False)
assert np.all(v2_s.g.flat == v2.g.flat)
assert np.all(v2_s.g == 1)
v2.d = 1
assert np.all(v2_s.d == 1)
# Check unlink
v2_su = v2.reshape((3, 4, 2), unlink=True)
assert v2_su.need_grad
assert v2_su.parent is None
v2_su.need_grad = False
v2_su2 = v2_su.reshape((3, 4, 2), unlink=True)
assert not v2_su2.need_grad
assert v2_su2.parent is None
def test_persistent():
x = nn.Variable([2, 3, 4], need_grad=True)
x1 = x + 1
x2 = x1 + 1
x3 = x2 + 1
y = x3 + 1
x3.persistent = True
x.data.zero()
y.forward(clear_buffer=True)
assert_allclose(x3.d, 3)
y.forward(clear_no_need_grad=True)
y.backward(clear_buffer=True)
assert_allclose(x3.d, 3)
assert_allclose(x3.g, 1)
def test_name():
x = nn.Variable([2, 3])
x.name = "VariableName"
assert x.name == "VariableName"
def test_name_all_variables():
def net(h):
import nnabla.functions as F
import nnabla.parametric_functions as PF
h = PF.convolution(h, 3, (3, 3), name="conv1")
h = PF.batch_normalization(h, name="bn1")
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = PF.convolution(h, 3, (3, 3), name="conv2")
h = PF.batch_normalization(h, name="bn2")
pred = F.relu(h)
return pred
class Namer(object):
def __init__(self, ):
self.counter = 0
def __call__(self, nnabla_func):
for v in nnabla_func.outputs:
v.name = "{}_output_{:05d}".format(
nnabla_func.name, self.counter)
self.counter += 1
class Confirmer(object):
def __init__(self, ):
self.counter = 0
def __call__(self, nnabla_func):
for v in nnabla_func.outputs:
assert v.name == "{}_output_{:05d}".format(
nnabla_func.name, self.counter)
self.counter += 1
x = nn.Variable([2, 3, 8, 8])
pred = net(x)
pred.visit(Namer())
pred.forward(clear_no_need_grad=True)
pred.backward(clear_buffer=True)
pred.visit(Confirmer())
def test_clear_all_graph_links():
import nnabla.functions as F
import nnabla.parametric_functions as PF
class OneStepRNN(object):
def __init__(self, batch_size=8, state_size=8):
self.lstm0 = PF.LSTMCell(batch_size, state_size, name="lsmt0")
self.lstm1 = PF.LSTMCell(batch_size, state_size, name="lsmt1")
self.affine = PF.affine
def __call__(self, x, n_class=10):
h = self.lstm0(x)
h = self.lstm1(h)
h = self.affine(h, n_class)
return h
T = 3
batch_size = 2
dims = 4
state_size = 8
one_step_rnn = OneStepRNN(batch_size, state_size)
# Forward: unroll over time
loss = 0
for t in range(T):
x = nn.Variable.from_numpy_array(
np.random.randn(batch_size, dims))
y = nn.Variable.from_numpy_array(
np.random.choice(np.arange(10), batch_size, replace=True)).reshape((batch_size, 1))
pred = one_step_rnn(x)
l = F.mean(F.softmax_cross_entropy(pred, y))
loss += l
loss /= T
# Backward then truncate
loss.backward()
loss.clear_all_graph_links()
assert one_step_rnn.lstm0.h.parent == None
assert one_step_rnn.lstm0.c.parent == None
assert one_step_rnn.lstm1.h.parent == None
assert one_step_rnn.lstm1.c.parent == None
def test_function_references():
import nnabla as nn
import nnabla.parametric_functions as PF
v = nn.Variable.from_numpy_array(np.random.randn(2, 4))
assert len(v.function_references) == 0
h1 = PF.affine(v, 10, name="affine1")
assert len(v.function_references) == 1
assert h1.parent in v.function_references
h2 = PF.affine(v, 10, name="affine2")
assert len(v.function_references) == 2
assert h1.parent in v.function_references
assert h2.parent in v.function_references
del h1
assert len(v.function_references) == 1
assert h2.parent in v.function_references
del h2
assert len(v.function_references) == 0
@pytest.mark.parametrize("f", [lambda x: x, hash])
def test_variable_equality_and_hash(f):
shape = (2, 3, 4)
x = nn.Variable(shape)
assert f(x) == f(x)
y = nn.Variable(shape)
assert f(x) != f(y)
y = x.get_unlinked_variable()
assert f(x) == f(y)
y.need_grad = True
assert f(x) == f(y)
def test_variable_set():
# Testing hash and equality operator via set
shape = (2, 3, 4)
x = nn.Variable(shape)
s = set()
s.add(x)
assert x in s
y = nn.Variable(shape)
assert y not in s
y = x.get_unlinked_variable()
assert y in s
y.need_grad = True
assert y in s
def test_prohibit_clear_data():
import nnabla.functions as F
nn.prefer_cached_array(False)
shape = (2, 3, 4)
var_np = np.random.rand(*shape)
# the case of root variable
x1 = nn.Variable.from_numpy_array(var_np)
y1 = F.reshape(x1, (-1,), inplace=True)
y1 = F.reshape(y1, shape, inplace=True) * 2
x2 = nn.Variable.from_numpy_array(var_np)
y2 = F.reshape(x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y1, y2], clear_buffer=True)
assert_allclose(x1.d, x2.d)
assert_allclose(y1.d, y2.d)
# the case of persistent variable
x1 = nn.Variable.from_numpy_array(var_np)
p_y1 = F.mul_scalar(x1, 2).apply(persistent=True)
y1 = F.reshape(p_y1, (-1,), inplace=True)
y1 = F.reshape(y1, shape, inplace=True) * 2
x2 = nn.Variable.from_numpy_array(var_np)
p_y2 = F.mul_scalar(x2, 2).apply(persistent=True)
y2 = F.reshape(p_y2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y1, y2], clear_buffer=True)
assert_allclose(p_y1.d, p_y2.d)
assert_allclose(y1.d, y2.d)
# the case of rewire_on root variable
# graph A: x11 -> f_inplace -> y11
x11 = nn.Variable.from_numpy_array(var_np)
y11 = F.reshape(x11, (-1,), inplace=True)
# graph B: x12 -> f_inplace -> mul_scalar -> y12
x12 = nn.Variable(shape=y11.shape)
y12 = F.reshape(x12, shape, inplace=True) * 2
# graph A->B: x11 -> f_inplace -> f_inplace -> mul_scalar -> y12
x12.rewire_on(y11)
x2 = nn.Variable.from_numpy_array(var_np)
y2 = F.reshape(x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y12, y2], clear_buffer=True)
assert_allclose(x11.d, x2.d)
assert_allclose(y12.d, y2.d)
# the case of rewire_on persistent variable
# graph A: x11 -> mul_scalar -> p_x11 -> f_inplace -> y11
x11 = nn.Variable.from_numpy_array(var_np)
p_x11 = F.mul_scalar(x11, 2).apply(persistent=True)
y11 = F.reshape(p_x11, (-1,), inplace=True)
# graph B: x12 -> f_inplace -> mul_scalar -> y12
x12 = nn.Variable(shape=y11.shape)
y12 = F.reshape(x12, shape, inplace=True) * 2
# graph A->B: ... -> p_x11 -> f_inplace -> f_inplace -> mul_scalar -> y12
x12.rewire_on(y11)
x2 = nn.Variable.from_numpy_array(var_np)
p_x2 = F.mul_scalar(x2, 2).apply(persistent=True)
y2 = F.reshape(p_x2, (-1,), inplace=False)
y2 = F.reshape(y2, shape, inplace=False) * 2
nn.forward_all([y12, y2], clear_buffer=True)
assert_allclose(p_x11.d, p_x2.d)
assert_allclose(y12.d, y2.d)
def test_leaf_indexing_access():
import nnabla.functions as F
nn.set_auto_forward(False)
shape_x = (3, 2)
dx = np.random.rand(*shape_x)
shape_y = (2, 2)
dy = np.random.rand(*shape_y)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z = F.identity(x)
z.forward()
d1 = x.d.copy()
nn.set_auto_forward(True)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z2 = F.identity(x)
d2 = x.d.copy()
nn.set_auto_forward(False)
x = nn.Variable.from_numpy_array(dx)
y = nn.Variable.from_numpy_array(dy)
x[0:2, :] = y
z3 = F.identity(x)
z3.forward()
d3 = x.d.copy()
d4 = z3.d.copy()
assert_allclose(d1, d2)
assert_allclose(d2, d3)
assert_allclose(d3, d4)
|
|
from typing import Iterable, Union
from abc import abstractmethod, ABCMeta
import re
import collections.abc
class InvalidVersionError(ValueError):
pass
class Comparable(metaclass=ABCMeta):
@classmethod
@abstractmethod
def cmp(cls, v1: 'Comparable', v2: 'Comparable') -> int:
""" Compares two instances. """
def __lt__(self, other: 'Comparable') -> bool:
return self.cmp(self, other) < 0
def __gt__(self, other: 'Comparable') -> bool:
return self.cmp(self, other) > 0
def __eq__(self, other: 'Comparable') -> bool:
return self.cmp(self, other) == 0
def __le__(self, other: 'Comparable') -> bool:
return self.cmp(self, other) <= 0
def __ge__(self, other: 'Comparable') -> bool:
return self.cmp(self, other) >= 0
def __ne__(self, other: 'Comparable') -> bool:
return self.cmp(self, other) != 0
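# Illustrative sketch (not part of the original module): a minimal subclass
# only needs to supply cmp(); the six rich comparisons above then come for
# free. The _IntBox name below is hypothetical example code.
class _IntBox(Comparable):
    def __init__(self, value: int):
        self.value = value
    @classmethod
    def cmp(cls, v1: '_IntBox', v2: '_IntBox') -> int:
        # Classic three-way comparison: negative, zero or positive.
        return (v1.value > v2.value) - (v1.value < v2.value)
# e.g. _IntBox(1) < _IntBox(2) and _IntBox(2) >= _IntBox(2) both hold.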
class VersionBase(Comparable):
@classmethod
def cmp(cls, v1: 'VersionBase', v2: 'VersionBase') -> int:
""" Compares two instances. """
# TODO types checking
if v1._version > v2._version:
return 1
elif v1._version == v2._version:
return 0
else:
return -1
@property
@abstractmethod
def full(self) -> str:
""" Full version string. """
@property
@abstractmethod
def parts(self) -> Iterable:
""" Full version as iterable. """
@property
@abstractmethod
def release(self) -> str:
""" Release part string. """
@property
@abstractmethod
def release_parts(self) -> Iterable:
""" Release part as iterable. """
def __init__(self, version: str, allow_non_stripped: bool = False):
if not isinstance(version, str):
raise InvalidVersionError('version should be a string')
if not allow_non_stripped and version != version.strip():
raise InvalidVersionError(
'version includes leading and/or trailing spaces'
)
self._version = self._parse(version)
def _parse(self, version: str):
return version
def __hash__(self):
return hash(self.full)
def __str__(self):
return self.full
def __repr__(self):
return "{}(version='{}')".format(self.__class__.__name__, self.full)
class SourceVersion(VersionBase):
pass
class PackageVersion(VersionBase):
@property
@abstractmethod
def upstream(self) -> SourceVersion:
""" Upstream part of the package. """
class SemVerBase(VersionBase):
@property
def major(self) -> str:
return self.release_parts[0]
@property
def minor(self) -> str:
return self.release_parts[1]
@property
def patch(self) -> str:
return self.release_parts[2]
class GenericVersion(VersionBase):
# combines allowed characters from:
# - PyPI: https://www.python.org/dev/peps/pep-0440
# - SemVer: https://semver.org/
re_generic = re.compile(r'[0-9a-zA-Z.\-+!]+')
@property
def full(self) -> str:
return self._version
@property
def parts(self) -> Iterable:
return (self.full,)
@property
def release(self) -> str:
return self.full
@property
def release_parts(self) -> Iterable:
return self.parts
def _parse(self, version):
if not self.re_generic.fullmatch(version):
raise InvalidVersionError('only alphanumerics and [.-+!] are allowed')
return version
class PEP440VersionFallback(Comparable):
""" Mimics packaging.version.Version. """
# covers only some cases
re_pep440 = re.compile(r'([0-9]+)\.([0-9]+)\.([0-9]+)(?:\.?(dev|rc|a|b)\.?([0-9]+))?')
@classmethod
def cmp(cls, v1: 'PEP440VersionFallback',
v2: 'PEP440VersionFallback') -> int:
""" Compares two instances. """
raise NotImplementedError("Please, install 'packaging' package")
def __init__(self, version: str):
match = self.re_pep440.fullmatch(version)
if not match:
raise InvalidVersionError(
"version '{}' is invalid, expected N.N.N[[.]{{a|b|rc|dev}}[.]N]"
.format(version)
)
self._version = tuple(
[int(p) if idx in (0, 1, 2, 4) and p is not None else p for idx, p in enumerate(match.groups())]
)
@property
def public(self):
return self.base_version + (
'' if self._version[3] is None else
"{}{}{}"
.format(
'.' if self.dev else '', self._version[3], self._version[4]
)
)
@property
def base_version(self):
return "{}.{}.{}".format(*self.release)
@property
def epoch(self):
return 0
@property
def release(self):
return self._version[:3]
@property
def local(self):
return None
@property
def pre(self):
if self.is_prerelease and not self.is_devrelease:
return self._version[3:]
@property
def is_prerelease(self):
return self._version[3] in ('a', 'b', 'rc', 'dev')
@property
def dev(self):
return self._version[4] if self.is_devrelease else None
@property
def is_devrelease(self):
return self._version[3] == 'dev'
@property
def post(self):
return None
@property
def is_postrelease(self):
return False
class PEP440BasedVersion(GenericVersion):
def _parse(self, version: str):
# TODO test that
try:
from packaging.version import (
Version as PEP440Version,
InvalidVersion as PEP440InvalidVersion
)
except ImportError:
            # seems we are working in a bare environment without 'packaging'
PEP440Version = PEP440VersionFallback
PEP440InvalidVersion = InvalidVersionError
try:
return PEP440Version(version)
except PEP440InvalidVersion as exc:
            # TODO: is this the best string to pass?
raise InvalidVersionError(str(exc))
# TODO create API wrappers for dev, pre and post from PEP440Version
@property
def public(self) -> str:
return self._version.public
@property
def full(self) -> str:
res = self._version.public
if self._version.local:
res += "+{}".format(self._version.local)
return res
@property
def parts(self) -> Iterable:
# TODO
# - API for add_parts
add_parts = (None, None)
if self._version.dev is not None:
add_parts = ('dev', self._version.dev)
elif self._version.pre is not None:
add_parts = self._version.pre
elif self._version.post is not None:
add_parts = ('post', self._version.post)
return (
self._version.epoch,
*self.release_parts,
*add_parts,
self._version.local
)
@property
def release(self) -> str:
return '.'.join(map(str, self.release_parts))
@property
def release_parts(self) -> Iterable:
return self._version.release
class DigitDotVersion(PEP440BasedVersion):
def __init__(
self,
version: str,
        parts_num: Union[None, int, Iterable[int]] = None,
**kwargs
):
super().__init__(version, **kwargs)
# additional restrictions
if (self._version.dev or
self._version.pre or
self._version.post or
self._version.epoch or
self._version.local):
raise InvalidVersionError("only dots and digits are expected")
if parts_num:
            # TODO the typing docs don't explicitly say whether
            # typing.Iterable can be used here instead or not
if not isinstance(parts_num, collections.abc.Iterable):
parts_num = [parts_num]
if len(self.parts) not in parts_num:
raise InvalidVersionError(
"invalid number of parts {}, should contain {}"
.format(len(self.parts), ' or '.join(map(str, parts_num)))
)
@property
def parts(self) -> Iterable:
return self._version.release
# TODO allows (silently normalizes) leading zeroes in parts
class SemVerReleaseVersion(DigitDotVersion, SemVerBase):
def __init__(self, version: str, **kwargs):
super().__init__(version, parts_num=3, **kwargs)
class PlenumVersion(
PEP440BasedVersion, SemVerBase, SourceVersion, PackageVersion
):
def __init__(self, version: str, **kwargs):
super().__init__(version, **kwargs)
# additional restrictions
if self._version.pre:
if self._version.pre[0] != 'rc':
raise InvalidVersionError(
"pre-release phase '{}' is unexpected"
.format(self._version.pre[0])
)
if self._version.post:
raise InvalidVersionError("post-release is unexpected")
if self._version.epoch:
raise InvalidVersionError("epoch is unexpected")
if self._version.local:
raise InvalidVersionError("local version part is unexpected")
if len(self.release_parts) != 3:
raise InvalidVersionError(
"release part should contain only 3 parts")
@property
def parts(self) -> Iterable:
return super().parts[1:6]
@property
def upstream(self) -> SourceVersion:
""" Upstream part of the package. """
return self
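# Illustrative usage (a sketch for exposition; the demo below is not part of
# the original module):
if __name__ == '__main__':
    # GenericVersion compares the raw string, so '1.10' sorts before '1.9'.
    assert GenericVersion('1.10') < GenericVersion('1.9')
    # PEP440-based versions parse numerically; 'packaging' is used if present.
    plenum = PlenumVersion('1.2.3.rc1')
    assert plenum.release_parts == (1, 2, 3)
    try:
        assert PlenumVersion('1.2.3') > plenum
    except NotImplementedError:
        # comparison requires the 'packaging' package (see the fallback above)
        pass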
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Module for translating ONNX operators into Mxnet operatoes"""
# pylint: disable=unused-argument,protected-access
import numpy as np
from . import _translation_utils as translation_utils
from .... import symbol
# Method definitions for the callable objects mapped in the import_helper module
def identity(attrs, inputs, proto_obj):
"""Returns the identity function of the the input."""
return 'identity', attrs, inputs
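# Hypothetical helper (not part of this module) illustrating the convention
# shared by the translators below: each returns either an operator-name
# string plus fixed-up attributes, or an already-built MXNet symbol. The
# real dispatch lives in the importer via the import_helper mapping.
def _apply_translation_example(translator, attrs, inputs, proto_obj):
    op, new_attrs, new_inputs = translator(attrs, inputs, proto_obj)
    if isinstance(op, str):
        # Name-based result: look the operator up on mxnet.symbol.
        return getattr(symbol, op)(*new_inputs, **new_attrs)
    # Symbol-based result: the translator already constructed the graph.
    return op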
def random_uniform(attrs, inputs, proto_obj):
"""Draw random samples from a uniform distribtuion."""
new_attr = translation_utils._remove_attributes(attrs, ['seed'])
return 'random_uniform', new_attr, inputs
def random_normal(attrs, inputs, proto_obj):
"""Draw random samples from a Gaussian distribution."""
new_attr = translation_utils._remove_attributes(attrs, ['seed'])
new_attr = translation_utils._fix_attribute_names(new_attr, {'mean' : 'loc'})
    return 'random_normal', new_attr, inputs
# Arithmetic Operations
def add(attrs, inputs, proto_obj):
"""Adding two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_add', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_add', new_attr, inputs
def subtract(attrs, inputs, proto_obj):
"""Subtracting two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_sub', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_sub', new_attr, inputs
def multiply(attrs, inputs, proto_obj):
"""Multiply two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_mul', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_mul', new_attr, inputs
def divide(attrs, inputs, proto_obj):
"""Divide two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_div', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_div', new_attr, inputs
def mean(attrs, inputs, proto_obj):
"""Mean of all the input tensors."""
concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs]
concat_sym = symbol.concat(*concat_input, dim=0)
mean_sym = symbol.mean(concat_sym, axis=0)
return mean_sym, attrs, inputs
def logical_and(attrs, inputs, proto_obj):
"""Logical and of two input arrays."""
return 'broadcast_logical_and', attrs, inputs
def logical_or(attrs, inputs, proto_obj):
"""Logical or of two input arrays."""
return 'broadcast_logical_or', attrs, inputs
def logical_xor(attrs, inputs, proto_obj):
"""Logical xor of two input arrays."""
return 'broadcast_logical_xor', attrs, inputs
def logical_not(attrs, inputs, proto_obj):
"""Logical not of two input arrays."""
return 'logical_not', attrs, inputs
def absolute(attrs, inputs, proto_obj):
"""Returns element-wise absolute value of the input."""
return 'abs', attrs, inputs
def negative(attrs, inputs, proto_obj):
"""Negation of every element in a tensor"""
return 'negative', attrs, inputs
def add_n(attrs, inputs, proto_obj):
"""Elementwise sum of arrays"""
return 'add_n', attrs, inputs
# Sorting and Searching
def argmax(attrs, inputs, proto_obj):
"""Returns indices of the maximum values along an axis"""
axis = attrs.get('axis', 0)
keepdims = attrs.get('keepdims', 1)
argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims)
# onnx argmax operator always expects int64 as output type
cast_attrs = {'dtype': 'int64'}
return 'cast', cast_attrs, argmax_op
def argmin(attrs, inputs, proto_obj):
"""Returns indices of the minimum values along an axis."""
axis = attrs.get('axis', 0)
keepdims = attrs.get('keepdims', 1)
argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims)
    # onnx argmin operator always expects int64 as output type
cast_attrs = {'dtype': 'int64'}
return 'cast', cast_attrs, argmin_op
def maximum(attrs, inputs, proto_obj):
"""
Elementwise maximum of arrays.
MXNet maximum compares only two symbols at a time.
ONNX can send more than two to compare.
Breaking into multiple mxnet ops to compare two symbols at a time
"""
if len(inputs) > 1:
mxnet_op = symbol.maximum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.maximum(mxnet_op, op_input)
else:
mxnet_op = symbol.maximum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
def minimum(attrs, inputs, proto_obj):
"""Elementwise minimum of arrays."""
# MXNet minimum compares only two symbols at a time.
# ONNX can send more than two to compare.
# Breaking into multiple mxnet ops to compare two symbols at a time
if len(inputs) > 1:
mxnet_op = symbol.minimum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.minimum(mxnet_op, op_input)
else:
mxnet_op = symbol.minimum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
def lesser(attrs, inputs, proto_obj):
"""Logical Lesser operator with broadcasting."""
return 'broadcast_lesser', attrs, inputs
def greater(attrs, inputs, proto_obj):
"""Logical Greater operator with broadcasting."""
return 'broadcast_greater', attrs, inputs
def equal(attrs, inputs, proto_obj):
"""Logical Equal operator with broadcasting."""
return 'broadcast_equal', attrs, inputs
#Hyperbolic functions
def tanh(attrs, inputs, proto_obj):
"""Returns the hyperbolic tangent of the input array."""
return 'tanh', attrs, inputs
# Rounding
def ceil(attrs, inputs, proto_obj):
""" Calculate ceil value for input """
return 'ceil', attrs, inputs
def floor(attrs, inputs, proto_obj):
""" Calculate floor value for input """
return 'floor', attrs, inputs
# Joining and splitting
def concat(attrs, inputs, proto_obj):
""" Joins input arrays along a given axis. """
new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'})
return 'concat', new_attrs, inputs
# Basic neural network functions
def softsign(attrs, inputs, proto_obj):
"""Computes softsign of x element-wise."""
return 'softsign', attrs, inputs
def sigmoid(attrs, inputs, proto_obj):
"""Computes elementwise sigmoid of the input array"""
return 'sigmoid', attrs, inputs
def hardsigmoid(attrs, inputs, proto_obj):
"""Computes elementwise hard sigmoid of the input array"""
return 'hard_sigmoid', attrs, inputs
def relu(attrs, inputs, proto_obj):
"""Computes rectified linear function."""
return 'relu', attrs, inputs
def pad(attrs, inputs, proto_obj):
""" Add padding to input tensor"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width',
'value' : 'constant_value'
})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs
def matrix_multiplication(attrs, inputs, proto_obj):
"""Performs general matrix multiplication"""
return 'linalg_gemm2', attrs, inputs
def batch_norm(attrs, inputs, proto_obj):
"""Batch normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps',
'is_test': 'fix_gamma'})
new_attrs = translation_utils._remove_attributes(new_attrs,
['spatial', 'consumed_inputs'])
# Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5)
cudnn_min_eps = 1e-5
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
# in test mode "fix_gamma" should be unset.
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return 'BatchNorm', new_attrs, inputs
def instance_norm(attrs, inputs, proto_obj):
"""Instance Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
new_attrs['eps'] = attrs.get('epsilon', 1e-5)
return 'InstanceNorm', new_attrs, inputs
def leaky_relu(attrs, inputs, proto_obj):
"""Leaky Relu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
return 'LeakyReLU', new_attrs, inputs
def _elu(attrs, inputs, proto_obj):
"""Elu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
return 'LeakyReLU', new_attrs, inputs
def _prelu(attrs, inputs, proto_obj):
"""PRelu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
return 'LeakyReLU', new_attrs, inputs
def _selu(attrs, inputs, proto_obj):
"""Selu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
return 'LeakyReLU', new_attrs, inputs
def softmax(attrs, inputs, proto_obj):
"""Softmax function."""
if 'axis' not in attrs:
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs
def log_softmax(attrs, inputs, proto_obj):
"""Computes the log softmax of the input. This is equivalent to
computing softmax followed by log."""
return 'log_softmax', attrs, inputs
def softplus(attrs, inputs, proto_obj):
"""Applies the sofplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
return 'Activation', new_attrs, inputs
def conv(attrs, inputs, proto_obj):
"""Compute N-D convolution on (N+2)-D input."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0
bias = None if no_bias is True else inputs[2]
# Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first
# use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
conv_op = symbol.Convolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return conv_op, new_attrs, inputs
def deconv(attrs, inputs, proto_obj):
"""Computes transposed convolution of the input tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
bias = None if no_bias is True else inputs[2]
# Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first
# use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return deconv_op, new_attrs, inputs
def fully_connected(attrs, inputs, proto_obj):
"""Applies a linear transformation: Y=XWT+b."""
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj)
return 'FullyConnected', new_attrs, inputs
def global_maxpooling(attrs, inputs, proto_obj):
"""Performs max pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'max'})
return 'Pooling', new_attrs, inputs
def global_avgpooling(attrs, inputs, proto_obj):
"""Performs avg pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'avg'})
return 'Pooling', new_attrs, inputs
def global_lppooling(attrs, inputs, proto_obj):
"""Performs global lp pooling on the input."""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'lp',
'p_value': p_value})
return 'Pooling', new_attrs, inputs
def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
trans_b = attrs['transB']
if 'alpha' in attrs:
alpha = attrs['alpha']
if 'beta' in attrs:
beta = attrs['beta']
flatten_a = symbol.flatten(inputs[0])
matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
transpose_a=trans_a, transpose_b=trans_b,
alpha=alpha)
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
'transB': 'transpose_b'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return gemm_op, new_attrs, inputs
def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' : 'nsize'})
return 'LRN', new_attrs, inputs
def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs
# Changing shape and type.
def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
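    # From opset 5 onwards, ONNX Reshape receives the target shape as a
    # second (constant) input rather than as an attribute, so it is read
    # back from the initializer params here.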
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in reshape_shape]
new_attrs = {'shape': reshape_shape}
return 'reshape', new_attrs, inputs[:1]
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs
def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else []
new_attrs = translation_utils._fix_attribute_names(attrs,
{'split' : 'num_outputs'})
if 'axis' not in attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})
if not split_list:
num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
else:
        raise NotImplementedError("Operator {} in MXNet does not support variable splits. "
"Tracking the issue to support variable split here: "
"https://github.com/apache/incubator-mxnet/issues/11594"
.format('split'))
new_attrs['num_outputs'] = num_outputs
return 'split', new_attrs, inputs
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    if len(axes) > 1:
        for i, axis in enumerate(axes[1:]):
            slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i + 1], end=end[i + 1])
return slice_op, new_attrs, inputs
def transpose(attrs, inputs, proto_obj):
"""Transpose the input array."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs
def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs
def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mxnet_op, attrs, inputs
def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
#Mxnet does not have axis support. By default uses axis=1
if 'axis' in attrs and attrs['axis'] != 1:
raise RuntimeError("Flatten operator only supports axis=1")
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs
def clip(attrs, inputs, proto_obj):
"""Clips (limits) the values in an array."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
'max' : 'a_max'})
if 'a_max' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
if 'a_min' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
return 'clip', new_attrs, inputs
def gather(attrs, inputs, proto_obj):
"""Gather elements from an input array along the given axis."""
return 'take', attrs, inputs
#Powers
def reciprocal(attrs, inputs, proto_obj):
"""Returns the reciprocal of the argument, element-wise."""
return 'reciprocal', attrs, inputs
def squareroot(attrs, inputs, proto_obj):
"""Returns element-wise square-root value of the input."""
return 'sqrt', attrs, inputs
def power(attrs, inputs, proto_obj):
"""Returns element-wise result of base element raised to powers from exp element."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
if 'broadcast' in attrs:
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
if attrs['broadcast'] == 1:
return 'broadcast_power', new_attrs, inputs
else:
mxnet_op = symbol.pow(inputs[0], inputs[1])
return mxnet_op, new_attrs, inputs
mxnet_op = symbol.broadcast_power(inputs[0], inputs[1])
return mxnet_op, new_attrs, inputs
def exponent(attrs, inputs, proto_obj):
"""Elementwise exponent of input array."""
return 'exp', attrs, inputs
def _cos(attrs, inputs, proto_obj):
"""Elementwise cosine of input array."""
return 'cos', attrs, inputs
def _sin(attrs, inputs, proto_obj):
"""Elementwise sine of input array."""
return 'sin', attrs, inputs
def _tan(attrs, inputs, proto_obj):
"""Elementwise tan of input array."""
return 'tan', attrs, inputs
def arccos(attrs, inputs, proto_obj):
"""Elementwise inverse cos of input array."""
return 'arccos', attrs, inputs
def arcsin(attrs, inputs, proto_obj):
"""Elementwise inverse sin of input array."""
return 'arcsin', attrs, inputs
def arctan(attrs, inputs, proto_obj):
"""Elementwise inverse tan of input array."""
return 'arctan', attrs, inputs
def _log(attrs, inputs, proto_obj):
"""Elementwise log of input array."""
return 'log', attrs, inputs
# Reduce Functions
def reduce_max(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'max', new_attrs, inputs
def reduce_mean(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'mean', new_attrs, inputs
def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs
def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs
def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs
def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs
def reduce_l1(attrs, inputs, proto_obj):
"""Reduce input tensor by l1 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'ord' : 1})
return 'norm', new_attrs, inputs
def shape(attrs, inputs, proto_obj):
"""Returns shape of input array."""
return 'shape_array', attrs, inputs
def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs
def avg_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('avg', inputs, new_attrs)
return new_op, new_attrs, inputs
def lp_pooling(attrs, inputs, proto_obj):
"""LP Pooling"""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
'p_value': p_value
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('lp', inputs, new_attrs)
return new_op, new_attrs, inputs
def max_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('max', inputs, new_attrs)
return new_op, new_attrs, inputs
def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs
def depthtospace(attrs, inputs, proto_obj):
"""Rearranges data from depth into blocks of spatial data."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "depth_to_space", new_attrs, inputs
def spacetodepth(attrs, inputs, proto_obj):
"""Rearranges blocks of spatial data into depth."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "space_to_depth", new_attrs, inputs
|
|
# notify.py - email notifications for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''hooks for sending email push notifications
This extension implements hooks to send email notifications when
changesets are sent from or received by the local repository.
First, enable the extension as explained in :hg:`help extensions`, and
register the hook you want to run. ``incoming`` and ``changegroup`` hooks
are run when changesets are received, while ``outgoing`` hooks are for
changesets sent to another repository::
[hooks]
# one email for each incoming changeset
incoming.notify = python:hgext.notify.hook
# one email for all incoming changesets
changegroup.notify = python:hgext.notify.hook
# one email for all outgoing changesets
outgoing.notify = python:hgext.notify.hook
This registers the hooks. To enable notification, subscribers must
be assigned to repositories. The ``[usersubs]`` section maps multiple
repositories to a given recipient. The ``[reposubs]`` section maps
multiple recipients to a single repository::
[usersubs]
# key is subscriber email, value is a comma-separated list of repo patterns
user@host = pattern
[reposubs]
# key is repo pattern, value is a comma-separated list of subscriber emails
pattern = user@host
A ``pattern`` is a ``glob`` matching the absolute path to a repository,
optionally combined with a revset expression. A revset expression, if
present, is separated from the glob by a hash. Example::
[reposubs]
*/widgets#branch(release) = qa-team@example.com
This sends to ``qa-team@example.com`` whenever a changeset on the ``release``
branch triggers a notification in any repository ending in ``widgets``.
In order to place them under direct user management, ``[usersubs]`` and
``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
incorporated by reference::
[notify]
config = /path/to/subscriptionsfile
Notifications will not be sent until the ``notify.test`` value is set
to ``False``; see below.
Notifications content can be tweaked with the following configuration entries:
notify.test
If ``True``, print messages to stdout instead of sending them. Default: True.
notify.sources
Space-separated list of change sources. Notifications are activated only
when a changeset's source is in this list. Sources may be:
:``serve``: changesets received via http or ssh
:``pull``: changesets received via ``hg pull``
:``unbundle``: changesets received via ``hg unbundle``
:``push``: changesets sent or received via ``hg push``
:``bundle``: changesets sent via ``hg unbundle``
Default: serve.
notify.strip
Number of leading slashes to strip from url paths. By default, notifications
reference repositories with their absolute path. ``notify.strip`` lets you
turn them into relative paths. For example, ``notify.strip=3`` will change
``/long/path/repository`` into ``repository``. Default: 0.
notify.domain
Default email domain for sender or recipients with no explicit domain.
notify.style
Style file to use when formatting emails.
notify.template
Template to use when formatting emails.
notify.incoming
Template to use when run as an incoming hook, overriding ``notify.template``.
notify.outgoing
Template to use when run as an outgoing hook, overriding ``notify.template``.
notify.changegroup
Template to use when running as a changegroup hook, overriding
``notify.template``.
notify.maxdiff
Maximum number of diff lines to include in notification email. Set to 0
to disable the diff, or -1 to include all of it. Default: 300.
notify.maxsubject
Maximum number of characters in email's subject line. Default: 67.
notify.diffstat
Set to True to include a diffstat before diff content. Default: True.
notify.merge
If True, send notifications for merge changesets. Default: True.
notify.mbox
If set, append mails to this mbox file instead of sending. Default: None.
notify.fromauthor
If set, use the committer of the first changeset in a changegroup for
the "From" field of the notification mail. If not set, take the user
from the pushing repo. Default: False.
If set, the following entries will also be used to customize the
notifications:
email.from
Email ``From`` address to use if none can be found in the generated
email content.
web.baseurl
Root repository URL to combine with repository paths when making
references. See also ``notify.strip``.
'''
from mercurial.i18n import _
from mercurial import patch, cmdutil, templater, util, mail
import email.Parser, email.Errors, fnmatch, socket, time
testedwith = 'internal'
# template for single changeset can include email headers.
single_template = '''
Subject: changeset in {webroot}: {desc|firstline|strip}
From: {author}
changeset {node|short} in {root}
details: {baseurl}{webroot}?cmd=changeset;node={node|short}
description:
\t{desc|tabindent|strip}
'''.lstrip()
# template for multiple changesets should not contain email headers,
# because only first set of headers will be used and result will look
# strange.
multiple_template = '''
changeset {node|short} in {root}
details: {baseurl}{webroot}?cmd=changeset;node={node|short}
summary: {desc|firstline}
'''
deftemplates = {
'changegroup': multiple_template,
}
class notifier(object):
'''email notification class.'''
def __init__(self, ui, repo, hooktype):
self.ui = ui
cfg = self.ui.config('notify', 'config')
if cfg:
self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
self.repo = repo
self.stripcount = int(self.ui.config('notify', 'strip', 0))
self.root = self.strip(self.repo.root)
self.domain = self.ui.config('notify', 'domain')
self.mbox = self.ui.config('notify', 'mbox')
self.test = self.ui.configbool('notify', 'test', True)
self.charsets = mail._charsets(self.ui)
self.subs = self.subscribers()
self.merge = self.ui.configbool('notify', 'merge', True)
mapfile = self.ui.config('notify', 'style')
template = (self.ui.config('notify', hooktype) or
self.ui.config('notify', 'template'))
self.t = cmdutil.changeset_templater(self.ui, self.repo,
False, None, mapfile, False)
if not mapfile and not template:
template = deftemplates.get(hooktype) or single_template
if template:
template = templater.parsestring(template, quoted=False)
self.t.use_template(template)
def strip(self, path):
'''strip leading slashes from local path, turn into web-safe path.'''
path = util.pconvert(path)
count = self.stripcount
while count > 0:
c = path.find('/')
if c == -1:
break
path = path[c + 1:]
count -= 1
return path
def fixmail(self, addr):
'''try to clean up email addresses.'''
addr = util.email(addr.strip())
if self.domain:
a = addr.find('@localhost')
if a != -1:
addr = addr[:a]
if '@' not in addr:
return addr + '@' + self.domain
return addr
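    # Illustrative behavior (assuming notify.domain = 'example.com'):
    #   fixmail('joe@localhost')      -> 'joe@example.com'
    #   fixmail('joe')                -> 'joe@example.com'
    #   fixmail('Joe <joe@work.org>') -> 'joe@work.org' (util.email keeps only
    #   the address part)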
def subscribers(self):
'''return list of email addresses of subscribers to this repo.'''
subs = set()
for user, pats in self.ui.configitems('usersubs'):
for pat in pats.split(','):
if '#' in pat:
pat, revs = pat.split('#', 1)
else:
revs = None
if fnmatch.fnmatch(self.repo.root, pat.strip()):
subs.add((self.fixmail(user), revs))
for pat, users in self.ui.configitems('reposubs'):
if '#' in pat:
pat, revs = pat.split('#', 1)
else:
revs = None
if fnmatch.fnmatch(self.repo.root, pat):
for user in users.split(','):
subs.add((self.fixmail(user), revs))
return [(mail.addressencode(self.ui, s, self.charsets, self.test), r)
for s, r in sorted(subs)]
def node(self, ctx, **props):
'''format one changeset, unless it is a suppressed merge.'''
if not self.merge and len(ctx.parents()) > 1:
return False
self.t.show(ctx, changes=ctx.changeset(),
baseurl=self.ui.config('web', 'baseurl'),
root=self.repo.root, webroot=self.root, **props)
return True
def skipsource(self, source):
'''true if incoming changes from this source should be skipped.'''
ok_sources = self.ui.config('notify', 'sources', 'serve').split()
return source not in ok_sources
def send(self, ctx, count, data):
'''send message.'''
# Select subscribers by revset
subs = set()
for sub, spec in self.subs:
if spec is None:
subs.add(sub)
continue
revs = self.repo.revs('%r and %d:', spec, ctx.rev())
if len(revs):
subs.add(sub)
continue
if len(subs) == 0:
self.ui.debug('notify: no subscribers to selected repo '
'and revset\n')
return
p = email.Parser.Parser()
try:
msg = p.parsestr(data)
except email.Errors.MessageParseError, inst:
raise util.Abort(inst)
# store sender and subject
sender, subject = msg['From'], msg['Subject']
del msg['From'], msg['Subject']
if not msg.is_multipart():
# create fresh mime message from scratch
# (multipart templates must take care of this themselves)
headers = msg.items()
payload = msg.get_payload()
# for notification prefer readability over data precision
msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
# reinstate custom headers
for k, v in headers:
msg[k] = v
msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
# try to make subject line exist and be useful
if not subject:
if count > 1:
subject = _('%s: %d new changesets') % (self.root, count)
else:
s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
subject = '%s: %s' % (self.root, s)
maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
if maxsubject:
subject = util.ellipsis(subject, maxsubject)
msg['Subject'] = mail.headencode(self.ui, subject,
self.charsets, self.test)
# try to make message have proper sender
if not sender:
sender = self.ui.config('email', 'from') or self.ui.username()
if '@' not in sender or '@localhost' in sender:
sender = self.fixmail(sender)
msg['From'] = mail.addressencode(self.ui, sender,
self.charsets, self.test)
msg['X-Hg-Notification'] = 'changeset %s' % ctx
if not msg['Message-Id']:
msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
(ctx, int(time.time()),
hash(self.repo.root), socket.getfqdn()))
msg['To'] = ', '.join(sorted(subs))
msgtext = msg.as_string()
if self.test:
self.ui.write(msgtext)
if not msgtext.endswith('\n'):
self.ui.write('\n')
else:
self.ui.status(_('notify: sending %d subscribers %d changes\n') %
(len(subs), count))
mail.sendmail(self.ui, util.email(msg['From']),
subs, msgtext, mbox=self.mbox)
def diff(self, ctx, ref=None):
maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
prev = ctx.p1().node()
ref = ref and ref.node() or ctx.node()
chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui))
difflines = ''.join(chunks).splitlines()
if self.ui.configbool('notify', 'diffstat', True):
s = patch.diffstat(difflines)
# s may be nil, don't include the header if it is
if s:
self.ui.write('\ndiffstat:\n\n%s' % s)
if maxdiff == 0:
return
elif maxdiff > 0 and len(difflines) > maxdiff:
msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
self.ui.write(msg % (len(difflines), maxdiff))
difflines = difflines[:maxdiff]
elif difflines:
self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
self.ui.write("\n".join(difflines))
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
'''send email notifications to interested subscribers.
if used as changegroup hook, send one email for all changesets in
changegroup. else send one email per changeset.'''
n = notifier(ui, repo, hooktype)
ctx = repo[node]
if not n.subs:
ui.debug('notify: no subscribers to repository %s\n' % n.root)
return
if n.skipsource(source):
ui.debug('notify: changes have source "%s" - skipping\n' % source)
return
ui.pushbuffer()
data = ''
count = 0
author = ''
if hooktype == 'changegroup' or hooktype == 'outgoing':
start, end = ctx.rev(), len(repo)
for rev in xrange(start, end):
if n.node(repo[rev]):
count += 1
if not author:
author = repo[rev].user()
else:
data += ui.popbuffer()
ui.note(_('notify: suppressing notification for merge %d:%s\n')
% (rev, repo[rev].hex()[:12]))
ui.pushbuffer()
if count:
n.diff(ctx, repo['tip'])
else:
if not n.node(ctx):
ui.popbuffer()
ui.note(_('notify: suppressing notification for merge %d:%s\n') %
(ctx.rev(), ctx.hex()[:12]))
return
count += 1
n.diff(ctx)
data += ui.popbuffer()
fromauthor = ui.config('notify', 'fromauthor')
if author and fromauthor:
data = '\n'.join(['From: %s' % author, data])
if count:
n.send(ctx, count, data)
|
|
import suggestive.widget as widget
import suggestive.mstat as mstat
import suggestive.signals as signals
from suggestive.error import CommandError
from suggestive.mvc.base import View, Model, Controller, TrackModel
from suggestive.buffer import Buffer
from mpd import CommandError as MpdCommandError
from suggestive.action import lastfm_love_track
import urwid
from math import floor, log10
import logging
logger = logging.getLogger('suggestive.playlist')
logger.addHandler(logging.NullHandler())
######################################################################
# Models
######################################################################
class PlaylistModel(Model):
def __init__(self):
super(PlaylistModel, self).__init__()
self._tracks = []
self._mpd_playlist = []
self._playlist_tracks = {}
self._now_playing = None
def __repr__(self):
return '<PlaylistModel>'
@property
def tracks(self):
return self._tracks
@tracks.setter
def tracks(self, newtracks):
self._tracks = newtracks
self.update_playlist_tracks()
self.update()
@property
def now_playing(self):
return self._now_playing
@now_playing.setter
def now_playing(self, value):
self._now_playing = value
@property
def mpd_playlist(self):
return self._mpd_playlist
@mpd_playlist.setter
def mpd_playlist(self, tracks):
self._mpd_playlist = tracks
self.update_playlist_tracks()
def track_ids(self, tracks):
return [track.db_track.id for track in tracks]
def update_playlist_tracks(self):
self._playlist_tracks = {
item['id']: track
for item, track in zip(self.mpd_playlist, self.tracks)
}
@property
def playlist_tracks(self):
return self._playlist_tracks
######################################################################
# Controllers
######################################################################
class PlaylistController(Controller):
def __init__(self, model, conf, loop):
super(PlaylistController, self).__init__(model, conf, loop)
self._conf = conf
# Connections
self._mpd = mstat.initialize_mpd(conf)
# Initialize
self.update_model()
@property
def playlist_size(self):
return len(self.model.tracks)
# Signal handler
@mstat.mpd_retry
def play_track(self, view):
logger.info('Play playlist track: {}'.format(view.canonical_text))
self._mpd.play(view.model.number)
@mstat.mpd_retry
def delete_track(self, view):
logger.info('Delete playlist track: {}'.format(view.canonical_text))
self._mpd.delete(view.model.number)
self.update_model()
# Signal handler
def love_track(self, view):
logger.info('Toggle loved for playlist track: {}'.format(
view.canonical_text))
db_track = view.model.db_track
if not db_track.id:
            logger.error('Cannot mark an invalid track as loved')
return
loved = db_track.lastfm_info.loved if db_track.lastfm_info else False
self.async_run(lastfm_love_track, self.conf, db_track, not loved)
mstat.db_track_love(self.conf, db_track, loved=not loved)
new_track = mstat.get_db_track(self.conf, db_track.id)
view.model.db_track = new_track
# Update expanded track model
lib_ctrl = self.controller_for('library')
track_model = lib_ctrl.model.track_model_for(db_track)
if track_model:
track_model.db_track = new_track
@mstat.mpd_retry
def clear(self):
self._mpd.stop()
self._mpd.clear()
self.update_model()
@mstat.mpd_retry
def load_playlist(self, name):
self._mpd.stop()
self._mpd.clear()
self._mpd.load(name)
@mstat.mpd_retry
def save_playlist(self, name):
try:
self._mpd.rm(name)
except Exception:
pass
self._mpd.save(name)
@mstat.mpd_retry
def mpd_playlist(self):
return self._mpd.playlistinfo()
@mstat.mpd_retry
def now_playing(self):
current = self._mpd.currentsong()
if current and 'pos' in current:
return int(current['pos'])
else:
return None
def playlist_tracks(self, playlist, positions):
logger.debug('Get playlist tracks from db')
db_tracks = mstat.database_tracks_from_mpd(self.conf, playlist)
logger.debug('Create models')
return [
TrackModel(db_track, position)
for db_track, position in zip(db_tracks, positions)
]
def track_models(self, playlist, current_tracks):
"""
Construct a list of TrackModels from the current playlist and the
current list of track models
"""
new_tracks = [None] * len(playlist)
missing = []
for position, item in enumerate(playlist):
if item['id'] in current_tracks:
track = TrackModel(
current_tracks[item['id']].db_track,
position)
new_tracks[position] = track
else:
missing.append((position, item))
if missing:
positions, missing_playlist = zip(*missing)
for track in self.playlist_tracks(missing_playlist, positions):
new_tracks[track.number] = track
assert None not in new_tracks
logger.debug('Track models: {}'.format([t.name for t in new_tracks]))
return new_tracks
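    # Illustrative example (not part of the original module): if the MPD
    # playlist is [{'id': 7}, {'id': 9}] and current_tracks == {7: <model>},
    # the model for id 7 is reused at position 0, while a TrackModel is
    # fetched from the database for id 9 and placed at position 1.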
def update_model(self):
logger.debug('Begin playlist model update')
playlist = self.mpd_playlist()
now_playing = self.now_playing()
if (playlist == self.model.mpd_playlist and
now_playing == self.model.now_playing):
logger.debug('No playlist changes; aborting')
return
logger.debug('Get current tracks')
current_tracks = self.model.playlist_tracks
logger.debug('Set model playlist')
# Update the playlist immediately so that extraneous attempts to update
# the playlist will be ignored
self.model.mpd_playlist = playlist
self.model.now_playing = now_playing
logger.debug('Get track models')
models = self.track_models(playlist, current_tracks)
logger.debug('Set track models')
self.model.tracks = models
logger.debug('Finished playlist model update')
def track_model_for(self, db_track):
return next(
(track for track in self.model.tracks if
track.db_track.id == db_track.id),
None
)
@mstat.mpd_retry
def seek(self, position):
try:
self._mpd.seekcur(str(position))
except MpdCommandError as ex:
logger.error('Could not seek to {}; {}'.format(position, ex))
@mstat.mpd_retry
def next_track(self, view):
self._mpd.next()
@mstat.mpd_retry
def previous_track(self, view):
self._mpd.previous()
######################################################################
# Views
######################################################################
@widget.signal_map({
'd': signals.DELETE,
'enter': signals.PLAY,
'm': signals.MOVE,
'L': signals.LOVE,
})
class TrackView(urwid.WidgetWrap, View, widget.Searchable):
__metaclass__ = urwid.signals.MetaSignals
signals = [signals.PLAY, signals.DELETE, signals.MOVE, signals.LOVE]
TRACK_FORMAT = '{artist} - {album} - {title}{suffix}'
def __init__(self,
model,
controller,
conf,
playing=False,
show_bumper=False,
focused=False):
View.__init__(self, model)
self._controller = controller
self._show_bumper = show_bumper
self.content = model.db_track
self._icon = urwid.SelectableIcon(self.text)
styles = self.styles(playing, focused)
super(TrackView, self).__init__(
urwid.AttrMap(self._icon, *styles))
def styles(self, playing, focused):
if focused and playing:
return ('focus playing',)
elif focused:
return ('focus playlist',)
elif playing:
return ('playing', 'focus playing')
else:
return ('playlist', 'focus playlist')
@property
def controller(self):
return self._controller
def add_bumper(self, text):
size = self.controller.playlist_size
digits = (floor(log10(size)) + 1) if size else 0
bumper = str(self.model.number)
return [
('bumper', bumper.ljust(digits + 1, ' ')),
text
]
@property
def text(self):
model = self.model
if model.loved:
suffix = ' [L]'
else:
suffix = ''
text = self.TRACK_FORMAT.format(
artist=model.db_artist.name,
album=model.db_album.name,
title=model.name,
suffix=suffix)
if self._show_bumper:
return self.add_bumper(text)
else:
return text
@property
def canonical_text(self):
model = self.model
return self.TRACK_FORMAT.format(
artist=model.db_artist.name,
album=model.db_album.name,
title=model.name,
suffix='')
@property
def searchable_text(self):
return self.canonical_text
def update(self):
self._w.original_widget.set_text(self.text)
@widget.signal_map({
'>': signals.NEXT_TRACK,
'<': signals.PREVIOUS_TRACK,
})
class PlaylistView(widget.SuggestiveListBox, View):
__metaclass__ = urwid.signals.MetaSignals
signals = [signals.NEXT_TRACK, signals.PREVIOUS_TRACK]
def __init__(self, model, controller, conf):
View.__init__(self, model)
self._controller = controller
self._conf = conf
walker = self.create_walker()
super(PlaylistView, self).__init__(walker)
@property
def controller(self):
return self._controller
def update(self, show_bumper=False):
logger.debug('Updating PlaylistView')
previous_position = self.focus_position
walker = self.body
walker[:] = self.track_views(show_bumper=show_bumper)
self.focus_remembered_position(previous_position)
def focus_remembered_position(self, position):
if len(self.body) == 0:
return
try:
# Try to focus on the same position in the playlist before we
# updated
self.set_focus(position)
except IndexError:
            # If that failed, the playlist probably shrank due to a deletion,
            # and we were on the last position before the delete. Therefore,
            # we should be able to focus on the position before the last.
try:
self.set_focus(position - 1)
except IndexError:
# There are no tracks left; don't bother setting focus
pass
def track_views(self, show_bumper=False):
current = self.controller.now_playing()
focus = self.focus_position if show_bumper else None
if not self.model.tracks:
body = [urwid.AttrMap(urwid.Text('Playlist is empty'), 'track')]
else:
body = []
for track_m in self.model.tracks:
view = TrackView(
track_m,
self.controller,
self._conf,
playing=(track_m.number == current),
show_bumper=show_bumper,
focused=(track_m.number == focus))
urwid.connect_signal(
view,
signals.PLAY,
self.controller.play_track)
urwid.connect_signal(
view,
signals.DELETE,
self.controller.delete_track)
urwid.connect_signal(
view,
signals.LOVE,
self.controller.love_track)
body.append(view)
return body
def create_walker(self):
body = self.track_views()
return urwid.SimpleFocusListWalker(body)
def move_update_index(self, current, index):
try:
items = self.body
n_items = len(items)
if index >= n_items:
raise IndexError
logger.debug('Temporary move from {} to {}'.format(
current, index))
if index > current:
focus = items[current]
items.insert(index + 1, focus)
items.pop(current)
elif index < current:
focus = items.pop(current)
items.insert(index, focus)
except IndexError:
logger.error('Index out of range')
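    # Illustrative example (not part of the original module): with items
    # [a, b, c, d], a temporary move from index 1 to index 3 inserts the
    # focused item b after d and then pops its old slot, yielding
    # [a, c, d, b] without touching MPD yet.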
def keypress(self, size, key):
if key == 'c':
self.controller.clear()
super(PlaylistView, self).keypress(size, None)
return True
return super(PlaylistView, self).keypress(size, key)
######################################################################
# Buffer
######################################################################
class PlaylistBuffer(Buffer):
ITEM_FORMAT = '{artist} - {album} - {title}{suffix}'
def __init__(self, conf, loop):
self.conf = conf
self.model = PlaylistModel()
self.controller = PlaylistController(self.model, conf, loop)
self.view = PlaylistView(self.model, self.controller, conf)
urwid.connect_signal(self.view, signals.NEXT_TRACK,
self.controller.next_track)
urwid.connect_signal(self.view, signals.PREVIOUS_TRACK,
self.controller.previous_track)
self.current_track = None
self.status_format = conf.playlist.status_format
super(PlaylistBuffer, self).__init__(self.view)
self.update_status('Playlist')
def setup_bindings(self):
keybinds = super(PlaylistBuffer, self).setup_bindings()
keybinds.update({
'm': self.move_track,
})
return keybinds
def search(self, searcher):
self.view.search(searcher)
def next_search(self):
self.view.next_search_item()
def will_accept_focus(self):
return len(self.model.tracks) > 0
def move_track(self):
logger.debug('Start playlist move')
self.view.update(show_bumper=True)
self.move_prompt = widget.PlaylistMovePrompt(
self.view.focus_position)
urwid.connect_signal(
self.move_prompt,
signals.PROMPT_DONE,
self.complete_move)
urwid.connect_signal(
self.move_prompt,
signals.UPDATE_INDEX,
self.view.move_update_index)
self.update_footer(urwid.AttrMap(self.move_prompt, 'footer'))
self.update_focus('footer')
def complete_move(self, value, current_position):
urwid.disconnect_signal(
self,
self.move_prompt,
signals.PROMPT_DONE,
self.complete_move)
urwid.disconnect_signal(
self,
self.move_prompt,
signals.UPDATE_INDEX,
self.view.move_update_index)
self.update_focus('body')
try:
new_index = int(value)
logger.debug('Moving playlist track from {} to {}'.format(
current_position, new_index))
mpd = mstat.initialize_mpd(self.conf)
mpd.move(current_position, new_index)
self.view.focus_position = new_index
except (TypeError, ValueError):
logger.error('Invalid move index: {}'.format(value))
self.view.update()
self.update()
def now_playing_index(self, mpd):
current = mpd.currentsong()
if current and 'pos' in current:
return int(current['pos'])
else:
return None
def track_changed(self):
mpd = mstat.initialize_mpd(self.conf)
return self.current_track != self.now_playing_index(mpd)
def update(self, *args):
self.controller.update_model()
def update_playing_status(self):
self.update_status(self.status_text())
def status_params(self, status, track):
elapsed_time = int(status.get('time', '0').split(':')[0])
total_time = int(track.get('time', '0').split(':')[0])
elapsed_min, elapsed_sec = elapsed_time // 60, elapsed_time % 60
total_min, total_sec = total_time // 60, total_time % 60
state = status['state']
if state == 'play':
state = 'Now Playing'
return {
'status': state[0].upper() + state[1:],
'time_elapsed': '{}:{}'.format(
elapsed_min,
str(elapsed_sec).rjust(2, '0')
),
'time_total': '{}:{}'.format(
total_min,
str(total_sec).rjust(2, '0')
),
'title': track.get('title', 'Unknown Track'),
'artist': track.get('artist', 'Unknown Artist'),
'album_artist': track.get('album_artist', 'Unknown Artist'),
'album': track.get('album', 'Unknown Album'),
'filename': track['file'],
'track': track.get('track', 'Unknown'),
'date': track.get('date', 'Unknown'),
}
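    # Illustrative example (not part of the original module): with
    # status == {'time': '75:210', 'state': 'play'} and a track whose
    # 'time' is '210', time_elapsed renders as '1:15' and time_total
    # as '3:30'.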
def status_text(self):
mpd = mstat.initialize_mpd(self.conf)
status = mpd.status()
text = ''
songid = status.get('songid')
if songid:
track = mpd.playlistid(songid)
if track:
params = self.status_params(status, track[0])
text = self.status_format.format(**params)
return 'Playlist | ' + text
return 'Playlist'
def clear_mpd_playlist(self):
self.controller.clear()
def load_playlist(self, name=None):
if name is None:
raise CommandError('Missing parameter: name')
try:
self.controller.load_playlist(name)
self.update_footer('Loaded playlist {}'.format(name))
return True
except MpdCommandError as ex:
logger.debug(ex)
raise CommandError("Unable to load playlist '{}'".format(
name))
def save_playlist(self, name=None):
if name is None:
raise CommandError('Missing parameter: name')
try:
self.controller.save_playlist(name)
self.update_footer('Saved playlist {}'.format(name))
return True
except MpdCommandError as ex:
logger.debug(ex)
raise CommandError("Unable to save playlist '{}'".format(
name))
def seek(self, position=None):
if position is None:
return
self.controller.seek(position)
|
|
"""
sentry.web.helpers
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.conf import settings
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponse
from django.template import loader, RequestContext, Context
from django.utils.safestring import mark_safe
from sentry import options
from sentry.constants import EVENTS_PER_PAGE
from sentry.models import Project, Team, ProjectOption
logger = logging.getLogger('sentry.errors')
def group_is_public(group, user):
"""
Return ``True`` if the this group if the user viewing it should see a restricted view.
This check should be used in combination with project membership checks, as we're only
verifying if the user should have a restricted view of something they already have access
to.
"""
# if the group isn't public, this check doesn't matter
if not group.is_public:
return False
# anonymous users always are viewing as if it were public
if not user.is_authenticated():
return True
# superusers can always view events
if user.is_superuser:
return False
# project owners can view events
if group.project in Project.objects.get_for_user(team=group.project.team, user=user):
return False
return True
_LOGIN_URL = None
def get_login_url(reset=False):
global _LOGIN_URL
if _LOGIN_URL is None or reset:
# if LOGIN_URL resolves force login_required to it instead of our own
# XXX: this must be done as late as possible to avoid idempotent requirements
try:
resolve(settings.LOGIN_URL)
except Exception:
_LOGIN_URL = settings.SENTRY_LOGIN_URL
else:
_LOGIN_URL = settings.LOGIN_URL
if _LOGIN_URL is None:
_LOGIN_URL = reverse('sentry-login')
return _LOGIN_URL
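# Illustrative note (not part of the original module): the first call
# resolves and caches the login URL; get_login_url(reset=True) forces a
# re-read after settings have changed.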
def get_default_context(request, existing_context=None, team=None):
from sentry.plugins import plugins
context = {
'EVENTS_PER_PAGE': EVENTS_PER_PAGE,
'URL_PREFIX': settings.SENTRY_URL_PREFIX,
'PLUGINS': plugins,
'ALLOWED_HOSTS': settings.ALLOWED_HOSTS,
'SENTRY_RAVEN_JS_URL': settings.SENTRY_RAVEN_JS_URL,
}
if request:
if existing_context and not team and 'team' in existing_context:
team = existing_context['team']
if team:
context['organization'] = team.organization
context.update({
'request': request,
})
if (not existing_context or 'TEAM_LIST' not in existing_context) and team:
context['TEAM_LIST'] = Team.objects.get_for_user(
organization=team.organization,
user=request.user,
with_projects=True,
)
return context
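# Minimal usage sketch (not part of the original module; 'request' and
# 'team' are assumed to come from the caller):
#
#   context = get_default_context(request, {'team': team})
#   context['TEAM_LIST']  # teams visible to request.user, with projects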
def render_to_string(template, context=None, request=None):
# HACK: set team session value for dashboard redirect
if context and 'team' in context and isinstance(context['team'], Team):
team = context['team']
else:
team = None
default_context = get_default_context(request, context, team=team)
if context is None:
context = default_context
else:
context = dict(context)
context.update(default_context)
if request:
context = RequestContext(request, context)
else:
context = Context(context)
return loader.render_to_string(template, context)
def render_to_response(template, context=None, request=None, status=200):
response = HttpResponse(render_to_string(template, context, request))
response.status_code = status
return response
def plugin_config(plugin, project, request):
"""
Configure the plugin site wide.
Returns a tuple composed of a redirection boolean and the content to
be displayed.
"""
NOTSET = object()
plugin_key = plugin.get_conf_key()
if project:
form_class = plugin.project_conf_form
template = plugin.project_conf_template
else:
form_class = plugin.site_conf_form
template = plugin.site_conf_template
test_results = None
initials = plugin.get_form_initial(project)
for field in form_class.base_fields:
key = '%s:%s' % (plugin_key, field)
if project:
value = ProjectOption.objects.get_value(project, key, NOTSET)
else:
value = options.get(key)
if value is not NOTSET:
initials[field] = value
form = form_class(
request.POST or None,
initial=initials,
prefix=plugin_key
)
if form.is_valid():
if 'action_test' in request.POST and plugin.is_testable():
try:
test_results = plugin.test_configuration(project)
except Exception as exc:
if hasattr(exc, 'read') and callable(exc.read):
test_results = '%s\n%s' % (exc, exc.read())
else:
test_results = exc
if test_results is None:
test_results = 'No errors returned'
else:
for field, value in form.cleaned_data.iteritems():
key = '%s:%s' % (plugin_key, field)
if project:
ProjectOption.objects.set_value(project, key, value)
else:
options.set(key, value)
return ('redirect', None)
# TODO(mattrobenolt): Reliably determine if a plugin is configured
# if hasattr(plugin, 'is_configured'):
# is_configured = plugin.is_configured(project)
# else:
# is_configured = True
is_configured = True
from django.template.loader import render_to_string
return ('display', mark_safe(render_to_string(template, {
'form': form,
'request': request,
'plugin': plugin,
'plugin_description': plugin.get_description() or '',
'plugin_test_results': test_results,
'plugin_is_configured': is_configured,
}, context_instance=RequestContext(request))))
def get_raven_js_url():
return settings.SENTRY_RAVEN_JS_URL
|
|
"""Implementation of restricted boltzmann machine
You need to be able to deal with different energy functions
This allows you to deal with real valued units.
TODO: monitor overfitting
"""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
from common import *
EXPENSIVE_CHECKS_ON = False
# TODO: different learning rates for weights and biases
# TODO: nesterov method for momentum
# TODO: rmsprop
"""
Represents an RBM
"""
class RBM(object):
def __init__(self, nrVisible, nrHidden, trainingFunction, dropout,
visibleDropout, activationFun=sigmoid):
# dropout = 1 means no dropout, keep all the weights
self.dropout = dropout
# dropout = 1 means no dropout, keep all the weights
self.visibleDropout = visibleDropout
self.nrHidden = nrHidden
self.nrVisible = nrVisible
self.trainingFunction = trainingFunction
self.activationFun = activationFun
self.initialized = False
def train(self, data):
    # If the network has not been initialized yet, do it now,
    # i.e. if this is the first training batch
    if not self.initialized:
      self.weights = self.initializeWeights(self.nrVisible, self.nrHidden)
      self.biases = self.initializeBiases(data, self.nrHidden)
# self.data = data
# else:
# self.data = np.concatenate(self.data, data)
self.biases, self.weights = self.trainingFunction(data,
self.biases,
self.weights,
self.activationFun,
self.dropout,
self.visibleDropout)
self.testWeights = self.weights * self.dropout
assert self.weights.shape == (self.nrVisible, self.nrHidden)
assert self.biases[0].shape[0] == self.nrVisible
assert self.biases[1].shape[0] == self.nrHidden
""" Reconstructs the data given using this boltzmann machine."""
def reconstruct(self, dataInstances):
return reconstruct(self.biases, self.testWeights, dataInstances,
self.activationFun)
def hiddenRepresentation(self, dataInstances):
return updateLayer(Layer.HIDDEN, dataInstances, self.biases,
self.testWeights, self.activationFun, True)
@classmethod
def initializeWeights(cls, nrVisible, nrHidden):
return np.random.normal(0, 0.01, (nrVisible, nrHidden))
@classmethod
  def initializeBiases(cls, data, nrHidden):
    # get the percentage of data points that have the i'th unit on
    # and set the visible bias to log(p / (1 - p))
percentages = data.mean(axis=0, dtype='float')
vectorized = np.vectorize(safeLogFraction, otypes=[np.float])
visibleBiases = vectorized(percentages)
hiddenBiases = np.zeros(nrHidden)
return np.array([visibleBiases, hiddenBiases])
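# Illustrative note on the bias initialisation above (not part of the
# original module): if 25% of the data points have unit i on, the visible
# bias for unit i starts at log(0.25 / 0.75) ~= -1.1, following Hinton's
# practical guide to training RBMs.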
def reconstruct(biases, weights, dataInstances, activationFun):
hidden = updateLayer(Layer.HIDDEN, dataInstances, biases, weights,
activationFun, True)
visibleReconstructions = updateLayer(Layer.VISIBLE, hidden,
biases, weights, activationFun, False)
return visibleReconstructions
def reconstructionError(biases, weights, data, activationFun):
  # Returns the RMSE of the reconstruction of the data.
  # Good to keep track of: it should decrease through training,
  # initially faster and then slower.
reconstructions = reconstruct(biases, weights, data, activationFun)
return rmse(reconstructions, data)
""" Training functions."""
""" Full CD function.
Arguments:
data: the data to use for traning.
A numpy ndarray.
biases:
Returns:
Defaults the mini batch size 1, so normal learning
"""
# Think of removing the step method altogether and keeping just one, to
# optimize the code but also to make it easier to change, rather than
# having a function that you pass in for every batch. If a nice and easy
# refactoring can be seen, then you can do that.
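# Illustrative sketch (not part of the original module): a single CD-1
# weight update, assuming binary hidden units and the updateLayer helper
# defined below. The names v0, h0, v1, h1 and lr are local to this sketch.
def _cd1WeightUpdateSketch(v0, biases, weights, activationFun, lr=0.01):
  # positive phase: hidden activations driven by the data
  h0 = updateLayer(Layer.HIDDEN, v0, biases, weights, activationFun, True)
  # negative phase: one step of Gibbs sampling
  v1 = updateLayer(Layer.VISIBLE, h0, biases, weights, activationFun, False)
  h1 = updateLayer(Layer.HIDDEN, v1, biases, weights, activationFun, False)
  # <v h>_data - <v h>_model, scaled by the learning rate
  return lr * (np.dot(v0.T, h0) - np.dot(v1.T, h1))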
def contrastiveDivergence(data, biases, weights, activationFun, dropout,
visibleDropout, miniBatchSize=10):
N = len(data)
epochs = N / miniBatchSize
  # sample from the probability distribution to choose which visible
  # units to keep for dropout
on = sample(visibleDropout, data.shape)
dropoutData = data * on
epsilon = 0.01
decayFactor = 0.0002
weightDecay = True
reconstructionStep = 50
oldDeltaWeights = np.zeros(weights.shape)
oldDeltaVisible = np.zeros(biases[0].shape)
oldDeltaHidden = np.zeros(biases[1].shape)
batchLearningRate = epsilon / miniBatchSize
print "batchLearningRate"
print batchLearningRate
for epoch in xrange(epochs):
batchData = dropoutData[epoch * miniBatchSize: (epoch + 1) * miniBatchSize, :]
if epoch < epochs / 100:
momentum = 0.5
else:
momentum = 0.95
if epoch < (N/7) * 10:
cdSteps = 3
elif epoch < (N/9) * 10:
cdSteps = 5
else:
cdSteps = 10
if EXPENSIVE_CHECKS_ON:
if epoch % reconstructionStep == 0:
print "reconstructionError"
print reconstructionError(biases, weights, data, activationFun)
weightsDiff, visibleBiasDiff, hiddenBiasDiff =\
modelAndDataSampleDiffs(batchData, biases, weights,
activationFun, dropout, cdSteps)
# Update the weights
# data - model
# Positive phase - negative
# Weight decay factor
deltaWeights = (batchLearningRate * weightsDiff
- epsilon * weightDecay * decayFactor * weights)
deltaVisible = batchLearningRate * visibleBiasDiff
deltaHidden = batchLearningRate * hiddenBiasDiff
deltaWeights += momentum * oldDeltaWeights
deltaVisible += momentum * oldDeltaVisible
deltaHidden += momentum * oldDeltaHidden
oldDeltaWeights = deltaWeights
oldDeltaVisible = deltaVisible
oldDeltaHidden = deltaHidden
    # Update the weights
weights += deltaWeights
# Update the visible biases
biases[0] += deltaVisible
# Update the hidden biases
biases[1] += deltaHidden
print reconstructionError(biases, weights, data, activationFun)
return biases, weights
def modelAndDataSampleDiffs(batchData, biases, weights, activationFun,
dropout, cdSteps):
  # Compute the hidden units from the data
hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
binary=True)
  # Choose the units to be active at this point:
  # different sets for each element in the mini batch
on = sample(dropout, hidden.shape)
dropoutHidden = on * hidden
hiddenReconstruction = dropoutHidden
for i in xrange(cdSteps - 1):
visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
biases, weights, activationFun,
binary=False)
hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
biases, weights, activationFun,
binary=True)
# sample the hidden units active (for dropout)
hiddenReconstruction = hiddenReconstruction * on
# Do the last reconstruction from the probabilities in the last phase
visibleReconstruction = updateLayer(Layer.VISIBLE, hiddenReconstruction,
biases, weights, activationFun,
binary=False)
hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
biases, weights, activationFun,
binary=False)
hiddenReconstruction = hiddenReconstruction * on
# here it should be hidden * on - hiddenreconstruction
# also below in the hidden bias
weightsDiff = np.dot(batchData.T, dropoutHidden) -\
np.dot(visibleReconstruction.T, hiddenReconstruction)
assert weightsDiff.shape == weights.shape
visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
assert visibleBiasDiff.shape == biases[0].shape
hiddenBiasDiff = np.sum(dropoutHidden - hiddenReconstruction, axis=0)
assert hiddenBiasDiff.shape == biases[1].shape
return weightsDiff, visibleBiasDiff, hiddenBiasDiff
""" Updates an entire layer. This procedure can be used both in training
and in testing.
Can even take multiple values of the layer, each of them given as rows
Uses matrix operations.
"""
def updateLayer(layer, otherLayerValues, biases, weights, activationFun,
binary=False):
bias = biases[layer]
size = otherLayerValues.shape[0]
if layer == Layer.VISIBLE:
activation = np.dot(otherLayerValues, weights.T)
else:
activation = np.dot(otherLayerValues, weights)
probs = activationFun(np.tile(bias, (size, 1)) + activation)
if binary:
# Sample from the distributions
return sampleAll(probs)
return probs
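# Illustrative usage (not part of the original module): propagate a batch
# of two visible vectors to the hidden layer, sampling binary states:
#   hidden = updateLayer(Layer.HIDDEN, np.array([[0., 1.], [1., 1.]]),
#                        biases, weights, sigmoid, binary=True)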
# Another training algorithm: persistent contrastive divergence. Slower than
# contrastive divergence, but gives better results. Hinton said it is not
# used in practice because it is too slow, but a NIPS paper suggests
# otherwise. This is a huge copy-paste of contrastiveDivergence; keep it
# like this for now.
def PCD(data, biases, weights, activationFun, dropout,
visibleDropout, miniBatchSize=10):
N = len(data)
epochs = N / miniBatchSize
  # sample from the probability distribution to choose which visible
  # units to keep for dropout
# on = sample(visibleDropout, data.shape)
# dropoutData = data * on
dropoutData = data
epsilon = 0.01
decayFactor = 0.0002
weightDecay = True
reconstructionStep = 50
oldDeltaWeights = np.zeros(weights.shape)
oldDeltaVisible = np.zeros(biases[0].shape)
oldDeltaHidden = np.zeros(biases[1].shape)
batchLearningRate = epsilon / miniBatchSize
print "batchLearningRate"
print batchLearningRate
# make this an argument or something
nrFantasyParticles = miniBatchSize
fantVisible = np.random.randint(2, size=(nrFantasyParticles, weights.shape[0]))
fantHidden = np.random.randint(2, size=(nrFantasyParticles, weights.shape[1]))
fantasyParticles = (fantVisible, fantHidden)
steps = 10
for epoch in xrange(epochs):
batchData = dropoutData[epoch * miniBatchSize: (epoch + 1) * miniBatchSize, :]
if epoch < epochs / 100:
momentum = 0.5
else:
momentum = 0.95
if EXPENSIVE_CHECKS_ON:
if epoch % reconstructionStep == 0:
print "reconstructionError"
print reconstructionError(biases, weights, data, activationFun)
print fantasyParticles[0]
print fantasyParticles[1]
weightsDiff, visibleBiasDiff, hiddenBiasDiff, fantasyParticles =\
modelAndDataSampleDiffsPCD(batchData, biases, weights,
activationFun, dropout, steps, fantasyParticles)
# Update the weights
# data - model
# Positive phase - negative
# Weight decay factor
deltaWeights = (batchLearningRate * weightsDiff
- epsilon * weightDecay * decayFactor * weights)
deltaVisible = batchLearningRate * visibleBiasDiff
deltaHidden = batchLearningRate * hiddenBiasDiff
deltaWeights += momentum * oldDeltaWeights
deltaVisible += momentum * oldDeltaVisible
deltaHidden += momentum * oldDeltaHidden
oldDeltaWeights = deltaWeights
oldDeltaVisible = deltaVisible
oldDeltaHidden = deltaHidden
    # Update the weights
weights += deltaWeights
# Update the visible biases
biases[0] += deltaVisible
# Update the hidden biases
biases[1] += deltaHidden
print reconstructionError(biases, weights, data, activationFun)
return biases, weights
# Same as modelAndDataSampleDiffs, but for persistent contrastive divergence.
# First, run it without dropout.
def modelAndDataSampleDiffsPCD(batchData, biases, weights, activationFun,
dropout, steps, fantasyParticles):
  # Compute the hidden units from the data
hidden = updateLayer(Layer.HIDDEN, batchData, biases, weights, activationFun,
binary=True)
  # Choose the units to be active at this point:
  # different sets for each element in the mini batch
# on = sample(dropout, hidden.shape)
# dropoutHidden = on * hidden
# hiddenReconstruction = dropoutHidden
for i in xrange(steps):
visibleReconstruction = updateLayer(Layer.VISIBLE, fantasyParticles[1],
biases, weights, activationFun,
binary=False)
hiddenReconstruction = updateLayer(Layer.HIDDEN, visibleReconstruction,
biases, weights, activationFun,
binary=True)
# sample the hidden units active (for dropout)
# hiddenReconstruction = hiddenReconstruction * on
fantasyParticles = (visibleReconstruction, hiddenReconstruction)
# here it should be hidden * on - hiddenReconstruction
# also below in the hidden bias
weightsDiff = np.dot(batchData.T, hidden) -\
np.dot(visibleReconstruction.T, hiddenReconstruction)
assert weightsDiff.shape == weights.shape
visibleBiasDiff = np.sum(batchData - visibleReconstruction, axis=0)
assert visibleBiasDiff.shape == biases[0].shape
hiddenBiasDiff = np.sum(hidden - hiddenReconstruction, axis=0)
assert hiddenBiasDiff.shape == biases[1].shape
return weightsDiff, visibleBiasDiff, hiddenBiasDiff, fantasyParticles
|
|
from ..external.qt import (QDialog, QWidget,
QHBoxLayout, QVBoxLayout, QGridLayout,
QGroupBox, QLabel,
QCheckBox, QLineEdit,
QDialogButtonBox,
QIntValidator, QDoubleValidator,
QTimer,
app)
import argparse as ap
import sys
class ArgparseWindow(QDialog):
"""This dialog takes an argparser as initialiser and returns an args
object just like argparse does."""
action_widgets = {}
"""A dictionary mapping an argparse action object to a tuple of
1. either None or a QCheckBox, that can toggle the switch
2. a QLineEdit or QCheckbox, that can set the value of the switch
"""
taken_dests = set()
"""What "dest" values are already taken."""
arguments = {}
"""What the commandline looked like the last time it was updated."""
_last_changed_obj = None
_red_background_timer = None
_red_background_item = None
def __init__(self, argparser, arguments=None, columns=3, **kwargs):
super(ArgparseWindow, self).__init__(**kwargs)
self.argp = argparser
if arguments:
self.arguments = arguments
else:
self.arguments = {}
        self._red_background_timer = QTimer(self)
        # wire the timer so the red error highlight clears itself
        self._red_background_timer.setSingleShot(True)
        self._red_background_timer.timeout.connect(self.reset_red_background)
self.action_widgets = {}
self.taken_dests = set()
self.columns = columns
self.setup_ui()
def _widget_with_checkbox(self, widget, action):
cont = QWidget(parent=self)
box = QCheckBox(action.dest, parent=self)
box.setObjectName("%s_active" % action.dest)
widget.setObjectName("%s_widget" % action.dest)
def set_last_changed_obj(*a):
self._last_changed_obj = box
box.toggled.connect(set_last_changed_obj)
box.toggled.connect(widget.setEnabled)
box.toggled.connect(self.update_cmdline)
widget.setEnabled(False)
outer = QVBoxLayout()
layout = QHBoxLayout()
layout.addWidget(box)
layout.addWidget(widget)
outer.addLayout(layout)
label = QLabel(action.help)
label.setWordWrap(True)
outer.addWidget(label)
cont.setLayout(outer)
return cont, box
def build_action_widget(self, action):
if isinstance(action, (ap._StoreTrueAction, ap._StoreFalseAction)):
w = QWidget(parent=self)
cont, box = self._widget_with_checkbox(w, action)
if action.dest in self.arguments:
box.setChecked(self.arguments[action.dest])
else:
box.setChecked(action.default)
elif isinstance(action, ap._StoreAction):
w = QLineEdit()
if action.type == int or action.type == long:
w.setValidator(QIntValidator(w))
elif action.type == float:
w.setValidator(QDoubleValidator(w))
def set_last_changed_obj(*a):
self._last_changed_obj = w
            if action.dest in self.arguments:
                w.setText(unicode(self.arguments[action.dest]))
            elif action.default:
                w.setText(unicode(action.default))
cont, box = self._widget_with_checkbox(w, action)
w.textChanged.connect(set_last_changed_obj)
w.textChanged.connect(self.update_cmdline)
elif isinstance(action, ap._HelpAction):
return None
else:
print "error"
print "could not build a widget for ", action
return None
self.action_widgets[action] = (box, w)
self.taken_dests.update([action.dest])
return cont
def build_action_group(self, ag):
w = QGroupBox(ag.title)
widgets = []
for action in ag._actions:
if action in self.action_widgets or action.dest in self.taken_dests:
continue
widget = self.build_action_widget(action)
if widget:
widgets.append(widget)
layout = QGridLayout()
for index, widget in enumerate(widgets):
layout.addWidget(widget, index / self.columns, index % self.columns)
w.setLayout(layout)
if not widgets:
w.deleteLater()
return None
return w
def setup_ui(self):
layout = QVBoxLayout()
self.cmdline = QLineEdit()
# XXX this could 'easily' be set to false with appropriate calls to
# self.argp.parse_args etc.
self.cmdline.setReadOnly(True)
layout.addWidget(self.cmdline)
for group in self.argp._action_groups:
group = self.build_action_group(group)
if group:
layout.addWidget(group)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonbox.accepted.connect(self.try_accept)
buttonbox.rejected.connect(self.reject)
layout.addWidget(buttonbox)
self.setLayout(layout)
def update_cmdline(self):
arguments = []
for action, (box, widget) in self.action_widgets.iteritems():
checked = box.isChecked()
if isinstance(action, ap._StoreFalseAction):
active = not checked
else:
active = checked
if active:
if isinstance(widget, QLineEdit):
arguments.extend([action.option_strings[-1], widget.text()])
else:
arguments.extend([action.option_strings[-1]])
# FIXME try validating against the argument parsers validators, too
try:
if not widget.hasAcceptableInput():
return
except AttributeError:
pass
self.arguments = arguments
self.cmdline.setText(" ".join(
[arg if " " not in arg else arg.replace(" ", '" "') for arg in self.arguments]))
correct_input = False
try:
rescue_stderr = sys.stderr
sys.stderr = open("/dev/null", "w")
self.args = self.argp.parse_args(self.arguments)
correct_input = True
except SystemExit:
if self._red_background_item:
self.reset_red_background()
self._last_changed_obj.setStyleSheet("background: red")
self._red_background_item = self._last_changed_obj
self._red_background_timer.start(2500)
# self._last_changed_obj.setVisible(False)
finally:
sys.stderr = rescue_stderr
if correct_input:
if self._last_changed_obj == self._red_background_item:
self.reset_red_background()
    def reset_red_background(self):
        if self._red_background_item is not None:
            self._red_background_item.setStyleSheet("")
        self._red_background_item = None
def try_accept(self):
self.update_cmdline() # be extra sure, that this is up to date
self.accept()
class NewZasimWindow(ArgparseWindow):
def __init__(self):
ap = make_argument_parser()
arguments = vars(ap.parse_args())
super(NewZasimWindow, self).__init__(ap, arguments)
def make_argument_parser():
def make_rule_number(input):
input = input.lower()
        try:
            if input.startswith("0x"):
                return int(input, 16)
            elif input.startswith("0") and not any([a in input for a in "89abcdef"]):
                # a leading zero with only octal digits: parse as octal
                # (base 8; the original used base 7, which rejects "7")
                return int(input, 8)
            else:
                return int(input)
except ValueError as e:
raise ap.ArgumentTypeError(str(e))
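    # Illustrative examples (not part of the original module, assuming the
    # octal fix above): "0x1e" -> 30, "017" -> 15, "110" -> 110.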
    def parse_intlist(text):
        if " " not in text and "," not in text:
            # a bare digit string like "012" becomes [0, 1, 2]
            return map(int, text)
        import re
        # return ints, not strings, for the delimited form as well
        return map(int, re.findall(r"\d+", text))
argp = ap.ArgumentParser(
description="Run a 1d BinRule, a 2d Game of Life, or a 2d elementary "
"cellular automaton")
argp.add_argument("--onedim", default=False, action="store_true",
help="generate a one-dimensional cellular automaton")
argp.add_argument("--twodim", default=True, action="store_false", dest="onedim",
help="generate a two-dimensional cellular automaton")
argp.add_argument("--life", default=False, action="store_true",
help="generate a conway's game of life - implies --twodim")
argp.add_argument("-x", "--width", default=200, dest="width", type=int,
help="the width of the image surface")
argp.add_argument("-y", "--height", default=200, dest="height", type=int,
help="the height of the image surface")
argp.add_argument("-z", "--scale", default=3, dest="scale", type=int,
help="the size of each cell of the configuration")
argp.add_argument("-r", "--rule", default=None, type=make_rule_number,
help="the elementary cellular automaton rule number to use")
argp.add_argument("-R", "--alt-rule", default=None, type=make_rule_number,
help="the alternative rule to use. Supplying this will turn nondet into dual-rule mode")
argp.add_argument("-c", "--dont-copy-borders", default=True, action="store_false", dest="copy_borders",
help="copy borders or just read zeros?")
argp.add_argument("--black", default=None, type=float,
help="what percentage of the cells to make black at the beginning. (between 2 and 100 or 0.0 and 1.0)")
argp.add_argument("--nondet", default=100, type=float,
help="with what percentage should cells be executed? (either between 2 and 100 or 0.0 and 1.0)")
argp.add_argument("--beta", default=100, type=float,
help="with what probability should a cell succeed in exposing its "\
"state to its neighbours? (either between 2 and 100 or 0.0 and 1.0)")
argp.add_argument("--no-histogram", default=False, action="store_true", dest="no_histogram",
help="don't display a histogram")
argp.add_argument("--no-activity", default=False, action="store_true", dest="no_activity",
help="don't display the activity")
argp.add_argument("--base", default=2, type=int,
help="The base of the cells.")
argp.add_argument("--sparse", default=False, action="store_true",
help="should a sparse loop be created?")
argp.add_argument("--background", type=parse_intlist,
help="What background pattern should be generated?")
argp.add_argument("--pattern", type=parse_intlist, action="append", dest="patterns",
help="Add a pattern to the available patterns for the layout.")
argp.add_argument("--layout", type=parse_intlist,
help="What combinations of patterns to put in the middle.")
argp.add_argument("--run",
help="Let the simulation run immediately.")
return argp
if __name__ == "__main__":
argp = make_argument_parser()
args = argp.parse_args()
win = ArgparseWindow(argp, vars(args))
win.show()
app.exec_()
|
|
from basic_game.descriptors import Descriptor
from basic_game.directions import directions
from basic_game.language import list_prefix, normalize_input, get_noun, prepositions
from basic_game.objects import Container
from basic_game.writer import DEBUG, ConsoleWriter
from basic_game.verbs import BaseVerb
class BasicGameEngine(object):
"""Given a completed GameWorld, starts a game."""
def __init__(self, basic_game_world):
self.writer = ConsoleWriter()
self.descriptor = Descriptor(self.writer)
self.game = basic_game_world
self.player = basic_game_world.player
self.animals = basic_game_world.animals
self.done = False
self.turn_count = 0
self.points = 0
basic_game_world.writer = self.writer
basic_game_world.engine = self
def run(self):
"""Run the main loop until game is done.
"""
while not self.done:
self._describe_setting()
if self.player.location.game_end:
if self.player.location.game_end.check(self.game, self.player.location):
self.writer.output(self.player.location.game_end.text)
break
if self.player.game_end:
if self.player.game_end.check(self.game, self.player):
self.writer.output(self.player.game_end.text)
break
if self.player.health < 0:
self.writer.output("Better luck next time!")
break
command = self._get_input()
if command == 'q' or command == 'quit':
break
self._do_action(command)
self.writer.output("\ngoodbye!\n")
def _describe_setting(self):
"""Describe the new setting and actors that the player has encountered.
"""
actor = self.player
# if the actor moved, describe the room
if actor.check_if_moved():
self.descriptor.output_title(actor.location)
self.descriptor.output_stats(self.turn_count, self.points)
self.descriptor.output_location_description(actor.location)
# See if the animals want to do anything
for animal in self.animals.values():
# first check that it is not dead
if animal.health >= 0:
animal.act_autonomously(actor.location)
def _get_input(self):
""" Request and parse out player input."""
self.writer.clear_text()
self.writer.output("")
user_input = input("> ")
        # remove punctuation and unnecessary words
command = normalize_input(user_input)
return command
def _do_action(self, command):
actor = self.player
words = command.split()
if not words:
return
        # Following the Infocom convention, commands are decomposed into
        # VERB(verb), OBJECT(noun), INDIRECT_OBJECT(indirect).
        # For example: "hit zombie with hammer" = HIT(verb) ZOMBIE(noun) WITH HAMMER(indirect).
things = list(actor.inventory.values()) + \
list(actor.location.contents.values()) + \
list(actor.location.exits.values()) + \
list(actor.location.actors.values()) + \
[actor.location] + \
[actor]
for c in actor.location.contents.values():
if isinstance(c, Container) and c.is_open:
things += c.contents.values()
potential_verbs = []
for t in things:
potential_verbs += t.verbs.keys()
# extract the VERB
verb = None
potential_verbs.sort(key=lambda key : -len(key))
for v in potential_verbs:
vv = v.split()
if list_prefix(vv, words):
verb = v
words = words[len(vv):]
if not verb:
verb = words[0]
words = words[1:]
# extract the OBJECT
noun = None
if words:
(noun, words) = get_noun(words, things)
# extract INDIRECT (object) in phrase of the form VERB OBJECT PREPOSITION INDIRECT
indirect = None
if len(words) > 1 and words[0].lower() in prepositions:
(indirect, words) = get_noun(words[1:], things)
self.turn_count += 1
# first check phrases
for thing in things:
f = thing.get_phrase(command, things)
if f:
if isinstance(f, BaseVerb):
if f.act(actor, noun, words):
return
else:
f(self.game, thing)
return
        # if we have an INDIRECT object, try its handler first
        # e.g. "hit cat with hammer" -> hammer.hit(actor, 'cat', [])
if indirect:
# try inventory and room contents
things = list(actor.inventory.values()) + \
list(actor.location.contents.values())
for thing in things:
if indirect == thing.name:
v = thing.get_verb(verb)
if v:
if v.act(actor, noun, words):
return
for a in actor.location.actors.values():
if indirect == a.name:
v = a.get_verb(verb)
if v:
if v.act(a, noun, words):
return
        # if we have a NOUN, try its handler next
if noun:
for thing in things:
if noun == thing.name:
v = thing.get_verb(verb)
if v:
if v.act(actor, None, words):
return
for a in actor.location.actors.values():
if noun == a.name:
v = a.get_verb(verb)
if v:
if v.act(a, None, words):
return
# location specific VERB
v = actor.location.get_verb(verb)
if v:
if v.act(actor, noun, words):
return
# handle directional moves of the actor
if not noun:
if verb in directions:
actor.act_go1(actor, verb, None)
return
# general actor VERB
v = actor.get_verb(verb)
if v:
if v.act(actor, noun, words):
return
# not understood
self.writer.output("Huh?")
self.turn_count -= 1
return
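    # Illustrative walk-through (not part of the original module): for
    # "hit zombie with hammer", verb='hit', noun='zombie', indirect='hammer';
    # the hammer's 'hit' handler is tried first, then the zombie's, then the
    # location's, and finally the player's own verbs.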
|
|
from campaignmanagement_example_helper import *
import json
# Common
def output_bing_ads_webfault_error(error):
if hasattr(error, 'ErrorCode'):
output_status_message("ErrorCode: {0}".format(error.ErrorCode))
if hasattr(error, 'Code'):
output_status_message("Code: {0}".format(error.Code))
if hasattr(error, 'Details'):
output_status_message("Details: {0}".format(error.Details))
if hasattr(error, 'FieldPath'):
output_status_message("FieldPath: {0}".format(error.FieldPath))
if hasattr(error, 'Message'):
output_status_message("Message: {0}".format(error.Message))
output_status_message('')
def output_webfault_errors(ex):
if not hasattr(ex.fault, "detail"):
raise Exception("Unknown WebFault")
error_attribute_sets = (
["ApiFault", "OperationErrors", "OperationError"],
["AdApiFaultDetail", "Errors", "AdApiError"],
["ApiFaultDetail", "BatchErrors", "BatchError"],
["ApiFaultDetail", "OperationErrors", "OperationError"],
["EditorialApiFaultDetail", "BatchErrors", "BatchError"],
["EditorialApiFaultDetail", "EditorialErrors", "EditorialError"],
["EditorialApiFaultDetail", "OperationErrors", "OperationError"],
)
for error_attribute_set in error_attribute_sets:
if output_error_detail(ex.fault.detail, error_attribute_set):
return
# Handle serialization errors, for example: The formatter threw an exception while trying to deserialize the message:
# There was an error while trying to deserialize parameter https://bingads.microsoft.com/CampaignManagement/v13:Entities.
if hasattr(ex.fault, 'detail') \
and hasattr(ex.fault.detail, 'ExceptionDetail'):
api_errors=ex.fault.detail.ExceptionDetail
if isinstance(api_errors, list):
for api_error in api_errors:
output_status_message(api_error.Message)
else:
output_status_message(api_errors.Message)
return
raise Exception("Unknown WebFault")
def output_error_detail(error_detail, error_attribute_set):
api_errors = error_detail
for field in error_attribute_set:
api_errors = getattr(api_errors, field, None)
if api_errors is None:
return False
if isinstance(api_errors, list):
for api_error in api_errors:
output_bing_ads_webfault_error(api_error)
else:
output_bing_ads_webfault_error(api_errors)
return True
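# Illustrative example (not part of the original module): with
# error_attribute_set == ["ApiFault", "OperationErrors", "OperationError"],
# this walks ex.fault.detail.ApiFault.OperationErrors.OperationError and
# prints every error found at that path.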
# Bulk
def output_percent_complete(progress):
output_status_message("Percent Complete: {0}".format(progress.percent_complete))
def output_bulk_errors(errors):
for error in errors:
if error.error is not None:
output_status_message("Number: {0}".format(error.error))
output_status_message("Error: {0}".format(error.number))
if error.editorial_reason_code is not None:
output_status_message("EditorialTerm: {0}".format(error.editorial_term))
output_status_message("EditorialReasonCode: {0}".format(error.editorial_reason_code))
output_status_message("EditorialLocation: {0}".format(error.editorial_location))
output_status_message("PublisherCountries: {0}".format(error.publisher_countries))
output_status_message('')
def output_bulk_quality_score_data(quality_score_data):
if quality_score_data is not None:
output_status_message("KeywordRelevance: {0}".format(quality_score_data.keyword_relevance))
output_status_message("LandingPageRelevance: {0}".format(quality_score_data.landing_page_relevance))
output_status_message("LandingPageUserExperience: {0}".format(quality_score_data._landing_page_user_experience))
output_status_message("QualityScore: {0}".format(quality_score_data.quality_score))
def output_bulk_bid_suggestions(bid_suggestions):
if bid_suggestions is not None:
output_status_message("BestPosition: {0}".format(bid_suggestions.best_position))
output_status_message("MainLine: {0}".format(bid_suggestions.main_line))
output_status_message("FirstPage: {0}".format(bid_suggestions.first_page))
def output_bulk_feeds(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkFeed:")
output_status_message("AccountId: {0}".format(entity.account_id))
output_status_message("ClientId: {0}".format(entity.client_id))
output_status_message("CustomAttributes: {0}".format(json.dumps(entity.custom_attributes, sort_keys=True, indent=4, separators=(',', ': '))))
output_status_message("Id: {0}".format(entity.id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Name: {0}".format(entity.name))
output_status_message("Status: {0}".format(entity.status))
output_status_message("SubType: {0}".format(entity.sub_type))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_feed_items(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkFeedItem:")
output_status_message("AdGroupName: {0}".format(entity.ad_group))
output_status_message("AudienceId: {0}".format(entity.audience_id))
output_status_message("CampaignName: {0}".format(entity.campaign))
output_status_message("ClientId: {0}".format(entity.client_id))
output_status_message("CustomAttributes: {0}".format(json.dumps(entity.custom_attributes, sort_keys=True, indent=4, separators=(',', ': '))))
if hasattr(entity, 'daytime_ranges') and entity.daytime_ranges is not None:
output_status_message("DayTimeRanges:")
for daytime_range in entity.daytime_ranges:
output_daytime(daytime_range)
output_status_message("DevicePreference: {0}".format(entity.device_preference))
output_status_message("EndDate: {0}".format(entity.end_date))
output_status_message("FeedId: {0}".format(entity.feed_id))
output_status_message("Id: {0}".format(entity.id))
output_status_message("IntentOption: {0}".format(entity.intent_option))
output_status_message("Keyword: {0}".format(entity.keyword))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("LocationId: {0}".format(entity.location_id))
output_status_message("MatchType: {0}".format(entity.match_type))
output_status_message("StartDate: {0}".format(entity.start_date))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_dynamic_search_ads(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkDynamicSearchAd:")
output_status_message("AdGroup Id: {0}".format(entity.ad_group_id))
output_status_message("AdGroup Name: {0}".format(entity.ad_group_name))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management DynamicSearchAd Object
output_ad(entity.dynamic_search_ad)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_expanded_text_ads(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkExpandedTextAd:")
output_status_message("AdGroup Id: {0}".format(entity.ad_group_id))
output_status_message("AdGroup Name: {0}".format(entity.ad_group_name))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management ExpandedTextAd Object
output_ad(entity.expanded_text_ad)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_product_partitions(bulk_ad_group_product_partitions):
"""
Outputs the list of BulkAdGroupProductPartition which each contain an AdGroupCriterion, formatted as a tree.
Each AdGroupCriterion must be either a BiddableAdGroupCriterion or NegativeAdGroupCriterion.
:param bulk_ad_group_product_partitions: The list of BulkAdGroupProductPartition to output formatted as a tree.
:type bulk_ad_group_product_partitions: BulkAdGroupProductPartition[]
"""
# Set up the tree for output
child_branches={}
tree_root=None
for bulk_ad_group_product_partition in bulk_ad_group_product_partitions:
ad_group_criterion=bulk_ad_group_product_partition.ad_group_criterion
partition=ad_group_criterion.Criterion
child_branches[ad_group_criterion.Id]=[]
# The product partition with ParentCriterionId set to null is the root node.
if partition.ParentCriterionId is not None:
child_branches[partition.ParentCriterionId].append(bulk_ad_group_product_partition)
else:
tree_root=bulk_ad_group_product_partition
# Outputs the tree root node and any children recursively
output_bulk_product_partition_tree(tree_root, child_branches, 0)
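# Illustrative example (not part of the original module): for partitions
# with criterion Ids 1 (root), 2 and 3, where 2 and 3 have
# ParentCriterionId == 1, child_branches becomes {1: [<2>, <3>], 2: [], 3: []}
# and tree_root is the partition whose ParentCriterionId is None.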
def output_bulk_product_partition_tree(node, child_branches, tree_level):
"""
Outputs the details of the specified product partition node,
and passes any children to itself recursively.
:param node: The node to output, whether a Subdivision or Unit.
:type node: BulkAdGroupProductPartition
:param child_branches: The child branches or nodes if any exist.
:type child_branches: dict{long, BulkAdGroupProductPartition[]}
    :param tree_level: The number of descendants from the tree root node.
        Used by this operation to format the tree structure output.
:type tree_level: int
"""
if node is None:
return
ad_group_criterion=node.ad_group_criterion
pad=''
for i in range(0, tree_level):
pad=pad + '\t'
output_status_message("{0}{1}".format(
pad,
ad_group_criterion.Criterion.PartitionType)
)
output_status_message("{0}ParentCriterionId: {1}".format(
pad,
ad_group_criterion.Criterion.ParentCriterionId)
)
output_status_message("{0}Id: {1}".format(
pad,
ad_group_criterion.Id)
)
if ad_group_criterion.Criterion.PartitionType == 'Unit':
if ad_group_criterion.Type == 'BiddableAdGroupCriterion':
output_status_message("{0}Bid Amount: {1}".format(
pad,
ad_group_criterion.CriterionBid.Amount)
)
elif ad_group_criterion.Type == 'NegativeAdGroupCriterion':
output_status_message("{0}Not Bidding on this Condition".format(
pad)
)
null_attribute="(All other)" if ad_group_criterion.Criterion.ParentCriterionId is not None else "(Tree Root)"
output_status_message("{0}Attribute: {1}".format(
pad,
null_attribute if ad_group_criterion.Criterion.Condition.Attribute is None else ad_group_criterion.Criterion.Condition.Attribute)
)
output_status_message("{0}Operand: {1}\n".format(
pad,
ad_group_criterion.Criterion.Condition.Operand)
)
for child_node in child_branches[ad_group_criterion.Id]:
output_bulk_product_partition_tree(child_node, child_branches, tree_level + 1)
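# Illustrative output shape (not part of the original module): the tree root
# prints with no indentation and each level below it gains one tab, so a
# root Subdivision with two child Units prints the root's lines flush left
# and each child's lines prefixed by a single tab.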
def output_bulk_product_ads(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkProductAd:")
output_status_message("AdGroupId: {0}".format(entity.ad_group_id))
output_status_message("AdGroupName: {0}".format(entity.ad_group_name))
output_status_message("CampaignName: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management ProductAd Object
output_ad(entity.ad)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_keywords(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkKeyword:")
output_status_message("AdGroup Id: {0}".format(entity.ad_group_id))
output_status_message("AdGroup Name: {0}".format(entity.ad_group_name))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_bulk_quality_score_data(entity.quality_score_data)
output_bulk_bid_suggestions(entity.bid_suggestions)
# Output the Campaign Management Keyword Object
output_keyword(entity.keyword)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_action_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkActionAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management ActionAdExtension Object
output_adextension(entity.action_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_action_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignActionAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_app_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkAppAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management AppAdExtension Object
output_adextension(entity.app_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_app_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignAppAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_call_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCallAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management CallAdExtension Object
output_adextension(entity.call_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_call_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignCallAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_callout_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCalloutAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management CalloutAdExtension Object
output_adextension(entity.callout_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_callout_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignCalloutAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_location_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkLocationAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management LocationAdExtension Object
output_adextension(entity.location_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_location_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignLocationAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_price_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkPriceAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management PriceAdExtension Object
output_adextension(entity.price_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_price_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignPriceAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_review_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkReviewAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
output_status_message("Client Id: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management ReviewAdExtension Object
output_adextension(entity.review_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_review_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignReviewAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_sitelink_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkSitelinkAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management SitelinkAdExtension Object
output_adextension(entity.sitelink_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_sitelink_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignSitelinkAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_structured_snippet_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkStructuredSnippetAdExtension:")
output_status_message("Account Id: {0}".format(entity.account_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management StructuredSnippetAdExtension Object
output_adextension(entity.structured_snippet_ad_extension)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_structured_snippet_ad_extensions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignStructuredSnippetAdExtension:")
if entity.ad_extension_id_to_entity_id_association is not None:
output_status_message("AdExtensionId: {0}".format(entity.ad_extension_id_to_entity_id_association.AdExtensionId))
output_status_message("EntityId: {0}".format(entity.ad_extension_id_to_entity_id_association.EntityId))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("Client Id: {0}".format(entity.client_id))
output_status_message("Editorial Status: {0}".format(entity.editorial_status))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_remarketing_lists(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkRemarketingList:")
output_status_message("Status: {0}".format(entity.status))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management RemarketingList Object
output_audience(entity.remarketing_list)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_ad_group_remarketing_list_associations(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkAdGroupRemarketingListAssociation:")
output_status_message("AdGroup Name: {0}".format(entity.ad_group_name))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management BiddableAdGroupCriterion Object
output_biddableadgroupcriterion(entity.biddable_ad_group_criterion)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_ad_group_dynamic_search_ad_targets(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkAdGroupDynamicSearchAdTarget:")
output_status_message("AdGroup Name: {0}".format(entity.ad_group_name))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management BiddableAdGroupCriterion Object
output_biddableadgroupcriterion(entity.biddable_ad_group_criterion)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_ad_group_negative_dynamic_search_ad_targets(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkAdGroupNegativeDynamicSearchAdTarget:")
output_status_message("AdGroup Name: {0}".format(entity.ad_group_name))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management NegativeAdGroupCriterion Object
output_negativeadgroupcriterion(entity.negative_ad_group_criterion)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_budgets(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkBudget:")
output_status_message("AccountId: {0}".format(entity.account_id))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_status_message("Status: {0}".format(entity.status))
# Output the Campaign Management Budget Object
output_budget(entity.budget)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaigns(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaign:")
output_status_message("AccountId: {0}".format(entity.account_id))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_bulk_quality_score_data(entity.quality_score_data)
# Output the Campaign Management Campaign Object
output_campaign(entity.campaign)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_ad_groups(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkAdGroup:")
output_status_message("Campaign Id: {0}".format(entity.campaign_id))
output_status_message("Campaign Name: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
output_bulk_quality_score_data(entity.quality_score_data)
# Output the Campaign Management AdGroup Object
output_adgroup(entity.ad_group)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_ad_group_product_partitions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkAdGroupProductPartition:")
output_status_message("CampaignName: {0}".format(entity.campaign_name))
output_status_message("AdGroupName: {0}".format(entity.ad_group_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# BulkAdGroupProductPartition can have either BiddableAdGroupCriterion or NegativeAdGroupCriterion
if entity.ad_group_criterion is None:
output_status_message("Criterion is null or invalid.")
elif entity.ad_group_criterion.Type == 'BiddableAdGroupCriterion':
# Output the Campaign Management BiddableAdGroupCriterion
output_biddableadgroupcriterion(entity.ad_group_criterion)
elif entity.ad_group_criterion.Type == 'NegativeAdGroupCriterion':
# Output the Campaign Management NegativeAdGroupCriterion
output_negativeadgroupcriterion(entity.ad_group_criterion)
else:
output_status_message("Unknown ad group criterion type.")
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_campaign_product_scopes(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkCampaignProductScope:")
output_status_message("CampaignName: {0}".format(entity.campaign_name))
output_status_message("ClientId: {0}".format(entity.client_id))
if entity.last_modified_time is not None:
output_status_message("LastModifiedTime: {0}".format(entity.last_modified_time))
# Output the Campaign Management BiddableCampaignCriterion
output_biddablecampaigncriterion(entity.biddable_campaign_criterion)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
def output_bulk_offlineconversions(bulk_entities):
for entity in bulk_entities:
output_status_message("BulkOfflineConversion:")
output_status_message("ClientId: {0}".format(entity.client_id))
# Output the Campaign Management OfflineConversion Object
output_offlineconversion(entity.offline_conversion)
if entity.has_errors:
output_bulk_errors(entity.errors)
output_status_message('')
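# ---------------------------------------------------------------------------
# Hypothetical convenience dispatcher (not part of the SDK sample above): route
# a mixed iterable of downloaded bulk records to the matching output_* helper
# by concrete class name. The keys assume the bingads bulk entity class names;
# extend the table for whichever record types your download requests include.
def output_bulk_entities(bulk_entities):
    handlers = {
        'BulkBudget': output_bulk_budgets,
        'BulkCampaign': output_bulk_campaigns,
        'BulkAdGroup': output_bulk_ad_groups,
        'BulkRemarketingList': output_bulk_remarketing_lists,
        'BulkOfflineConversion': output_bulk_offlineconversions,
    }
    for entity in bulk_entities:
        handler = handlers.get(type(entity).__name__)
        if handler is not None:
            # Each output_* helper expects an iterable of entities
            handler([entity])
        else:
            output_status_message("Unhandled bulk record type: {0}".format(type(entity).__name__))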
|
|
import logging
import shutil
import json
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed, Http404
from django.db.models import Q
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import View, ListView, TemplateView
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.contrib.formtools.wizard.views import SessionWizardView
from bookmarks.models import Bookmark
from builds import utils as build_utils
from builds.models import Version
from builds.forms import AliasForm, VersionForm
from builds.filters import VersionFilter
from builds.models import VersionAlias
from core.utils import trigger_build
from oauth.models import GithubProject, BitbucketProject
from oauth import utils as oauth_utils
from projects.forms import (ProjectBackendForm, ProjectBasicsForm,
ProjectExtraForm, ProjectAdvancedForm,
UpdateProjectForm, SubprojectForm,
build_versions_form, UserForm, EmailHookForm,
TranslationForm, RedirectForm, WebHookForm)
from projects.models import Project, EmailHook, WebHook
from projects import constants
try:
from readthedocs.projects.signals import project_import
except ImportError:
from projects.signals import project_import
log = logging.getLogger(__name__)
class ProjectDashboard(ListView):
"""
A dashboard! If you aint know what that means you aint need to.
Essentially we show you an overview of your content.
"""
model = Project
template_name = 'projects/project_dashboard.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ProjectDashboard, self).dispatch(*args, **kwargs)
def get_queryset(self):
return Project.objects.dashboard(self.request.user)
def get_context_data(self, **kwargs):
context = super(ProjectDashboard, self).get_context_data(**kwargs)
        version_filter = VersionFilter(constants.IMPORTANT_VERSION_FILTERS, queryset=self.get_queryset())
        context['filter'] = version_filter
bookmarks = Bookmark.objects.filter(user=self.request.user)
        # exists() must be called; the bare method reference is always truthy
        if bookmarks.exists():
            context['bookmark_list'] = bookmarks[:3]
return context
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
    the project's configuration, edit the files associated with that
project, etc.
Now redirects to the normal /projects/<slug> view.
"""
return HttpResponseRedirect(reverse('projects_detail',
args=[project_slug]))
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form_class = UpdateProjectForm
form = form_class(instance=project, data=request.POST or None,
user=request.user)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project settings updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_advanced(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form_class = ProjectAdvancedForm
form = form_class(instance=project, data=request.POST or None, initial={
'num_minor': 2, 'num_major': 2, 'num_point': 2})
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project settings updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_advanced.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_versions(request, project_slug):
"""
    Shows the available versions and lets the user choose which ones they
    would like to have built.
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project versions updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_versions.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_version_detail(request, project_slug, version_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
version = get_object_or_404(Version.objects.public(user=request.user, project=project, only_active=False), slug=version_slug)
form = VersionForm(request.POST or None, instance=version)
if request.method == 'POST' and form.is_valid():
form.save()
url = reverse('project_version_list', args=[project.slug])
return HttpResponseRedirect(url)
return render_to_response(
'projects/project_version_detail.html',
{'form': form, 'project': project, 'version': version},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
Make a project as deleted on POST, otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
messages.success(request, _('Project deleted'))
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
class ImportWizardView(SessionWizardView):
'''Project import wizard'''
form_list = [('basics', ProjectBasicsForm),
('extra', ProjectExtraForm)]
condition_dict = {'extra': lambda self: self.is_advanced()}
def get_form_kwargs(self, step):
'''Get args to pass into form instantiation'''
kwargs = {}
kwargs['user'] = self.request.user
if step == 'basics':
kwargs['show_advanced'] = True
if step == 'extra':
            basics_form = self.get_form_from_step('basics')
            project = basics_form.save(commit=False)
kwargs['instance'] = project
return kwargs
def get_form_from_step(self, step):
form = self.form_list[step](
data=self.get_cleaned_data_for_step(step),
**self.get_form_kwargs(step)
)
form.full_clean()
return form
def get_template_names(self):
'''Return template names based on step name'''
        return 'projects/import_{0}.html'.format(self.steps.current)
def done(self, form_list, **kwargs):
'''Save form data as object instance
Don't save form data directly, instead bypass documentation building and
other side effects for now, by signalling a save without commit. Then,
        finish by adding the members to the project and saving.
'''
# expect the first form
basics_form = form_list[0]
# Save the basics form to create the project instance, then alter
# attributes directly from other forms
project = basics_form.save()
        # There is no break in this loop, so the original for/else always ran
        # its else clause; basic_only is therefore unconditionally True.
        basic_only = True
        for form in form_list[1:]:
            for (field, value) in form.cleaned_data.items():
                setattr(project, field, value)
project.save()
project_import.send(sender=project, request=self.request)
trigger_build(project, basic=basic_only)
return HttpResponseRedirect(reverse('projects_detail',
args=[project.slug]))
def is_advanced(self):
'''Determine if the user selected the `show advanced` field'''
data = self.get_cleaned_data_for_step('basics') or {}
return data.get('advanced', True)
class ImportView(TemplateView):
'''On GET, show the source select template, on POST, mock out a wizard
If we are accepting POST data, use the fields to seed the initial data in
    :py:class:`ImportWizardView`. The import templates will redirect the form to
`/dashboard/import`
'''
template_name = 'projects/project_import.html'
wizard_class = ImportWizardView
def post(self, request, *args, **kwargs):
initial_data = {}
initial_data['basics'] = {}
for key in ['name', 'repo', 'repo_type']:
initial_data['basics'][key] = request.POST.get(key)
initial_data['extra'] = {}
for key in ['description', 'project_url']:
initial_data['extra'][key] = request.POST.get(key)
request.method = 'GET'
return self.wizard_class.as_view(initial_dict=initial_data)(request)
class ImportDemoView(View):
'''View to pass request on to import form to import demo project'''
form_class = ProjectBasicsForm
request = None
args = None
kwargs = None
def get(self, request, *args, **kwargs):
'''Process link request as a form post to the project import form'''
self.request = request
self.args = args
self.kwargs = kwargs
data = self.get_form_data()
project = (Project.objects.for_admin_user(request.user)
.filter(repo=data['repo']).first())
if project is not None:
messages.success(
request, _('The demo project is already imported!'))
else:
kwargs = self.get_form_kwargs()
form = self.form_class(data=data, **kwargs)
if form.is_valid():
project = form.save()
project.save()
trigger_build(project, basic=True)
messages.success(
request, _('Your demo project is currently being imported'))
else:
for (_f, msg) in form.errors.items():
log.error(msg)
messages.error(request,
_('There was a problem adding the demo project'))
return HttpResponseRedirect(reverse('projects_dashboard'))
return HttpResponseRedirect(reverse('projects_detail',
args=[project.slug]))
def get_form_data(self):
'''Get form data to post to import form'''
return {
'name': '{0}-demo'.format(self.request.user.username),
'repo_type': 'git',
'repo': 'https://github.com/readthedocs/template.git'
}
def get_form_kwargs(self):
'''Form kwargs passed in during instantiation'''
return {'user': self.request.user}
@login_required
def edit_alias(request, project_slug, id=None):
proj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
if id:
alias = proj.aliases.get(pk=id)
form = AliasForm(instance=alias, data=request.POST or None)
else:
form = AliasForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
alias = form.save()
return HttpResponseRedirect(alias.project.get_absolute_url())
return render_to_response(
'projects/alias_edit.html',
{'form': form},
context_instance=RequestContext(request)
)
class AliasList(ListView):
model = VersionAlias
    context_object_name = 'alias'
    template_name = 'projects/alias_list.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AliasList, self).dispatch(*args, **kwargs)
def get_queryset(self):
self.project = get_object_or_404(Project.objects.for_admin_user(self.request.user), slug=self.kwargs.get('project_slug'))
return self.project.aliases.all()
@login_required
def project_subprojects(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = SubprojectForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse(
'projects_subprojects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
subprojects = project.subprojects.all()
return render_to_response(
'projects/project_subprojects.html',
{'form': form, 'project': project, 'subprojects': subprojects},
context_instance=RequestContext(request)
)
@login_required
def project_subprojects_delete(request, project_slug, child_slug):
parent = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
child = get_object_or_404(Project.objects.for_admin_user(request.user), slug=child_slug)
parent.remove_subproject(child)
return HttpResponseRedirect(reverse('projects_subprojects',
args=[parent.slug]))
@login_required
def project_users(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = UserForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
users = project.users.all()
return render_to_response(
'projects/project_users.html',
{'form': form, 'project': project, 'users': users},
context_instance=RequestContext(request)
)
@login_required
def project_users_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
user = get_object_or_404(User.objects.all(), username=request.POST.get('username'))
if user == request.user:
raise Http404
project.users.remove(user)
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_notifications(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
email_form = EmailHookForm(data=request.POST or None, project=project)
webhook_form = WebHookForm(data=request.POST or None, project=project)
if request.method == 'POST':
if email_form.is_valid():
email_form.save()
if webhook_form.is_valid():
webhook_form.save()
project_dashboard = reverse('projects_notifications',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
emails = project.emailhook_notifications.all()
urls = project.webhook_notifications.all()
return render_to_response(
'projects/project_notifications.html',
{
'email_form': email_form,
'webhook_form': webhook_form,
'project': project,
'emails': emails,
'urls': urls,
},
context_instance=RequestContext(request)
)
@login_required
def project_notifications_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
try:
project.emailhook_notifications.get(email=request.POST.get('email')).delete()
except EmailHook.DoesNotExist:
try:
project.webhook_notifications.get(url=request.POST.get('email')).delete()
except WebHook.DoesNotExist:
raise Http404
project_dashboard = reverse('projects_notifications', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_translations(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = TranslationForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_translations',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
lang_projects = project.translations.all()
return render_to_response(
'projects/project_translations.html',
{'form': form, 'project': project, 'lang_projects': lang_projects},
context_instance=RequestContext(request)
)
@login_required
def project_translations_delete(request, project_slug, child_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
subproj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=child_slug)
project.translations.remove(subproj)
project_dashboard = reverse('projects_translations', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_redirects(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = RedirectForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_redirects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
redirects = project.redirects.all()
return render_to_response(
'projects/project_redirects.html',
{'form': form, 'project': project, 'redirects': redirects},
context_instance=RequestContext(request)
)
@login_required
def project_redirects_delete(request, project_slug):
if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
redirect = get_object_or_404(project.redirects,
pk=request.POST.get('id_pk'))
if redirect.project == project:
redirect.delete()
else:
raise Http404
return HttpResponseRedirect(reverse('projects_redirects',
args=[project.slug]))
@login_required
def project_import_github(request, sync=False):
'''Show form that prefills import form with data from GitHub'''
github_connected = oauth_utils.import_github(user=request.user, sync=sync)
repos = GithubProject.objects.filter(users__in=[request.user])
# Find existing projects that match a repo url
for repo in repos:
ghetto_repo = repo.git_url.replace('git://', '').replace('.git', '')
projects = (Project
.objects
.public(request.user)
.filter(Q(repo__endswith=ghetto_repo) |
Q(repo__endswith=ghetto_repo + '.git')))
if projects:
repo.matches = [project.slug for project in projects]
else:
repo.matches = []
return render_to_response(
'projects/project_import_github.html',
{
'repos': repos,
'github_connected': github_connected,
'sync': sync,
},
context_instance=RequestContext(request)
)
@login_required
def project_import_bitbucket(request, sync=False):
'''Show form that prefills import form with data from BitBucket'''
bitbucket_connected = oauth_utils.import_bitbucket(user=request.user, sync=sync)
repos = BitbucketProject.objects.filter(users__in=[request.user])
# Find existing projects that match a repo url
for repo in repos:
ghetto_repo = repo.git_url.replace('git://', '').replace('.git', '')
projects = (Project
.objects
.public(request.user)
.filter(Q(repo__endswith=ghetto_repo) |
Q(repo__endswith=ghetto_repo + '.git')))
if projects:
repo.matches = [project.slug for project in projects]
else:
repo.matches = []
return render_to_response(
'projects/project_import_bitbucket.html',
{
'repos': repos,
'bitbucket_connected': bitbucket_connected,
'sync': sync,
},
context_instance=RequestContext(request)
)
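# ---------------------------------------------------------------------------
# Illustrative URL wiring for a few of the views above (hypothetical; the
# project's real urls.py may differ). The route names mirror the reverse()
# calls made in this module:
#
#   from django.conf.urls import url
#   from projects.views import private
#
#   urlpatterns = [
#       url(r'^$', private.ProjectDashboard.as_view(),
#           name='projects_dashboard'),
#       url(r'^(?P<project_slug>[-\w]+)/users/$', private.project_users,
#           name='projects_users'),
#       url(r'^(?P<project_slug>[-\w]+)/notifications/$',
#           private.project_notifications, name='projects_notifications'),
#   ]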
|
|
import logging
import pathlib
import sys
from typing import Any
from unittest import mock
import pytest
from freezegun import freeze_time
from looker_sdk.sdk.api31.models import DBConnection
from datahub.configuration.common import PipelineExecutionError
from datahub.ingestion.run.pipeline import Pipeline
from tests.test_helpers import mce_helpers
logging.getLogger("lkml").setLevel(logging.INFO)
FROZEN_TIME = "2020-04-14 07:00:00"
@freeze_time(FROZEN_TIME)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="lkml requires Python 3.7+")
def test_lookml_ingest(pytestconfig, tmp_path, mock_time):
"""Test backwards compatibility with previous form of config with new flags turned off"""
test_resources_dir = pytestconfig.rootpath / "tests/integration/lookml"
mce_out_file = "expected_output.json"
# Note this config below is known to create "bad" lineage since the config author has not provided enough information
# to resolve relative table names (which are not fully qualified)
# We keep this check just to validate that ingestion doesn't croak on this config
pipeline = Pipeline.create(
{
"run_id": "lookml-test",
"source": {
"type": "lookml",
"config": {
"base_folder": str(test_resources_dir / "lkml_samples"),
"connection_to_platform_map": {"my_connection": "conn"},
"parse_table_names_from_sql": True,
"tag_measures_and_dimensions": False,
"project_name": "lkml_samples",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/{mce_out_file}",
},
},
}
)
pipeline.run()
pipeline.pretty_print_summary()
pipeline.raise_from_status(raise_warnings=True)
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / mce_out_file,
golden_path=test_resources_dir / mce_out_file,
)
@freeze_time(FROZEN_TIME)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="lkml requires Python 3.7+")
def test_lookml_ingest_offline(pytestconfig, tmp_path, mock_time):
"""New form of config with offline specification of connection defaults"""
test_resources_dir = pytestconfig.rootpath / "tests/integration/lookml"
mce_out = "lookml_mces_offline.json"
pipeline = Pipeline.create(
{
"run_id": "lookml-test",
"source": {
"type": "lookml",
"config": {
"base_folder": str(test_resources_dir / "lkml_samples"),
"connection_to_platform_map": {
"my_connection": {
"platform": "snowflake",
"default_db": "default_db",
"default_schema": "default_schema",
}
},
"parse_table_names_from_sql": True,
"project_name": "lkml_samples",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/{mce_out}",
},
},
}
)
pipeline.run()
pipeline.pretty_print_summary()
pipeline.raise_from_status(raise_warnings=True)
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / mce_out,
golden_path=test_resources_dir / mce_out,
)
@freeze_time(FROZEN_TIME)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="lkml requires Python 3.7+")
def test_lookml_ingest_api_bigquery(pytestconfig, tmp_path, mock_time):
# test with BigQuery connection
ingestion_test(
pytestconfig,
tmp_path,
mock_time,
DBConnection(
dialect_name="bigquery", host="project-foo", database="default-db"
),
)
@freeze_time(FROZEN_TIME)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="lkml requires Python 3.7+")
def test_lookml_ingest_api_hive(pytestconfig, tmp_path, mock_time):
# test with Hive connection
ingestion_test(
pytestconfig,
tmp_path,
mock_time,
DBConnection(
dialect_name="hive2",
database="default-hive-db",
),
)
def ingestion_test(
pytestconfig: Any,
tmp_path: pathlib.Path,
mock_time: int,
mock_connection: DBConnection,
) -> None:
test_resources_dir = pytestconfig.rootpath / "tests/integration/lookml"
mce_out_file = f"lookml_mces_api_{mock_connection.dialect_name}.json"
mocked_client = mock.MagicMock()
mock_model = mock.MagicMock(project_name="lkml_samples")
with mock.patch("looker_sdk.init31") as mock_sdk:
mock_sdk.return_value = mocked_client
# mock_connection = mock.MagicMock()
mocked_client.connection.return_value = mock_connection
mocked_client.lookml_model.return_value = mock_model
pipeline = Pipeline.create(
{
"run_id": "lookml-test",
"source": {
"type": "lookml",
"config": {
"base_folder": str(test_resources_dir / "lkml_samples"),
"api": {
"client_id": "fake_client_id",
"client_secret": "fake_secret",
"base_url": "fake_account.looker.com",
},
"parse_table_names_from_sql": True,
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/{mce_out_file}",
},
},
}
)
pipeline.run()
pipeline.pretty_print_summary()
pipeline.raise_from_status(raise_warnings=True)
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / mce_out_file,
golden_path=test_resources_dir / mce_out_file,
)
@freeze_time(FROZEN_TIME)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="lkml requires Python 3.7+")
def test_lookml_bad_sql_parser(pytestconfig, tmp_path, mock_time):
"""Incorrect specification of sql parser should not fail ingestion"""
test_resources_dir = pytestconfig.rootpath / "tests/integration/lookml"
mce_out = "lookml_mces_badsql_parser.json"
pipeline = Pipeline.create(
{
"run_id": "lookml-test",
"source": {
"type": "lookml",
"config": {
"base_folder": str(test_resources_dir / "lkml_samples"),
"connection_to_platform_map": {
"my_connection": {
"platform": "snowflake",
"default_db": "default_db",
"default_schema": "default_schema",
}
},
"parse_table_names_from_sql": True,
"project_name": "lkml_samples",
"sql_parser": "bad.sql.Parser",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/{mce_out}",
},
},
}
)
pipeline.run()
pipeline.pretty_print_summary()
pipeline.raise_from_status(raise_warnings=False)
    # Warnings should surface as a PipelineExecutionError when requested
    with pytest.raises(PipelineExecutionError):
        pipeline.raise_from_status(raise_warnings=True)
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / mce_out,
golden_path=test_resources_dir / mce_out,
)
@freeze_time(FROZEN_TIME)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="lkml requires Python 3.7+")
def test_lookml_github_info(pytestconfig, tmp_path, mock_time):
"""Add github info to config"""
test_resources_dir = pytestconfig.rootpath / "tests/integration/lookml"
mce_out = "lookml_mces_with_external_urls.json"
pipeline = Pipeline.create(
{
"run_id": "lookml-test",
"source": {
"type": "lookml",
"config": {
"base_folder": str(test_resources_dir / "lkml_samples"),
"connection_to_platform_map": {
"my_connection": {
"platform": "snowflake",
"default_db": "default_db",
"default_schema": "default_schema",
}
},
"parse_table_names_from_sql": True,
"project_name": "lkml_samples",
"github_info": {"repo": "datahub/looker-demo", "branch": "master"},
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/{mce_out}",
},
},
}
)
pipeline.run()
pipeline.pretty_print_summary()
pipeline.raise_from_status(raise_warnings=True)
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / mce_out,
golden_path=test_resources_dir / mce_out,
)
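# ---------------------------------------------------------------------------
# Local usage note (assuming a standard datahub dev checkout): these golden
# tests can be run in isolation with, e.g.,
#   pytest -vv tests/integration/lookml
# Each test writes its MCE output under pytest's tmp_path and diffs it against
# the committed golden file via mce_helpers.check_golden_file.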
|
|
#!/usr/local/bin/python
from sprint import sprint as print
import binascii
import filecmp
from io import open
import os
import re
import requests
import stat
import basics
import config
import useful
errors = []
# ----------------------------------------------------------------------------
def fix_files(page_id):
dn = useful.relpath('.', config.LIB_MAN_DIR, page_id[7:].lower())
os.system('sudo chown bamca:www %s' % dn)
os.system('sudo chmod 775 %s' % dn)
os.system('sudo chown bamca:www %s/*.*' % dn)
os.system('sudo chmod 664 %s/*.*' % dn)
def grab_page(url):
print('read', url)
retval = requests.get(url)
return retval.text
def grab_list(ll, fl):
for url in fl:
fn = url[url.rfind('/') + 1:]
libdir = useful.relpath('.', config.LIB_MAN_DIR, ll['link_line.page_id'][7:].lower())
        if not os.path.exists(libdir):
            errors.append((ll, url))
            continue
sfn = os.path.join(libdir, fn)
dot = sfn.rfind('.')
sfn = sfn[:dot] + sfn[dot:].lower()
if os.path.exists(sfn):
print(sfn, 'already exists')
else:
img = grab_page(ll['pth'] + '/' + url)
save(ll, sfn, img)
def save(ll, sfn, img):
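    # Note: grab_page() returns requests' decoded text, so binary payloads
    # such as JPEGs can be corrupted here; response.content written with
    # mode 'wb' would be needed for byte-faithful saves.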
print('save', sfn)
open(sfn, 'w').write(img)
def mbx_forum(ll):
mbxf_img_re = re.compile(r'''<IMG src='(?P<u>[^']*)' width='200'>''')
pag = grab_page(ll['link_line.url'])
grab_list(ll, mbxf_img_re.findall(pag))
def mcch(ll):
print(ll['link_line.url'], "- ignored")
def mbdb(ll):
    mbdb_sub_re = re.compile(r'''<a href="(?P<u>showmodel\.php\?[^"]*)"''')
mbdb_img_re = re.compile(r'''<img src="(?P<u>[^"]*)">''')
pag = grab_page(ll['link_line.url'])
for subpg in mbdb_sub_re.findall(pag):
imgpg = grab_page(ll['pth'] + '/' + subpg)
grab_list(ll, mbdb_img_re.findall(imgpg))
def cf(ll):
cf_img_re = re.compile(r'''<A HREF="(?P<u>[^"]*)" target="_blank">''')
pag = grab_page(ll['link_line.url'])
grab_list(ll, cf_img_re.findall(pag))
def mbdan(ll):
mbdan_img_re = re.compile(r'''<IMG SRC="(?P<u>[^"]*)"''', re.M | re.I)
pag = grab_page(ll['link_line.url'])
fl = mbdan_img_re.findall(pag)
fl = filter(lambda x: x != '../hr.gif', fl)
grab_list(ll, fl)
def mbxu(ll):
mbxu_sub_re = re.compile(r'''<a href="(?P<u>Ver_Detail_and_Var_Listing.php\?model=[^"]*)">''')
mbxu_img_re = re.compile(r'''<img src=(?P<u>[^ "]*) />''')
pag = grab_page(ll['link_line.url'])
for subpg in mbxu_sub_re.findall(pag):
imgpg = grab_page(ll['pth'] + '/' + subpg)
grab_list(ll, mbxu_img_re.findall(imgpg))
def areh(ll):
areh_sub_re = re.compile(
r'''<a href="(?P<u>[^"]*)" title="[^"]*" target="DATA"> <img src="[^"]*"></a> ''')
areh_img_re = re.compile(r'''<img src="(?P<u>[^"]*)">''')
pag = grab_page(ll['link_line.url'])
for subpg in areh_sub_re.findall(pag):
imgpg = grab_page(ll['pth'] + '/' + subpg)
grab_list(ll, areh_img_re.findall(imgpg))
def psdc(ll):
psdc_img_re = re.compile(r'''<a href="(?P<u>[^"]*)">''')
pag = grab_page(ll['link_line.url'])
fl = psdc_img_re.findall(pag)
fl = filter(lambda x: x != 'Notation.jpg' and not x.endswith('htm'), fl)
grab_list(ll, fl)
def mbwiki(ll):
print(ll['link_line.url'], "- ignored")
def toyvan(ll):
print(ll['link_line.url'], "- ignored")
def mcf(ll):
print(ll['link_line.url'], "- ignored")
def run_line(ll):
pth = ll['link_line.url']
ll['pth'] = pth[:pth.rfind('/')]
if ll['link_line.associated_link'] == 0:
print("Huh?")
elif ll['link_line.associated_link'] == 1:
mcf(ll)
elif ll['link_line.associated_link'] == 2:
pass # mbxf docs
elif ll['link_line.associated_link'] == 3:
pass # comparisons
elif ll['link_line.associated_link'] == 4:
mbwiki(ll)
elif ll['link_line.associated_link'] == 5:
toyvan(ll)
elif ll['link_line.associated_link'] == 6:
psdc(ll)
elif ll['link_line.associated_link'] == 7:
areh(ll)
elif ll['link_line.associated_link'] == 8:
mbdan(ll)
elif ll['link_line.associated_link'] == 9:
mbx_forum(ll)
elif ll['link_line.associated_link'] == 10:
cf(ll)
elif ll['link_line.associated_link'] == 11:
pass # toy brokers
elif ll['link_line.associated_link'] == 12:
pass # diecast plus
elif ll['link_line.associated_link'] == 13:
mcch(ll)
elif ll['link_line.associated_link'] == 14:
mbdb(ll)
elif ll['link_line.associated_link'] == 15:
mbxu(ll)
else:
print(ll['link_line.url'], "- ignored")
fix_files(ll['link_line.page_id'])
print()
def clean_dir(page_id):
ln = useful.relpath('.', config.LIB_DIR, '0files')
dn = useful.relpath('.', config.LIB_MAN_DIR, page_id.lower())
print("attempting to clean", page_id)
do_top_dir(ln, dn)
# Given this directory, clean it of things we know we don't want.
@basics.command_line
def main(pif):
where = ' and '.join(['associated_link=%s' % x for x in pif.switch['a']])
if pif.filelist:
for arg in pif.filelist:
for ll in pif.dbh.fetch_link_lines(page_id='single.' + arg, section='single', where=where):
run_line(ll)
clean_dir(arg)
else:
for ll in pif.dbh.fetch_link_lines(section='single', where=where):
run_line(ll)
if errors:
print()
print('Errors found...')
for err in errors:
print(err[0]['link_line.page_id'], err[1])
# from dirjoin
# num_rm = 0
def list_dirs(thisdir):
fl = os.listdir(thisdir)
subdirs = []
for f in fl:
full = thisdir + '/' + f
if os.path.isdir(full):
subdirs.append(f)
subdirs.sort()
return subdirs
def do_dir(thisdir, destdir, destdict):
subdirs = []
saved = crc_dir(thisdir, subdirs=subdirs)
thisdict = {}
for f in sorted(saved.keys()):
thisinfo = saved[f]
# thisdict[f.lower()] = thisinfo
thisdict[f] = thisinfo
# destinfo = destdict.get(f.lower(), None)
destinfo = destdict.get(f, None)
if not destinfo:
pass
elif not os.path.exists(destdir + '/' + f):
pass
elif not os.path.exists(thisdir + '/' + f):
pass
elif thisinfo[1:] == destinfo[1:]:
if os.path.realpath(thisdir + '/' + f) == os.path.realpath(destdir + '/' + f):
print('*** same file') # not gunna do it! wouldn't be prudent!
else:
try:
if filecmp.cmp(thisdir + '/' + f, destdir + '/' + f):
# num_rm = num_rm + 1
os.remove(thisdir + '/' + f)
print(' ', thisdir + '/' + f)
# del thisdict[f.lower()]
del thisdict[f]
else:
print('~', thisdir + '/' + f)
except Exception:
print('*** filecmp died')
else:
print(" diff:", f)
print(" ", thisinfo)
print(" ", destinfo)
fl = os.listdir(thisdir)
if not fl:
try:
os.rmdir(thisdir)
except Exception: # shrug
pass
return thisdict
def do_top_dir(destdir, srcdir):
print("joining", destdir, srcdir)
if os.path.exists(destdir) and os.path.exists(srcdir):
        destdict = {}
destdict = do_dir(destdir, None, destdict)
do_dir(srcdir, destdir, destdict)
def crc_dir(destdir, subdirs=None):
# recurse is cheap
# same = diff = add = gone = unkn = 0
add = 0
print("+", destdir, end='')
fl = os.listdir(destdir)
print("(%d)" % len(fl))
files = dict()
fullfiles = dict()
for f in fl:
full = os.path.join(destdir, f)
try:
st = os.lstat(full)
except Exception:
continue
sig = (st.st_mtime, st.st_size)
if stat.S_ISLNK(st.st_mode):
continue
elif stat.S_ISDIR(st.st_mode):
if subdirs is not None:
subdirs.append(f)
continue
elif f[0] == '.':
continue
print(" ", f, end='')
try:
info = sig + file_crc(full)
except KeyboardInterrupt:
raise
except Exception:
print("*** Can't CRC", full)
continue
add += 1
print("+")
print(os.path.join(destdir, f), "added")
files[f] = info
fullfiles[os.path.join(destdir, f)] = info
return files
def file_crc(fn):
# print(".", fn,)
f = open(fn, 'rb')
crc = 0
while 1:
contents = f.read(16777216)
# print(".",)
if contents:
crc = binascii.crc32(contents, crc)
else:
break
# print()
return (crc,)
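# Illustrative note: file_crc() returns a one-element tuple such as
# (1234567890,) so that crc_dir() can append it to the (st_mtime, st_size)
# signature with plain tuple concatenation.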
if __name__ == '__main__':
main(options='a')
|
|
"""Support for MQTT binary sensors."""
from datetime import timedelta
import functools
import logging
import voluptuous as vol
from homeassistant.components import binary_sensor
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_NAME,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.event as evt
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import dt as dt_util
from . import CONF_QOS, CONF_STATE_TOPIC, DOMAIN, PLATFORMS, subscription
from .. import mqtt
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttAvailability,
MqttEntity,
async_setup_entry_helper,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MQTT Binary sensor"
CONF_OFF_DELAY = "off_delay"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_FORCE_UPDATE = False
CONF_EXPIRE_AFTER = "expire_after"
PLATFORM_SCHEMA = mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OFF_DELAY): cv.positive_int,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT binary sensor through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT binary sensor dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, binary_sensor.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT binary sensor."""
async_add_entities([MqttBinarySensor(hass, config, config_entry, discovery_data)])
class MqttBinarySensor(MqttEntity, BinarySensorEntity):
"""Representation a binary sensor that is updated by MQTT."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the MQTT binary sensor."""
self._state = None
self._expiration_trigger = None
self._delay_listener = None
expire_after = config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
self._expired = True
else:
self._expired = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA
def _setup_from_config(self, config):
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
def off_delay_listener(now):
"""Switch device off after a delay."""
self._delay_listener = None
self._state = False
self.async_write_ha_state()
@callback
@log_messages(self.hass, self.entity_id)
def state_message_received(msg):
"""Handle a new received MQTT state message."""
payload = msg.payload
# auto-expire enabled?
expire_after = self._config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
                # When expire_after is set and a message arrives, the device
                # cannot be expired: it had to be alive to publish the message
self._expired = False
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = dt_util.utcnow() + timedelta(seconds=expire_after)
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self._value_is_expired, expiration_at
)
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
payload = value_template.async_render_with_possible_json_value(
payload, variables={"entity_id": self.entity_id}
)
if not payload.strip(): # No output from template, ignore
_LOGGER.debug(
"Empty template output for entity: %s with state topic: %s. Payload: '%s', with value template '%s'",
self._config[CONF_NAME],
self._config[CONF_STATE_TOPIC],
msg.payload,
value_template,
)
return
if payload == self._config[CONF_PAYLOAD_ON]:
self._state = True
elif payload == self._config[CONF_PAYLOAD_OFF]:
self._state = False
else: # Payload is not for this entity
template_info = ""
if value_template is not None:
template_info = f", template output: '{payload}', with value template '{str(value_template)}'"
_LOGGER.info(
"No matching payload found for entity: %s with state topic: %s. Payload: '%s'%s",
self._config[CONF_NAME],
self._config[CONF_STATE_TOPIC],
msg.payload,
template_info,
)
return
if self._delay_listener is not None:
self._delay_listener()
self._delay_listener = None
off_delay = self._config.get(CONF_OFF_DELAY)
if self._state and off_delay is not None:
self._delay_listener = evt.async_call_later(
self.hass, off_delay, off_delay_listener
)
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": state_message_received,
"qos": self._config[CONF_QOS],
}
},
)
@callback
def _value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._expired = True
self.async_write_ha_state()
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
return self._config.get(CONF_DEVICE_CLASS)
@property
def force_update(self):
"""Force update."""
return self._config[CONF_FORCE_UPDATE]
@property
def available(self) -> bool:
"""Return true if the device is available and value has not expired."""
expire_after = self._config.get(CONF_EXPIRE_AFTER)
return MqttAvailability.available.fget(self) and (
expire_after is None or not self._expired
)
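# ---------------------------------------------------------------------------
# Illustrative configuration.yaml entry for this platform, using only options
# declared in PLATFORM_SCHEMA above (values are examples, not defaults):
#
#   binary_sensor:
#     - platform: mqtt
#       name: "Front door"
#       state_topic: "home/front_door/contact"
#       payload_on: "ON"
#       payload_off: "OFF"
#       off_delay: 5
#       expire_after: 600
#       device_class: door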
|
|
import string
# The given permutation tables
first_PCtable_String = "57,49,41,33,25,17,9,1,58,50,42,34,26,18,10,2,59,51,43,35,27,19,11,3,60,52,44,36,63,55,47,39,31,23,15,7,62,54,46,38,30,22,14,6,61,53,45,37,29,21,13,5,28,20,12,4"
second_PCtable_String = "14,17,11,24,1,5,3,28,15,6,21,10,23,19,12,4,26,8,16,7,27,20,13,2,41,52,31,37,47,55,30,40,51,45,33,48,44,49,39,56,34,53,46,42,50,36,29,32"
IPtableString = "58,50,42,34,26,18,10,2,60,52,44,36,28,20,12,4,62,54,46,38,30,22,14,6,64,56,48,40,32,24,16,8,57,49,41,33,25,17,9,1,59,51,43,35,27,19,11,3,61,53,45,37,29,21,13,5,63,55,47,39,31,23,15,7"
EBit_table = "32,1,2,3,4,5,4,5,6,7,8,9,8,9,10,11,12,13,12,13,14,15,16,17,16,17,18,19,20,21,20,21,22,23,24,25,24,25,26,27,28,29,28,29,30,31,32,1"
P_table = "16,7,20,21,29,12,28,17,1,15,23,26,5,18,31,10,2,8,24,14,32,27,3,9,19,13,30,6,22,11,4,25"
# The given S-boxes
S1 = [[14,4,13,1,2,15,11,8,3,10,6,12,5,9,0,7],
[0,15,7,4,14,2,13,1,10,6,12,11,9,5,3,8],
[4,1,14,8,13,6,2,11,15,12,9,7,3,10,5,0],
[15,12,8,2,4,9,1,7,5,11,3,14,10,0,6,13]]
S2 = [[15,1,8,14,6,11,3,4,9,7,2,13,12,0,5,10],
[3,13,4,7,15,2,8,14,12,0,1,10,6,9,11,5],
[0,14,7,11,10,4,13,1,5,8,12,6,9,3,2,15],
[13,8,10,1,3,15,4,2,11,6,7,12,0,5,14,9]]
S3 = [[10,0,9,14,6,3,15,5,1,13,12,7,11,4,2,8],
[13,7,0,9,3,4,6,10,2,8,5,14,12,11,15,1],
[13,6,4,9,8,15,3,0,11,1,2,12,5,10,14,7],
[1,10,13,0,6,9,8,7,4,15,14,3,11,5,2,12]]
S4 = [[7,13,14,3,0,6,9,10,1,2,8,5,11,12,4,15],
[13,8,11,5,6,15,0,3,4,7,2,12,1,10,14,9],
[10,6,9,0,12,11,7,13,15,1,3,14,5,2,8,4],
[3,15,0,6,10,1,13,8,9,4,5,11,12,7,2,14]]
S5 = [[2,12,4,1,7,10,11,6,8,5,3,15,13,0,14,9],
[14,11,2,12,4,7,13,1,5,0,15,10,3,9,8,6],
[4,2,1,11,10,13,7,8,15,9,12,5,6,3,0,14],
[11,8,12,7,1,14,2,13,6,15,0,9,10,4,5,3]]
S6 = [[12,1,10,15,9,2,6,8,0,13,3,4,14,7,5,11],
[10,15,4,2,7,12,9,5,6,1,13,14,0,11,3,8],
[9,14,15,5,2,8,12,3,7,0,4,10,1,13,11,6],
[4,3,2,12,9,5,15,10,11,14,1,7,6,0,8,13]]
S7 = [[4,11,2,14,15,0,8,13,3,12,9,7,5,10,6,1],
[13,0,11,7,4,9,1,10,14,3,5,12,2,15,8,6],
[1,4,11,13,12,3,7,14,10,15,6,8,0,5,9,2],
[6,11,13,8,1,4,10,7,9,5,0,15,14,2,3,12]]
S8 = [[13,2,8,4,6,15,11,1,10,9,3,14,5,0,12,7],
[1,15,13,8,10,3,7,4,12,5,6,11,0,14,9,2],
[7,11,4,1,9,12,14,2,0,6,10,13,15,3,5,8],
[2,1,14,7,4,10,8,13,15,12,9,0,3,5,6,11]]
# This function applies a permutation table to a binary string when the permutation table is given as a comma-separated string
def apply_permutation(binary, permutation_table):
table_array = permutation_table.split(",")
table_length = len(table_array)
result = ""
for i in range(0, table_length):
result += binary[int(table_array[i]) - 1]
return result
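# A quick sanity check (hypothetical values, not one of the DES tables):
# permuting "abcd" with the table "4,3,2,1" reverses the string, i.e.
#   apply_permutation("abcd", "4,3,2,1") -> "dcba"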
# This function circularly shifts a given binary string to the left by one
def left_shift(bits):
    return bits[1:] + bits[0]
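# For example, left_shift("0001") -> "0010": the leading bit wraps around to
# the end (a circular left shift by one position).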
# This function implements an XOR operation on two binary input strings.
def xor(binary1, binary2):
    if len(binary1) != len(binary2):
        print("ERROR: XOR inputs not the same length (length = " + str(len(binary1)) + ", length = " + str(len(binary2)) + ")")
        sys.exit(1)
result = ""
length = len(binary1)
for i in range(0, length):
        if binary1[i] == binary2[i]:
result += "0"
else:
result += "1"
return result
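# For example, xor("1010", "0110") -> "1100": each output bit is 1 exactly
# where the two input bits differ.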
# This function turns a binary value to a decimal
def binary_to_dec(binary):
if len(binary) == 4:
return 8*int(binary[0]) + 4*int(binary[1]) + 2*int(binary[2]) + 1*int(binary[3])
elif len(binary) == 2:
return 2*int(binary[0]) + 1*int(binary[1])
    else:
        print("ERROR: Faulty binary value in binary_to_dec(): " + binary)
        sys.exit(1)
# This function turns a decimal value into a binary value
def dec_to_binary(decimal):
get_bin = lambda x, n: x >= 0 and str(bin(x))[2:].zfill(n) or "-" + str(bin(x))[3:].zfill(n) # taken from stackexchange
return get_bin(decimal, 4)
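# The two helpers round-trip for 4-bit values, e.g.
#   binary_to_dec("1011") -> 11 and dec_to_binary(11) -> "1011"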
# This function implements an S-box
def sbox(block, sbox_type):
i = binary_to_dec(block[0] + block[5])
j = binary_to_dec(block[1:5])
    return dec_to_binary(int(sbox_type[i][j]))
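# Worked example, matching the published DES description: for the 6-bit block
# "011011", the outer bits "01" select row 1 and the middle bits "1101" select
# column 13, so sbox("011011", S1) -> "0101" (S1[1][13] == 5).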
# The mangler function
def f(Rn, Kn):
# expand R1 with the E Bit-selection table
expandedRn = apply_permutation(Rn, EBit_table)
print " E(R0) : " + expandedRn
# xor the expanded result with K1
xorResult = xor(expandedRn, Kn)
print " K1 XOR E(R0) : " + xorResult
# split the binary string into eight parts
    B1 = xorResult[0:6]
    B2 = xorResult[6:12]
    B3 = xorResult[12:18]
    B4 = xorResult[18:24]
    B5 = xorResult[24:30]
    B6 = xorResult[30:36]
    B7 = xorResult[36:42]
    B8 = xorResult[42:48]
print " "
print "Segmented binary:"
print " B1:{0} B2:{1} B3:{2} B4:{3} B5:{4} B6:{5} B7:{6} B8:{7}".format(B1, B2, B3, B4, B5, B6, B7, B8)
# apply the respective S-boxes to each binary part
    SB1 = sbox(B1, S1)
    SB2 = sbox(B2, S2)
    SB3 = sbox(B3, S3)
    SB4 = sbox(B4, S4)
    SB5 = sbox(B5, S5)
    SB6 = sbox(B6, S6)
    SB7 = sbox(B7, S7)
    SB8 = sbox(B8, S8)
print " "
print "S-box results:"
print " S1: {0} S2: {1} S3: {2} S4: {3} S5: {4} S6: {5} S7: {6} S8: {7} ".format(SB1, SB2, SB3, SB4, SB5, SB6, SB7, SB8)
P = apply_permutation( (SB1 + SB2 + SB3 + SB4 + SB5 + SB6 + SB7 + SB8), P_table)
return P
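# Note: f() returns a 32-bit string (P_table has 32 entries); the caller
# completes the round below with R1 = L0 XOR f(R0, K1).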
##########################
# Main method #
##########################
# Notes:
# a fuller version would request user input and convert the message to binary;
# here we use the example message and key from the classic DES walkthrough
# (hex M = 0123456789ABCDEF, K = 133457799BBCDFF1), written as 64-bit strings
message = "0000000100100011010001010110011110001001101010111100110111101111"
key = "0001001100110100010101110111100110011011101111001101111111110001"
print " "
print "message : " + message
print "key : " + key
print "-" * 86
# Apply permutation to K+
K_plus = apply_permutation(key, first_PCtable_String)
print " K+ : " + K_plus
C0 = K_plus[ 0 : (len(K_plus)/2) ] # first half of K_plus
D0 = K_plus[ (len(K_plus)/2) : len(K_plus) ] # second half of K_plus
print " C0 : " + C0
print " D0 : " + " "*len(C0) + D0
print "-" * 86
# Shift C0 and D0 to get C1 and D1, combine them, and permute the result to get K1
C1 = left_shift(C0)
D1 = left_shift(D0)
C1D1 = str(C1 + D1)
K1 = apply_permutation(C1D1, second_PCtable_String)
print " C1 : " + C1
print " D1 : " + " "*len(C1) + D1
print " C1D1 : " + C1D1
print " K1 : " + K1
print "-" * 86
# Apply the initial permutation to the message
IP = apply_permutation(message, IPtableString)
L0 = IP[0 : len(IP) // 2]  # first half of IP
R0 = IP[len(IP) // 2 : len(IP)]  # second half of IP
print(" IP : " + IP)
print(" L0 : " + L0)
print(" R0 : " + " " * len(L0) + R0)
print("-" * 86)
L1 = R0
f_result = f(R0, K1)
R1 = xor(L0, f_result)
print " "
print "Mangler function output: " + f_result
print "-" * 86
print " "
print "----- Result of R1 (one round of DES): " + R1
|
|
from typing import Callable, Text, Union, Optional, Dict, Any, List, Tuple
import os
import ujson
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from django.shortcuts import redirect, render
from django.conf import settings
from six.moves import map
from zerver.decorator import require_realm_admin, zulip_login_required
from zerver.forms import CreateUserForm
from zerver.lib.actions import do_change_avatar_fields, do_change_bot_owner, \
do_change_is_admin, do_change_default_all_public_streams, \
do_change_default_events_register_stream, do_change_default_sending_stream, \
do_create_user, do_deactivate_user, do_reactivate_user, do_regenerate_api_key
from zerver.lib.avatar import avatar_url, get_gravatar_url, get_avatar_field
from zerver.lib.exceptions import JsonableError
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_error, json_success
from zerver.lib.streams import access_stream_by_name
from zerver.lib.upload import upload_avatar_image
from zerver.lib.validator import check_bool, check_string, check_int, check_url
from zerver.lib.users import check_valid_bot_type, check_change_full_name, \
check_full_name, check_short_name, check_valid_interface_type
from zerver.lib.utils import generate_random_token
from zerver.models import UserProfile, Stream, Message, email_allowed_for_realm, \
get_user_profile_by_id, get_user, Service, get_user_including_cross_realm
from zerver.lib.create_user import random_api_key
def deactivate_user_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user(email, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if target.is_bot:
return json_error(_('No such user'))
if check_last_admin(target):
return json_error(_('Cannot deactivate the only organization administrator'))
return _deactivate_user_profile_backend(request, user_profile, target)
def deactivate_user_own_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
if user_profile.is_realm_admin and check_last_admin(user_profile):
return json_error(_('Cannot deactivate the only organization administrator'))
do_deactivate_user(user_profile, acting_user=user_profile)
return json_success()
def check_last_admin(user_profile):
# type: (UserProfile) -> bool
admins = set(user_profile.realm.get_admin_users())
return user_profile.is_realm_admin and len(admins) == 1
def deactivate_bot_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user(email, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('No such bot'))
if not target.is_bot:
return json_error(_('No such bot'))
return _deactivate_user_profile_backend(request, user_profile, target)
def _deactivate_user_profile_backend(request, user_profile, target):
# type: (HttpRequest, UserProfile, UserProfile) -> HttpResponse
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
do_deactivate_user(target, acting_user=user_profile)
return json_success()
def reactivate_user_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user(email, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
do_reactivate_user(target, acting_user=user_profile)
return json_success()
@has_request_variables
def update_user_backend(request, user_profile, email,
full_name=REQ(default="", validator=check_string),
is_admin=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, Text, Optional[Text], Optional[bool]) -> HttpResponse
try:
target = get_user(email, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
if is_admin is not None:
if not is_admin and check_last_admin(user_profile):
return json_error(_('Cannot remove the only organization administrator'))
do_change_is_admin(target, is_admin)
if (full_name is not None and target.full_name != full_name and
full_name.strip() != ""):
# We don't respect `name_changes_disabled` here because the request
# is on behalf of the administrator.
check_change_full_name(target, full_name, user_profile)
return json_success()
# TODO: Since eventually we want to support using the same email with
# different organizations, we'll eventually want this to be a
# logged-in endpoint so that we can access the realm_id.
@zulip_login_required
def avatar(request, email_or_id, medium=False):
# type: (HttpRequest, str, bool) -> HttpResponse
"""Accepts an email address or user ID and returns the avatar"""
is_email = False
try:
int(email_or_id)
except ValueError:
is_email = True
try:
if is_email:
realm = request.user.realm
user_profile = get_user_including_cross_realm(email_or_id, realm)
else:
user_profile = get_user_profile_by_id(email_or_id)
# If there is a valid user account passed in, use its avatar
url = avatar_url(user_profile, medium=medium)
except UserProfile.DoesNotExist:
# If there is no such user, treat it as a new gravatar
email = email_or_id
avatar_version = 1
url = get_gravatar_url(email, avatar_version, medium)
# We can rely on the url already having query parameters. Because
# our templates depend on being able to use the ampersand to
# add query parameters to our url, get_avatar_url does '?x=x'
# hacks to prevent us from having to jump through decode/encode hoops.
assert '?' in url
url += '&' + request.META['QUERY_STRING']
return redirect(url)
def get_stream_name(stream):
# type: (Optional[Stream]) -> Optional[Text]
if stream:
return stream.name
return None
@has_request_variables
def patch_bot_backend(request, user_profile, email,
full_name=REQ(default=None),
bot_owner=REQ(default=None),
default_sending_stream=REQ(default=None),
default_events_register_stream=REQ(default=None),
default_all_public_streams=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, Text, Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[bool]) -> HttpResponse
try:
bot = get_user(email, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(bot):
return json_error(_('Insufficient permission'))
if full_name is not None:
check_change_full_name(bot, full_name, user_profile)
if bot_owner is not None:
owner = get_user(bot_owner, user_profile.realm)
do_change_bot_owner(bot, owner, user_profile)
if default_sending_stream is not None:
if default_sending_stream == "":
stream = None # type: Optional[Stream]
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_sending_stream)
do_change_default_sending_stream(bot, stream)
if default_events_register_stream is not None:
if default_events_register_stream == "":
stream = None
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_events_register_stream)
do_change_default_events_register_stream(bot, stream)
if default_all_public_streams is not None:
do_change_default_all_public_streams(bot, default_all_public_streams)
if len(request.FILES) == 0:
pass
elif len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot)
avatar_source = UserProfile.AVATAR_FROM_USER
do_change_avatar_fields(bot, avatar_source)
else:
return json_error(_("You may only upload one file at a time"))
json_result = dict(
full_name=bot.full_name,
avatar_url=avatar_url(bot),
default_sending_stream=get_stream_name(bot.default_sending_stream),
default_events_register_stream=get_stream_name(bot.default_events_register_stream),
default_all_public_streams=bot.default_all_public_streams,
)
# Don't include the bot owner in case it is not set.
# Default bots have no owner.
if bot.bot_owner is not None:
json_result['bot_owner'] = bot.bot_owner.email
return json_success(json_result)
@has_request_variables
def regenerate_bot_api_key(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
bot = get_user(email, user_profile.realm)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(bot):
return json_error(_('Insufficient permission'))
do_regenerate_api_key(bot, user_profile)
json_result = dict(
        api_key=bot.api_key
)
return json_success(json_result)
# Adds an outgoing webhook or embedded bot service.
def add_service(name, user_profile, base_url=None, interface=None, token=None):
    # type: (Text, UserProfile, Optional[Text], Optional[int], Optional[Text]) -> None
Service.objects.create(name=name,
user_profile=user_profile,
base_url=base_url,
interface=interface,
token=token)
@has_request_variables
def add_bot_backend(request, user_profile, full_name_raw=REQ("full_name"), short_name_raw=REQ("short_name"),
bot_type=REQ(validator=check_int, default=UserProfile.DEFAULT_BOT),
payload_url=REQ(validator=check_url, default=""),
service_name=REQ(default=None),
interface_type=REQ(validator=check_int, default=Service.GENERIC),
default_sending_stream_name=REQ('default_sending_stream', default=None),
default_events_register_stream_name=REQ('default_events_register_stream', default=None),
default_all_public_streams=REQ(validator=check_bool, default=None)):
# type: (HttpRequest, UserProfile, Text, Text, int, Optional[Text], Optional[Text], int, Optional[Text], Optional[Text], Optional[bool]) -> HttpResponse
short_name = check_short_name(short_name_raw)
service_name = service_name or short_name
short_name += "-bot"
full_name = check_full_name(full_name_raw)
email = '%s@%s' % (short_name, user_profile.realm.get_bot_domain())
form = CreateUserForm({'full_name': full_name, 'email': email})
if bot_type == UserProfile.EMBEDDED_BOT:
if not settings.EMBEDDED_BOTS_ENABLED:
return json_error(_("Embedded bots are not enabled."))
if service_name not in [bot.name for bot in EMBEDDED_BOTS]:
return json_error(_("Invalid embedded bot name."))
if not form.is_valid():
# We validate client-side as well
return json_error(_('Bad name or username'))
try:
get_user(email, user_profile.realm)
return json_error(_("Username already in use"))
except UserProfile.DoesNotExist:
pass
check_valid_bot_type(bot_type)
check_valid_interface_type(interface_type)
if len(request.FILES) == 0:
avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
elif len(request.FILES) != 1:
return json_error(_("You may only upload one file at a time"))
else:
avatar_source = UserProfile.AVATAR_FROM_USER
default_sending_stream = None
if default_sending_stream_name is not None:
(default_sending_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_sending_stream_name)
default_events_register_stream = None
if default_events_register_stream_name is not None:
(default_events_register_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_events_register_stream_name)
bot_profile = do_create_user(email=email, password='',
realm=user_profile.realm, full_name=full_name,
short_name=short_name,
bot_type=bot_type,
bot_owner=user_profile,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
if len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot_profile)
if bot_type in (UserProfile.OUTGOING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
add_service(name=service_name,
user_profile=bot_profile,
base_url=payload_url,
interface=interface_type,
token=random_api_key())
json_result = dict(
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success(json_result)
def get_bots_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
bot_profiles = bot_profiles.select_related('default_sending_stream', 'default_events_register_stream')
bot_profiles = bot_profiles.order_by('date_joined')
def bot_info(bot_profile):
# type: (UserProfile) -> Dict[str, Any]
default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)
return dict(
username=bot_profile.email,
full_name=bot_profile.full_name,
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success({'bots': list(map(bot_info, bot_profiles))})
@has_request_variables
def get_members_backend(request, user_profile,
client_gravatar=REQ(validator=check_bool, default=False)):
# type: (HttpRequest, UserProfile, bool) -> HttpResponse
'''
The client_gravatar field here is set to True if clients can compute
their own gravatars, which saves us bandwidth. We want to eventually
make this the default behavior, but we have old clients that expect
the server to compute this for us.
'''
realm = user_profile.realm
admin_ids = set(u.id for u in user_profile.realm.get_admin_users())
query = UserProfile.objects.filter(
realm_id=realm.id
).values(
'id',
'email',
'realm_id',
'full_name',
'is_bot',
'is_active',
'bot_type',
'avatar_source',
'avatar_version',
'bot_owner__email',
)
def get_member(row):
# type: (Dict[str, Any]) -> Dict[str, Any]
email = row['email']
user_id = row['id']
result = dict(
user_id=user_id,
email=email,
full_name=row['full_name'],
is_bot=row['is_bot'],
is_active=row['is_active'],
bot_type=row['bot_type'],
)
result['is_admin'] = user_id in admin_ids
result['avatar_url'] = get_avatar_field(
user_id=user_id,
email=email,
avatar_source=row['avatar_source'],
avatar_version=row['avatar_version'],
realm_id=row['realm_id'],
medium=False,
client_gravatar=client_gravatar,
)
if row['bot_owner__email']:
result['bot_owner'] = row['bot_owner__email']
return result
members = [get_member(row) for row in query]
return json_success({'members': members})
@require_realm_admin
@has_request_variables
def create_user_backend(request, user_profile, email=REQ(), password=REQ(),
full_name_raw=REQ("full_name"), short_name=REQ()):
# type: (HttpRequest, UserProfile, Text, Text, Text, Text) -> HttpResponse
full_name = check_full_name(full_name_raw)
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
return json_error(_('Bad name or username'))
# Check that the new user's email address belongs to the admin's realm
# (Since this is an admin API, we don't require the user to have been
# invited first.)
realm = user_profile.realm
if not email_allowed_for_realm(email, user_profile.realm):
return json_error(_("Email '%(email)s' not allowed for realm '%(realm)s'") %
{'email': email, 'realm': realm.string_id})
try:
get_user(email, user_profile.realm)
return json_error(_("Email '%s' already in use") % (email,))
except UserProfile.DoesNotExist:
pass
do_create_user(email, password, realm, full_name, short_name)
return json_success()
def generate_client_id():
# type: () -> str
return generate_random_token(32)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1,
user_id = user_profile.id,
full_name = user_profile.full_name,
email = user_profile.email,
is_bot = user_profile.is_bot,
is_admin = user_profile.is_realm_admin,
short_name = user_profile.short_name)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
def team_view(request):
# type: (HttpRequest) -> HttpResponse
with open(settings.CONTRIBUTORS_DATA) as f:
data = ujson.load(f)
return render(
request,
'zerver/team.html',
context=data,
)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities to be used in Interactive Beam.
"""
import functools
import hashlib
import json
import logging
import pandas as pd
from apache_beam.dataframe.convert import to_pcollection
from apache_beam.dataframe.frame_base import DeferredBase
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive.caching.expression_cache import ExpressionCache
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.typehints.schemas import named_fields_from_element_type
_LOGGER = logging.getLogger(__name__)
def to_element_list(
reader, # type: Generator[Union[TestStreamPayload.Event, WindowedValueHolder]]
coder, # type: Coder
include_window_info, # type: bool
n=None, # type: int
include_time_events=False, # type: bool
):
# type: (...) -> List[WindowedValue]
"""Returns an iterator that properly decodes the elements from the reader.
"""
# Defining a generator like this makes it easier to limit the count of
# elements read. Otherwise, the count limit would need to be duplicated.
def elements():
for e in reader:
if isinstance(e, TestStreamPayload.Event):
if (e.HasField('watermark_event') or
e.HasField('processing_time_event')):
if include_time_events:
yield e
else:
for tv in e.element_event.elements:
decoded = coder.decode(tv.encoded_element)
yield (
decoded.windowed_value
if include_window_info else decoded.windowed_value.value)
elif isinstance(e, WindowedValueHolder):
yield (
e.windowed_value if include_window_info else e.windowed_value.value)
else:
yield e
# Because we can yield multiple elements from a single TestStreamFileRecord,
# we have to limit the count here to ensure that `n` is fulfilled.
count = 0
for e in elements():
if n and count >= n:
break
yield e
if not isinstance(e, TestStreamPayload.Event):
count += 1
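# Usage sketch (reader and coder are assumed to come from a recording): e.g.
#   list(to_element_list(reader, coder, include_window_info=False, n=10))
# yields at most the first 10 element values.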
def elements_to_df(elements, include_window_info=False, element_type=None):
# type: (List[WindowedValue], bool, Any) -> DataFrame
"""Parses the given elements into a Dataframe.
If the elements are a list of WindowedValues, then it will break out the
elements into their own DataFrame and return it. If include_window_info is
True, then it will concatenate the windowing information onto the elements
DataFrame.
"""
try:
columns_names = [
name for name, _ in named_fields_from_element_type(element_type)
]
except TypeError:
columns_names = None
rows = []
windowed_info = []
for e in elements:
rows.append(e.value)
if include_window_info:
windowed_info.append([e.timestamp.micros, e.windows, e.pane_info])
using_dataframes = isinstance(element_type, pd.DataFrame)
using_series = isinstance(element_type, pd.Series)
if using_dataframes or using_series:
rows_df = pd.concat(rows)
else:
rows_df = pd.DataFrame(rows, columns=columns_names)
if include_window_info and not using_series:
windowed_info_df = pd.DataFrame(
windowed_info, columns=['event_time', 'windows', 'pane_info'])
final_df = pd.concat([rows_df, windowed_info_df], axis=1)
else:
final_df = rows_df
return final_df
def register_ipython_log_handler():
# type: () -> None
"""Adds the IPython handler to a dummy parent logger (named
'apache_beam.runners.interactive') of all interactive modules' loggers so that
if is_in_notebook, logging displays the logs as HTML in frontends.
"""
# apache_beam.runners.interactive is not a module, thus this "root" logger is
# a dummy one created to hold the IPython log handler. When children loggers
# have propagate as True (by default) and logging level as NOTSET (by default,
# so the "root" logger's logging level takes effect), the IPython log handler
# will be triggered at the "root"'s own logging level. And if a child logger
# sets its logging level, it can take control back.
interactive_root_logger = logging.getLogger('apache_beam.runners.interactive')
if any([isinstance(h, IPythonLogHandler)
for h in interactive_root_logger.handlers]):
return
interactive_root_logger.setLevel(logging.INFO)
interactive_root_logger.addHandler(IPythonLogHandler())
  # Disable propagation so that logs emitted from interactive modules are
  # only handled by loggers and handlers defined within interactive packages.
interactive_root_logger.propagate = False
class IPythonLogHandler(logging.Handler):
"""A logging handler to display logs as HTML in IPython backed frontends."""
# TODO(BEAM-7923): Switch to Google hosted CDN once
# https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved.
log_template = """
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<div class="alert alert-{level}">{msg}</div>"""
logging_to_alert_level_map = {
logging.CRITICAL: 'danger',
logging.ERROR: 'danger',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'dark',
logging.NOTSET: 'light'
}
def emit(self, record):
try:
from html import escape
from IPython.core.display import HTML
from IPython.core.display import display
display(
HTML(
self.log_template.format(
level=self.logging_to_alert_level_map[record.levelno],
msg=escape(record.msg % record.args))))
except ImportError:
pass # NOOP when dependencies are not available.
def obfuscate(*inputs):
# type: (*Any) -> str
"""Obfuscates any inputs into a hexadecimal string."""
str_inputs = [str(input) for input in inputs]
merged_inputs = '_'.join(str_inputs)
return hashlib.md5(merged_inputs.encode('utf-8')).hexdigest()
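# For instance, obfuscate('pipeline', 42) joins the inputs into 'pipeline_42'
# and returns its 32-character MD5 hex digest, so equal inputs always map to
# the same identifier (values here are illustrative).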
class ProgressIndicator(object):
"""An indicator visualizing code execution in progress."""
# TODO(BEAM-7923): Switch to Google hosted CDN once
# https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved.
spinner_template = """
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<div id="{id}" class="spinner-border text-info" role="status">
</div>"""
spinner_removal_template = """
$("#{id}").remove();"""
def __init__(self, enter_text, exit_text):
# type: (str, str) -> None
self._id = 'progress_indicator_{}'.format(obfuscate(id(self)))
self._enter_text = enter_text
self._exit_text = exit_text
def __enter__(self):
try:
from IPython.core.display import HTML
from IPython.core.display import display
from apache_beam.runners.interactive import interactive_environment as ie
if ie.current_env().is_in_notebook:
display(HTML(self.spinner_template.format(id=self._id)))
else:
display(self._enter_text)
except ImportError as e:
_LOGGER.error(
          'Please use interactive Beam features in an IPython '
'or notebook environment: %s' % e)
def __exit__(self, exc_type, exc_value, traceback):
try:
from IPython.core.display import Javascript
from IPython.core.display import display
from IPython.core.display import display_javascript
from apache_beam.runners.interactive import interactive_environment as ie
if ie.current_env().is_in_notebook:
script = self.spinner_removal_template.format(id=self._id)
display_javascript(
Javascript(
ie._JQUERY_WITH_DATATABLE_TEMPLATE.format(
customized_script=script)))
else:
display(self._exit_text)
except ImportError as e:
_LOGGER.error(
          'Please use interactive Beam features in an IPython '
'or notebook environment: %s' % e)
def progress_indicated(func):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""A decorator using a unique progress indicator as a context manager to
execute the given function within."""
@functools.wraps(func)
def run_within_progress_indicator(*args, **kwargs):
with ProgressIndicator('Processing...', 'Done.'):
return func(*args, **kwargs)
return run_within_progress_indicator
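# Usage sketch (hypothetical function name): the decorated callable shows the
# spinner (or the plain enter/exit text outside notebooks) while it runs.
#
#   @progress_indicated
#   def collect_and_display(pcoll):
#     ...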
def as_json(func):
# type: (Callable[..., Any]) -> Callable[..., str]
"""A decorator convert python objects returned by callables to json
string.
The decorated function should always return an object parsable by json.dumps.
If the object is not parsable, the str() of original object is returned
instead.
"""
  def return_as_json(*args, **kwargs):
    return_value = func(*args, **kwargs)
    try:
      return json.dumps(return_value)
    except TypeError:
      # Fall back to str() for objects json.dumps cannot serialize.
      return str(return_value)
return return_as_json
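# Usage sketch (hypothetical function and payload): the decorated callable's
# return value is serialized for, e.g., a Javascript frontend.
#
#   @as_json
#   def job_status():
#     return {'state': 'RUNNING', 'step': 3}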
def deferred_df_to_pcollection(df):
assert isinstance(df, DeferredBase), '{} is not a DeferredBase'.format(df)
# The proxy is used to output a DataFrame with the correct columns.
#
# TODO(BEAM-11064): Once type hints are implemented for pandas, use those
# instead of the proxy.
cache = ExpressionCache()
cache.replace_with_cached(df._expr)
proxy = df._expr.proxy()
return to_pcollection(df, yield_elements='pandas', label=str(df._expr)), proxy
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for PowerVMDriver.
"""
import contextlib
import os
import paramiko
from nova import context
from nova import db
from nova import test
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.network import model as network_model
from nova.openstack.common import processutils
from nova.tests import fake_network_cache_model
from nova.tests.image import fake
from nova.virt import images
from nova.virt.powervm import blockdev as powervm_blockdev
from nova.virt.powervm import common
from nova.virt.powervm import driver as powervm_driver
from nova.virt.powervm import exception
from nova.virt.powervm import lpar
from nova.virt.powervm import operator as powervm_operator
def fake_lpar(instance_name):
return lpar.LPAR(name=instance_name,
lpar_id=1, desired_mem=1024,
max_mem=2048, max_procs=2,
uptime=939395, state='Running')
def fake_ssh_connect(connection):
"""Returns a new paramiko.SSHClient object."""
return paramiko.SSHClient()
def raise_(ex):
"""Raises the given Exception."""
raise ex
class FakePowerVMOperator(powervm_operator.PowerVMOperator):
def get_lpar(self, instance_name, resource_type='lpar'):
return fake_lpar(instance_name)
def run_vios_command(self, cmd):
pass
class FakeIVMOperator(powervm_operator.IVMOperator):
def get_lpar(self, instance_name, resource_type='lpar'):
return fake_lpar(instance_name)
def list_lpar_instances(self):
return ['instance-00000001', 'instance-00000002']
def create_lpar(self, lpar):
pass
def start_lpar(self, instance_name):
pass
def stop_lpar(self, instance_name, time_out=30):
pass
def remove_lpar(self, instance_name):
pass
def get_vhost_by_instance_id(self, instance_id):
return 'vhostfake'
def get_virtual_eth_adapter_id(self):
return 1
def get_disk_name_by_vhost(self, vhost):
return 'lvfake01'
def remove_disk(self, disk_name):
pass
def run_cfg_dev(self, device_name):
pass
def attach_disk_to_vhost(self, disk, vhost):
pass
def get_memory_info(self):
return {'total_mem': 65536, 'avail_mem': 46336}
def get_cpu_info(self):
return {'total_procs': 8.0, 'avail_procs': 6.3}
def get_disk_info(self):
return {'disk_total': 10168,
'disk_used': 0,
'disk_avail': 10168}
def get_hostname(self):
return 'fake-powervm'
def rename_lpar(self, old, new):
pass
def _remove_file(self, file_path):
pass
def set_lpar_mac_base_value(self, instance_name, mac):
pass
def get_logical_vol_size(self, diskname):
pass
def macs_for_instance(self, instance):
return set(['FA:98:64:2B:29:39'])
def run_vios_command(self, cmd):
pass
class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter):
    def __init__(self):
        self.connection_data = common.Connection(host='fake_compute_1',
                                                 username='fake_user',
                                                 password='fake_pass')
def _create_logical_volume(self, size):
return 'lvfake01'
def _remove_logical_volume(self, lv_name):
pass
    def _copy_file_to_device(self, sourcePath, device, decompress=True):
pass
def _copy_image_file(self, sourcePath, remotePath, decompress=False):
finalPath = '/tmp/rhel62.raw.7e358754160433febd6f3318b7c9e335'
size = 4294967296
return finalPath, size
def _copy_device_to_file(self, device_name, file_path):
pass
def _copy_image_file_from_host(self, remote_source_path, local_dest_dir,
compress=False):
snapshot_file = '/tmp/rhel62.raw.7e358754160433febd6f3318b7c9e335'
snap_ref = open(snapshot_file, 'w+')
snap_ref.close()
return snapshot_file
def fake_get_powervm_operator():
return FakeIVMOperator(common.Connection('fake_host', 'fake_user',
'fake_password'))
def create_instance(testcase):
fake.stub_out_image_service(testcase.stubs)
ctxt = context.get_admin_context()
instance_type = db.flavor_get(ctxt, 1)
sys_meta = flavors.save_flavor_info({}, instance_type)
return db.instance_create(ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1,
'memory_mb': 1024,
'vcpus': 2,
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'system_metadata': sys_meta})
class PowerVMDriverTestCase(test.TestCase):
"""Unit tests for PowerVM connection calls."""
fake_network_info = 'fake_network_info'
fake_create_lpar_instance_called = False
def fake_create_lpar_instance(self, instance, network_info,
host_stats=None):
"""Stub for the _create_lpar_instance method.
This stub assumes that 'instance' is the one created in the test case
setUp method and 'network_info' is equal to self.fake_network_info.
@return: fake LPAR based on instance parameter where the name of the
LPAR is the uuid of the instance
"""
self.fake_create_lpar_instance_called = True
self.assertEquals(self.instance, instance)
self.assertEquals(self.fake_network_info, network_info)
return self.powervm_connection._powervm._operator.get_lpar(
instance['uuid'])
def setUp(self):
super(PowerVMDriverTestCase, self).setUp()
self.stubs.Set(powervm_operator, 'get_powervm_operator',
fake_get_powervm_operator)
self.stubs.Set(powervm_operator, 'get_powervm_disk_adapter',
lambda: FakeBlockAdapter())
self.powervm_connection = powervm_driver.PowerVMDriver(None)
self.instance = create_instance(self)
def test_list_instances(self):
instances = self.powervm_connection.list_instances()
self.assertTrue('instance-00000001' in instances)
self.assertTrue('instance-00000002' in instances)
def test_instance_exists(self):
name = self.instance['name']
self.assertTrue(self.powervm_connection.instance_exists(name))
def test_spawn(self):
def fake_image_fetch(context, image_id, file_path,
user_id, project_id):
pass
self.flags(powervm_img_local_path='/images/')
self.stubs.Set(images, 'fetch', fake_image_fetch)
image_meta = {}
image_meta['id'] = '666'
fake_net_info = network_model.NetworkInfo([
fake_network_cache_model.new_vif()])
self.powervm_connection.spawn(context.get_admin_context(),
self.instance, image_meta, [], 's3cr3t',
fake_net_info)
state = self.powervm_connection.get_info(self.instance)['state']
self.assertEqual(state, power_state.RUNNING)
def test_spawn_create_lpar_fail(self):
self.flags(powervm_img_local_path='/images/')
self.stubs.Set(images, 'fetch', lambda *x, **y: None)
self.stubs.Set(
self.powervm_connection._powervm,
'get_host_stats',
lambda *x, **y: raise_(
(processutils.ProcessExecutionError('instance_name'))))
fake_net_info = network_model.NetworkInfo([
fake_network_cache_model.new_vif()])
self.assertRaises(exception.PowerVMLPARCreationFailed,
self.powervm_connection.spawn,
context.get_admin_context(),
self.instance,
{'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)
def test_spawn_cleanup_on_fail(self):
self.flags(powervm_img_local_path='/images/')
self.stubs.Set(images, 'fetch', lambda *x, **y: None)
self.stubs.Set(
self.powervm_connection._powervm._disk_adapter,
'create_volume_from_image',
lambda *x, **y: raise_(exception.PowerVMImageCreationFailed()))
self.stubs.Set(
self.powervm_connection._powervm, '_cleanup',
lambda *x, **y: raise_(Exception('This should be logged.')))
fake_net_info = network_model.NetworkInfo([
fake_network_cache_model.new_vif()])
self.assertRaises(exception.PowerVMImageCreationFailed,
self.powervm_connection.spawn,
context.get_admin_context(),
self.instance,
{'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)
def test_snapshot(self):
def update_task_state(task_state, expected_state=None):
self._loc_task_state = task_state
self._loc_expected_task_state = expected_state
loc_context = context.get_admin_context()
arch = 'fake_arch'
properties = {'instance_id': self.instance['id'],
'user_id': str(loc_context.user_id),
'architecture': arch}
snapshot_name = 'fake_snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
image_service = fake.FakeImageService()
recv_meta = image_service.create(loc_context, sent_meta)
self.powervm_connection.snapshot(loc_context,
self.instance, recv_meta['id'],
update_task_state)
        self.assertEqual(self._loc_task_state, task_states.IMAGE_UPLOADING)
        self.assertEqual(self._loc_expected_task_state,
                         task_states.IMAGE_PENDING_UPLOAD)
        snapshot = image_service.show(loc_context, recv_meta['id'])
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['properties']['architecture'], arch)
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def _set_get_info_stub(self, state):
def fake_get_instance(instance_name):
return {'state': state,
'max_mem': 512,
'desired_mem': 256,
'max_procs': 2,
'uptime': 2000}
self.stubs.Set(self.powervm_connection._powervm, '_get_instance',
fake_get_instance)
def test_get_info_state_nostate(self):
self._set_get_info_stub('')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.NOSTATE)
def test_get_info_state_running(self):
self._set_get_info_stub('Running')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.RUNNING)
def test_get_info_state_starting(self):
self._set_get_info_stub('Starting')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.RUNNING)
def test_get_info_state_shutdown(self):
self._set_get_info_stub('Not Activated')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.SHUTDOWN)
def test_get_info_state_shutting_down(self):
self._set_get_info_stub('Shutting Down')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.SHUTDOWN)
def test_get_info_state_error(self):
self._set_get_info_stub('Error')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.CRASHED)
def test_get_info_state_not_available(self):
self._set_get_info_stub('Not Available')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.CRASHED)
def test_get_info_state_open_firmware(self):
self._set_get_info_stub('Open Firmware')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.CRASHED)
def test_get_info_state_unmapped(self):
self._set_get_info_stub('The Universe')
info_dict = self.powervm_connection.get_info(self.instance)
self.assertEqual(info_dict['state'], power_state.NOSTATE)
def test_destroy(self):
self.powervm_connection.destroy(self.instance, None)
self.stubs.Set(FakeIVMOperator, 'get_lpar', lambda x, y: None)
name = self.instance['name']
self.assertFalse(self.powervm_connection.instance_exists(name))
def test_get_info(self):
info = self.powervm_connection.get_info(self.instance)
self.assertEqual(info['state'], power_state.RUNNING)
self.assertEqual(info['max_mem'], 2048)
self.assertEqual(info['mem'], 1024)
self.assertEqual(info['num_cpu'], 2)
self.assertEqual(info['cpu_time'], 939395)
def test_remote_utility_1(self):
path_one = '/some/file/'
path_two = '/path/filename'
joined_path = common.aix_path_join(path_one, path_two)
expected_path = '/some/file/path/filename'
self.assertEqual(joined_path, expected_path)
def test_remote_utility_2(self):
path_one = '/some/file/'
path_two = 'path/filename'
joined_path = common.aix_path_join(path_one, path_two)
expected_path = '/some/file/path/filename'
self.assertEqual(joined_path, expected_path)
def test_remote_utility_3(self):
path_one = '/some/file'
path_two = '/path/filename'
joined_path = common.aix_path_join(path_one, path_two)
expected_path = '/some/file/path/filename'
self.assertEqual(joined_path, expected_path)
def test_remote_utility_4(self):
path_one = '/some/file'
path_two = 'path/filename'
joined_path = common.aix_path_join(path_one, path_two)
expected_path = '/some/file/path/filename'
self.assertEqual(joined_path, expected_path)
def _test_finish_revert_migration_after_crash(self, backup_made,
new_made,
power_on):
inst = {'name': 'foo'}
network_info = []
network_info.append({'address': 'fa:89:f0:8b:9b:39'})
self.mox.StubOutWithMock(self.powervm_connection, 'instance_exists')
self.mox.StubOutWithMock(self.powervm_connection._powervm, 'destroy')
self.mox.StubOutWithMock(self.powervm_connection._powervm._operator,
'rename_lpar')
self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_on')
self.mox.StubOutWithMock(self.powervm_connection._powervm._operator,
'set_lpar_mac_base_value')
self.powervm_connection.instance_exists('rsz_foo').AndReturn(
backup_made)
if backup_made:
self.powervm_connection._powervm._operator.set_lpar_mac_base_value(
'rsz_foo', 'fa:89:f0:8b:9b:39')
self.powervm_connection.instance_exists('foo').AndReturn(new_made)
if new_made:
self.powervm_connection._powervm.destroy('foo')
self.powervm_connection._powervm._operator.rename_lpar('rsz_foo',
'foo')
if power_on:
self.powervm_connection._powervm.power_on('foo')
self.mox.ReplayAll()
self.powervm_connection.finish_revert_migration(inst, network_info,
block_device_info=None,
power_on=power_on)
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False, True)
def test_finish_revert_migration_after_crash_before_backup(self):
# NOTE(mriedem): tests the power_on=False case also
self._test_finish_revert_migration_after_crash(False, False, False)
def test_migrate_volume_use_instance_name(self):
inst_name = 'instance-00000000'
lv_name = 'logical-vol-name'
src_host = 'compute_host_1'
dest = 'compute_host_1'
image_path = 'some/image/path'
fake_noop = lambda *args, **kwargs: None
self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
'_copy_device_to_file', fake_noop)
self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
'run_vios_command_as_root', fake_noop)
blockdev_op = self.powervm_connection._powervm._disk_adapter
file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
image_path, inst_name)
expected_path = 'some/image/path/instance-00000000_rsz.gz'
self.assertEqual(file_path, expected_path)
def test_migrate_volume_use_lv_name(self):
lv_name = 'logical-vol-name'
src_host = 'compute_host_1'
dest = 'compute_host_1'
image_path = 'some/image/path'
fake_noop = lambda *args, **kwargs: None
self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
'_copy_device_to_file', fake_noop)
self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
'run_vios_command_as_root', fake_noop)
blockdev_op = self.powervm_connection._powervm._disk_adapter
file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
image_path)
expected_path = 'some/image/path/logical-vol-name_rsz.gz'
self.assertEqual(file_path, expected_path)
def _test_deploy_from_migrated_file(self, power_on):
instance = self.instance
context = 'fake_context'
network_info = []
network_info.append({'address': 'fa:89:f0:8b:9b:39'})
dest = '10.8.46.20'
disk_info = {}
disk_info['root_disk_file'] = 'some/file/path.gz'
disk_info['old_lv_size'] = 30
self.flags(powervm_mgr=dest)
fake_op = self.powervm_connection._powervm
self.deploy_from_vios_file_called = False
self.power_on = power_on
def fake_deploy_from_vios_file(lpar, file_path, size,
decompress, power_on):
exp_file_path = 'some/file/path.gz'
exp_size = 40 * 1024 ** 3
exp_decompress = True
self.deploy_from_vios_file_called = True
self.assertEqual(exp_file_path, file_path)
self.assertEqual(exp_size, size)
self.assertEqual(exp_decompress, decompress)
self.assertEqual(self.power_on, power_on)
self.stubs.Set(fake_op, '_deploy_from_vios_file',
fake_deploy_from_vios_file)
self.powervm_connection.finish_migration(context, None,
instance, disk_info, network_info,
None, resize_instance=True,
block_device_info=None,
power_on=power_on)
self.assertEqual(self.deploy_from_vios_file_called, True)
def test_deploy_from_migrated_file_power_on(self):
self._test_deploy_from_migrated_file(True)
def test_deploy_from_migrated_file_power_off(self):
self._test_deploy_from_migrated_file(False)
def test_set_lpar_mac_base_value(self):
instance = self.instance
context = 'fake_context'
dest = '10.8.46.20' # Some fake dest IP
instance_type = 'fake_instance_type'
network_info = []
network_info.append({'address': 'fa:89:f0:8b:9b:39'})
block_device_info = None
self.flags(powervm_mgr=dest)
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_connection._powervm._operator
self.stubs.Set(fake_op, 'get_vhost_by_instance_id', fake_noop)
self.stubs.Set(fake_op, 'get_disk_name_by_vhost', fake_noop)
self.stubs.Set(self.powervm_connection._powervm, 'power_off',
fake_noop)
self.stubs.Set(fake_op, 'get_logical_vol_size',
lambda *args, **kwargs: '20')
self.stubs.Set(self.powervm_connection, '_get_resize_name', fake_noop)
self.stubs.Set(fake_op, 'rename_lpar', fake_noop)
def fake_migrate_disk(*args, **kwargs):
disk_info = {}
disk_info['fake_dict'] = 'some/file/path.gz'
return disk_info
def fake_set_lpar_mac_base_value(inst_name, mac, *args, **kwargs):
# get expected mac address from FakeIVM set
fake_ivm = FakeIVMOperator(None)
exp_mac = fake_ivm.macs_for_instance(inst_name).pop()
self.assertEqual(exp_mac, mac)
self.stubs.Set(self.powervm_connection._powervm, 'migrate_disk',
fake_migrate_disk)
self.stubs.Set(fake_op, 'set_lpar_mac_base_value',
fake_set_lpar_mac_base_value)
disk_info = self.powervm_connection.migrate_disk_and_power_off(
context, instance,
dest, instance_type, network_info, block_device_info)
def test_migrate_build_scp_command(self):
lv_name = 'logical-vol-name'
src_host = 'compute_host_1'
dest = 'compute_host_2'
image_path = 'some/image/path'
fake_noop = lambda *args, **kwargs: None
@contextlib.contextmanager
def fake_vios_to_vios_auth(*args, **kwargs):
key_name = 'some_key'
yield key_name
self.stubs.Set(common, 'vios_to_vios_auth',
fake_vios_to_vios_auth)
self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
'run_vios_command_as_root', fake_noop)
def fake_run_vios_command(*args, **kwargs):
cmd = args[0]
exp_cmd = ' '.join(['scp -o "StrictHostKeyChecking no" -i',
'some_key',
'some/image/path/logical-vol-name_rsz.gz',
'fake_user@compute_host_2:some/image/path'])
self.assertEqual(exp_cmd, cmd)
self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
'run_vios_command',
fake_run_vios_command)
blockdev_op = self.powervm_connection._powervm._disk_adapter
file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
image_path)
def test_get_resize_name(self):
inst_name = 'instance-00000001'
expected_name = 'rsz_instance-00000001'
result = self.powervm_connection._get_resize_name(inst_name)
self.assertEqual(expected_name, result)
def test_get_long_resize_name(self):
inst_name = 'some_really_long_instance_name_00000001'
expected_name = 'rsz__really_long_instance_name_00000001'
result = self.powervm_connection._get_resize_name(inst_name)
self.assertEqual(expected_name, result)
def test_finish_migration_raises_exception(self):
# Tests that the finish_migration method will raise an exception
# if the 'root_disk_file' key is not found in the disk_info parameter.
self.stubs.Set(self.powervm_connection._powervm,
'_create_lpar_instance', self.fake_create_lpar_instance)
self.assertRaises(exception.PowerVMUnrecognizedRootDevice,
self.powervm_connection.finish_migration,
context.get_admin_context(), None,
self.instance, {'old_lv_size': '20'},
self.fake_network_info, None, True)
self.assertTrue(self.fake_create_lpar_instance_called)
def test_finish_migration_successful(self):
# Tests a successful migration (resize) flow and asserts various
# methods called along the way with expected argument values.
fake_file_path = 'some/file/path.py'
disk_info = {'root_disk_file': fake_file_path,
'old_lv_size': '10'}
fake_flavor = {'root_gb': 20}
fake_extract_flavor = lambda *args, **kwargs: fake_flavor
self.fake_deploy_from_migrated_file_called = False
def fake_deploy_from_migrated_file(lpar, file_path, size,
power_on=True):
self.fake_deploy_from_migrated_file_called = True
# assert the lpar is the one created for this test
self.assertEquals(self.instance['uuid'], lpar['name'])
self.assertEquals(fake_file_path, file_path)
# this tests that the 20GB fake_flavor was used
self.assertEqual(fake_flavor['root_gb'] * pow(1024, 3), size)
self.assertTrue(power_on)
self.stubs.Set(self.powervm_connection._powervm,
'_create_lpar_instance',
self.fake_create_lpar_instance)
self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor)
self.stubs.Set(self.powervm_connection._powervm,
'deploy_from_migrated_file',
fake_deploy_from_migrated_file)
self.powervm_connection.finish_migration(context.get_admin_context(),
None, self.instance,
disk_info,
self.fake_network_info,
None, True)
self.assertTrue(self.fake_create_lpar_instance_called)
self.assertTrue(self.fake_deploy_from_migrated_file_called)
def test_check_host_resources_insufficient_memory(self):
# Tests that the _check_host_resources method will raise an exception
# when the host has insufficient memory for the request.
host_stats = {'host_memory_free': 512,
'vcpus': 12,
'vcpus_used': 1}
self.assertRaises(exception.PowerVMInsufficientFreeMemory,
self.powervm_connection._powervm._check_host_resources,
self.instance, vcpus=2, mem=4096, host_stats=host_stats)
def test_check_host_resources_insufficient_vcpus(self):
# Tests that the _check_host_resources method will raise an exception
# when the host has insufficient CPU for the request.
host_stats = {'host_memory_free': 4096,
'vcpus': 2,
'vcpus_used': 1}
self.assertRaises(exception.PowerVMInsufficientCPU,
self.powervm_connection._powervm._check_host_resources,
self.instance, vcpus=12, mem=512, host_stats=host_stats)
def test_create_lpar_instance_raise_insufficient_memory(self):
# This test will raise an exception because we use the instance
# created for this test case which requires 1024 MB of memory
# but the host only has 512 free.
host_stats = {'host_memory_free': 512,
'vcpus': 12,
'vcpus_used': 1}
self.assertRaises(exception.PowerVMInsufficientFreeMemory,
self.powervm_connection._powervm._create_lpar_instance,
self.instance, self.fake_network_info, host_stats)
def test_create_lpar_instance_raise_insufficient_vcpus(self):
# This test will raise an exception because we use the instance
# created for this test case which requires 2 CPUs but the host only
# has 1 CPU free.
host_stats = {'host_memory_free': 4096,
'vcpus': 1,
'vcpus_used': 1}
self.assertRaises(exception.PowerVMInsufficientCPU,
self.powervm_connection._powervm._create_lpar_instance,
self.instance, self.fake_network_info, host_stats)
def test_confirm_migration_old_instance_destroyed(self):
# Tests that the source instance is destroyed when a migration
# is confirmed.
resize_name = 'rsz_instance'
self.fake_destroy_called = False
def fake_get_resize_name(instance_name):
self.assertEquals(self.instance['name'], instance_name)
return resize_name
def fake_destroy(instance_name, destroy_disks=True):
self.fake_destroy_called = True
self.assertEquals(resize_name, instance_name)
self.assertTrue(destroy_disks)
self.stubs.Set(self.powervm_connection, '_get_resize_name',
fake_get_resize_name)
self.stubs.Set(self.powervm_connection._powervm, 'destroy',
fake_destroy)
self.powervm_connection.confirm_migration(True, self.instance,
self.fake_network_info)
self.assertTrue(self.fake_destroy_called)
def test_get_host_stats(self):
host_stats = self.powervm_connection.get_host_stats(True)
self.assertIsNotNone(host_stats)
self.assertEquals(host_stats['vcpus'], 8.0)
self.assertEquals(round(host_stats['vcpus_used'], 1), 1.7)
self.assertEquals(host_stats['host_memory_total'], 65536)
self.assertEquals(host_stats['host_memory_free'], 46336)
self.assertEquals(host_stats['disk_total'], 10168)
self.assertEquals(host_stats['disk_used'], 0)
self.assertEquals(host_stats['disk_available'], 10168)
self.assertEquals(host_stats['disk_total'],
host_stats['disk_used'] +
host_stats['disk_available'])
self.assertEquals(host_stats['cpu_info'], ('ppc64', 'powervm', '3940'))
self.assertEquals(host_stats['hypervisor_type'], 'powervm')
self.assertEquals(host_stats['hypervisor_version'], '7.1')
self.assertEquals(host_stats['hypervisor_hostname'], "fake-powervm")
self.assertEquals(host_stats['supported_instances'][0][0], "ppc64")
self.assertEquals(host_stats['supported_instances'][0][1], "powervm")
self.assertEquals(host_stats['supported_instances'][0][2], "hvm")
def test_get_host_uptime(self):
# Tests that the get_host_uptime method issues the proper sysstat
# command and parses the output correctly.
exp_cmd = "ioscli sysstat -short fake_user"
output = [("02:54PM up 24 days, 5:41, 1 user, "
"load average: 0.06, 0.03, 0.02")]
fake_op = self.powervm_connection._powervm
self.mox.StubOutWithMock(fake_op._operator, 'run_vios_command')
fake_op._operator.run_vios_command(exp_cmd).AndReturn(output)
self.mox.ReplayAll()
# the host parameter isn't used so we just pass None
uptime = self.powervm_connection.get_host_uptime(None)
self.assertEquals(output[0], uptime)
class PowerVMDriverLparTestCase(test.TestCase):
"""Unit tests for PowerVM connection calls."""
def setUp(self):
super(PowerVMDriverLparTestCase, self).setUp()
self.stubs.Set(powervm_operator.PowerVMOperator, '_update_host_stats',
lambda self: None)
self.powervm_connection = powervm_driver.PowerVMDriver(None)
def test_set_lpar_mac_base_value_command(self):
inst_name = 'some_instance'
mac = 'FA:98:64:2B:29:39'
exp_mac_str = mac[:-2].replace(':', '')
exp_cmd = ('chsyscfg -r lpar -i "name=%(inst_name)s, '
'virtual_eth_mac_base_value=%(exp_mac_str)s"') % locals()
fake_op = self.powervm_connection._powervm
self.mox.StubOutWithMock(fake_op._operator, 'run_vios_command')
fake_op._operator.run_vios_command(exp_cmd)
self.mox.ReplayAll()
fake_op._operator.set_lpar_mac_base_value(inst_name, mac)
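# For reference: with mac 'FA:98:64:2B:29:39' the exp_mac_str above is
# 'FA98642B29' -- the colon-stripped MAC with its final octet dropped,
# presumably so the hypervisor can assign the last octet itself.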
class PowerVMDriverCommonTestCase(test.TestCase):
"""Unit tests for the nova.virt.powervm.common module."""
def setUp(self):
super(PowerVMDriverCommonTestCase, self).setUp()
# our fake connection information never changes since we can't
# actually connect to anything for these tests
self.connection = common.Connection('fake_host', 'user', 'password')
def test_check_connection_ssh_is_none(self):
"""
Passes None as the ssh object to the check_connection method.
The method should create a new ssh connection using the
Connection object and return it.
"""
self.stubs.Set(common, 'ssh_connect', fake_ssh_connect)
ssh = common.check_connection(None, self.connection)
self.assertIsNotNone(ssh)
def test_check_connection_transport_is_dead(self):
"""
Passes an ssh object to the check_connection method which
does not have a transport set.
The method should create a new ssh connection using the
Connection object and return it.
"""
self.stubs.Set(common, 'ssh_connect', fake_ssh_connect)
ssh1 = fake_ssh_connect(self.connection)
ssh2 = common.check_connection(ssh1, self.connection)
self.assertIsNotNone(ssh2)
self.assertNotEqual(ssh1, ssh2)
def test_check_connection_raise_ssh_exception(self):
"""
Passes an ssh object without a live transport to the
check_connection method while re-establishing the connection fails.
The method should raise an SSHException.
"""
self.stubs.Set(common, 'ssh_connect',
lambda *x, **y: raise_(paramiko.SSHException(
'Error connecting to host.')))
ssh = fake_ssh_connect(self.connection)
self.assertRaises(paramiko.SSHException,
common.check_connection,
ssh, self.connection)
def fake_copy_image_file(source_path, remote_path):
return '/tmp/fake_file', 1
class PowerVMLocalVolumeAdapterTestCase(test.TestCase):
"""
Unit tests for nova.virt.powervm.blockdev.PowerVMLocalVolumeAdapter.
"""
def setUp(self):
super(PowerVMLocalVolumeAdapterTestCase, self).setUp()
self.context = context.get_admin_context()
self.connection = common.Connection(host='fake_compute_1',
username='fake_user',
password='fake_pass')
self.powervm_adapter = powervm_blockdev.PowerVMLocalVolumeAdapter(
self.connection)
self.instance = create_instance(self)
self.image_id = self.instance['image_ref']
def test_create_volume_from_image_fails_no_disk_name(self):
"""
Tests that delete_volume is not called after create_logical_volume
fails.
"""
def fake_create_logical_volume(size):
raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
def fake_delete_volume(volume_info):
self.fail("Should not be called to do cleanup.")
self.stubs.Set(self.powervm_adapter, '_copy_image_file',
fake_copy_image_file)
self.stubs.Set(self.powervm_adapter, '_create_logical_volume',
fake_create_logical_volume)
self.stubs.Set(self.powervm_adapter, 'delete_volume',
fake_delete_volume)
self.assertRaises(exception.PowerVMNoSpaceLeftOnVolumeGroup,
self.powervm_adapter.create_volume_from_image,
self.context, self.instance, self.image_id)
def test_create_volume_from_image_fails_with_disk_name(self):
"""
Tests that delete_volume is called to cleanup the volume after
create_logical_volume was successful but copy_file_to_device fails.
"""
disk_name = 'lvm_disk_name'
def fake_create_logical_volume(size):
return disk_name
def fake_copy_file_to_device(source_path, device):
raise exception.PowerVMConnectionFailed()
self.delete_volume_called = False
def fake_delete_volume(volume_info):
self.assertEquals(disk_name, volume_info)
self.delete_volume_called = True
self.stubs.Set(self.powervm_adapter, '_copy_image_file',
fake_copy_image_file)
self.stubs.Set(self.powervm_adapter, '_create_logical_volume',
fake_create_logical_volume)
self.stubs.Set(self.powervm_adapter, '_copy_file_to_device',
fake_copy_file_to_device)
self.stubs.Set(self.powervm_adapter, 'delete_volume',
fake_delete_volume)
self.assertRaises(exception.PowerVMConnectionFailed,
self.powervm_adapter.create_volume_from_image,
self.context, self.instance, self.image_id)
self.assertTrue(self.delete_volume_called)
def test_copy_image_file_ftp_failed(self):
file_path = os.tempnam('/tmp', 'image')
remote_path = '/mnt/openstack/images'
exp_remote_path = os.path.join(remote_path,
os.path.basename(file_path))
exp_cmd = ' '.join(['/usr/bin/rm -f', exp_remote_path])
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_adapter
self.stubs.Set(fake_op, 'run_vios_command', fake_noop)
self.stubs.Set(fake_op, '_checksum_local_file', fake_noop)
self.mox.StubOutWithMock(common, 'ftp_put_command')
self.mox.StubOutWithMock(self.powervm_adapter,
'run_vios_command_as_root')
msg_args = {'ftp_cmd': 'PUT',
'source_path': file_path,
'dest_path': remote_path}
exp_exception = exception.PowerVMFTPTransferFailed(**msg_args)
common.ftp_put_command(self.connection, file_path,
remote_path).AndRaise(exp_exception)
self.powervm_adapter.run_vios_command_as_root(exp_cmd).AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.PowerVMFTPTransferFailed,
self.powervm_adapter._copy_image_file,
file_path, remote_path)
def test_copy_image_file_wrong_checksum(self):
file_path = os.tempnam('/tmp', 'image')
remote_path = '/mnt/openstack/images'
exp_remote_path = os.path.join(remote_path,
os.path.basename(file_path))
exp_cmd = ' '.join(['/usr/bin/rm -f', exp_remote_path])
def fake_md5sum_remote_file(remote_path):
return '3202937169'
def fake_checksum_local_file(source_path):
return '3229026618'
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_adapter
self.stubs.Set(fake_op, 'run_vios_command', fake_noop)
self.stubs.Set(fake_op, '_md5sum_remote_file',
fake_md5sum_remote_file)
self.stubs.Set(fake_op, '_checksum_local_file',
fake_checksum_local_file)
self.stubs.Set(common, 'ftp_put_command', fake_noop)
self.mox.StubOutWithMock(self.powervm_adapter,
'run_vios_command_as_root')
self.powervm_adapter.run_vios_command_as_root(exp_cmd).AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.PowerVMFileTransferFailed,
self.powervm_adapter._copy_image_file,
file_path, remote_path)
def test_checksum_local_file(self):
file_path = os.tempnam('/tmp', 'image')
img_file = file(file_path, 'w')
img_file.write('This is a test')
img_file.close()
exp_md5sum = 'ce114e4501d2f4e2dcea3e17b546f339'
self.assertEqual(self.powervm_adapter._checksum_local_file(file_path),
exp_md5sum)
os.remove(file_path)
def test_copy_image_file_from_host_with_wrong_checksum(self):
local_path = 'some/tmp'
remote_path = os.tempnam('/mnt/openstack/images', 'image')
def fake_md5sum_remote_file(remote_path):
return '3202937169'
def fake_checksum_local_file(source_path):
return '3229026618'
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_adapter
self.stubs.Set(fake_op, 'run_vios_command_as_root', fake_noop)
self.stubs.Set(fake_op, '_md5sum_remote_file',
fake_md5sum_remote_file)
self.stubs.Set(fake_op, '_checksum_local_file',
fake_checksum_local_file)
self.stubs.Set(common, 'ftp_get_command', fake_noop)
self.assertRaises(exception.PowerVMFileTransferFailed,
self.powervm_adapter._copy_image_file_from_host,
remote_path, local_path)
|
|
"""
A minimal EWMH-aware OO layer over xcffib. This is NOT intended to be
complete - it only implements the subset of functionality needed by qtile.
"""
from __future__ import print_function, division
import six
from xcffib.xproto import CW, WindowClass, EventMask
from xcffib.xfixes import SelectionEventMask
import xcffib
import xcffib.randr
import xcffib.xinerama
import xcffib.xproto
from . import xkeysyms
keysyms = xkeysyms.keysyms
# These should be in xcffib:
ModMasks = {
"shift": 1 << 0,
"lock": 1 << 1,
"control": 1 << 2,
"mod1": 1 << 3,
"mod2": 1 << 4,
"mod3": 1 << 5,
"mod4": 1 << 6,
"mod5": 1 << 7,
}
ModMapOrder = [
"shift",
"lock",
"control",
"mod1",
"mod2",
"mod3",
"mod4",
"mod5"
]
AllButtonsMask = 0b11111 << 8
ButtonMotionMask = 1 << 13
ButtonReleaseMask = 1 << 3
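# Note: these values match the X core protocol -- ButtonReleaseMask is the
# EventMask bit for ButtonRelease events (1 << 3), ButtonMotionMask the bit
# for pointer motion while a button is held (1 << 13), and AllButtonsMask
# covers the Button1Mask..Button5Mask state bits (bits 8-12).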
NormalHintsFlags = {
"USPosition": 1, # User-specified x, y
"USSize": 2, # User-specified width, height
"PPosition": 4, # Program-specified position
"PSize": 8, # Program-specified size
"PMinSize": 16, # Program-specified minimum size
"PMaxSize": 32, # Program-specified maximum size
"PResizeInc": 64, # Program-specified resize increments
"PAspect": 128, # Program-specified min and max aspect ratios
"PBaseSize": 256, # Program-specified base size
"PWinGravity": 512, # Program-specified window gravity
}
HintsFlags = {
"InputHint": 1, # input
"StateHint": 2, # initial_state
"IconPixmapHint": 4, # icon_pixmap
"IconWindowHint": 8, # icon_window
"IconPositionHint": 16, # icon_x & icon_y
"IconMaskHint": 32, # icon_mask
"WindowGroupHint": 64, # window_group
"MessageHint": 128, # (this bit is obsolete)
"UrgencyHint": 256, # urgency
}
WindowTypes = {
'_NET_WM_WINDOW_TYPE_DESKTOP': "desktop",
'_NET_WM_WINDOW_TYPE_DOCK': "dock",
'_NET_WM_WINDOW_TYPE_TOOLBAR': "toolbar",
'_NET_WM_WINDOW_TYPE_MENU': "menu",
'_NET_WM_WINDOW_TYPE_UTILITY': "utility",
'_NET_WM_WINDOW_TYPE_SPLASH': "splash",
'_NET_WM_WINDOW_TYPE_DIALOG': "dialog",
'_NET_WM_WINDOW_TYPE_DROPDOWN_MENU': "dropdown",
'_NET_WM_WINDOW_TYPE_POPUP_MENU': "menu",
'_NET_WM_WINDOW_TYPE_TOOLTIP': "tooltip",
'_NET_WM_WINDOW_TYPE_NOTIFICATION': "notification",
'_NET_WM_WINDOW_TYPE_COMBO': "combo",
'_NET_WM_WINDOW_TYPE_DND': "dnd",
'_NET_WM_WINDOW_TYPE_NORMAL': "normal",
}
WindowStates = {
None: 'normal',
'_NET_WM_STATE_FULLSCREEN': 'fullscreen',
}
# Maps property names to types and formats.
PropertyMap = {
# ewmh properties
"_NET_DESKTOP_GEOMETRY": ("CARDINAL", 32),
"_NET_SUPPORTED": ("ATOM", 32),
"_NET_SUPPORTING_WM_CHECK": ("WINDOW", 32),
"_NET_WM_NAME": ("UTF8_STRING", 8),
"_NET_WM_PID": ("CARDINAL", 32),
"_NET_CLIENT_LIST": ("WINDOW", 32),
"_NET_CLIENT_LIST_STACKING": ("WINDOW", 32),
"_NET_NUMBER_OF_DESKTOPS": ("CARDINAL", 32),
"_NET_CURRENT_DESKTOP": ("CARDINAL", 32),
"_NET_DESKTOP_NAMES": ("UTF8_STRING", 8),
"_NET_WORKAREA": ("CARDINAL", 32),
"_NET_ACTIVE_WINDOW": ("WINDOW", 32),
"_NET_WM_DESKTOP": ("CARDINAL", 32),
"_NET_WM_STRUT": ("CARDINAL", 32),
"_NET_WM_STRUT_PARTIAL": ("CARDINAL", 32),
"_NET_WM_WINDOW_OPACITY": ("CARDINAL", 32),
"_NET_WM_WINDOW_TYPE": ("CARDINAL", 32),
# Net State
"_NET_WM_STATE": ("ATOM", 32),
"_NET_WM_STATE_STICKY": ("ATOM", 32),
"_NET_WM_STATE_SKIP_TASKBAR": ("ATOM", 32),
"_NET_WM_STATE_FULLSCREEN": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_HORZ": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_VERT": ("ATOM", 32),
"_NET_WM_STATE_ABOVE": ("ATOM", 32),
"_NET_WM_STATE_BELOW": ("ATOM", 32),
"_NET_WM_STATE_MODAL": ("ATOM", 32),
"_NET_WM_STATE_HIDDEN": ("ATOM", 32),
"_NET_WM_STATE_DEMANDS_ATTENTION": ("ATOM", 32),
# ICCCM
"WM_STATE": ("WM_STATE", 32),
# Qtile-specific properties
"QTILE_INTERNAL": ("CARDINAL", 32)
}
# TODO add everything required here
# http://standards.freedesktop.org/wm-spec/1.4/ar01s03.html
SUPPORTED_ATOMS = [
'_NET_WM_PID',
'_NET_ACTIVE_WINDOW',
'_NET_WM_DESKTOP',
'_NET_CURRENT_DESKTOP',
'_NET_CLIENT_LIST',
'_NET_CLIENT_LIST_STACKING',
'_NET_SUPPORTED',
'_NET_WM_STATE',
'_NET_WM_STATE_FULLSCREEN',
'_NET_SUPPORTING_WM_CHECK',
'_NET_WM_NAME',
'_NET_WM_STRUT',
'_NET_WM_STRUT_PARTIAL',
'_NET_WM_WINDOW_TYPE',
'WM_WINDOW_ROLE',
'WM_TAKE_FOCUS',
'WM_PROTOCOLS',
'WM_DELETE_WINDOW',
'UTF8_STRING',
]
SUPPORTED_ATOMS += WindowTypes.keys()
XCB_CONN_ERRORS = {
1: 'XCB_CONN_ERROR',
2: 'XCB_CONN_CLOSED_EXT_NOTSUPPORTED',
3: 'XCB_CONN_CLOSED_MEM_INSUFFICIENT',
4: 'XCB_CONN_CLOSED_REQ_LEN_EXCEED',
5: 'XCB_CONN_CLOSED_PARSE_ERR',
6: 'XCB_CONN_CLOSED_INVALID_SCREEN',
7: 'XCB_CONN_CLOSED_FDPASSING_FAILED',
}
def toStr(s):
# return "".join([chr(i) for i in s.name])
return s.name.to_string()
class MaskMap:
"""
A general utility class that encapsulates the way the mask/value idiom
works in xcffib. It understands a special attribute _maskvalue on
objects, which will be used instead of the object value if present.
This lets us pass in a Font object, rather than Font.fid, for example.
"""
def __init__(self, obj):
self.mmap = []
for i in dir(obj):
if not i.startswith("_"):
self.mmap.append((getattr(obj, i), i.lower()))
self.mmap.sort()
def __call__(self, **kwargs):
"""
kwargs: keys should be in the mmap name set
Returns a (mask, values) tuple.
"""
mask = 0
values = []
for m, s in self.mmap:
if s in kwargs:
val = kwargs.get(s)
if val is not None:
mask |= m
values.append(getattr(val, "_maskvalue", val))
del kwargs[s]
if kwargs:
raise ValueError("Unknown mask names: %s" % list(kwargs.keys()))
return mask, values
ConfigureMasks = MaskMap(xcffib.xproto.ConfigWindow)
AttributeMasks = MaskMap(CW)
GCMasks = MaskMap(xcffib.xproto.GC)
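# A minimal usage sketch: each MaskMap turns keyword arguments into the
# (mask, value-list) pair that the corresponding request expects, with the
# values ordered by their mask bits:
#
#   mask, values = ConfigureMasks(x=10, y=20, width=100)
#   # mask has the X, Y and Width bits set; values == [10, 20, 100]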
class AtomCache:
def __init__(self, conn):
self.conn = conn
self.atoms = {}
self.reverse = {}
# We can change the pre-loads not to wait for a return
for name in WindowTypes.keys():
self.insert(name=name)
for i in dir(xcffib.xproto.Atom):
if not i.startswith("_"):
self.insert(name=i, atom=getattr(xcffib.xproto.Atom, i))
def insert(self, name=None, atom=None):
assert name or atom
if atom is None:
c = self.conn.conn.core.InternAtom(False, len(name), name)
atom = c.reply().atom
if name is None:
c = self.conn.conn.core.GetAtomName(atom)
name = c.reply().name.to_string()
self.atoms[name] = atom
self.reverse[atom] = name
def get_name(self, atom):
if atom not in self.reverse:
self.insert(atom=atom)
return self.reverse[atom]
def __getitem__(self, key):
if key not in self.atoms:
self.insert(name=key)
return self.atoms[key]
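# Usage sketch (assuming `conn` is a Connection): lookups intern unknown
# atoms on first access and are answered from the cache afterwards, in
# both directions:
#
#   atom = conn.atoms["_NET_WM_NAME"]   # InternAtom round-trip, then cached
#   name = conn.atoms.get_name(atom)    # '_NET_WM_NAME', from the cache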
class _Wrapper:
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, x):
return getattr(self.wrapped, x)
class Screen(_Wrapper):
"""
This represents an actual X screen.
"""
def __init__(self, conn, screen):
_Wrapper.__init__(self, screen)
self.default_colormap = Colormap(conn, screen.default_colormap)
self.root = Window(conn, self.root)
# FIXME: Where is the right place to set the cursor?
# self.root.set_cursor("Normal")
class PseudoScreen:
"""
This may be a Xinerama screen or a RandR CRTC, both of which are
rectangular sections of an actual Screen.
"""
def __init__(self, conn, x, y, width, height):
self.conn = conn
self.x = x
self.y = y
self.width = width
self.height = height
class Colormap:
def __init__(self, conn, cid):
self.conn = conn
self.cid = cid
def alloc_color(self, color):
"""
Flexible color allocation.
"""
if color.startswith("#"):
if len(color) != 7:
raise ValueError("Invalid color: %s" % color)
def x8to16(i):
return 0xffff * (i & 0xff) // 0xff
r = x8to16(int(color[1] + color[2], 16))
g = x8to16(int(color[3] + color[4], 16))
b = x8to16(int(color[5] + color[6], 16))
return self.conn.conn.core.AllocColor(self.cid, r, g, b).reply()
else:
return self.conn.conn.core.AllocNamedColor(
self.cid, len(color), color
).reply()
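# Usage sketch (assuming `cmap` is a Colormap): both spellings are accepted;
# hex triplets are scaled from 8-bit to 16-bit channels locally, while named
# colors are resolved by the server:
#
#   cmap.alloc_color("#ff8000")   # AllocColor with scaled r, g, b
#   cmap.alloc_color("red")       # AllocNamedColor
#
# x8to16 scales proportionally, e.g. 0x80 -> 0xffff * 0x80 // 0xff == 0x8080.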
class Xinerama:
def __init__(self, conn):
self.ext = conn.conn(xcffib.xinerama.key)
def query_screens(self):
r = self.ext.QueryScreens().reply()
return r.screen_info
class RandR:
def __init__(self, conn):
self.ext = conn.conn(xcffib.randr.key)
self.ext.SelectInput(
conn.default_screen.root.wid,
xcffib.randr.NotifyMask.ScreenChange
)
def query_crtcs(self, root):
l = []
for i in self.ext.GetScreenResources(root).reply().crtcs:
info = self.ext.GetCrtcInfo(i, xcffib.CurrentTime).reply()
d = dict(
x=info.x,
y=info.y,
width=info.width,
height=info.height
)
l.append(d)
return l
class XFixes:
selection_mask = SelectionEventMask.SetSelectionOwner | \
SelectionEventMask.SelectionClientClose | \
SelectionEventMask.SelectionWindowDestroy
def __init__(self, conn):
self.conn = conn
self.ext = conn.conn(xcffib.xfixes.key)
self.ext.QueryVersion(xcffib.xfixes.MAJOR_VERSION,
xcffib.xfixes.MINOR_VERSION)
def select_selection_input(self, window, selection="PRIMARY"):
SELECTION = self.conn.atoms[selection]
self.conn.xfixes.ext.SelectSelectionInput(window.wid,
SELECTION,
self.selection_mask)
class GC:
def __init__(self, conn, gid):
self.conn = conn
self.gid = gid
def change(self, **kwargs):
mask, values = GCMasks(**kwargs)
self.conn.conn.core.ChangeGC(self.gid, mask, values)
class Window:
def __init__(self, conn, wid):
self.conn = conn
self.wid = wid
def _propertyString(self, r):
"""
Extract a string from a window property reply message.
"""
return r.value.to_string()
def _propertyUTF8(self, r):
return r.value.to_utf8()
def send_event(self, synthevent, mask=EventMask.NoEvent):
self.conn.conn.core.SendEvent(False, self.wid, mask, synthevent.pack())
def kill_client(self):
self.conn.conn.core.KillClient(self.wid)
def set_input_focus(self):
self.conn.conn.core.SetInputFocus(
xcffib.xproto.InputFocus.PointerRoot,
self.wid,
xcffib.xproto.Time.CurrentTime
)
def warp_pointer(self, x, y):
self.conn.conn.core.WarpPointer(
0,
self.wid,
0,
0,
0,
0,
x,
y
)
def get_name(self):
"""
Tries to retrieve a canonical window name. We test the following
properties in order of preference: _NET_WM_VISIBLE_NAME,
_NET_WM_NAME, WM_NAME.
"""
r = self.get_property(
"_NET_WM_VISIBLE_NAME",
xcffib.xproto.GetPropertyType.Any
)
if r:
return self._propertyUTF8(r)
r = self.get_property("_NET_WM_NAME", xcffib.xproto.GetPropertyType.Any)
if r:
return self._propertyUTF8(r)
r = self.get_property(
xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.GetPropertyType.Any
)
if r:
return self._propertyString(r)
def get_wm_hints(self):
r = self.get_property("WM_HINTS", xcffib.xproto.GetPropertyType.Any)
if r:
l = r.value.to_atoms()
flags = set()
for k, v in HintsFlags.items():
if l[0] & v:
flags.add(k)
return dict(
flags=flags,
input=l[1],
initial_state=l[2],
icon_pixmap=l[3],
icon_window=l[4],
icon_x=l[5],
icon_y=l[6],
icon_mask=l[7],
window_group=l[8]
)
def get_wm_normal_hints(self):
r = self.get_property(
"WM_NORMAL_HINTS",
xcffib.xproto.GetPropertyType.Any
)
if r:
l = r.value.to_atoms()
flags = set()
for k, v in NormalHintsFlags.items():
if l[0] & v:
flags.add(k)
return dict(
flags=flags,
min_width=l[1 + 4],
min_height=l[2 + 4],
max_width=l[3 + 4],
max_height=l[4 + 4],
width_inc=l[5 + 4],
height_inc=l[6 + 4],
min_aspect=l[7 + 4],
max_aspect=l[8 + 4],
base_width=l[9 + 4],
base_height=l[10 + 4],
win_gravity=l[11 + 4],
)
def get_wm_protocols(self):
r = self.get_property("WM_PROTOCOLS", xcffib.xproto.GetPropertyType.Any)
if r:
l = r.value.to_atoms()
return set([self.conn.atoms.get_name(i) for i in l])
else:
return set()
def get_wm_state(self):
r = self.get_property("WM_STATE", xcffib.xproto.GetPropertyType.Any)
if r:
return r.value.to_atoms()
def get_wm_class(self):
"""
Return an (instance, class) tuple if WM_CLASS exists, or None.
"""
r = self.get_property("WM_CLASS", "STRING")
if r:
s = self._propertyString(r)
return tuple(s.strip("\0").split("\0"))
def get_wm_window_role(self):
r = self.get_property("WM_WINDOW_ROLE", "STRING")
if r:
return self._propertyString(r)
def get_wm_transient_for(self):
r = self.get_property("WM_TRANSIENT_FOR", "ATOM")
if r:
return list(r.value)
def get_wm_icon_name(self):
r = self.get_property("WM_ICON_NAME", "UTF8_STRING")
if r:
return self._propertyString(r)
def get_wm_client_machine(self):
r = self.get_property("WM_CLIENT_MACHINE", "UTF8_STRING")
if r:
return self._propertyString(r)
def get_geometry(self):
q = self.conn.conn.core.GetGeometry(self.wid)
return q.reply()
def get_wm_desktop(self):
r = self.get_property("_NET_WM_DESKTOP", "CARDINAL", unpack=int)
if r:
return r[0]
def get_wm_type(self):
"""
http://standards.freedesktop.org/wm-spec/wm-spec-latest.html#id2551529
"""
r = self.get_property('_NET_WM_WINDOW_TYPE', "ATOM", unpack=int)
if r:
name = self.conn.atoms.get_name(r[0])
return WindowTypes.get(name, name)
def get_net_wm_state(self):
# TODO: _NET_WM_STATE is a *list* of atoms
# We're returning only the first one, but we don't need anything
# other than _NET_WM_STATE_FULLSCREEN (at least for now)
# Fixing this requires refactoring each call to use a list instead
r = self.get_property('_NET_WM_STATE', "ATOM", unpack=int)
if r:
name = self.conn.atoms.get_name(r[0])
return WindowStates.get(name, name)
def get_net_wm_pid(self):
r = self.get_property("_NET_WM_PID", unpack=int)
if r:
return r[0]
def configure(self, **kwargs):
"""
Arguments can be: x, y, width, height, borderwidth, sibling, stackmode
"""
mask, values = ConfigureMasks(**kwargs)
# hack for negative numbers
values = [i & 0xffffffff for i in values]
return self.conn.conn.core.ConfigureWindow(self.wid, mask, values)
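# The masking above reinterprets negative coordinates as unsigned CARD32
# values, e.g. -5 & 0xffffffff == 0xfffffffb, which the server reads back
# as the intended signed position.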
def set_attribute(self, **kwargs):
mask, values = AttributeMasks(**kwargs)
self.conn.conn.core.ChangeWindowAttributesChecked(
self.wid, mask, values
)
def set_cursor(self, name):
cursorId = self.conn.cursors[name]
mask, values = AttributeMasks(cursor=cursorId)
self.conn.conn.core.ChangeWindowAttributesChecked(
self.wid, mask, values
)
def set_property(self, name, value, type=None, format=None):
"""
name: String Atom name
type: String Atom name
format: 8, 16, 32
"""
if name in PropertyMap:
if type or format:
raise ValueError(
"Over-riding default type or format for property."
)
type, format = PropertyMap[name]
else:
if None in (type, format):
raise ValueError(
"Must specify type and format for unknown property."
)
try:
if isinstance(value, six.string_types):
# xcffib will pack the strings
pass
else:
# if this runs without error, the value is already iterable; don't wrap it
six.next(iter(value))
except StopIteration:
# The value was an iterable, just empty
value = []
except TypeError:
# the value wasn't an iterable and wasn't a string, so let's
# wrap it.
value = [value]
self.conn.conn.core.ChangePropertyChecked(
xcffib.xproto.PropMode.Replace,
self.wid,
self.conn.atoms[name],
self.conn.atoms[type],
format, # Format - 8, 16, 32
len(value),
value
).check()
def get_property(self, prop, type=None, unpack=None):
"""
Return the contents of a property as a GetPropertyReply. If unpack
is specified, the decoded value is returned instead: a sequence of
integers when unpack is `int`, or a string when unpack is `str`.
"""
if type is None:
if prop not in PropertyMap:
raise ValueError(
"Must specify type for unknown property."
)
else:
type, _ = PropertyMap[prop]
try:
r = self.conn.conn.core.GetProperty(
False, self.wid,
self.conn.atoms[prop]
if isinstance(prop, six.string_types)
else prop,
self.conn.atoms[type]
if isinstance(type, six.string_types)
else type,
0, (2 ** 32) - 1
).reply()
if not r.value_len:
if unpack:
return []
return None
elif unpack:
# Should we allow more options for unpacking?
if unpack is int:
return r.value.to_atoms()
elif unpack is str:
return r.value.to_string()
else:
return r
except xcffib.xproto.WindowError:
return None
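# Usage sketch (assuming `win` is a Window): without unpack the raw
# GetPropertyReply is returned; unpack=int yields the 32-bit values as a
# sequence and unpack=str yields the property bytes as a string:
#
#   pid = win.get_property("_NET_WM_PID", "CARDINAL", unpack=int)[0]
#   role = win.get_property("WM_WINDOW_ROLE", "STRING", unpack=str)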
def list_properties(self):
r = self.conn.conn.core.ListProperties(self.wid).reply()
return [self.conn.atoms.get_name(i) for i in r.atoms]
def map(self):
self.conn.conn.core.MapWindow(self.wid)
def unmap(self):
self.conn.conn.core.UnmapWindowChecked(self.wid).check()
def get_attributes(self):
return self.conn.conn.core.GetWindowAttributes(self.wid).reply()
def create_gc(self, **kwargs):
gid = self.conn.conn.generate_id()
mask, values = GCMasks(**kwargs)
self.conn.conn.core.CreateGC(gid, self.wid, mask, values)
return GC(self.conn, gid)
def ungrab_key(self, key, modifiers):
"""
Passing None means any key, or any modifier.
"""
if key is None:
key = xcffib.xproto.Atom.Any
if modifiers is None:
modifiers = xcffib.xproto.ModMask.Any
self.conn.conn.core.UngrabKey(key, self.wid, modifiers)
def grab_key(self, key, modifiers, owner_events,
pointer_mode, keyboard_mode):
self.conn.conn.core.GrabKey(
owner_events,
self.wid,
modifiers,
key,
pointer_mode,
keyboard_mode
)
def ungrab_button(self, button, modifiers):
"""
Passing None means any button, or any modifier.
"""
if button is None:
button = xcffib.xproto.Atom.Any
if modifiers is None:
modifiers = xcffib.xproto.ModMask.Any
self.conn.conn.core.UngrabButton(button, self.wid, modifiers)
def grab_button(self, button, modifiers, owner_events,
event_mask, pointer_mode, keyboard_mode):
self.conn.conn.core.GrabButton(
owner_events,
self.wid,
event_mask,
pointer_mode,
keyboard_mode,
xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
button,
modifiers,
)
def grab_pointer(self, owner_events, event_mask, pointer_mode,
keyboard_mode, cursor=None):
self.conn.conn.core.GrabPointer(
owner_events,
self.wid,
event_mask,
pointer_mode,
keyboard_mode,
xcffib.xproto.Atom._None,
cursor or xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
)
def ungrab_pointer(self):
self.conn.conn.core.UngrabPointer(xcffib.xproto.Atom._None)
def query_tree(self):
q = self.conn.conn.core.QueryTree(self.wid).reply()
root = None
parent = None
if q.root:
root = Window(self.conn, q.root)
if q.parent:
parent = Window(self.conn, q.parent)
return root, parent, [Window(self.conn, i) for i in q.children]
class Font:
def __init__(self, conn, fid):
self.conn = conn
self.fid = fid
@property
def _maskvalue(self):
return self.fid
def text_extents(self, s):
s = s + "aaa"
x = self.conn.conn.core.QueryTextExtents(self.fid, len(s), s).reply()
return x
class Connection:
_extmap = {
"xinerama": Xinerama,
"randr": RandR,
"xfixes": XFixes,
}
def __init__(self, display):
self.conn = xcffib.connect(display=display)
self._connected = True
self.cursors = Cursors(self)
self.setup = self.conn.get_setup()
extensions = self.extensions()
self.screens = [Screen(self, i) for i in self.setup.roots]
self.default_screen = self.screens[self.conn.pref_screen]
for i in extensions:
if i in self._extmap:
setattr(self, i, self._extmap[i](self))
self.pseudoscreens = []
if "xinerama" in extensions:
for i, s in enumerate(self.xinerama.query_screens()):
scr = PseudoScreen(
self,
s.x_org,
s.y_org,
s.width,
s.height,
)
self.pseudoscreens.append(scr)
elif "randr" in extensions:
for i in self.randr.query_crtcs(self.screens[0].root.wid):
scr = PseudoScreen(
self,
i["x"],
i["y"],
i["width"],
i["height"],
)
self.pseudoscreens.append(scr)
self.atoms = AtomCache(self)
self.code_to_syms = {}
self.first_sym_to_code = None
self.refresh_keymap()
self.modmap = None
self.refresh_modmap()
def refresh_keymap(self, first=None, count=None):
if first is None:
first = self.setup.min_keycode
count = self.setup.max_keycode - self.setup.min_keycode + 1
q = self.conn.core.GetKeyboardMapping(first, count).reply()
l = []
for i, v in enumerate(q.keysyms):
if not i % q.keysyms_per_keycode:
if l:
self.code_to_syms[
(i // q.keysyms_per_keycode) + first - 1
] = l
l = []
l.append(v)
else:
l.append(v)
assert len(l) == q.keysyms_per_keycode
self.code_to_syms[first + count - 1] = l
first_sym_to_code = {}
for k, s in self.code_to_syms.items():
if s[0] and not s[0] in first_sym_to_code:
first_sym_to_code[s[0]] = k
self.first_sym_to_code = first_sym_to_code
def refresh_modmap(self):
q = self.conn.core.GetModifierMapping().reply()
modmap = {}
for i, k in enumerate(q.keycodes):
l = modmap.setdefault(ModMapOrder[i // q.keycodes_per_modifier], [])
l.append(k)
self.modmap = modmap
def get_modifier(self, keycode):
"""
Return the modifier matching keycode.
"""
for n, l in self.modmap.items():
if keycode in l:
return n
return None
def keysym_to_keycode(self, keysym):
return self.first_sym_to_code.get(keysym, 0)
def keycode_to_keysym(self, keycode, modifier):
if keycode >= len(self.code_to_syms) or \
modifier >= len(self.code_to_syms[keycode]):
return 0
return self.code_to_syms[keycode][modifier]
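# Usage sketch (assuming `conn` is a Connection and that xkeysyms defines
# an entry for "a"): keycodes and keysyms round-trip through the tables
# built by refresh_keymap; column 0 is the unshifted symbol:
#
#   code = conn.keysym_to_keycode(keysyms["a"])
#   sym = conn.keycode_to_keysym(code, 0)   # keysyms["a"] again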
def create_window(self, x, y, width, height):
wid = self.conn.generate_id()
self.conn.core.CreateWindow(
self.default_screen.root_depth,
wid,
self.default_screen.root.wid,
x, y, width, height, 0,
WindowClass.InputOutput,
self.default_screen.root_visual,
CW.BackPixel | CW.EventMask,
[
self.default_screen.black_pixel,
EventMask.StructureNotify | EventMask.Exposure
]
)
return Window(self, wid)
def disconnect(self):
self.conn.disconnect()
self._connected = False
def flush(self):
if self._connected:
return self.conn.flush()
def xsync(self):
# The idea here is that pushing an innocuous request through
# the queue and waiting for a response "syncs" the connection, since
# requests are serviced in order.
self.conn.core.GetInputFocus().reply()
def grab_server(self):
return self.conn.core.GrabServer()
def get_setup(self):
return self.conn.get_setup()
def open_font(self, name):
fid = self.conn.generate_id()
self.conn.core.OpenFont(fid, len(name), name)
return Font(self, fid)
def extensions(self):
return set([
toStr(i).lower()
for i in self.conn.core.ListExtensions().reply().names
])
# Stolen from samurai-x
# (Don't know where to put it, so I'll put it here)
# XCB cursors don't support theming; libxcursor would be a better
# choice, and we (indirectly) depend on it anyway...
class Cursors(dict):
def __init__(self, conn):
self.conn = conn
FLEUR = 52
LEFT_PTR = 68
SIZING = 120
BOTTOM_LEFT_CORNER = 12
BOTTOM_RIGHT_CORNER = 14
TOP_LEFT_CORNER = 134
TOP_RIGHT_CORNER = 136
DOUBLE_ARROW_HORIZ = 108
DOUBLE_ARROW_VERT = 116
cursors = (
('Normal', LEFT_PTR),
('Resize', SIZING),
('ResizeH', DOUBLE_ARROW_HORIZ),
('ResizeV', DOUBLE_ARROW_VERT),
('Move', FLEUR),
('TopRight', TOP_RIGHT_CORNER),
('TopLeft', TOP_LEFT_CORNER),
('BotRight', BOTTOM_RIGHT_CORNER),
('BotLeft', BOTTOM_LEFT_CORNER),
)
for name, cursor_font in cursors:
self._new(name, cursor_font)
def _new(self, name, cursor_font):
fid = self.conn.conn.generate_id()
self.conn.conn.core.OpenFont(fid, len("cursor"), "cursor")
cursor = self.conn.conn.generate_id()
self.conn.conn.core.CreateGlyphCursor(
cursor, fid, fid,
cursor_font, cursor_font + 1,
0, 0, 0,
65535, 65535, 65535
)
self[name] = cursor
|
|
import datetime
from django.test import TestCase
from haystack.indexes import *
from haystack.exceptions import SearchFieldError
from haystack.fields import CharField, FacetField
from haystack.sites import SearchSite, AlreadyRegistered, NotRegistered
from core.models import MockModel, AnotherMockModel
class MockNotAModel(object):
pass
class FakeSearchIndex(BasicSearchIndex):
def update_object(self, instance, **kwargs):
# Incorrect behavior but easy to test and all we care about is that we
# make it here. We rely on the `SearchIndex` tests to ensure correct
# behavior.
return True
def remove_object(self, instance, **kwargs):
# Incorrect behavior but easy to test and all we care about is that we
# make it here. We rely on the `SearchIndex` tests to ensure correct
# behavior.
return True
class InvalidSearchIndex(SearchIndex):
document = CharField(document=True)
class ValidSearchIndex(SearchIndex):
text = CharField(document=True)
author = CharField(index_fieldname='name')
title = CharField(indexed=False)
class AlternateValidSearchIndex(SearchIndex):
text = CharField(document=True)
author = CharField(faceted=True)
title = CharField(faceted=True)
class ExplicitFacetSearchIndex(SearchIndex):
text = CharField(document=True)
author = CharField(faceted=True)
title = CharField()
title_facet = FacetCharField(facet_for='title')
bare_facet = FacetCharField()
class MultiValueValidSearchIndex(SearchIndex):
text = CharField(document=True)
author = MultiValueField(stored=False)
title = CharField(indexed=False)
class SearchSiteTestCase(TestCase):
def setUp(self):
super(SearchSiteTestCase, self).setUp()
self.site = SearchSite()
def test_register(self):
self.assertRaises(AttributeError, self.site.register, MockNotAModel)
self.site.register(MockModel)
self.assertEqual(len(self.site._registry), 1)
self.assert_(MockModel in self.site._registry)
self.assertRaises(AlreadyRegistered, self.site.register, MockModel)
def test_unregister(self):
self.assertRaises(NotRegistered, self.site.unregister, MockModel)
# Depends on proper function of register.
self.site.register(MockModel)
self.site.unregister(MockModel)
self.assertEqual(len(self.site._registry), 0)
self.assertFalse(MockModel in self.site._registry)
def test_get_index(self):
self.assertRaises(NotRegistered, self.site.get_index, MockModel)
self.site.register(MockModel)
self.assert_(isinstance(self.site.get_index(MockModel), BasicSearchIndex))
def test_get_indexes(self):
self.assertEqual(self.site.get_indexes(), {})
self.site.register(MockModel)
indexes = self.site.get_indexes()
self.assert_(isinstance(indexes, dict))
self.assertEqual(len(indexes), 1)
self.assert_(MockModel in indexes)
def test_get_indexed_models(self):
self.assertEqual(self.site.get_indexed_models(), [])
self.site.register(MockModel)
indexed_models = self.site.get_indexed_models()
self.assertEqual(len(indexed_models), 1)
self.assert_(MockModel in indexed_models)
def test_all_searchfields(self):
self.site.register(MockModel)
fields = self.site.all_searchfields()
self.assertEqual(len(fields), 1)
self.assert_('text' in fields)
self.assert_(isinstance(fields['text'], CharField))
self.assertEqual(fields['text'].document, True)
self.assertEqual(fields['text'].use_template, True)
self.site.register(AnotherMockModel)
fields = self.site.all_searchfields()
self.assertEqual(len(fields), 1)
self.assert_('text' in fields)
self.assert_(isinstance(fields['text'], CharField))
self.assertEqual(fields['text'].document, True)
self.assertEqual(fields['text'].use_template, True)
self.site.unregister(AnotherMockModel)
self.site.register(AnotherMockModel, AlternateValidSearchIndex)
fields = self.site.all_searchfields()
self.assertEqual(len(fields), 5)
self.assertEqual(sorted(fields.keys()), ['author', 'author_exact', 'text', 'title', 'title_exact'])
self.assert_('text' in fields)
self.assert_(isinstance(fields['text'], CharField))
self.assertEqual(fields['text'].document, True)
self.assertEqual(fields['text'].use_template, True)
self.assert_('title' in fields)
self.assert_(isinstance(fields['title'], CharField))
self.assertEqual(fields['title'].document, False)
self.assertEqual(fields['title'].use_template, False)
self.assertEqual(fields['title'].faceted, True)
self.assertEqual(fields['title'].indexed, True)
self.assert_('author' in fields)
self.assert_(isinstance(fields['author'], CharField))
self.assertEqual(fields['author'].document, False)
self.assertEqual(fields['author'].use_template, False)
self.assertEqual(fields['author'].faceted, True)
self.assertEqual(fields['author'].stored, True)
self.assertEqual(fields['author'].index_fieldname, 'author')
self.site.unregister(MockModel)
self.site.register(MockModel, ValidSearchIndex)
fields = self.site.all_searchfields()
self.assertEqual(len(fields), 6)
self.assertEqual(sorted(fields.keys()), ['author', 'author_exact', 'name', 'text', 'title', 'title_exact'])
self.assert_('text' in fields)
self.assert_(isinstance(fields['text'], CharField))
self.assertEqual(fields['text'].document, True)
self.assertEqual(fields['text'].use_template, False)
self.assert_('title' in fields)
self.assert_(isinstance(fields['title'], CharField))
self.assertEqual(fields['title'].document, False)
self.assertEqual(fields['title'].use_template, False)
self.assertEqual(fields['title'].faceted, True)
self.assertEqual(fields['title'].indexed, True)
self.assert_('author' in fields)
self.assert_(isinstance(fields['author'], CharField))
self.assertEqual(fields['author'].document, False)
self.assertEqual(fields['author'].use_template, False)
self.assertEqual(fields['author'].faceted, True)
self.assertEqual(fields['author'].index_fieldname, 'author')
self.assertEqual(fields['name'].document, False)
self.assertEqual(fields['name'].use_template, False)
self.assertEqual(fields['name'].faceted, False)
self.assertEqual(fields['name'].index_fieldname, 'name')
self.site.unregister(AnotherMockModel)
self.site.register(AnotherMockModel, MultiValueValidSearchIndex)
fields = self.site.all_searchfields()
self.assertEqual(len(fields), 4)
self.assertEqual(sorted(fields.keys()), ['author', 'name', 'text', 'title'])
self.assert_('text' in fields)
self.assert_(isinstance(fields['text'], CharField))
self.assertEqual(fields['text'].document, True)
self.assertEqual(fields['text'].use_template, False)
self.assert_('title' in fields)
self.assert_(isinstance(fields['title'], CharField))
self.assertEqual(fields['title'].document, False)
self.assertEqual(fields['title'].use_template, False)
self.assertEqual(fields['title'].faceted, False)
self.assertEqual(fields['title'].indexed, False)
self.assert_('author' in fields)
self.assert_(isinstance(fields['author'], MultiValueField))
self.assertEqual(fields['author'].document, False)
self.assertEqual(fields['author'].use_template, False)
self.assertEqual(fields['author'].stored, False)
self.assertEqual(fields['author'].faceted, False)
self.assertEqual(fields['author'].index_fieldname, 'author')
self.site.unregister(AnotherMockModel)
self.site.register(AnotherMockModel, InvalidSearchIndex)
self.assertRaises(SearchFieldError, self.site.all_searchfields)
def test_get_index_fieldname(self):
self.assertEqual(self.site._cached_field_mapping, None)
self.site.register(MockModel, ValidSearchIndex)
self.site.register(AnotherMockModel)
self.site.get_index_fieldname('text')
self.assertEqual(self.site._cached_field_mapping, {
'text': {'index_fieldname': 'text', 'facet_fieldname': None},
'title': {'index_fieldname': 'title', 'facet_fieldname': None},
'author': {'index_fieldname': 'name', 'facet_fieldname': None},
})
self.assertEqual(self.site.get_index_fieldname('text'), 'text')
self.assertEqual(self.site.get_index_fieldname('author'), 'name')
self.assertEqual(self.site.get_index_fieldname('title'), 'title')
# Reset the internal state to test the invalid case.
self.site._cached_field_mapping = None
self.assertEqual(self.site._cached_field_mapping, None)
self.site.unregister(AnotherMockModel)
self.site.register(AnotherMockModel, AlternateValidSearchIndex)
self.assertRaises(SearchFieldError, self.site.get_index_fieldname, 'text')
def test_basic_get_facet_field_name(self):
self.assertEqual(self.site._cached_field_mapping, None)
self.site.register(MockModel, AlternateValidSearchIndex)
self.site.register(AnotherMockModel)
self.site.get_facet_field_name('text')
self.assertEqual(self.site._cached_field_mapping, {
'author': {'facet_fieldname': None, 'index_fieldname': 'author'},
'author_exact': {'facet_fieldname': 'author',
'index_fieldname': 'author_exact'},
'text': {'facet_fieldname': None, 'index_fieldname': 'text'},
'title': {'facet_fieldname': None, 'index_fieldname': 'title'},
'title_exact': {'facet_fieldname': 'title', 'index_fieldname': 'title_exact'},
})
self.assertEqual(self.site.get_index_fieldname('text'), 'text')
self.assertEqual(self.site.get_index_fieldname('author'), 'author')
self.assertEqual(self.site.get_index_fieldname('title'), 'title')
self.assertEqual(self.site.get_facet_field_name('text'), 'text')
self.assertEqual(self.site.get_facet_field_name('author'), 'author_exact')
self.assertEqual(self.site.get_facet_field_name('title'), 'title_exact')
def test_more_advanced_get_facet_field_name(self):
self.assertEqual(self.site._cached_field_mapping, None)
self.site.register(MockModel, ExplicitFacetSearchIndex)
self.site.register(AnotherMockModel)
self.site.get_facet_field_name('text')
self.assertEqual(self.site._cached_field_mapping, {
'author': {'facet_fieldname': None, 'index_fieldname': 'author'},
'author_exact': {'facet_fieldname': 'author', 'index_fieldname': 'author_exact'},
'bare_facet': {'facet_fieldname': 'bare_facet', 'index_fieldname': 'bare_facet'},
'text': {'facet_fieldname': None, 'index_fieldname': 'text'},
'title': {'facet_fieldname': None, 'index_fieldname': 'title'},
'title_facet': {'facet_fieldname': 'title', 'index_fieldname': 'title_facet'},
})
self.assertEqual(self.site.get_facet_field_name('title'), 'title_facet')
self.assertEqual(self.site.get_facet_field_name('bare_facet'), 'bare_facet')
def test_update_object(self):
self.site.register(MockModel, FakeSearchIndex)
mock = MockModel()
mock.pk = 20
mock.user = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(self.site.update_object(mock), True)
def test_remove_object(self):
self.site.register(MockModel, FakeSearchIndex)
mock = MockModel()
mock.pk = 20
self.assertEqual(self.site.remove_object(mock), True)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the Tensorboard debugger data plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import shutil
import numpy as np
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.debugger import debugger_plugin
class DebuggerPluginTest(test.TestCase):
def setUp(self):
# Populate the log directory with debugger events for run '.'.
self.log_dir = self.get_temp_dir()
file_prefix = compat.as_bytes(os.path.join(self.log_dir, 'events.debugger'))
writer = pywrap_tensorflow.EventsWriter(file_prefix)
writer.WriteEvent(
self._CreateEventWithDebugNumericSummary(
op_name='layers/Matmul',
output_slot=0,
wall_time=42,
step=2,
list_of_values=[1, 2, 3]))
writer.WriteEvent(
self._CreateEventWithDebugNumericSummary(
op_name='layers/Matmul',
output_slot=1,
wall_time=43,
step=3,
list_of_values=[4, 5, 6]))
writer.WriteEvent(
self._CreateEventWithDebugNumericSummary(
op_name='logits/Add',
output_slot=0,
wall_time=1337,
step=7,
list_of_values=[7, 8, 9]))
writer.WriteEvent(
self._CreateEventWithDebugNumericSummary(
op_name='logits/Add',
output_slot=0,
wall_time=1338,
step=8,
list_of_values=[10, 11, 12]))
writer.Close()
# Populate the log directory with debugger event for run 'run_foo'.
run_foo_directory = os.path.join(self.log_dir, 'run_foo')
os.mkdir(run_foo_directory)
file_prefix = compat.as_bytes(
os.path.join(run_foo_directory, 'events.debugger'))
writer = pywrap_tensorflow.EventsWriter(file_prefix)
writer.WriteEvent(
self._CreateEventWithDebugNumericSummary(
op_name='layers/Variable',
output_slot=0,
wall_time=4242,
step=42,
list_of_values=[13, 14, 15]))
writer.Close()
# Start a server that will receive requests and respond with health pills.
multiplexer = event_multiplexer.EventMultiplexer({
'.': self.log_dir,
'run_foo': run_foo_directory,
})
self.plugin = debugger_plugin.DebuggerPlugin(multiplexer)
wsgi_app = application.TensorBoardWSGIApp(
self.log_dir, {'debugger': self.plugin}, multiplexer, reload_interval=0)
self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
def tearDown(self):
# Remove the directory with debugger-related events files.
shutil.rmtree(self.log_dir, ignore_errors=True)
def _CreateEventWithDebugNumericSummary(
self, op_name, output_slot, wall_time, step, list_of_values):
"""Creates event with a health pill summary.
Args:
op_name: The name of the op to which a DebugNumericSummary was attached.
output_slot: The numeric output slot for the tensor.
wall_time: The numeric wall time of the event.
step: The step of the event.
list_of_values: A python list of values within the tensor.
Returns:
An event_pb2.Event with a health pill summary.
"""
event = event_pb2.Event(step=step, wall_time=wall_time)
value = event.summary.value.add(
tag='__health_pill__',
node_name='%s:%d:DebugNumericSummary' % (op_name, output_slot))
value.tensor.tensor_shape.dim.add(size=len(list_of_values))
value.tensor.tensor_content = np.array(
list_of_values, dtype=np.float64).tobytes()
return event
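# Note: the node_name encoding built above, e.g.
# 'layers/Matmul:0:DebugNumericSummary' for op 'layers/Matmul' and output
# slot 0, is what ties each health pill event back to its graph node.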
def _DeserializeResponse(self, byte_content):
"""Deserializes byte content that is a JSON encoding.
Args:
byte_content: The byte content of a JSON response.
Returns:
The deserialized python object decoded from JSON.
"""
return json.loads(byte_content.decode('utf-8'))
def testHealthPillsRouteProvided(self):
"""Tests that the plugin offers the route for requesting health pills."""
unused_run_paths = {}
apps = self.plugin.get_plugin_apps(unused_run_paths, self.log_dir)
self.assertIn('/health_pills', apps)
self.assertIsInstance(apps['/health_pills'], collections.Callable)
def testRequestHealthPillsForRunFoo(self):
"""Tests that the plugin produces health pills for a specified run."""
response = self.server.post(
'/data/plugin/debugger/health_pills',
data={
'node_names': json.dumps(['layers/Variable', 'unavailable_node']),
'run': 'run_foo',
})
self.assertEqual(200, response.status_code)
self.assertDictEqual({
'layers/Variable': [{
'wall_time': 4242,
'step': 42,
'node_name': 'layers/Variable',
'output_slot': 0,
'value': [13, 14, 15],
}],
}, self._DeserializeResponse(response.get_data()))
def testRequestHealthPillsForDefaultRun(self):
"""Tests that the plugin produces health pills for the default '.' run."""
# Do not provide a 'run' parameter in POST data.
response = self.server.post(
'/data/plugin/debugger/health_pills',
data={
'node_names': json.dumps(['logits/Add', 'unavailable_node']),
})
self.assertEqual(200, response.status_code)
# The health pills for 'layers/Matmul' should not be included since the
# request excluded that node name.
self.assertDictEqual({
'logits/Add': [
{
'wall_time': 1337,
'step': 7,
'node_name': 'logits/Add',
'output_slot': 0,
'value': [7, 8, 9],
},
{
'wall_time': 1338,
'step': 8,
'node_name': 'logits/Add',
'output_slot': 0,
'value': [10, 11, 12],
},
],
}, self._DeserializeResponse(response.get_data()))
def testGetRequestsUnsupported(self):
"""Tests that GET requests are unsupported."""
response = self.server.get('/data/plugin/debugger/health_pills')
self.assertEqual(405, response.status_code)
def testRequestsWithoutProperPostKeyUnsupported(self):
"""Tests that requests lacking the node_names POST key are unsupported."""
response = self.server.post('/data/plugin/debugger/health_pills')
self.assertEqual(400, response.status_code)
def testRequestsWithBadJsonUnsupported(self):
"""Tests that requests with undecodable JSON are unsupported."""
response = self.server.post(
'/data/plugin/debugger/health_pills',
data={
'node_names': 'some obviously non JSON text',
})
self.assertEqual(400, response.status_code)
def testRequestsWithNonListPostDataUnsupported(self):
"""Tests that requests with loads lacking lists of ops are unsupported."""
response = self.server.post(
'/data/plugin/debugger/health_pills',
data={
'node_names': json.dumps({
'this is a dict': 'and not a list.'
}),
})
self.assertEqual(400, response.status_code)
if __name__ == '__main__':
test.main()
|
|
##
#UserInterface
#Description: This class renders the game board through pygame library method calls,
# and handles user input events.
#
##
import pygame, os, sys, time
from pygame.locals import *
from Building import Building
from Ant import UNIT_STATS
from Constants import *
from GameState import addCoords, subtractCoords
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
OFF_BLACK = (255, 0, 204)
GREY = (195, 195, 195)
DARK_RED = (150, 0, 0)
LIGHT_RED = (255, 0, 0)
DARK_GREEN = (0, 150, 0)
LIGHT_GREEN = (0, 255, 0)
DARK_BLUE = (0, 0, 150)
LIGHT_BLUE = (0, 0, 255)
ROBIN_EGG_BLUE = (0, 204, 204)
GOLDENROD = (238, 173, 14)
CELL_SIZE = Rect(0,0,10,10)
BOARD_SIZE = Rect(0,0,10,10)
CELL_SPACING = 5
FIELD_SPACING = 10
##
#UserInterface
#Description: Handles all drawing and key presses, translating raw pygame events into calls that are easier for the rest of the program to work with.
#
#Variables:
# inputSize - An (x,y) tuple expressing the size of the aNTiCS window in pixels.((int,int))
##
class UserInterface(object):
##
#__init__
#Description: Creates a new UserInterface
#
#Parameters:
# inputSize - the size of the window to be created, in pixels.((int,int))
##
def __init__(self, inputSize):
self.screen = pygame.display.set_mode(inputSize)
pygame.display.set_caption("aNTiCS")
icon = pygame.image.load(os.path.join("Textures", "icon.bmp"))
pygame.display.set_icon(icon)
##
#submitBuild
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitBuild(self):
print "Clicked SUBMIT BUILD"
##
#submitEndTurn
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitEndTurn(self):
print "Clicked SUBMIT END TURN"
##
#gameModeTournament
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def gameModeTournament(self):
print "Clicked GAME MODE TOURNAMENT"
##
#gameModeHumanAI
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def gameModeHumanAI(self):
print "Clicked GAME MODE HUMAN AI"
##
#gameModeAIAI
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def gameModeAIAI(self):
print "Clicked GAME MODE AI AI"
##
#startGame
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def startGame(self):
print "Clicked START GAME"
##
#submitNext
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitNext(self):
print "Clicked NEXT"
##
#submitContinue
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitContinue(self):
print "Clicked CONTINUE"
##
#submitWorker
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitWorker(self):
print "Clicked WORKER"
##
#submitDrone
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitDrone(self):
print "Clicked DRONE"
##
#submitDSoldier
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitDSoldier(self):
print "Clicked DIRECT SOLDIER"
##
#submitISoldier
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitISoldier(self):
print "Clicked INDIRECT SOLDIER"
##
#submitNoBuild
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitNoBuild(self):
print "Clicked BUILD NOTHING"
##
#submitStartTournament
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitStartTournament(self):
print "Clicked START TOURNAMENT"
##
#submitStopTournament
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitStopTournament(self):
print "Clicked STOP TOURNAMENT"
##
#locationClicked
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
#
#Parameters:
# coords - the cell on the board that was clicked.((int,int))
##
def locationClicked(self, coords):
print "Clicked LOCATION " + str(coords)
##
#checkBoxClicked
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
#
#Parameters:
# index - the index into the array self.allAIs.(int)
##
def checkBoxClicked(self, index):
print "CLICKED CHECKBOX NUMBER " + str(index)
##
#submitSelectedAIs
#Description: Dummy method used as a placeholder for the event handling methods that will be passed in from Game.py.
##
def submitSelectedAIs(self):
print "CLICKED SUBMIT SELECTED AIS"
##
#notify
#Description: changes the message displayed in the notification box.
#
#Parameters:
# message - The message to be relayed to the user.(string)
##
def notify(self, message):
self.lastNotification = message
##
#getCaptureValues
#Description: determines whether any anthills are being captured.
#
#Parameters:
# state - the current gameState.(GameState)
#
#Returns: a tuple representing the anthills of each player. If a player's
# anthill is being captured, their space in the tuple will contain the
# health of their anthill. Otherwise that space will be set to -1.
##
def getCaptureValues(self, state):
#Find the health of player 1's anthill, and whether it's being captured.
player1Val = -1
player1Hill = state.inventories[PLAYER_ONE].getAnthill()
if player1Hill != None:
hCoords = player1Hill.coords
boardAnt = state.board[hCoords[0]][hCoords[1]].ant
if boardAnt != None and boardAnt.player != player1Hill.player:
player1Val = player1Hill.captureHealth
#Find the health of player 2's anthill, and whether it's being captured.
player2Val = -1
player2Hill = state.inventories[PLAYER_TWO].getAnthill()
if player2Hill != None:
hCoords = player2Hill.coords
boardAnt = state.board[hCoords[0]][hCoords[1]].ant
if boardAnt != None and boardAnt.player != player2Hill.player:
player2Val = player2Hill.captureHealth
#Return the values acquired from the above calculations.
return player1Val, player2Val
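#Example: a return value of (-1, 3) means player one's anthill is safe
#while player two's anthill is being captured with 3 capture health left.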
##
#getCaptureValue
#Description: determines whether the given location is being captured.
#
#Parameters:
# loc - a location containing an ant tunnel, which may or may not be getting captured.(Location)
#
#Returns: an integer containing the ant tunnel's health if it is being captured,
# or -1 otherwise.
##
def getCaptureValue(self, loc):
if loc.constr == None or loc.constr.type != TUNNEL:
return -1
elif loc.ant == None or loc.ant.player == loc.constr.player:
return -1
else:
return loc.constr.captureHealth
##
#drawNotification
#Description: draws the notification currently being relayed to the user.
# Breaks the notification into multiple lines if necessary.
##
def drawNotification(self):
#Draw a black box to encapsulate the notification
outerNoteBox = Rect(0, 0, self.buttonRect.width + 2 * CELL_SPACING, self.buttonRect.height + 2 * CELL_SPACING)
pygame.draw.rect(self.screen, BLACK, outerNoteBox.move(self.messageLocation).move(-CELL_SPACING, -CELL_SPACING))
#Draw a white box to make the black box appear empty
noteBox = Rect(0, 0, self.buttonRect.width + CELL_SPACING + 1, self.buttonRect.height + CELL_SPACING + 1)
pygame.draw.rect(self.screen, WHITE, noteBox.move(self.messageLocation).move(-CELL_SPACING / 2, -CELL_SPACING / 2))
#Chop up text by moving to a new line every time you get close to the edge of the notification area.
breakupIndex = 0
lineNum = 0
while self.notifyFont.size(self.lastNotification[breakupIndex:])[0] > self.buttonRect.width:
pctToNewline = float(self.buttonRect.width) / float(self.notifyFont.size(self.lastNotification[breakupIndex:])[0])
indexOfNewline = int(float(len(self.lastNotification[breakupIndex:])) * pctToNewline) - 1
while self.lastNotification[breakupIndex+indexOfNewline] != " " or indexOfNewline == breakupIndex:
indexOfNewline -= 1
if indexOfNewline == breakupIndex:
indexOfNewline = int(float(len(self.lastNotification)) * pctToNewline) - 1
messageSurface = self.notifyFont.render(self.lastNotification[breakupIndex:breakupIndex+indexOfNewline].lstrip(), True, DARK_RED)
self.screen.blit(messageSurface, (self.messageLocation[0], self.messageLocation[1] + lineNum * self.notifyFont.get_height()))
breakupIndex += indexOfNewline
lineNum += 1
messageSurface = self.notifyFont.render(self.lastNotification[breakupIndex:].lstrip(), True, DARK_RED)
self.screen.blit(messageSurface, (self.messageLocation[0], self.messageLocation[1] + lineNum * self.notifyFont.get_height()))
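#A hedged sketch of an equivalent wrap using the standard textwrap module;
#it is character-based rather than pixel-based, so it only approximates the
#font-metric splitting above (assumes 'n' as an average-width glyph):
#
# import textwrap
# approxChars = self.buttonRect.width / self.notifyFont.size("n")[0]
# for lineNum, line in enumerate(textwrap.wrap(self.lastNotification, approxChars)):
#     surface = self.notifyFont.render(line, True, DARK_RED)
#     self.screen.blit(surface, (self.messageLocation[0],
#         self.messageLocation[1] + lineNum * self.notifyFont.get_height()))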
##
#drawConstruction
#Description: Draws a non-moving structure of the specified type to the specified location on the game board.
#
#Parameters:
# item - an object subclassed from Construction.(Construction or Building)
# position - a tuple that indicates a cell on the board. This will be converted to a pixel location.((int,int))
##
def drawConstruction(self, item, position):
Xpixel = CELL_SPACING * (position[0] + 1) + CELL_SIZE.width * position[0]
Ypixel = CELL_SPACING * (position[1] + 1) + CELL_SIZE.height * position[1]
constrTex = self.constructionTexs[item.type].copy()
background = pygame.Surface(CELL_SIZE.size)
if type(item) is Building:
background.fill(LIGHT_RED if item.player == PLAYER_ONE else LIGHT_BLUE)
constrTex.set_colorkey(self.playerAlpha)
else:
background.fill(WHITE)
background.blit(constrTex, (0, 0))
background.set_colorkey(WHITE)
self.screen.blit(background, (Xpixel, Ypixel))
##
#drawAnt
#Description: Draws an Ant of the specified type to the specified location on the game board.
#
#Parameters:
# ant - an Ant object.(Ant)
# position - a tuple that indicates a cell on the board. This will be converted to a pixel location.((int,int))
##
def drawAnt(self, ant, position):
Xpixel = CELL_SPACING * (position[0] + 1) + CELL_SIZE.width * position[0]
Ypixel = CELL_SPACING * (position[1] + 1) + CELL_SIZE.height * position[1]
#Start by drawing the ant itself onto a solid player color background.
#The player color should only show in areas of the playerAlpha color.
background = pygame.Surface(CELL_SIZE.size)
background.fill(LIGHT_RED if ant.player == PLAYER_ONE else LIGHT_BLUE)
background.blit(self.antTexs[ant.type], (0, 0))
background.set_colorkey(WHITE)
#Then draw the ant itself.
self.screen.blit(background, (Xpixel, Ypixel))
#Draw current health across the top from the left as a series of boxes
boxWidth = 7
boxHeight = 6
healthBox = Rect(0,0,boxWidth-2,boxHeight-2)
healthPerimiter = Rect(0,0,boxWidth,boxHeight)
for x in xrange(0, UNIT_STATS[ant.type][HEALTH]):
pygame.draw.rect(self.screen, DARK_GREEN, healthPerimiter.move(Xpixel + CELL_SIZE.width - boxWidth * (x + 1) - 1, Ypixel + 1))
for x in xrange(0, ant.health):
pygame.draw.rect(self.screen, LIGHT_GREEN, healthBox.move(Xpixel + CELL_SIZE.width - boxWidth * (x + 1), Ypixel + 2))
for x in xrange(ant.health, UNIT_STATS[ant.type][HEALTH]):
pygame.draw.rect(self.screen, DARK_RED, healthBox.move(Xpixel + CELL_SIZE.width - boxWidth * (x + 1), Ypixel + 2))
#Draw isCarrying marker in lower right
if ant.carrying:
XoffsetCarry = CELL_SIZE.width - self.isCarryingTex.get_width()
YoffsetCarry = CELL_SIZE.height - self.isCarryingTex.get_height()
self.screen.blit(self.isCarryingTex, (Xpixel + XoffsetCarry, Ypixel + YoffsetCarry))
#Draw hasMoved marker as a shade across the image
if ant.hasMoved:
self.shaderTex.fill(BLACK)
self.screen.blit(self.shaderTex, (Xpixel, Ypixel))
##
#drawCaptureHealths
#Description: draw the capture health of any anthill that is being captured.
#
#Parameters:
#   health - a tuple of both players' capture health values to draw.((int,int))
##
def drawCaptureHealths(self, health):
label1 = self.monsterFont.render(str(health[0]), True, DARK_BLUE, WHITE)
label2 = self.monsterFont.render(str(health[1]), True, DARK_RED, WHITE)
#Find out where to put the text onscreen.
label1Size = label1.get_size()
label2Size = label2.get_size()
label1Center = (label1Size[0] / 2, label1Size[1] / 2)
label2Center = (label2Size[0] / 2, label2Size[1] / 2)
boardCenter = ((self.screen.get_width() - self.buttonArea.width) / 2, self.screen.get_height() / 2)
#Initialize destinations for the sake of scope.
destination1 = 0
destination2 = 0
#Change the settings of the text surface to look the way it should.
label1.set_colorkey(WHITE)
label2.set_colorkey(WHITE)
label1.set_alpha(50)
label2.set_alpha(50)
#Find out which case occurred. Both players have anthills getting captured, or just one.
if health[0] != -1 and health[1] != -1:
#Make destinations offset for both units.
destination1 = subtractCoords(boardCenter, (label1Size[0], label1Center[1]))
destination2 = subtractCoords(boardCenter, (0, label2Center[1]))
#Draw the text surface.
self.screen.blit(label1, destination1)
self.screen.blit(label2, destination2)
elif health[0] != -1:
destination1 = subtractCoords(boardCenter, label1Center)
destination2 = 0
#Draw the text surface.
self.screen.blit(label1, destination1)
elif health[1] != -1:
destination1 = 0
destination2 = subtractCoords(boardCenter, label2Center)
#Draw the text surface.
self.screen.blit(label2, destination2)
else:
print "Oh my gawd my code broke in UserInterface.drawCaptureHealth"
##
#drawCaptureHealth
#Description: draw the capture health of an ant tunnel that is being captured.
#
#Parameters:
# health - the amount of health to draw.(int)
# coords - the board coordinates to draw at.((int,int))
# player - the playerID of the player being drawn for.(int)
##
def drawCaptureHealth(self, health, coords, player):
#Create and add settings to the text we want to draw. Background needs to be set so we don't have per pixel alpha.
label = self.captureFont.render(str(health), True, LIGHT_RED if player == PLAYER_ONE else LIGHT_BLUE, WHITE)
label.set_colorkey(WHITE)
label.set_alpha(100)
#Find where to place the text.
sizeDiff = subtractCoords(CELL_SIZE.size, subtractCoords(label.get_size(), (0, 10)))
halfDiff = (sizeDiff[0] / 2, sizeDiff[1] / 2)
#Draw the text.
self.screen.blit(label, addCoords(coords, halfDiff))
##
#drawButton
#Description: Draws a button to the board. All necessary information is contained in the given button dictionary under the given key.
#
#Parameters:
#   key - a key in the given button dictionary.(string)
#   buttons - the button dictionary to draw from, e.g. self.buttons.(dict)
##
def drawButton(self, key, buttons):
label = self.gameFont.render(key, True, BLACK)
offset = subtractCoords(self.buttonRect.center, label.get_rect().center)
self.screen.blit(self.buttonTextures[buttons[key][1]], buttons[key][0])
self.screen.blit(label, addCoords(buttons[key][0], offset))
##
#drawScoreBoard
#Description: Draws the scores of both players as given.
#
#Parameters:
# player1Score - the integer value of player 1's food stock.(int)
# player2Score - the integer value of player 2's food stock.(int)
##
def drawScoreBoard(self, player1Score, player2Score):
label1 = self.gameFont.render("Player 1: " + str(player1Score) + " food", True, BLACK)
label2 = self.gameFont.render("Player 2: " + str(player2Score) + " food", True, BLACK)
self.screen.blit(label1, self.scoreLocation)
self.screen.blit(label2, addCoords(self.scoreLocation, (0, label2.get_rect().height)))
##
#drawTextBox
#Description: Draws a box that holds text to the buttonArea.
##
def drawTextBox(self):
#Start by drawing the text box in the appropriate color.
pygame.draw.rect(self.screen, DARK_RED if self.textBoxContent == '' else LIGHT_GREEN, self.buttonRect.move(self.textPosition))
#Then draw the number in the text box.
label = self.gameFont.render(self.textBoxContent + ('|' if self.boxSelected else ''), True, BLACK)
offset = subtractCoords(self.buttonRect.center, label.get_rect().center)
self.screen.blit(label, addCoords(self.textPosition, offset))
#Finally, draw the text box title.
boxLabel = self.gameFont.render("Games to play:", True, BLACK)
boxLabelOffset = (0, - boxLabel.get_height() - FIELD_SPACING)
self.screen.blit(boxLabel, addCoords(self.textPosition, boxLabelOffset))
##
#drawTable
#Description: Draws the tournament score table from a list of (author string, wins int, losses int) tuples.
##
def drawTable(self):
XStartPixel = 50
YStartPixel = self.screen.get_height() / 2 - len(self.tournamentScores) * (self.tournFont.get_height() + FIELD_SPACING) / 2 - CELL_SPACING
if YStartPixel < 0:
YStartPixel = 0
#Prepend the column headers to the tournamentScores list, so that we can draw the entire table without special cases.
scores = [('Author', 'Wins', 'Losses')] + self.tournamentScores
#Find the longest string for each column
lengths = [0 for i in range(0, len(scores[0]) + 1)]
for score in scores:
for index in range(0, len(score)):
if self.tournFont.size(str(score[index]))[0] > lengths[index+1]:
lengths[index + 1] = self.tournFont.size(str(score[index]))[0]
#add some padding for readability
lengths = [x + 10 for x in lengths]
#Draw the table itself
for index in range(0, len(scores)):
for innerDex in range(0, len(scores[index])):
Xoffset = 0 if innerDex == 0 else sum(lengths[:innerDex+1])
tempX = XStartPixel + Xoffset + FIELD_SPACING * innerDex
tempY = YStartPixel + index * (self.tournFont.get_height() + FIELD_SPACING)
label = self.tournFont.render(str(scores[index][innerDex]), True, BLACK)
self.screen.blit(label, (tempX, tempY))
#Add some underlines under the table headers
Yoffset = YStartPixel + self.tournFont.get_height()
for index in range(1, len(lengths)):
Xoffset = 0 if index == 0 else sum(lengths[:index])
Xspacing = -1 * FIELD_SPACING if index == 1 else FIELD_SPACING * (index - 1)
startX = XStartPixel + Xoffset + Xspacing
endX = startX + lengths[index]
pygame.draw.line(self.screen, BLACK, (startX, Yoffset), (endX, Yoffset))
#Draw the elapsed time
Yoffset = YStartPixel + len(scores) * (self.tournFont.get_height() + FIELD_SPACING)
if (self.tournamentInProgress):
self.tournamentElapsed = time.clock() - self.tournamentStartTime
elapsedMessage = "Elapsed time: "
elapsedColor = DARK_RED
if (not self.tournamentInProgress):
elapsedMessage = "Final time: "
elapsedColor = DARK_GREEN
elapsedMessage += str(int(self.tournamentElapsed) / 60) + "m "
elapsedMessage += str(int(self.tournamentElapsed) % 60) + "s"
label = self.tournFont.render(elapsedMessage, True, elapsedColor)
self.screen.blit(label, (XStartPixel, Yoffset))
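#Worked example of the offset scheme above (hypothetical widths): lengths carries one leading
#padding entry, so with lengths == [10, 70, 50, 60], column 1 starts sum(lengths[:2]) == 80
#pixels right of XStartPixel (plus field spacing) and column 2 starts at sum(lengths[:3]) == 130.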
##
#drawAIChecklist
#Description: draws a checklist of all AIs that are available to select.
#
#Parameters:
# mode - The current game mode.(int)
##
def drawAIChecklist(self, mode):
#Replace the AIList with a shorter one if in human mode because I don't want to draw the human player in the checklist.
safeList = self.allAIs[1:] if mode == HUMAN_MODE else self.allAIs
#Find out how many AIs can be put in a column.
maxRows = (self.screen.get_height() - 100) / (self.checkBoxRect.height + FIELD_SPACING)
secondColumnOffset = (self.screen.get_width() - self.buttonArea.width) / 2
#Prevent the list from overflowing on the screen.
safeList = safeList[:2 * maxRows] if len(safeList) > maxRows * 2 else safeList
#Decide on where the checkList should start on screen.
XStartPixel = 50
YStartPixel = self.screen.get_height() / 2 - min(len(safeList), maxRows) * (self.checkBoxRect.height + FIELD_SPACING) / 2
if YStartPixel < 0:
YStartPixel = 0
#Draw the checkList.
for index in range(0, len(safeList)):
tempX = XStartPixel + (secondColumnOffset if index >= maxRows else 0)
tempY = YStartPixel + index % maxRows * (self.checkBoxRect.height + FIELD_SPACING)
self.screen.blit(self.checkBoxTextures[safeList[index][1]], (tempX, tempY))
label = self.notifyFont.render(str(safeList[index][0].author), True, BLACK)
self.screen.blit(label, (tempX + self.checkBoxRect.width + FIELD_SPACING, tempY + (self.checkBoxRect.height - self.notifyFont.get_height()) / 2))
#Find out where the button should go.
buttonIndex = maxRows if maxRows < len(safeList) else len(safeList)
buttonY = YStartPixel + buttonIndex * (self.checkBoxRect.height + FIELD_SPACING)
#Reset the location of the button.
key = self.submitSelected.keys()[0]
self.submitSelected[key][0] = (XStartPixel, buttonY)
#And last but not least, draw the "Submit Selected" button below the end of the first column.
self.drawButton(key, self.submitSelected)
##
#drawCell
#Description: Draws a cell. The basic component of the board.
#
#Parameters:
# currentLoc - The Location to be drawn in this cell. Locations can have
# ants and buildings attached, so those will be drawn if present.(Location)
##
def drawCell(self, currentLoc):
col = currentLoc.coords[0]
row = currentLoc.coords[1]
#Find the x y coordinates that this column and row map to.
Xpixel = CELL_SPACING * (col + 1) + CELL_SIZE.width * col
Ypixel = CELL_SPACING * (row + 1) + CELL_SIZE.height * row
#Create a Rect that shows up if the square is selected.
shadeWidth = CELL_SPACING / 2 * 2 + CELL_SIZE.width
shadeHeight = CELL_SPACING / 2 * 2 + CELL_SIZE.height
shadeRect = Rect(0, 0, shadeWidth, shadeHeight)
#Find the X and Y coordinates to draw the shade at.
shadeXpixel = Xpixel - CELL_SPACING / 2
shadeYpixel = Ypixel - CELL_SPACING / 2
#Create a True/False list indicating which shaders should be drawn
drawList = []
colorList = [DARK_GREEN, LIGHT_GREEN, GOLDENROD, LIGHT_RED]
if self.coordList != []:
#Draw the shadeRect if currentLoc is in coordList
drawList.append(True if currentLoc.coords in self.coordList[:-1] else False)
#Draw brighter if the currentLoc is the last move selected
drawList.append(True if currentLoc.coords == self.coordList[-1] else False)
else:
drawList += [False, False]
#Also shade potential moves.
drawList.append(True if currentLoc.coords in self.validCoordList else False)
#Draw the shade for a cell highlighted for attacks if currentLoc is in attackList
drawList.append(True if currentLoc.coords in self.attackList else False)
#Draw the background shades
for index in xrange(0, len(drawList)):
if drawList[index]:
pygame.draw.rect(self.screen, colorList[index], shadeRect.move(shadeXpixel, shadeYpixel))
#Draw the cell itself.
self.screen.blit(self.terrainTex, CELL_SIZE.move(Xpixel, Ypixel))
#Draw what's in this cell
if currentLoc.constr != None:
self.drawConstruction(currentLoc.constr, (col, row))
if currentLoc.ant != None:
self.drawAnt(currentLoc.ant, (col, row))
#Draw the translucent foreground shades.
for index in xrange(0, len(drawList)):
if drawList[index]:
self.shaderTex.fill(colorList[index])
self.screen.blit(self.shaderTex, CELL_SIZE.move(Xpixel, Ypixel))
#Draw the captureHealth of any ant tunnel being captured.
captureVal = self.getCaptureValue(currentLoc)
if captureVal != -1:
self.drawCaptureHealth(captureVal, (Xpixel, Ypixel), currentLoc.constr.player)
##
#drawBoard
#Description: This is the bread and butter of the UserInterface class. Everything
# starts drawing from here.
#
#Parameters:
# currentState - The state of the board to draw as a GameState.(GameState)
# mode - The current game mode.(int)
##
def drawBoard(self, currentState, mode):
self.handleEvents(mode)
if self.choosingAIs:
self.screen.fill(WHITE)
self.drawAIChecklist(mode)
self.drawNotification()
elif mode == TOURNAMENT_MODE:
self.screen.fill(WHITE)
#Draw the box into which the user can enter the number of games they want to play.
self.drawTextBox()
#Draw the table with columns author/wins/losses
self.drawTable()
else:
self.screen.fill(BLACK)
#Draw the menu area.
pygame.draw.rect(self.screen, WHITE, self.buttonArea)
#Draw the player color indicator boxes.
pygame.draw.rect(self.screen, LIGHT_RED, self.outerRect)
pygame.draw.rect(self.screen, BLACK, self.innerRect.move((CELL_SPACING, CELL_SPACING)))
pygame.draw.rect(self.screen, LIGHT_BLUE, self.outerRect.move((0, self.p2RectYOffset)))
pygame.draw.rect(self.screen, BLACK, self.innerRect.move((CELL_SPACING, CELL_SPACING + self.p2RectYOffset)))
#Draw the cells themselves.
for col in xrange(0, len(currentState.board)):
for row in xrange(0, len(currentState.board[col])):
self.drawCell(currentState.board[col][row])
#Draw the captureHealth of any anthill being captured.
captureVals = self.getCaptureValues(currentState)
if captureVals[0] != -1 or captureVals[1] != -1:
self.drawCaptureHealths(captureVals)
#Make sure we draw the right buttons
relButtons = {} if mode == None else self.humanButtons if mode == HUMAN_MODE else self.aiButtons
if self.buildAntMenu == True:
relButtons = self.antButtons
#Draw the context buttons
for key in relButtons:
self.drawButton(key, relButtons)
#Ideally this draw call would live outside of drawBoard, but it cannot easily be moved.
self.drawScoreBoard(currentState.inventories[0].foodCount, currentState.inventories[1].foodCount)
#Draw notifications just above menu buttons.
self.drawNotification()
#Draw the basic buttons
for key in self.buttons:
self.drawButton(key, self.buttons)
#Show everything I've drawn by posting self.screen to the monitor.
pygame.display.flip()
##
#handleButton
#Description: Handles the finer details of what happens when a user is clicking on buttons.
# The button will only be counted as clicked if the user both presses and releases a mouse
# button while hovering over the game button. If clicked, a callback function will be used
# to notify Game.py.
#
#Parameters:
#   key - a key in the given button dictionary.(string)
#   released - an integer/boolean that represents the state of the button: 1 if the button
#       is released, or 0 if the button is depressed.(int)
#   buttons - the button dictionary to look in, e.g. self.buttons or self.antButtons.(dict)
##
def handleButton(self, key, released, buttons):
if buttons[key][1] != released and released == 1:
buttons[key][2]()
buttons[key][1] = released
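#A short trace of the press/release protocol above, using a hypothetical button entry of the
#form [position, state, callback]:
#
#   self.buttons['Start'] = [(0, 0), 1, self.startGame]
#   self.handleButton('Start', 0, self.buttons)  #press: state goes 1 -> 0, no callback
#   self.handleButton('Start', 1, self.buttons)  #release: state goes 0 -> 1, startGame fires
#
#A repeated release (1 -> 1) does nothing, so each full click fires the callback exactly once.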
##
#handleAICheckList
#Description: handles any MOUSEBUTTONDOWN events pertaining to the AI
# check list.
#
#Parameters:
# event - All information about the event. We already know its type due
# to the fact we are in this method, but position of the click is
# also important.(pygame.Event)
# mode - The current game mode.(int)
##
def handleAICheckList(self, event, mode):
#Replace the AIList with a shorter one if in human mode because the human player should not appear in the checklist.
safeList = self.allAIs[1:] if mode == HUMAN_MODE else self.allAIs
#Find out how many AIs can be in a column.
maxRows = (self.screen.get_height() - 100) / (self.checkBoxRect.height + FIELD_SPACING)
secondColumnOffset = (self.screen.get_width() - self.buttonArea.width) / 2
#The list can't overflow on the screen.
safeList = safeList[:2 * maxRows] if len(safeList) > maxRows * 2 else safeList
#Check if a column was clicked (x position of mouse may result in a checkbox being clicked)
columnClicked = -1
if event.pos[0] - 50 > 0 and event.pos[0] - 50 < self.checkBoxRect.width:
columnClicked = 0
elif event.pos[0] - 50 - secondColumnOffset > 0 and event.pos[0] - 50 - secondColumnOffset < self.checkBoxRect.width:
columnClicked = 1
#If a column was clicked, check if a row was clicked.
if columnClicked > -1:
yStart = self.screen.get_height() / 2 - min(len(safeList), maxRows) * (self.checkBoxRect.height + FIELD_SPACING) / 2
if (event.pos[1] - yStart + FIELD_SPACING) % (self.checkBoxRect.height + FIELD_SPACING) > FIELD_SPACING:
checkIndex = (event.pos[1] - yStart) / (self.checkBoxRect.height + FIELD_SPACING)
#If the checkbox clicked was in a column other than the first, add the implicit rows skipped.
checkIndex += columnClicked * maxRows
#If the checkindex falls within the list, go ahead and call the callback.
if checkIndex < len(safeList) and checkIndex >= 0:
#If the mode is human mode, there is an invisible player (the human) that I don't want clicked.
checkIndex += 1 if mode == HUMAN_MODE else 0
self.checkBoxClicked(checkIndex)
##
#handleHotkey
#Description: Handles any key presses in place of using the mouse to press
# buttons. All hotkeys are hard coded in here.
#
#Parameters:
# mode - The current game mode.(int)
# char - The key that was pressed. Actually passed as a string, but since
# each keyboard event is spawned by one key, the string length is
# always 1.(string)
##
def handleHotkey(self, mode, char):
if char == '\r':
self.buttons['Start'][-1]()
elif mode == HUMAN_MODE:
if not self.buildAntMenu:
if char == ' ':
self.humanButtons['End'][-1]()
elif char == 'b':
self.humanButtons['Build'][-1]()
else:
if char == 'w':
self.antButtons['Worker'][-1]()
elif char == 'd':
self.antButtons['Drone'][-1]()
elif char == 's':
self.antButtons['Soldier'][-1]()
elif char == 'r':
self.antButtons['Ranged Soldier'][-1]()
elif char == 'n':
self.antButtons['None'][-1]()
elif mode == AI_MODE:
if char == 'n':
self.aiButtons['Next'][-1]()
elif char == 'c':
self.aiButtons['Continue'][-1]()
##
#handleEvents
#Description: Handles the more generic mouse movements. Finds out what has been
# clicked, and either calls handleButton on the activated button, or uses a
# callback to tell the HumanPlayer what the human clicked.
#
#Parameters:
# mode - The current game mode.(int)
##
def handleEvents(self, mode):
#Make sure we check the right buttons
relButtons = {} if self.choosingAIs else self.humanButtons if mode == HUMAN_MODE else self.aiButtons if mode == AI_MODE else {}
#It should be impossible for self.buildAntMenu to be True unless mode is HUMAN_MODE and AIs have already been chosen.
if mode == HUMAN_MODE and self.buildAntMenu:
relButtons = self.antButtons
#Check what to do for each event
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN and time.clock() - self.lastClicked > self.clickCooldown:
self.lastClicked = time.clock()
#Start by checking the basic buttons that always get drawn
for key in self.buttons:
if self.buttonRect.move(self.buttons[key][0]).collidepoint(event.pos):
self.handleButton(key, 0, self.buttons)
#Then check the buttons that congregate at the top of the screen, and change based on context
for key in relButtons:
if self.buttonRect.move(relButtons[key][0]).collidepoint(event.pos):
self.handleButton(key, 0, relButtons)
#Check to see if text box should be selected or deselected
if mode == TOURNAMENT_MODE and self.buttonRect.move(self.textPosition).collidepoint(pygame.mouse.get_pos()):
self.boxSelected = True
else:
self.boxSelected = False
#Additionally, check if a cell on the board has been clicked.
if mode != TOURNAMENT_MODE and not self.choosingAIs:
if event.pos[0] % (CELL_SPACING + CELL_SIZE.width) > CELL_SPACING and event.pos[1] % (CELL_SPACING + CELL_SIZE.height) > CELL_SPACING:
x = event.pos[0] / (CELL_SPACING + CELL_SIZE.width)
y = event.pos[1] / (CELL_SPACING + CELL_SIZE.height)
if x < BOARD_SIZE.width and y < BOARD_SIZE.height:
self.locationClicked((x, y))
elif self.choosingAIs:
self.handleAICheckList(event, mode)
#Handle the AI selecting button.
AIKey = self.submitSelected.keys()[0]
if self.buttonRect.move(self.submitSelected[AIKey][0]).collidepoint(event.pos):
self.handleButton(AIKey, 0, self.submitSelected)
elif event.type == pygame.MOUSEBUTTONUP:
#Start by checking the basic buttons that always get drawn
for key in self.buttons:
if self.buttonRect.move(self.buttons[key][0]).collidepoint(event.pos):
self.handleButton(key, 1, self.buttons)
#Then check the buttons that congregate at the top of the screen, and change based on context
for key in relButtons:
if self.buttonRect.move(relButtons[key][0]).collidepoint(event.pos):
self.handleButton(key, 1, relButtons)
#Handle the AI selecting button.
if self.choosingAIs:
AIKey = self.submitSelected.keys()[0]
if self.buttonRect.move(self.submitSelected[AIKey][0]).collidepoint(event.pos):
self.handleButton(AIKey, 1, self.submitSelected)
#Check to see if text box should be selected or deselected
if mode == TOURNAMENT_MODE and self.buttonRect.move(self.textPosition).collidepoint(pygame.mouse.get_pos()):
self.boxSelected = True
else:
self.boxSelected = False
elif event.type == pygame.MOUSEMOTION and event.buttons[0]:
#Start by checking the basic buttons that always get drawn
for key in self.buttons:
if self.buttonRect.move(self.buttons[key][0]).collidepoint(addCoords(event.pos, event.rel)):
self.buttons[key][1] = 0
else:
self.buttons[key][1] = 1
#Then check the buttons that congregate at the top of the screen, and change based on context
for key in relButtons:
if self.buttonRect.move(relButtons[key][0]).collidepoint(addCoords(event.pos, event.rel)):
relButtons[key][1] = 0
else:
relButtons[key][1] = 1
#Handle the AI selecting button.
if self.choosingAIs:
AIKey = self.submitSelected.keys()[0]
if self.buttonRect.move(self.submitSelected[AIKey][0]).collidepoint(event.pos):
self.submitSelected[AIKey][1] = 0
else:
self.submitSelected[AIKey][1] = 1
elif self.boxSelected and event.type == KEYDOWN:
if str(event.unicode) in [str(i) for i in range(0, 10)]:
self.textBoxContent += str(event.unicode)
elif event.key == 8 and self.textBoxContent != '':
self.textBoxContent = self.textBoxContent[:-1]
elif event.type == KEYDOWN:
self.handleHotkey(mode, str(event.unicode))
##
#findButtonCoords
#Description: Finds the coordinates that a button should be placed at, based on its index from the top or bottom of the screen.
#
#Parameters:
# index - There are reserved spaces for buttons, allowing for a certain buffer zone between each button.
# This is the index from the top or bottom of the screen that this button should be placed at.(int)
#   isTop - True if the index should be counted from the top of the screen. False otherwise.(boolean)
#
#Returns: The coordinates of the button.
##
def findButtonCoords(self, index, isTop):
buttonSpacing = 2 * CELL_SPACING
buttonX = self.screen.get_width() - self.buttonRect.width - buttonSpacing
if isTop:
buttonY = (index + 1) * buttonSpacing + index * self.buttonRect.height
return buttonX, buttonY
else:
buttonY = self.screen.get_height() - (index + 1) * (buttonSpacing + self.buttonRect.height)
return buttonX, buttonY
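#Worked example (hypothetical numbers): with CELL_SPACING == 10 (so buttonSpacing == 20), a
#600-pixel-tall screen, and a 30-pixel-tall button, index 0 from the bottom gives
#buttonY == 600 - 1 * (20 + 30) == 550 and index 1 gives 500, stacking buttons upward from
#the bottom edge.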
##
#initAssets
#Description: initializes everything the UserInterface needs to render a game state properly.
##
def initAssets(self):
global CELL_SIZE
#Declare the name of the folder that all textures are in.
texFolder = "Textures"
#Load textures as Surfaces. Should convert these surfaces later for optimal speed.
self.constructionTexs = []
self.constructionTexs.append(pygame.image.load(os.path.join(texFolder, "anthill.bmp")))
self.constructionTexs.append(pygame.image.load(os.path.join(texFolder, "antTunnel.bmp")))
self.constructionTexs.append(pygame.image.load(os.path.join(texFolder, "grass.bmp")))
self.constructionTexs.append(pygame.image.load(os.path.join(texFolder, "food.bmp")))
self.antTexs = []
self.antTexs.append(pygame.image.load(os.path.join(texFolder, "queen.bmp")))
self.antTexs.append(pygame.image.load(os.path.join(texFolder, "worker.bmp")))
self.antTexs.append(pygame.image.load(os.path.join(texFolder, "drone.bmp")))
self.antTexs.append(pygame.image.load(os.path.join(texFolder, "direct.bmp")))
self.antTexs.append(pygame.image.load(os.path.join(texFolder, "indirect.bmp")))
#Load the isCarrying texture, which will allow players to see the conditions of their ants.
self.isCarryingTex = pygame.image.load(os.path.join(texFolder, "isCarrying.bmp"))
#Load the texture used for terrain (ground).
self.terrainTex = pygame.image.load(os.path.join(texFolder, "terrain.bmp"))
#CheckBox textures
self.checkBoxTextures = []
self.checkBoxTextures.append(pygame.image.load(os.path.join(texFolder, "unchecked.bmp")))
self.checkBoxTextures.append(pygame.image.load(os.path.join(texFolder, "checked.bmp")))
#CheckBox rectangle
self.checkBoxRect = self.checkBoxTextures[0].get_rect()
#Button textures
self.buttonTextures = []
self.buttonTextures.append(pygame.image.load(os.path.join(texFolder, "buttonDown.bmp")))
self.buttonTextures.append(pygame.image.load(os.path.join(texFolder, "buttonUp.bmp")))
#Button rectangle
self.buttonRect = self.buttonTextures[0].get_rect()
#Make CELL_SIZE equal to the size of an ant image.
CELL_SIZE = self.constructionTexs[0].get_rect()
#Create shaderTex, which will be used to translucently shade ants.
self.shaderTex = pygame.Surface((CELL_SIZE.width, CELL_SIZE.height))
#Set the color that will be used as an alpha transparency to let player colors shine through.
self.playerAlpha = OFF_BLACK
#Make White transparent (alpha 0) for most textures (well, buttons don't actually need it).
for construction in self.constructionTexs:
construction.set_colorkey(WHITE)
#Ants don't get posted directly to the board. They go to an intermediate
#texture of their player color, so they don't need a WHITE alpha.
for ant in self.antTexs:
ant.set_colorkey(self.playerAlpha)
self.isCarryingTex.set_colorkey(WHITE)
self.shaderTex.set_alpha(50)
#Set up fonts.
pygame.font.init()
self.statFont = pygame.font.Font(None, 15)
self.notifyFont = pygame.font.Font(None, 16)
self.gameFont = pygame.font.Font(None, 25)
self.tournFont = pygame.font.Font(None, 25)
self.captureFont = pygame.font.Font(None, 130)
self.monsterFont = pygame.font.Font(None, 300)
#Where should scores be drawn?
self.scoreLocation = self.findButtonCoords(0, True)
#Where should notifications be drawn?
self.messageLocation = self.findButtonCoords(5, False)
#Where should non-board stuff be placed (an area for buttons, notifications, and scores)?
buttonAreaWidth = self.buttonRect.width + 4 * CELL_SPACING
self.buttonArea = Rect(self.screen.get_width() - buttonAreaWidth, 0, buttonAreaWidth, self.screen.get_height())
#Button data for basic buttons in order: (x, y) coordinates, buttonState (0 pressed/1 released), callback
self.buttons = {
'Start':[self.findButtonCoords(3.5, False), 1, self.startGame],
'Tournament':[self.findButtonCoords(2, False), 1, self.gameModeTournament],
'Human vs AI':[self.findButtonCoords(1, False), 1, self.gameModeHumanAI],
'AI vs AI':[self.findButtonCoords(0, False), 1, self.gameModeAIAI]
}
#Initial values for buttons in human vs AI mode.
self.humanButtons = {
'Build':[self.findButtonCoords(1, True), 1, self.submitBuild],
'End':[self.findButtonCoords(2, True), 1, self.submitEndTurn]
}
#Initial values for buttons in AI vs AI mode.
self.aiButtons = {
'Next':[self.findButtonCoords(1, True), 1, self.submitNext],
'Continue':[self.findButtonCoords(2, True), 1, self.submitContinue]
}
#Initial values for build ant buttons.
self.antButtons = {
'Worker':[self.findButtonCoords(1, True), 1, self.submitWorker],
'Drone':[self.findButtonCoords(2, True), 1, self.submitDrone],
'Soldier':[self.findButtonCoords(3, True), 1, self.submitDSoldier],
'Ranged Soldier':[self.findButtonCoords(4, True), 1, self.submitISoldier],
'None':[self.findButtonCoords(5, True), 1, self.submitNoBuild]
}
#Initial value for submit button for AI checklist.
self.submitSelected = {
'Submit AIs':[(0,0), 1, self.submitSelectedAIs]
}
#Define the player color indicator boxes.
bw = BOARD_SIZE.width
bh = BOARD_SIZE.height
cw = CELL_SIZE.width
ch = CELL_SIZE.height
cs = CELL_SPACING
self.outerRect = Rect(0, 0, bw * (cw + cs) + cs, (bh / 2 - 1) * (ch + cs) + cs)
self.innerRect = Rect(0, 0, bw * (cw + cs) - cs, (bh / 2 - 1) * (ch + cs) - cs)
self.p2RectYOffset = (bh / 2 + 1) * (cw + cs)
#Properties of our single text box
self.textPosition = self.findButtonCoords(2, True)
self.textBoxContent = ''
self.boxSelected = False
#Initial value for callback function that will be used to get cell clicks in game
self.locationCallback = self.locationClicked
#Draw the ant build menu?
self.buildAntMenu = False
#Initial user notification is empty, since we assume the user hasn't made a mistake in opening the program. Not that the program could detect that anyway.
self.lastNotification = ''
#Initial coordList so I know what to shade
self.coordList = []
#Initial "Where can the ant go" list, for the same reason as above.
self.validCoordList = []
#Cells that should be highlighted for attacks
self.attackList = []
#Initializing tournament scores
self.tournamentScores = []
#Variables used to track elapsed time during tournaments
self.tournamentStartTime = time.clock()
self.tournamentElapsed = 0.0
self.tournamentInProgress = False
#Find out if user is choosing AIs
self.choosingAIs = False
#Set an initial value for the list of AIs that the game uses.
self.allAIs = []
#Set a minimum time between accepted clicks.
self.clickCooldown = 0.15
self.lastClicked = time.clock()
|
|
""" Defines the Legend, AbstractCompositeIconRenderer, and
CompositeIconRenderer classes.
"""
from __future__ import with_statement
from numpy import array, zeros_like
from enable.api import black_color_trait, white_color_trait
from enable.font_metrics_provider import font_metrics_provider
from kiva.trait_defs.kiva_font_trait import KivaFont
from traits.api import Any, Dict, Enum, Bool, HasTraits, Int, \
Instance, List, CList, Float, Str
# Local relative imports
from abstract_overlay import AbstractOverlay
from label import Label
from lineplot import LinePlot
from plot_component import PlotComponent
from scatterplot import ScatterPlot
class AbstractCompositeIconRenderer(HasTraits):
""" Abstract class for an icon renderer.
"""
def render_icon(self, plots, gc, x, y, width, height):
""" Renders an icon representing the given list of plots onto the
graphics context, using the given dimensions and at the specified
position.
"""
raise NotImplementedError
class CompositeIconRenderer(AbstractCompositeIconRenderer):
""" Renderer for composite icons.
"""
def render_icon(self, plots, *render_args):
""" Renders an icon for a list of plots. """
types = set(map(type, plots))
if types == set([ScatterPlot]):
self._render_scatterplots(plots, *render_args)
elif types == set([LinePlot]):
self._render_lineplots(plots, *render_args)
elif types == set([ScatterPlot, LinePlot]):
self._render_line_scatter(plots, *render_args)
else:
raise ValueError("Don't know how to render combination plot with " +\
"renderers " + str(types))
return
def _render_scatterplots(self, plots, gc, x, y, width, height):
# Don't support this for now
pass
def _render_lineplots(self, plots, gc, x, y, width, height):
# Assume they are all the same color/appearance and use the first one
plots[0]._render_icon(gc, x, y, width, height)
def _render_line_scatter(self, plots, gc, x, y, width, height):
# Separate plots into line and scatter renderers; render one of each
scatter = [p for p in plots if type(p) == ScatterPlot]
line = [p for p in plots if type(p) == LinePlot]
line[0]._render_icon(gc, x, y, width, height)
scatter[0]._render_icon(gc, x, y, width, height)
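# A custom renderer only needs to honor the render_icon() contract defined
# above. A minimal sketch (hypothetical; not part of this module):
#
#     class FirstPlotIconRenderer(AbstractCompositeIconRenderer):
#         def render_icon(self, plots, gc, x, y, width, height):
#             # Delegate to whichever renderer happens to come first.
#             plots[0]._render_icon(gc, x, y, width, height)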
class Legend(AbstractOverlay):
""" A legend for a plot.
"""
# The font to use for the legend text.
font = KivaFont("modern 12")
# The amount of space between the content of the legend and the border.
border_padding = Int(10)
# The border is visible (overrides Enable Component).
border_visible = True
# The color of the text labels
color = black_color_trait
# The background color of the legend (overrides AbstractOverlay).
bgcolor = white_color_trait
# The position of the legend with respect to its overlaid component. (This
# attribute applies only if the legend is used as an overlay.)
#
# * ur = Upper Right
# * ul = Upper Left
# * ll = Lower Left
# * lr = Lower Right
align = Enum("ur", "ul", "ll", "lr")
# The amount of space between legend items.
line_spacing = Int(3)
# The size of the icon or marker area drawn next to the label.
icon_bounds = List([24, 24])
# Amount of spacing between each label and its icon.
icon_spacing = Int(5)
# Map of labels (strings) to plot instances or lists of plot instances. The
# Legend determines the appropriate rendering of each plot's marker/line.
plots = Dict
# The list of labels to show and the order to show them in. If this
# list is blank, then the keys of self.plots are used and displayed in
# alphabetical order. Otherwise, only the items in the **labels**
# list are drawn in the legend. Labels are ordered from top to bottom.
labels = List
# Whether or not to hide plots that are not visible. (This is checked during
# layout.) This option *will* filter out the items in **labels** above, so
# if you absolutely, positively want to set the items that will always
# display in the legend, regardless of anything else, then you should turn
# this option off. Otherwise, it usually makes sense that a plot renderer
# that is not visible will also not be in the legend.
hide_invisible_plots = Bool(True)
# If hide_invisible_plots is False, we can still choose to render the names
# of invisible plots with an alpha.
invisible_plot_alpha = Float(0.33)
# The renderer that draws the icons for the legend.
composite_icon_renderer = Instance(AbstractCompositeIconRenderer)
# Action that the legend takes when it encounters a plot whose icon it
# cannot render:
#
# * 'skip': skip it altogether and don't render its name
# * 'blank': render the name but leave the icon blank (color=self.bgcolor)
# * 'questionmark': render a "question mark" icon
error_icon = Enum("skip", "blank", "questionmark")
# Should the legend clip to the bounds it needs, or to its parent?
clip_to_component = Bool(False)
# The legend resizes to fit its contents in both dimensions (overrides PlotComponent).
resizable = "hv"
# An optional title string to show on the legend.
title = Str('')
# If True, title is at top, if False then at bottom.
title_at_top = Bool(True)
# The legend draws itself in one pass when its parent is drawing
# the **draw_layer** (overrides PlotComponent).
unified_draw = True
# The legend is drawn on the overlay layer of its parent (overrides
# PlotComponent).
draw_layer = "overlay"
#------------------------------------------------------------------------
# Private Traits
#------------------------------------------------------------------------
# A cached list of Label instances
_cached_labels = List
# A cached array of label sizes.
_cached_label_sizes = Any
# A cached list of label names.
_cached_label_names = CList
# A list of the visible plots. Each plot corresponds to the label at
# the same index in _cached_label_names. This list does not necessarily
# correspond to self.plots.values() because it is sorted according to
# the plot name and it potentially excludes invisible plots.
_cached_visible_plots = CList
# A cached array of label positions relative to the legend's origin
_cached_label_positions = Any
def is_in(self, x, y):
""" overloads from parent class because legend alignment
and padding does not cooperatate with the basic implementation
This may just be caused byt a questionable implementation of the
legend tool, but it works by adjusting the padding. The Component
class implementation of is_in uses the outer positions which
includes the padding
"""
in_x = (x >= self.x) and (x <= self.x + self.width)
in_y = (y >= self.y) and (y <= self.y + self.height)
return in_x and in_y
def overlay(self, component, gc, view_bounds=None, mode="normal"):
""" Draws this component overlaid on another component.
Implements AbstractOverlay.
"""
self.do_layout()
valign, halign = self.align
if valign == "u":
y = component.y2 - self.outer_height
else:
y = component.y
if halign == "r":
x = component.x2 - self.outer_width
else:
x = component.x
self.outer_position = [x, y]
if self.clip_to_component:
c = self.component
with gc:
gc.clip_to_rect(c.x, c.y, c.width, c.height)
PlotComponent._draw(self, gc, view_bounds, mode)
else:
PlotComponent._draw(self, gc, view_bounds, mode)
return
# The following two methods implement the functionality of the Legend
# to act as a first-class component instead of merely as an overlay.
# They make the Legend use the normal PlotComponent render methods when
# it does not have a .component attribute, so that it can have its own
# overlays (e.g. a PlotLabel).
#
# The core legend rendering method is named _draw_as_overlay() so that
# it can be called from _draw_plot() when the Legend is not an overlay,
# and from _draw_overlay() when the Legend is an overlay.
def _draw_plot(self, gc, view_bounds=None, mode="normal"):
if self.component is None:
self._draw_as_overlay(gc, view_bounds, mode)
return
def _draw_overlay(self, gc, view_bounds=None, mode="normal"):
if self.component is not None:
self._draw_as_overlay(gc, view_bounds, mode)
else:
PlotComponent._draw_overlay(self, gc, view_bounds, mode)
return
def _draw_as_overlay(self, gc, view_bounds=None, mode="normal"):
""" Draws the overlay layer of a component.
Overrides PlotComponent.
"""
# Determine the position we are going to draw at from our alignment
# corner and the corresponding outer_padding parameters. (Position
# refers to the lower-left corner of our border.)
# First draw the border, if necessary. This sort of duplicates
# the code in PlotComponent._draw_overlay, which is unfortunate;
# on the other hand, overlays of overlays seem like a rather obscure
# feature.
with gc:
gc.clip_to_rect(int(self.x), int(self.y),
int(self.width), int(self.height))
edge_space = self.border_width + self.border_padding
icon_width, icon_height = self.icon_bounds
icon_x = self.x + edge_space
text_x = icon_x + icon_width + self.icon_spacing
y = self.y2 - edge_space
if self._cached_label_positions is not None:
if len(self._cached_label_positions) > 0:
self._cached_label_positions[:,0] = icon_x
for i, label_name in enumerate(self._cached_label_names):
# Compute the current label's position
label_height = self._cached_label_sizes[i][1]
y -= label_height
self._cached_label_positions[i][1] = y
# Try to render the icon
icon_y = y + (label_height - icon_height) / 2
#plots = self.plots[label_name]
plots = self._cached_visible_plots[i]
render_args = (gc, icon_x, icon_y, icon_width, icon_height)
try:
if isinstance(plots, list) or isinstance(plots, tuple):
# TODO: How do we determine if a *group* of plots is
# visible or not? For now, just look at the first one
# and assume that applies to all of them
if not plots[0].visible:
# TODO: the get_alpha() method isn't supported on the Mac kiva backend
#old_alpha = gc.get_alpha()
old_alpha = 1.0
gc.set_alpha(self.invisible_plot_alpha)
else:
old_alpha = None
if len(plots) == 1:
plots[0]._render_icon(*render_args)
else:
self.composite_icon_renderer.render_icon(plots, *render_args)
elif plots is not None:
# Single plot
if not plots.visible:
#old_alpha = gc.get_alpha()
old_alpha = 1.0
gc.set_alpha(self.invisible_plot_alpha)
else:
old_alpha = None
plots._render_icon(*render_args)
else:
old_alpha = None # Or maybe 1.0?
icon_drawn = True
except:
icon_drawn = self._render_error(*render_args)
if icon_drawn:
# Render the text
gc.translate_ctm(text_x, y)
gc.set_antialias(0)
self._cached_labels[i].draw(gc)
gc.set_antialias(1)
gc.translate_ctm(-text_x, -y)
# Advance y to the next label's baseline
y -= self.line_spacing
if old_alpha is not None:
gc.set_alpha(old_alpha)
return
def _render_error(self, gc, icon_x, icon_y, icon_width, icon_height):
""" Renders an error icon or performs some other action when a
plot is unable to render its icon.
Returns True if something was actually drawn (and hence the legend
needs to advance the line) or False if nothing was drawn.
"""
if self.error_icon == "skip":
return False
elif self.error_icon == "blank" or self.error_icon == "questionmark":
with gc:
gc.set_fill_color(self.bgcolor_)
gc.rect(icon_x, icon_y, icon_width, icon_height)
gc.fill_path()
return True
else:
return False
def get_preferred_size(self):
"""
Computes the size and position of the legend based on the maximum size of
the labels, the alignment, and position of the component to overlay.
"""
# Gather the names of all the labels we will create
if len(self.plots) == 0:
return [0, 0]
plot_names, visible_plots = map(list, zip(*sorted(self.plots.items())))
label_names = self.labels
if len(label_names) == 0:
if len(self.plots) > 0:
label_names = plot_names
else:
self._cached_labels = []
self._cached_label_sizes = []
self._cached_label_names = []
self._cached_visible_plots = []
self.outer_bounds = [0, 0]
return [0, 0]
if self.hide_invisible_plots:
visible_labels = []
visible_plots = []
for name in label_names:
# If the user set self.labels, there might be a bad value,
# so ensure that each name is actually in the plots dict.
if name in self.plots:
val = self.plots[name]
# Rather than checking for a list/TraitListObject/etc., we just check
# for the attribute first
if hasattr(val, 'visible'):
if val.visible:
visible_labels.append(name)
visible_plots.append(val)
else:
# If we have a list of renderers, add the name if any of them are
# visible
for renderer in val:
if renderer.visible:
visible_labels.append(name)
visible_plots.append(val)
break
label_names = visible_labels
# Create the labels
labels = [self._create_label(text) for text in label_names]
# For the legend title
if self.title_at_top:
labels.insert(0, self._create_label(self.title))
label_names.insert(0, 'Legend Label')
visible_plots.insert(0, None)
else:
labels.append(self._create_label(self.title))
label_names.append(self.title)
visible_plots.append(None)
# We need a dummy GC in order to get font metrics
dummy_gc = font_metrics_provider()
label_sizes = array([label.get_width_height(dummy_gc) for label in labels])
if len(label_sizes) > 0:
max_label_width = max(label_sizes[:, 0])
total_label_height = sum(label_sizes[:, 1]) + (len(label_sizes)-1)*self.line_spacing
else:
max_label_width = 0
total_label_height = 0
legend_width = max_label_width + self.icon_spacing + self.icon_bounds[0] \
+ self.hpadding + 2*self.border_padding
legend_height = total_label_height + self.vpadding + 2*self.border_padding
self._cached_labels = labels
self._cached_label_sizes = label_sizes
self._cached_label_positions = zeros_like(label_sizes)
self._cached_label_names = label_names
self._cached_visible_plots = visible_plots
if "h" not in self.resizable:
legend_width = self.outer_width
if "v" not in self.resizable:
legend_height = self.outer_height
return [legend_width, legend_height]
def get_label_at(self, x, y):
""" Returns the label object at (x,y) """
for i, pos in enumerate(self._cached_label_positions):
size = self._cached_label_sizes[i]
corner = pos + size
if (pos[0] <= x <= corner[0]) and (pos[1] <= y <= corner[1]):
return self._cached_labels[i]
else:
return None
def _do_layout(self):
if self.component is not None or len(self._cached_labels) == 0 or \
self._cached_label_sizes is None or len(self._cached_label_names) == 0:
width, height = self.get_preferred_size()
self.outer_bounds = [width, height]
return
def _create_label(self, text):
""" Returns a new Label instance for the given text. Subclasses can
override this method to customize the creation of labels.
"""
return Label(text=text, font=self.font, margin=0, color=self.color_,
bgcolor="transparent", border_width=0)
def _composite_icon_renderer_default(self):
return CompositeIconRenderer()
#-- trait handlers --------------------------------------------------------
def _anytrait_changed(self, name, old, new):
if name in ("font", "border_padding", "padding", "line_spacing",
"icon_bounds", "icon_spacing", "labels", "plots",
"plots_items", "labels_items", "border_width", "align",
"position", "position_items", "bounds", "bounds_items",
"label_at_top"):
self._layout_needed = True
if name == "color":
self.get_preferred_size()
return
def _plots_changed(self):
""" Invalidate the caches.
"""
self._cached_labels = []
self._cached_label_sizes = None
self._cached_label_names = []
self._cached_visible_plots = []
self._cached_label_positions = None
def _title_at_top_changed(self, old, new):
""" Trait handler for when self.title_at_top changes. """
if old == True:
indx = 0
else:
indx = -1
if old != None:
self._cached_labels.pop(indx)
self._cached_label_names.pop(indx)
self._cached_visible_plots.pop(indx)
# For the legend title
if self.title_at_top:
self._cached_labels.insert(0, self._create_label(self.title))
self._cached_label_names.insert(0, '__legend_label__')
self._cached_visible_plots.insert(0, None)
else:
self._cached_labels.append(self._create_label(self.title))
self._cached_label_names.append(self.title)
self._cached_visible_plots.append(None)
#-- end Legend ----------------------------------------------------------------
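# A minimal usage sketch (hypothetical; assumes an existing Chaco Plot named
# `plot` whose renderers were added under the names to use as legend labels):
#
#     legend = Legend(component=plot, padding=10, align="ur")
#     legend.plots = plot.plots          # {label: [renderer, ...]}
#     plot.overlays.append(legend)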
|
|
import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import textwrap
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources
from distutils.errors import DistutilsError
from pkg_resources import working_set
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
os.makedirs(replacement, exist_ok=True)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
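# Usage sketch (hypothetical path): while the block runs, tempfile-based
# helpers create their files under the replacement directory.
#
#     with override_temp('/tmp/build-scratch'):
#         print(tempfile.gettempdir())   # -> /tmp/build-scratch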
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@staticmethod
def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
six.reraise(type, exc, self._tb)
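# Usage sketch: capture an exception inside the managed block, run cleanup
# normally, then re-raise afterwards (save_modules() below relies on exactly
# this pattern; restore_global_state is a hypothetical cleanup step):
#
#     with ExceptionSaver() as saved_exc:
#         raise ValueError('boom')      # pickled and suppressed here
#     restore_global_state()
#     saved_exc.resume()                # re-raises the ValueError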
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
with save_path():
hide_setuptools()
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
_MODULES_TO_HIDE = {
'setuptools',
'distutils',
'pkg_resources',
'Cython',
'_distutils_hack',
}
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
base_module = mod_name.split('.', 1)[0]
return base_module in _MODULES_TO_HIDE
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
_distutils_hack = sys.modules.get('_distutils_hack', None)
if _distutils_hack is not None:
_distutils_hack.remove_shim()
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
# __file__ should be a byte string on Python 2 (#712)
dunder_file = (
setup_script
if isinstance(setup_script, str) else
setup_script.encode(sys.getfilesystemencoding())
)
with DirectorySandbox(setup_dir):
ns = dict(__file__=dunder_file, __name__='__main__')
_execfile(setup_script, ns)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source, name))
def __enter__(self):
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
def __exit__(self, exc_type, exc_value, traceback):
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def run(self, func):
"""Run 'func' under os sandboxing"""
with self:
return func()
def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull]
else:
_EXCEPTIONS = []
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
r'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
from setuptools.sandbox import SandboxViolation
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [
getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
tmpl = textwrap.dedent("""
SandboxViolation: {cmd}{args!r} {kwargs}
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
""").lstrip()
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
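# Usage sketch (added for illustration; not part of the original module, and
# the out-of-tree path below is hypothetical). A callable run under
# DirectorySandbox, via run() inherited from AbstractSandbox, may read
# anywhere but may only write inside the sandbox directory:
#
#     import tempfile
#     from setuptools.sandbox import DirectorySandbox, SandboxViolation
#
#     sandbox_dir = tempfile.mkdtemp()
#     try:
#         # An out-of-tree write trips the _open()/_remap_input() checks.
#         DirectorySandbox(sandbox_dir).run(
#             lambda: open('/outside/escape.txt', 'w'))
#     except SandboxViolation as exc:
#         print(exc)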
|
|
#!/usr/bin/env python
"""System cron flows tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from future.builtins import range
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_server import client_report_utils
from grr_response_server import data_store
from grr_response_server.databases import db
from grr_response_server.flows.cron import system
from grr_response_server.rdfvalues import cronjobs as rdf_cronjobs
from grr.test_lib import test_lib
class SystemCronJobTest(test_lib.GRRBaseTest):
"""Test system cron jobs."""
def setUp(self):
super(SystemCronJobTest, self).setUp()
one_hour_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
1, rdfvalue.HOURS)
eight_day_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
8, rdfvalue.DAYS)
ancient_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
61, rdfvalue.DAYS)
self.SetupClientsWithIndices(
range(0, 10), system="Windows", ping=eight_day_ping)
self.SetupClientsWithIndices(
range(10, 20), system="Linux", ping=eight_day_ping)
self.SetupClientsWithIndices(
range(20, 22),
system="Darwin",
fleetspeak_enabled=True,
ping=one_hour_ping)
# These clients shouldn't be analyzed by any of the stats cronjobs.
self.SetupClientsWithIndices(
range(22, 24), system="Linux", ping=ancient_ping)
for i in range(0, 10):
client_id = "C.1%015x" % i
data_store.REL_DB.AddClientLabels(client_id, "GRR", ["Label1", "Label2"])
data_store.REL_DB.AddClientLabels(client_id, "jim", ["UserLabel"])
def _CheckVersionGraph(self, graph, expected_title, expected_count):
self.assertEqual(graph.title, expected_title)
if expected_count == 0:
self.assertEmpty(graph)
return
sample = graph[0]
self.assertEqual(sample.label,
"GRR Monitor %s" % config.CONFIG["Source.version_numeric"])
self.assertEqual(sample.y_value, expected_count)
def _CheckVersionStats(self, label, report_type, counts):
# We expect to have 1, 7, 14 and 30-day graphs for every label.
graph_series = client_report_utils.FetchMostRecentGraphSeries(
label, report_type)
self._CheckVersionGraph(graph_series.graphs[0],
"1 day actives for %s label" % label, counts[0])
self._CheckVersionGraph(graph_series.graphs[1],
"7 day actives for %s label" % label, counts[1])
self._CheckVersionGraph(graph_series.graphs[2],
"14 day actives for %s label" % label, counts[2])
self._CheckVersionGraph(graph_series.graphs[3],
"30 day actives for %s label" % label, counts[3])
def _CheckGRRVersionBreakDown(self):
"""Checks the result of the GRRVersionBreakDown cron job."""
# All machines should be in All once. Windows machines should be in Label1
# and Label2. There should be no stats for UserLabel.
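    # Per setUp: only the 2 Darwin clients pinged within the last day/week;
    # the 20 Windows/Linux clients (8-day-old pings) appear in the 14- and
    # 30-day buckets, and the 2 ancient-ping clients are excluded entirely.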
report_type = rdf_stats.ClientGraphSeries.ReportType.GRR_VERSION
self._CheckVersionStats("All", report_type, [2, 2, 22, 22])
self._CheckVersionStats("Label1", report_type, [0, 0, 10, 10])
self._CheckVersionStats("Label2", report_type, [0, 0, 10, 10])
def _CheckOSGraph(self, graph, expected_title, expected_counts):
actual_counts = {s.label: s.y_value for s in graph}
self.assertEqual(graph.title, expected_title)
self.assertDictEqual(actual_counts, expected_counts)
def _CheckOSStats(self, label, report_type, counts):
# We expect to have 1, 7, 14 and 30-day graphs for every label.
graph_series = client_report_utils.FetchMostRecentGraphSeries(
label, report_type)
self._CheckOSGraph(graph_series.graphs[0],
"1 day actives for %s label" % label, counts[0])
self._CheckOSGraph(graph_series.graphs[1],
"7 day actives for %s label" % label, counts[1])
self._CheckOSGraph(graph_series.graphs[2],
"14 day actives for %s label" % label, counts[2])
self._CheckOSGraph(graph_series.graphs[3],
"30 day actives for %s label" % label, counts[3])
def _CheckOSBreakdown(self):
report_type = rdf_stats.ClientGraphSeries.ReportType.OS_TYPE
all_stats = [
{
"Darwin": 2
},
{
"Darwin": 2
},
{
"Linux": 10,
"Windows": 10,
"Darwin": 2
},
{
"Linux": 10,
"Windows": 10,
"Darwin": 2
},
]
label_stats = [{}, {}, {"Windows": 10}, {"Windows": 10}]
self._CheckOSStats("All", report_type, all_stats)
self._CheckOSStats("Label1", report_type, label_stats)
self._CheckOSStats("Label2", report_type, label_stats)
def _CheckAccessStats(self, label, expected):
graph_series = client_report_utils.FetchMostRecentGraphSeries(
label, rdf_stats.ClientGraphSeries.ReportType.N_DAY_ACTIVE)
histogram = graph_series.graphs[0]
data = [(x.x_value, x.y_value) for x in histogram]
self.assertEqual(data, expected)
def _ToMicros(self, duration_str):
return rdfvalue.Duration.FromHumanReadable(duration_str).microseconds
def _CheckLastAccessStats(self):
# pyformat: disable
all_counts = [
(self._ToMicros("1d"), 2),
(self._ToMicros("2d"), 2),
(self._ToMicros("3d"), 2),
(self._ToMicros("7d"), 2),
(self._ToMicros("14d"), 22),
(self._ToMicros("30d"), 22),
(self._ToMicros("60d"), 22)
]
label_counts = [
(self._ToMicros("1d"), 0),
(self._ToMicros("2d"), 0),
(self._ToMicros("3d"), 0),
(self._ToMicros("7d"), 0),
(self._ToMicros("14d"), 10),
(self._ToMicros("30d"), 10),
(self._ToMicros("60d"), 10)
]
# pyformat: enable
# All our clients appeared at the same time (and did not appear since).
self._CheckAccessStats("All", expected=all_counts)
# All our clients appeared at the same time but this label is only half.
self._CheckAccessStats("Label1", expected=label_counts)
# All our clients appeared at the same time but this label is only half.
self._CheckAccessStats("Label2", expected=label_counts)
def testPurgeClientStats(self):
client_id = test_lib.TEST_CLIENT_ID
max_age = db.CLIENT_STATS_RETENTION.ToInt(rdfvalue.SECONDS)
for t in [1 * max_age, 1.5 * max_age, 2 * max_age]:
with test_lib.FakeTime(t):
st = rdf_client_stats.ClientStats(RSS_size=int(t))
data_store.REL_DB.WriteClientStats(client_id, st)
stat_entries = data_store.REL_DB.ReadClientStats(
client_id=client_id,
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
self.assertCountEqual([1 * max_age, 1.5 * max_age, 2 * max_age],
[e.RSS_size for e in stat_entries])
with test_lib.FakeTime(2.51 * max_age):
self._RunPurgeClientStats()
stat_entries = data_store.REL_DB.ReadClientStats(
client_id=client_id,
min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
self.assertLen(stat_entries, 1)
self.assertNotIn(max_age, [e.RSS_size for e in stat_entries])
def testGRRVersionBreakDown(self):
"""Check that all client stats cron jobs are run."""
cron_run = rdf_cronjobs.CronJobRun()
job_data = rdf_cronjobs.CronJob()
cron = system.GRRVersionBreakDownCronJob(cron_run, job_data)
cron.Run()
self._CheckGRRVersionBreakDown()
def testOSBreakdown(self):
"""Check that all client stats cron jobs are run."""
run = rdf_cronjobs.CronJobRun()
job = rdf_cronjobs.CronJob()
system.OSBreakDownCronJob(run, job).Run()
self._CheckOSBreakdown()
def testLastAccessStats(self):
"""Check that all client stats cron jobs are run."""
run = rdf_cronjobs.CronJobRun()
job = rdf_cronjobs.CronJob()
system.LastAccessStatsCronJob(run, job).Run()
self._CheckLastAccessStats()
def _RunPurgeClientStats(self):
run = rdf_cronjobs.CronJobRun()
job = rdf_cronjobs.CronJob()
system.PurgeClientStatsCronJob(run, job).Run()
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
|
"""
This is a test/demo of the terrain system.
"""
from panda3d.core import *
loadPrcFile("TerrainConfig.prc")
from direct.showbase.ShowBase import ShowBase
from direct.showbase.DirectObject import DirectObject
import direct.directbase.DirectStart
from direct.filter.CommonFilters import CommonFilters
import math
from direct.task import Task
from direct.gui.OnscreenText import OnscreenText
from direct.actor.Actor import Actor
import sys
import terrain
import terrain.bakery.animate_dreams_bakery
import terrain.bakery.gpuBakery
from terrain.renderer.renderer import RenderNode
from terrain.renderer.renderTiler import RenderTileBakery,RenderNodeTiler
from terrain.renderer.geoClipMapper import GeoClipMapper
from terrain.bakery.bakery import loadTex
import water
import terrain.meshManager.meshManager
import terrain.meshManager.treeFactory
import terrain.meshManager.fernFactory
import terrain.meshManager.groundFactory
print(PandaSystem.getVersionString())
backBinName="background"
dataDir="data/"
############## Configure! ##############
#rendererClass=GeoClipMapper
rendererClass=RenderTileBakery
if rendererClass==RenderTileBakery:
#selectedBakery = terrain.bakery.animate_dreams_bakery.ADBakery ; rendererFolder=dataDir+'renderTilerSimple'
selectedBakery = terrain.bakery.gpuBakery.GpuBakery ; rendererFolder=dataDir+'renderTiler'
mouseControl=False
enableWater=True
############## Configure! ##############
# Init camera
base.disableMouse()
camLens=base.camLens
camLens.setNear(1)
maxDist=10000
camLens.setFar(maxDist*20)
base.cam.node().setLens(camLens)
tileSize=200.0
terrainScale=1.0
focus = NodePath("tilerFocus")
if rendererClass is GeoClipMapper:
# Create a bakery that uses the "bakery2" folder for its resources
b=terrain.bakery.gpuBakery.GpuBakery(None,dataDir+"bakeryData")
n=GeoClipMapper(dataDir+'renderData',b,tileSize/4.0,focus)
    if enableWater: waterNode = water.WaterNode(-10, -10, 20, 20, .01)
else:
# Create a bakery that uses the "bakeryTiler" folder for its resources
b = selectedBakery(None,dataDir+"bakeryTiler")
#Make the main (highest LOD) tiler
barkTexture=loader.loadTexture(dataDir+"textures/barkTexture.jpg")
leafTexture=loader.loadTexture(dataDir+"textures/material-10-cl.png")
tf=terrain.meshManager.treeFactory.TreeFactory(barkTexture=barkTexture,leafTexture=leafTexture)
ff=terrain.meshManager.fernFactory.FernFactory(leafTexture)
heightScale=300
gf=terrain.meshManager.groundFactory.GroundFactory(rendererFolder,heightScale=heightScale)
factories=[gf,ff,tf]
LODCutoffs=[float('inf'),2000,1000,500,300]
meshManager=terrain.meshManager.meshManager.MeshManager(factories)
rtb=RenderTileBakery(b,tileSize,meshManager,heightScale)
n=RenderNodeTiler(rtb,tileSize,focus,forceRenderedCount=2,maxRenderedCount=6,)
#x=RenderNode(rendererFolder,n)
#n=rendererClass(rendererFolder,b,tileSize,focus,factories,2,3,heightScale=300)
    if enableWater: waterNode = water.WaterNode(-100, -100, 200, 200, 0.1 * heightScale)
n.reparentTo(render)
n.setScale(terrainScale)
base.setBackgroundColor(.3,.3,.8,0)
# Make a little UI input handling class
class UI(DirectObject):
def __init__(self):
self.accept("v", base.bufferViewer.toggleEnable)
self.accept("x", self.analize)
self.accept("o", base.toggleWireframe)
self.accept("u", base.oobe)
self.accept("y", base.oobeCull)
base.bufferViewer.setPosition("llcorner")
base.bufferViewer.setCardSize(.25, 0.0)
    def analyze(self):
        print("")
        render.analyze()
        print("")
        render.ls()
        print("")
ui=UI()
dlight = DirectionalLight('dlight')
dlnp = render.attachNewNode(dlight)
dlnp.setHpr(0, 0, 0)
render.setLight(dlnp)
alight = AmbientLight('alight')
alnp = render.attachNewNode(alight)
render.setLight(alnp)
#rotating light to show that normals are calculated correctly
def updateLight(task):
h=task.time/30.0*360+180
dlnp.setHpr(0,h,0)
h=h+90
h=h%360
h=min(h,360-h)
#h is now angle from straight up
hv=h/180.0
hv=1-hv
sunset=max(0,1.0-abs(hv-.5)*8)
sunset=min(1,sunset)
if hv>.5: sunset=1
#sunset=sunset**.2
sunset=VBase4(0.8, 0.5, 0.0, 1)*sunset
sun=max(0,hv-.5)*2*4
sun=min(sun,1)
dColor=(VBase4(0.8, 0.7, 0.7, 1)*sun*2+sunset)
dlight.setColor(dColor)
aColor=VBase4(0.3, 0.3, 0.8, 1)*sun*2.6+VBase4(0.2, 0.2, 0.4, 1)*2.0
alight.setColor(aColor*(8-dColor.length())*(1.0/8))
return Task.cont
taskMgr.add(updateLight, "rotating Light")
# skybox
skybox = loader.loadModel(dataDir+'models/skybox.egg')
# make big enough to cover whole terrain, else there'll be problems with the water reflections
skybox.setScale(maxDist*3)
skybox.setBin('background', 1)
skybox.setDepthWrite(0)
skybox.setLightOff()
skybox.reparentTo(render)
# Filter to display the glow map's glow via bloom.
filters = CommonFilters(base.win, base.cam)
#filterok = filters.setBloom(blend=(0,0,0,1), desat=0.5, intensity=2.5, size="small",mintrigger=0.0, maxtrigger=1.0)
font = TextNode.getDefaultFont()
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1,1,1,1), font = font,
pos=(-1.3, pos), align=TextNode.ALeft, scale = .05)
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, fg=(1,1,1,1), font = font,
pos=(1.3,-0.95), align=TextNode.ARight, scale = .07)
#A simple function to make sure a value is in a given range, -1 to 1 by default
def restrain(i, mn = -1, mx = 1): return min(max(i, mn), mx)
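# For example, restrain(5) == 1 and restrain(-3, 0, 10) == 0.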
class keyTracker(DirectObject):
"""
    Class for tracking the state of keys. keyMap holds whether a key is down.
    Multiple keys can map to one name, though the value will be set to False
    when the first is released.
"""
def __init__(self):
DirectObject.__init__(self)
self.keyMap = {}
def setKey(self, key, value):
"""Records the state of key"""
self.keyMap[key] = value
def addKey(self,key,name,allowShift=True):
self.accept(key, self.setKey, [name,True])
self.accept(key+"-up", self.setKey, [name,False])
self.accept(key.upper()+"-up", self.setKey, [name,False])
if allowShift:
self.addKey("shift-"+key,name,False)
self.keyMap[name]=False
class World(keyTracker):
def __init__(self):
keyTracker.__init__(self)
base.win.setClearColor(Vec4(0,0,0,1))
# Post the instructions
self.title = addTitle("Infinite Ralph")
self.inst1 = addInstructions(0.95, "[ESC]: Quit")
        self.inst2 = addInstructions(0.90, "WASD + mouse (or arrow keys)")
self.inst3 = addInstructions(0.85, "Shift for hyper")
        self.inst4 = addInstructions(0.80, "X for analyze")
        self.inst5 = addInstructions(0.70, "V toggles buffer viewer")
        self.inst6 = addInstructions(0.65, "U toggles oobe")
        self.inst7 = addInstructions(0.60, "Y toggles oobeCull")
        self.inst8 = addInstructions(0.55, "O toggles Wireframe")
# Create the main character, Ralph
ralphStartPos = Vec3(0,0,0)
self.ralph = Actor(dataDir+"models/ralph",{"run":dataDir+"models/ralph-run"})
self.ralph.reparentTo(render)
self.ralph.setScale(.4)
self.ralph.setPos(ralphStartPos)
#self.ralph.setShaderAuto()
focus.reparentTo(self.ralph)
# Create a floater object. We use the "floater" as a temporary
# variable in a variety of calculations.
self.floater = NodePath(PandaNode("floater"))
self.floater.reparentTo(self.ralph)
# Accept the control keys for movement and rotation
self.accept("escape", sys.exit)
self.addKey("w","forward")
self.addKey("a","left")
self.addKey("s","backward")
self.addKey("d","right")
self.addKey("arrow_left","turnLeft")
self.addKey("arrow_right","turnRight")
self.addKey("arrow_down","turnDown")
self.addKey("arrow_up","turnUp")
self.setKey('zoom',0)
self.accept("wheel_up", self.setKey, ['zoom',1])
self.accept("wheel_down", self.setKey, ['zoom',-1])
#addKey("wheel_down","zoomOut")
#addKey("wheel_up","zoomIn")
self.addKey("shift","hyper")
taskMgr.add(self.move,"moveTask")
# Game state variables
self.isMoving = False
# Set up the camera
base.disableMouse()
base.camera.setH(180)
base.camera.reparentTo(self.ralph)
self.camDist=0.0
self.floater.setZ(6)
self.floater.setY(-1)
n.setShaderAuto()
def move(self, task):
# Get the time elapsed since last frame. We need this
# for framerate-independent movement.
elapsed = globalClock.getDt()
if enableWater: waterNode.setShaderInput('time', task.time)
# move the skybox with the camera
campos = base.camera.getPos()
skybox.setPos(campos)
if enableWater: waterNode.update()
        turnRightAmount = self.keyMap["turnRight"] - self.keyMap["turnLeft"]
        turnUpAmount = self.keyMap["turnUp"] - self.keyMap["turnDown"]
        turnRightAmount *= elapsed * 100
        turnUpAmount *= elapsed * 100
# Use mouse input to turn both Ralph and the Camera
if mouseControl and base.mouseWatcherNode.hasMouse():
# get changes in mouse position
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
deltaX = md.getX() - 200
deltaY = md.getY() - 200
# reset mouse cursor position
base.win.movePointer(0, 200, 200)
            turnRightAmount += 0.2 * deltaX
            turnUpAmount -= 0.2 * deltaY
zoomOut=self.keyMap["zoom"]
self.camDist=max(min(maxDist,self.camDist+zoomOut*elapsed*50+zoomOut*self.camDist*elapsed*.5),.5)
self.keyMap["zoom"]*=2.7**(-elapsed*4)# Smooth fade out of zoom speed
        self.ralph.setH(self.ralph.getH() - turnRightAmount)
        base.camera.setP(base.camera.getP() + turnUpAmount)
# save ralph's initial position so that we can restore it,
# in case he falls off the map or runs into something.
startpos = self.ralph.getPos()
# If a move-key is pressed, move ralph in the specified direction.
# Adding, subtracting and multiplying booleans (which get a value of 0 or 1)
# for the keys here.
forwardMove=self.keyMap["forward"]-.5*self.keyMap["backward"]
rightMove=.5*(self.keyMap["right"]-self.keyMap["left"])
        # Slow forward movement when moving diagonally
forwardMove*=1.0-abs(rightMove)
        # Hyper mode. Probably just for debugging.
speed=1+4*self.keyMap["hyper"]
rightMove*=speed
forwardMove*=speed
self.ralph.setX(self.ralph, -elapsed*25*rightMove)
self.ralph.setY(self.ralph, -elapsed*25*forwardMove)
h=n.height(self.ralph.getX(n),self.ralph.getY(n))
self.ralph.setZ(n,h)
def sign(n):
if n>=0: return 1
#if n==0: return 0
return -1
# If ralph is moving, loop the run animation.
# If he is standing still, stop the animation.
if rightMove or forwardMove:
self.ralph.setPlayRate(forwardMove+abs(rightMove)*sign(forwardMove), 'run')
if self.isMoving is False:
self.ralph.loop("run")
#self.ralph.loop("walk")
self.isMoving = True
else:
if self.isMoving:
self.ralph.stop()
self.ralph.pose("walk",5)
self.isMoving = False
# The camera should look in ralph's direction,
# but it should also try to stay horizontal, so look at
# a floater which hovers above ralph's head.
base.camera.setPos(self.floater,0,0,0)
base.camera.setPos(base.camera,0,-self.camDist,0)
return Task.cont
w = World()
run()
|
|
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import interface
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@interface.volumedriver
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
1.1.0 - Add chap support and minor bug fixes
1.1.1 - Add wait logic for delete volumes
1.1.2 - Update ig to None before delete volume
1.2.0 - Add retype support
"""
VERSION = '1.2.0'
volume_stats = {}
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_update_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.cb_use_chap = self.configuration.use_chap_auth
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.parse.urlencode(api)
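        # e.g. _get_url('listTsm', {'accountid': '42'}, 'KEY') yields
        # '/client/api?command=listTsm&response=json&accountid=42&apiKey=KEY'
        # (hypothetical values; query-parameter order may vary).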
return url
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = http_client.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
def _api_request_for_cloudbyte(self, cmd, params, version=None):
"""Make http calls to CloudByte."""
LOG.debug("Executing CloudByte API for command [%s].", cmd)
if version is None:
version = CloudByteISCSIDriver.VERSION
# Below is retrieved from /etc/cinder/cinder.conf
apikey = self.configuration.cb_apikey
if apikey is None:
msg = (_("API key is missing for CloudByte driver."))
raise exception.VolumeBackendAPIException(data=msg)
host = self.configuration.san_ip
# Construct the CloudByte URL with query params
url = self._get_url(cmd, params, apikey)
data = {}
error_details = None
http_status = None
try:
# Execute CloudByte API & frame the response
res_obj = self._execute_and_get_response_details(host, url)
data = res_obj['data']
error_details = res_obj['error']
http_status = res_obj['http_status']
except http_client.HTTPException as ex:
msg = (_("Error executing CloudByte API [%(cmd)s], "
"Error: %(err)s.") %
{'cmd': cmd, 'err': ex})
raise exception.VolumeBackendAPIException(data=msg)
# Check if it was an error response from CloudByte
if http_status != 200:
msg = (_("Failed to execute CloudByte API [%(cmd)s]."
" Http status: %(status)s,"
" Error: %(error)s.") %
{'cmd': cmd, 'status': http_status,
'error': error_details})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
cmd)
return data
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _add_qos_group_request(self, volume, tsmid, volume_name,
qos_group_params):
# Prepare the user input params
params = {
"name": "QoS_" + volume_name,
"tsmid": tsmid
}
# Get qos related params from configuration
params.update(self.configuration.cb_add_qosgroup)
# Override the default configuration by qos specs
if qos_group_params:
params.update(qos_group_params)
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name, file_system_params):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
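        # e.g. a 10 GB OpenStack volume becomes the CloudByte quota "10G".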
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params.update(self.configuration.cb_create_volume)
# Override the default configuration by qos specs
if file_system_params:
params.update(file_system_params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name, account_name):
# Filter required tsm's details
tsms = data['listTsmResponse'].get('listTsm')
if tsms is None:
msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
"for account [%(account)s].") %
{'tsm': tsm_name, 'account': account_name})
raise exception.VolumeBackendAPIException(data=msg)
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
def _retry_volume_operation(self, operation, retries,
max_retries, jobid,
cb_volume):
"""CloudByte async calls via the FixedIntervalLoopingCall."""
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
count = retries['count']
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if result_res is None:
msg = (_(
"Null response received while querying "
"for [%(operation)s] based job [%(job)s] "
"at CloudByte storage.") %
{'operation': operation, 'job': jobid})
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
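        # CloudByte job status codes, per the branches below: 1 = completed,
        # 2 = failed; any other value means still running, so keep polling.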
if status == 1:
LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for "
"volume [%(cb_volume)s]."),
{'operation': operation, 'cb_volume': cb_volume})
raise loopingcall.LoopingCallDone()
elif status == 2:
job_result = result_res.get("jobresult")
err_msg = job_result.get("errortext")
err_code = job_result.get("errorcode")
            msg = (_(
                "Error in Operation [%(operation)s] "
                "for volume [%(cb_volume)s] in CloudByte "
                "storage: [%(cb_error)s], "
                "error code: [%(error_code)s].") %
                {'cb_error': err_msg,
                 'error_code': err_code,
                 'cb_volume': cb_volume,
                 'operation': operation})
raise exception.VolumeBackendAPIException(data=msg)
elif count == max_retries:
# All attempts exhausted
LOG.error(_LE("CloudByte operation [%(operation)s] failed"
" for volume [%(vol)s]. Exhausted all"
" [%(max)s] attempts."),
{'operation': operation,
'vol': cb_volume,
'max': max_retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
count += 1
retries['count'] = count
LOG.debug("CloudByte operation [%(operation)s] for"
" volume [%(vol)s]: retry [%(retry)s] of [%(max)s].",
{'operation': operation,
'vol': cb_volume,
'retry': count,
'max': max_retries})
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Create Volume',
retries,
max_retries,
jobid,
cb_volume_name)
timer.start(interval=retry_interval).wait()
def _wait_for_volume_deletion(self, volume_response, cb_volume_id):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('deleteFileSystemResponse')
if vol_res is None:
msg = _("Null response received while deleting volume [%s] "
"at CloudByte storage.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"delete volume [%s] response.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_delete_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_delete_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Delete Volume',
retries,
max_retries,
jobid,
cb_volume_id)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume, chap):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
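        # e.g. '10.0.0.5:3260 iqn.2014-05.cloudbyte:target0 0' (hypothetical
        # portal and IQN): the '<ip:port> <iqn> <lun>' layout this driver
        # reports to Cinder.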
        # CHAP authentication will be provided in forthcoming patches/releases.
model_update['provider_auth'] = None
if chap:
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
{'iqn': volume['iqnname'], 'proid': volume['id']})
return model_update
def _build_provider_details_from_response(self,
cb_volumes,
volume_name,
chap):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol,
chap)
break
return model_update
    def _get_initiator_group_id_from_response(self, data, ig_name):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
            if ig.get('initiatorgroup') == ig_name:
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
if ag_id:
params['authgroupid'] = ag_id
params['authmethod'] = "CHAP"
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot_name, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot_name:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
def _get_auth_group_id_from_response(self, data):
"""Find iSCSI auth group id."""
chap_group = self.configuration.cb_auth_group
ag_list_res = data.get('listiSCSIAuthGroupResponse')
if ag_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi auth groups.")
raise exception.VolumeBackendAPIException(data=msg)
ag_list = ag_list_res.get('authgroup')
if ag_list is None:
msg = _('No iscsi auth groups were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ag_id = None
for ag in ag_list:
if ag.get('name') == chap_group:
ag_id = ag['id']
break
else:
msg = _("Auth group [%s] details not found in "
"CloudByte storage.") % chap_group
raise exception.VolumeBackendAPIException(data=msg)
return ag_id
def _get_auth_group_info(self, account_id, ag_id):
"""Fetch the auth group details."""
params = {"accountid": account_id, "authgroupid": ag_id}
auth_users = self._api_request_for_cloudbyte(
'listiSCSIAuthUser', params)
auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse')
if auth_user_details_res is None:
msg = _("No response was received from CloudByte storage "
"list iSCSI auth user API call.")
raise exception.VolumeBackendAPIException(data=msg)
auth_user_details = auth_user_details_res.get('authuser')
if auth_user_details is None:
msg = _("Auth user details not found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
chapuser = auth_user_details[0].get('chapusername')
chappassword = auth_user_details[0].get('chappassword')
if chapuser is None or chappassword is None:
msg = _("Invalid chap user details found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id}
return data
def _get_chap_info(self, account_id):
"""Fetch the chap details."""
params = {"accountid": account_id}
iscsi_auth_data = self._api_request_for_cloudbyte(
'listiSCSIAuthGroup', params)
ag_id = self._get_auth_group_id_from_response(
iscsi_auth_data)
return self._get_auth_group_info(account_id, ag_id)
def _export(self):
model_update = {'provider_auth': None}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
chap = self._get_chap_info(account_id)
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
return model_update
def _update_initiator_group(self, volume_id, ig_name):
# Get account id of this account
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
# Filter the list of initiator groups with the name
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, ig_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Update the iscsi service with above fetched iscsi_id
self._request_update_iscsi_service(iscsi_id, ig_id, None)
LOG.debug("CloudByte initiator group updated successfully for volume "
"[%(vol)s] with ig [%(ig)s].",
{'vol': volume_id,
'ig': ig_name})
def _get_qos_by_volume_type(self, ctxt, type_id):
"""Get the properties which can be QoS or file system related."""
update_qos_group_params = {}
update_file_system_params = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
extra_specs = volume_type.get('extra_specs')
if qos_specs_id is not None:
specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
# Override extra specs with specs
# Hence specs will prefer QoS than extra specs
extra_specs.update(specs)
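        # Keys may be scoped like 'qos:iops' (hypothetical example); the loop
        # below strips the scope prefix and routes each value to QoS-group or
        # file-system params.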
for key, value in extra_specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.configuration.cb_update_qos_group:
update_qos_group_params[key] = value
elif key in self.configuration.cb_update_file_system:
update_file_system_params[key] = value
return update_qos_group_params, update_file_system_params
def create_volume(self, volume):
qos_group_params = {}
file_system_params = {}
tsm_name = self.configuration.cb_tsm_name
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
# Set backend storage volume name using OpenStack volume id
cb_volume_name = volume['id'].replace("-", "")
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id is not None:
qos_group_params, file_system_params = (
self._get_qos_by_volume_type(ctxt, type_id))
LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
"at CloudByte storage w.r.t "
"OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_name,
'stack_vol': volume.get('id'),
'tsm': tsm_name})
tsm_data = self._request_tsm_details(account_id)
tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
# Send request to create a qos group before creating a volume
LOG.debug("Creating qos group for CloudByte volume [%s].",
cb_volume_name)
qos_data = self._add_qos_group_request(
volume, tsm_details.get('tsmid'), cb_volume_name, qos_group_params)
# Extract the qos group id from response
qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
LOG.debug("Successfully created qos group for CloudByte volume [%s].",
cb_volume_name)
# Send a create volume request to CloudByte API
vol_data = self._create_volume_request(
volume, tsm_details.get('datasetid'), qosgroupid,
tsm_details.get('tsmid'), cb_volume_name, file_system_params)
        # Since create volume is an async call, confirm the creation
        # before proceeding further.
self._wait_for_volume_creation(vol_data, cb_volume_name)
# Fetch iscsi id
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params={})
volume_id = self._get_volume_id_from_response(cb_volumes,
cb_volume_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, 'ALL')
LOG.debug("Updating iscsi service for CloudByte volume [%s].",
cb_volume_name)
ag_id = None
chap_info = {}
if self.cb_use_chap is True:
chap_info = self._get_chap_info(account_id)
ag_id = chap_info['ag_id']
# Update the iscsi service with above fetched iscsi_id & ig_id
self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
LOG.debug("CloudByte volume [%(vol)s] updated with "
"iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
"authentication group [%(ag)s].",
{'vol': cb_volume_name, 'iscsi': iscsi_id,
'ig': ig_id, 'ag': ag_id})
# Provide the model after successful completion of above steps
provider = self._build_provider_details_from_response(
cb_volumes, cb_volume_name, chap_info)
LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
# Search cb_volume_id in CloudByte volumes
            # in case it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
# Need to set the initiator group to None before deleting
self._update_initiator_group(cb_volume_id, 'None')
params = {"id": cb_volume_id}
del_res = self._api_request_for_cloudbyte('deleteFileSystem',
params)
self._wait_for_volume_deletion(del_res, cb_volume_id)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
# Set backend storage snapshot name using OpenStack snapshot id
snapshot_name = "snap_" + snapshot['id'].replace("-", "")
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
        First it creates a snapshot of the source/parent volume, then it
        creates a clone of that newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = src_volume.get('id')
# Generating id for snapshot
        # as it is not user-entered in this particular use case
snapshot_id = six.text_type(uuid.uuid1())
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
chap_info = {}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
chap_info = self._get_chap_info(account_id)
model_update = self._build_provider_details_from_volume(cb_vol,
chap_info)
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
        # If cb_snapshot_path is None, there is no need to call
        # the CloudByte API.
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
def create_export(self, context, volume, connector):
"""Setup the iscsi export info."""
return self._export()
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
return self._export()
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
def retype(self, ctxt, volume, new_type, diff, host):
"""Retypes a volume, QoS and file system update is only done."""
cb_volume_id = volume.get('provider_id')
if cb_volume_id is None:
message = _("Provider information w.r.t CloudByte storage "
"was not found for OpenStack "
"volume [%s].") % volume['id']
            raise exception.VolumeBackendAPIException(data=message)
update_qos_group_params, update_file_system_params = (
self._get_qos_by_volume_type(ctxt, new_type['id']))
if update_qos_group_params:
list_file_sys_params = {'id': cb_volume_id}
response = self._api_request_for_cloudbyte(
'listFileSystem', list_file_sys_params)
response = response['listFilesystemResponse']
cb_volume_list = response['filesystem']
cb_volume = cb_volume_list[0]
if not cb_volume:
msg = (_("Volume [%(cb_vol)s] was not found at "
"CloudByte storage corresponding to OpenStack "
"volume [%(ops_vol)s].") %
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
raise exception.VolumeBackendAPIException(data=msg)
update_qos_group_params['id'] = cb_volume.get('groupid')
self._api_request_for_cloudbyte(
'updateQosGroup', update_qos_group_params)
if update_file_system_params:
update_file_system_params['id'] = cb_volume_id
self._api_request_for_cloudbyte(
'updateFileSystem', update_file_system_params)
LOG.info(_LI("Successfully updated CloudByte volume [%(cb_vol)s] "
"corresponding to OpenStack volume [%(ops_vol)s]."),
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
return True
|
|
from kgb import SpyAgency
from reviewboard.diffviewer.chunk_generator import DiffChunkGenerator
from reviewboard.scmtools.core import PRE_CREATION
from reviewboard.testing import TestCase
class DiffChunkGeneratorTests(SpyAgency, TestCase):
"""Unit tests for DiffChunkGenerator."""
fixtures = ['test_scmtools']
COMMIT_1_DIFF = (
b'diff --git a/README b/README\n'
b'index 94bdd3e..197009f 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hello, world!\n'
b'+Hi, world!\n'
)
COMMIT_2_DIFF = (
b'diff --git a/README b/README\n'
b'index 197009f..87abad9 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hi, world!\n'
b'+Yo, world.\n'
)
COMMIT_3_DIFF = (
b'diff --git a/README b/README\n'
b'index 87abad9..fe1678a 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Yo, world.\n'
b'+Yo, dog.\n'
)
COMMIT_1_2_SQUASHED_DIFF = (
b'diff --git a/README b/README\n'
b'index 94bdd3e..87abad9 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hello, world!\n'
b'+Yo, world.\n'
)
COMMIT_1_3_SQUASHED_DIFF = (
b'diff --git a/README b/README\n'
b'index 94bdd3e..fe1678a 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hello, world!\n'
b'+Yo, dog.\n'
)
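    # The index lines above chain README through 94bdd3e -> 197009f ->
    # 87abad9 -> fe1678a; the squashed diffs jump between those endpoints.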
def setUp(self):
super(DiffChunkGeneratorTests, self).setUp()
self.repository = self.create_repository(tool_name='Test')
self.diffset = self.create_diffset(repository=self.repository)
self.filediff = self.create_filediff(diffset=self.diffset)
self.generator = DiffChunkGenerator(None, self.filediff)
def test_get_chunks_with_empty_added_file(self):
"""Testing DiffChunkGenerator.get_chunks with empty added file"""
self.filediff.source_revision = PRE_CREATION
self.filediff.extra_data.update({
'raw_insert_count': 0,
'raw_delete_count': 0,
})
self.assertEqual(len(list(self.generator.get_chunks())), 0)
def test_get_chunks_with_explicit_encoding(self):
"""Testing DiffChunkGenerator.get_chunks with explicit encoding on
FileDiff
"""
self.filediff.diff = (
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-%s\n'
b'+%s\n'
% ('Hello, world!'.encode('utf-16'),
'Hi, everybody!'.encode('utf-16'))
)
self.filediff.source_file = '/test-file;encoding=utf-16'
self.filediff.extra_data['encoding'] = 'utf-16'
self.spy_on(self.repository.get_file)
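        # kgb's spy_on() wraps Repository.get_file so the assertion at the end
        # of this test can inspect the bytes the repository actually returned.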
chunks = list(self.generator.get_chunks())
self.assertEqual(len(chunks), 1)
self.assertEqual(
chunks[0],
{
'change': 'replace',
'collapsable': False,
'index': 0,
'lines': [
[
1, 1,
'Hello, world!',
None,
1,
'Hi, everybody!',
None,
False,
]
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertTrue(self.repository.get_file.last_returned(
'Hello, world!\n'.encode('utf-16')))
def test_get_chunks_with_replace_in_added_file_with_parent_diff(self):
"""Testing DiffChunkGenerator.get_chunks with replace chunks in
added file with parent diff
"""
self.filediff.diff = (
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-line\n'
b'+line.\n'
)
self.filediff.parent_diff = (
b'--- README\n'
b'+++ README\n'
b'@@ -0,0 +1,1 @@\n'
b'+line\n'
)
self.filediff.source_revision = PRE_CREATION
self.filediff.extra_data.update({
'raw_insert_count': 1,
'raw_delete_count': 1,
'insert_count': 0,
'delete_count': 0,
})
self.assertEqual(len(list(self.generator.get_chunks())), 1)
def test_get_chunks_with_commit_and_base_filediff(self):
"""Testing DiffChunkGenerator.get_chunks with commit using base_filediff
"""
commit1 = self.create_diffcommit(
commit_id='abc1234',
parent_id='abc1233',
diffset=self.diffset,
diff_contents=self.COMMIT_1_DIFF)
commit2 = self.create_diffcommit(
commit_id='abc1235',
parent_id='abc1234',
diffset=self.diffset,
diff_contents=self.COMMIT_2_DIFF)
self.diffset.finalize_commit_series(
cumulative_diff=self.COMMIT_1_2_SQUASHED_DIFF,
validation_info=None,
validate=False,
save=True)
base_filediff = commit1.files.get()
tip_filediff = commit2.files.get()
generator = DiffChunkGenerator(request=None,
filediff=tip_filediff,
base_filediff=base_filediff)
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
chunk = chunks[0]
self.assertEqual(chunk['index'], 0)
self.assertEqual(chunk['change'], 'replace')
self.assertEqual(
chunk['lines'],
[[
1,
1, 'Hi, world!', [(0, 2), (9, 10)],
1, 'Yo, world.', [(0, 2), (9, 10)],
False,
]])
def test_get_chunks_with_commit_and_no_base_filediff(self):
"""Testing DiffChunkGenerator.get_chunks with commit and no
base_filediff
"""
self.create_diffcommit(
commit_id='abc1234',
parent_id='abc1233',
diffset=self.diffset,
diff_contents=self.COMMIT_1_DIFF)
self.create_diffcommit(
commit_id='abc1235',
parent_id='abc1234',
diffset=self.diffset,
diff_contents=self.COMMIT_2_DIFF)
commit3 = self.create_diffcommit(
commit_id='abc1236',
parent_id='abc1235',
diffset=self.diffset,
diff_contents=self.COMMIT_3_DIFF)
self.diffset.finalize_commit_series(
cumulative_diff=self.COMMIT_1_3_SQUASHED_DIFF,
validation_info=None,
validate=False,
save=True)
filediff = commit3.files.get()
generator = DiffChunkGenerator(request=None,
filediff=filediff)
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
chunk = chunks[0]
self.assertEqual(chunk['index'], 0)
self.assertEqual(chunk['change'], 'replace')
self.assertEqual(
chunk['lines'],
[[
1,
1, 'Hello, world!', None,
1, 'Yo, dog.', None,
False,
]])
def test_get_chunks_with_commit_and_base_tip_same(self):
"""Testing DiffChunkGenerator.get_chunks with commit and base_filediff
same as filediff
"""
commit = self.create_diffcommit(
commit_id='abc1234',
parent_id='abc1233',
diffset=self.diffset,
diff_contents=self.COMMIT_1_DIFF)
self.diffset.finalize_commit_series(
cumulative_diff=self.COMMIT_1_DIFF,
validation_info=None,
validate=False,
save=True)
filediff = commit.files.get()
generator = DiffChunkGenerator(request=None,
filediff=filediff,
base_filediff=filediff)
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
chunk = chunks[0]
self.assertEqual(chunk['index'], 0)
self.assertEqual(chunk['change'], 'equal')
self.assertEqual(
chunk['lines'],
[[
1,
1, 'Hi, world!', [],
1, 'Hi, world!', [],
False,
]])
def test_get_chunks_with_commit_and_file_recreated(self):
"""Testing DiffChunkGenerator.get_chunks with commit and file recreated
in prior commit
"""
commit1, commit2 = self._make_delete_recreate_commits()
filediff = commit2.files.get()
generator = DiffChunkGenerator(request=None,
filediff=filediff)
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
chunk = chunks[0]
self.assertEqual(chunk['index'], 0)
self.assertEqual(chunk['change'], 'insert')
self.assertEqual(
chunk['lines'],
[[
1,
'', '', [],
1, 'This is a new file.', [],
False,
]])
def test_get_chunks_with_commit_and_file_recreated_and_base_deleted(self):
"""Testing DiffChunkGenerator.get_chunks with commit and file recreated
in prior commit with base_filediff as deleted file
"""
commit1, commit2 = self._make_delete_recreate_commits()
base_filediff = commit1.files.get()
filediff = commit2.files.get()
generator = DiffChunkGenerator(request=None,
filediff=filediff,
base_filediff=base_filediff)
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
chunk = chunks[0]
self.assertEqual(chunk['index'], 0)
self.assertEqual(chunk['change'], 'insert')
self.assertEqual(
chunk['lines'],
[[
1,
'', '', [],
1, 'This is a new file.', [],
False,
]])
def test_line_counts_unmodified_by_interdiff(self):
"""Testing that line counts are not modified by interdiffs where the
changes are reverted
"""
self.filediff.source_revision = PRE_CREATION
self.filediff.diff = (
b'--- README\n'
b'+++ README\n'
b'@@ -0,0 +1,1 @@\n'
b'+line\n'
)
# We have to consume everything from the get_chunks generator in order
# for the line counts to be set on the FileDiff.
self.assertEqual(len(list(self.generator.get_chunks())), 1)
line_counts = self.filediff.get_line_counts()
# Simulate an interdiff where the changes are reverted.
interdiff_generator = DiffChunkGenerator(request=None,
filediff=self.filediff,
interfilediff=None,
force_interdiff=True)
# Again, just consuming the generator.
self.assertEqual(len(list(interdiff_generator.get_chunks())), 1)
self.assertEqual(line_counts, self.filediff.get_line_counts())
def _make_delete_recreate_commits(self):
"""Finalize and return commits for a delete/re-create test.
This creates two commits. In the first one, an upstream file is
deleted. In the second, it's re-created.
The commits are finalized before being returned.
Returns:
list of reviewboard.diffviewer.models.diffcommit.DiffCommit:
The newly-created commits.
"""
commits = [
self.create_diffcommit(
commit_id='abc1234',
parent_id='abc1233',
diffset=self.diffset,
diff_contents=(
b'diff --git a/README b/README\n'
b'deleted file mode 100644\n'
b'index 94bdd3e..0000000\n'
b'--- README\n'
b'+++ /dev/null\n'
b'@@ -1,1 +0,0 @@\n'
b'-Hello, world!\n'
)),
self.create_diffcommit(
commit_id='abc1235',
parent_id='abc1234',
diffset=self.diffset,
diff_contents=(
b'diff --git a/README b/README\n'
b'new file mode 100644\n'
b'index 0000000..ba178ca\n'
b'--- /dev/null\n'
b'+++ README\n'
b'@@ -0,0 +1,1 @@\n'
b'+This is a new file.\n'
)),
]
self.diffset.finalize_commit_series(
cumulative_diff=(
b'diff --git a/README b/README\n'
b'index 94bdd3e..0000000 10644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-Hello, world!\n'
b'+This is a new file.\n'
),
validation_info=None,
validate=False,
save=True)
return commits
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Mimic World lettuce object
"""
import os
import shutil
import time
import pkg_resources
from bigml.api import BigML
from bigml.api import HTTP_OK, HTTP_NO_CONTENT, HTTP_UNAUTHORIZED
MAX_RETRIES = 10
RESOURCE_TYPES = [
'cluster',
'source',
'dataset',
'model',
'prediction',
'evaluation',
'ensemble',
'batchprediction',
'centroid',
'batchcentroid',
'anomaly',
'anomalyscore',
'batchanomalyscore',
'project',
'sample']
IRREGULAR_PLURALS = {
'anomaly': 'anomalies',
'batchprediction': 'batch_predictions',
'batchcentroid': 'batch_centroids',
'anomalyscore': 'anomaly_scores',
'batchanomalyscore': 'batch_anomaly_scores'}
TRANSLATED_RESOURCES = {
'batchprediction': 'batch_prediction',
'batchcentroid': 'batch_centroid',
'anomalyscore': 'anomaly_score',
'batchanomalyscore': 'batch_anomaly_score'}
def plural(resource_type):
"""Creates the plural form of a resource type
"""
return IRREGULAR_PLURALS.get(resource_type, "%ss" % resource_type)
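# Illustrative examples of plural() (not part of the original module):
# regular resource types just gain an "s", while irregular ones are
# looked up in IRREGULAR_PLURALS:
#
#   plural('model')            -> 'models'
#   plural('anomaly')          -> 'anomalies'
#   plural('batchprediction')  -> 'batch_predictions'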
class World(object):
def __init__(self):
self.USERNAME = None
self.API_KEY = None
self.api = None
self.api_dev_mode = None
self.reset_api()
self.clear()
self.dataset_ids = []
self.fields_properties_dict = {}
self.counters = {}
self.print_connection_info()
def print_connection_info(self):
self.USERNAME = os.environ.get('BIGML_USERNAME')
self.API_KEY = os.environ.get('BIGML_API_KEY')
if self.USERNAME is None or self.API_KEY is None:
assert False, ("Tests use the BIGML_USERNAME and BIGML_API_KEY"
" environment variables to authenticate the"
" connection, but they seem to be unset. Please,"
"set them before testing.")
else:
assert True
self.api = BigML(self.USERNAME, self.API_KEY)
print self.api.connection_info()
def count_resources(self, time_tag, changed=False):
"""Counts the existing resources and stores it keyed by time_tag.
If changed is set to True, only resources that are logged as
changed are listed.
"""
print "Counting resources (%s)." % time_tag
for resource_type in RESOURCE_TYPES:
resource_type = plural(resource_type)
            if not changed or len(getattr(self, resource_type)) > 0:
                resources = getattr(self.api, "list_%s" % resource_type)()
if resource_type == 'source' and resources['code'] != HTTP_OK:
assert False, (
"Unable to list your sources. Please check the"
" BigML domain and credentials to be:\n\n%s" %
self.api.connection_info())
else:
if resources['code'] == HTTP_OK:
assert True
else:
assert False, ("HTTP returned code %s" %
resources['code'])
                if resource_type not in self.counters:
self.counters[resource_type] = {}
self.counters[resource_type][time_tag] = resources[
'meta']['total_count']
def clear(self):
"""Clears the stored resources' ids
"""
for resource_type in RESOURCE_TYPES:
setattr(self, plural(resource_type), [])
setattr(self, TRANSLATED_RESOURCES.get(resource_type,
resource_type), None)
def reset_api(self):
"""Reset the api connection values
"""
self.api = BigML(self.USERNAME, self.API_KEY)
self.api_dev_mode = BigML(self.USERNAME, self.API_KEY, dev_mode=True)
def delete_resources(self):
"""Deletes the created objects
"""
for resource_type in RESOURCE_TYPES:
object_list = getattr(self, plural(resource_type))
if object_list:
print "Deleting %s %s" % (len(object_list),
plural(resource_type))
delete_method = self.api.deleters[resource_type]
for obj_id in object_list:
counter = 0
result = delete_method(obj_id)
while (result['code'] != HTTP_NO_CONTENT and
counter < MAX_RETRIES):
print "Delete failed for %s. Retrying" % obj_id
time.sleep(3)
counter += 1
result = delete_method(obj_id)
if counter == MAX_RETRIES:
print ("Retries to delete the created resources are"
" exhausted. Failed to delete.")
world = World()
def res_filename(filename):
    return pkg_resources.resource_filename('bigml', "../../../%s" % filename)
def setup_module():
"""Operations to be performed before each module
"""
world.reset_api()
world.count_resources('init')
world.clear()
def teardown_module():
"""Operations to be performed after each module
"""
if os.path.exists('./tmp'):
shutil.rmtree('./tmp')
world.delete_resources()
world.count_resources('final', changed=True)
for resource_type in RESOURCE_TYPES:
resource_type = plural(resource_type)
if getattr(world, resource_type):
counters = world.counters[resource_type]
if counters['final'] == counters['init']:
assert True
else:
assert False , (
"init: %s, final: %s" %
(counters['init'], counters['final']))
def teardown_class():
"""Operations to be performed after each class
"""
world.dataset_ids = []
|
|
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
import re
import logging
import datetime
from google.appengine.api import memcache
from google.appengine.dist import use_library
use_library('django', '1.2')
from django.utils import simplejson as json
from django.utils import feedgenerator
from django.utils.html import strip_tags
from datetime import datetime
class MainPage(webapp.RequestHandler):
def get(self):
        profiles = memcache.get('list')
        #logging.info(profiles)
self.response.out.write("""
<html>
<head>
<title>Google Plus Feed</title>
<style>
body{
font-family: sans-serif;
font-size: 14px;
}
li{
font-size: 11px;
}
</style>
<script type="text/javascript" src="https://apis.google.com/js/plusone.js"></script>
</head>
<body>
<h1>Unofficial Google+ User Feed</h1>
<p>
Add the Google+ user number at the end of this URL for their profile feed. Like this: <a href="http://plusfeed.appspot.com/104961845171318028721">http://plusfeed.appspot.com/104961845171318028721</a>.
</p>
<p>
If this site is useful, remember to give it a <g:plusone></g:plusone>
</p>
<p>
Note: The feed will only display *public* items - if none of your posts are public, the feed won't work.
</p>
<p>
You can grab the source for this app here: <a href="https://github.com/russellbeattie/plusfeed">https://github.com/russellbeattie/plusfeed</a>.
</p>
<p>
<em>Originally created by <a href="http://www.russellbeattie.com">Russell Beattie</a></em>
</p>
<p>""")
        if profiles:
            self.response.out.write('<h3>')
            self.response.out.write(len(profiles))
self.response.out.write("""
Google+ profiles currently being served:</h3>
<ol>""")
            for k, v in profiles.iteritems():
self.response.out.write('<li><a href="https://plus.google.com/' + k + '">' + v + '</a> [<a href="/' + k + '">feed</a>]</li>')
self.response.out.write('</ol>')
self.response.out.write("""
</p>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-24604146-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</body>
</html>""")
class FeedPage(webapp.RequestHandler):
def get(self, p):
HTTP_DATE_FMT = "%a, %d %b %Y %H:%M:%S GMT"
if 'If-Modified-Since' in self.request.headers:
try:
last_seen = datetime.strptime(self.request.headers['If-Modified-Since'], HTTP_DATE_FMT)
ud = memcache.get('time_' + p)
if ud and last_seen and ud <= last_seen:
logging.info('returning 304')
self.response.set_status(304)
return
            except:
                # Malformed If-Modified-Since headers are simply ignored.
                pass
#logging.info(self.request.headers)
op = memcache.get(p)
if op is not None:
logging.info('delivering from cache')
self.response.headers['Content-Type'] = 'application/atom+xml'
self.response.out.write(op)
return
try:
logging.info('re-requesting feed')
url = 'https://plus.google.com/_/stream/getactivities/' + p + '/?sp=[1,2,"' + p + '",null,null,null,null,"social.google.com",[]]'
result = urlfetch.fetch(url)
if result.status_code == 200:
regex = re.compile(',,',re.M)
txt = result.content
txt = txt[5:]
txt = regex.sub(',null,',txt)
txt = regex.sub(',null,',txt)
txt = txt.replace('[,','[null,')
txt = txt.replace(',]',',null]')
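                # The stream endpoint does not return valid JSON: values are
                # omitted between commas and brackets. The rewrites above fill
                # those holes so json.loads() will accept the payload.
                # Illustrative example (not from the original source):
                #   '[[1,,2],[,]]' -> '[[1,null,2],[null,null]]'
                # The ',,' substitution runs twice because ',,,' contains two
                # overlapping ',,' matches and re.sub only replaces
                # non-overlapping occurrences in a single pass.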
obj = json.loads(txt)
posts = obj[1][0]
if not posts:
self.error(400)
self.response.out.write('<h1>400 - No Public Items Found</h1>')
return
author = posts[0][3]
updated = datetime.fromtimestamp(float(posts[0][5])/1000)
feed = feedgenerator.Atom1Feed(
title = "Google Plus User Feed - " + author,
link = "https://plus.google.com/" + p,
description = "Unofficial feed for Google Plus",
language = "en",
author_name = author,
feed_url = "http://plusfeeds.appspot.com/" + p)
count = 0
for post in posts:
#logging.info('post ' + post[21])
count = count + 1
if count > 10:
break
dt = datetime.fromtimestamp(float(post[5])/1000)
permalink = "https://plus.google.com/" + post[21]
desc = ''
if post[47]:
desc = post[47]
elif post[4]:
desc = post[4]
if post[44]:
                        desc = desc + ' <br/><br/><a href="https://plus.google.com/' + post[44][1] + '">' + post[44][0] + '</a> originally shared this post: '
if post[66]:
if post[66][0][1]:
desc = desc + ' <br/><br/><a href="' + post[66][0][1] + '">' + post[66][0][3] + '</a>'
if post[66][0][6]:
if post[66][0][6][0][1].find('image') > -1:
desc = desc + ' <p><img src="http:' + post[66][0][6][0][2] + '"/></p>'
else:
desc = desc + ' <a href="' + post[66][0][6][0][8] + '">' + post[66][0][6][0][8] + '</a>'
if desc == '':
desc = permalink
ptitle = desc
ptitle = htmldecode(ptitle)
ptitle = strip_tags(ptitle)[:75]
feed.add_item(
title = ptitle,
link = permalink,
pubdate = dt,
description = desc
)
output = feed.writeString('UTF-8')
memcache.set(p, output, 10 * 60)
memcache.set('time_' + p, updated)
                profiles = {}
                mlist = memcache.get('list')
                if mlist:
                    for k, v in mlist.iteritems():
                        profiles[k] = v
                profiles[p] = author
                memcache.set('list', profiles)
self.response.headers['Last-Modified'] = updated.strftime(HTTP_DATE_FMT)
#self.response.headers['ETag'] = '"%s"' % (content.etag,)
self.response.headers['Content-Type'] = 'application/atom+xml'
self.response.out.write(output)
else:
self.error(404)
self.response.out.write('<h1>404 Not Found</h1>')
except Exception, err:
self.error(500)
self.response.out.write('<h1>500 Server Error</h1><p>' + str(err) + '</p>')
from htmlentitydefs import name2codepoint
def htmldecode(text):
"""Decode HTML entities in the given text."""
if type(text) is unicode:
uchr = unichr
else:
uchr = lambda value: value > 255 and unichr(value) or chr(value)
def entitydecode(match, uchr=uchr):
entity = match.group(1)
if entity.startswith('#x'):
return uchr(int(entity[2:], 16))
elif entity.startswith('#'):
return uchr(int(entity[1:]))
elif entity in name2codepoint:
return uchr(name2codepoint[entity])
else:
return match.group(0)
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
return charrefpat.sub(entitydecode, text)
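# Illustrative behaviour of htmldecode() (examples, not original code):
#   htmldecode('Ben &amp; Jerry')  -> 'Ben & Jerry'
#   htmldecode('&#x27;hi&#x27;')   -> "'hi'"
#   htmldecode(u'caf&#233;')       -> u'caf\xe9'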
application = webapp.WSGIApplication([('/', MainPage), (r'/(.+)', FeedPage)],debug=False)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for check_collectd_mlab check."""
import io
import os
import socket
import time
import unittest2 as unittest
# Third-party modules.
import mock
# Module under test.
import check_collectd_mlab
# R0904: Too many public methods. Hard to avoid for unit tests.
# pylint: disable=R0904
class FakeSocketIO(object):
"""An in-memory, IO object with a socket-like interface."""
def __init__(self, initial_value=''):
self._writer = io.BytesIO()
self._reader = io.BytesIO(initial_value)
def recv(self, count=-1):
"""Reads count bytes from socket, or until EOF when count is -1."""
return self._reader.read(count)
def send(self, message):
"""Writes message to socket."""
return self._writer.write(message)
def getvalue(self):
"""Returns all bytes written to the socket."""
return self._writer.getvalue()
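# Usage sketch for FakeSocketIO (illustrative; mirrors the tests below):
#   fake = FakeSocketIO('1 default reply\n')  # what the "server" sends
#   fake.send('GETVAL "x"\n')  # recorded for later inspection
#   fake.recv()      -> '1 default reply\n'
#   fake.getvalue()  -> 'GETVAL "x"\n'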
class MLabNagiosSocketTests(unittest.TestCase):
def testunit_sock_sendcmd_RETURNS_successfully(self):
fake_sock = FakeSocketIO('1 default reply\n')
returned_value = check_collectd_mlab.sock_sendcmd(fake_sock,
'GETVAL "whatever"')
self.assertEqual(returned_value, 1)
self.assertEqual(fake_sock.getvalue(), 'GETVAL "whatever"\n')
def testunit_sock_sendcmd_WHEN_receive_bad_reply_RETURNS_zero(self):
fake_sock = FakeSocketIO('not-a-number junk\n')
returned_value = check_collectd_mlab.sock_sendcmd(fake_sock,
'GETVAL "whatever"')
self.assertEqual(returned_value, 0)
self.assertEqual(fake_sock.getvalue(), 'GETVAL "whatever"\n')
def testunit_sock_connect_WHEN_invalid_socket_name_RAISES_error(self):
with self.assertRaises(check_collectd_mlab.SocketConnectionError):
check_collectd_mlab.sock_connect('invalid_socket_name')
def testunit_sock_readline_WHEN_socket_error_RAISES_error(self):
mock_sock = mock.Mock(spec_set=socket.socket)
mock_sock.recv.side_effect = socket.error('fake error')
with self.assertRaises(check_collectd_mlab.SocketReadlineError):
check_collectd_mlab.sock_readline(mock_sock)
class MLabCollectdAssertionTests(unittest.TestCase):
def setUp(self):
self.testdata_dir = os.path.join(
os.path.dirname(check_collectd_mlab.__file__), 'testdata')
check_collectd_mlab.COLLECTD_BIN = (
os.path.join(self.testdata_dir, 'fake_collectd_bin'))
check_collectd_mlab.COLLECTD_NAGIOS = (
os.path.join(self.testdata_dir, 'fake_nagios_bin'))
check_collectd_mlab.COLLECTD_PID = (
os.path.join(self.testdata_dir, 'fake_pid'))
check_collectd_mlab.COLLECTD_UNIXSOCK = (
os.path.join(self.testdata_dir, 'fake_socket'))
check_collectd_mlab.VSYSPATH_BACKEND = (
os.path.join(self.testdata_dir, 'fake_backend'))
check_collectd_mlab.VSYSPATH_SLICE = (
os.path.join(self.testdata_dir, 'fake_slice'))
check_collectd_mlab.SNMP_COMMUNITY = (
os.path.join(self.testdata_dir, 'fake_snmp_community'))
@mock.patch('check_collectd_mlab.sock_connect')
def testunit_assert_collectd_responds_WHEN_sock_sendcmd_fails(
self, mock_sock_connect):
"""Fails when sending a command after successfully creating a connection."""
mock_sock = mock.Mock(spec_set=socket.socket)
mock_sock.send.side_effect = socket.error('fake socket error')
mock_sock_connect.return_value = mock_sock
with self.assertRaises(check_collectd_mlab.SocketSendCommandError):
check_collectd_mlab.assert_collectd_responds()
mock_sock_connect.assert_called_with(
check_collectd_mlab.COLLECTD_UNIXSOCK)
@mock.patch('check_collectd_mlab.sock_connect')
def testunit_assert_collectd_responds_WHEN_sock_readline_fails(
self, mock_sock_connect):
"""Fails when reading from socket after a successful connection."""
# After sending a default reply, the fake socket reaches EOF.
mock_sock_connect.return_value = FakeSocketIO('1 default reply\n')
with self.assertRaises(check_collectd_mlab.SocketReadlineError):
check_collectd_mlab.assert_collectd_responds()
mock_sock_connect.assert_called_with(
check_collectd_mlab.COLLECTD_UNIXSOCK)
@mock.patch('os.access')
def testunit_assert_collectd_responds_WHEN_filesystem_is_readonly(
self, mock_access):
# Mock out os.access to guarantee that write access is rejected.
mock_access.return_value = False
with self.assertRaises(check_collectd_mlab.ReadonlyFilesystemError):
check_collectd_mlab.assert_collectd_responds()
self.assertTrue(mock_access.called)
def testunit_assert_collectd_responds_WHEN_sock_connect_fails(self):
"""Fails when creating a socket connection."""
check_collectd_mlab.COLLECTD_UNIXSOCK = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.SocketConnectionError):
check_collectd_mlab.assert_collectd_responds()
def testunit_assert_collectd_installed_WHEN_bin_missing_RAISES_error(self):
check_collectd_mlab.COLLECTD_BIN = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.MissingBinaryError):
check_collectd_mlab.assert_collectd_installed()
def testunit_assert_collectd_installed_WHEN_nagios_bin_missing_error(self):
check_collectd_mlab.COLLECTD_NAGIOS = (
os.path.join(self.testdata_dir, 'does_not_exist'))
with self.assertRaises(check_collectd_mlab.MissingNagiosBinaryError):
check_collectd_mlab.assert_collectd_installed()
def testunit_assert_collectd_installed_WHEN_bad_socket_RAISES_error(self):
check_collectd_mlab.COLLECTD_UNIXSOCK = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.MissingSocketError):
check_collectd_mlab.assert_collectd_installed()
def testunit_assert_collectd_installed_WHEN_missing_community_RAISES_error(
self):
check_collectd_mlab.SNMP_COMMUNITY = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.MissingSNMPCommunityError):
check_collectd_mlab.assert_collectd_installed()
def testunit_assert_collectd_installed_WHEN_missing_updated_RAISES_error(
self):
# Use default value for check_collectd_mlab.SNMP_COMMUNITY.
with self.assertRaises(
check_collectd_mlab.MissingUpdatedSNMPCommunityError):
check_collectd_mlab.assert_collectd_installed()
def testunit_assert_collectd_vsys_setup_WHEN_vsys_backend_is_missing(self):
check_collectd_mlab.VSYSPATH_BACKEND = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.MissingVsysBackendError):
check_collectd_mlab.assert_collectd_vsys_setup()
def testunit_assert_collectd_vsys_setup_WHEN_vsys_slice_is_missing(self):
check_collectd_mlab.VSYSPATH_SLICE = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.MissingVsysFrontendError):
check_collectd_mlab.assert_collectd_vsys_setup()
def testunit_assert_collectd_vsys_setup_WHEN_vsys_acl_is_missing(self):
check_collectd_mlab.VSYSPATH_ACL = 'does_not_exist'
with self.assertRaises(check_collectd_mlab.MissingVsysAclError):
check_collectd_mlab.assert_collectd_vsys_setup()
def testunit_assert_collectd_vsys_setup_WHEN_acl_incomplete(self):
check_collectd_mlab.VSYSPATH_ACL = os.path.join(
self.testdata_dir, 'acl_missing_slice_name')
with self.assertRaises(
check_collectd_mlab.MissingSliceFromVsysAclError):
check_collectd_mlab.assert_collectd_vsys_setup()
class MLabNagiosTests(unittest.TestCase):
@mock.patch('check_collectd_mlab.run_collectd_nagios')
def testcover_assert_collectd_nagios_levels(self, mock_run_collectd_nagios):
        # This is not ideal, but it's just a coverage test. A non-zero
        # return value causes a failure, so make the first (and only
        # mocked) call fail.
mock_run_collectd_nagios.side_effect = [1]
with self.assertRaises(check_collectd_mlab.NagiosStateError):
check_collectd_mlab.assert_collectd_nagios_levels()
@mock.patch('subprocess.Popen')
def testcover_run_collectd_nagios(self, mock_popen):
mock_popen.return_value.wait.return_value = 2
returned_value = check_collectd_mlab.run_collectd_nagios(
'host', 'metric', 'value', 'warning', 'critical')
self.assertEqual(returned_value, 2)
self.assertTrue(mock_popen.called)
@mock.patch('check_collectd_mlab.assert_collectd_installed')
@mock.patch('check_collectd_mlab.assert_collectd_responds')
@mock.patch('check_collectd_mlab.assert_collectd_vsys_setup')
@mock.patch('check_collectd_mlab.assert_collectd_nagios_levels')
@mock.patch('check_collectd_mlab.assert_disk_last_sync_time')
def testcover_check_collectd(
self, mock_last_sync_time, mock_collectd_nagios_levels,
mock_vsys_setup, mock_collectd_responds, mock_collectd_installed):
state, _ = check_collectd_mlab.check_collectd()
self.assertEqual(state, check_collectd_mlab.STATE_OK)
self.assertTrue(mock_collectd_installed.called)
self.assertTrue(mock_collectd_responds.called)
self.assertTrue(mock_collectd_nagios_levels.called)
self.assertTrue(mock_last_sync_time.called)
@mock.patch('check_collectd_mlab.assert_collectd_installed')
@mock.patch('check_collectd_mlab.assert_collectd_responds')
@mock.patch('check_collectd_mlab.assert_collectd_vsys_setup')
@mock.patch('check_collectd_mlab.assert_collectd_nagios_levels')
@mock.patch('check_collectd_mlab.assert_disk_last_sync_time')
def testcover_check_collectd_WHEN_nagios_error(
self, mock_last_sync_time, mock_collectd_nagios_levels,
mock_vsys_setup, mock_collectd_responds, mock_collectd_installed):
mock_collectd_nagios_levels.side_effect = (
check_collectd_mlab.NagiosStateError('error'))
state, message = check_collectd_mlab.check_collectd()
self.assertEqual(state, check_collectd_mlab.STATE_UNKNOWN)
self.assertEqual(message, 'error')
self.assertTrue(mock_collectd_installed.called)
self.assertTrue(mock_collectd_responds.called)
self.assertTrue(mock_collectd_nagios_levels.called)
self.assertTrue(mock_last_sync_time.called)
@mock.patch('check_collectd_mlab.assert_collectd_installed')
def testcover_check_collectd_WHEN_state_critical(self,
mock_collectd_installed):
mock_collectd_installed.side_effect = (
check_collectd_mlab.CriticalError('fail'))
state, _ = check_collectd_mlab.check_collectd()
self.assertEqual(state, check_collectd_mlab.STATE_CRITICAL)
self.assertTrue(mock_collectd_installed.called)
def testunit_alarm(self):
with check_collectd_mlab.AlarmAfterTimeout(1):
try:
time.sleep(5)
self.fail('Alarm did not trigger.') # pragma: no cover.
except check_collectd_mlab.TimeoutError:
pass
@mock.patch('sys.stdout')
@mock.patch('check_collectd_mlab.check_collectd')
def testcover_main(self, mock_check_status, mock_stdout):
mock_check_status.return_value = (check_collectd_mlab.STATE_OK, 'ok')
with self.assertRaises(SystemExit):
check_collectd_mlab.main()
self.assertTrue(mock_stdout.write.called)
self.assertTrue(mock_check_status.called)
@mock.patch('sys.stdout')
@mock.patch('check_collectd_mlab.check_collectd')
def testcover_main_WHEN_timeout(self, mock_check_status, mock_stdout):
mock_check_status.side_effect = check_collectd_mlab.TimeoutError(
'timeout')
with self.assertRaises(SystemExit):
check_collectd_mlab.main()
self.assertTrue(mock_stdout.write.called)
self.assertTrue(mock_check_status.called)
if __name__ == "__main__":
unittest.main()
|
|
import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will result in the feature count growing at a faster-than-linear rate
"""
AGGREGATORS = [
# to_aggregator("max"),
# to_aggregator("min"),
# to_aggregator("median"),
# to_aggregator("mode"),
# to_aggregator("mean"),
# to_aggregator("sum"),
]
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = True
CONVERT_TO_CATEGORICAL = True
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
lambda x: len(set(x)), # number of unique
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function
converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=lambda x, *args: x, # identity function
B=lambda x, *args: x, # identity function
C=lambda x, *args: LabelBinarizer().fit_transform(x),
)
CATEGORICAL_CONVERTERS = dict(
N=lambda x, *args: Discretizer().fit_transform(x).flatten(),
B=lambda x, *args: x, # identity function
C=lambda x, *args: x, # identity function
)
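# Illustrative use of the converters above (assumes an integer-coded
# categorical column; not part of the original configuration):
#
#   NUMERICAL_CONVERTERS['C'](np.array([0, 1, 2, 1]))
#   -> a (4, 3) one-hot matrix from LabelBinarizer, which is why
#      NUMERICAL_CAN_BE_2D is True below, while CATEGORICAL_CONVERTERS
#      all return 1-D output and CATEGORICAL_CAN_BE_2D is False.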
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converters can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = True
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
Ridge(),
LinearRegression(),
DecisionTreeRegressor(random_state=0),
RandomForestRegressor(random_state=0),
GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
LogisticRegression(random_state=0),
DecisionTreeClassifier(random_state=0),
RandomForestClassifier(random_state=0),
GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsClassifier(),
GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
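# Illustrative example (not original code): if a feature scores 0.8 in
# the A->B direction and 0.3 in the B->A direction, the three functions
# above yield (0.8, 0.3, 0.5), exposing both absolute and relative
# information to the downstream model.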
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
|
|
"""SCons.Job
This module defines the Serial and Parallel classes that execute tasks to
complete a build. The Jobs class provides a higher level interface to start,
stop, and wait on jobs.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Job.py 3897 2009/01/13 06:45:54 scons"
import os
import signal

import SCons.Errors
import SCons.Warnings
# The default stack size (in kilobytes) of the threads used to execute
# jobs in parallel.
#
# We use a stack size of 256 kilobytes. The default on some platforms
# is too large and prevents us from creating enough threads to fully
# parallelize the build. For example, the default stack size on Linux
# is 8 MBytes.
explicit_stack_size = None
default_stack_size = 256
interrupt_msg = 'Build interrupted.'
class InterruptState:
def __init__(self):
self.interrupted = False
def set(self):
self.interrupted = True
def __call__(self):
return self.interrupted
class Jobs:
"""An instance of this class initializes N jobs, and provides
methods for starting, stopping, and waiting on all N jobs.
"""
def __init__(self, num, taskmaster):
"""
create 'num' jobs using the given taskmaster.
If 'num' is 1 or less, then a serial job will be used,
otherwise a parallel job with 'num' worker threads will
be used.
The 'num_jobs' attribute will be set to the actual number of jobs
allocated. If more than one job is requested but the Parallel
class can't do it, it gets reset to 1. Wrapping interfaces that
care should check the value of 'num_jobs' after initialization.
"""
self.job = None
if num > 1:
stack_size = explicit_stack_size
if stack_size is None:
stack_size = default_stack_size
try:
self.job = Parallel(taskmaster, num, stack_size)
self.num_jobs = num
except NameError:
pass
if self.job is None:
self.job = Serial(taskmaster)
self.num_jobs = 1
def run(self, postfunc=lambda: None):
"""Run the jobs.
        postfunc() will be invoked after the jobs have run. It will be
        invoked even if the jobs are interrupted by a keyboard
        interrupt (well, in fact by a signal such as SIGINT,
        SIGTERM or SIGHUP). The execution of postfunc() is protected
against keyboard interrupts and is guaranteed to run to
completion."""
self._setup_sig_handler()
try:
self.job.start()
finally:
postfunc()
self._reset_sig_handler()
def were_interrupted(self):
"""Returns whether the jobs were interrupted by a signal."""
return self.job.interrupted()
def _setup_sig_handler(self):
"""Setup an interrupt handler so that SCons can shutdown cleanly in
various conditions:
a) SIGINT: Keyboard interrupt
b) SIGTERM: kill or system shutdown
c) SIGHUP: Controlling shell exiting
        We handle all of these cases by stopping the taskmaster. It
        turns out that it is very difficult to stop the build process
        by asynchronously throwing an exception such as
        KeyboardInterrupt. For example, the Python Condition
        variables (threading.Condition) and Queues do not seem to be
        asynchronous-exception-safe. It would require adding a whole
        bunch of try/finally blocks and except KeyboardInterrupt
        handlers all over the place.
Note also that we have to be careful to handle the case when
SCons forks before executing another process. In that case, we
want the child to exit immediately.
"""
def handler(signum, stack, self=self, parentpid=os.getpid()):
if os.getpid() == parentpid:
self.job.taskmaster.stop()
self.job.interrupted.set()
else:
os._exit(2)
self.old_sigint = signal.signal(signal.SIGINT, handler)
self.old_sigterm = signal.signal(signal.SIGTERM, handler)
try:
self.old_sighup = signal.signal(signal.SIGHUP, handler)
except AttributeError:
pass
def _reset_sig_handler(self):
"""Restore the signal handlers to their previous state (before the
call to _setup_sig_handler()."""
signal.signal(signal.SIGINT, self.old_sigint)
signal.signal(signal.SIGTERM, self.old_sigterm)
try:
signal.signal(signal.SIGHUP, self.old_sighup)
except AttributeError:
pass
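# Minimal usage sketch (illustrative; a real taskmaster comes from
# SCons.Taskmaster and is not built by hand like this):
#
#   jobs = Jobs(4, taskmaster)    # Parallel job if threading is available
#   jobs.run()                    # blocks until all tasks are processed
#   if jobs.were_interrupted():
#       print interrupt_msg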
class Serial:
"""This class is used to execute tasks in series, and is more efficient
than Parallel, but is only appropriate for non-parallel builds. Only
one instance of this class should be in existence at a time.
This class is not thread safe.
"""
def __init__(self, taskmaster):
"""Create a new serial job given a taskmaster.
The taskmaster's next_task() method should return the next task
that needs to be executed, or None if there are no more tasks. The
taskmaster's executed() method will be called for each task when it
is successfully executed or failed() will be called if it failed to
execute (e.g. execute() raised an exception)."""
self.taskmaster = taskmaster
self.interrupted = InterruptState()
def start(self):
"""Start the job. This will begin pulling tasks from the taskmaster
and executing them, and return when there are no more tasks. If a task
fails to execute (i.e. execute() raises an exception), then the job will
stop."""
while 1:
task = self.taskmaster.next_task()
if task is None:
break
try:
task.prepare()
if task.needs_execute():
task.execute()
except:
if self.interrupted():
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
else:
task.exception_set()
# Let the failed() callback function arrange for the
# build to stop if that's appropriate.
task.failed()
else:
task.executed()
task.postprocess()
self.taskmaster.cleanup()
# Trap import failure so that everything in the Job module but the
# Parallel class (and its dependent classes) will work if the interpreter
# doesn't support threads.
try:
import Queue
import threading
except ImportError:
pass
else:
class Worker(threading.Thread):
"""A worker thread waits on a task to be posted to its request queue,
dequeues the task, executes it, and posts a tuple including the task
and a boolean indicating whether the task executed successfully. """
def __init__(self, requestQueue, resultsQueue, interrupted):
threading.Thread.__init__(self)
self.setDaemon(1)
self.requestQueue = requestQueue
self.resultsQueue = resultsQueue
self.interrupted = interrupted
self.start()
def run(self):
while 1:
task = self.requestQueue.get()
if task is None:
# The "None" value is used as a sentinel by
# ThreadPool.cleanup(). This indicates that there
# are no more tasks, so we should quit.
break
try:
if self.interrupted():
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
task.execute()
except:
task.exception_set()
ok = False
else:
ok = True
self.resultsQueue.put((task, ok))
class ThreadPool:
"""This class is responsible for spawning and managing worker threads."""
def __init__(self, num, stack_size, interrupted):
"""Create the request and reply queues, and 'num' worker threads.
One must specify the stack size of the worker threads. The
stack size is specified in kilobytes.
"""
self.requestQueue = Queue.Queue(0)
self.resultsQueue = Queue.Queue(0)
try:
prev_size = threading.stack_size(stack_size*1024)
except AttributeError, e:
# Only print a warning if the stack size has been
# explicitly set.
if not explicit_stack_size is None:
msg = "Setting stack size is unsupported by this version of Python:\n " + \
e.args[0]
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
except ValueError, e:
msg = "Setting stack size failed:\n " + str(e)
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
# Create worker threads
self.workers = []
for _ in range(num):
worker = Worker(self.requestQueue, self.resultsQueue, interrupted)
self.workers.append(worker)
# Once we drop Python 1.5 we can change the following to:
#if 'prev_size' in locals():
if 'prev_size' in locals().keys():
threading.stack_size(prev_size)
def put(self, task):
"""Put task into request queue."""
self.requestQueue.put(task)
def get(self):
"""Remove and return a result tuple from the results queue."""
return self.resultsQueue.get()
def preparation_failed(self, task):
self.resultsQueue.put((task, False))
def cleanup(self):
"""
Shuts down the thread pool, giving each worker thread a
chance to shut down gracefully.
"""
# For each worker thread, put a sentinel "None" value
# on the requestQueue (indicating that there's no work
# to be done) so that each worker thread will get one and
# terminate gracefully.
for _ in self.workers:
self.requestQueue.put(None)
# Wait for all of the workers to terminate.
#
# If we don't do this, later Python versions (2.4, 2.5) often
# seem to raise exceptions during shutdown. This happens
# in requestQueue.get(), as an assertion failure that
# requestQueue.not_full is notified while not acquired,
# seemingly because the main thread has shut down (or is
# in the process of doing so) while the workers are still
# trying to pull sentinels off the requestQueue.
#
# Normally these terminations should happen fairly quickly,
# but we'll stick a one-second timeout on here just in case
# someone gets hung.
for worker in self.workers:
worker.join(1.0)
self.workers = []
class Parallel:
"""This class is used to execute tasks in parallel, and is somewhat
less efficient than Serial, but is appropriate for parallel builds.
This class is thread safe.
"""
def __init__(self, taskmaster, num, stack_size):
"""Create a new parallel job given a taskmaster.
The taskmaster's next_task() method should return the next
task that needs to be executed, or None if there are no more
tasks. The taskmaster's executed() method will be called
for each task when it is successfully executed or failed()
will be called if the task failed to execute (i.e. execute()
raised an exception).
Note: calls to taskmaster are serialized, but calls to
execute() on distinct tasks are not serialized, because
that is the whole point of parallel jobs: they can execute
multiple tasks simultaneously. """
self.taskmaster = taskmaster
self.interrupted = InterruptState()
self.tp = ThreadPool(num, stack_size, self.interrupted)
self.maxjobs = num
def start(self):
"""Start the job. This will begin pulling tasks from the
taskmaster and executing them, and return when there are no
more tasks. If a task fails to execute (i.e. execute() raises
an exception), then the job will stop."""
jobs = 0
while 1:
# Start up as many available tasks as we're
# allowed to.
while jobs < self.maxjobs:
task = self.taskmaster.next_task()
if task is None:
break
try:
# prepare task for execution
task.prepare()
except:
task.exception_set()
task.failed()
task.postprocess()
else:
if task.needs_execute():
# dispatch task
self.tp.put(task)
jobs = jobs + 1
else:
task.executed()
task.postprocess()
if not task and not jobs: break
# Let any/all completed tasks finish up before we go
# back and put the next batch of tasks on the queue.
while 1:
task, ok = self.tp.get()
jobs = jobs - 1
if ok:
task.executed()
else:
if self.interrupted():
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
# Let the failed() callback function arrange
# for the build to stop if that's appropriate.
task.failed()
task.postprocess()
if self.tp.resultsQueue.empty():
break
self.tp.cleanup()
self.taskmaster.cleanup()
|
|
from __future__ import unicode_literals
import datetime
import os
import djcelery
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from .utils import * # noqa
djcelery.setup_loader()
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
))
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '127.0.0.1:6379:10',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
},
},
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tracpro',
'CONN_MAX_AGE': 60,
'ATOMIC_REQUESTS': True,
'OPTIONS': {},
},
}
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'compressor',
'djcelery',
'guardian',
'mptt',
'sorl.thumbnail',
'smart_selects',
'smartmin',
'smartmin.csv_imports',
'smartmin.users',
'tracpro.orgs_ext.apps.DashOrgConfig',
'dash.utils',
'tracpro.baseline',
'tracpro.charts',
'tracpro.contacts',
'tracpro.groups',
'tracpro.home',
'tracpro.msgs',
'tracpro.orgs_ext',
'tracpro.polls',
'tracpro.profiles',
]
LANGUAGE_CODE = 'en'
LANGUAGES = [
('en', _("English")),
('fr', _("French")),
('es', _("Spanish")),
('ps', _("Pashto")),
('fa', _("Persian")),
]
LOCALE_PATHS = [
os.path.join(PROJECT_ROOT, 'locale'),
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'celery': {
'level': 'ERROR'
},
'django': {
'level': 'ERROR',
'propagate': True,
},
'django.request': {
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'level': 'ERROR',
},
'tracpro': {
'level': 'INFO',
},
},
'root': {
'handlers': ['console', 'mail_admins'],
}
}
LOGIN_REDIRECT_URL = reverse_lazy('home.home')
LOGIN_URL = reverse_lazy('users.user_login')
LOGOUT_REDIRECT_URL = reverse_lazy('home.home')
LOGOUT_URL = reverse_lazy('users.user_logout')
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public', 'media')
MEDIA_URL = "/media/"
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'smartmin.middleware.AjaxRedirect',
'tracpro.orgs_ext.middleware.TracproOrgMiddleware',
'tracpro.profiles.middleware.ForcePasswordChangeMiddleware',
'tracpro.groups.middleware.UserRegionsMiddleware',
'tracpro.orgs_ext.middleware.HandleTembaAPIError',
)
ROOT_URLCONF = 'tracpro.urls'
SESSION_COOKIE_NAME = 'tracpro'
SITE_DATE_FORMAT = r'%b %d, %Y'
SITE_ID = 1
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'tracpro', 'static'),
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')
STATIC_URL = '/sitestatic/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'dash.orgs.context_processors.user_group_perms_processor',
'dash.orgs.context_processors.set_org_processor',
'dash.context_processors.lang_direction',
'tracpro.orgs_ext.context_processors.user_is_admin',
'tracpro.orgs_ext.context_processors.available_languages',
'tracpro.groups.context_processors.show_subregions_toggle_form',
)
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'tracpro', 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TIME_ZONE = 'GMT'
USER_TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# === Third-party settings. === #
ANONYMOUS_USER_ID = -1
BROKER_URL = CELERY_RESULT_BACKEND = 'redis://localhost:6379/4'
CELERYD_HIJACK_ROOT_LOGGER = False
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_TIMEZONE = 'UTC'
ORG_TASK_TIMEOUT = datetime.timedelta(minutes=10)
def _org_scheduler_task(task_name):
return {
'task': 'tracpro.orgs_ext.tasks.ScheduleTaskForActiveOrgs',
'schedule': ORG_TASK_TIMEOUT,
'kwargs': {
'task_name': task_name,
},
}
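# For illustration, the 'sync-polls' entry below expands to:
#   {'task': 'tracpro.orgs_ext.tasks.ScheduleTaskForActiveOrgs',
#    'schedule': datetime.timedelta(minutes=10),
#    'kwargs': {'task_name': 'tracpro.polls.tasks.SyncOrgPolls'}}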
CELERYBEAT_SCHEDULE = {
'sync-polls': _org_scheduler_task('tracpro.polls.tasks.SyncOrgPolls'),
'sync-contacts': _org_scheduler_task('tracpro.contacts.tasks.SyncOrgContacts'),
'sync-data-fields': _org_scheduler_task('tracpro.contacts.tasks.SyncOrgDataFields'),
'sync-boundaries': _org_scheduler_task('tracpro.groups.tasks.SyncOrgBoundaries'),
'fetch-runs': _org_scheduler_task('tracpro.polls.tasks.FetchOrgRuns'),
'fetch-inbox-messages': _org_scheduler_task('tracpro.msgs.tasks.FetchOrgInboxMessages'),
}
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/less', 'tracpro.compress.LessFilter'),
)
DEFAULT_LANGUAGE = 'en'
GROUP_PERMISSIONS = {
"Administrators": (
'orgs.org_home',
'orgs.org_edit',
'baseline.baselineterm.*',
'contacts.contact.*',
'groups.boundary.*',
'groups.group.*',
'groups.region.*',
'msgs.message.*',
'msgs.inboxmessage.*',
'polls.poll.*',
'polls.pollrun.*',
'polls.response.*',
'profiles.profile.*',
),
"Editors": (
'baseline.baselineterm.*',
'contacts.contact.*',
'groups.boundary.*',
'groups.group_most_active',
'groups.region_most_active',
'msgs.inboxmessage.*',
'msgs.message_send',
'msgs.message_by_contact',
'polls.poll_read',
'polls.pollrun_create',
'polls.pollrun_restart',
'polls.pollrun_read',
'polls.pollrun_participation',
'polls.pollrun_latest',
'polls.pollrun_list',
'polls.pollrun_by_poll',
'polls.response_by_contact',
'polls.response_by_pollrun',
'profiles.profile_user_read',
),
"Viewers": (),
}
ORG_CONFIG_FIELDS = [
{
'name': 'available_languages',
'field': {
'help_text': _("The languages used by administrators in your organization"),
'required': True,
},
},
{
'name': 'show_spoof_data',
'field': {
'help_text': _("Whether to show spoof data for this organization"),
'required': False,
},
},
]
PERMISSIONS = {
'*': (
'create', # can create an object
'read', # can view an object's details
'update', # can update an object
'delete', # can delete an object
'list', # can view a list of the objects
),
'orgs.org': ('create', 'update', 'list', 'edit', 'home'),
'baseline.baselineterm': ('create', 'read', 'update', 'delete', 'list', 'data_spoof', 'clear_spoof'),
'contacts.contact': ('create', 'read', 'update', 'delete', 'list'),
'groups.group': ('list', 'most_active', 'select'),
'groups.region': ('list', 'most_active', 'select', 'update_all'),
'msgs.message': ('list', 'send', 'by_contact'),
'msgs.inboxmessage': ('read', 'list', 'conversation'),
'polls.poll': ('read', 'update', 'list', 'select'),
'polls.pollrun': ('create', 'restart', 'read', 'participation', 'list', 'by_poll', 'latest'),
'polls.response': ('by_pollrun', 'by_contact'),
# can't create profiles.user.* permissions because we don't own User
'profiles.profile': ('user_create', 'user_read', 'user_update', 'user_list'),
}
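# Illustration of how these entries are expanded (an assumption about
# smartmin's permission scheme, not taken from this codebase):
#   'polls.poll': ('read', 'update', 'list', 'select') creates
#   codenames like poll_read, poll_update, poll_list and poll_select,
#   which GROUP_PERMISSIONS can grant individually (e.g.
#   'polls.poll_read') or wholesale via 'polls.poll.*'.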
RTL_LANGUAGES = ['ps', 'fa']
SITE_ALLOW_NO_ORG = (
'orgs_ext.org_create',
'orgs_ext.org_update',
'orgs_ext.org_list',
'profiles.admin_create',
'profiles.admin_update',
'profiles.admin_list',
'set_language',
)
SITE_API_HOST = 'rapidpro.io'
SITE_API_USER_AGENT = 'tracpro/1.0'
SITE_CHOOSER_URL_NAME = 'orgs_ext.org_chooser'
SITE_CHOOSER_TEMPLATE = 'org_chooser.html'
SITE_HOST_PATTERN = 'http://%s.localhost:8000'
SITE_USER_HOME = '/'
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import itertools
import ddt
import glanceclient.exc
import mock
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.image import glance
from cinder import test
from cinder.tests.unit.glance import stubs as glance_stubs
CONF = cfg.CONF
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
class TestGlanceSerializer(test.TestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'visibility': 'public',
'protected': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'device': 'bbb'},
{'device': 'yyy'}],
'block_device_mapping': [
{'device_name': '/dev/fake'},
{'device_name': '/dev/fake0'}]}}
converted_expected = {
'name': 'image1',
'visibility': 'public',
'protected': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings':
'[{"device": "bbb"}, '
'{"device": "yyy"}]',
'block_device_mapping':
'[{"device_name": "/dev/fake"}, '
'{"device_name": "/dev/fake0"}]'}}
converted = glance._convert_to_string(metadata)
self.assertEqual(converted_expected, converted)
self.assertEqual(metadata, glance._convert_from_string(converted))
@ddt.ddt
class TestGlanceImageService(test.TestCase):
"""Tests the Glance image service.
At a high level, the translations involved are:
1. Glance -> ImageService - This is needed so we can support
multiple ImageServices (Glance, Local, etc)
2. ImageService -> API - This is needed so we can support multiple
APIs (OpenStack, EC2)
"""
NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
def setUp(self):
super(TestGlanceImageService, self).setUp()
client = glance_stubs.StubGlanceClient()
service_catalog = [{u'type': u'image', u'name': u'glance',
u'endpoints': [{
u'publicURL': u'http://example.com:9292'}]}]
self.service = self._create_image_service(client)
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.context.service_catalog = service_catalog
self.mock_object(glance.time, 'sleep', return_value=None)
def _create_image_service(self, client):
def _fake_create_glance_client(context, netloc, use_ssl, version):
return client
self.mock_object(glance, '_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
def _make_fixture(**kwargs):
fixture = {'name': None,
'properties': {},
'status': None,
'visibility': None,
'protected': None}
fixture.update(kwargs)
return fixture
def _make_datetime_fixture(self):
return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
updated_at=self.NOW_GLANCE_FORMAT,
deleted_at=self.NOW_GLANCE_FORMAT)
def test_get_api_servers(self):
result = glance.get_api_servers(self.context)
expected = (u'example.com:9292', False)
self.assertEqual(expected, next(result))
def test_get_api_servers_not_mounted_at_root_and_ssl(self):
service_catalog = [{u'type': u'image', u'name': u'glance',
u'endpoints': [{
u'publicURL': u'https://example.com/image'}]}]
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.context.service_catalog = service_catalog
result = glance.get_api_servers(self.context)
expected = (u'example.com/image', True)
self.assertEqual(expected, next(result))
def test_create_with_instance_id(self):
"""Ensure instance_id is persisted as an image-property."""
fixture = {'name': 'test image',
'is_public': False,
'protected': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'test image',
'protected': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted': None,
'status': None,
'properties': {'instance_id': '42', 'is_public': False,
'user_id': 'fake'},
'owner': None,
'visibility': None,
}
self.assertDictEqual(expected, image_meta)
image_metas = self.service.detail(self.context)
self.assertDictEqual(expected, image_metas[0])
def test_create_without_instance_id(self):
"""Test Creating images without instance_id.
Ensure we can create an image without having to specify an
instance_id. Public images are an example of an image not tied to an
instance.
"""
fixture = {'name': 'test image', 'is_public': False,
'protected': False}
image_id = self.service.create(self.context, fixture)['id']
expected = {
'id': image_id,
'name': 'test image',
'protected': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted': None,
'status': None,
'properties': {'is_public': False},
'owner': None,
'visibility': None,
}
actual = self.service.show(self.context, image_id)
self.assertDictEqual(expected, actual)
def test_create(self):
fixture = self._make_fixture(name='test image')
num_images = len(self.service.detail(self.context))
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertEqual(num_images + 1,
len(self.service.detail(self.context)))
def test_create_and_show_non_existing_image(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
'bad image id')
def test_detail_private_image(self):
fixture = self._make_fixture(name='test image')
fixture['visibility'] = 'private'
fixture['protected'] = False
properties = {'owner_id': 'proj1'}
fixture['properties'] = properties
self.service.create(self.context, fixture)
proj = self.context.project_id
self.context.project_id = 'proj1'
image_metas = self.service.detail(self.context)
self.context.project_id = proj
self.assertEqual(1, len(image_metas))
self.assertEqual('test image', image_metas[0]['name'])
self.assertEqual('private', image_metas[0]['visibility'])
def test_detail_v1(self):
"""Confirm we send is_public = None as default when using Glance v1."""
self.override_config('glance_api_version', 1)
with mock.patch.object(self.service, '_client') as client_mock:
client_mock.return_value = []
result = self.service.detail(self.context)
self.assertListEqual([], result)
client_mock.call.assert_called_once_with(self.context, 'list',
filters={'is_public': 'none'})
def test_detail_v2(self):
"""Check we don't send is_public key by default with Glance v2."""
self.override_config('glance_api_version', 2)
with mock.patch.object(self.service, '_client') as client_mock:
client_mock.return_value = []
result = self.service.detail(self.context)
self.assertListEqual([], result)
client_mock.call.assert_called_once_with(self.context, 'list')
def test_detail_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, marker=ids[1])
self.assertEqual(8, len(image_metas))
i = 2
for meta in image_metas:
expected = {
'id': ids[i],
'status': None,
'protected': None,
'name': 'TestImage %d' % (i),
'properties': {'properties': {}},
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted': None,
'owner': None,
'visibility': None,
}
self.assertDictEqual(expected, meta)
i = i + 1
def test_detail_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, limit=5)
self.assertEqual(5, len(image_metas))
def test_detail_default_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context)
for i, meta in enumerate(image_metas):
self.assertEqual(meta['name'], 'TestImage %d' % (i))
def test_detail_marker_and_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, marker=ids[3], limit=5)
self.assertEqual(5, len(image_metas))
i = 4
for meta in image_metas:
expected = {
'id': ids[i],
'status': None,
'protected': None,
'name': 'TestImage %d' % (i),
'properties': {'properties': {}},
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted': None,
'owner': None,
'visibility': None,
}
self.assertDictEqual(expected, meta)
i = i + 1
def test_detail_invalid_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
self.assertRaises(exception.Invalid, self.service.detail,
self.context, marker='invalidmarker')
def test_update(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
self.service.update(self.context, image_id, fixture)
new_image_data = self.service.show(self.context, image_id)
self.assertEqual('new image name', new_image_data['name'])
def test_update_v2(self):
self.flags(glance_api_version=2)
self.test_update()
def test_update_with_data(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
data = '*' * 256
self.service.update(self.context, image_id, fixture, data=data)
new_image_data = self.service.show(self.context, image_id)
self.assertEqual(256, new_image_data['size'])
self.assertEqual('new image name', new_image_data['name'])
def test_update_with_data_v2(self):
self.flags(glance_api_version=2)
self.test_update_with_data()
@mock.patch.object(glance.GlanceImageService, '_translate_from_glance')
@mock.patch.object(glance.GlanceImageService, 'show')
@ddt.data(1, 2)
def test_update_purge_props(self, ver, show, translate_from_glance):
self.flags(glance_api_version=ver)
image_id = mock.sentinel.image_id
client = mock.Mock(call=mock.Mock())
service = glance.GlanceImageService(client=client)
image_meta = {'properties': {'k1': 'v1'}}
client.call.return_value = {'k1': 'v1'}
if ver == 2:
show.return_value = {'properties': {'k2': 'v2'}}
translate_from_glance.return_value = image_meta.copy()
ret = service.update(self.context, image_id, image_meta)
self.assertDictEqual(image_meta, ret)
if ver == 2:
client.call.assert_called_once_with(
self.context, 'update', image_id, k1='v1', remove_props=['k2'])
else:
client.call.assert_called_once_with(
self.context, 'update', image_id, properties={'k1': 'v1'},
purge_props=True)
translate_from_glance.assert_called_once_with(self.context,
{'k1': 'v1'})
def test_delete(self):
fixture1 = self._make_fixture(name='test image 1')
fixture2 = self._make_fixture(name='test image 2')
fixtures = [fixture1, fixture2]
num_images = len(self.service.detail(self.context))
self.assertEqual(0, num_images)
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.detail(self.context))
self.assertEqual(2, num_images)
self.service.delete(self.context, ids[0])
num_images = len(self.service.detail(self.context))
self.assertEqual(1, num_images)
def test_show_passes_through_to_client(self):
fixture = self._make_fixture(name='image1', is_public=True)
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'image1',
'protected': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted': None,
'status': None,
'properties': {'is_public': True, 'properties': {}},
'owner': None,
'visibility': None
}
self.assertEqual(expected, image_meta)
def test_show_raises_when_no_authtoken_in_the_context(self):
fixture = self._make_fixture(name='image1',
is_public=False,
protected=False)
image_id = self.service.create(self.context, fixture)['id']
self.context.auth_token = False
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
image_id)
def test_detail_passes_through_to_client(self):
fixture = self._make_fixture(name='image10', is_public=True)
image_id = self.service.create(self.context, fixture)['id']
image_metas = self.service.detail(self.context)
expected = [
{
'id': image_id,
'name': 'image10',
'protected': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted': None,
'status': None,
'properties': {'is_public': True, 'properties': {}},
'owner': None,
'visibility': None
},
]
self.assertEqual(expected, image_metas)
def test_show_makes_datetimes(self):
fixture = self._make_datetime_fixture()
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
def test_detail_makes_datetimes(self):
fixture = self._make_datetime_fixture()
self.service.create(self.context, fixture)
image_meta = self.service.detail(self.context)[0]
self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
def test_download_with_retries(self):
tries = [0]
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
if tries[0] == 0:
tries[0] = 1
raise glanceclient.exc.ServiceUnavailable('')
else:
return {}
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
service.download,
self.context,
image_id,
writer)
# Now let's enable retries; no exception should be raised.
tries = [0]
self.flags(glance_num_retries=1)
service.download(self.context, image_id, writer)
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
raise glanceclient.exc.Forbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, writer)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPForbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, writer)
def test_client_notfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
raise glanceclient.exc.NotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, writer)
def test_client_httpnotfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPNotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, writer)
@mock.patch('six.moves.builtins.open')
@mock.patch('shutil.copyfileobj')
@mock.patch('cinder.image.glance.get_api_servers',
return_value=itertools.cycle([(False, 'localhost:9292')]))
def test_download_from_direct_file(self, api_servers,
mock_copyfileobj, mock_open):
fixture = self._make_fixture(name='test image',
locations=[{'url': 'file:///tmp/test'}])
image_id = self.service.create(self.context, fixture)['id']
writer = NullWriter()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(glance_api_version=2)
self.service.download(self.context, image_id, writer)
mock_copyfileobj.assert_called_once_with(mock.ANY, writer)
@mock.patch('six.moves.builtins.open')
@mock.patch('shutil.copyfileobj')
@mock.patch('cinder.image.glance.get_api_servers',
return_value=itertools.cycle([(False, 'localhost:9292')]))
def test_download_from_direct_file_non_file(self, api_servers,
mock_copyfileobj, mock_open):
fixture = self._make_fixture(name='test image',
direct_url='swift+http://test/image')
image_id = self.service.create(self.context, fixture)['id']
writer = NullWriter()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(glance_api_version=2)
self.service.download(self.context, image_id, writer)
self.assertIsNone(mock_copyfileobj.call_args)
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(_service, same_id) = glance.get_remote_image_service(self.context,
image_id)
self.assertEqual(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
(service, same_id) = glance.get_remote_image_service(self.context,
image_url)
self.assertEqual(same_id, image_id)
self.assertEqual('something-less-likely', service._client.netloc)
for ipv6_url in ('[::1]', '::1', '[::1]:444'):
image_url = 'http://%s/%s' % (ipv6_url, image_id)
(service, same_id) = glance.get_remote_image_service(self.context,
image_url)
self.assertEqual(same_id, image_id)
self.assertEqual(ipv6_url, service._client.netloc)
def test_extracting_missing_attributes(self):
"""Verify behavior from glance objects that are missing attributes
This fakes the image class and is missing the checksum and name
attribute as the client would return if they're not set in the
database. Regression test for bug #1308058.
"""
class MyFakeGlanceImage(glance_stubs.FakeImage):
def __init__(self, metadata):
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'id', 'created_at',
'updated_at', 'deleted', 'status',
'min_disk', 'min_ram', 'is_public',
'visibility', 'protected']
raw = dict.fromkeys(IMAGE_ATTRIBUTES)
raw.update(metadata)
self.__dict__['raw'] = raw
metadata = {
'id': 1,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
}
image = MyFakeGlanceImage(metadata)
actual = glance._extract_attributes(image)
expected = {
'id': 1,
'name': None,
'protected': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
'visibility': None,
}
self.assertEqual(expected, actual)
@mock.patch('cinder.image.glance.CONF')
def test_v2_passes_visibility_param(self, config):
config.glance_api_version = 2
config.glance_num_retries = 0
metadata = {
'id': 1,
'size': 2,
'visibility': 'public',
}
image = glance_stubs.FakeImage(metadata)
client = glance_stubs.StubGlanceClient()
service = self._create_image_service(client)
service._image_schema = glance_stubs.FakeSchema()
actual = service._translate_from_glance('fake_context', image)
expected = {
'id': 1,
'name': None,
'visibility': 'public',
'protected': None,
'size': 2,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
'created_at': None,
'updated_at': None
}
self.assertEqual(expected, actual)
@mock.patch('cinder.image.glance.CONF')
def test_extracting_v2_boot_properties(self, config):
config.glance_api_version = 2
config.glance_num_retries = 0
metadata = {
'id': 1,
'size': 2,
'min_disk': 2,
'min_ram': 2,
'kernel_id': 'foo',
'ramdisk_id': 'bar',
}
image = glance_stubs.FakeImage(metadata)
client = glance_stubs.StubGlanceClient()
service = self._create_image_service(client)
service._image_schema = glance_stubs.FakeSchema()
actual = service._translate_from_glance('fake_context', image)
expected = {
'id': 1,
'name': None,
'visibility': None,
'protected': None,
'size': 2,
'min_disk': 2,
'min_ram': 2,
'disk_format': None,
'container_format': None,
'checksum': None,
'deleted': None,
'status': None,
'properties': {'kernel_id': 'foo',
'ramdisk_id': 'bar'},
'owner': None,
'created_at': None,
'updated_at': None
}
self.assertEqual(expected, actual)
def test_translate_to_glance(self):
self.flags(glance_api_version=1)
client = glance_stubs.StubGlanceClient()
service = self._create_image_service(client)
metadata = {
'id': 1,
'size': 2,
'min_disk': 2,
'min_ram': 2,
'properties': {'kernel_id': 'foo',
'ramdisk_id': 'bar',
'x_billinginfo': '123'},
}
actual = service._translate_to_glance(metadata)
expected = metadata
self.assertEqual(expected, actual)
def test_translate_to_glance_v2(self):
self.flags(glance_api_version=2)
client = glance_stubs.StubGlanceClient()
service = self._create_image_service(client)
metadata = {
'id': 1,
'size': 2,
'min_disk': 2,
'min_ram': 2,
'properties': {'kernel_id': 'foo',
'ramdisk_id': 'bar',
'x_billinginfo': '123'},
}
actual = service._translate_to_glance(metadata)
expected = {
'id': 1,
'size': 2,
'min_disk': 2,
'min_ram': 2,
'kernel_id': 'foo',
'ramdisk_id': 'bar',
'x_billinginfo': '123',
}
self.assertEqual(expected, actual)
class TestGlanceClientVersion(test.TestCase):
"""Tests the version of the glance client generated."""
@mock.patch('cinder.image.glance.glanceclient.Client')
def test_glance_version_by_flag(self, _mockglanceclient):
"""Test glance version set by flag is honoured."""
ctx = mock.MagicMock()
glance.GlanceClientWrapper(ctx, 'fake_host', 9292)
self.assertEqual('2', _mockglanceclient.call_args[0][0])
self.flags(glance_api_version=1)
glance.GlanceClientWrapper(ctx, 'fake_host', 9292)
self.assertEqual('1', _mockglanceclient.call_args[0][0])
CONF.reset()
@mock.patch('cinder.image.glance.glanceclient.Client')
def test_glance_version_by_arg(self, _mockglanceclient):
"""Test glance version set by arg to GlanceClientWrapper"""
ctx = mock.MagicMock()
glance.GlanceClientWrapper(ctx, 'fake_host', 9292, version=1)
self.assertEqual('1', _mockglanceclient.call_args[0][0])
glance.GlanceClientWrapper(ctx, 'fake_host', 9292, version=2)
self.assertEqual('2', _mockglanceclient.call_args[0][0])
@mock.patch('cinder.image.glance.glanceclient.Client')
@mock.patch('cinder.image.glance.get_api_servers',
return_value=itertools.cycle([(False, 'localhost:9292')]))
def test_call_glance_version_by_arg(self, api_servers, _mockglanceclient):
"""Test glance version set by arg to GlanceClientWrapper"""
glance_wrapper = glance.GlanceClientWrapper()
ctx = mock.MagicMock()
glance_wrapper.call(ctx, 'method', version=2)
self.assertEqual('2', _mockglanceclient.call_args[0][0])
@mock.patch('cinder.image.glance.glanceclient.Client')
@mock.patch('cinder.image.glance.get_api_servers',
return_value=itertools.cycle([(False, 'localhost:9292')]))
def test_call_glance_over_quota(self, api_servers, _mockglanceclient):
"""Test glance version set by arg to GlanceClientWrapper"""
glance_wrapper = glance.GlanceClientWrapper()
fake_client = mock.Mock()
fake_client.images.method = mock.Mock(
side_effect=glanceclient.exc.HTTPOverLimit)
self.mock_object(glance_wrapper, 'client', fake_client)
self.assertRaises(exception.ImageLimitExceeded,
glance_wrapper.call, 'fake_context', 'method',
version=2)
def _create_failing_glance_client(info):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
info['num_calls'] += 1
if info['num_calls'] == 1:
raise glanceclient.exc.ServiceUnavailable('')
return {}
return MyGlanceStubClient()
class TestGlanceImageServiceClient(test.TestCase):
def setUp(self):
super(TestGlanceImageServiceClient, self).setUp()
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.mock_object(glance.time, 'sleep', return_value=None)
def test_create_glance_client(self):
self.flags(auth_strategy='keystone')
self.flags(glance_request_timeout=60)
class MyGlanceStubClient(object):
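# Note: the stub binds its first constructor argument as `inst` rather
# than `self`, so `self` still refers to the enclosing TestCase and its
# assert* helpers (the same trick recurs in the stubs below).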
def __init__(inst, version, *args, **kwargs):
self.assertEqual('2', version)
self.assertEqual("http://fake_host:9292", args[0])
self.assertTrue(kwargs['token'])
self.assertEqual(60, kwargs['timeout'])
self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient)
client = glance._create_glance_client(self.context, 'fake_host:9292',
False)
self.assertIsInstance(client, MyGlanceStubClient)
def test_create_glance_client_auth_strategy_is_not_keystone(self):
self.flags(auth_strategy='noauth')
self.flags(glance_request_timeout=60)
class MyGlanceStubClient(object):
def __init__(inst, version, *args, **kwargs):
self.assertEqual('2', version)
self.assertEqual('http://fake_host:9292', args[0])
self.assertNotIn('token', kwargs)
self.assertEqual(60, kwargs['timeout'])
self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient)
client = glance._create_glance_client(self.context, 'fake_host:9292',
False)
self.assertIsInstance(client, MyGlanceStubClient)
def test_create_glance_client_glance_request_default_timeout(self):
self.flags(auth_strategy='keystone')
self.flags(glance_request_timeout=None)
class MyGlanceStubClient(object):
def __init__(inst, version, *args, **kwargs):
self.assertEqual("2", version)
self.assertEqual("http://fake_host:9292", args[0])
self.assertTrue(kwargs['token'])
self.assertNotIn('timeout', kwargs)
self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient)
client = glance._create_glance_client(self.context, 'fake_host:9292',
False)
self.assertIsInstance(client, MyGlanceStubClient)
|
|
from xdict.quote import get_quotes_pairs,get_lrquotes
from xdict.block import get_block_op_pairs
from xdict import escape
from xdict import fsm
import xdict.quote as quote
import xdict.block as block
import re
import efuntool.efuntool as eftl
def get_jdict_token_set(**kwargs):
'''
from xdict.jprint import get_jdict_token_set
get_jdict_token_set(quotes_pairs_dict={1: ('"', '"'), 2: ("<", ">")})
'''
spaces = eftl.dflt_kwargs("spaces",[' ','\t'],**kwargs)
colons = eftl.dflt_kwargs("colons",[':'],**kwargs)
commas = eftl.dflt_kwargs("commas",[','],**kwargs)
line_sps = eftl.dflt_kwargs("line_sps",['\r','\n'],**kwargs)
block_op_pairs_dict = eftl.dflt_kwargs("block_op_pairs_dict",block.get_block_op_pairs('{}[]()'),**kwargs)
quotes_pairs_dict = eftl.dflt_kwargs("quotes_pairs_dict",quote.get_quotes_pairs('""\'\''),**kwargs)
lquotes,rquotes,quotes = quote.get_lrquotes(quotes_pairs_dict)
path_sps = eftl.dflt_kwargs("path_sps",['/'],**kwargs)
#######
d = {}
s = set({})
def add_bi_table(s,d,x):
for each in x:
k = each
v = escape.html_number_escape_str(k)
d[k] = v
d[v] = k
s.add(k)
add_bi_table(s,d,spaces)
add_bi_table(s,d,colons)
add_bi_table(s,d,commas)
add_bi_table(s,d,line_sps)
add_bi_table(s,d,lquotes)
add_bi_table(s,d,rquotes)
add_bi_table(s,d,path_sps)
for i in range(1,block_op_pairs_dict.__len__()+1):
s.add(block_op_pairs_dict[i][0])
s.add(block_op_pairs_dict[i][1])
recover_token_l = escape.html_number_escape_str(block_op_pairs_dict[i][0])
recover_token_r = escape.html_number_escape_str(block_op_pairs_dict[i][1])
d[block_op_pairs_dict[i][0]] = recover_token_l
d[block_op_pairs_dict[i][1]] = recover_token_r
d[recover_token_l] = block_op_pairs_dict[i][0]
d[recover_token_r] = block_op_pairs_dict[i][1]
return({'token_set':s,'replace_ref_dict':d})
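# Illustrative sketch, assuming the defaults above and that
# escape.html_number_escape_str emits HTML numeric entities:
#
#     tbl = get_jdict_token_set()
#     d = tbl['replace_ref_dict']
#     d[','] == '&#44;' and d['&#44;'] == ','   # ord(',') == 44
#
# tbl['token_set'] collects the spaces, colons, commas, line separators,
# quotes, path separators and block operators registered above.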
def prepare_quotes_token_machine(j_str,**kwargs):
'''
Build the quote-aware token FSM; the doctests below exercise
convert_token_in_quote, which uses the machine constructed here.
>>> from xdict.jprint import convert_token_in_quote
>>> from xdict.jprint import help
>>>
>>> convert_token_in_quote('"a b":"cd"')
'"a b":"cd"'
>>> import html
>>> html.unescape('"a b":"cd"')
'"a b":"cd"'
>>> convert_token_in_quote('"a b":cd')
'"a b":cd'
>>>
>>> #help(convert_token_in_quote)
convert_token_in_quote('<a b>:"cd"',quotes_pairs_dict={1: ('"', '"'), 2: ("<", ">")})
'<a b>:"cd"'
'''
####
spaces = eftl.dflt_kwargs("spaces",[' ','\t'],**kwargs)
colons = eftl.dflt_kwargs("colons",[':'],**kwargs)
commas = eftl.dflt_kwargs("commas",[','],**kwargs)
line_sps = eftl.dflt_kwargs("line_sps",['\r','\n'],**kwargs)
block_op_pairs_dict = eftl.dflt_kwargs("block_op_pairs_dict",get_block_op_pairs('{}[]()'),**kwargs)
quotes_pairs_dict = eftl.dflt_kwargs("quotes_pairs_dict",get_quotes_pairs('""\'\''),**kwargs)
lquotes,rquotes,quotes = get_lrquotes(quotes_pairs_dict)
path_sps = eftl.dflt_kwargs("path_sps",['/'],**kwargs)
####
temp = get_jdict_token_set(**kwargs)
token_set = temp['token_set']
replace_ref_dict = temp['replace_ref_dict']
####
# ----------------------------------------------------------------- #
####
def do_replace(ch):
if(ch in token_set):
ch = replace_ref_dict[ch]
return(ch)
def do_throw(curr_state,trigger_checker,input_symbol):
msg = "curr_state: " + curr_state + "\n"
msg = msg + "trigger_checker: "+trigger_checker.__str__() + "\n"
msg = msg + "input_symbol: "+ input_symbol.__str__() + "\n"
msg = msg + "triggered ERROR" + "\n"
raise Exception(msg)
####
machine = fsm.FSM()
regex_lquotes = fsm.creat_regex_from_arr(lquotes)
regex_rquotes = fsm.creat_regex_from_arr(rquotes)
regex_b = re.compile('b')
regex_spaces = fsm.creat_regex_from_arr(spaces)
regex_colons = fsm.creat_regex_from_arr(colons)
regex_commas = fsm.creat_regex_from_arr(commas)
regex_slash = re.compile("\\\\")
######
ops = []
for i in range(1,block_op_pairs_dict.__len__()+1):
ops.append(block_op_pairs_dict[i][0])
ops.append(block_op_pairs_dict[i][1])
######
regex_ops = fsm.creat_regex_from_arr(ops)
LqRqBSpColComSlOp_arr = ['b','\\\\']
LqRqBSpColComSlOp_arr = LqRqBSpColComSlOp_arr + lquotes + rquotes + spaces + colons+commas + ops
regex_not_LqRqBSpColComSlOp = fsm.creat_regex_not_from_arr(LqRqBSpColComSlOp_arr)
# ############################
# ############################
machine.add("INIT",regex_b,None,"BYTES")
machine.add("INIT",regex_spaces,None,"INIT")
machine.add("INIT",regex_ops,None,"INIT")
machine.add("INIT",regex_colons,None,"INIT")
machine.add("INIT",regex_commas,None,"INIT")
machine.add("INIT",regex_slash,None,"SLASHINIT")
machine.add("INIT",regex_not_LqRqBSpColComSlOp,do_replace,"OTHER")
####
machine.add("BYTES",regex_b,None,"OTHER")
machine.add("BYTES",regex_spaces,None,"BYTES")
machine.add("BYTES",regex_ops,None,"INIT")
machine.add("BYTES",regex_colons,None,"INIT")
machine.add("BYTES",regex_commas,None,"INIT")
machine.add("BYTES",regex_slash,None,"SLASHBYTES")
machine.add("BYTES",regex_not_LqRqBSpColComSlOp,do_replace,"OTHER")
####
machine.add("SLASHINIT",re.compile("."),do_replace,"OTHER")
####
machine.add("SLASHBYTES",re.compile("."),do_replace,"OTHER")
####
machine.add("OTHER",regex_b,None,"OTHER")
machine.add("OTHER",regex_spaces,None,"OTHER")
machine.add("OTHER",regex_ops,None,"INIT")
machine.add("OTHER",regex_colons,None,"INIT")
machine.add("OTHER",regex_commas,None,"INIT")
machine.add("OTHER",regex_slash,None,"SLASHOTHER")
machine.add("OTHER",regex_not_LqRqBSpColComSlOp,do_replace,"OTHER")
####
machine.add("SLASHOTHER",re.compile("."),do_replace,"OTHER")
####
regex_lquote_array = fsm.creat_regexes_array(lquotes)
regex_rquote_array = fsm.creat_regexes_array(rquotes)
###@@@@@@@@@@@@@@@
for i in range(0,lquotes.__len__()):
####INIT -lq_n-> LQ_n
sn = ''.join(("LQ",'_',str(i)))
machine.add("INIT",regex_lquote_array[i],None,sn)
for i in range(0,rquotes.__len__()):
####INIT -rq_n-> ERROR
# Compare the quote characters themselves (as the BYTES/OTHER loops do);
# comparing compiled regex objects only works via re.compile's cache.
if(rquotes[i] == lquotes[i]):
pass
else:
machine.add("INIT",regex_rquote_array[i],do_throw,'ERROR')
####
for i in range(0,lquotes.__len__()):
####BYTES -lq_n-> LQ_n
sn = ''.join(("LQ",'_',str(i)))
machine.add("BYTES",regex_lquote_array[i],None,sn)
for i in range(0,rquotes.__len__()):
####BYTES -rq_n-> ERROR
if(rquotes[i] == lquotes[i]):
pass
else:
machine.add("BYTES",regex_rquote_array[i],do_throw,'ERROR')
####
for i in range(0,lquotes.__len__()):
####OTHER -lq_n-> LQ_n
sn = ''.join(("LQ",'_',str(i)))
machine.add("OTHER",regex_lquote_array[i],None,sn)
for i in range(0,rquotes.__len__()):
####OTHER -rq_n-> ERROR
if(rquotes[i] == lquotes[i]):
pass
else:
machine.add("OTHER",regex_rquote_array[i],do_throw,'ERROR')
####
for i in range(0,lquotes.__len__()):
####LQ_n -lq_n-> ERROR
sn = ''.join(("LQ",'_',str(i)))
if(lquotes[i] == rquotes[i]):
pass
else:
machine.add(sn,regex_lquote_array[i],do_throw,'ERROR')
####LQ_n -rq_n-> INIT
machine.add(sn,regex_rquote_array[i],None,'INIT')
#####LQ_n -b-> LQ_n
machine.add(sn,regex_b,None,sn)
#####LQ_n -spaces-> LQ_n
machine.add(sn,regex_spaces,do_replace,sn)
#####LQ_n -ops-> LQ_n
machine.add(sn,regex_ops,do_replace,sn)
####LQ_n -colons-> LQ_n
machine.add(sn,regex_colons,do_replace,sn)
####LQ_n -commas-> LQ_n
machine.add(sn,regex_commas,do_replace,sn)
#####LQ_n -slash -> SLASHLQ_n
slashlq = ''.join(("SLASHLQ",'_',str(i)))
machine.add(sn,re.compile("\\\\"),None,slashlq)
####SLASHLQ_n -any-> LQ_n
machine.add(slashlq,re.compile("."),do_replace,sn)
#####LQ_n -others-> LQ_n
tmp_arr = ['b','\\\\'] + ops + colons + commas + spaces
tmp_arr_rq = [rquotes[i]]
tmp_arr_lq = [lquotes[i]]
if(lquotes[i] == rquotes[i]):
tmp_arr_rq = []
else:
pass
tmp_final_arr = tmp_arr + tmp_arr_rq + tmp_arr_lq
####
tmp_regex = fsm.creat_regex_not_from_arr(tmp_final_arr)
machine.add(sn,tmp_regex,do_replace,sn)
####
machine.orig_str = j_str
machine.do_replace = do_replace
machine.do_throw = do_throw
####
return(machine)
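# Hedged note: only FSM.add() is exercised here, so the driving API of the
# returned machine is assumed. Conceptually, feeding '"a b":cd' through it
# escapes the space inside the quotes (via do_replace, e.g. ' ' -> '&#32;')
# while the colon outside the quotes passes through untouched; this is the
# behaviour the convert_token_in_quote doctests demonstrate.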
|
|
#!/opt/conmon-agent/embedded/bin/python
"""
Datadog
www.datadoghq.com
----
Cloud-Scale Monitoring. Monitoring that tracks your dynamic infrastructure.
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2016 all rights reserved
"""
# set up logging before importing any other components
from config import get_version, initialize_logging # noqa
initialize_logging('collector')
# stdlib
import logging
import os
import signal
import sys
import time
# For pickle & PID files, see issue 293
os.umask(022)
# project
from checks.check_status import CollectorStatus
from checks.collector import Collector
from config import (
get_config,
get_parsed_args,
get_system_stats,
load_check_directory,
)
from daemon import AgentSupervisor, Daemon
from emitter import http_emitter
from util import (
EC2,
get_hostname,
Watchdog,
)
from utils.flare import configcheck, Flare
from utils.jmx import jmx_command
from utils.pidfile import PidFile
from utils.profile import AgentProfiler
from utils.service_discovery.configcheck import sd_configcheck
from utils.service_discovery.config_stores import get_config_store, TRACE_CONFIG
from utils.service_discovery.sd_backend import get_sd_backend
# Constants
PID_NAME = "cm-agent"
PID_DIR = None
WATCHDOG_MULTIPLIER = 10
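# The watchdog is armed with check_freq * WATCHDOG_MULTIPLIER seconds (see
# Agent._get_watchdog below), e.g. a 15s check frequency gives a 150s timeout.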
RESTART_INTERVAL = 4 * 24 * 60 * 60 # Defaults to 4 days
START_COMMANDS = ['start', 'restart', 'foreground']
CM_AGENT_COMMANDS = ['check', 'flare', 'jmx']
DEFAULT_COLLECTOR_PROFILE_INTERVAL = 20
# Globals
log = logging.getLogger('collector')
class Agent(Daemon):
"""
The agent class is a daemon that runs the collector in a background process.
"""
def __init__(self, pidfile, autorestart, start_event=True, in_developer_mode=False):
Daemon.__init__(self, pidfile, autorestart=autorestart)
self.run_forever = True
self.collector = None
self.start_event = start_event
self.in_developer_mode = in_developer_mode
self._agentConfig = {}
self._checksd = []
self.collector_profile_interval = DEFAULT_COLLECTOR_PROFILE_INTERVAL
self.check_frequency = None
self.configs_reloaded = False
self.sd_backend = None
def _handle_sigterm(self, signum, frame):
"""Handles SIGTERM and SIGINT, which gracefully stops the agent."""
log.debug("Caught sigterm. Stopping run loop.")
self.run_forever = False
if self.collector:
self.collector.stop()
log.debug("Collector is stopped.")
def _handle_sigusr1(self, signum, frame):
"""Handles SIGUSR1, which signals an exit with an autorestart."""
self._handle_sigterm(signum, frame)
self._do_restart()
def _handle_sighup(self, signum, frame):
"""Handles SIGHUP, which signals a configuration reload."""
log.info("SIGHUP caught!")
self.reload_configs()
self.configs_reloaded = True
def reload_configs(self):
"""Reloads the agent configuration and checksd configurations."""
log.info("Attempting a configuration reload...")
# Reload checksd configs
hostname = get_hostname(self._agentConfig)
self._checksd = load_check_directory(self._agentConfig, hostname)
# Logging
num_checks = len(self._checksd['initialized_checks'])
if num_checks > 0:
log.info("Successfully reloaded {num_checks} checks".
format(num_checks=num_checks))
else:
log.info("No checksd configs found")
@classmethod
def info(cls, verbose=None):
logging.getLogger().setLevel(logging.ERROR)
return CollectorStatus.print_latest_status(verbose=verbose)
def run(self, config=None):
"""Main loop of the collector"""
# Gracefully exit on sigterm.
signal.signal(signal.SIGTERM, self._handle_sigterm)
# A SIGUSR1 signals an exit with an autorestart
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
# A SIGHUP signals a configuration reload
signal.signal(signal.SIGHUP, self._handle_sighup)
# Save the agent start-up stats.
CollectorStatus().persist()
# Initialize the collector.
if not config:
config = get_config(parse_args=True)
self._agentConfig = self._set_agent_config_hostname(config)
hostname = get_hostname(self._agentConfig)
systemStats = get_system_stats()
emitters = self._get_emitters()
# Initialize service discovery
if self._agentConfig.get('service_discovery'):
self.sd_backend = get_sd_backend(self._agentConfig)
# Load the checks.d checks
self._checksd = load_check_directory(self._agentConfig, hostname)
# Initialize the Collector
self.collector = Collector(self._agentConfig, emitters, systemStats, hostname)
# In developer mode, the number of runs to be included in a single collector profile
self.collector_profile_interval = self._agentConfig.get('collector_profile_interval',
DEFAULT_COLLECTOR_PROFILE_INTERVAL)
# Configure the watchdog.
self.check_frequency = int(self._agentConfig['check_freq'])
watchdog = self._get_watchdog(self.check_frequency)
# Initialize the auto-restarter
self.restart_interval = int(self._agentConfig.get('restart_interval', RESTART_INTERVAL))
self.agent_start = time.time()
profiled = False
collector_profiled_runs = 0
# Run the main loop.
while self.run_forever:
log.debug("Found {num_checks} checks".format(num_checks=len(self._checksd['initialized_checks'])))
# Setup profiling if necessary
if self.in_developer_mode and not profiled:
try:
profiler = AgentProfiler()
profiler.enable_profiling()
profiled = True
except Exception as e:
log.warn("Cannot enable profiler: %s" % str(e))
# Do the work.
self.collector.run(checksd=self._checksd,
start_event=self.start_event,
configs_reloaded=self.configs_reloaded)
# This flag tracks whether the check configs have been reloaded during the
# current run of the agent. The collector uses it to decide whether it
# needs to look for the AgentMetrics check and pop it out.
# See: https://github.com/DataDog/dd-agent/blob/5.6.x/checks/collector.py#L265-L272
self.configs_reloaded = False
# Look for change in the config template store.
# The self.sd_backend.reload_check_configs flag is set
# to True if a config reload is needed.
if self._agentConfig.get('service_discovery') and self.sd_backend and \
not self.sd_backend.reload_check_configs:
try:
self.sd_backend.reload_check_configs = get_config_store(
self._agentConfig).crawl_config_template()
except Exception as e:
log.warn('Something went wrong while looking for config template changes: %s' % str(e))
# Check if we should run service discovery
# The `reload_check_configs` flag can be set through the docker_daemon check or
# using ConfigStore.crawl_config_template
if self._agentConfig.get('service_discovery') and self.sd_backend and \
self.sd_backend.reload_check_configs:
self.reload_configs()
self.configs_reloaded = True
self.sd_backend.reload_check_configs = False
if profiled:
if collector_profiled_runs >= self.collector_profile_interval:
try:
profiler.disable_profiling()
profiled = False
collector_profiled_runs = 0
except Exception as e:
log.warn("Cannot disable profiler: %s" % str(e))
# Check if we should restart.
if self.autorestart and self._should_restart():
self._do_restart()
# Only plan for next loop if we will continue, otherwise exit quickly.
if self.run_forever:
if watchdog:
watchdog.reset()
if profiled:
collector_profiled_runs += 1
log.debug("Sleeping for {0} seconds".format(self.check_frequency))
time.sleep(self.check_frequency)
# Now clean-up.
try:
CollectorStatus.remove_latest_status()
except Exception:
pass
# Explicitly kill the process, because it might be running as a daemon.
log.info("Exiting. Bye bye.")
sys.exit(0)
def _get_emitters(self):
return [http_emitter]
def _get_watchdog(self, check_freq):
watchdog = None
if self._agentConfig.get("watchdog", True):
watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER,
max_mem_mb=self._agentConfig.get('limit_memory_consumption', None))
watchdog.reset()
return watchdog
def _set_agent_config_hostname(self, agentConfig):
# Try to fetch the instance id from EC2 if no hostname has been set
# in the config file.
# DEPRECATED
if agentConfig.get('hostname') is None and agentConfig.get('use_ec2_instance_id'):
instanceId = EC2.get_instance_id(agentConfig)
if instanceId is not None:
log.info("Running on EC2, instanceId: %s" % instanceId)
agentConfig['hostname'] = instanceId
else:
log.info('Not running on EC2, using hostname to identify this server')
return agentConfig
def _should_restart(self):
if time.time() - self.agent_start > self.restart_interval:
return True
return False
def _do_restart(self):
log.info("Running an auto-restart.")
if self.collector:
self.collector.stop()
sys.exit(AgentSupervisor.RESTART_EXIT_STATUS)
def main():
options, args = get_parsed_args()
agentConfig = get_config(options=options)
autorestart = agentConfig.get('autorestart', False)
hostname = get_hostname(agentConfig)
in_developer_mode = agentConfig.get('developer_mode')
COMMANDS_AGENT = [
'start',
'stop',
'restart',
'status',
'foreground',
]
COMMANDS_NO_AGENT = [
'info',
'check',
'configcheck',
'jmx',
'flare',
]
COMMANDS = COMMANDS_AGENT + COMMANDS_NO_AGENT
if len(args) < 1:
sys.stderr.write("Usage: %s %s\n" % (sys.argv[0], "|".join(COMMANDS)))
return 2
command = args[0]
if command not in COMMANDS:
sys.stderr.write("Unknown command: %s\n" % command)
return 3
# Deprecation notice
if command not in CM_AGENT_COMMANDS:
# Will become an error message and exit after deprecation period
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
if command in COMMANDS_AGENT:
agent = Agent(PidFile(PID_NAME, PID_DIR).get_path(), autorestart, in_developer_mode=in_developer_mode)
if command in START_COMMANDS:
log.info('Agent version %s' % get_version())
if 'start' == command:
log.info('Start daemon')
agent.start()
elif 'stop' == command:
log.info('Stop daemon')
agent.stop()
elif 'restart' == command:
log.info('Restart daemon')
agent.restart()
elif 'status' == command:
agent.status()
elif 'info' == command:
return Agent.info(verbose=options.verbose)
elif 'foreground' == command:
logging.info('Running in foreground')
if autorestart:
# Set-up the supervisor callbacks and fork it.
logging.info('Running Agent with auto-restart ON')
def child_func():
agent.start(foreground=True)
def parent_func():
agent.start_event = False
AgentSupervisor.start(parent_func, child_func)
else:
# Run in the standard foreground.
agent.start(foreground=True)
elif 'check' == command:
if len(args) < 2:
sys.stderr.write(
"Usage: %s check <check_name> [check_rate]\n"
"Add check_rate as last argument to compute rates\n"
% sys.argv[0]
)
return 1
check_name = args[1]
try:
import checks.collector
# Try the old-style check first
print getattr(checks.collector, check_name)(log).check(agentConfig)
except Exception:
# If not an old-style check, try checks.d
checks = load_check_directory(agentConfig, hostname)
for check in checks['initialized_checks']:
if check.name == check_name:
if in_developer_mode:
check.run = AgentProfiler.wrap_profiling(check.run)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
if len(args) == 3 and args[2] == 'check_rate':
print "Running 2nd iteration to capture rate metrics"
time.sleep(1)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
check.stop()
elif 'configcheck' == command or 'configtest' == command:
configcheck()
if agentConfig.get('service_discovery', False):
# set the TRACE_CONFIG flag to True to make load_check_directory return
# the source of config objects.
# Then call load_check_directory here and pass the result to sd_configcheck
# to avoid circular imports
agentConfig[TRACE_CONFIG] = True
configs = {
# check_name: (config_source, config)
}
print("\nLoading check configurations...\n\n")
configs = load_check_directory(agentConfig, hostname)
sd_configcheck(agentConfig, configs)
elif 'jmx' == command:
jmx_command(args[1:], agentConfig)
elif 'flare' == command:
Flare.check_user_rights()
case_id = int(args[1]) if len(args) > 1 else None
f = Flare(True, case_id)
f.collect()
try:
f.upload()
except Exception, e:
print 'The upload failed:\n{0}'.format(str(e))
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except StandardError:
# Try our best to log the error.
try:
log.exception("Uncaught error running the Agent")
except Exception:
pass
raise
|
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import itertools
import os
import re
import sys
import urlparse
import xml.dom.minidom
from git_config import GitConfig
from git_refs import R_HEADS, HEAD
from project import RemoteSpec, Project, MetaProject
from error import ManifestParseError
MANIFEST_FILE_NAME = 'manifest.xml'
LOCAL_MANIFEST_NAME = 'local_manifest.xml'
LOCAL_MANIFESTS_DIR_NAME = 'local_manifests'
urlparse.uses_relative.extend(['ssh', 'git'])
urlparse.uses_netloc.extend(['ssh', 'git'])
class _Default(object):
"""Project defaults within the manifest."""
revisionExpr = None
remote = None
sync_j = 1
sync_c = False
sync_s = False
class _XmlRemote(object):
def __init__(self,
name,
alias=None,
fetch=None,
manifestUrl=None,
review=None):
self.name = name
self.fetchUrl = fetch
self.manifestUrl = manifestUrl
self.remoteAlias = alias
self.reviewUrl = review
self.resolvedFetchUrl = self._resolveFetchUrl()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
def _resolveFetchUrl(self):
url = self.fetchUrl.rstrip('/')
manifestUrl = self.manifestUrl.rstrip('/')
# urljoin will get confused if there is no scheme in the base url,
# i.e. if manifestUrl is of the form <hostname:port>
if manifestUrl.find(':') != manifestUrl.find('/') - 1:
manifestUrl = 'gopher://' + manifestUrl
url = urlparse.urljoin(manifestUrl, url)
return re.sub(r'^gopher://', '', url)
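# Worked example (a sketch): with fetch='..' and
# manifestUrl='host.example:29418/manifests', the temporary gopher://
# scheme lets urljoin treat host.example:29418 as a netloc, so the result
# resolves to 'host.example:29418/' once the scheme is stripped again.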
def ToRemoteSpec(self, projectName):
url = self.resolvedFetchUrl.rstrip('/') + '/' + projectName
remoteName = self.name
if self.remoteAlias:
remoteName = self.remoteAlias
return RemoteSpec(remoteName, url, self.reviewUrl)
class XmlManifest(object):
"""manages the repo configuration file"""
def __init__(self, repodir):
self.repodir = os.path.abspath(repodir)
self.topdir = os.path.dirname(self.repodir)
self.manifestFile = os.path.join(self.repodir, MANIFEST_FILE_NAME)
self.globalConfig = GitConfig.ForUser()
self.repoProject = MetaProject(self, 'repo',
gitdir = os.path.join(repodir, 'repo/.git'),
worktree = os.path.join(repodir, 'repo'))
self.manifestProject = MetaProject(self, 'manifests',
gitdir = os.path.join(repodir, 'manifests.git'),
worktree = os.path.join(repodir, 'manifests'))
self._Unload()
def Override(self, name):
"""Use a different manifest, just for the current instantiation.
"""
path = os.path.join(self.manifestProject.worktree, name)
if not os.path.isfile(path):
raise ManifestParseError('manifest %s not found' % name)
old = self.manifestFile
try:
self.manifestFile = path
self._Unload()
self._Load()
finally:
self.manifestFile = old
def Link(self, name):
"""Update the repo metadata to use a different manifest.
"""
self.Override(name)
try:
if os.path.exists(self.manifestFile):
os.remove(self.manifestFile)
os.symlink('manifests/%s' % name, self.manifestFile)
except OSError:
raise ManifestParseError('cannot link manifest %s' % name)
def _RemoteToXml(self, r, doc, root):
e = doc.createElement('remote')
root.appendChild(e)
e.setAttribute('name', r.name)
e.setAttribute('fetch', r.fetchUrl)
if r.reviewUrl is not None:
e.setAttribute('review', r.reviewUrl)
def Save(self, fd, peg_rev=False, peg_rev_upstream=True):
"""Write the current manifest out to the given file descriptor.
"""
mp = self.manifestProject
groups = mp.config.GetString('manifest.groups')
if not groups:
groups = 'all'
groups = [x for x in re.split(r'[,\s]+', groups) if x]
doc = xml.dom.minidom.Document()
root = doc.createElement('manifest')
doc.appendChild(root)
# Save out the notice. There's a little bit of work here to give it the
# right whitespace, which assumes that the notice is automatically indented
# by 4 by minidom.
if self.notice:
notice_element = root.appendChild(doc.createElement('notice'))
notice_lines = self.notice.splitlines()
indented_notice = ('\n'.join(" "*4 + line for line in notice_lines))[4:]
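# e.g. a notice of 'foo\nbar' becomes 'foo\n    bar': every line gains a
# four-space indent and the first line's indent is then sliced off, since
# minidom re-indents the first line itself when writing.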
notice_element.appendChild(doc.createTextNode(indented_notice))
d = self.default
sort_remotes = list(self.remotes.keys())
sort_remotes.sort()
for r in sort_remotes:
self._RemoteToXml(self.remotes[r], doc, root)
if self.remotes:
root.appendChild(doc.createTextNode(''))
have_default = False
e = doc.createElement('default')
if d.remote:
have_default = True
e.setAttribute('remote', d.remote.name)
if d.revisionExpr:
have_default = True
e.setAttribute('revision', d.revisionExpr)
if d.sync_j > 1:
have_default = True
e.setAttribute('sync-j', '%d' % d.sync_j)
if d.sync_c:
have_default = True
e.setAttribute('sync-c', 'true')
if d.sync_s:
have_default = True
e.setAttribute('sync-s', 'true')
if have_default:
root.appendChild(e)
root.appendChild(doc.createTextNode(''))
if self._manifest_server:
e = doc.createElement('manifest-server')
e.setAttribute('url', self._manifest_server)
root.appendChild(e)
root.appendChild(doc.createTextNode(''))
def output_projects(parent, parent_node, projects):
for p in projects:
output_project(parent, parent_node, self.projects[p])
def output_project(parent, parent_node, p):
if not p.MatchesGroups(groups):
return
name = p.name
relpath = p.relpath
if parent:
name = self._UnjoinName(parent.name, name)
relpath = self._UnjoinRelpath(parent.relpath, relpath)
e = doc.createElement('project')
parent_node.appendChild(e)
e.setAttribute('name', name)
if relpath != name:
e.setAttribute('path', relpath)
if not d.remote or p.remote.name != d.remote.name:
e.setAttribute('remote', p.remote.name)
if peg_rev:
if self.IsMirror:
value = p.bare_git.rev_parse(p.revisionExpr + '^0')
else:
value = p.work_git.rev_parse(HEAD + '^0')
e.setAttribute('revision', value)
if peg_rev_upstream and value != p.revisionExpr:
# Only save the origin if the origin is not a sha1, the default
# isn't our value, and the default doesn't already cover it.
e.setAttribute('upstream', p.revisionExpr)
elif not d.revisionExpr or p.revisionExpr != d.revisionExpr:
e.setAttribute('revision', p.revisionExpr)
for c in p.copyfiles:
ce = doc.createElement('copyfile')
ce.setAttribute('src', c.src)
ce.setAttribute('dest', c.dest)
e.appendChild(ce)
default_groups = ['all', 'name:%s' % p.name, 'path:%s' % p.relpath]
egroups = [g for g in p.groups if g not in default_groups]
if egroups:
e.setAttribute('groups', ','.join(egroups))
for a in p.annotations:
if a.keep == "true":
ae = doc.createElement('annotation')
ae.setAttribute('name', a.name)
ae.setAttribute('value', a.value)
e.appendChild(ae)
if p.sync_c:
e.setAttribute('sync-c', 'true')
if p.sync_s:
e.setAttribute('sync-s', 'true')
if p.subprojects:
sort_projects = [subp.name for subp in p.subprojects]
sort_projects.sort()
output_projects(p, e, sort_projects)
sort_projects = [key for key in self.projects.keys()
if not self.projects[key].parent]
sort_projects.sort()
output_projects(None, root, sort_projects)
if self._repo_hooks_project:
root.appendChild(doc.createTextNode(''))
e = doc.createElement('repo-hooks')
e.setAttribute('in-project', self._repo_hooks_project.name)
e.setAttribute('enabled-list',
' '.join(self._repo_hooks_project.enabled_repo_hooks))
root.appendChild(e)
doc.writexml(fd, '', ' ', '\n', 'UTF-8')
@property
def projects(self):
self._Load()
return self._projects
@property
def remotes(self):
self._Load()
return self._remotes
@property
def default(self):
self._Load()
return self._default
@property
def repo_hooks_project(self):
self._Load()
return self._repo_hooks_project
@property
def notice(self):
self._Load()
return self._notice
@property
def manifest_server(self):
self._Load()
return self._manifest_server
@property
def IsMirror(self):
return self.manifestProject.config.GetBoolean('repo.mirror')
def _Unload(self):
self._loaded = False
self._projects = {}
self._remotes = {}
self._default = None
self._repo_hooks_project = None
self._notice = None
self.branch = None
self._manifest_server = None
def _Load(self):
if not self._loaded:
m = self.manifestProject
b = m.GetBranch(m.CurrentBranch).merge
if b is not None and b.startswith(R_HEADS):
b = b[len(R_HEADS):]
self.branch = b
nodes = []
nodes.append(self._ParseManifestXml(self.manifestFile,
self.manifestProject.worktree))
local = os.path.join(self.repodir, LOCAL_MANIFEST_NAME)
if os.path.exists(local):
print('warning: %s is deprecated; put local manifests in %s instead'
% (LOCAL_MANIFEST_NAME, LOCAL_MANIFESTS_DIR_NAME),
file=sys.stderr)
nodes.append(self._ParseManifestXml(local, self.repodir))
local_dir = os.path.abspath(os.path.join(self.repodir, LOCAL_MANIFESTS_DIR_NAME))
try:
for local_file in sorted(os.listdir(local_dir)):
if local_file.endswith('.xml'):
try:
nodes.append(self._ParseManifestXml(
os.path.join(local_dir, local_file), self.repodir))
except ManifestParseError as e:
print('%s' % str(e), file=sys.stderr)
except OSError:
pass
self._ParseManifest(nodes)
if self.IsMirror:
self._AddMetaProjectMirror(self.repoProject)
self._AddMetaProjectMirror(self.manifestProject)
self._loaded = True
def _ParseManifestXml(self, path, include_root):
try:
root = xml.dom.minidom.parse(path)
except (OSError, xml.parsers.expat.ExpatError) as e:
raise ManifestParseError("error parsing manifest %s: %s" % (path, e))
if not root or not root.childNodes:
raise ManifestParseError("no root node in %s" % (path,))
for manifest in root.childNodes:
if manifest.nodeName == 'manifest':
break
else:
raise ManifestParseError("no <manifest> in %s" % (path,))
nodes = []
for node in manifest.childNodes: # pylint:disable=W0631
# We only get here if manifest is initialised
if node.nodeName == 'include':
name = self._reqatt(node, 'name')
fp = os.path.join(include_root, name)
if not os.path.isfile(fp):
raise ManifestParseError, \
"include %s doesn't exist or isn't a file" % \
(name,)
try:
nodes.extend(self._ParseManifestXml(fp, include_root))
# Should isolate this to the exact exception, but that's tricky:
# the actual parsing implementation may vary.
except (KeyboardInterrupt, RuntimeError, SystemExit):
raise
except Exception as e:
raise ManifestParseError(
"failed parsing included manifest %s: %s", (name, e))
else:
nodes.append(node)
return nodes
def _ParseManifest(self, node_list):
for node in itertools.chain(*node_list):
if node.nodeName == 'remote':
remote = self._ParseRemote(node)
if remote:
if remote.name in self._remotes:
if remote != self._remotes[remote.name]:
raise ManifestParseError(
'remote %s already exists with different attributes' %
(remote.name))
else:
self._remotes[remote.name] = remote
for node in itertools.chain(*node_list):
if node.nodeName == 'default':
if self._default is not None:
raise ManifestParseError(
'duplicate default in %s' %
(self.manifestFile))
self._default = self._ParseDefault(node)
if self._default is None:
self._default = _Default()
for node in itertools.chain(*node_list):
if node.nodeName == 'notice':
if self._notice is not None:
raise ManifestParseError(
'duplicate notice in %s' %
(self.manifestFile))
self._notice = self._ParseNotice(node)
for node in itertools.chain(*node_list):
if node.nodeName == 'manifest-server':
url = self._reqatt(node, 'url')
if self._manifest_server is not None:
raise ManifestParseError(
'duplicate manifest-server in %s' %
(self.manifestFile))
self._manifest_server = url
def recursively_add_projects(project):
if self._projects.get(project.name):
raise ManifestParseError(
'duplicate project %s in %s' %
(project.name, self.manifestFile))
self._projects[project.name] = project
for subproject in project.subprojects:
recursively_add_projects(subproject)
for node in itertools.chain(*node_list):
if node.nodeName == 'project':
project = self._ParseProject(node)
recursively_add_projects(project)
if node.nodeName == 'repo-hooks':
# Get the name of the project and the (space-separated) list of enabled hooks.
repo_hooks_project = self._reqatt(node, 'in-project')
enabled_repo_hooks = self._reqatt(node, 'enabled-list').split()
# Only one project can be the hooks project
if self._repo_hooks_project is not None:
raise ManifestParseError(
'duplicate repo-hooks in %s' %
(self.manifestFile))
# Store a reference to the Project.
try:
self._repo_hooks_project = self._projects[repo_hooks_project]
except KeyError:
raise ManifestParseError(
'project %s not found for repo-hooks' %
(repo_hooks_project))
# Store the enabled hooks in the Project object.
self._repo_hooks_project.enabled_repo_hooks = enabled_repo_hooks
if node.nodeName == 'remove-project':
name = self._reqatt(node, 'name')
try:
del self._projects[name]
except KeyError:
raise ManifestParseError('remove-project element specifies non-existent '
'project: %s' % name)
# If the manifest removes the hooks project, treat it as if it deleted
# the repo-hooks element too.
if self._repo_hooks_project and (self._repo_hooks_project.name == name):
self._repo_hooks_project = None
def _AddMetaProjectMirror(self, m):
name = None
m_url = m.GetRemote(m.remote.name).url
if m_url.endswith('/.git'):
      raise ManifestParseError('refusing to mirror %s' % m_url)
if self._default and self._default.remote:
url = self._default.remote.resolvedFetchUrl
if not url.endswith('/'):
url += '/'
if m_url.startswith(url):
remote = self._default.remote
name = m_url[len(url):]
if name is None:
s = m_url.rindex('/') + 1
manifestUrl = self.manifestProject.config.GetString('remote.origin.url')
remote = _XmlRemote('origin', fetch=m_url[:s], manifestUrl=manifestUrl)
name = m_url[s:]
if name.endswith('.git'):
name = name[:-4]
if name not in self._projects:
m.PreSync()
gitdir = os.path.join(self.topdir, '%s.git' % name)
project = Project(manifest = self,
name = name,
remote = remote.ToRemoteSpec(name),
gitdir = gitdir,
worktree = None,
relpath = None,
revisionExpr = m.revisionExpr,
revisionId = None)
self._projects[project.name] = project
def _ParseRemote(self, node):
"""
reads a <remote> element from the manifest file
"""
name = self._reqatt(node, 'name')
alias = node.getAttribute('alias')
if alias == '':
alias = None
fetch = self._reqatt(node, 'fetch')
review = node.getAttribute('review')
if review == '':
review = None
manifestUrl = self.manifestProject.config.GetString('remote.origin.url')
return _XmlRemote(name, alias, fetch, manifestUrl, review)
def _ParseDefault(self, node):
"""
reads a <default> element from the manifest file
"""
d = _Default()
d.remote = self._get_remote(node)
d.revisionExpr = node.getAttribute('revision')
if d.revisionExpr == '':
d.revisionExpr = None
sync_j = node.getAttribute('sync-j')
if sync_j == '' or sync_j is None:
d.sync_j = 1
else:
d.sync_j = int(sync_j)
sync_c = node.getAttribute('sync-c')
if not sync_c:
d.sync_c = False
else:
d.sync_c = sync_c.lower() in ("yes", "true", "1")
sync_s = node.getAttribute('sync-s')
if not sync_s:
d.sync_s = False
else:
d.sync_s = sync_s.lower() in ("yes", "true", "1")
return d
def _ParseNotice(self, node):
"""
reads a <notice> element from the manifest file
The <notice> element is distinct from other tags in the XML in that the
data is conveyed between the start and end tag (it's not an empty-element
tag).
The white space (carriage returns, indentation) for the notice element is
relevant and is parsed in a way that is based on how python docstrings work.
In fact, the code is remarkably similar to here:
http://www.python.org/dev/peps/pep-0257/
"""
# Get the data out of the node...
notice = node.childNodes[0].data
# Figure out minimum indentation, skipping the first line (the same line
# as the <notice> tag)...
    minIndent = sys.maxsize
lines = notice.splitlines()
for line in lines[1:]:
lstrippedLine = line.lstrip()
if lstrippedLine:
indent = len(line) - len(lstrippedLine)
minIndent = min(indent, minIndent)
# Strip leading / trailing blank lines and also indentation.
cleanLines = [lines[0].strip()]
for line in lines[1:]:
cleanLines.append(line[minIndent:].rstrip())
# Clear completely blank lines from front and back...
while cleanLines and not cleanLines[0]:
del cleanLines[0]
while cleanLines and not cleanLines[-1]:
del cleanLines[-1]
return '\n'.join(cleanLines)
def _JoinName(self, parent_name, name):
return os.path.join(parent_name, name)
def _UnjoinName(self, parent_name, name):
return os.path.relpath(name, parent_name)
def _ParseProject(self, node, parent = None):
"""
reads a <project> element from the manifest file
"""
name = self._reqatt(node, 'name')
if parent:
name = self._JoinName(parent.name, name)
remote = self._get_remote(node)
if remote is None:
remote = self._default.remote
if remote is None:
      raise ManifestParseError(
          "no remote for project %s within %s" %
          (name, self.manifestFile))
revisionExpr = node.getAttribute('revision')
if not revisionExpr:
revisionExpr = self._default.revisionExpr
if not revisionExpr:
      raise ManifestParseError(
          "no revision for project %s within %s" %
          (name, self.manifestFile))
path = node.getAttribute('path')
if not path:
path = name
if path.startswith('/'):
      raise ManifestParseError(
          "project %s path cannot be absolute in %s" %
          (name, self.manifestFile))
rebase = node.getAttribute('rebase')
if not rebase:
rebase = True
else:
rebase = rebase.lower() in ("yes", "true", "1")
sync_c = node.getAttribute('sync-c')
if not sync_c:
sync_c = False
else:
sync_c = sync_c.lower() in ("yes", "true", "1")
sync_s = node.getAttribute('sync-s')
if not sync_s:
sync_s = self._default.sync_s
else:
sync_s = sync_s.lower() in ("yes", "true", "1")
upstream = node.getAttribute('upstream')
groups = ''
if node.hasAttribute('groups'):
groups = node.getAttribute('groups')
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if parent is None:
relpath, worktree, gitdir = self.GetProjectPaths(name, path)
else:
relpath, worktree, gitdir = self.GetSubprojectPaths(parent, path)
default_groups = ['all', 'name:%s' % name, 'path:%s' % relpath]
groups.extend(set(default_groups).difference(groups))
project = Project(manifest = self,
name = name,
remote = remote.ToRemoteSpec(name),
gitdir = gitdir,
worktree = worktree,
relpath = relpath,
revisionExpr = revisionExpr,
revisionId = None,
rebase = rebase,
groups = groups,
sync_c = sync_c,
sync_s = sync_s,
upstream = upstream,
parent = parent)
for n in node.childNodes:
if n.nodeName == 'copyfile':
self._ParseCopyFile(project, n)
if n.nodeName == 'annotation':
self._ParseAnnotation(project, n)
if n.nodeName == 'project':
project.subprojects.append(self._ParseProject(n, parent = project))
return project
def GetProjectPaths(self, name, path):
relpath = path
if self.IsMirror:
worktree = None
gitdir = os.path.join(self.topdir, '%s.git' % name)
else:
worktree = os.path.join(self.topdir, path).replace('\\', '/')
gitdir = os.path.join(self.repodir, 'projects', '%s.git' % path)
return relpath, worktree, gitdir
def GetSubprojectName(self, parent, submodule_path):
return os.path.join(parent.name, submodule_path)
def _JoinRelpath(self, parent_relpath, relpath):
return os.path.join(parent_relpath, relpath)
def _UnjoinRelpath(self, parent_relpath, relpath):
return os.path.relpath(relpath, parent_relpath)
def GetSubprojectPaths(self, parent, path):
relpath = self._JoinRelpath(parent.relpath, path)
gitdir = os.path.join(parent.gitdir, 'subprojects', '%s.git' % path)
if self.IsMirror:
worktree = None
else:
worktree = os.path.join(parent.worktree, path).replace('\\', '/')
return relpath, worktree, gitdir
def _ParseCopyFile(self, project, node):
src = self._reqatt(node, 'src')
dest = self._reqatt(node, 'dest')
if not self.IsMirror:
# src is project relative;
# dest is relative to the top of the tree
project.AddCopyFile(src, dest, os.path.join(self.topdir, dest))
def _ParseAnnotation(self, project, node):
name = self._reqatt(node, 'name')
value = self._reqatt(node, 'value')
try:
keep = self._reqatt(node, 'keep').lower()
except ManifestParseError:
keep = "true"
if keep != "true" and keep != "false":
      raise ManifestParseError('optional "keep" attribute must be "true" or "false"')
project.AddAnnotation(name, value, keep)
def _get_remote(self, node):
name = node.getAttribute('remote')
if not name:
return None
v = self._remotes.get(name)
if not v:
      raise ManifestParseError(
          "remote %s not defined in %s" %
          (name, self.manifestFile))
return v
def _reqatt(self, node, attname):
"""
reads a required attribute from the node.
"""
v = node.getAttribute(attname)
if not v:
      raise ManifestParseError(
          "no %s in <%s> within %s" %
          (attname, node.nodeName, self.manifestFile))
return v
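# A small self-contained sketch (not part of the original tool) showing the
# parsing pattern used above: locate the <manifest> root with
# xml.dom.minidom and read required attributes the way _reqatt does. The
# sample XML is illustrative only.
if __name__ == '__main__':
  _SAMPLE = """<manifest>
  <remote name="origin" fetch="https://example.com/"/>
  <default remote="origin" revision="master"/>
  <project name="demo/project" path="demo"/>
</manifest>"""
  doc = xml.dom.minidom.parseString(_SAMPLE)
  for manifest in doc.childNodes:
    if manifest.nodeName == 'manifest':
      break
  else:
    raise ManifestParseError('no <manifest> in sample')
  for node in manifest.childNodes:
    if node.nodeName == 'project':
      name = node.getAttribute('name')
      if not name:  # equivalent of _reqatt: a missing/empty attribute is fatal
        raise ManifestParseError('no name in <project>')
      print('project %s -> path %s' % (name, node.getAttribute('path') or name))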
|
|
import json
import os
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.shortcuts import render
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from unidecode import unidecode
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.admin.mail import send_mail
from wagtail.core.models import Orderable, Page
from .forms import FormBuilder, WagtailAdminFormPageForm
from .views import SubmissionsListView
FORM_FIELD_CHOICES = (
('singleline', _('Single line text')),
('multiline', _('Multi-line text')),
('email', _('Email')),
('number', _('Number')),
('url', _('URL')),
('checkbox', _('Checkbox')),
('checkboxes', _('Checkboxes')),
('dropdown', _('Drop down')),
('multiselect', _('Multiple select')),
('radio', _('Radio buttons')),
('date', _('Date')),
('datetime', _('Date/time')),
('hidden', _('Hidden field')),
)
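# Note: ``dict(FORM_FIELD_CHOICES)`` maps a stored ``field_type`` value such
# as 'email' to its translatable label; the FormBuilder imported above is
# responsible for turning each type into a concrete Django form field.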
class AbstractFormSubmission(models.Model):
"""
Data for a form submission.
    You can create a custom submission model based on this abstract model,
    for example if you need to save additional data or a reference to a user.
"""
form_data = models.TextField()
page = models.ForeignKey(Page, on_delete=models.CASCADE)
submit_time = models.DateTimeField(verbose_name=_('submit time'), auto_now_add=True)
def get_data(self):
"""
Returns dict with form data.
You can override this method to add additional data.
"""
form_data = json.loads(self.form_data)
form_data.update({
'submit_time': self.submit_time,
})
return form_data
def __str__(self):
return self.form_data
class Meta:
abstract = True
verbose_name = _('form submission')
class FormSubmission(AbstractFormSubmission):
"""Data for a Form submission."""
class AbstractFormField(Orderable):
"""
Database Fields required for building a Django Form field.
"""
label = models.CharField(
verbose_name=_('label'),
max_length=255,
help_text=_('The label of the form field')
)
field_type = models.CharField(verbose_name=_('field type'), max_length=16, choices=FORM_FIELD_CHOICES)
required = models.BooleanField(verbose_name=_('required'), default=True)
choices = models.TextField(
verbose_name=_('choices'),
blank=True,
help_text=_('Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.')
)
default_value = models.CharField(
verbose_name=_('default value'),
max_length=255,
blank=True,
help_text=_('Default value. Comma separated values supported for checkboxes.')
)
help_text = models.CharField(verbose_name=_('help text'), max_length=255, blank=True)
@property
def clean_name(self):
        # unidecode returns an ASCII string, while slugify expects a unicode
        # string; slugify in turn returns a safe string, which we convert to
        # a plain str for use as the form field name.
return str(slugify(str(unidecode(self.label))))
panels = [
FieldPanel('label'),
FieldPanel('help_text'),
FieldPanel('required'),
FieldPanel('field_type', classname="formbuilder-type"),
FieldPanel('choices', classname="formbuilder-choices"),
FieldPanel('default_value', classname="formbuilder-default"),
]
class Meta:
abstract = True
ordering = ['sort_order']
class AbstractForm(Page):
"""
    A Form Page. Pages implementing a form should inherit from it.
"""
base_form_class = WagtailAdminFormPageForm
form_builder = FormBuilder
submissions_list_view_class = SubmissionsListView
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(self, 'landing_page_template'):
name, ext = os.path.splitext(self.template)
self.landing_page_template = name + '_landing' + ext
class Meta:
abstract = True
def get_form_fields(self):
"""
Form page expects `form_fields` to be declared.
If you want to change backwards relation name,
you need to override this method.
"""
return self.form_fields.all()
def get_data_fields(self):
"""
Returns a list of tuples with (field_name, field_label).
"""
data_fields = [
('submit_time', _('Submission date')),
]
data_fields += [
(field.clean_name, field.label)
for field in self.get_form_fields()
]
return data_fields
def get_form_class(self):
fb = self.form_builder(self.get_form_fields())
return fb.get_form_class()
def get_form_parameters(self):
return {}
def get_form(self, *args, **kwargs):
form_class = self.get_form_class()
form_params = self.get_form_parameters()
form_params.update(kwargs)
return form_class(*args, **form_params)
def get_landing_page_template(self, request, *args, **kwargs):
return self.landing_page_template
def get_submission_class(self):
"""
        Returns the submission class.
        You can override this method to provide a custom submission class.
        Your class must inherit from AbstractFormSubmission.
"""
return FormSubmission
def process_form_submission(self, form):
"""
        Accepts a form instance with submitted data, user and page.
        Creates a submission instance.
        You can override this method if you want custom creation logic,
        for example to save a reference to a user.
"""
return self.get_submission_class().objects.create(
form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),
page=self,
)
def render_landing_page(self, request, form_submission=None, *args, **kwargs):
"""
Renders the landing page.
You can override this method to return a different HttpResponse as
landing page. E.g. you could return a redirect to a separate page.
"""
context = self.get_context(request)
context['form_submission'] = form_submission
return render(
request,
self.get_landing_page_template(request),
context
)
def serve_submissions_list_view(self, request, *args, **kwargs):
"""
        Returns the submissions list view for the admin.
        `submissions_list_view_class` can be set to provide a custom view class.
        Your class must inherit from SubmissionsListView.
"""
view = self.submissions_list_view_class.as_view()
return view(request, form_page=self, *args, **kwargs)
def serve(self, request, *args, **kwargs):
if request.method == 'POST':
form = self.get_form(request.POST, request.FILES, page=self, user=request.user)
if form.is_valid():
form_submission = self.process_form_submission(form)
return self.render_landing_page(request, form_submission, *args, **kwargs)
else:
form = self.get_form(page=self, user=request.user)
context = self.get_context(request)
context['form'] = form
return render(
request,
self.get_template(request),
context
)
preview_modes = [
('form', _('Form')),
('landing', _('Landing page')),
]
def serve_preview(self, request, mode):
if mode == 'landing':
request.is_preview = True
return self.render_landing_page(request)
else:
return super().serve_preview(request, mode)
class AbstractEmailForm(AbstractForm):
"""
    A Form Page that sends email. Pages implementing a form whose
    submissions should be emailed should inherit from it.
"""
to_address = models.CharField(
verbose_name=_('to address'), max_length=255, blank=True,
help_text=_("Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma.")
)
from_address = models.CharField(verbose_name=_('from address'), max_length=255, blank=True)
subject = models.CharField(verbose_name=_('subject'), max_length=255, blank=True)
def process_form_submission(self, form):
submission = super().process_form_submission(form)
if self.to_address:
self.send_mail(form)
return submission
def send_mail(self, form):
addresses = [x.strip() for x in self.to_address.split(',')]
content = []
for field in form:
value = field.value()
if isinstance(value, list):
value = ', '.join(value)
content.append('{}: {}'.format(field.label, value))
content = '\n'.join(content)
send_mail(self.subject, content, addresses, self.from_address,)
class Meta:
abstract = True
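# A minimal sketch (not part of this module) of how a site would typically
# wire these abstractions together. ``ParentalKey`` comes from
# django-modelcluster and ``InlinePanel`` from wagtail.admin.edit_handlers;
# the class names and panel layout are illustrative assumptions.
#
# from modelcluster.fields import ParentalKey
# from wagtail.admin.edit_handlers import InlinePanel
#
# class FormField(AbstractFormField):
#     page = ParentalKey('FormPage', on_delete=models.CASCADE,
#                        related_name='form_fields')
#
# class FormPage(AbstractEmailForm):
#     content_panels = AbstractEmailForm.content_panels + [
#         InlinePanel('form_fields', label='Form fields'),
#         FieldPanel('to_address'),
#         FieldPanel('from_address'),
#         FieldPanel('subject'),
#     ]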
|
|
# encoding: utf-8
# Copyright 2013 maker
# License
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TicketStatus'
db.create_table('services_ticketstatus', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('hidden', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('services', ['TicketStatus'])
# Adding model 'Service'
db.create_table('services_service', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='child_set', null=True, to=orm['services.Service'])),
('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('services', ['Service'])
# Adding model 'ServiceLevelAgreement'
db.create_table('services_servicelevelagreement', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('service', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Service'])),
('default', self.gf('django.db.models.fields.BooleanField')(default=False)),
('response_time', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('uptime_rate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('available_from', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('available_to', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('client', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='client_sla', null=True, to=orm['identities.Contact'])),
('provider', self.gf('django.db.models.fields.related.ForeignKey')(related_name='provider_sla', to=orm['identities.Contact'])),
))
db.send_create_signal('services', ['ServiceLevelAgreement'])
# Adding model 'ServiceAgent'
db.create_table('services_serviceagent', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('related_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.User'])),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('occupied', self.gf('django.db.models.fields.BooleanField')(default=False)),
('available_from', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('available_to', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
))
db.send_create_signal('services', ['ServiceAgent'])
# Adding model 'TicketQueue'
db.create_table('services_ticketqueue', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='child_set', null=True, to=orm['services.TicketQueue'])),
('default_ticket_status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.TicketStatus'], null=True, blank=True)),
('default_ticket_priority', self.gf('django.db.models.fields.IntegerField')(default=3)),
('default_service', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Service'], null=True, blank=True)),
('waiting_time', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('next_queue', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='previous_set', null=True, to=orm['services.TicketQueue'])),
('ticket_code', self.gf('django.db.models.fields.CharField')(default='', max_length=8, null=True, blank=True)),
('message_stream', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['messaging.MessageStream'], null=True, blank=True)),
('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('services', ['TicketQueue'])
# Adding model 'Ticket'
db.create_table('services_ticket', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('reference', self.gf('django.db.models.fields.CharField')(max_length=256)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('caller', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['identities.Contact'], null=True, blank=True)),
('urgency', self.gf('django.db.models.fields.IntegerField')(default=3)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=3)),
('status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.TicketStatus'])),
('service', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Service'], null=True, blank=True)),
('sla', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.ServiceLevelAgreement'], null=True, blank=True)),
('queue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.TicketQueue'], null=True, blank=True)),
('message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['messaging.Message'], null=True, blank=True)),
('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('resolution', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('services', ['Ticket'])
# Adding M2M table for field assigned on 'Ticket'
db.create_table('services_ticket_assigned', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('ticket', models.ForeignKey(orm['services.ticket'], null=False)),
('serviceagent', models.ForeignKey(orm['services.serviceagent'], null=False))
))
db.create_unique('services_ticket_assigned', ['ticket_id', 'serviceagent_id'])
# Adding model 'TicketRecord'
db.create_table('services_ticketrecord', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
('ticket', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Ticket'])),
('record_type', self.gf('django.db.models.fields.CharField')(max_length=256)),
('message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['messaging.Message'], null=True, blank=True)),
('details', self.gf('django.db.models.fields.TextField')()),
('notify', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('services', ['TicketRecord'])
def backwards(self, orm):
# Deleting model 'TicketStatus'
db.delete_table('services_ticketstatus')
# Deleting model 'Service'
db.delete_table('services_service')
# Deleting model 'ServiceLevelAgreement'
db.delete_table('services_servicelevelagreement')
# Deleting model 'ServiceAgent'
db.delete_table('services_serviceagent')
# Deleting model 'TicketQueue'
db.delete_table('services_ticketqueue')
# Deleting model 'Ticket'
db.delete_table('services_ticket')
# Removing M2M table for field assigned on 'Ticket'
db.delete_table('services_ticket_assigned')
# Deleting model 'TicketRecord'
db.delete_table('services_ticketrecord')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'everybody_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'everybody_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'everybody_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']"}),
'group_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
'user_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'identities.contact': {
'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'related_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'})
},
'identities.contactfield': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'identities.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'messaging.emailbox': {
'Meta': {'ordering': "['last_updated']", 'object_name': 'EmailBox', '_ormbases': ['core.Object']},
'email_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'email_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_username': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'messaging.message': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Message', '_ormbases': ['core.Object']},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'read_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'read_by_user'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['messaging.Message']"}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stream'", 'to': "orm['messaging.MessageStream']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'messaging.messagestream': {
'Meta': {'ordering': "['name']", 'object_name': 'MessageStream', '_ormbases': ['core.Object']},
'email_incoming': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'incoming'", 'null': 'True', 'to': "orm['messaging.EmailBox']"}),
'email_outgoing': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outgoing'", 'null': 'True', 'to': "orm['messaging.EmailBox']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'services.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['services.Service']"})
},
'services.serviceagent': {
'Meta': {'ordering': "('related_user', '-active', 'occupied')", 'object_name': 'ServiceAgent', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'available_from': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'available_to': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'occupied': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"})
},
'services.servicelevelagreement': {
'Meta': {'ordering': "('name', 'client')", 'object_name': 'ServiceLevelAgreement', '_ormbases': ['core.Object']},
'available_from': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'available_to': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'client_sla'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provider_sla'", 'to': "orm['identities.Contact']"}),
'response_time': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Service']"}),
'uptime_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'services.ticket': {
'Meta': {'ordering': "('-priority', 'reference')", 'object_name': 'Ticket', '_ormbases': ['core.Object']},
'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['services.ServiceAgent']", 'null': 'True', 'blank': 'True'}),
'caller': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.Message']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.TicketQueue']", 'null': 'True', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Service']", 'null': 'True', 'blank': 'True'}),
'sla': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.ServiceLevelAgreement']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.TicketStatus']"}),
'urgency': ('django.db.models.fields.IntegerField', [], {'default': '3'})
},
'services.ticketqueue': {
'Meta': {'ordering': "('name', '-active', 'ticket_code')", 'object_name': 'TicketQueue', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Service']", 'null': 'True', 'blank': 'True'}),
'default_ticket_priority': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'default_ticket_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.TicketStatus']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'message_stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.MessageStream']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'next_queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'previous_set'", 'null': 'True', 'to': "orm['services.TicketQueue']"}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['services.TicketQueue']"}),
'ticket_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'waiting_time': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'services.ticketrecord': {
'Meta': {'ordering': "['ticket']", 'object_name': 'TicketRecord', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.Message']", 'null': 'True', 'blank': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.Ticket']"})
},
'services.ticketstatus': {
'Meta': {'ordering': "('hidden', '-active', 'name')", 'object_name': 'TicketStatus', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['services']
|
|
import unittest, struct
import os
import sys
from test import support
import math
from math import isinf, isnan, copysign, ldexp
import operator
import random, fractions
INF = float("inf")
NAN = float("nan")
have_getformat = hasattr(float, "__getformat__")
requires_getformat = unittest.skipUnless(have_getformat,
"requires __getformat__")
requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"),
"requires __setformat__")
# locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float(b" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
self.assertRaises(ValueError, float, ".nan")
self.assertRaises(ValueError, float, "+.inf")
self.assertRaises(ValueError, float, ".")
self.assertRaises(ValueError, float, "-.")
self.assertRaises(ValueError, float, b"-")
self.assertRaises(TypeError, float, {})
# Lone surrogate
self.assertRaises(UnicodeEncodeError, float, '\uD8F0')
# check that we don't accept alternate exponent markers
self.assertRaises(ValueError, float, "-1.7d29")
self.assertRaises(ValueError, float, "3D-14")
self.assertEqual(float(" \u0663.\u0661\u0664 "), 3.14)
self.assertEqual(float("\N{EM SPACE}3.14\N{EN SPACE}"), 3.14)
# extra long strings should not be a problem
float(b'.' + b'1'*1000)
float('.' + '1'*1000)
def test_error_message(self):
testlist = ('\xbd', '123\xbd', ' 123 456 ')
for s in testlist:
try:
float(s)
except ValueError as e:
self.assertIn(s.strip(), e.args[0])
else:
self.fail("Expected int(%r) to raise a ValueError", s)
@support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
        # set locale to something that doesn't use '.' for the decimal point;
        # float must not accept the locale-specific decimal point, but
        # it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
return
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
        self.assertEqual(float(" .25e-1 "), .025)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo0:
def __float__(self):
return 42.
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
# Issue 5759: __float__ not called on str subclasses (though it is on
# unicode subclasses).
class FooStr(str):
def __float__(self):
return float(str(self)) + 1
self.assertAlmostEqual(float(Foo0()), 42.)
self.assertAlmostEqual(float(Foo1()), 42.)
self.assertAlmostEqual(float(Foo2()), 42.)
self.assertAlmostEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
self.assertAlmostEqual(float(FooStr('8')), 9.)
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
def test_float_containment(self):
floats = (INF, -INF, 0.0, 1.0, NAN)
for f in floats:
self.assertIn(f, [f])
self.assertIn(f, (f,))
self.assertIn(f, {f})
self.assertIn(f, {f: None})
self.assertEqual([f].count(f), 1, "[].count('%r') != 1" % f)
self.assertIn(f, floats)
for f in floats:
# nonidentical containers, same type, same contents
self.assertTrue([f] == [f], "[%r] != [%r]" % (f, f))
self.assertTrue((f,) == (f,), "(%r,) != (%r,)" % (f, f))
self.assertTrue({f} == {f}, "{%r} != {%r}" % (f, f))
self.assertTrue({f : None} == {f: None}, "{%r : None} != "
"{%r : None}" % (f, f))
# identical containers
l, t, s, d = [f], (f,), {f}, {f: None}
self.assertTrue(l == l, "[%r] not equal to itself" % f)
self.assertTrue(t == t, "(%r,) not equal to itself" % f)
self.assertTrue(s == s, "{%r} not equal to itself" % f)
self.assertTrue(d == d, "{%r : None} not equal to itself" % f)
def assertEqualAndEqualSign(self, a, b):
        # fail unless a == b and a and b have the same sign bit;
        # the only difference from assertEqual is that this test
        # distinguishes -0.0 and 0.0.
self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b)))
@support.requires_IEEE_754
def test_float_mod(self):
# Check behaviour of % operator for IEEE 754 special cases.
# In particular, check signs of zeros.
mod = operator.mod
self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0)
self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100)
self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100)
self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0)
self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0)
@support.requires_IEEE_754
def test_float_pow(self):
# test builtin pow and ** operator for IEEE 754 special cases.
# Special cases taken from section F.9.4.4 of the C99 specification
for pow_op in pow, operator.pow:
# x**NAN is NAN for any x except 1
self.assertTrue(isnan(pow_op(-INF, NAN)))
self.assertTrue(isnan(pow_op(-2.0, NAN)))
self.assertTrue(isnan(pow_op(-1.0, NAN)))
self.assertTrue(isnan(pow_op(-0.5, NAN)))
self.assertTrue(isnan(pow_op(-0.0, NAN)))
self.assertTrue(isnan(pow_op(0.0, NAN)))
self.assertTrue(isnan(pow_op(0.5, NAN)))
self.assertTrue(isnan(pow_op(2.0, NAN)))
self.assertTrue(isnan(pow_op(INF, NAN)))
self.assertTrue(isnan(pow_op(NAN, NAN)))
# NAN**y is NAN for any y except +-0
self.assertTrue(isnan(pow_op(NAN, -INF)))
self.assertTrue(isnan(pow_op(NAN, -2.0)))
self.assertTrue(isnan(pow_op(NAN, -1.0)))
self.assertTrue(isnan(pow_op(NAN, -0.5)))
self.assertTrue(isnan(pow_op(NAN, 0.5)))
self.assertTrue(isnan(pow_op(NAN, 1.0)))
self.assertTrue(isnan(pow_op(NAN, 2.0)))
self.assertTrue(isnan(pow_op(NAN, INF)))
# (+-0)**y raises ZeroDivisionError for y a negative odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0)
# (+-0)**y raises ZeroDivisionError for y finite and negative
# but not an odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5)
# (+-0)**y is +-0 for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0)
# (+-0)**y is 0 for y finite and positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0)
# (-1)**+-inf is 1
self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0)
# 1**y is 1 for any y, even if y is an infinity or nan
self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0)
# x**+-0 is 1 for any x, even if x is a zero, infinity, or nan
self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0)
# x**y defers to complex pow for finite negative x and
# non-integral y.
self.assertEqual(type(pow_op(-2.0, -0.5)), complex)
self.assertEqual(type(pow_op(-2.0, 0.5)), complex)
self.assertEqual(type(pow_op(-1.0, -0.5)), complex)
self.assertEqual(type(pow_op(-1.0, 0.5)), complex)
self.assertEqual(type(pow_op(-0.5, -0.5)), complex)
self.assertEqual(type(pow_op(-0.5, 0.5)), complex)
# x**-INF is INF for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF)
self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF)
# x**-INF is 0 for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0)
# x**INF is 0 for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0)
# x**INF is INF for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, INF), INF)
self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(INF, INF), INF)
# (-INF)**y is -0.0 for y a negative odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0)
# (-INF)**y is 0.0 for y negative but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0)
# (-INF)**y is -INF for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF)
# (-INF)**y is INF for y positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF)
# INF**y is INF for y positive
self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF)
self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF)
# INF**y is 0.0 for y negative
self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0)
# basic checks not covered by the special cases above
self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0)
self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0)
# 1 ** large and -1 ** large; some libms apparently
# have problems with these
self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0)
# check sign for results that underflow to 0
self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0)
self.assertEqual(type(pow_op(-2.0, -2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0)
self.assertEqual(type(pow_op(-0.5, 2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0)
# check we don't raise an exception for subnormal results,
# and validate signs. Tests currently disabled, since
# they fail on systems where a subnormal result from pow
# is flushed to zero (e.g. Debian/ia64.)
#self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315)
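        # A rough sketch of why these disabled checks target subnormals,
        # assuming IEEE 754 doubles: the smallest normal double is 2**-1022
        # (about 2.2e-308), so a value like 0.5**1048 == 2**-1048 (about
        # 3.3e-316) is only representable as a subnormal. On libms that flush
        # subnormals to zero, pow returns 0.0 there and the checks would fail.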
@requires_setformat
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
        self.save_formats = {'double': float.__getformat__('double'),
                             'float': float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assertIn(float.__getformat__('double'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertIn(float.__getformat__('float'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = b'\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = bytes(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = b'\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = bytes(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = b'\x7f\x80\x00\x00'
LE_FLOAT_INF = bytes(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = b'\x7f\xc0\x00\x00'
LE_FLOAT_NAN = bytes(reversed(BE_FLOAT_NAN))
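# These bit patterns can be cross-checked on an IEEE 754 platform: e.g.
# struct.pack('>d', float('inf')) should equal BE_DOUBLE_INF, and
# struct.pack('>f', float('inf')) should equal BE_FLOAT_INF. The NaN
# patterns are the default quiet NaNs (sign 0, all-ones exponent, leading
# mantissa bit set).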
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
@requires_setformat
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
class IEEEFormatTestCase(unittest.TestCase):
@support.requires_IEEE_754
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
@support.requires_IEEE_754
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
class FormatTestCase(unittest.TestCase):
def test_format(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
self.assertEqual(format(0.0, 'f'), '0.000000')
# the default is 'g', except for empty format spec
self.assertEqual(format(0.0, ''), '0.0')
self.assertEqual(format(0.01, ''), '0.01')
self.assertEqual(format(0.01, 'g'), '0.01')
# empty presentation type should format in the same way as str
# (issue 5920)
x = 100/7.
self.assertEqual(format(x, ''), str(x))
self.assertEqual(format(x, '-'), str(x))
self.assertEqual(format(x, '>'), str(x))
self.assertEqual(format(x, '2'), str(x))
self.assertEqual(format(1.0, 'f'), '1.000000')
self.assertEqual(format(-1.0, 'f'), '-1.000000')
self.assertEqual(format( 1.0, ' f'), ' 1.000000')
self.assertEqual(format(-1.0, ' f'), '-1.000000')
self.assertEqual(format( 1.0, '+f'), '+1.000000')
self.assertEqual(format(-1.0, '+f'), '-1.000000')
# % formatting
self.assertEqual(format(-1.0, '%'), '-100.000000%')
# conversion to string should fail
self.assertRaises(ValueError, format, 3.0, "s")
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if format_spec not in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# issue 3382
self.assertEqual(format(NAN, 'f'), 'nan')
self.assertEqual(format(NAN, 'F'), 'NAN')
self.assertEqual(format(INF, 'f'), 'inf')
self.assertEqual(format(INF, 'F'), 'INF')
@support.requires_IEEE_754
def test_format_testfile(self):
with open(format_testfile) as testfile:
for line in testfile:
if line.startswith('--'):
continue
line = line.strip()
if not line:
continue
lhs, rhs = map(str.strip, line.split('->'))
fmt, arg = lhs.split()
self.assertEqual(fmt % float(arg), rhs)
self.assertEqual(fmt % -float(arg), '-' + rhs)
def test_issue5864(self):
self.assertEqual(format(123.456, '.4'), '123.5')
self.assertEqual(format(1234.56, '.4'), '1.235e+03')
self.assertEqual(format(12345.6, '.4'), '1.235e+04')
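        # A bare precision with no presentation type ('.4' here) behaves much
        # like 'g': keep four significant digits, switching to scientific
        # notation at larger magnitudes, as the three cases above show.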
class ReprTestCase(unittest.TestCase):
def test_repr(self):
        with open(os.path.join(os.path.split(__file__)[0],
                               'floating_points.txt')) as floats_file:
            for line in floats_file:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                v = eval(line)
                self.assertEqual(v, eval(repr(v)))
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_short_repr(self):
# test short float repr introduced in Python 3.1. One aspect
# of this repr is that we get some degree of str -> float ->
# str roundtripping. In particular, for any numeric string
# containing 15 or fewer significant digits, those exact same
# digits (modulo trailing zeros) should appear in the output.
# No more repr(0.03) -> "0.029999999999999999"!
test_strings = [
# output always includes *either* a decimal point and at
# least one digit after that point, or an exponent.
'0.0',
'1.0',
'0.01',
'0.02',
'0.03',
'0.04',
'0.05',
'1.23456789',
'10.0',
'100.0',
# values >= 1e16 get an exponent...
'1000000000000000.0',
'9999999999999990.0',
'1e+16',
'1e+17',
# ... and so do values < 1e-4
'0.001',
'0.001001',
'0.00010000000000001',
'0.0001',
'9.999999999999e-05',
'1e-05',
# values designed to provoke failure if the FPU rounding
# precision isn't set correctly
'8.72293771110361e+25',
'7.47005307342313e+26',
'2.86438000439698e+28',
'8.89142905246179e+28',
'3.08578087079232e+35',
]
for s in test_strings:
negs = '-'+s
self.assertEqual(s, repr(float(s)))
self.assertEqual(negs, repr(float(negs)))
# Since Python 3.2, repr and str are identical
self.assertEqual(repr(float(s)), str(float(s)))
self.assertEqual(repr(float(negs)), str(float(negs)))
@support.requires_IEEE_754
class RoundTestCase(unittest.TestCase):
def test_inf_nan(self):
self.assertRaises(OverflowError, round, INF)
self.assertRaises(OverflowError, round, -INF)
self.assertRaises(ValueError, round, NAN)
self.assertRaises(TypeError, round, INF, 0.0)
self.assertRaises(TypeError, round, -INF, 1.0)
self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
self.assertRaises(TypeError, round, -0.0, 1j)
def test_large_n(self):
for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
self.assertEqual(round(123.456, n), 123.456)
self.assertEqual(round(-123.456, n), -123.456)
self.assertEqual(round(1e300, n), 1e300)
self.assertEqual(round(1e-320, n), 1e-320)
self.assertEqual(round(1e150, 300), 1e150)
self.assertEqual(round(1e300, 307), 1e300)
self.assertEqual(round(-3.1415, 308), -3.1415)
self.assertEqual(round(1e150, 309), 1e150)
self.assertEqual(round(1.4e-315, 315), 1e-315)
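        # Rounding with a huge ndigits is a no-op: the smallest positive
        # double is 2**-1074 (about 5e-324), so no finite double carries
        # information beyond roughly the 323rd decimal place.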
def test_small_n(self):
for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
self.assertEqual(round(123.456, n), 0.0)
self.assertEqual(round(-123.456, n), -0.0)
self.assertEqual(round(1e300, n), 0.0)
self.assertEqual(round(1e-320, n), 0.0)
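        # Conversely, with a hugely negative ndigits the candidate multiples
        # of 10**-n exceed the float range entirely, so every finite input
        # collapses to zero with its sign preserved.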
def test_overflow(self):
self.assertRaises(OverflowError, round, 1.6e308, -308)
self.assertRaises(OverflowError, round, -1.7e308, -308)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_previous_round_bugs(self):
# particular cases that have occurred in bug reports
self.assertEqual(round(562949953421312.5, 1),
562949953421312.5)
self.assertEqual(round(56294995342131.5, 3),
56294995342131.5)
# round-half-even
self.assertEqual(round(25.0, -1), 20.0)
self.assertEqual(round(35.0, -1), 40.0)
self.assertEqual(round(45.0, -1), 40.0)
self.assertEqual(round(55.0, -1), 60.0)
self.assertEqual(round(65.0, -1), 60.0)
self.assertEqual(round(75.0, -1), 80.0)
self.assertEqual(round(85.0, -1), 80.0)
self.assertEqual(round(95.0, -1), 100.0)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_matches_float_format(self):
# round should give the same results as float formatting
for i in range(500):
x = i/1000.
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
for i in range(5, 5000, 10):
x = i/1000.
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
for i in range(500):
x = random.random()
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
def test_format_specials(self):
# Test formatting of nans and infs.
def test(fmt, value, expected):
# Test with both % and format().
self.assertEqual(fmt % value, expected, fmt)
fmt = fmt[1:] # strip off the %
self.assertEqual(format(value, fmt), expected, fmt)
for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
'%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
pfmt = '%+' + fmt[1:]
sfmt = '% ' + fmt[1:]
test(fmt, INF, 'inf')
test(fmt, -INF, '-inf')
test(fmt, NAN, 'nan')
test(fmt, -NAN, 'nan')
# When asking for a sign, it's always provided. nans are
# always positive.
test(pfmt, INF, '+inf')
test(pfmt, -INF, '-inf')
test(pfmt, NAN, '+nan')
test(pfmt, -NAN, '+nan')
# When using ' ' for a sign code, only infs can be negative.
# Others have a space.
test(sfmt, INF, ' inf')
test(sfmt, -INF, '-inf')
test(sfmt, NAN, ' nan')
test(sfmt, -NAN, ' nan')
# Beginning with Python 2.6, float has had cross-platform compatible
# ways to create and represent inf and nan.
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assertTrue(isinf(float("inf")))
self.assertTrue(isinf(float("+inf")))
self.assertTrue(isinf(float("-inf")))
self.assertTrue(isinf(float("infinity")))
self.assertTrue(isinf(float("+infinity")))
self.assertTrue(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
self.assertRaises(ValueError, float, "++Inf")
self.assertRaises(ValueError, float, "-+inf")
self.assertRaises(ValueError, float, "+-infinity")
self.assertRaises(ValueError, float, "--Infinity")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
self.assertTrue(isnan(float("nan")))
self.assertTrue(isnan(float("+nan")))
self.assertTrue(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
self.assertRaises(ValueError, float, "++nan")
self.assertRaises(ValueError, float, "-+NAN")
self.assertRaises(ValueError, float, "+-NaN")
self.assertRaises(ValueError, float, "--nAn")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
def notest_float_nan(self):
self.assertTrue(NAN.is_nan())
self.assertFalse(INF.is_nan())
self.assertFalse((0.).is_nan())
def notest_float_inf(self):
self.assertTrue(INF.is_inf())
self.assertFalse(NAN.is_inf())
self.assertFalse((0.).is_inf())
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
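    # In decimal terms (IEEE 754 binary64): MAX is about
    # 1.7976931348623157e+308, MIN is 2**-1022 (about
    # 2.2250738585072014e-308), TINY is 2**-1074 (about 5e-324), and EPS is
    # 2**-52 (about 2.220446049250313e-16).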
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'an',
'nf',
'nfinity',
'inity',
'iinity',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
'0x1p\uff10', # fullwidth Unicode digits
'\uff10x1p0',
'0x\uff11p0',
'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_whitespace(self):
value_pairs = [
('inf', INF),
('-Infinity', -INF),
('nan', NAN),
('1.0', 1.0),
('-0x.2', -0.125),
('-0.0', -0.0)
]
whitespace = [
'',
' ',
'\t',
'\n',
'\n \t',
'\f',
'\v',
'\r'
]
for inp, expected in value_pairs:
for lead in whitespace:
for trail in whitespace:
got = fromHex(lead + inp + trail)
self.identical(got, expected)
def test_from_hex(self):
        MIN = self.MIN
        MAX = self.MAX
        TINY = self.TINY
        EPS = self.EPS
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
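        # The pattern above is round-half-even at the bottom of the subnormal
        # range: 0x1p-1076 is TINY/4 and rounds down to 0.0, 0x2p-1076 is
        # exactly TINY/2 and ties to the even neighbour 0.0, while 0x6p-1076
        # (1.5*TINY) ties upward to the even multiple 2*TINY.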
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
for i in range(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_main():
support.run_unittest(
GeneralFloatCases,
FormatFunctionsTestCase,
UnknownFormatTestCase,
IEEEFormatTestCase,
FormatTestCase,
ReprTestCase,
RoundTestCase,
InfNanTest,
HexFloatTestCase,
)
if __name__ == '__main__':
test_main()
|
|
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper script used for creating a new release or hotfix branch on GitHub.
ONLY RELEASE COORDINATORS SHOULD USE THIS SCRIPT.
Usage: Run this script from your oppia root folder:
For release branch:
python -m scripts.release_scripts.cut_release_or_hotfix_branch
--release_version="x.y.z"
where x.y.z is the new version of Oppia, e.g. 2.5.3. The generated branch
name will be release-x.y.z, e.g. release-2.5.3.
For hotfix branch:
python -m scripts.release_scripts.cut_release_or_hotfix_branch
--release_version="x.y.z" --hotfix_number=d
where x.y.z is the new version of Oppia, e.g. 2.5.3,
and d is the number of the hotfix being created, e.g. 1. The generated
branch name will be release-x.y.z-hotfix-d, e.g. release-2.5.3-hotfix-1.
"""
from __future__ import annotations
import argparse
import json
import re
import subprocess
from core import constants
from core import utils
from scripts import common
def require_release_version_to_have_correct_format(
arg, pattern=re.compile(r'\d\.\d\.\d')):
"""Checks that the release version name matches the expected pattern.
Args:
arg: str. The release version name.
        pattern: RegularExpression. The pattern that the release version
            should match.
Raises:
argparse.ArgumentTypeError. The release version name does not match
the pattern.
Returns:
str. The release version name with correct pattern.
"""
if not pattern.match(arg):
raise argparse.ArgumentTypeError(
'The format of "release_version" should be: x.x.x')
return arg
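# A couple of illustrative calls: '2.5.3' passes through unchanged, while
# inputs like '2.5' or 'v2.5.3' raise argparse.ArgumentTypeError. Note that
# re.match only anchors at the start of the string, so an input such as
# '2.5.30' would also be accepted; the pattern expects single-digit
# components.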
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--release_version',
help=(
'version of the release for which the branch cut is being made or the '
'hotfix is being created'),
type=require_release_version_to_have_correct_format)
_PARSER.add_argument('--hotfix_number', default=0)
def verify_target_branch_does_not_already_exist(remote_alias, new_branch_name):
"""Checks that the new release branch doesn't already exist locally or
remotely.
Args:
remote_alias: str. The alias that points to the remote oppia
repository. Example: When calling git remote -v, you get:
upstream https://github.com/oppia/oppia.git (fetch),
where 'upstream' is the alias that points to the remote oppia
repository.
new_branch_name: str. The name of the new branch to cut.
Raises:
Exception. The target branch name already exists locally.
Exception. The target branch name already exists on the remote
oppia repository.
"""
git_branch_output = subprocess.check_output(
['git', 'branch']).decode().split('\n')
if new_branch_name in git_branch_output:
raise Exception(
'ERROR: The target branch name already exists locally. '
'Run "git branch -D %s" to delete it.' % new_branch_name)
git_ls_remote_output = subprocess.check_output(
['git', 'ls-remote', '--heads', remote_alias]).decode().split('\n')
remote_branch_ref = 'refs/heads/%s' % new_branch_name
if remote_branch_ref in git_ls_remote_output:
raise Exception(
'ERROR: The target branch name already exists on the remote repo.')
def verify_target_version_compatible_with_latest_release(
target_version):
"""Checks that the target version is consistent with the latest released
version on GitHub.
Args:
target_version: str. The release version.
Raises:
Exception. Failed to fetch latest release info from GitHub.
Exception. Could not parse version number of latest GitHub release.
        AssertionError. The previous and the current major versions are not
            the same.
        AssertionError. The current patch version is not equal to the
            previous patch version plus one.
        AssertionError. The current minor version is not equal to the
            previous minor version plus one.
        AssertionError. The current patch version is not 0.
"""
response = utils.url_open(
'https://api.github.com/repos/oppia/oppia/releases/latest')
if response.getcode() != 200:
raise Exception(
'ERROR: Failed to fetch latest release info from GitHub.')
data = json.load(response)
latest_release_tag_name = data['tag_name']
match_result = re.match(r'v(\d)\.(\d)\.(\d)', latest_release_tag_name)
if match_result is None:
raise Exception(
'ERROR: Could not parse version number of latest GitHub release.')
prev_major, prev_minor, prev_patch = match_result.group(1, 2, 3)
match_result = re.match(r'(\d)\.(\d)\.(\d)', target_version)
curr_major, curr_minor, curr_patch = match_result.group(1, 2, 3)
# This will need to be overridden if the major version changes.
assert prev_major == curr_major, 'Unexpected major version change.'
if prev_minor == curr_minor:
assert int(curr_patch) == int(prev_patch) + 1, (
'The current patch version is not equal to previous patch '
'version plus one.')
else:
assert int(curr_minor) == int(prev_minor) + 1, (
'The current minor version is not equal to previous '
'minor version plus one.')
        assert int(curr_patch) == 0, (
            'The current patch version is not 0.')
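# For example, if the latest GitHub release is v3.1.4, the only versions this
# check accepts are 3.1.5 (a patch bump) and 3.2.0 (a minor bump with the
# patch reset to zero); anything else trips one of the assertions above.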
def verify_hotfix_number_is_one_ahead_of_previous_hotfix_number(
remote_alias, target_version, hotfix_number):
"""Checks that the hotfix number is one ahead of previous hotfix
number.
Args:
remote_alias: str. The alias that points to the remote oppia
repository. Example: When calling git remote -v, you get:
upstream https://github.com/oppia/oppia.git (fetch),
where 'upstream' is the alias that points to the remote oppia
repository.
target_version: str. The release version.
hotfix_number: int. The number for the hotfix branch.
Raises:
        AssertionError. The release branch for the target version does not
            exist.
        AssertionError. The difference between two consecutive hotfix
            numbers is not one.
"""
all_branches = subprocess.check_output([
'git', 'branch', '-a']).decode().split('\n')
last_hotfix_number = 0
release_branch_exists = False
hotfix_branch_name_regex = '^remotes/%s/release-%s-hotfix-\\d*$' % (
remote_alias, target_version)
for branch_name in all_branches:
        branch_name = branch_name.strip()
if branch_name == 'remotes/%s/release-%s' % (
remote_alias, target_version):
release_branch_exists = True
if re.match(hotfix_branch_name_regex, branch_name):
branch_hotfix_number = int(branch_name[branch_name.rfind('-') + 1:])
if branch_hotfix_number > last_hotfix_number:
last_hotfix_number = branch_hotfix_number
assert release_branch_exists, 'Release branch is missing.'
assert hotfix_number == last_hotfix_number + 1, (
        'The difference between two consecutive hotfix numbers is not one.')
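# For example, with remotes/upstream/release-3.1.4, release-3.1.4-hotfix-1
# and release-3.1.4-hotfix-2 present, the only acceptable hotfix number for
# version 3.1.4 is 3.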
def _get_release_branch_type_and_name(target_version):
"""Returns type and name of release branch for a target version.
Args:
target_version: str. The release version.
Returns:
tuple(str, str). The type and name of release branch.
"""
return (
constants.release_constants.BRANCH_TYPE_RELEASE,
'release-%s' % target_version)
def _get_hotfix_branch_type_and_name(target_version, hotfix_number):
"""Returns type and name of hotfix branch for a target version.
Args:
target_version: str. The release version.
hotfix_number: int. The number for the hotfix branch.
Returns:
tuple(str, str). The type and name of hotfix branch.
"""
return (
constants.release_constants.BRANCH_TYPE_HOTFIX,
'release-%s-hotfix-%s' % (target_version, hotfix_number))
def execute_branch_cut(target_version, hotfix_number):
"""Creates & pushes the new release branch to Github.
Args:
target_version: str. The release version.
hotfix_number: int. The number for the hotfix branch.
Raises:
Exception. Actions tests are failing on the branch from which
the new branch is cut.
"""
# Construct the new branch name.
if not hotfix_number:
new_branch_type, new_branch_name = _get_release_branch_type_and_name(
target_version)
else:
new_branch_type, new_branch_name = _get_hotfix_branch_type_and_name(
target_version, hotfix_number)
# Do prerequisite checks.
common.require_cwd_to_be_oppia()
common.verify_local_repo_is_clean()
common.verify_current_branch_name('develop')
# Update the local repo.
remote_alias = common.get_remote_alias(
constants.release_constants.REMOTE_URL)
subprocess.check_call(['git', 'pull', remote_alias, 'develop'])
verify_target_branch_does_not_already_exist(remote_alias, new_branch_name)
if not hotfix_number:
branch_to_check = 'develop'
elif hotfix_number == 1:
branch_to_check = 'release-%s' % target_version
else:
branch_to_check = 'release-%s-hotfix-%s' % (
target_version, hotfix_number - 1)
# The release coordinator should verify that tests are passing on
# the parent branch before checking out the new branch.
common.open_new_tab_in_browser_if_possible(
'https://github.com/oppia/oppia/actions?query=branch:%s'
% branch_to_check)
print(
'Please confirm: are Actions checks passing on %s? (y/n) ' % (
branch_to_check))
answer = input().lower()
if answer not in common.AFFIRMATIVE_CONFIRMATIONS:
raise Exception(
'Tests should pass on %s before this script is run.' % (
branch_to_check))
# Cut a new release or hotfix branch.
if new_branch_type == constants.release_constants.BRANCH_TYPE_HOTFIX:
verify_hotfix_number_is_one_ahead_of_previous_hotfix_number(
remote_alias, target_version, hotfix_number)
if hotfix_number == 1:
branch_to_cut_from = 'release-%s' % target_version
else:
branch_to_cut_from = 'release-%s-hotfix-%s' % (
target_version, hotfix_number - 1)
print('Cutting a new hotfix branch: %s' % new_branch_name)
subprocess.check_call([
'git', 'checkout', '-b', new_branch_name, branch_to_cut_from])
else:
verify_target_version_compatible_with_latest_release(
target_version)
print('Cutting a new release branch: %s' % new_branch_name)
subprocess.check_call(['git', 'checkout', '-b', new_branch_name])
# Push the new release branch to GitHub.
if new_branch_type == constants.release_constants.BRANCH_TYPE_RELEASE:
print('Pushing new %s branch to GitHub.' % new_branch_type)
subprocess.check_call(['git', 'push', remote_alias, new_branch_name])
else:
        print(
            'Please cherry-pick the required PRs and push the branch '
            'to GitHub once this script is done.\n'
            'Note: It is fine to push the branch only after creating the '
            'branch protection rule and doing all the cherry-picks.')
common.ask_user_to_confirm(
'Ask Sean (or Ben, if Sean isn\'t available) to create '
'a new branch protection rule by:\n'
'1. Going to this page: https://github.com/oppia/oppia/'
'settings/branch_protection_rules/new.\n'
'2. Typing in the full branch name %s.\n'
'3. Checking the box: Restrict who can push to matching '
'branches (then add the oppia/release-coordinators team)\n' % (
new_branch_name))
print('')
print(
'New %s branch successfully cut. You are now on branch %s' % (
new_branch_type, new_branch_name))
print('Done!')
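# A hypothetical end-to-end run: invoking this module with
# --release_version="2.5.3" --hotfix_number=2 verifies that
# release-2.5.3-hotfix-1 exists, cuts release-2.5.3-hotfix-2 from it, and
# leaves the push to be done manually after the cherry-picks.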
def main(args=None):
"""Main method for creating a release or hotfix branch."""
parsed_args = _PARSER.parse_args(args=args)
if parsed_args.release_version:
target_version = parsed_args.release_version
else:
raise Exception('ERROR: A "release_version" arg must be specified.')
hotfix_number = int(parsed_args.hotfix_number)
execute_branch_cut(target_version, hotfix_number)
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when cut_release_or_hotfix_branch.py is used as a
# script.
if __name__ == '__main__': # pragma: no cover
main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Migration(SchemaMigration):
depends_on = (
("containers", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'articles_post', (
(u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
('headline', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('short_title', self.gf('django.db.models.fields.CharField')(max_length=140, null=True, blank=True)),
('content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'articles', ['Post'])
# Adding M2M table for field albums on 'Post'
m2m_table_name = db.shorten_name(u'articles_post_albums')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'articles.post'], null=False)),
('album', models.ForeignKey(orm[u'articles.album'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'album_id'])
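        # This hand-written M2M table mirrors what Django would auto-create
        # for a plain ManyToManyField: a surrogate id, post and album foreign
        # keys, and a unique constraint over the (post_id, album_id) pair.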
# Adding model 'PostRelated'
db.create_table(u'articles_postrelated', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('post', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='postrelated_post', null=True, on_delete=models.SET_NULL, to=orm['articles.Post'])),
('related', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='postrelated_related', null=True, on_delete=models.SET_NULL, to=orm['containers.Container'])),
('order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'articles', ['PostRelated'])
# Adding model 'Album'
db.create_table(u'articles_album', (
(u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
('headline', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('short_title', self.gf('django.db.models.fields.CharField')(max_length=140, null=True, blank=True)),
))
db.send_create_signal(u'articles', ['Album'])
# Adding model 'Link'
db.create_table(u'articles_link', (
(u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
('headline', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('short_title', self.gf('django.db.models.fields.CharField')(max_length=140, null=True, blank=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('container', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='link_containers', null=True, to=orm['containers.Container'])),
))
db.send_create_signal(u'articles', ['Link'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'articles_post')
# Removing M2M table for field albums on 'Post'
db.delete_table(db.shorten_name(u'articles_post_albums'))
# Deleting model 'PostRelated'
db.delete_table(u'articles_postrelated')
# Deleting model 'Album'
db.delete_table(u'articles_album')
# Deleting model 'Link'
db.delete_table(u'articles_link')
models = {
u'%s.%s' % (User._meta.app_label, User._meta.module_name): {
'Meta': {'object_name': User.__name__},
},
u'articles.album': {
'Meta': {'object_name': 'Album'},
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
},
u'articles.link': {
'Meta': {'object_name': 'Link'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'link_containers'", 'null': 'True', 'to': u"orm['containers.Container']"}),
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'articles.post': {
'Meta': {'object_name': 'Post'},
'albums': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'post_albums'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['articles.Album']"}),
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'related_posts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'post_relatedposts'", 'to': u"orm['containers.Container']", 'through': u"orm['articles.PostRelated']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
},
u'articles.postrelated': {
'Meta': {'ordering': "('order',)", 'object_name': 'PostRelated'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'postrelated_post'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['articles.Post']"}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'postrelated_related'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['containers.Container']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'channels.channel': {
'Meta': {'ordering': "['name', 'parent__id', 'published']", 'unique_together': "(('site', 'long_slug', 'slug', 'parent'),)", 'object_name': 'Channel'},
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_in_main_rss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layout': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '250', 'db_index': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'long_slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'subchannel'", 'null': 'True', 'to': u"orm['channels.Channel']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'show_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'containers.container': {
'Meta': {'ordering': "['-date_available']", 'unique_together': "(('site', 'child_class', 'channel_long_slug', 'slug'),)", 'object_name': 'Container'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
'child_app_label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_class': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_module': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['images.Image']", 'null': 'True', 'through': u"orm['containers.ContainerImage']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'containers_container_mainimage'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_containers.container_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'show_on_root_channel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'containers.containerimage': {
'Meta': {'ordering': "('order',)", 'object_name': 'ContainerImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'images.image': {
'Meta': {'object_name': 'Image'},
'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'crop_example': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'crop_x1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_x2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fit_in': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'halign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'smart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)}),
'valign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['articles']
|
|
import datetime
import logging
log = logging.getLogger(__name__)
class EventProcessor(object):
def __init__(self, cache):
self.cache = cache
def get_handler(self, event):
if event.domain != 'Shotgun':
log.info('skipping event; not in Shotgun domain')
return
entity_type = self.cache.get(event.entity_type)
if entity_type is None:
log.info('skipping event; unknown entity type %s' % event.entity_type)
return
func = getattr(self, '_process_%s_event' % event.subtype.lower(), None)
if func is None:
log.info('skipping event; unknown event subtype %s' % (event.subtype))
return
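        # Defer execution in a closure: the caller supplies the database
        # connection (con) when it actually invokes the handler.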
return lambda con: func(con, event, entity_type)
def _process_new_event(self, con, event, entity_type):
'''
{u'attribute_name': None,
u'created_at': u'2015-07-09T19:33:37Z',
u'entity': {u'id': 67378, u'name': u'something', u'type': u'Task'},
u'event_type': u'Shotgun_Task_New',
u'id': 2011530,
u'meta': {u'entity_id': 67378,
u'entity_type': u'Task',
u'type': u'new_entity'},
u'project': {u'id': 66, u'name': u'Testing Sandbox', u'type': u'Project'},
u'type': u'EventLogEntry'}
'''
# We need to fetch all of the data from the server; bleh.
entity = self.cache.event_log.shotgun.find_one(entity_type.type_name,
filters=[('id', 'is', event.entity_id)],
fields=[key for key, field in entity_type.fields.iteritems() if field.is_cached()]
)
if not entity:
log.warning('could not find "new" %s %d' % (entity_type.type_name, event.entity_id))
return
        # We assume that updated_at is pulled in from Shotgun, as it only
        # really matters if we are caching it anyway.
        # Strip out any extra columns Shotgun might have sent us.
entity = self.cache.filter_cacheable_entity(entity)
self.cache.create_or_update(entity_type.type_name,
data=entity,
create_with_id=True,
con=con,
source_event=event,
extra={
'_last_log_event_id': event['id'],
'_active': not event.entity_is_retired,
},
)
def _process_change_event(self, con, event, entity_type):
'''
{u'attribute_name': u'color',
u'created_at': u'2015-07-09T19:33:37Z',
u'entity': {u'id': 67378, u'name': u'something', u'type': u'Task'},
u'event_type': u'Shotgun_Task_Change',
u'id': 2011531,
u'meta': {u'attribute_name': u'color',
u'entity_id': 67378,
u'entity_type': u'Task',
u'field_data_type': u'color',
u'in_create': True,
u'new_value': u'pipeline_step',
u'old_value': None,
u'type': u'attribute_change'},
u'project': {u'id': 66, u'name': u'Testing Sandbox', u'type': u'Project'},
u'type': u'EventLogEntry'}
OR (on a backref)
{
"attribute_name": "tasks",
"created_at": "2015-07-09T23:00:10Z",
"entity": {
"id": 7080,
"name": "002_001",
"type": "Shot"
},
"event_type": "Shotgun_Shot_Change",
"id": 2011759,
"meta": {
"actual_attribute_changed": "Task.entity",
"added": [
{
"id": 67380,
"name": "newtask3",
"status": "wtg",
"type": "Task",
"uuid": "3fc23e92-268e-11e5-ac19-0025900054a4",
"valid": "valid"
}
],
"attribute_name": "tasks",
"entity_id": 67380,
"entity_type": "Task",
"field_data_type": "entity",
"in_create": true,
"original_event_log_entry_id": 2011758,
"removed": [],
"type": "attribute_change"
},
"project": {
"id": 66,
"name": "Testing Sandbox",
"type": "Project"
},
"type": "EventLogEntry"
}
OR (after a retirement; note the NULL entity):
{
"attribute_name": "retirement_date",
"created_at": "2015-07-13T21:54:01Z",
"entity": null,
"event_type": "Shotgun_Task_Change",
"id": 2017315,
"meta": {
"attribute_name": "retirement_date",
"entity_id": 67519,
"entity_type": "Task",
"new_value": "2015-07-13 21:54:01 UTC",
"old_value": null,
"type": "attribute_change"
},
"project": {
"id": 66,
"name": "Testing Sandbox",
"type": "Project"
},
"type": "EventLogEntry",
"user": {
"id": 108,
"name": "Mike Boers",
"type": "HumanUser"
}
}
BUT:
>>> sg.find_one('Task', [('$FROM$EventLogEntry.entity.id', 'is', 2017315)], [], retired_only=True)
{'type': 'Task', 'id': 67519}
'''
# Make sure it is a field we care about.
try:
field = self.cache[event.entity_type][event['attribute_name']]
except KeyError:
return
if not field.is_cached():
return
# This could be a retired entity, in which case we just need the ID.
data = event.entity.copy() if event.entity else {'type': event.entity_type, 'id': event.entity_id}
if event.get('project'):
data.setdefault('project', event['project'])
# Use an internal syntax for adding or removing from multi-entities.
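        # e.g. data['tasks'] = {'__added__': [{'type': 'Task', 'id': 67380, ...}],
        #                       '__removed__': []}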
added = event.meta.get('added')
removed = event.meta.get('removed')
if added or removed:
data[event['attribute_name']] = {'__added__': added, '__removed__': removed}
else:
data[event['attribute_name']] = event['meta']['new_value']
# Pull in the updated_at (assuming that it is cached, of course).
data.setdefault('updated_at', event.get('entity.%s.updated_at' % event.entity_type))
data = self.cache.filter_cacheable_entity(data)
handler = self.cache.create_or_update(entity_type.type_name,
data=data,
create_with_id=True,
con=con,
source_event=event,
extra={
'_last_log_event_id': event['id'],
},
)
# If we did not know about it, then fetch all data as if it is new.
if not handler.entity_exists:
log.warning('updated un-cached %s %s; fetching all data' % (event.entity_type, event.entity_id))
self._process_new_event(con, event, entity_type)
def _process_retirement_event(self, con, event, entity_type):
'''
{
"attribute_name": null,
"created_at": "2015-07-13T22:32:35Z",
"entity": null,
"event_type": "Shotgun_Task_Retirement",
"id": 2017525,
"meta": {
"class_name": "Task",
"display_name": "another to delete",
"entity_id": 67531,
"entity_type": "Task",
"id": 67531,
"retirement_date": "2015-07-13 22:32:35 UTC",
"type": "entity_retirement"
},
"project": {
"id": 66,
"name": "Testing Sandbox",
"type": "Project"
},
"type": "EventLogEntry",
"user": {
"id": 108,
"name": "Mike Boers",
"type": "HumanUser"
}
}
'''
if not self.cache.retire(event.entity_type, event.entity_id, con=con, source_event=event, strict=False):
log.warning('retired un-cached %s %s; ignoring' % (event.entity_type, event.entity_id))
def _process_revival_event(self, con, event, entity_type):
'''
{
"attribute_name": null,
"created_at": "2015-07-13T22:34:21Z",
"entity": {
"id": 67531,
"name": "another to delete",
"type": "Task"
},
"event_type": "Shotgun_Task_Revival",
"id": 2017561,
"meta": {
"class_name": "Task",
"display_name": "another to delete",
"entity_id": 67531,
"entity_type": "Task",
"id": 67531
},
"project": {
"id": 66,
"name": "Testing Sandbox",
"type": "Project"
},
"type": "EventLogEntry",
"user": {
"id": 108,
"name": "Mike Boers",
"type": "HumanUser"
}
}
'''
# Pull in updated_at.
extra = {}
updated_at = event.get('entity.%s.updated_at' % event.entity_type)
if updated_at:
extra['updated_at'] = updated_at
if not self.cache.revive(event.entity_type, event.entity_id, con=con, source_event=event, extra=extra, strict=False):
            log.warning('revived un-cached %s %d; processing as new' % (event.entity_type, event.entity_id))
self._process_new_event(con, event, entity_type)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# Copyright (C) 2006-2011, Herbert Valerio Riedel <hvr@gnu.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from datetime import datetime
import itertools
import os
from genshi.builder import tag
from genshi.core import Markup
from trac.cache import cached
from trac.config import BoolOption, IntOption, PathOption, Option
from trac.core import *
from trac.env import ISystemInfoProvider
from trac.util import TracError, shorten_line
from trac.util.datefmt import FixedOffset, to_timestamp, format_datetime
from trac.util.text import to_unicode, exception_to_unicode
from trac.util.translation import _
from trac.versioncontrol.api import Changeset, Node, Repository, \
IRepositoryConnector, InvalidRepository,\
NoSuchChangeset, NoSuchNode, \
IRepositoryProvider
from trac.versioncontrol.cache import CACHE_YOUNGEST_REV, CachedRepository, \
CachedChangeset
from trac.versioncontrol.web_ui import IPropertyRenderer
from trac.web.chrome import Chrome
from trac.wiki import IWikiSyntaxProvider
from tracopt.versioncontrol.git import PyGIT
class GitCachedRepository(CachedRepository):
"""Git-specific cached repository."""
def display_rev(self, rev):
return self.short_rev(rev)
def short_rev(self, path):
return self.repos.short_rev(path)
def normalize_rev(self, rev):
if not rev:
return self.get_youngest_rev()
normrev = self.repos.git.verifyrev(rev)
if normrev is None:
raise NoSuchChangeset(rev)
return normrev
def get_youngest_rev(self):
# return None if repository is empty
return CachedRepository.get_youngest_rev(self) or None
def child_revs(self, rev):
return self.repos.child_revs(rev)
def get_changesets(self, start, stop):
for key, csets in itertools.groupby(
CachedRepository.get_changesets(self, start, stop),
key=lambda cset: cset.date):
csets = list(csets)
if len(csets) == 1:
yield csets[0]
continue
rev_csets = dict((cset.rev, cset) for cset in csets)
while rev_csets:
revs = [rev for rev in rev_csets
if not any(r in rev_csets
for r in self.repos.child_revs(rev))]
for rev in sorted(revs):
yield rev_csets.pop(rev)
def get_changeset(self, rev):
return GitCachedChangeset(self, self.normalize_rev(rev), self.env)
def sync(self, feedback=None, clean=False):
if clean:
self.remove_cache()
metadata = self.metadata
self.save_metadata(metadata)
meta_youngest = metadata.get(CACHE_YOUNGEST_REV, '')
repos = self.repos
def is_synced(rev):
for count, in self.env.db_query("""
SELECT COUNT(*) FROM revision WHERE repos=%s AND rev=%s
""", (self.id, rev)):
return count > 0
return False
def traverse(rev, seen):
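            # Walk the first-parent chain collecting revisions not yet in the
            # cache; histories of the remaining (merge) parents are spliced in
            # afterwards, so the returned list is ordered newest-to-oldest.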
revs = []
merge_revs = []
while True:
if rev in seen:
break
seen.add(rev)
if is_synced(rev):
break
revs.append(rev)
parent_revs = repos.parent_revs(rev)
if not parent_revs: # root commit?
break
rev = parent_revs[0]
if len(parent_revs) > 1:
merge_revs.append((len(revs), parent_revs[1:]))
for idx, parent_revs in reversed(merge_revs):
for rev in parent_revs:
revs[idx:idx] = traverse(rev, seen)
return revs
while True:
repos.sync()
repos_youngest = repos.youngest_rev or ''
updated = False
seen = set()
for rev in repos.git.all_revs():
if repos.child_revs(rev):
continue
revs = traverse(rev, seen) # topology ordered
while revs:
# sync revision from older revision to newer revision
rev = revs.pop()
self.log.info("Trying to sync revision [%s]", rev)
cset = repos.get_changeset(rev)
try:
self.insert_changeset(rev, cset)
updated = True
except self.env.db_exc.IntegrityError as e:
self.log.info('Revision %s already cached: %r', rev, e)
continue
if feedback:
feedback(rev)
if updated:
continue # sync again
if meta_youngest != repos_youngest:
with self.env.db_transaction as db:
db("""
UPDATE repository SET value=%s WHERE id=%s AND name=%s
""", (repos_youngest, self.id, CACHE_YOUNGEST_REV))
del self.metadata
return
class GitCachedChangeset(CachedChangeset):
"""Git-specific cached changeset."""
def get_branches(self):
_rev = self.rev
return [(k, v == _rev) for k, v in
self.repos.repos.git.get_branch_contains(_rev, resolve=True)]
def get_tags(self):
return self.repos.repos.git.get_tags(self.rev)
def _last_iterable(iterable):
"""helper for detecting last iteration in for-loop"""
i = iter(iterable)
v = i.next()
for nextv in i:
yield False, v
v = nextv
yield True, v
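# Illustrative: list(_last_iterable('ab')) == [(False, 'a'), (True, 'b')]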
def intersperse(sep, iterable):
"""The 'intersperse' generator takes an element and an iterable and
intersperses that element between the elements of the iterable.
inspired by Haskell's ``Data.List.intersperse``
"""
for i, item in enumerate(iterable):
if i: yield sep
yield item
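# Illustrative: list(intersperse(', ', 'abc')) == ['a', ', ', 'b', ', ', 'c']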
# helper
def _parse_user_time(s):
"""Parse author or committer attribute lines and return
corresponding ``(user, timestamp)`` pair.
"""
user, time, tz_str = s.rsplit(None, 2)
tz = FixedOffset((int(tz_str) * 6) / 10, tz_str)
time = datetime.fromtimestamp(float(time), tz)
return user, time
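# Illustrative (hypothetical input): _parse_user_time('Joe <joe@example.com> 1436477617 -0700')
# returns ('Joe <joe@example.com>', <datetime with a FixedOffset of -420
# minutes>), since int('-0700') * 6 / 10 == -420.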
class GitConnector(Component):
implements(IRepositoryConnector, ISystemInfoProvider, IWikiSyntaxProvider)
def __init__(self):
self._version = None
try:
self._version = PyGIT.Storage.git_version(git_bin=self.git_bin)
except PyGIT.GitError as e:
self.log.error("GitError: %s", e)
if self._version:
self.log.info("detected GIT version %s", self._version['v_str'])
if not self._version['v_compatible']:
self.log.error("GIT version %s installed not compatible"
"(need >= %s)", self._version['v_str'],
self._version['v_min_str'])
# ISystemInfoProvider methods
def get_system_info(self):
if self._version:
yield 'GIT', self._version['v_str']
# IWikiSyntaxProvider methods
def _format_sha_link(self, formatter, sha, label):
# FIXME: this function needs serious rethinking...
reponame = ''
context = formatter.context
while context:
if context.resource.realm in ('source', 'changeset'):
reponame = context.resource.parent.id
break
context = context.parent
try:
repos = self.env.get_repository(reponame)
if not repos:
raise Exception("Repository '%s' not found" % reponame)
sha = repos.normalize_rev(sha) # in case it was abbreviated
changeset = repos.get_changeset(sha)
return tag.a(label, class_='changeset',
title=shorten_line(changeset.message),
href=formatter.href.changeset(sha, repos.reponame))
except Exception as e:
return tag.a(label, class_='missing changeset',
title=to_unicode(e), rel='nofollow')
def get_wiki_syntax(self):
yield (r'(?:\b|!)r?[0-9a-fA-F]{%d,40}\b' % self.wiki_shortrev_len,
lambda fmt, sha, match:
self._format_sha_link(fmt, sha.startswith('r')
and sha[1:] or sha, sha))
def get_link_resolvers(self):
yield ('sha', lambda fmt, _, sha, label, match=None:
self._format_sha_link(fmt, sha, label))
# IRepositoryConnector methods
persistent_cache = BoolOption('git', 'persistent_cache', 'false',
"""Enable persistent caching of commit tree.""")
cached_repository = BoolOption('git', 'cached_repository', 'false',
"""Wrap `GitRepository` in `CachedRepository`.""")
shortrev_len = IntOption('git', 'shortrev_len', 7,
"""The length at which a sha1 should be abbreviated to (must
be >= 4 and <= 40).
""")
wiki_shortrev_len = IntOption('git', 'wikishortrev_len', 40,
"""The minimum length of an hex-string for which
auto-detection as sha1 is performed (must be >= 4 and <= 40).
""")
trac_user_rlookup = BoolOption('git', 'trac_user_rlookup', 'false',
"""Enable reverse mapping of git email addresses to trac user ids.
Performance will be reduced if there are many users and the
`cached_repository` option is `disabled`.
A repository resync is required after changing the value of this
option.
""")
use_committer_id = BoolOption('git', 'use_committer_id', 'true',
"""Use git-committer id instead of git-author id for the
changeset ''Author'' field.
""")
use_committer_time = BoolOption('git', 'use_committer_time', 'true',
"""Use git-committer timestamp instead of git-author timestamp
for the changeset ''Timestamp'' field.
""")
git_fs_encoding = Option('git', 'git_fs_encoding', 'utf-8',
"""Define charset encoding of paths within git repositories.""")
git_bin = Option('git', 'git_bin', 'git',
"""Path to the git executable.""")
def get_supported_types(self):
yield ('git', 8)
def get_repository(self, type, dir, params):
"""GitRepository factory method"""
assert type == 'git'
if not (4 <= self.shortrev_len <= 40):
raise TracError(_("%(option)s must be in the range [4..40]",
option="[git] shortrev_len"))
if not (4 <= self.wiki_shortrev_len <= 40):
raise TracError(_("%(option)s must be in the range [4..40]",
option="[git] wikishortrev_len"))
if not self._version:
raise TracError(_("GIT backend not available"))
elif not self._version['v_compatible']:
raise TracError(_("GIT version %(hasver)s installed not "
"compatible (need >= %(needsver)s)",
hasver=self._version['v_str'],
needsver=self._version['v_min_str']))
if self.trac_user_rlookup:
def rlookup_uid(email):
"""Reverse map 'real name <user@domain.tld>' addresses to trac
user ids.
:return: `None` if lookup failed
"""
try:
_, email = email.rsplit('<', 1)
email, _ = email.split('>', 1)
email = email.lower()
except Exception:
return None
for _uid, _name, _email in self.env.get_known_users():
try:
if email == _email.lower():
return _uid
except Exception:
continue
else:
def rlookup_uid(_):
return None
repos = GitRepository(self.env, dir, params, self.log,
persistent_cache=self.persistent_cache,
git_bin=self.git_bin,
git_fs_encoding=self.git_fs_encoding,
shortrev_len=self.shortrev_len,
rlookup_uid=rlookup_uid,
use_committer_id=self.use_committer_id,
use_committer_time=self.use_committer_time,
)
if self.cached_repository:
repos = GitCachedRepository(self.env, repos, self.log)
self.log.debug("enabled CachedRepository for '%s'", dir)
else:
self.log.debug("disabled CachedRepository for '%s'", dir)
return repos
class CsetPropertyRenderer(Component):
implements(IPropertyRenderer)
# relied upon by GitChangeset
def match_property(self, name, mode):
# default renderer has priority 1
return (name in ('Parents',
'Children',
'Branches',
'git-committer',
'git-author',
) and mode == 'revprop') and 4 or 0
def render_property(self, name, mode, context, props):
def sha_link(sha, label=None):
# sha is assumed to be a non-abbreviated 40-chars sha id
try:
reponame = context.resource.parent.id
repos = self.env.get_repository(reponame)
cset = repos.get_changeset(sha)
if label is None:
label = repos.display_rev(sha)
return tag.a(label, class_='changeset',
title=shorten_line(cset.message),
href=context.href.changeset(sha, repos.reponame))
except Exception as e:
return tag.a(sha, class_='missing changeset',
title=to_unicode(e), rel='nofollow')
if name == 'Branches':
branches = props[name]
# simple non-merge commit
return tag(*intersperse(', ', (sha_link(rev, label)
for label, rev in branches)))
elif name in ('Parents', 'Children'):
revs = props[name] # list of commit ids
if name == 'Parents' and len(revs) > 1:
# we got a merge...
current_sha = context.resource.id
reponame = context.resource.parent.id
parent_links = intersperse(', ', \
((sha_link(rev),
' (',
tag.a(_("diff"),
title=_("Diff against this parent (show the "
"changes merged from the other parents)"),
href=context.href.changeset(current_sha, reponame,
old=rev)),
')')
for rev in revs))
return tag(list(parent_links),
tag.br(),
tag.span(Markup(_("Note: this is a <strong>merge"
"</strong> changeset, the "
"changes displayed below "
"correspond to the merge "
"itself.")),
class_='hint'),
tag.br(),
tag.span(Markup(_("Use the <code>(diff)</code> "
"links above to see all the "
"changes relative to each "
"parent.")),
class_='hint'))
# simple non-merge commit
return tag(*intersperse(', ', map(sha_link, revs)))
elif name in ('git-committer', 'git-author'):
user_, time_ = props[name]
_str = "%s (%s)" % (
Chrome(self.env).format_author(context.req, user_),
format_datetime(time_, tzinfo=context.req.tz))
return unicode(_str)
raise TracError(_("Internal error"))
class GitRepository(Repository):
"""Git repository"""
def __init__(self, env, path, params, log,
persistent_cache=False,
git_bin='git',
git_fs_encoding='utf-8',
shortrev_len=7,
rlookup_uid=lambda _: None,
use_committer_id=False,
use_committer_time=False,
):
self.env = env
self.logger = log
self.gitrepo = path
self.params = params
self.persistent_cache = persistent_cache
self.shortrev_len = max(4, min(shortrev_len, 40))
self.rlookup_uid = rlookup_uid
self.use_committer_time = use_committer_time
self.use_committer_id = use_committer_id
try:
factory = PyGIT.StorageFactory(path, log, not persistent_cache,
git_bin=git_bin,
git_fs_encoding=git_fs_encoding)
self._git = factory.getInstance()
except PyGIT.GitError as e:
log.error(exception_to_unicode(e))
raise InvalidRepository(
_("%(path)s does not appear to be a Git repository.",
path=path))
Repository.__init__(self, 'git:' + path, self.params, log)
self._cached_git_id = str(self.id)
def close(self):
self._git = None
@property
def git(self):
if self.persistent_cache:
return self._cached_git
else:
return self._git
@cached('_cached_git_id')
def _cached_git(self):
self._git.invalidate_rev_cache()
return self._git
def get_youngest_rev(self):
return self.git.youngest_rev()
def get_path_history(self, path, rev=None, limit=None):
raise TracError(_("Unsupported \"Show only adds and deletes\""))
def get_oldest_rev(self):
return self.git.oldest_rev()
def normalize_path(self, path):
return path and path.strip('/') or '/'
def normalize_rev(self, rev):
if not rev:
return self.get_youngest_rev()
normrev = self.git.verifyrev(rev)
if normrev is None:
raise NoSuchChangeset(rev)
return normrev
def display_rev(self, rev):
return self.short_rev(rev)
def short_rev(self, rev):
return self.git.shortrev(self.normalize_rev(rev),
min_len=self.shortrev_len)
def get_node(self, path, rev=None, historian=None):
return GitNode(self, path, rev, self.log, None, historian)
def get_quickjump_entries(self, rev):
for bname, bsha in self.git.get_branches():
yield 'branches', bname, '/', bsha
for t in self.git.get_tags():
yield 'tags', t, '/', t
def get_path_url(self, path, rev):
return self.params.get('url')
def get_changesets(self, start, stop):
for rev in self.git.history_timerange(to_timestamp(start),
to_timestamp(stop)):
yield self.get_changeset(rev)
def get_changeset(self, rev):
"""GitChangeset factory method"""
return GitChangeset(self, rev)
def get_changeset_uid(self, rev):
return self.normalize_rev(rev)
def get_changes(self, old_path, old_rev, new_path, new_rev,
ignore_ancestry=0):
# TODO: handle renames/copies, ignore_ancestry
if old_path != new_path:
raise TracError(_("Not supported in git_fs"))
with self.git.get_historian(old_rev,
old_path.strip('/')) as old_historian:
with self.git.get_historian(new_rev,
new_path.strip('/')) as new_historian:
for chg in self.git.diff_tree(old_rev, new_rev,
self.normalize_path(new_path)):
mode1, mode2, obj1, obj2, action, path, path2 = chg
kind = Node.FILE
if mode2.startswith('04') or mode1.startswith('04'):
kind = Node.DIRECTORY
change = GitChangeset.action_map[action]
old_node = None
new_node = None
if change != Changeset.ADD:
old_node = self.get_node(path, old_rev, old_historian)
if change != Changeset.DELETE:
new_node = self.get_node(path, new_rev, new_historian)
yield old_node, new_node, kind, change
def next_rev(self, rev, path=''):
return self.git.hist_next_revision(rev)
def previous_rev(self, rev, path=''):
return self.git.hist_prev_revision(rev)
def parent_revs(self, rev):
return self.git.parents(rev)
def child_revs(self, rev):
return self.git.children(rev)
def rev_older_than(self, rev1, rev2):
return self.git.rev_is_anchestor_of(self.normalize_rev(rev1),
self.normalize_rev(rev2))
# def clear(self, youngest_rev=None):
# self.youngest = None
# if youngest_rev is not None:
# self.youngest = self.normalize_rev(youngest_rev)
# self.oldest = None
def clear(self, youngest_rev=None):
self.sync()
def sync(self, rev_callback=None, clean=None):
if rev_callback:
revs = set(self.git.all_revs())
if self.persistent_cache:
del self._cached_git # invalidate persistent cache
if not self.git.sync():
return None # nothing expected to change
if rev_callback:
revs = set(self.git.all_revs()) - revs
for rev in revs:
rev_callback(rev)
class GitNode(Node):
def __init__(self, repos, path, rev, log, ls_tree_info=None,
historian=None):
self.log = log
self.repos = repos
self.fs_sha = None # points to either tree or blobs
self.fs_perm = None
self.fs_size = None
if rev:
rev = repos.normalize_rev(to_unicode(rev))
else:
rev = repos.youngest_rev
kind = Node.DIRECTORY
p = path.strip('/')
if p: # ie. not the root-tree
if not rev:
raise NoSuchNode(path, rev)
if not ls_tree_info:
ls_tree_info = repos.git.ls_tree(rev, p) or None
if ls_tree_info:
[ls_tree_info] = ls_tree_info
if not ls_tree_info:
raise NoSuchNode(path, rev)
self.fs_perm, k, self.fs_sha, self.fs_size, fname = ls_tree_info
# fix-up to the last commit-rev that touched this node
rev = repos.git.last_change(rev, p, historian)
if k == 'tree':
pass
elif k == 'commit':
# FIXME: this is a workaround for missing git submodule
# support in the plugin
pass
elif k == 'blob':
kind = Node.FILE
else:
raise TracError(_("Internal error (got unexpected object "
"kind '%(kind)s')", kind=k))
self.created_path = path
self.created_rev = rev
Node.__init__(self, repos, path, rev, kind)
def __git_path(self):
"""return path as expected by PyGIT"""
p = self.path.strip('/')
if self.isfile:
assert p
return p
if self.isdir:
return p and (p + '/')
raise TracError(_("Internal error"))
def get_content(self):
if not self.isfile:
return None
return self.repos.git.get_file(self.fs_sha)
def get_properties(self):
return self.fs_perm and {'mode': self.fs_perm } or {}
def get_annotations(self):
if not self.isfile:
return
return [rev for rev, lineno in \
self.repos.git.blame(self.rev,self.__git_path())]
def get_entries(self):
if not self.rev: # if empty repository
return
if not self.isdir:
return
with self.repos.git.get_historian(self.rev,
self.path.strip('/')) as historian:
for ent in self.repos.git.ls_tree(self.rev, self.__git_path()):
yield GitNode(self.repos, ent[-1], self.rev, self.log, ent,
historian)
def get_content_type(self):
if self.isdir:
return None
return ''
def get_content_length(self):
if not self.isfile:
return None
if self.fs_size is None:
self.fs_size = self.repos.git.get_obj_size(self.fs_sha)
return self.fs_size
def get_history(self, limit=None):
if not self.rev: # if empty repository
return
# TODO: find a way to follow renames/copies
for is_last, rev in _last_iterable(self.repos.git.history(self.rev,
self.__git_path(), limit)):
yield (self.path, rev, Changeset.EDIT if not is_last else
Changeset.ADD)
def get_last_modified(self):
if not self.isfile:
return None
try:
msg, props = self.repos.git.read_commit(self.rev)
user, ts = _parse_user_time(props['committer'][0])
except:
self.log.error("internal error (could not get timestamp from "
"commit '%s')", self.rev)
return None
return ts
class GitChangeset(Changeset):
"""A Git changeset in the Git repository.
Corresponds to a Git commit blob.
"""
action_map = { # see also git-diff-tree(1) --diff-filter
'A': Changeset.ADD,
'M': Changeset.EDIT, # modified
'T': Changeset.EDIT, # file type (mode) change
'D': Changeset.DELETE,
'R': Changeset.MOVE, # renamed
'C': Changeset.COPY
} # TODO: U, X, B
def __init__(self, repos, sha):
if sha is None:
raise NoSuchChangeset(sha)
try:
msg, props = repos.git.read_commit(sha)
except PyGIT.GitErrorSha:
raise NoSuchChangeset(sha)
self.props = props
assert 'children' not in props
_children = list(repos.git.children(sha))
if _children:
props['children'] = _children
committer, author = self._get_committer_and_author()
# use 1st author/committer as changeset owner/timestamp
c_user = a_user = c_time = a_time = None
if committer:
c_user, c_time = _parse_user_time(committer)
if author:
a_user, a_time = _parse_user_time(author)
if repos.use_committer_time:
time = c_time or a_time
else:
time = a_time or c_time
if repos.use_committer_id:
user = c_user or a_user
else:
user = a_user or c_user
# try to resolve email address to trac uid
user = repos.rlookup_uid(user) or user
Changeset.__init__(self, repos, rev=sha, message=msg, author=user,
date=time)
def _get_committer_and_author(self):
committer = author = None
if 'committer' in self.props:
committer = self.props['committer'][0]
if 'author' in self.props:
author = self.props['author'][0]
return committer, author
def get_properties(self):
properties = {}
if 'parent' in self.props:
properties['Parents'] = self.props['parent']
if 'children' in self.props:
properties['Children'] = self.props['children']
committer, author = self._get_committer_and_author()
if author != committer:
properties['git-committer'] = _parse_user_time(committer)
properties['git-author'] = _parse_user_time(author)
branches = list(self.repos.git.get_branch_contains(self.rev,
resolve=True))
if branches:
properties['Branches'] = branches
return properties
def get_changes(self):
# Returns the differences against the first parent
parent = self.props.get('parent')
parent = parent[0] if parent else None
for mode1, mode2, obj1, obj2, action, path1, path2 in \
self.repos.git.diff_tree(parent, self.rev, find_renames=True):
path = path2 or path1
p_path, p_rev = path1, parent
kind = Node.DIRECTORY \
if mode2.startswith('04') or mode1.startswith('04') \
else Node.FILE
action = GitChangeset.action_map[action[0]]
if action == Changeset.ADD:
p_path = p_rev = None
yield path, kind, action, p_path, p_rev
def get_branches(self):
_rev = self.rev
return [(k, v == _rev)
for k, v in self.repos.git.get_branch_contains(_rev,
resolve=True)]
def get_tags(self):
return self.repos.git.get_tags(self.rev)
class GitwebProjectsRepositoryProvider(Component):
implements(IRepositoryProvider)
projects_list = PathOption('git', 'projects_list', doc=
"""Path to a gitweb-formatted projects.list""")
projects_base = PathOption('git', 'projects_base', doc=
"""Path to the base of your git projects""")
projects_url = Option('git', 'projects_url', doc=
"""Template for project URLs. %s will be replaced with the repo
name""")
def get_repositories(self):
if not self.projects_list:
return
for line in open(self.projects_list):
line = line.strip()
name = line
if name.endswith('.git'):
name = name[:-4]
repo = {
'dir': os.path.join(self.projects_base, line),
'type': 'git',
}
description_path = os.path.join(repo['dir'], 'description')
if os.path.exists(description_path):
repo['description'] = open(description_path).read().strip()
if self.projects_url:
repo['url'] = self.projects_url % name
yield name, repo
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define some common object shortcuts for RGB objects.
We can configure generative objects somewhat freely in parameters, but in
current experiments, we use a discrete set of nicknamed objects. These objects
are defined with a per-object constant set of parameters. For easier use, these
are specified here.
The initial version of RGB-objects (named RGB30) was created manually and is not
a part of the current parametric object pipeline.
There is a visualization of the objects and more information can be found here:
https://sites.google.com/corp/google.com/rgb--stacking#h.p_Hbvm_ijsde_K
"""
import collections
import copy
import itertools
from typing import Dict, Tuple
from dm_robotics.manipulation.props.parametric_object import parametric_object
from dm_robotics.manipulation.props.parametric_object.rgb_objects import parametric_rgb_object
# RGB-objects v1.0 are created with 3 deformations of a seed object (a cube with
# a 50mm side): G minor deformation, B average deformation, R major
# deformation. Deformations are chosen by sampling independently 5 parameters of
# the RGB-shapes. We have chosen: 1 - hollowness; 2 - number of sides;
# 3 - shrinking; 4 - not used; 5 - shear; 6 - form factor.
ParametersDict = parametric_object.ParametersDict
RgbVersion = parametric_rgb_object.RgbVersion
# Breaking these entries across multiple lines to stay under 100 characters
# does not help visibility, so:
# pylint: disable=line-too-long, bad-whitespace
# pyformat: disable
_OBJECTS_V1_0 = collections.OrderedDict({
"g1": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 15, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50 }),
"b1": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 20, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50 }),
"r1": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 35, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50 }),
"g2": ParametersDict({ "sds": 6, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 46, "scy": 46, "scz": 50 }),
"b2": ParametersDict({ "sds": 8, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 45, "scy": 45, "scz": 50 }),
"r2": ParametersDict({ "sds": 10, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 45, "scy": 45, "scz": 50 }),
"g3": ParametersDict({ "sds": 4, "shr": 25, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 51, "scy": 51, "scz": 60 }),
"b3": ParametersDict({ "sds": 4, "shr": 48, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 46, "scy": 49, "scz": 63 }),
"r3": ParametersDict({ "sds": 4, "shr": 75, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 41, "scy": 49, "scz": 71 }),
"g5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 20, "scx": 50, "scy": 50, "scz": 50 }),
"b5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 31, "scx": 50, "scy": 50, "scz": 50 }),
"r5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 42, "scx": 50, "scy": 50, "scz": 50 }),
"g6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 40, "scy": 56, "scz": 80 }),
"b6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 32, "scy": 48, "scz": 96 }),
"r6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 29, "scy": 29, "scz": 150 })})
# pylint: enable=line-too-long, bad-whitespace
# pyformat: enable
# RGB-objects v1.3 adds two deformation axes to v1.1 (axis 7 and axis 8). The
# axis 6 in v1.1 is the deformation of the form factor by increasing the size
# along the z-axis. With v1.3 we introduce deformations which correspond to
# scaling along the x-axis (axis 7) and along the y-axis (axis 8). Moreover,
# we generate all objects as interpolations from a seed object s and the 6
# objects r2, r3, r5, r6, r7, r8, which are the maximum deformations of the
# seed object along the available axes 2, 3, 5, 6, 7, 8. Finally, v1.3 also
# defines the objects Omn with Omn = (Om + On)//2.
# pylint: disable=line-too-long, bad-whitespace
# pyformat: disable
_OBJECTS_V1_3 = collections.OrderedDict({
"s": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 50, "scy": 50, "scz": 50}),
"r2": ParametersDict({ "sds": 10, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 45, "scy": 45, "scz": 50}),
"r3": ParametersDict({ "sds": 4, "shr": 75, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 41, "scy": 49, "scz": 71}),
"r5": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 42, "scx": 50, "scy": 50, "scz": 50}),
"r6": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 29, "scy": 29, "scz": 150}),
"r7": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 29, "scy": 150, "scz": 29}),
"r8": ParametersDict({ "sds": 4, "shr": 0, "drf": 0, "hlw": 0, "shx": 0, "shy": 0, "scx": 150, "scy": 29, "scz": 29})})
_OBJECTS_V1_3_NON_GRASPABLE = ("l36", "m36", "y36", "r36", "r67", "v36")
_OBJECTS_V1_3_NON_UNIQUE = ("f25", "h27", "l27", "m27", "r27", "r68", "u27", "v27", "x27", "y27", "h28", "l28", "m28", "r28", "u28", "v28", "x28", "y28", "r78")
# pylint: enable=line-too-long, bad-whitespace
# pyformat: enable
def parameters_interpolations(
params_dict1: ParametersDict,
params_dict2: ParametersDict,
interpolation_length: int = 1,
interpolation_keys: Tuple[str, ...] = ()) -> collections.OrderedDict:
"""Function to interpolate in between two parametersDict.
This function can be used to interpolate in between parametersDicts. The
function takes as input two parameterDicts. The function interpolates in
between the parametersDicts generating a given number of equally-spaced
samples. By default, only one sample is added, corresponding to an element
in between the two provided parameterDicts (e.g. m = (s+e)/2). Generated
parameterDicts are returned in a collection. By default, associated labels are
m1, m2, ... or otherwise specified by the user through a tuple.
Args:
params_dict1: the first parameterDict.
params_dict2: the second parameterDict.
    interpolation_length: the number of interpolation samples.
interpolation_keys: the keys used in the resulting collection.
Returns:
the collection of combinations
"""
result_dictionary = collections.OrderedDict({})
  # Creating intermediate objects from two adjacent ones.
  if not interpolation_keys:
    for i in range(0, interpolation_length):
      interpolation_keys = (*interpolation_keys, "m" + str(i + 1))
for i in range(1, interpolation_length+1):
obj_nickname = interpolation_keys[i - 1]
step = i / (interpolation_length + 1)
obj_values = params_dict1 + (params_dict2 - params_dict1) * step
result_dictionary.update({obj_nickname: obj_values})
return result_dictionary
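# Illustrative sketch (ParametersDict supports +, - and *, as used above):
#   mids = parameters_interpolations(d1, d2, interpolation_length=3)
# yields an OrderedDict {"m1": ..., "m2": ..., "m3": ...} of equally-spaced
# samples between d1 and d2.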
def parameters_numeric_combinations(
params_dict_collection: collections.OrderedDict,
labels_alphabetic_keys: Tuple[str, ...],
labels_numeric_keys: Tuple[str, ...],
combination_length: int = 2) -> collections.OrderedDict:
"""Function to combine collections of parametersDict with alphanumeric keys.
This function can be used to create combinations of parametersDict. The
function takes as input a collection of parameterDicts each labelled with an
alphanumeric string (e.g. e1, e2, e3, g1, g2, g3). The function combines the
  parametersDicts taking the set of alphabetic keys (e.g. {e, g}) and the set of
numeric keys (e.g. {1, 2, 3}). By default, for each alphabetic key all
2-combinations of numeric keys are created using the parameterDicts algebra.
In the example above we have: e12 = (e1 + e2) // 2, e13 = (e1 + e3) // 2,
e23 = (e2 + e3) // 2, g12 = (g1 + g2) // 2, g13 = (g1 + g3) // 2,
g23 = (g2 + g3) // 2. Otherwise, a specific combination length can be
specified. If 3-combination is specified then the following parameterDicts
are created: e123 = (e1 + e2 + e3) // 3 and g123 = (g1 + g2 + g3) // 3.
Args:
params_dict_collection: a collection of parametersDict. The keys associated
to each parametersDict should be alphanumeric.
labels_alphabetic_keys: the alphabetic part of the key labels.
labels_numeric_keys: the numeric part of the key labels.
    combination_length: the length of created combinations.
Returns:
the collection of combinations
"""
result_dictionary = collections.OrderedDict({})
# Creating intermediate objects from two adjacent ones.
for alpha in labels_alphabetic_keys:
for num in itertools.combinations(labels_numeric_keys, combination_length):
obj_nickname = alpha
obj_nickname = obj_nickname + num[0]
obj_values = params_dict_collection[alpha + num[0]]
for i in range(1, combination_length):
obj_nickname = obj_nickname + num[i]
obj_values = obj_values + params_dict_collection[alpha + num[i]]
obj_values = obj_values // combination_length
result_dictionary.update({obj_nickname: obj_values})
return result_dictionary
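# Illustrative sketch: parameters_numeric_combinations(coll, ("e",), ("1", "2"))
# creates {"e12": (coll["e1"] + coll["e2"]) // 2}, matching the docstring above.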
def parameters_equispaced_combinations(
params_dict_collection: collections.OrderedDict,
coefficients: Tuple[int, ...]) -> collections.OrderedDict:
"""Function to create equispaced combinations.
  This function can be used to create equally-spaced convex combinations of
  parametersDicts. The function takes as input a collection of alphabetically
tagged parameterDicts (e.g. a, .. z). The function combines the given
parametersDicts to create new parametersDicts constructed as a*ca + ..
+ z*cz with ca + .. + cz = 1. The number of generated parametersDicts is
controlled by fixing the valid values for the coefficients cn. The resulting
objects are named aca_..._zcz.
Args:
params_dict_collection: a collection of parametersDict.
coefficients: the valid coefficients (tuple of int) expressed as integer
percentage, (0, 25, 50, 75, 100) corresponds to (0, 0.25, 0.5, 0.75, 1).
Returns:
the collection of combinations
"""
result_dictionary = collections.OrderedDict({})
n = len(params_dict_collection)
# Creating valid combinations
valid_combinations = [
s for s in itertools.product(coefficients, repeat=n) if sum(s) == 100
]
# Creating convex combinations of objects
result_dictionary = collections.OrderedDict({})
for valid_combination in valid_combinations:
obj_nickname = ""
obj_values = None
p = params_dict_collection
for kn, vn, cn in zip(p.keys(), p.values(), valid_combination):
if obj_values is None:
obj_values = vn * cn
if cn != 0:
obj_nickname = str(coefficients.index(cn)) + kn # pytype: disable=attribute-error
else:
obj_values = obj_values + vn * cn
if cn != 0:
obj_nickname = obj_nickname + str(coefficients.index(cn)) + kn
result_dictionary.update({obj_nickname: obj_values//100})
return result_dictionary
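# Illustrative sketch: with inputs {"a": da, "b": db} and coefficients
# (0, 50, 100), the valid weightings are (0, 100), (50, 50) and (100, 0),
# yielding entries named "2b", "1a1b" and "2a" (each digit is the index of
# the coefficient in the coefficients tuple).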
class RgbObjectsNames:
"""A class to define the RGB-objects names according to different versions.
Args:
version: string to describe the RGB-objects version.
"""
def __init__(self, version: RgbVersion = RgbVersion.v1_0):
self.__version__ = version
self._nicknames = collections.OrderedDict({})
if version == RgbVersion.v1_0:
self._nicknames.update(_OBJECTS_V1_0)
if version == RgbVersion.v1_3:
self._nicknames = collections.OrderedDict(copy.deepcopy(_OBJECTS_V1_3))
      # Adding dn, fn, en, un, hn, xn, ln, vn, mn, yn by linearly
      # interpolating 10 objects in between "s" and "rn".
for n in ("2", "3", "5", "6", "7", "8"):
self._nicknames.update(parameters_interpolations(
_OBJECTS_V1_3["s"],
_OBJECTS_V1_3["r" + n],
10, ("d"+n, "f"+n, "e"+n, "u"+n, "h"+n,
"x"+n, "l"+n, "v"+n, "m"+n, "y"+n)))
# Updating the seed object name.
self._nicknames["s0"] = self._nicknames.pop("s")
# Creating intermediate Omn = (Om + On)//2.
self._nicknames.update(parameters_numeric_combinations(
self._nicknames,
("d", "f", "e", "h", "x", "l", "m", "y", "r", "u", "v"),
("2", "3", "5", "6", "7", "8"),
2))
      # Remove the 'd2' object, which is identical to the seed 's0'; both are cubes.
self._nicknames.pop("d2")
# Remove non-graspable and non-unique
for o in _OBJECTS_V1_3_NON_GRASPABLE + _OBJECTS_V1_3_NON_UNIQUE:
self._nicknames.pop(o, None)
# Add RGB v1.0 objects, except for the hollow ones.
self._nicknames.update(_OBJECTS_V1_0)
for o in ["r1", "b1", "g1"]:
self._nicknames.pop(o, None)
# This is necessary to guarantee one-to-one mapping: parameters <-> shapes
for v in self._nicknames.values():
if (v["shr"], v["drf"], v["hlw"], v["shx"], v["shy"]) == (0,)*5:
ordered_scale = sorted((v["scx"], v["scy"], v["scz"]))
v["scx"] = ordered_scale[0]
v["scy"] = ordered_scale[1]
v["scz"] = ordered_scale[2]
    # Look for duplicate shapes and drop the extra nicknames, keeping the
    # first occurrence of each.
my_rgb = parametric_rgb_object.RgbObject(version)
uniques, duplicates = set(), set()
self._duplicates_groups = dict()
uniques_dict = {}
for obj_nick, obj_dict in self.nicknames.items():
obj_name = my_rgb.shape.get_name(obj_dict)
if obj_name in uniques:
duplicates.add(obj_nick)
self._duplicates_groups[
obj_name] = self._duplicates_groups[obj_name] + (obj_nick,)
else:
uniques.add(obj_name)
self._duplicates_groups.update({obj_name: (obj_nick,)})
uniques_dict.update({obj_nick: obj_name})
if duplicates:
for o in duplicates:
self._nicknames.pop(o, None)
@property
def nicknames(self) -> Dict[str, ParametersDict]:
    # Dictionary of creation parameters sorted by nickname.
return collections.OrderedDict(sorted(self._nicknames.items()))
@property
def duplicates(self) -> Dict[str, Tuple[str]]:
# Dictionary of object names and associated nicknames.
return self._duplicates_groups
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbmonbindings(base_resource) :
""" Configuration for monitro bindings resource. """
def __init__(self) :
self._monitorname = ""
self._type = ""
self._state = ""
self.___count = 0
@property
def monitorname(self) :
ur"""The name of the monitor.<br/>Minimum length = 1.
"""
try :
return self._monitorname
except Exception as e:
raise e
@monitorname.setter
def monitorname(self, monitorname) :
ur"""The name of the monitor.<br/>Minimum length = 1
"""
try :
self._monitorname = monitorname
except Exception as e:
raise e
@property
def type(self) :
ur"""The type of monitor.<br/>Possible values = PING, TCP, HTTP, TCP-ECV, HTTP-ECV, UDP-ECV, DNS, FTP, LDNS-PING, LDNS-TCP, LDNS-DNS, RADIUS, USER, HTTP-INLINE, SIP-UDP, LOAD, FTP-EXTENDED, SMTP, SNMP, NNTP, MYSQL, MYSQL-ECV, MSSQL-ECV, ORACLE-ECV, LDAP, POP3, CITRIX-XML-SERVICE, CITRIX-WEB-INTERFACE, DNS-TCP, RTSP, ARP, CITRIX-AG, CITRIX-AAC-LOGINPAGE, CITRIX-AAC-LAS, CITRIX-XD-DDC, ND6, CITRIX-WI-EXTENDED, DIAMETER, RADIUS_ACCOUNTING, STOREFRONT, APPC, CITRIX-XNC-ECV, CITRIX-XDM.
"""
try :
return self._type
except Exception as e:
raise e
@property
def state(self) :
ur"""The state of the monitor.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbmonbindings_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbmonbindings
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.monitorname is not None :
return str(self.monitorname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the lbmonbindings resources that are configured on netscaler.
"""
try :
if type(name) != cls :
if type(name) is not list :
obj = lbmonbindings()
obj.monitorname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [lbmonbindings() for _ in range(len(name))]
obj = [lbmonbindings() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = lbmonbindings()
obj[i].monitorname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
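	# Illustrative (assuming an authenticated nitro_service client):
	#   bindings = lbmonbindings.get(client, "my-monitor")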
@classmethod
def get_filtered(cls, client, filter_, obj) :
ur""" Use this API to fetch filtered set of lbmonbindings resources.
		filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
option_ = options()
option_.filter = filter_
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client, obj) :
ur""" Use this API to count the lbmonbindings resources configured on NetScaler.
"""
try :
option_ = options()
option_.count = True
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_, obj) :
ur""" Use this API to count filtered the set of lbmonbindings resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
option_ = options()
option_.count = True
option_.filter = filter_
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Type:
PING = "PING"
TCP = "TCP"
HTTP = "HTTP"
TCP_ECV = "TCP-ECV"
HTTP_ECV = "HTTP-ECV"
UDP_ECV = "UDP-ECV"
DNS = "DNS"
FTP = "FTP"
LDNS_PING = "LDNS-PING"
LDNS_TCP = "LDNS-TCP"
LDNS_DNS = "LDNS-DNS"
RADIUS = "RADIUS"
USER = "USER"
HTTP_INLINE = "HTTP-INLINE"
SIP_UDP = "SIP-UDP"
LOAD = "LOAD"
FTP_EXTENDED = "FTP-EXTENDED"
SMTP = "SMTP"
SNMP = "SNMP"
NNTP = "NNTP"
MYSQL = "MYSQL"
MYSQL_ECV = "MYSQL-ECV"
MSSQL_ECV = "MSSQL-ECV"
ORACLE_ECV = "ORACLE-ECV"
LDAP = "LDAP"
POP3 = "POP3"
CITRIX_XML_SERVICE = "CITRIX-XML-SERVICE"
CITRIX_WEB_INTERFACE = "CITRIX-WEB-INTERFACE"
DNS_TCP = "DNS-TCP"
RTSP = "RTSP"
ARP = "ARP"
CITRIX_AG = "CITRIX-AG"
CITRIX_AAC_LOGINPAGE = "CITRIX-AAC-LOGINPAGE"
CITRIX_AAC_LAS = "CITRIX-AAC-LAS"
CITRIX_XD_DDC = "CITRIX-XD-DDC"
ND6 = "ND6"
CITRIX_WI_EXTENDED = "CITRIX-WI-EXTENDED"
DIAMETER = "DIAMETER"
RADIUS_ACCOUNTING = "RADIUS_ACCOUNTING"
STOREFRONT = "STOREFRONT"
APPC = "APPC"
CITRIX_XNC_ECV = "CITRIX-XNC-ECV"
CITRIX_XDM = "CITRIX-XDM"
class lbmonbindings_response(base_response) :
def __init__(self, length=1) :
self.lbmonbindings = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbmonbindings = [lbmonbindings() for _ in range(length)]
|
|
from common_fixtures import * # NOQA
def test_host_deactivate(super_client, new_context):
host = new_context.host
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = super_client.wait_success(host.deactivate())
assert host.state == 'inactive'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
def test_host_deactivate_two_hosts(super_client, new_context):
host = new_context.host
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
# Create another host using the same agent
other_host = super_client.create_host(agentId=agent.id)
other_host = super_client.wait_success(other_host)
assert other_host.state == 'active'
assert other_host.agentId == agent.id
host = super_client.wait_success(host.deactivate())
assert host.state == 'inactive'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
def test_host_activate(super_client, new_context):
host = new_context.host
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = super_client.wait_success(host.deactivate())
assert host.state == 'inactive'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = super_client.wait_success(host.activate())
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
def test_host_purge(super_client, new_context):
account_id = new_context.project.id
image_uuid = 'sim:{}'.format(random_num())
host = new_context.host
phy_host = super_client.reload(host).physicalHost()
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
c1 = super_client.create_container(accountId=account_id,
imageUuid=image_uuid,
requestedHostId=host.id)
c1 = super_client.wait_success(c1)
assert c1.state == 'running'
c2 = super_client.create_container(accountId=account_id,
imageUuid=image_uuid,
requestedHostId=host.id)
c2 = super_client.wait_success(c2)
assert c2.state == 'running'
host = super_client.wait_success(host.deactivate())
host = super_client.wait_success(super_client.delete(host))
assert host.state == 'removed'
assert host.removed is not None
agent = super_client.wait_success(host.agent())
wait_for(lambda: super_client.reload(agent).state == 'removed')
host = super_client.wait_success(host.purge())
assert host.state == 'purged'
phy_host = super_client.wait_success(phy_host)
assert phy_host.state == 'removed'
c1 = super_client.wait_success(c1)
assert c1.removed is not None
assert c1.state == 'removed'
c2 = super_client.wait_success(c2)
assert c2.removed is not None
assert c2.state == 'removed'
c1 = super_client.wait_success(c1.purge())
assert c1.state == 'purged'
volume = super_client.wait_success(c1.volumes()[0])
assert volume.state == 'removed'
volume = super_client.wait_success(volume.purge())
assert volume.state == 'purged'
def test_host_container_actions_inactive(new_context):
host = new_context.host
client = new_context.client
c = new_context.create_container()
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
c = client.wait_success(c.stop())
assert c.state == 'stopped'
c = client.wait_success(c.start())
assert c.state == 'running'
def test_host_create_container_inactive(new_context):
client = new_context.client
host = new_context.host
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
c = new_context.create_container_no_success()
assert c.transitioning == 'error'
def test_host_create_container_requested_inactive(super_client, new_context):
client = new_context.client
host = new_context.host
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
c = new_context.create_container_no_success(requestedHostId=host.id)
wait_for(lambda: super_client.reload(c).transitioning == 'error')
def test_host_agent_state(super_client, new_context):
agent = super_client.reload(new_context.host).agent()
agent = super_client.wait_success(agent)
assert agent.state == 'active'
agent = super_client.wait_success(agent.deactivate())
host = new_context.client.reload(new_context.host)
assert host.state == 'active'
assert agent.state == 'inactive'
assert agent.state == host.agentState
agent = super_client.wait_success(agent.activate())
host = new_context.client.reload(new_context.host)
assert host.state == 'active'
assert agent.state == 'active'
assert agent.state == host.agentState
def test_host_remove(super_client, new_context):
client = new_context.client
container = new_context.create_container()
host = super_client.reload(new_context.host)
pool = find_one(host.storagePools)
agent = host.agent()
agent_account = agent.account()
phy_host = host.physicalHost()
key = find_one(super_client.list_register, key=agent.data.registrationKey)
instances = host.instances()
assert len(instances) == 2
assert container.state == 'running'
assert host.state == 'active'
assert pool.state == 'active'
assert agent.state == 'active'
assert agent_account.state == 'active'
assert phy_host.state == 'active'
assert key.state == 'active'
assert key.secretKey is not None
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
host = client.wait_success(client.delete(host))
assert host.state == 'removed'
agent = super_client.wait_success(agent)
wait_for(lambda: super_client.reload(agent).state == 'removed')
pool = super_client.wait_success(pool)
assert pool.state == 'removed'
phy_host = super_client.wait_success(phy_host)
assert phy_host.state == 'removed'
key = super_client.wait_success(key)
assert key.state == 'removed'
agent_account = super_client.wait_success(agent_account)
assert agent_account.state == 'removed'
container = super_client.wait_success(container)
assert container.state == 'removed'
for c in instances:
c = super_client.wait_success(c)
assert c.state == 'removed'
def test_host_dockersocket(context, client):
host = client.reload(context.host)
dockersocket = host.dockersocket()
assert dockersocket.token.index('.') > 0
assert '/v1/dockersocket/' in dockersocket.url
def test_host_dockersocket_inactive(context, client):
host = client.wait_success(context.host.deactivate())
dockersocket = host.dockersocket()
assert dockersocket.token.index('.') > 0
assert '/v1/dockersocket/' in dockersocket.url
|
|
import driver
import itertools
import math
import os.path
import json
import time
class Display(object):
def __init__(self, width=112, height=15):
self.width = width
self.height = height
        self.buffer = [[0] * width for _ in range(height)]
def clear(self):
for r in range(self.height):
for c in range(self.width):
self.buffer[r][c] = 0
def draw(self, serial_driver):
serial_driver.draw(itertools.chain.from_iterable(self.buffer))
def __str__(self):
        rows = (''.join('#' if cell else ' ' for cell in row) for row in self.buffer)
        return '\n'.join(rows) + '\n'
class Sprite(object):
def __init__(self, x=0, y=0):
self.x = x
self.y = y
self.visible = 1
def draw(self, display):
pass
class DisplayBox(Sprite):
def __init__(self, *args, width=112, height=15, **kwargs):
super().__init__(*args, **kwargs)
self.width = width
self.height = height
        self.buffer = [[0] * width for _ in range(height)]
        self.sprites = set()
def add(self, sprite):
self.sprites.add(sprite)
def clear_sprites(self):
self.sprites.clear()
def clear(self):
for r in range(self.height):
for c in range(self.width):
self.buffer[r][c] = 0
def draw(self, display):
if not self.visible:
return
self.clear()
for sprite in self.sprites:
sprite.draw(self)
for row in range(0, self.height):
for col in range(0, self.width):
# It must be within both the actual and the virtual display
# bounds in order to be drawn.
if (0 <= row < self.height) \
and (0 <= self.y + row < display.height) \
and (0 <= col < self.width) \
and (0 <= self.x + col < display.width):
display.buffer[self.y + row][self.x + col] = self.buffer[row][col]
class Rectangle(Sprite):
def __init__(self, width, height, *args, wrapx=False, wrapy=False, **kwargs):
super().__init__(*args, **kwargs)
self.width = width
self.height = height
self.wrapx = wrapx
self.wrapy = wrapy
def draw(self, display):
if not self.visible:
return
for r in range(round(self.y), round(self.y + self.height)):
for c in range(round(self.x), round(self.x + self.width)):
if (0 <= r < display.height or self.wrapy) and (0 <= c < display.width or self.wrapx):
display.buffer[r % display.height][c % display.width] = 1
class Circle(Sprite):
def __init__(self, radius, *args, **kwargs):
super().__init__(*args, **kwargs)
self.radius = radius
def draw(self, display):
if not self.visible:
return
for r in range(
max(int(self.y - self.radius + 0.5), 0),
min(int(self.y + self.radius + 0.5) + 1, display.height)):
for c in range(
max(int(self.x - self.radius + 0.5), 0),
min(int(self.x + self.radius + 0.5) + 1, display.width)):
dx = c - self.x
dy = r - self.y
if math.sqrt(dx*dx + dy*dy) < self.radius:
display.buffer[r][c] = 1
class CharacterSprite(Sprite):
class FontError(Exception): pass
class FontNotImplementedError(FontError): pass
fontspecs = {}
def __init__(self, letter, *args, width=4, height=4, **kwargs):
super().__init__(*args, **kwargs)
self.letter = letter
self.width = width
self.height = height
# If not loaded, load the appropriate font size from a file.
dimensionstr = "%dx%d" % (width, height)
if (dimensionstr not in type(self).fontspecs):
try:
with open(os.path.join(os.path.dirname(
os.path.abspath(__file__)), "font",
"%s.json" % dimensionstr), 'r') as f:
type(self).fontspecs[dimensionstr] = json.load(f)
except FileNotFoundError as e:
raise type(self).FontNotImplementedError(dimensionstr)
# except ValueError as e:
# raise type(self).FontError(str(e))
self.fontspec = type(self).fontspecs[dimensionstr]
def draw(self, display):
if not self.visible:
return
# Get the letter, or a block if not available
# XXX: document the uppercase more explicitly
tfmatrix = self.fontspec.get(self.letter.upper(),
[[1]*self.width]*self.height)
for rownum, row in enumerate(tfmatrix):
for colnum, pixel in enumerate(row):
                if (0 <= rownum + self.y < display.height) \
                        and (0 <= colnum + self.x < display.width):
                    display.buffer[rownum + self.y][colnum + self.x] = pixel
class TextSprite(Sprite):
def __init__(self, text, *args, width=4, height=4, **kwargs):
super().__init__(*args, **kwargs)
self.width = width
self.height = height
self.set_text(text)
def set_text(self, text):
self.text = text.upper()
self.sprites = [CharacterSprite(c, y=self.y, width=self.width, height=self.height) for c in self.text]
def size(self):
return (self.width + 1) * len(self.sprites)
def draw(self, display):
if not self.visible:
return
for i, x in enumerate(range(
int(self.x),
int(self.x + (self.width + 1) * len(self.sprites)), self.width + 1)):
self.sprites[i].x = x
self.sprites[i].y = self.y
for sprite in self.sprites:
sprite.draw(display)
class Animator(Sprite):
def __init__(self, targets, attr="x", max=112, min=0,
step=1, delay=1, pause=0, loop=False, reverse=False):
        super().__init__()
        self.targets = [targets] if isinstance(targets, Sprite) else targets
self.step = step
self.pause = pause
self.attr = attr
self.min = min
self.max = max
self.loop = loop
self.reverse = reverse
self.delay = delay
self.next_animate = time.time() + delay
def draw(self, display):
for s in self.targets:
if time.time() >= self.next_animate:
self.next_animate = time.time() + self.delay
setattr(s, self.attr, getattr(s, self.attr) + self.step)
if getattr(s, self.attr) > self.max:
if self.loop:
# simulate it wrapping around to the next location
setattr(s, self.attr, self.min + (getattr(s, self.attr) - self.max))
elif self.reverse:
# simulate it hitting the end and bouncing back
self.step = -self.step
setattr(s, self.attr, self.max - (getattr(s, self.attr) - self.max))
if self.pause:
self.next_animate += self.pause
elif getattr(s, self.attr) < self.min:
if self.loop:
setattr(s, self.attr, self.max - (self.min - getattr(s, self.attr)))
elif self.reverse:
self.step = -self.step
setattr(s, self.attr, self.min + (self.min - getattr(s, self.attr)))
if self.pause:
self.next_animate += self.pause
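# A minimal usage sketch for Animator (an illustrative helper, not part of the
# demo below): Animator is itself a Sprite whose draw() call advances the
# animation state but leaves the actual rendering to its targets, so both must
# be "drawn" once per frame. This sketch only prints to the terminal and
# assumes the 4x4 font JSON used by CharacterSprite is available next to this
# module.
def animator_demo(frames=5):
    disp = Display()
    text = TextSprite("HI", x=0, y=5)
    scroller = Animator(text, attr="x", min=-text.size(), max=disp.width,
                        step=2, delay=0, loop=True)
    for _ in range(frames):
        disp.clear()
        scroller.draw(disp)  # advances text.x, wrapping around via loop=True
        text.draw(disp)
        print(disp)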
if __name__ == '__main__':
disp = Display()
circ = Circle(4, 15/2+1, 15/2)
rect = Rectangle(3, 4, 25, 5)
wrect = Rectangle(4, 4, 0, -1, wrapx=True, wrapy=True)
charH = CharacterSprite("H", x=31, y=5)
charI = CharacterSprite("I", x=36, y=5)
dispbox = DisplayBox(x=20, y=12, width=5, height=5)
rectbox = Rectangle(10, 10, 0, 0)
dispbox.add(rectbox)
world = TextSprite("World", x=41, y=5)
circ.draw(disp)
rect.draw(disp)
wrect.draw(disp)
charH.draw(disp)
charI.draw(disp)
world.draw(disp)
dispbox.draw(disp)
print(disp)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MoveCollectionsOperations:
"""MoveCollectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~region_move_service_api.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.MoveCollection"] = None,
**kwargs
) -> "models.MoveCollection":
"""Creates or updates a move collection.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:param body:
:type body: ~region_move_service_api.models.MoveCollection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MoveCollection, or the result of cls(response)
:rtype: ~region_move_service_api.models.MoveCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MoveCollection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'MoveCollection')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MoveCollection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('MoveCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}'} # type: ignore
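    # Usage sketch (illustrative only; this module is AutoRest-generated, and the
    # `move_collections` attribute path below is an assumption based on common
    # generated-client conventions):
    #
    #     collection = await client.move_collections.create(
    #         resource_group_name="my-rg",           # hypothetical names
    #         move_collection_name="my-collection",
    #         body=models.MoveCollection(),          # hypothetical payload
    #     )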
async def update(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.UpdateMoveCollectionRequest"] = None,
**kwargs
) -> "models.MoveCollection":
"""Updates a move collection.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:param body:
:type body: ~region_move_service_api.models.UpdateMoveCollectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MoveCollection, or the result of cls(response)
:rtype: ~region_move_service_api.models.MoveCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MoveCollection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'UpdateMoveCollectionRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MoveCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
move_collection_name: str,
**kwargs
) -> Optional["models.OperationStatus"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
move_collection_name: str,
**kwargs
) -> AsyncLROPoller["models.OperationStatus"]:
"""Deletes a move collection.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~region_move_service_api.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
move_collection_name=move_collection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}'} # type: ignore
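    # Usage sketch for the long-running delete (illustrative; the
    # `move_collections` attribute name is an assumption about the generated
    # client):
    #
    #     poller = await client.move_collections.begin_delete("my-rg", "my-collection")
    #     status = await poller.result()  # OperationStatus once the LRO completes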
async def get(
self,
resource_group_name: str,
move_collection_name: str,
**kwargs
) -> "models.MoveCollection":
"""Gets the move collection.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MoveCollection, or the result of cls(response)
:rtype: ~region_move_service_api.models.MoveCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MoveCollection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MoveCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}'} # type: ignore
async def _prepare_initial(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.PrepareRequest"] = None,
**kwargs
) -> Optional["models.OperationStatus"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._prepare_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'PrepareRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_prepare_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/prepare'} # type: ignore
async def begin_prepare(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.PrepareRequest"] = None,
**kwargs
) -> AsyncLROPoller["models.OperationStatus"]:
"""Initiates prepare for the set of resources included in the request body. The prepare operation
is on the moveResources that are in the moveState 'PreparePending' or 'PrepareFailed', on a
successful completion the moveResource moveState do a transition to MovePending. To aid the
user to prerequisite the operation the client can call operation with validateOnly property set
to true.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:param body:
:type body: ~region_move_service_api.models.PrepareRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~region_move_service_api.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._prepare_initial(
resource_group_name=resource_group_name,
move_collection_name=move_collection_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_prepare.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/prepare'} # type: ignore
async def _initiate_move_initial(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.ResourceMoveRequest"] = None,
**kwargs
) -> Optional["models.OperationStatus"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._initiate_move_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ResourceMoveRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_initiate_move_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/initiateMove'} # type: ignore
async def begin_initiate_move(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.ResourceMoveRequest"] = None,
**kwargs
) -> AsyncLROPoller["models.OperationStatus"]:
"""Moves the set of resources included in the request body. The move operation is triggered after
the moveResources are in the moveState 'MovePending' or 'MoveFailed', on a successful
completion the moveResource moveState do a transition to CommitPending. To aid the user to
prerequisite the operation the client can call operation with validateOnly property set to
true.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:param body:
:type body: ~region_move_service_api.models.ResourceMoveRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~region_move_service_api.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._initiate_move_initial(
resource_group_name=resource_group_name,
move_collection_name=move_collection_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_initiate_move.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/initiateMove'} # type: ignore
async def _commit_initial(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.CommitRequest"] = None,
**kwargs
) -> Optional["models.OperationStatus"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._commit_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'CommitRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_commit_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/commit'} # type: ignore
async def begin_commit(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.CommitRequest"] = None,
**kwargs
) -> AsyncLROPoller["models.OperationStatus"]:
"""Commits the set of resources included in the request body. The commit operation is triggered on
the moveResources in the moveState 'CommitPending' or 'CommitFailed', on a successful
completion the moveResource moveState do a transition to Committed. To aid the user to
prerequisite the operation the client can call operation with validateOnly property set to
true.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:param body:
:type body: ~region_move_service_api.models.CommitRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~region_move_service_api.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._commit_initial(
resource_group_name=resource_group_name,
move_collection_name=move_collection_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_commit.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/commit'} # type: ignore
async def _discard_initial(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.DiscardRequest"] = None,
**kwargs
) -> Optional["models.OperationStatus"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._discard_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'DiscardRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_discard_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/discard'} # type: ignore
async def begin_discard(
self,
resource_group_name: str,
move_collection_name: str,
body: Optional["models.DiscardRequest"] = None,
**kwargs
) -> AsyncLROPoller["models.OperationStatus"]:
"""Discards the set of resources included in the request body. The discard operation is triggered
on the moveResources in the moveState 'CommitPending' or 'DiscardFailed', on a successful
completion the moveResource moveState do a transition to MovePending. To aid the user to
prerequisite the operation the client can call operation with validateOnly property set to
true.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:param body:
:type body: ~region_move_service_api.models.DiscardRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~region_move_service_api.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._discard_initial(
resource_group_name=resource_group_name,
move_collection_name=move_collection_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_discard.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/discard'} # type: ignore
async def _resolve_dependencies_initial(
self,
resource_group_name: str,
move_collection_name: str,
**kwargs
) -> Optional["models.OperationStatus"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
# Construct URL
url = self._resolve_dependencies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'moveCollectionName': self._serialize.url("move_collection_name", move_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_resolve_dependencies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/resolveDependencies'} # type: ignore
async def begin_resolve_dependencies(
self,
resource_group_name: str,
move_collection_name: str,
**kwargs
) -> AsyncLROPoller["models.OperationStatus"]:
"""Computes, resolves and validate the dependencies of the moveResources in the move collection.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param move_collection_name: The Move Collection Name.
:type move_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~region_move_service_api.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._resolve_dependencies_initial(
resource_group_name=resource_group_name,
move_collection_name=move_collection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resolve_dependencies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections/{moveCollectionName}/resolveDependencies'} # type: ignore
def list_move_collections_by_subscription(
self,
**kwargs
) -> AsyncIterable["models.MoveCollectionResultList"]:
"""Get all Move Collections.
Get all the Move Collections in the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MoveCollectionResultList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~region_move_service_api.models.MoveCollectionResultList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MoveCollectionResultList"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_move_collections_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MoveCollectionResultList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_move_collections_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Migrate/moveCollections'} # type: ignore
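    # Usage sketch for async paging (illustrative; the `move_collections`
    # attribute name is an assumption). The AsyncItemPaged iterator yields the
    # individual MoveCollection items from each MoveCollectionResultList page:
    #
    #     async for collection in client.move_collections.list_move_collections_by_subscription():
    #         print(collection.name)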
def list_move_collections_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.MoveCollectionResultList"]:
"""Get all Move Collections.
Get all the Move Collections in the resource group.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MoveCollectionResultList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~region_move_service_api.models.MoveCollectionResultList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MoveCollectionResultList"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_move_collections_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MoveCollectionResultList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_move_collections_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/moveCollections'} # type: ignore
|
|
"""
Encapsulate the methodology to process a segment from the moves-api.
Builds RDF triples based on:
http://motools.sourceforge.net/event/event.html
"""
import logging
from rhobot.components.storage import StoragePayload
from move_bot.components.update_service.interval_handler import IntervalHandler
from move_bot.components.update_service.location_handler import LocationHandler
from move_bot.components.namespace import EVENT, MOVES_SEGMENT
from rdflib.namespace import RDFS, DC, DCTERMS
logger = logging.getLogger(__name__)
class ProcessSegment:
"""
Callable that encapsulates the work that needs to be done to insert an event into the data store inside a promise.
These steps are:
    If an event already exists for the segment:
        Update the contents of that event.
    Else:
        Create a new event.
"""
def __init__(self, segment, owner, xmpp):
"""
Construct the callable.
:param segment: segment to process.
:param owner: owner of the installation
:param xmpp: bot details
"""
self._segment = segment
self._scheduler = xmpp['rho_bot_scheduler']
self._storage_client = xmpp['rho_bot_storage_client']
self._promise = None
self._publisher = xmpp['rho_bot_rdf_publish']
self._representation_manager = xmpp['rho_bot_representation_manager']
self._owner = owner
self._node_id = None
self.xmpp = xmpp
self.interval_handler = IntervalHandler(xmpp)
self.location_handler = LocationHandler(xmpp, owner)
def __call__(self, *args):
"""
        Executable method for the instance. This looks up whether the object needs to be updated or
        created, then builds the promise chain that will accomplish the task.
:param args:
:return:
"""
logger.info('Processing segment: %s' % self._segment)
self._promise = self._scheduler.promise()
# Check in the database to see if there is anything that currently has the segment defined in it
payload = StoragePayload()
payload.add_type(EVENT.Event)
payload.add_property(RDFS.seeAlso, MOVES_SEGMENT[self._segment['startTime']])
self._storage_client.find_nodes(payload).then(self._handle_find_result)
return self._promise
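    # Usage sketch (illustrative, assuming an xmpp bot with the rho_bot_*
    # plugins registered):
    #
    #     process = ProcessSegment(segment, owner, xmpp)
    #     process().then(lambda result: logger.info('segment stored: %s', result))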
def _finish_process(self, session=None):
"""
Common exit point for the promise chain.
:param session:
:return:
"""
self._promise.resolved(session)
return None
def _handle_find_result(self, result):
if result.results:
self._node_id = result.results[0].about
update_promise = self._scheduler.defer(self.start_session).then(self._find_place)
update_promise = update_promise.then(self._get_interval).then(self._update_node)
update_promise.then(self._finish_process, lambda s: self._promise.rejected(s))
return update_promise
else:
create_promise = self._scheduler.defer(self.start_session).then(self._find_place)
create_promise = create_promise.then(self._create_interval).then(self._create_node)
create_promise.then(self._finish_process, lambda s: self._promise.rejected(s))
return create_promise
def start_session(self):
return dict()
def _find_place(self, session):
"""
Find the place associated with the segment.
:param session:
:return:
"""
logger.debug('Finding place: %s' % session)
location_promise = self.location_handler(self._segment['place']).then(
self._scheduler.generate_promise_handler(self._update_session, session, 'location'))
return location_promise
def _get_interval(self, session):
"""
Get the event node to be updated, then update the interval object, and put the result into the session value.
:param session: session variable.
:return:
"""
logger.debug('Get Interval: %s' % session)
def update_interval(result):
interval_reference = result.references.get(str(EVENT.time), None)
if interval_reference:
interval_reference = interval_reference[0]
interval_promise = self.interval_handler(interval_reference,
self._segment['startTime'],
self._segment['endTime'])
interval_promise = interval_promise.then(
self._scheduler.generate_promise_handler(self._update_session, session, 'interval'))
return interval_promise
payload = StoragePayload()
payload.about = self._node_id
promise = self._storage_client.get_node(payload).then(update_interval)
return promise
def _create_interval(self, session):
"""
Create a new interval and add it to the session variable.
:param session:
:return:
"""
logger.debug('Create Interval: %s' % session)
interval_promise = self.interval_handler(None, self._segment['startTime'], self._segment['endTime'])
interval_promise = interval_promise.then(
self._scheduler.generate_promise_handler(self._update_session, session, 'interval'))
return interval_promise
@staticmethod
def _update_session(interval_result, session, key):
"""
Process the results of the creation.
"""
session[key] = interval_result
return session
def _create_node(self, session):
"""
        Create a new node and add additional properties based on the session.
:param session:
:return:
"""
logger.debug('Creating Node')
payload = self._convert_segment_to_payload(session)
# Only set the title when first creating it. The update might override a field that has been changed by the
# user.
place_name = self._segment['place'].get('name', 'Unknown')
payload.add_property(key=DC.title, value=place_name)
promise = self._storage_client.create_node(payload).then(
self._scheduler.generate_promise_handler(self._publish_modifications, created=True)).then(
lambda s: s.results[0].about)
return promise
def _update_node(self, session):
"""
        Update the previously found node with the data from the session.
:return:
"""
logger.info('Updating Node')
payload = self._convert_segment_to_payload(session)
        # Update the about field so that the correct node is updated.
payload.about = self._node_id
promise = self._storage_client.update_node(payload).then(
self._scheduler.generate_promise_handler(self._publish_modifications, created=False)).then(
lambda s: s.results[0].about)
return promise
def _convert_segment_to_payload(self, session):
"""
Convert the segment details into a payload object.
:return:
"""
payload = StoragePayload()
payload.add_type(EVENT.Event)
payload.add_reference(key=EVENT.agent, value=self._owner)
payload.add_reference(key=DCTERMS.creator, value=self._representation_manager.representation_uri)
payload.add_property(RDFS.seeAlso, MOVES_SEGMENT[self._segment['startTime']])
if session['location']:
payload.add_reference(key=EVENT.place, value=session['location'][0])
if session['interval']:
payload.add_reference(key=EVENT.time, value=session['interval'][0])
return payload
def _publish_modifications(self, result, created=True):
self._publisher.publish_all_results(result, created=created)
return result
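# Illustrative usage (a minimal sketch: the segment shape and plugin names are
# assumptions inferred from the constructor above, not a documented API):
#
#     segment = {'startTime': '20170101T080000Z',
#                'endTime': '20170101T090000Z',
#                'place': {'name': 'Office'}}
#     process_segment = ProcessSegment(segment, owner_uri, xmpp)
#     promise = process_segment()  # resolves once the event node is stored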
|
|
""".. Ignore pydocstyle D400.
==================
Register Processes
==================
"""
import os
import sys
import re
import jsonschema
import yaml
from versionfield.utils import convert_version_string_to_int
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from django.db.models import Max, Q
from resolwe.flow.engine import InvalidEngineError
from resolwe.flow.finders import get_finders
from resolwe.flow.managers import manager
from resolwe.flow.models import DescriptorSchema, Process
from resolwe.flow.models.base import VERSION_NUMBER_BITS
from resolwe.flow.models.utils import validate_schema, validation_schema
from resolwe.flow.utils import dict_dot, iterate_schema
from resolwe.permissions.utils import assign_contributor_permissions, copy_permissions
PROCESSOR_SCHEMA = validation_schema("processor")
DESCRIPTOR_SCHEMA = validation_schema("descriptor")
SCHEMA_TYPE_DESCRIPTOR = "descriptor"
SCHEMA_TYPE_PROCESS = "process"
class Command(BaseCommand):
"""Register processes."""
help = "Register processes"
def add_arguments(self, parser):
"""Command arguments."""
parser.add_argument(
"-f",
"--force",
action="store_true",
help="register also if version mismatch",
)
parser.add_argument(
"--retire",
default=False,
action="store_true",
help="retire obsolete processes",
)
def valid(self, instance, schema):
"""Validate schema."""
try:
jsonschema.validate(instance, schema)
except jsonschema.exceptions.ValidationError as ex:
self.stderr.write(
" VALIDATION ERROR: {}".format(
instance["name"] if "name" in instance else ""
)
)
self.stderr.write(" path: {}".format(ex.path))
self.stderr.write(" message: {}".format(ex.message))
self.stderr.write(" validator: {}".format(ex.validator))
self.stderr.write(" val. value: {}".format(ex.validator_value))
return False
try:
# Check that default values fit field schema.
for field in ["input", "output", "schema"]:
for schema, _, path in iterate_schema({}, instance.get(field, {})):
if "default" in schema:
validate_schema({schema["name"]: schema["default"]}, [schema])
except ValidationError:
self.stderr.write(" VALIDATION ERROR: {}".format(instance["name"]))
self.stderr.write(
" Default value of field '{}' is not valid.".format(path)
)
return False
return True
def find_descriptor_schemas(self, schema_file):
"""Find descriptor schemas in given path."""
if not schema_file.lower().endswith((".yml", ".yaml")):
return []
with open(schema_file) as fn:
schemas = yaml.load(fn, Loader=yaml.FullLoader)
if not schemas:
self.stderr.write("Could not read YAML file {}".format(schema_file))
return []
descriptor_schemas = []
for schema in schemas:
if "schema" not in schema:
continue
descriptor_schemas.append(schema)
return descriptor_schemas
def find_schemas(self, schema_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=1):
"""Find schemas in packages that match filters."""
schema_matches = []
if not os.path.isdir(schema_path):
if verbosity > 0:
self.stdout.write("Invalid path {}".format(schema_path))
            return []  # keep the list return type consistent so callers can extend()
if schema_type not in [SCHEMA_TYPE_PROCESS, SCHEMA_TYPE_DESCRIPTOR]:
raise ValueError("Invalid schema type")
for root, _, files in os.walk(schema_path):
for schema_file in [os.path.join(root, fn) for fn in files]:
schemas = None
if schema_type == SCHEMA_TYPE_DESCRIPTOR:
# Discover descriptors.
schemas = self.find_descriptor_schemas(schema_file)
elif schema_type == SCHEMA_TYPE_PROCESS:
# Perform process discovery for all supported execution engines.
schemas = []
for execution_engine in manager.execution_engines.values():
schemas.extend(execution_engine.discover_process(schema_file))
for schema in schemas:
schema_matches.append(schema)
return schema_matches
def register_processes(self, process_schemas, user, force=False, verbosity=1):
"""Read and register processors."""
log_processors = []
log_templates = []
for p in process_schemas:
# TODO: Remove this when all processes are migrated to the
# new syntax.
if "flow_collection" in p:
if "entity" in p:
self.stderr.write(
"Skip processor {}: only one of 'flow_collection' and 'entity' fields "
"allowed".format(p["slug"])
)
continue
p["entity"] = {"type": p.pop("flow_collection")}
if p["type"][-1] != ":":
p["type"] += ":"
if "category" in p and not p["category"].endswith(":"):
p["category"] += ":"
for field in ["input", "output"]:
for schema, _, _ in iterate_schema({}, p[field] if field in p else {}):
if not schema["type"][-1].endswith(":"):
schema["type"] += ":"
# TODO: Check if schemas validate with our JSON meta schema and Processor model docs.
if not self.valid(p, PROCESSOR_SCHEMA):
continue
if "entity" in p:
if "type" not in p["entity"]:
self.stderr.write(
"Skip process {}: 'entity.type' required if 'entity' defined".format(
p["slug"]
)
)
continue
if "input" in p["entity"] and p["entity"].get("always_create", False):
self.stderr.write(
"Skip process {}: 'entity.input' will not be considered if 'entity.always_create' "
"is set to true.".format(p["slug"])
)
continue
p["entity_type"] = p["entity"]["type"]
p["entity_descriptor_schema"] = p["entity"].get(
"descriptor_schema", p["entity_type"]
)
p["entity_input"] = p["entity"].get("input", None)
p["entity_always_create"] = p["entity"].get("always_create", False)
p.pop("entity")
if not DescriptorSchema.objects.filter(
slug=p["entity_descriptor_schema"]
).exists():
self.stderr.write(
"Skip processor {}: Unknown descriptor schema '{}' used in 'entity' "
"field.".format(p["slug"], p["entity_descriptor_schema"])
)
continue
if "persistence" in p:
persistence_mapping = {
"RAW": Process.PERSISTENCE_RAW,
"CACHED": Process.PERSISTENCE_CACHED,
"TEMP": Process.PERSISTENCE_TEMP,
}
p["persistence"] = persistence_mapping[p["persistence"]]
if "scheduling_class" in p:
scheduling_class_mapping = {
"interactive": Process.SCHEDULING_CLASS_INTERACTIVE,
"batch": Process.SCHEDULING_CLASS_BATCH,
}
p["scheduling_class"] = scheduling_class_mapping[p["scheduling_class"]]
if "input" in p:
p["input_schema"] = p.pop("input")
if "output" in p:
p["output_schema"] = p.pop("output")
slug = p["slug"]
if "run" in p:
# Set default language to 'bash' if not set.
p["run"].setdefault("language", "bash")
# Transform output schema using the execution engine.
try:
execution_engine = manager.get_execution_engine(
p["run"]["language"]
)
extra_output_schema = execution_engine.get_output_schema(p)
if extra_output_schema:
p.setdefault("output_schema", []).extend(extra_output_schema)
except InvalidEngineError:
self.stderr.write(
"Skip processor {}: execution engine '{}' not supported".format(
slug, p["run"]["language"]
)
)
continue
# Validate if container image is allowed based on the configured pattern.
# NOTE: This validation happens here and is not deferred to executors because the idea
# is that this will be moved to a "container" requirement independent of the
# executor.
if hasattr(settings, "FLOW_CONTAINER_VALIDATE_IMAGE"):
try:
container_image = dict_dot(p, "requirements.executor.docker.image")
if not re.match(
settings.FLOW_CONTAINER_VALIDATE_IMAGE, container_image
):
self.stderr.write(
"Skip processor {}: container image does not match '{}'".format(
slug, settings.FLOW_CONTAINER_VALIDATE_IMAGE
)
)
continue
except KeyError:
pass
version = p["version"]
int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS)
# `latest version` is returned as `int` so it has to be compared to `int_version`
latest_version = Process.objects.filter(slug=slug).aggregate(
Max("version")
)["version__max"]
if latest_version is not None and latest_version > int_version:
self.stderr.write(
"Skip processor {}: newer version installed".format(slug)
)
continue
previous_process_qs = Process.objects.filter(slug=slug)
if previous_process_qs.exists():
previous_process = previous_process_qs.latest()
else:
previous_process = None
process_query = Process.objects.filter(slug=slug, version=version)
if process_query.exists():
if not force:
if verbosity > 0:
self.stdout.write(
"Skip processor {}: same version installed".format(slug)
)
continue
process_query.update(**p)
log_processors.append("Updated {}".format(slug))
else:
process = Process.objects.create(contributor=user, **p)
assign_contributor_permissions(process)
if previous_process:
copy_permissions(previous_process, process)
log_processors.append("Inserted {}".format(slug))
if verbosity > 0:
if log_processors:
self.stdout.write("Processor Updates:")
for log in log_processors:
self.stdout.write(" {}".format(log))
if log_templates:
self.stdout.write("Default Template Updates:")
for log in log_templates:
self.stdout.write(" {}".format(log))
def register_descriptors(self, descriptor_schemas, user, force=False, verbosity=1):
"""Read and register descriptors."""
log_descriptors = []
for descriptor_schema in descriptor_schemas:
for schema, _, _ in iterate_schema({}, descriptor_schema.get("schema", {})):
if not schema["type"][-1].endswith(":"):
schema["type"] += ":"
if "schema" not in descriptor_schema:
descriptor_schema["schema"] = []
if not self.valid(descriptor_schema, DESCRIPTOR_SCHEMA):
continue
slug = descriptor_schema["slug"]
version = descriptor_schema.get("version", "0.0.0")
int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS)
# `latest version` is returned as `int` so it has to be compared to `int_version`
latest_version = DescriptorSchema.objects.filter(slug=slug).aggregate(
Max("version")
)["version__max"]
if latest_version is not None and latest_version > int_version:
self.stderr.write(
"Skip descriptor schema {}: newer version installed".format(slug)
)
continue
previous_descriptor_qs = DescriptorSchema.objects.filter(slug=slug)
if previous_descriptor_qs.exists():
previous_descriptor = previous_descriptor_qs.latest()
else:
previous_descriptor = None
descriptor_query = DescriptorSchema.objects.filter(
slug=slug, version=version
)
if descriptor_query.exists():
if not force:
if verbosity > 0:
self.stdout.write(
"Skip descriptor schema {}: same version installed".format(
slug
)
)
continue
descriptor_query.update(**descriptor_schema)
log_descriptors.append("Updated {}".format(slug))
else:
descriptor = DescriptorSchema.objects.create(
contributor=user, **descriptor_schema
)
assign_contributor_permissions(descriptor)
if previous_descriptor:
copy_permissions(previous_descriptor, descriptor)
log_descriptors.append("Inserted {}".format(slug))
if log_descriptors and verbosity > 0:
self.stdout.write("Descriptor schemas Updates:")
for log in log_descriptors:
self.stdout.write(" {}".format(log))
def retire(self, process_schemas):
"""Retire obsolete processes.
Remove old process versions without data. Find processes that have been
registered but do not exist in the code anymore, then:
- If they do not have data: remove them
- If they have data: flag them not active (``is_active=False``)
"""
process_slugs = set(ps["slug"] for ps in process_schemas)
# Processes that are in DB but not in the code
retired_processes = Process.objects.filter(~Q(slug__in=process_slugs))
# Remove retired processes which do not have data
retired_processes.filter(data__exact=None).delete()
# Remove non-latest processes which do not have data
latest_version_processes = Process.objects.order_by(
"slug", "-version"
).distinct("slug")
Process.objects.filter(data__exact=None).exclude(
id__in=latest_version_processes
).delete()
# Deactivate retired processes which have data
retired_processes.update(is_active=False)
def handle(self, *args, **options):
"""Register processes."""
force = options.get("force")
retire = options.get("retire")
verbosity = int(options.get("verbosity"))
users = (
get_user_model().objects.filter(is_superuser=True).order_by("date_joined")
)
if not users.exists():
self.stderr.write("Admin does not exist: create a superuser")
            sys.exit(1)
process_paths, descriptor_paths = [], []
process_schemas, descriptor_schemas = [], []
for finder in get_finders():
process_paths.extend(finder.find_processes())
descriptor_paths.extend(finder.find_descriptors())
for proc_path in process_paths:
process_schemas.extend(
self.find_schemas(
proc_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=verbosity
)
)
for desc_path in descriptor_paths:
descriptor_schemas.extend(
self.find_schemas(
desc_path, schema_type=SCHEMA_TYPE_DESCRIPTOR, verbosity=verbosity
)
)
user_admin = users.first()
self.register_descriptors(
descriptor_schemas, user_admin, force, verbosity=verbosity
)
# NOTE: Descriptor schemas must be registered first, so
# processes can validate 'entity_descriptor_schema' field.
self.register_processes(process_schemas, user_admin, force, verbosity=verbosity)
if retire:
self.retire(process_schemas)
if verbosity > 0:
self.stdout.write("Running executor post-registration hook...")
manager.get_executor().post_register_hook(verbosity=verbosity)
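# Typical invocation (assuming this module lives in a Django app's
# management/commands/ directory; the command name follows the file name):
#
#     python manage.py register             # register new process versions
#     python manage.py register --force     # re-register matching versions
#     python manage.py register --retire    # also retire obsolete processes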
|
|
import sys
import pywintypes
import win32api
import win32con
import win32netcon
import ntsecuritycon
import win32security
import win32net
import ctypes
import enum
import _winreg as winreg
import winsys
from winsys import accounts, registry, security
STUDENTS_GROUP = "OPEStudents"
import gluon.contrib.aes as AES
import os
import threading
import base64
def fast_urandom16(urandom=[], locker=threading.RLock()):
"""
this is 4x faster than calling os.urandom(16) and prevents
the "too many files open" issue with concurrent access to os.urandom()
"""
try:
return urandom.pop()
except IndexError:
try:
locker.acquire()
ur = os.urandom(16 * 1024)
urandom += [ur[i:i + 16] for i in xrange(16, 1024 * 16, 16)]
return ur[0:16]
finally:
locker.release()
def pad(s, n=32, padchar=' '):
if len(s) == 0:
# Handle empty value - pad it out w empty data
s += padchar * n
return s
while ((len(s) % n) != 0):
s += padchar
return s
def AES_new(key, iv=None):
""" Returns an AES cipher object and random IV if None specified """
if iv is None:
iv = fast_urandom16()
# return AES.new(key, AES.MODE_CBC, IV), IV
return AES.AESModeOfOperationCBC(key, iv = iv), iv
def encrypt(data, key):
key = pad(key[:32])
cipher, iv = AES_new(key)
encrypted_data = iv + cipher.encrypt(pad(data, 16))
return base64.urlsafe_b64encode(encrypted_data)
def decrypt(data, key):
key = pad(key[:32])
if data is None:
data = ""
try:
data = base64.urlsafe_b64decode(data)
except TypeError as ex:
# Don't let error blow things up
pass
iv, data = data[:16], data[16:]
try:
cipher, _ = AES_new(key, iv=iv)
    except Exception:
# bad IV = bad data
return data
try:
data = cipher.decrypt(data)
    except Exception:
# Don't let error blow things up
pass
data = data.rstrip(' ')
return data
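# Illustrative round trip (a sketch only; the key and plaintext are
# placeholders, and gluon.contrib.aes provides the CBC implementation used
# above):
#
#     token = encrypt("hello world", "my secret key")
#     plain = decrypt(token, "my secret key")  # -> "hello world"
#
# Note that decrypt() strips trailing spaces, so plaintext that legitimately
# ends with spaces will lose them after the pad()/rstrip() round trip.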
def create_local_student_account(user_name, full_name, password):
global STUDENTS_GROUP
# Ensure the Students group exists
ret = create_local_students_group()
# Create local student account
student = None
try:
print("\tAdding student account...")
accounts.User.create(user_name, password)
except pywintypes.error as err:
if err[2] == "The account already exists.":
pass
else:
# Unexpected error
print(str(err))
ret = False
# Get the student object
student = accounts.user(user_name)
# Set properties for this student
# win32net.NetUserChangePassword(None, user_name, old_pw, password)
user_data = dict()
user_data['name'] = user_name
user_data['full_name'] = full_name
user_data['password'] = password
user_data['flags'] = win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_PASSWD_CANT_CHANGE | win32netcon.UF_DONT_EXPIRE_PASSWD | win32netcon.UF_SCRIPT
user_data['priv'] = win32netcon.USER_PRIV_USER
user_data['comment'] = 'OPE Student Account'
# user_data['home_dir'] = home_dir
# user_data['home_dir_drive'] = "h:"
user_data['primary_group_id'] = ntsecuritycon.DOMAIN_GROUP_RID_USERS
user_data['password_expired'] = 0
user_data['acct_expires'] = win32netcon.TIMEQ_FOREVER
win32net.NetUserSetInfo(None, user_name, 3, user_data)
# Add student to the students group
print("\tAdding student to students group...")
grp = accounts.LocalGroup(accounts.group(STUDENTS_GROUP).sid)
try:
grp.add(student)
except pywintypes.error as err:
if err[2] == "The specified account name is already a member of the group.":
pass
else:
# Unexpected error
print(str(err))
ret = False
# # home_dir = "%s\\%s" % (server_name, user_name)
#
return ret
def create_local_students_group():
global STUDENTS_GROUP
# Ensure the local students group exists
ret = True
try:
accounts.LocalGroup.create(STUDENTS_GROUP)
except pywintypes.error as err:
if err[2] == "The specified local group already exists.":
pass
else:
# Unexpected error
print(str(err))
ret = False
return ret
def delete_user(user_name):
# Remove the local user
accounts.User(user_name).delete()
def disable_guest_account():
pass
# Run this to disable the guest account?
# NET USER Guest /ACTIVE:no
def create_reg_key(key_str, user_name=None):
reg = registry.create(key_str)
# Add the user to the key with permissions
if user_name is not None:
with reg.security() as s:
# Break inheritance causes things to reapply properly
s.break_inheritance(copy_first=True)
s.dacl.append((user_name, "W", "ALLOW"))
s.dacl.append((accounts.me(), "F", "ALLOW"))
# s.dacl.dump()
return reg
# def reorder_acls(acl_list):
# # Order acls in this order
# # 1 - Access denied on object
# # 2 - Access denied on child or property
# # 3 - Access allowed on object
# # 4 - Access allowed on child or property
# # 5 - All inherited ACEs
#
# deny_object = list()
# deny_other = list()
# allow_object = list()
# allow_other = list()
# inherited = list()
#
# for i in range(acl_list.GetAceCount()):
# ace = acl_list.GetAce(i)
# if ace[0][1] & win32security.INHERITED_ACE:
# inherited.append(ace)
# elif ace[0][0] == win32security.ACCESS_ALLOWED_ACE_TYPE:
# allow_other.append(ace)
# elif ace[0][0] == win32security.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
# allow_object.append(ace)
# elif ace[0][0] == win32security.ACCESS_DENIED_ACE_TYPE:
# deny_other.append(ace)
# elif ace[0][0] == win32security.ACCESS_DENIED_OBJECT_ACE_TYPE:
# deny_object.append(ace)
#
# print("ACE: " + str(win32security.LookupAccountSid(None, ace[2])) + " - " + str(ace))
#
# print("deny object " + str(deny_object))
# print("deny other " + str(deny_other))
# print("allow object " + str(allow_object))
# print("allow other " + str(allow_other))
# print("inherited " + str(inherited))
#
# # Reassemble aces into a list
# ret = win32security.ACL()
#
# # deny
# for d in deny_other:
# ret.AddAccessDeniedAceEx(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2]
# )
# # deny object
# for d in deny_object:
# ret.AddAccessDeniedObjectAce(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2], d[3], d[4]
# )
# # allow
# for d in allow_other:
# ret.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2]
# )
# # allow object
# for d in allow_object:
# ret.AddAccessAllowedObjectAce(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2], d[3], d[4]
# )
# # inherited
# print("Inherited: ")
# for d in inherited:
# if d[0][0] == win32security.ACCESS_ALLOWED_ACE_TYPE:
# ret.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2]
# )
# print("ALLOWED " + str(win32security.LookupAccountSid(None, d[2])))
# elif d[0][0] == win32security.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
# ret.AddAccessAllowedObjectAce(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2], d[3], d[4]
# )
# print("ALLOWED OBJ " + str(win32security.LookupAccountSid(None, d[2])))
# elif d[0][0] == win32security.ACCESS_DENIED_ACE_TYPE:
# ret.AddAccessDeniedAceEx(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2]
# )
# print("DENIED " + str(win32security.LookupAccountSid(None, d[2])))
# elif d[0][0] == win32security.ACCESS_DENIED_OBJECT_ACE_TYPE:
# ret.AddAccessDeniedObjectAce(win32security.ACL_REVISION_DS,
# d[0][1], d[1], d[2], d[3], d[4]
# )
# print("DENIED OBJ " + str(win32security.LookupAccountSid(None, d[2])))
# return ret
#
#
# def add_user_to_key(reg_key, user_name=""):
#
# return True
# def create_user_security_descriptor(user_name):
# sid_user = win32security.LookupAccountName(user_name)
# sd = win32security.SECURITY_DESCRIPTOR()
#
# # Create well known SID for the administrators group
# sub_auths = ntsecuritycon.SECURITY_BUILTIN_DOMAIN_RID, \
# ntsecuritycon.DOMAIN_ALIAS_RID_ADMINS
# sid_admins = win32security.SID(ntsecuritycon.SECURITY_NT_AUTHORITY, sub_auths)
#
# # Create ACL with user and admins full access
# acl = win32security.ACL(128)
# acl.AddAccessAllowedAce(win32file.FILE_ALL_ACCESS, sid_user)
# acl.AddAccessAllowedAce(win32file.FILE_ALL_ACCESS, sid_admins)
#
# sd.SetSecurityDescriptorDacl(1, acl, 0)
#
# return sd
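# Illustrative usage (account names and password are placeholders; requires
# an elevated prompt so the win32net calls can manage local accounts):
#
#     create_local_students_group()
#     create_local_student_account("student01", "Student One", "S3cret!pw")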
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DataSetsOperations(object):
"""DataSetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datashare.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
data_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DataSet"
"""Get DataSet in a share.
Get a DataSet in a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param data_set_name: The name of the dataSet.
:type data_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataSet, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.DataSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'dataSetName': self._serialize.url("data_set_name", data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DataSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets/{dataSetName}'} # type: ignore
def create(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
data_set_name, # type: str
data_set, # type: "_models.DataSet"
**kwargs # type: Any
):
# type: (...) -> "_models.DataSet"
"""Adds a new data set to an existing share.
Create a DataSet.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share to add the data set to.
:type share_name: str
:param data_set_name: The name of the dataSet.
:type data_set_name: str
:param data_set: The new data set information.
:type data_set: ~azure.mgmt.datashare.models.DataSet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataSet, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.DataSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'dataSetName': self._serialize.url("data_set_name", data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(data_set, 'DataSet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DataSet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DataSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets/{dataSetName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
data_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'dataSetName': self._serialize.url("data_set_name", data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets/{dataSetName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
data_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete DataSet in a share.
Delete a DataSet in a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param data_set_name: The name of the dataSet.
:type data_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
data_set_name=data_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'dataSetName': self._serialize.url("data_set_name", data_set_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets/{dataSetName}'} # type: ignore
def list_by_share(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
skip_token=None, # type: Optional[str]
filter=None, # type: Optional[str]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DataSetList"]
"""List DataSets in a share.
List DataSets in a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param skip_token: continuation token.
:type skip_token: str
:param filter: Filters the results using OData syntax.
:type filter: str
:param orderby: Sorts the results using OData syntax.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataSetList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datashare.models.DataSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_share.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DataSetList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_share.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets'} # type: ignore
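# Illustrative client usage (a sketch: the credential, subscription id and
# resource names are placeholders; DataShareManagementClient exposes this
# class as its ``data_sets`` attribute):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.datashare import DataShareManagementClient
#
#     client = DataShareManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for data_set in client.data_sets.list_by_share("my-rg", "my-account", "my-share"):
#         print(data_set.name)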
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
module: iworkflow_license_pool
short_description: Manage license pools in iWorkflow
description:
- Manage license pools in iWorkflow.
version_added: 2.4
options:
name:
description:
- Name of the license pool to create.
required: True
state:
description:
- Whether the license pool should exist, or not. A state of C(present)
will attempt to activate the license pool if C(accept_eula) is set
to C(yes).
required: False
default: present
choices:
- present
- absent
base_key:
description:
- Key that the license server uses to verify the functionality that
you are entitled to license. This option is required if you are
creating a new license.
required: False
default: None
accept_eula:
description:
- Specifies that you accept the EULA that is part of iWorkflow. Note
that this is required to activate the license pool. If this is not
specified, or it is set to C(no), then the pool will remain in a state
of limbo until you choose to accept the EULA. This option is required
when updating a license. It is also suggested that you provide it when
creating a license, but if you do not, the license will remain
inactive and you will have to run this module again with this option
set to C(yes) to activate it.
required: False
default: 'no'
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.3.0
- iWorkflow >= 2.1.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create license pool
iworkflow_license_pool:
accept_eula: "yes"
name: "my-lic-pool"
base_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX"
state: "present"
server: "iwf.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
'''
RETURN = '''
'''
import time
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
F5ModuleError,
HAS_F5SDK,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'baseRegKey': 'base_key'
}
returnables = []
api_attributes = [
'baseRegKey', 'state'
]
updatables = []
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def name(self):
if self._values['name'] is None:
return None
name = str(self._values['name']).strip()
if name == '':
raise F5ModuleError(
"You must specify a name for this module"
)
return name
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def _pool_is_licensed(self):
if self.have.state == 'LICENSED':
return True
return False
def _pool_is_unlicensed_eula_unaccepted(self, current):
if current.state != 'LICENSED' and not self.want.accept_eula:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.changes.to_return())
result.update(dict(changed=changed))
return result
def exists(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
if len(collection) == 1:
return True
elif len(collection) == 0:
return False
else:
raise F5ModuleError(
"Multiple license pools with the provided name were found!"
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def should_update(self):
if self._pool_is_licensed():
return False
        if self._pool_is_unlicensed_eula_unaccepted(self.have):
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
        if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
resource = collection.pop()
resource.modify(
state='RELICENSE',
method='AUTOMATIC'
)
return self._wait_for_license_pool_state_to_activate(resource)
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
if self.want.base_key is None:
raise F5ModuleError(
"You must specify a 'base_key' when creating a license pool"
)
self.create_on_device()
return True
def read_current_from_device(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
resource = collection.pop()
result = resource.attrs
return Parameters(result)
def create_on_device(self):
resource = self.client.api.cm.shared.licensing.pools_s.pool.create(
name=self.want.name,
baseRegKey=self.want.base_key,
method="AUTOMATIC"
)
return self._wait_for_license_pool_state_to_activate(resource)
def _wait_for_license_pool_state_to_activate(self, pool):
error_values = ['EXPIRED', 'FAILED']
# Wait no more than 5 minutes
for x in range(1, 30):
pool.refresh()
if pool.state == 'LICENSED':
return True
elif pool.state == 'WAITING_FOR_EULA_ACCEPTANCE':
pool.modify(
eulaText=pool.eulaText,
state='ACCEPTED_EULA'
)
elif pool.state in error_values:
raise F5ModuleError(pool.errorText)
            time.sleep(10)
        raise F5ModuleError(
            "Timed out waiting for the license pool state to become LICENSED"
        )
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the license pool")
return True
def remove_from_device(self):
collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
requests_params=dict(
params="$filter=name+eq+'{0}'".format(self.want.name)
)
)
resource = collection.pop()
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
accept_eula=dict(
type='bool',
default='no',
choices=BOOLEANS
),
base_key=dict(
required=False,
no_log=True
),
name=dict(
required=True
),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
)
)
self.f5_product_name = 'iworkflow'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
|
from django.test import TestCase
from django.db import models
from .models import (Product, WarehouseEntry, ProductCategory, ExtremeWidget,
SaleInvoice, Employee, ProductRating, Property, PropertyOwner)
from .views import (index, rate_product, CategoryCreateView, ProductCreateView,
ProductDeleteView, ProductUpdateView, ExtremeWidgetCreateView,
PropertyOwnerCreateView, PropertyCreateView, PropertyUpdateView)
from django.test.client import Client
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^/$', index),
url(r'^rate/(\d)/$', rate_product),
url(r'^category/create/$', CategoryCreateView.as_view()),
url(r'^product/create/$', ProductCreateView.as_view()),
url(r'^product/update/(?P<pk>\d+)/$', ProductUpdateView.as_view()),
url(r'^product/delete/(?P<pk>\d+)/$', ProductDeleteView.as_view()),
url(r'^extremewidget/create/$', ExtremeWidgetCreateView.as_view()),
url(r'^propertyowner/create/$', PropertyOwnerCreateView.as_view()),
url(r'^property/create/$', PropertyCreateView.as_view()),
url(r'^property/update/(?P<pk>\d+)/$', PropertyUpdateView.as_view()),
)
def __setup_admins():
from django.contrib.auth.models import User
User.objects.all().delete()
admin = User(username = "admin@example.com", is_staff = True, is_superuser = True)
admin.set_password("admin")
admin.save()
admin = User(username = "admin1@example.com", is_staff = True, is_superuser = True)
admin.set_password("admin1")
admin.save()
def __setup_employees():
from .models import Employee
Employee.objects.all().delete()
admin = Employee(email = "admin@example.com",)
admin.set_password("admin")
admin.save()
admin = Employee(email = "admin1@example.com",)
admin.set_password("admin1")
admin.save()
def _setup_admin():
from django.conf import settings
if settings.AUTH_USER_MODEL =="audit_log.Employee":
__setup_employees()
else:
__setup_admins()
class LogEntryMetaOptionsTest(TestCase):
def test_app_label(self):
        self.assertEqual(Product.audit_log.model._meta.app_label, Product._meta.app_label)
        self.assertEqual(WarehouseEntry.audit_log.model._meta.app_label, WarehouseEntry._meta.app_label)
    def test_table_name(self):
        self.assertEqual(Product.audit_log.model._meta.db_table, "%sauditlogentry" % Product._meta.db_table)
        self.assertEqual(WarehouseEntry.audit_log.model._meta.db_table, "%sauditlogentry" % WarehouseEntry._meta.db_table)
class TrackingAuthFieldsTest(TestCase):
urls = __name__
def setUp(self):
category = ProductCategory.objects.create(name = "gadgets", description = "gadgetry")
category.product_set.create(name = "new gadget", description = "best gadget eva", price = 100)
def test_logging_user(self):
_setup_admin()
product = Product.objects.get(pk = 1)
self.assertEqual(product.productrating_set.all().count(), 0)
c = Client()
c.login(username = "admin@example.com", password = "admin")
c.post('/rate/1/', {'rating': 4})
self.assertEqual(product.productrating_set.all().count(), 1)
self.assertEqual(product.productrating_set.all()[0].user.username, "admin@example.com")
def test_logging_session(self):
_setup_admin()
product = Product.objects.get(pk = 1)
self.assertEqual(product.productrating_set.all().count(), 0)
c = Client()
c.login(username = "admin@example.com", password = "admin")
c.get('/rate/1/',)
key = c.session.session_key
resp = c.post('/rate/1/', {'rating': 4})
self.assertEqual(resp.status_code, 200)
self.assertEqual(product.productrating_set.all().count(), 1)
self.assertIsNotNone(product.productrating_set.all()[0].session)
self.assertEqual(product.productrating_set.all()[0].session, key)
def test_logging_anon_session(self):
pass
#TODO need to find a way to test this
def test_logging_user_none(self):
product = Product.objects.get(pk = 1)
self.assertEqual(product.productrating_set.all().count(), 0)
c = Client()
c.post('/rate/1/', {'rating': 4})
self.assertEqual(product.productrating_set.all().count(), 1)
self.assertEqual(product.productrating_set.all()[0].user, None)
class TrackingChangesTest(TestCase):
urls = __name__
def run_client(self, client):
client.post('/category/create/', {'name': 'Test Category', 'description': 'Test description'})
client.post('/category/create/', {'name': 'Test Category 2', 'description': 'Test description 2'})
client.post('/product/create/', {'name': 'Test Product', 'description': 'Test description', 'price': '2.22', 'category': 'Test Category'})
client.post('/product/update/1/', {'name': 'Test Product', 'description': 'Test description new', 'price': '5.00', 'category': 'Test Category'})
def test_logging_insert_anon(self):
c = Client()
self.run_client(c)
category = ProductCategory.objects.get(name = 'Test Category')
self.assertEqual(category.audit_log.all()[0].name, category.name)
self.assertEqual(category.audit_log.all()[0].description, category.description)
self.assertEqual(category.audit_log.all()[0].action_type, "I")
self.assertEqual(category.audit_log.all()[0].action_user, None)
def test_logging_insert_auth(self):
_setup_admin()
c = Client()
c.login(username = "admin@example.com", password = "admin")
self.run_client(c)
category = ProductCategory.objects.get(name = 'Test Category 2')
self.assertEqual(category.audit_log.all()[0].name, category.name)
self.assertEqual(category.audit_log.all()[0].description, category.description)
self.assertEqual(category.audit_log.all()[0].action_type, "I")
self.assertNotEqual(category.audit_log.all()[0].action_user, None)
self.assertEqual(category.audit_log.all()[0].action_user.username, 'admin@example.com')
    def test_logging_update_anon(self):
c = Client()
self.run_client(c)
        product = Product.objects.get(name = 'Test Product')
self.assertGreater(product.audit_log.all()[0].action_date, product.audit_log.all()[1].action_date)
self.assertEqual(product.audit_log.all()[1].action_type, 'I')
self.assertEqual(product.audit_log.all()[0].action_type, 'U')
self.assertEqual(product.audit_log.all()[0].description, 'Test description new')
self.assertEqual(product.audit_log.all()[0].price, 5.00)
self.assertEqual(product.audit_log.all()[0].action_user, None)
    def test_logging_update_auth(self):
_setup_admin()
c = Client()
c.login(username = 'admin@example.com', password = 'admin')
self.run_client(c)
        product = Product.objects.get(name = 'Test Product')
self.assertNotEqual(product.audit_log.all()[0].action_user, None)
self.assertEqual(product.audit_log.all()[0].action_user.username, 'admin@example.com')
def test_logging_delete_anon(self):
c = Client()
self.run_client(c)
c.post('/product/delete/1/')
self.assertEqual(Product.objects.all().count(), 0)
self.assertEqual(Product.audit_log.all()[0].action_type, 'D')
self.assertEqual(Product.audit_log.all()[0].name, 'Test Product')
self.assertEqual(Product.audit_log.all()[0].action_user, None)
def test_logging_delete_auth(self):
_setup_admin()
c = Client()
c.login(username = 'admin@example.com', password = 'admin')
self.run_client(c)
self.assertEqual(Product.objects.all().count(), 1)
c.post('/product/delete/1/')
self.assertEqual(Product.objects.all().count(), 0)
self.assertEqual(Product.audit_log.all()[0].action_type, 'D')
self.assertEqual(Product.audit_log.all()[0].name, 'Test Product')
self.assertNotEqual(Product.audit_log.all()[0].action_user, None)
self.assertEqual(Product.audit_log.all()[0].action_user.username, 'admin@example.com')
def test_logging_inherited(self):
_setup_admin()
c = Client()
c.login(username = "admin@example.com", password = "admin")
c.post('/extremewidget/create/', {'name': 'Test name', 'special_power': 'Testpower'})
widget = ExtremeWidget.objects.all()[0]
self.assertEqual(widget.audit_log.all()[0].name, 'Test name')
self.assertTrue(hasattr(widget.audit_log.all()[0], 'special_power'))
self.assertEqual(widget.audit_log.all()[0].special_power, "Testpower")
class TestOneToOne(TestCase):
urls = __name__
def run_client(self, client):
client.post('/propertyowner/create/', {'name': 'John Dory'})
client.post('/propertyowner/create/', {'name': 'Jane Doe'})
client.post('/property/create/', {'name': 'Property1', 'owned_by': '1'})
client.post('/property/update/1/', {'name': 'Property2', 'owned_by': '2'})
def test_fields(self):
c = Client()
self.run_client(c)
owner = PropertyOwner.objects.get(pk = 1)
prop = Property.objects.get(pk = 1)
self.assertEqual(prop.audit_log.all()[0]._meta.get_field('owned_by').__class__, models.ForeignKey)
def test_logging(self):
c = Client()
self.run_client(c)
owner1 = PropertyOwner.objects.get(pk = 1)
owner2 = PropertyOwner.objects.get(pk = 2)
prop = Property.objects.get(pk = 1)
self.assertEqual(prop.audit_log.all().count(), 2)
self.assertEqual(prop.audit_log.all()[0].action_type, 'U')
self.assertEqual(prop.audit_log.all()[1].action_type, 'I')
self.assertEqual(prop.audit_log.all()[0].owned_by, owner2)
self.assertEqual(prop.audit_log.all()[1].owned_by, owner1)
|
|
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises, \
assert_array_almost_equal, assert_
from scipy.signal import firwin, firwin2, kaiserord, freqz, remez
class TestFirwin(TestCase):
def check_response(self, h, expected_response, tol=.05):
N = len(h)
alpha = 0.5 * (N-1)
m = np.arange(0,N) - alpha # time indices of taps
for freq, expected in expected_response:
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
mse = abs(actual-expected)**2
self.assertTrue(mse < tol, 'response not as expected, mse=%g > %g'\
%(mse, tol))
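# Note: since m is centred on alpha, the magnitude computed above equals
# |H(e^(j*pi*freq))|; a sketch of the equivalent check via freqz:
#   w, H = freqz(h, worN=np.array([np.pi * freq]))
#   actual = abs(H[0])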
def test_response(self):
N = 51
f = .5
# increase length just to try even/odd
h = firwin(N, f) # low-pass from 0 to f
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+1, f, window='nuttall') # specific window
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
self.check_response(h, [(.25,0), (.75,1)])
f1, f2, f3, f4 = .2, .4, .6, .8
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
h = firwin(N+4, [f1, f2]) # band-stop filter
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
h = firwin(N+7, 0.1, width=.03) # low-pass
self.check_response(h, [(.05,1), (.75,0)])
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
self.check_response(h, [(.05,0), (.75,1)])
def mse(self, h, bands):
"""Compute mean squared error versus ideal response across frequency
band.
h -- coefficients
bands -- list of (left, right) tuples relative to 1==Nyquist of
passbands
"""
w, H = freqz(h, worN=1024)
f = w/np.pi
passIndicator = np.zeros(len(w), bool)
for left, right in bands:
passIndicator |= (f>=left) & (f<right)
Hideal = np.where(passIndicator, 1, 0)
mse = np.mean(abs(abs(H)-Hideal)**2)
return mse
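# For example (a sketch), scoring a rectangular-window lowpass design
# against its single passband:
#   err = self.mse(firwin(11, 0.5, window='ones', scale=False), [(0, .5)])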
def test_scaling(self):
"""
For one lowpass, bandpass, and highpass example filter, this test
checks two things:
- the mean squared error over the frequency domain of the unscaled
filter is smaller than the scaled filter (true for rectangular
window)
- the response of the scaled filter is exactly unity at the center
of the first passband
"""
N = 11
cases = [
([.5], True, (0, 1)),
([0.2, .6], False, (.4, 1)),
([.5], False, (1, 1)),
]
for cutoff, pass_zero, expected_response in cases:
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
if len(cutoff) == 1:
if pass_zero:
cutoff = [0] + cutoff
else:
cutoff = cutoff + [1]
self.assertTrue(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
'least squares violation')
self.check_response(hs, [expected_response], 1e-12)
class TestFirWinMore(TestCase):
"""Different author, different style, different tests..."""
def test_lowpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta), scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_highpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
# Ensure that ntaps is odd.
ntaps |= 1
taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta),
pass_zero=False, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test_bandpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=[0.3, 0.7], window=('kaiser', beta),
pass_zero=False, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
0.7-width/2, 0.7+width/2, 0.8, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_multi(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
pass_zero=True, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
0.5-width/2, 0.5+width/2, 0.65,
0.8-width/2, 0.8+width/2, 0.9, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
decimal=5)
def test_nyq(self):
"""Test the nyq keyword."""
nyquist = 1000
width = 40.0
relative_width = width/nyquist
ntaps, beta = kaiserord(120, relative_width)
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, nyq=nyquist)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
700-width/2, 700+width/2, 800, 1000])
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_bad_cutoff(self):
"""Test that invalid cutoff argument raises ValueError."""
# cutoff values must be greater than 0 and less than 1.
assert_raises(ValueError, firwin, 99, -0.5)
assert_raises(ValueError, firwin, 99, 1.5)
# Don't allow 0 or 1 in cutoff.
assert_raises(ValueError, firwin, 99, [0, 0.5])
assert_raises(ValueError, firwin, 99, [0.5, 1])
# cutoff values must be strictly increasing.
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
# Must have at least one cutoff value.
assert_raises(ValueError, firwin, 99, [])
# 2D array not allowed.
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
# cutoff values must be less than nyq.
assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
def test_even_highpass_raises_value_error(self):
"""Test that attempt to create a highpass filter with an even number
of taps raises a ValueError exception."""
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
assert_raises(ValueError, firwin, 40, [.25, 0.5])
class TestFirwin2(TestCase):
def test_invalid_args(self):
# `freq` and `gain` have different lengths.
assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0])
# `nfreqs` is less than `ntaps`.
assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
# Decreasing value in `freq`
assert_raises(ValueError, firwin2, 50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
# Value in `freq` repeated more than once.
assert_raises(ValueError, firwin2, 50, [ 0, .1, .1, .1, 1.0],
[0.0, 0.5, 0.75, 1.0, 1.0])
# `freq` does not start at 0.0.
assert_raises(ValueError, firwin2, 50, [0.5, 1.0], [0.0, 1.0])
def test01(self):
width = 0.04
beta = 12.0
ntaps = 400
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
freq = [0.0, 0.5, 1.0]
gain = [1.0, 1.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
0.75, 1.0-width/2])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
def test02(self):
width = 0.04
beta = 12.0
# ntaps must be odd for positive gain at Nyquist.
ntaps = 401
# An ideal highpass filter.
freq = [0.0, 0.5, 0.5, 1.0]
gain = [0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test03(self):
width = 0.02
ntaps, beta = kaiserord(120, width)
# ntaps must be odd for positive gain at Nyquist.
ntaps = int(ntaps) | 1
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test_nyq(self):
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
assert_array_almost_equal(taps1, taps2)
class TestRemez(TestCase):
def test_hilbert(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design a unity-gain Hilbert bandpass filter from a to 0.5-a
h = remez(N, [ a, 0.5-a ], [ 1 ], type='hilbert')
# make sure the filter has correct # of taps
assert_(len(h) == N, "Number of Taps")
# make sure it is type III (anti-symmetric tap coefficients)
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
# Since the requested response is symmetric, all even coefficients
# should be zero (or in this case really small)
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = abs(H)
# should have a zero at 0 and pi (in this case close to zero)
assert_((Hmag[ [0,-1] ] < 0.02).all(), "Zero at zero and pi")
# check that the pass band is close to unity
idx = (f > a) * (f < 0.5-a)
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
if __name__ == "__main__":
run_module_suite()
|
|
from ctypes import *
import time
import bacon
from bacon.core import lib
from bacon import native
from bacon import controller
from bacon import keyboard
from bacon import graphics
from bacon import mouse_input
from bacon import shader
from bacon import window
class Game(object):
'''Base class for all Bacon games. An instance of this class is passed to :func:`run`. Override methods on
this class to handle game events such as :func:`on_tick`. A complete example of a game::
class MyGame(bacon.Game):
def on_tick(self):
# Update and draw game here.
pass
# Start the game
bacon.run(MyGame())
'''
def on_init(self):
'''Called once when the game starts. You can use this to do any initialization that
requires the graphics device to have been initialized; for example, rendering to a texture.'''
pass
def on_tick(self):
'''Called once per frame to update and render the game. You may only call
drawing functions within the scope of this method.'''
graphics.clear(1, 0, 1, 1)
def on_key(self, key, pressed):
'''Called when a key on the keyboard is pressed or released.
:param key: key code, one of :class:`Keys` enumeration
:param pressed: ``True`` if the key was pressed, otherwise ``False``
'''
pass
def on_mouse_button(self, button, pressed):
'''Called when a mouse button is pressed or released.
:param button: button index, one of :class:`MouseButton` enumeration
:param pressed: ``True`` if the button was pressed, otherwise ``False``
'''
pass
def on_mouse_scroll(self, dx, dy):
'''Called when the mouse scroll wheel is scrolled. Most mice have a scroll wheel that moves in
the ``y`` axis only; Apple trackpads and mice support scrolling in ``x`` as well.
:note: units are arbitrary and not currently consistent across platforms
:param dx: relative scroll amount along the ``x`` axis
:param dy: relative scroll amount along the ``y`` axis
'''
pass
def on_resize(self, width, height):
'''Called when size of the window changes.
:param width: width of the drawable area of the window, in pixels
:param height: height of the drawable area of the window, in pixels
'''
pass
def on_controller_connected(self, controller):
'''Called when a game controller is connected.
:param controller: the :class:`Controller` that is now available for polling and events
'''
pass
def on_controller_disconnected(self, controller):
'''Called when a game controller is disconnected. You should use the `controller` parameter only
to identify a previously used controller; its properties and values will no longer be available.
:param controller: the :class:`Controller` that was disconnected
'''
pass
def on_controller_button(self, controller, button, pressed):
'''Called when a button on a game controller is pressed or released.
:param controller: the :class:`Controller` containing the button
:param button: button index, one of :class:`ControllerButtons` enumeration
:param pressed: ``True`` if the button was pressed, otherwise ``False``
'''
pass
def on_controller_axis(self, controller, axis, value):
'''Called when an axis on a game controller is moved.
:param controller: the :class:`Controller` containing the axis
:param axis: axis index, one of :class:`ControllerAxes` enumeration
:param value: absolute position of the axis, between ``-1.0`` and ``1.0``
'''
pass
if not native._mock_native:
_time_uniform = shader.ShaderUniform('g_Time', shader.ShaderUniformType.float_)
#: Number of seconds since the last frame. This is a convenience value for timing animations.
bacon.timestep = 0.0
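# For example, a game could advance a frame-rate independent animation
# from on_tick with something like (a sketch; `sprite` and `speed` are
# illustrative names):
#   sprite.x += speed * bacon.timestep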
def _first_tick_callback():
global _tick_callback_handle
global _last_frame_time
global _start_time
global timestep
_start_time = time.time()
_last_frame_time = _start_time
bacon.timestep = 0.0
_tick_callback_handle = lib.TickCallback(_tick_callback)
lib.SetTickCallback(_tick_callback_handle)
# Exceptions on startup (either during on_init or the first on_tick) stop the
# game loop immediately, since they're likely to be showstoppers.
try:
bacon._current_game.on_init()
_last_frame_time = time.time() - _start_time
_tick_callback()
except:
_tick_callback_handle = lib.TickCallback(_error_tick_callback)
lib.SetTickCallback(_tick_callback_handle)
raise
def _error_tick_callback():
lib.Stop()
def _tick_callback():
global _last_frame_time
now_time = time.time() - _start_time
bacon.timestep = now_time - _last_frame_time
_last_frame_time = now_time
_time_uniform.value = now_time
graphics._target_stack = [None]
window._begin_frame()
mouse_input.mouse._update_position()
try:
bacon._current_game.on_tick()
except:
_tick_callback_handle = lib.TickCallback(_error_tick_callback)
lib.SetTickCallback(_tick_callback_handle)
raise
window._end_frame()
bacon._current_game = None
def run(game):
'''Start running the game. The window is created and shown at this point, and then
the main event loop is entered. :func:`Game.on_tick` and other event handlers are called
repeatedly until the game exits.
'''
global _tick_callback_handle
bacon._current_game = game
# Window handler
window_resize_callback_handle = lib.WindowResizeEventHandler(window._window_resize_event_handler)
lib.SetWindowResizeEventHandler(window_resize_callback_handle)
# Key handler
key_callback_handle = lib.KeyEventHandler(keyboard._key_event_handler)
lib.SetKeyEventHandler(key_callback_handle)
# Mouse handlers
mouse_button_callback_handle = lib.MouseButtonEventHandler(mouse_input._mouse_button_event_handler)
lib.SetMouseButtonEventHandler(mouse_button_callback_handle)
mouse_scroll_callback_handle = lib.MouseScrollEventHandler(mouse_input._mouse_scroll_event_handler)
lib.SetMouseScrollEventHandler(mouse_scroll_callback_handle)
# Controller handlers
controller_connected_handle = lib.ControllerConnectedEventHandler(controller._controller_connected_event_handler)
lib.SetControllerConnectedEventHandler(controller_connected_handle)
controller_button_handle = lib.ControllerButtonEventHandler(controller._controller_button_event_handler)
lib.SetControllerButtonEventHandler(controller_button_handle)
controller_axis_handle = lib.ControllerAxisEventHandler(controller._controller_axis_event_handler)
lib.SetControllerAxisEventHandler(controller_axis_handle)
# Tick handler
_tick_callback_handle = lib.TickCallback(_first_tick_callback)
lib.SetTickCallback(_tick_callback_handle)
lib.Run()
bacon._current_game = None
_tick_callback_handle = None
lib.SetWindowResizeEventHandler(lib.WindowResizeEventHandler(0))
lib.SetKeyEventHandler(lib.KeyEventHandler(0))
lib.SetMouseButtonEventHandler(lib.MouseButtonEventHandler(0))
lib.SetMouseScrollEventHandler(lib.MouseScrollEventHandler(0))
lib.SetControllerConnectedEventHandler(lib.ControllerConnectedEventHandler(0))
lib.SetControllerButtonEventHandler(lib.ControllerButtonEventHandler(0))
lib.SetControllerAxisEventHandler(lib.ControllerAxisEventHandler(0))
lib.SetTickCallback(lib.TickCallback(0))
def quit():
'''Stop the game loop and exit the application before the next :func:`on_tick` is called.
'''
lib.Stop()
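# A minimal usage sketch combining run() and quit() (assumes the key
# enumeration is exposed as bacon.Keys, per the on_key docstring above):
#   class MyGame(bacon.Game):
#       def on_key(self, key, pressed):
#           if pressed and key == bacon.Keys.escape:
#               bacon.quit()
#   bacon.run(MyGame())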
|
|
"""Test the Google Maps Travel Time config flow."""
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.google_travel_time.const import (
ARRIVAL_TIME,
CONF_ARRIVAL_TIME,
CONF_AVOID,
CONF_DEPARTURE_TIME,
CONF_DESTINATION,
CONF_LANGUAGE,
CONF_ORIGIN,
CONF_TIME,
CONF_TIME_TYPE,
CONF_TRAFFIC_MODEL,
CONF_TRANSIT_MODE,
CONF_TRANSIT_ROUTING_PREFERENCE,
CONF_UNITS,
DEFAULT_NAME,
DEPARTURE_TIME,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
CONF_UNIT_SYSTEM_IMPERIAL,
)
from tests.components.google_travel_time.const import MOCK_CONFIG
@pytest.mark.usefixtures("validate_config_entry", "bypass_setup")
async def test_minimum_fields(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == DEFAULT_NAME
assert result2["data"] == {
CONF_NAME: DEFAULT_NAME,
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
}
@pytest.mark.usefixtures("invalidate_config_entry")
async def test_invalid_config_entry(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
@pytest.mark.parametrize(
"data,options",
[
(
MOCK_CONFIG,
{
CONF_MODE: "driving",
CONF_ARRIVAL_TIME: "test",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
},
)
],
)
@pytest.mark.usefixtures("validate_config_entry")
async def test_options_flow(hass, mock_config):
"""Test options flow."""
result = await hass.config_entries.options.async_init(
mock_config.entry_id, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TIME_TYPE: ARRIVAL_TIME,
CONF_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"] == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_ARRIVAL_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
assert mock_config.options == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_ARRIVAL_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
@pytest.mark.parametrize(
"data,options",
[(MOCK_CONFIG, {})],
)
@pytest.mark.usefixtures("validate_config_entry")
async def test_options_flow_departure_time(hass, mock_config):
"""Test options flow with departure time."""
result = await hass.config_entries.options.async_init(
mock_config.entry_id, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TIME_TYPE: DEPARTURE_TIME,
CONF_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"] == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_DEPARTURE_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
assert mock_config.options == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_DEPARTURE_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
@pytest.mark.usefixtures("validate_config_entry", "bypass_setup")
async def test_dupe(hass):
"""Test setting up the same entry data twice is OK."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "test",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "test",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
|
from __future__ import annotations
import importlib.util
import itertools
import os
import re
import shutil
from collections import defaultdict
from collections.abc import Iterator
from typing import IO, TYPE_CHECKING
import pytest
import numpy as np
import numpy.typing as npt
from numpy.typing.mypy_plugin import (
_PRECISION_DICT,
_EXTENDED_PRECISION_LIST,
_C_INTP,
)
try:
from mypy import api
except ImportError:
NO_MYPY = True
else:
NO_MYPY = False
if TYPE_CHECKING:
# We need this as annotation, but it's located in a private namespace.
# As a compromise, do *not* import it during runtime
from _pytest.mark.structures import ParameterSet
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
MISC_DIR = os.path.join(DATA_DIR, "misc")
MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
#: A dictionary with file names as keys and lists of the mypy stdout as values.
#: To be populated by `run_mypy`.
OUTPUT_MYPY: dict[str, list[str]] = {}
def _key_func(key: str) -> str:
"""Split at the first occurrence of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
drive, tail = os.path.splitdrive(key)
return os.path.join(drive, tail.split(":", 1)[0])
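# A minimal sketch of the expected behaviour (hypothetical mypy output line):
#   >>> _key_func("numpy/typing/tests/data/fail/foo.pyi:42: error: ...")
#   'numpy/typing/tests/data/fail/foo.pyi'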
def _strip_filename(msg: str) -> str:
"""Strip the filename from a mypy message."""
_, tail = os.path.splitdrive(msg)
return tail.split(":", 1)[-1]
def strip_func(match: re.Match[str]) -> str:
"""`re.sub` helper function for stripping module names."""
return match.groups()[1]
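# For example (a sketch):
#   >>> re.sub(r"(\w+\.)+(\w+)", strip_func, "numpy.floating[numpy.typing._64Bit]")
#   'floating[_64Bit]'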
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.fixture(scope="module", autouse=True)
def run_mypy() -> None:
"""Clears the cache and run mypy before running any of the typing tests.
The mypy results are cached in `OUTPUT_MYPY` for further use.
The cache refresh can be skipped using
NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
"""
if (
os.path.isdir(CACHE_DIR)
and bool(int(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", "1")))
):
shutil.rmtree(CACHE_DIR)
for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
# Run mypy
stdout, stderr, exit_code = api.run([
"--config-file",
MYPY_INI,
"--cache-dir",
CACHE_DIR,
directory,
])
if stderr:
pytest.fail(f"Unexpected mypy standard error\n\n{stderr}")
elif exit_code not in {0, 1}:
pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}")
stdout = stdout.replace('*', '')
# Parse the output
iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def get_test_cases(directory: str) -> Iterator[ParameterSet]:
for root, _, files in os.walk(directory):
for fname in files:
short_fname, ext = os.path.splitext(fname)
if ext in (".pyi", ".py"):
fullpath = os.path.join(root, fname)
yield pytest.param(fullpath, id=short_fname)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path) -> None:
# Alias `OUTPUT_MYPY` so that it appears in the local namespace
output_mypy = OUTPUT_MYPY
if path in output_mypy:
msg = "Unexpected mypy output\n\n"
msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
raise AssertionError(msg)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path: str) -> None:
__tracebackhide__ = True
with open(path) as fin:
lines = fin.readlines()
errors = defaultdict(lambda: "")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line)
match = re.match(
r"(?P<lineno>\d+): (error|note): .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected error line format: {error_line}")
lineno = int(match.group('lineno'))
errors[lineno] += f'{error_line}\n'
for i, line in enumerate(lines):
lineno = i + 1
if (
line.startswith('#')
or (" E:" not in line and lineno not in errors)
):
continue
target_line = lines[lineno - 1]
if "# E:" in target_line:
expression, _, marker = target_line.partition(" # E: ")
expected_error = errors[lineno].strip()
marker = marker.strip()
_test_fail(path, expression, marker, expected_error, lineno)
else:
pytest.fail(
f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}"
)
_FAIL_MSG1 = """Extra error at line {}
Expression: {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expression: {}
Expected error: {!r}
Observed error: {!r}
"""
def _test_fail(
path: str,
expression: str,
error: str,
expected_error: None | str,
lineno: int,
) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, expression, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(
lineno, expression, expected_error, error
))
def _construct_ctypes_dict() -> dict[str, str]:
dct = {
"ubyte": "c_ubyte",
"ushort": "c_ushort",
"uintc": "c_uint",
"uint": "c_ulong",
"ulonglong": "c_ulonglong",
"byte": "c_byte",
"short": "c_short",
"intc": "c_int",
"int_": "c_long",
"longlong": "c_longlong",
"single": "c_float",
"double": "c_double",
"longdouble": "c_longdouble",
}
# Match `ctypes` names to the first ctypes type with a given kind and
# precision, e.g. {"c_double": "c_double", "c_longdouble": "c_double"}
# if both types represent 64-bit floats.
# In this context "first" is defined by the order of `dct`
ret = {}
visited: dict[tuple[str, int], str] = {}
for np_name, ct_name in dct.items():
np_scalar = getattr(np, np_name)()
# Find the first `ctypes` type for a given `kind`/`itemsize` combo
key = (np_scalar.dtype.kind, np_scalar.dtype.itemsize)
ret[ct_name] = visited.setdefault(key, f"ctypes.{ct_name}")
return ret
def _construct_format_dict() -> dict[str, str]:
dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for
k, v in _PRECISION_DICT.items()}
return {
"uint8": "numpy.unsignedinteger[numpy.typing._8Bit]",
"uint16": "numpy.unsignedinteger[numpy.typing._16Bit]",
"uint32": "numpy.unsignedinteger[numpy.typing._32Bit]",
"uint64": "numpy.unsignedinteger[numpy.typing._64Bit]",
"uint128": "numpy.unsignedinteger[numpy.typing._128Bit]",
"uint256": "numpy.unsignedinteger[numpy.typing._256Bit]",
"int8": "numpy.signedinteger[numpy.typing._8Bit]",
"int16": "numpy.signedinteger[numpy.typing._16Bit]",
"int32": "numpy.signedinteger[numpy.typing._32Bit]",
"int64": "numpy.signedinteger[numpy.typing._64Bit]",
"int128": "numpy.signedinteger[numpy.typing._128Bit]",
"int256": "numpy.signedinteger[numpy.typing._256Bit]",
"float16": "numpy.floating[numpy.typing._16Bit]",
"float32": "numpy.floating[numpy.typing._32Bit]",
"float64": "numpy.floating[numpy.typing._64Bit]",
"float80": "numpy.floating[numpy.typing._80Bit]",
"float96": "numpy.floating[numpy.typing._96Bit]",
"float128": "numpy.floating[numpy.typing._128Bit]",
"float256": "numpy.floating[numpy.typing._256Bit]",
"complex64": ("numpy.complexfloating"
"[numpy.typing._32Bit, numpy.typing._32Bit]"),
"complex128": ("numpy.complexfloating"
"[numpy.typing._64Bit, numpy.typing._64Bit]"),
"complex160": ("numpy.complexfloating"
"[numpy.typing._80Bit, numpy.typing._80Bit]"),
"complex192": ("numpy.complexfloating"
"[numpy.typing._96Bit, numpy.typing._96Bit]"),
"complex256": ("numpy.complexfloating"
"[numpy.typing._128Bit, numpy.typing._128Bit]"),
"complex512": ("numpy.complexfloating"
"[numpy.typing._256Bit, numpy.typing._256Bit]"),
"ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
"ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
"uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]",
"uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]",
"uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]",
"ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]",
"byte": f"numpy.signedinteger[{dct['_NBitByte']}]",
"short": f"numpy.signedinteger[{dct['_NBitShort']}]",
"intc": f"numpy.signedinteger[{dct['_NBitIntC']}]",
"intp": f"numpy.signedinteger[{dct['_NBitIntP']}]",
"int_": f"numpy.signedinteger[{dct['_NBitInt']}]",
"longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]",
"half": f"numpy.floating[{dct['_NBitHalf']}]",
"single": f"numpy.floating[{dct['_NBitSingle']}]",
"double": f"numpy.floating[{dct['_NBitDouble']}]",
"longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
"csingle": ("numpy.complexfloating"
f"[{dct['_NBitSingle']}, {dct['_NBitSingle']}]"),
"cdouble": ("numpy.complexfloating"
f"[{dct['_NBitDouble']}, {dct['_NBitDouble']}]"),
"clongdouble": (
"numpy.complexfloating"
f"[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]"
),
# numpy.typing
"_NBitInt": dct['_NBitInt'],
# numpy.ctypeslib
"c_intp": f"ctypes.{_C_INTP}"
}
#: A dictionary with all supported format keys (as keys)
#: and matching values
FORMAT_DICT: dict[str, str] = _construct_format_dict()
FORMAT_DICT.update(_construct_ctypes_dict())
def _parse_reveals(file: IO[str]) -> tuple[npt.NDArray[np.str_], list[str]]:
"""Extract and parse all ``" # E: "`` comments from the passed
file-like object.
All format keys will be substituted for their respective value
from `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes
``"numpy.floating[numpy.typing._64Bit]"``.
"""
string = file.read().replace("*", "")
# Grab all `# E:`-based comments and matching expressions
expression_array, _, comments_array = np.char.partition(
string.split("\n"), sep=" # E: "
).T
comments = "/n".join(comments_array)
# Only search for the `{*}` pattern within comments, otherwise
# there is the risk of accidentally grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for
k in key_set
}
fmt_str = comments.format(**kwargs)
return expression_array, fmt_str.split("/n")
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path: str) -> None:
"""Validate that mypy correctly infers the return-types of
the expressions in `path`.
"""
__tracebackhide__ = True
with open(path) as fin:
expression_array, reveal_list = _parse_reveals(fin)
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line)
match = re.match(
r"(?P<lineno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group('lineno')) - 1
assert "Revealed type is" in error_line
marker = reveal_list[lineno]
expression = expression_array[lineno]
_test_reveal(path, expression, marker, error_line, 1 + lineno)
_REVEAL_MSG = """Reveal mismatch at line {}
Expression: {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
def _test_reveal(
path: str,
expression: str,
reveal: str,
expected_reveal: str,
lineno: int,
) -> None:
"""Error-reporting helper function for `test_reveal`."""
strip_pattern = re.compile(r"(\w+\.)+(\w+)")
stripped_reveal = strip_pattern.sub(strip_func, reveal)
stripped_expected_reveal = strip_pattern.sub(strip_func, expected_reveal)
if stripped_reveal not in stripped_expected_reveal:
raise AssertionError(
_REVEAL_MSG.format(lineno,
expression,
stripped_expected_reveal,
stripped_reveal)
)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path: str) -> None:
"""Validate that the code in `path` properly during runtime."""
path_without_extension, _ = os.path.splitext(path)
dirname, filename = path.split(os.sep)[-2:]
spec = importlib.util.spec_from_file_location(
f"{dirname}.{filename}", path
)
assert spec is not None
assert spec.loader is not None
test_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(test_module)
LINENO_MAPPING = {
3: "uint128",
4: "uint256",
6: "int128",
7: "int256",
9: "float80",
10: "float96",
11: "float128",
12: "float256",
14: "complex160",
15: "complex192",
16: "complex256",
17: "complex512",
}
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
def test_extended_precision() -> None:
path = os.path.join(MISC_DIR, "extended_precision.pyi")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
with open(path, "r") as f:
expression_list = f.readlines()
for _msg in output_mypy[path]:
*_, _lineno, msg_typ, msg = _msg.split(":")
msg = _strip_filename(msg)
lineno = int(_lineno)
expression = expression_list[lineno - 1].rstrip("\n")
msg_typ = msg_typ.strip()
assert msg_typ in {"error", "note"}
if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
if msg_typ == "error":
raise ValueError(f"Unexpected reveal line format: {lineno}")
else:
marker = FORMAT_DICT[LINENO_MAPPING[lineno]]
_test_reveal(path, expression, marker, msg, lineno)
else:
if msg_typ == "error":
marker = "Module has no attribute"
_test_fail(path, expression, marker, msg, lineno)
|
|
from django.db import models
from djforms.core.models import GENDER_CHOICES
from djforms.core.models import GenericChoice
from djforms.core.models import GenericContact
from djforms.core.models import SEMESTER_CHOICES
from localflavor.us.models import USStateField
from localflavor.us.us_states import STATE_CHOICES
ENTRY_CHOICES = [
('Freshman', 'Freshman'),
('Transfer', 'Transfer'),
]
GUARDIAN_CHOICES = (
('Mother', 'Mother'),
('Father', 'Father'),
('Guardian', 'Guardian'),
)
DATEFORMAT = '%A, %B %d, %Y'
def limit_time():
ids = [
g.id for g in GenericChoice.objects.filter(
tags__name__in=['Admissions Visit Time'],
).exclude(active=False).order_by('ranking')
]
return {'id__in': ids}
def limit_format():
ids = [
g.id for g in GenericChoice.objects.filter(
tags__name__in=['Admissions Contact Platform'],
).exclude(active=False).order_by('ranking')
]
return {'id__in': ids}
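# The callables above are used below as ``limit_choices_to`` values; Django
# resolves each one to a filter dict when the field's queryset is built,
# equivalent to (a sketch):
#   GenericChoice.objects.filter(**limit_time())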
class VisitDay(models.Model):
title = models.CharField(max_length=255)
slug = models.CharField(
max_length=255, verbose_name='Slug', unique=True,
)
description = models.TextField(
"Description",
help_text="This information will appear above the form.",
)
about = models.TextField(
"About",
help_text="""
This information will appear in the sidebar next to the form.
""",
)
email_info = models.TextField(
"Email Instructions",
help_text="This information will be sent to the registrant.",
null=True,
blank=True,
)
extended = models.BooleanField(
"Extended Form",
default=False,
help_text="""
Check this box if you want the long form with fields for
Educational Background and Plans
""",
)
number_attend = models.BooleanField(
"Number Attending",
default=False,
help_text="""
Check this box if you want field with 'Number Attending'.
""",
)
date_alternate = models.BooleanField(
"Enable an alternate date option",
default=False,
help_text="""
Check this box if you want to allow users to choose
an alternate date for their visit.
""",
)
time_slots = models.BooleanField(
"Enable time slots",
default=False,
help_text="""
Check this box if you want to allow users to choose
time slots for their visit.
""",
)
meeting_format = models.BooleanField(
"Enable meeting format",
default=False,
help_text="""
Check this box if you want to allow users to choose
a format for online meeting.
""",
)
meeting_request = models.BooleanField(
"Enable meeting requests",
default=False,
help_text="""
Check this box if you want to allow users to choose
meeting requests (e.g. coaches, faculty) for their appointment.
""",
)
spanish_rep = models.BooleanField(
"Spanish speaking representative",
default=False,
help_text="""
Check this box if you want to allow users to choose
a Spanish speaking representative for their appointment.
""",
)
def __str__(self):
return self.title
class VisitDayEvent(models.Model):
date = models.DateField()
time = models.CharField(
help_text="Morning or Afternoon or time frame (e.g. 6-8pm)",
max_length=32,
)
max_attendees = models.IntegerField()
cur_attendees = models.IntegerField()
active = models.BooleanField(default=True)
event = models.ForeignKey(VisitDay, on_delete=models.CASCADE)
def __str__(self):
return '{0} ({1})'.format(
str(self.date.strftime(DATEFORMAT)), self.time,
)
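# With DATEFORMAT above, __str__ renders like (a sketch):
#   'Monday, January 01, 2024 (Morning)'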
class VisitDayBaseProfile(GenericContact):
CC_STATE_CHOICES = list(STATE_CHOICES)
CC_STATE_CHOICES.insert(666, ('', 'International Student'))  # index past the end simply appends
date = models.ForeignKey(
VisitDayEvent,
related_name='visitday_date',
on_delete=models.CASCADE,
)
date_alternate = models.ForeignKey(
VisitDayEvent,
related_name='visitday_altdate',
verbose_name="Second choice date",
on_delete=models.CASCADE,
null=True,
blank=True,
)
address = models.CharField(max_length=255, verbose_name="Address")
city = models.CharField(max_length=128, verbose_name="City")
state = USStateField(choices=CC_STATE_CHOICES)
postal_code = models.CharField(max_length=10, verbose_name="Zip Code")
phone = models.CharField(
max_length=12,
verbose_name="Phone Number",
help_text="Format: XXX-XXX-XXXX",
)
mobile = models.CharField(
max_length=12,
verbose_name="Mobile Phone",
help_text="Format: XXX-XXX-XXXX",
null=True,
blank=True,
)
gender = models.CharField(max_length=16, choices=GENDER_CHOICES)
number_attend = models.CharField(
max_length=2,
verbose_name="Number Attending",
null=True,
blank=True,
)
time_primary = models.ForeignKey(
GenericChoice,
verbose_name="Time, First Choice",
related_name='visit_day_time_primary',
limit_choices_to=limit_time,
on_delete=models.CASCADE,
null=True,
blank=True,
)
time_secondary = models.ForeignKey(
GenericChoice,
verbose_name="Time, Second Choice",
related_name='visit_day_time_secondary',
limit_choices_to=limit_time,
on_delete=models.CASCADE,
null=True,
blank=True,
)
meeting_format = models.ForeignKey(
GenericChoice,
verbose_name="Meeting Format",
related_name='visit_day_format',
limit_choices_to=limit_format,
on_delete=models.CASCADE,
null=True,
blank=True,
)
meeting_request = models.ManyToManyField(
GenericChoice,
verbose_name="Meeting Requests",
related_name="visit_day_request",
blank=True,
)
spanish_rep = models.BooleanField(
"I would like to meet with a Spanish speaking admissions representative.",
default=False,
)
def __str__(self):
return '{0} {1}'.format(self.last_name, self.first_name)
class VisitDayProfile(VisitDayBaseProfile):
guardian_email = models.EmailField(null=True, blank=True)
guardian_type = models.CharField(
"Parent/Guardian type", max_length=16, choices=GUARDIAN_CHOICES
)
high_school = models.CharField(
"High School", max_length=255
)
hs_city = models.CharField(
"High School city", max_length=128
)
hs_state = USStateField("High School state")
hs_grad_year = models.IntegerField("High School graduation year")
entry_as = models.CharField(
"Entering as a", max_length=16, choices=ENTRY_CHOICES
)
transfer = models.CharField(
"If transfer, list university/college attended and city/state",
max_length=255, null=True, blank=True
)
entry_year = models.IntegerField("Entry year")
entry_term = models.CharField(
"Entry term", max_length=32, choices=SEMESTER_CHOICES
)
academic = models.TextField(
"Academic interests", null=True, blank=True
)
xtracurricular = models.TextField(
"Extracurricular interests (clubs, fine arts, sports, etc.)",
null=True, blank=True
)
comments = models.TextField(null=True, blank=True)
def __str__(self):
return '{0} {1}'.format(self.last_name, self.first_name)
def event_title(self):
return self.date.event.title
|
|
#! /usr/bin/env python
"""Contains various functions to perform useful queries of the
``acsql`` database.
The available queries are:
1. ``all_filenames(dataset)``
2. ``filters_for_rootname(rootname)``
3. ``filter_distribution()``
4. ``rootnames_for_target(targname)``
5. ``filenames_for_calibration(calibration_keyword, value)``
6. ``goodmean_for_dataset(dataset)``
7. ``rootnames_with_postflash()``
8. ``non_asn_rootnames()``
9. ``filenames_in_date_range(begin_date, end_date)``
See each function's docstrings for further details.
Each function returns the ``sqlalchemy.query`` object for the query
performed, so that the user may re-run the query and perform
additional operations with it and/or its results.
Authors
-------
- Sara Ogaz
- Matthew Bourque
Use
---
This script is intended to be imported as such:
::
from acsql.database import queries
``queries`` can then be used to perform individual queries, e.g.:
::
query = queries.filter_distribution()
Each function will print the results to the screen, but the user
may also perform the query and handle the results themselves, e.g.:
::
results = query.all()
Dependencies
------------
External library dependencies include:
- ``acsql``
- ``sqlalchemy``
"""
from sqlalchemy import and_
from sqlalchemy import exists
from sqlalchemy import func
from acsql.database.database_interface import session
from acsql.database.database_interface import Master
from acsql.database.database_interface import Datasets
from acsql.database.database_interface import WFC_asn_0
from acsql.database.database_interface import WFC_raw_0
from acsql.database.database_interface import WFC_flt_1
from acsql.database.database_interface import WFC_flt_4
def all_filenames(dataset):
"""Queries for all filenames that exist for the given dataset.
Parameters
----------
dataset : str
Any portion of (or entire) rootname (e.g. 'jd2615qi', or
'jd2615').
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(Datasets)\
.filter(Datasets.rootname.like('{}%'.format(dataset)))
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
results_dict = result.__dict__
del results_dict['_sa_instance_state']
print(results_dict)
return query
def filters_for_rootname(rootname):
"""Queries for the FILTER1/FILTER2 combination for the geven
observation.
Parameters
----------
rootname : str
The rootname to query by.
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(WFC_raw_0.filter1, WFC_raw_0.filter2)\
.filter(WFC_raw_0.rootname == rootname)
query_results = query.one()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
print('{}: {}'.format(rootname, query_results))
return query
def filter_distribution():
"""Queries for the FILTER1/FILTER2 combination for the given
observation.
Parameters
----------
rootname : str
The rootname to query by.
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(WFC_raw_0.filter1, WFC_raw_0.filter2,
func.count(WFC_raw_0.filter1))\
.group_by(WFC_raw_0.filter1, WFC_raw_0.filter2)
query_results = query.all()
db_count = session.query(WFC_raw_0).count()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
perc_used = round((result[2] / db_count) * 100., 2)
print('\t{}/{}: {}%'.format(result[0], result[1], perc_used))
return query
def rootnames_for_target(targname):
"""Queries for the rootname and filename for a given target.
Parameters
----------
targname : str
The target name (e.g. 'NGC104')
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(WFC_raw_0.rootname, WFC_raw_0.filename,
WFC_raw_0.targname)\
.filter(WFC_raw_0.targname == targname)
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
print(result)
return query
def filenames_for_calibration(calibration_keyword, value):
"""Queries for the filenames that used a given calibration mode.
The 'calibration' mode is defined by the type of calibration and
the calibration reference file used
(e.g. 'BIASFILE = jref$06u15056j_bia.fits')
Parameters
----------
calibration_keyword : str
The calibration file to query on (e.g. DARKFILE, BIASFILE)
value : str
The calibration file value (e.g. jref$06u15056j_bia.fits)
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
calibration_keyword_obj = getattr(WFC_raw_0, calibration_keyword)
query = session.query(WFC_raw_0.filename)\
.filter(calibration_keyword_obj == value)
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
print(result[0])
return query
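# Example usage, reusing the reference file value from the docstring above:
#   query = filenames_for_calibration('BIASFILE', 'jref$06u15056j_bia.fits')
#   results = query.all()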
def goodmean_for_dataset(dataset):
"""Queries for the GOODMEAN values for a given dataset
The GOODMEAN describes the mean of all 'good' (i.e. non-flagged)
pixels in the image.
Parameters
----------
dataset : str
Any portion of (or entire) rootname (e.g. 'jd2615qi', or
'jd2615').
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(Master.rootname, WFC_flt_1.goodmean,
WFC_flt_4.goodmean)\
.join(WFC_flt_1)\
.join(WFC_flt_4)\
.filter(Master.rootname.like('{}%'.format(dataset)))
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
print(result)
return query
def rootnames_with_postflash():
"""Queries for rootnames and FLASHDURs for non-DARK observations
that have a FLASHDUR > 0.
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(Master.rootname, WFC_raw_0.flashdur)\
.join(WFC_raw_0)\
.filter(WFC_raw_0.flashdur > 0)\
.filter(WFC_raw_0.targname != 'DARK')
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
print(result)
return query
def non_asn_rootnames():
"""Queries for rootnames that are not part of an association.
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(Master.rootname)\
.filter(~exists().where(and_(Master.rootname == WFC_asn_0.rootname)))
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
print(result[0])
return query
def filenames_in_date_range(begin_date, end_date):
"""Queries for filenames for observations that occur between the
``begin_date`` and ``end_date``.
Parameters
----------
begin_date : str
The start of the date range (in the format YYYY-MM-DD).
end_date : str
The end of the date range (in the format YYYY-MM-DD).
Returns
-------
query : obj
The query object that contains attributes and methods for
performing the query.
"""
query = session.query(WFC_raw_0.filename)\
.filter(WFC_raw_0.date_obs >= begin_date)\
.filter(WFC_raw_0.date_obs <= end_date)
query_results = query.all()
print('\nQuery performed:\n\n{}\n'.format(str(query)))
for result in query_results:
print(result[0])
return query
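# Example usage (hypothetical date range):
#   query = filenames_in_date_range('2017-01-01', '2017-06-30')
#   results = query.all()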
|
|
"""The LBFGS attack
"""
import numpy as np
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans.compat import reduce_sum, softmax_cross_entropy_with_logits
from cleverhans.model import CallableModelWrapper, Model, wrapper_warning
from cleverhans import utils
from cleverhans import utils_tf
_logger = utils.create_logger("cleverhans.attacks.lbfgs")
tf_dtype = tf.as_dtype("float32")
class LBFGS(Attack):
"""
LBFGS was the first adversarial attack against convolutional neural
networks; it is a targeted, iterative attack.
Paper link: "https://arxiv.org/pdf/1312.6199.pdf"
:param model: cleverhans.model.Model
:param sess: tf.Session
:param dtypestr: dtype of the data
:param kwargs: passed through to super constructor
"""
def __init__(self, model, sess, dtypestr="float32", **kwargs):
if not isinstance(model, Model):
wrapper_warning()
model = CallableModelWrapper(model, "probs")
super(LBFGS, self).__init__(model, sess, dtypestr, **kwargs)
self.feedable_kwargs = ("y_target",)
self.structural_kwargs = [
"batch_size",
"binary_search_steps",
"max_iterations",
"initial_const",
"clip_min",
"clip_max",
]
def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert (
self.sess is not None
), "Cannot use `generate` when no `sess` was provided"
self.parse_params(**kwargs)
if self.y_target is None:
self.y_target, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = False
else:
_, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = True
attack = LBFGS_impl(
self.sess,
x,
self.model.get_logits(x),
self.y_target,
self.targeted_attack,
self.binary_search_steps,
self.max_iterations,
self.initial_const,
self.clip_min,
self.clip_max,
nb_classes,
self.batch_size,
)
def lbfgs_wrap(x_val, y_val):
"""
Wrapper creating TensorFlow interface for use with py_func
"""
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap
def parse_params(
self,
y_target=None,
batch_size=1,
binary_search_steps=5,
max_iterations=1000,
initial_const=1e-2,
clip_min=0,
clip_max=1,
):
"""
:param y_target: (optional) A tensor with the one-hot target labels.
:param batch_size: The number of inputs to include in a batch and
process simultaneously.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
self.y_target = y_target
self.batch_size = batch_size
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
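# A minimal usage sketch (assumes a cleverhans-wrapped `model`, an active
# `sess`, an input tensor `x`, and a one-hot `y_target` tensor; parameter
# values are illustrative):
#   attack = LBFGS(model, sess=sess)
#   adv_x = attack.generate(x, y_target=y_target, binary_search_steps=5,
#                           max_iterations=1000)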
class LBFGS_impl(object):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sess: a TF session.
:param x: A tensor with the inputs.
:param logits: A tensor with model's output logits.
:param targeted_label: A tensor with the target labels.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: Minimum input component value
:param clip_max: Maximum input component value
:param num_labels: The number of classes in the model's output.
:param batch_size: Number of attacks to run simultaneously.
"""
def __init__(
self,
sess,
x,
logits,
targeted_label,
targeted_attack,
binary_search_steps,
max_iterations,
initial_const,
clip_min,
clip_max,
nb_classes,
batch_size,
):
self.sess = sess
self.x = x
self.logits = logits
assert logits.op.type != "Softmax"
self.targeted_label = targeted_label
self.targeted_attack = targeted_attack
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
self.batch_size = batch_size
self.repeat = self.binary_search_steps >= 10
self.shape = tuple([self.batch_size] + list(self.x.get_shape().as_list()[1:]))
self.ori_img = tf.Variable(np.zeros(self.shape), dtype=tf_dtype, name="ori_img")
self.const = tf.Variable(
np.zeros(self.batch_size), dtype=tf_dtype, name="const"
)
self.score = softmax_cross_entropy_with_logits(
labels=self.targeted_label, logits=self.logits
)
self.l2dist = reduce_sum(tf.square(self.x - self.ori_img))
# A small self.const yields a small adversarial perturbation.
# A targeted attack aims to minimize the loss against the target label;
# an untargeted attack aims to maximize the loss against the true label.
if self.targeted_attack:
self.loss = reduce_sum(self.score * self.const) + self.l2dist
else:
self.loss = -reduce_sum(self.score * self.const) + self.l2dist
(self.grad,) = tf.gradients(self.loss, self.x)
def attack(self, x_val, targets):
"""
Perform the attack on the given instance for the given targets.
"""
def lbfgs_objective(adv_x, self, targets, oimgs, CONST):
""" returns the function value and the gradient for fmin_l_bfgs_b """
loss = self.sess.run(
self.loss,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST,
},
)
grad = self.sess.run(
self.grad,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST,
},
)
return loss, grad.flatten().astype(float)
def attack_success(out, target, targeted_attack):
""" returns attack result """
if targeted_attack:
return out == target
else:
return out != target
# begin the main part for the attack
from scipy.optimize import fmin_l_bfgs_b
oimgs = np.clip(x_val, self.clip_min, self.clip_max)
CONST = np.ones(self.batch_size) * self.initial_const
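# CONST trades off perturbation size against classification loss; it is
# tuned per-example by the binary search in the loop below.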
# set the lower and upper bounds accordingly
lower_bound = np.zeros(self.batch_size)
upper_bound = np.ones(self.batch_size) * 1e10
# set the box constraints for the optimization function
clip_min = self.clip_min * np.ones(oimgs.shape)
clip_max = self.clip_max * np.ones(oimgs.shape)
clip_bound = list(zip(clip_min.flatten(), clip_max.flatten()))
# placeholders for the best l2 and instance attack found so far
o_bestl2 = [1e10] * self.batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.binary_search_steps):
_logger.debug(
" Binary search step %s of %s", outer_step, self.binary_search_steps
)
# The last iteration (if we run many steps) repeats the search once with CONST set to the upper bound.
if self.repeat and outer_step == self.binary_search_steps - 1:
CONST = upper_bound
# optimization function
adv_x, _, __ = fmin_l_bfgs_b(
lbfgs_objective,
oimgs.flatten().astype(float),
args=(self, targets, oimgs, CONST),
bounds=clip_bound,
maxiter=self.max_iterations,
iprint=0,
)
adv_x = adv_x.reshape(oimgs.shape)
assert (
np.amax(adv_x) <= self.clip_max and np.amin(adv_x) >= self.clip_min
), "fmin_l_bfgs_b returns are invalid"
# adjust the best result (i.e., the adversarial example with the
# smallest perturbation in terms of L_2 norm) found so far
preds = np.atleast_1d(
utils_tf.model_argmax(self.sess, self.x, self.logits, adv_x)
)
_logger.debug("predicted labels are %s", preds)
l2s = np.zeros(self.batch_size)
for i in range(self.batch_size):
l2s[i] = np.sum(np.square(adv_x[i] - oimgs[i]))
for e, (l2, pred, ii) in enumerate(zip(l2s, preds, adv_x)):
if l2 < o_bestl2[e] and attack_success(
pred, np.argmax(targets[e]), self.targeted_attack
):
o_bestl2[e] = l2
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(self.batch_size):
if attack_success(
preds[e], np.argmax(targets[e]), self.targeted_attack
):
# success: tighten the upper bound and binary-search the constant downwards
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(
" Successfully generated adversarial examples "
"on %s of %s instances.",
sum(upper_bound < 1e9),
self.batch_size,
)
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
return o_bestattack
|
|
#!/usr/bin/env python
# --------------
# USER INSTRUCTIONS
#
# Now you will put everything together.
#
# First make sure that your sense and move functions
# work as expected for the test cases provided at the
# bottom of the previous two programming assignments.
# Once you are satisfied, copy your sense and move
# definitions into the robot class on this page, BUT
# now include noise.
#
# A good way to include noise in the sense step is to
# add Gaussian noise, centered at zero with variance
# of self.bearing_noise to each bearing. You can do this
# with the command random.gauss(0, self.bearing_noise)
#
# In the move step, you should make sure that your
# actual steering angle is chosen from a Gaussian
# distribution of steering angles. This distribution
# should be centered at the intended steering angle
# with variance of self.steering_noise.
#
# Feel free to use the included set_noise function.
#
# Please do not modify anything except where indicated
# below.
from math import *
import random
# --------
#
# some top level parameters
#
max_steering_angle = pi / 4.0 # You do not need to use this value, but keep in mind the limitations of a real car.
bearing_noise = 0.1 # Noise parameter: should be included in sense function.
steering_noise = 0.1 # Noise parameter: should be included in move function.
distance_noise = 5.0 # Noise parameter: should be included in move function.
tolerance_xy = 15.0 # Tolerance for localization in the x and y directions.
tolerance_orientation = 0.25 # Tolerance for orientation.
# --------
#
# the "world" has 4 landmarks.
# the robot's initial coordinates are somewhere in the square
# represented by the landmarks.
#
# NOTE: Landmark coordinates are given in (y, x) form and NOT
# in the traditional (x, y) format!
landmarks = [[0.0, 100.0], [0.0, 0.0], [100.0, 0.0], [100.0, 100.0]] # position of 4 landmarks in (y, x) format.
world_size = 100.0 # world is NOT cyclic. Robot is allowed to travel "out of bounds"
# ------------------------------------------------
#
# this is the robot class
#
class robot:
# --------
# init:
# creates robot and initializes location/orientation
#
def __init__(self, length = 20.0):
self.x = random.random() * world_size # initial x position
self.y = random.random() * world_size # initial y position
self.orientation = random.random() * 2.0 * pi # initial orientation
self.length = length # length of robot
self.bearing_noise = 0.0 # initialize bearing noise to zero
self.steering_noise = 0.0 # initialize steering noise to zero
self.distance_noise = 0.0 # initialize distance noise to zero
# --------
# set:
# sets a robot coordinate
#
def set(self, new_x, new_y, new_orientation):
if new_orientation < 0 or new_orientation >= 2 * pi:
raise ValueError, 'Orientation must be in [0..2pi]'
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
# --------
# set_noise:
# sets the noise parameters
#
def set_noise(self, new_b_noise, new_s_noise, new_d_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.bearing_noise = float(new_b_noise)
self.steering_noise = float(new_s_noise)
self.distance_noise = float(new_d_noise)
# --------
# measurement_prob
# computes the probability of a measurement
#
def measurement_prob(self, measurements):
# calculate the correct measurement
predicted_measurements = self.sense(0) # Our sense function took 0 as an argument to switch off noise.
# compute errors
error = 1.0
for i in range(len(measurements)):
error_bearing = abs(measurements[i] - predicted_measurements[i])
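# map the error into [-pi, pi): e.g. a raw error of 6.2 rad becomes ~ -0.08 rad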
error_bearing = (error_bearing + pi) % (2.0 * pi) - pi # truncate
# update Gaussian
error *= (exp(- (error_bearing ** 2) / (self.bearing_noise ** 2) / 2.0) /
sqrt(2.0 * pi * (self.bearing_noise ** 2)))
return error
def __repr__(self): #allows us to print robot attributes.
return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y),
str(self.orientation))
def move(self, motion, tolerance = 0.001):
steering = motion[0]
distance = motion[1]
if abs(steering) > max_steering_angle:
raise ValueError, 'Exceeding max steering angle'
if distance < 0.0:
raise ValueError, 'Moving backwards is not valid'
#Make a new copy
res = robot()
res.length = self.length
res.bearing_noise = self.bearing_noise
res.steering_noise = self.steering_noise
res.distance_noise = self.distance_noise
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# execute motion
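# bicycle model: the heading change over the motion is
# turn = (distance / wheelbase) * tan(steering), with wheelbase = res.length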
turn = tan(steering2) * distance2 / res.length
if abs(turn) < tolerance:
# approximate by straight-line motion
res.x = self.x + (distance2 * cos(self.orientation))
res.y = self.y + (distance2 * sin(self.orientation))
res.orientation = (self.orientation + turn) % (2.0 * pi) #limit orientation between 0 and 2PI
else:
# approximate bicycle model for motion
radius = distance2 / turn
cx = self.x - (sin(self.orientation) * radius) # center of rotation - bicycle model
cy = self.y + (cos(self.orientation) * radius)
res.orientation = (self.orientation + turn) % (2.0 * pi)
res.x = cx + (sin(res.orientation) * radius)
res.y = cy - (cos(res.orientation) * radius)
return res
def sense(self, add_noise = 1):
Z = []
for i in range(len(landmarks)):
bearing = atan2(landmarks[i][0] - self.y, landmarks[i][1] - self.x) - self.orientation
if add_noise:
bearing += random.gauss(0.0, self.bearing_noise)
bearing %= 2.0*pi
Z.append(bearing)
return Z
# --------
#
# extract position from a particle set
#
def get_position(p):
x = 0.0
y = 0.0
orientation = 0.0
for i in range(len(p)):
x += p[i].x
y += p[i].y
# orientation is tricky because it is cyclic. By normalizing
# around the first particle we are somewhat more robust to
# the 0=2pi problem
orientation += (((p[i].orientation - p[0].orientation + pi) % (2.0 * pi))
+ p[0].orientation - pi)
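# e.g. with p[0].orientation = 6.2 and p[i].orientation = 0.1, the
# normalized term is ~6.38 (= 0.1 + 2*pi), so the average stays near 2*pi
# instead of being dragged towards pi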
return [x / len(p), y / len(p), orientation / len(p)]
# --------
#
# The following code generates the measurements vector
# You can use it to develop your solution.
#
def generate_ground_truth(motions):
myrobot = robot()
myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
Z = []
T = len(motions)
for t in range(T):
myrobot = myrobot.move(motions[t])
Z.append(myrobot.sense())
#print 'Robot: ', myrobot
return [myrobot, Z]
# --------
#
# The following code prints the measurements associated
# with generate_ground_truth
#
def print_measurements(Z):
T = len(Z)
print 'measurements = [[%.8s, %.8s, %.8s, %.8s],' % \
(str(Z[0][0]), str(Z[0][1]), str(Z[0][2]), str(Z[0][3]))
for t in range(1,T-1):
print ' [%.8s, %.8s, %.8s, %.8s],' % \
(str(Z[t][0]), str(Z[t][1]), str(Z[t][2]), str(Z[t][3]))
print ' [%.8s, %.8s, %.8s, %.8s]]' % \
(str(Z[T-1][0]), str(Z[T-1][1]), str(Z[T-1][2]), str(Z[T-1][3]))
# --------
#
# The following code checks to see if your particle filter
# localizes the robot to within the desired tolerances
# of the true position. The tolerances are defined at the top.
#
def check_output(final_robot, estimated_position):
error_x = abs(final_robot.x - estimated_position[0])
error_y = abs(final_robot.y - estimated_position[1])
error_orientation = abs(final_robot.orientation - estimated_position[2])
error_orientation = (error_orientation + pi) % (2.0 * pi) - pi
correct = error_x < tolerance_xy and error_y < tolerance_xy \
and error_orientation < tolerance_orientation
return correct
def particle_filter(motions, measurements, N=500): # I know it's tempting, but don't change N!
# --------
#
# Make particles
#
p = []
for i in range(N):
r = robot()
r.set_noise(bearing_noise, steering_noise, distance_noise)
p.append(r)
# --------
#
# Update particles
#
for t in range(len(motions)):
# motion update (prediction)
p2 = []
for i in range(N):
p2.append(p[i].move(motions[t]))
p = p2
# measurement update
w = []
for i in range(N):
w.append(p[i].measurement_prob(measurements[t]))
# resampling
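# resampling wheel: step around the circular weight array in random
# increments of up to 2*max(w); particles with larger weights are landed
# on (and thus copied) proportionally more often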
p3 = []
index = int(random.random() * N)
beta = 0.0
mw = max(w)
for i in range(N):
beta += random.random() * 2.0 * mw
while beta > w[index]:
beta -= w[index]
index = (index + 1) % N
p3.append(p[index])
p = p3
return get_position(p)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out.
##
## You can test whether your particle filter works using the
## function check_output (see test case 2). We will be using a similar
## function. Note: Even for a well-implemented particle filter this
## function occasionally returns False. This is because a particle
## filter is a randomized algorithm. We will be testing your code
## multiple times. Make sure check_output returns True at least 80%
## of the time.
## --------
## TEST CASES:
##
##1) Calling the particle_filter function with the following
## motions and measurements should return a [x,y,orientation]
## vector near [x=93.476 y=75.186 orient=5.2664], that is, the
## robot's true location.
##
##motions = [[2. * pi / 10, 20.] for row in range(8)]
##measurements = [[4.746936, 3.859782, 3.045217, 2.045506],
## [3.510067, 2.916300, 2.146394, 1.598332],
## [2.972469, 2.407489, 1.588474, 1.611094],
## [1.906178, 1.193329, 0.619356, 0.807930],
## [1.352825, 0.662233, 0.144927, 0.799090],
## [0.856150, 0.214590, 5.651497, 1.062401],
## [0.194460, 5.660382, 4.761072, 2.471682],
## [5.717342, 4.736780, 3.909599, 2.342536]]
##print particle_filter(motions, measurements)
## 2) You can generate your own test cases by generating
## measurements using the generate_ground_truth function.
## It will print the robot's last location when calling it.
##
##
number_of_iterations = 6
motions = [[2. * pi / 20, 12.] for row in range(number_of_iterations)]
x = generate_ground_truth(motions)
final_robot = x[0]
measurements = x[1]
estimated_position = particle_filter(motions, measurements)
print_measurements(measurements)
print 'Ground truth: ', final_robot
print 'Particle filter: ', estimated_position
print 'Code check: ', check_output(final_robot, estimated_position)
|
|
"""Provide infrastructure to allow exploration of variations within populations.
Uses the gemini framework (https://github.com/arq5x/gemini) to build SQLite
database of variations for query and evaluation.
"""
import collections
import csv
import os
import toolz as tz
from bcbio import install, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import multiallelic, vcfanno, vcfutils
def prep_gemini_db(fnames, call_info, samples, extras):
"""Prepare a gemini database from VCF inputs prepared with snpEff.
"""
data = samples[0]
use_gemini = do_db_build(samples) and any(vcfutils.vcf_has_variants(f) for f in fnames)
name, caller, is_batch = call_info
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
gemini_vcf = get_multisample_vcf(fnames, name, caller, data)
if use_gemini:
passonly = all("gemini_allvariants" not in dd.get_tools_on(d) for d in samples)
gemini_vcf = multiallelic.to_single(gemini_vcf, data, passonly=passonly)
ann_vcf = _run_vcfanno(gemini_vcf, data, use_gemini)
gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller))
if vcfutils.vcf_has_variants(gemini_vcf):
if not utils.file_exists(gemini_db) and use_gemini:
ped_file = create_ped_file(samples + extras, gemini_vcf)
# Use original approach for hg19/GRCh37 pending additional testing
if support_gemini_orig(data) and not any(dd.get_vcfanno(d) for d in samples):
gemini_db = create_gemini_db_orig(gemini_vcf, data, gemini_db, ped_file)
elif ann_vcf:
gemini_db = create_gemini_db(ann_vcf, data, gemini_db, ped_file)
return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None,
"vcf": ann_vcf or gemini_vcf,
"decomposed": use_gemini}]]
def _run_vcfanno(gemini_vcf, data, use_gemini=False):
data_basepath = install.get_gemini_dir(data) if support_gemini_orig(data) else None
conf_files = dd.get_vcfanno(data)
if not conf_files and use_gemini:
conf_files = ["gemini"]
if conf_files:
return vcfanno.run_vcfanno(gemini_vcf, conf_files, data, data_basepath)
else:
return gemini_vcf
def create_gemini_db(gemini_vcf, data, gemini_db=None, ped_file=None):
"""Generalized vcfanno/vcf2db workflow for loading variants into a GEMINI database.
"""
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
if not utils.file_exists(gemini_db):
with file_transaction(data, gemini_db) as tx_gemini_db:
vcf2db = config_utils.get_program("vcf2db.py", data)
if "vcf2db_expand" in dd.get_tools_on(data):
vcf2db_args = ["--expand", "gt_types", "--expand", "gt_ref_depths", "--expand", "gt_alt_depths"]
else:
vcf2db_args = []
cmd = [vcf2db, gemini_vcf, ped_file, tx_gemini_db] + vcf2db_args
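# e.g. (hypothetical file names):
#   vcf2db.py batch1-gatk.vcf.gz batch1.ped batch1-gatk.db --expand gt_types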
do.run(cmd, "GEMINI: create database with vcf2db")
return gemini_db
def create_gemini_db_orig(gemini_vcf, data, gemini_db=None, ped_file=None):
"""Original GEMINI specific data loader, only works with hg19/GRCh37.
"""
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not utils.file_exists(gemini_db):
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
with file_transaction(data, gemini_db) as tx_gemini_db:
gemini = config_utils.get_program("gemini", data["config"])
load_opts = ""
if "gemini_allvariants" not in dd.get_tools_on(data):
load_opts += " --passonly"
# For small test files, skip gene table loading which takes a long time
if _is_small_vcf(gemini_vcf):
load_opts += " --skip-gene-tables"
if "/test_automated_output/" in gemini_vcf:
load_opts += " --test-mode"
# Skip CADD annotation if its data file is not installed
gemini_dir = install.get_gemini_dir(data)
for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
if not os.path.exists(os.path.join(gemini_dir, check_file)):
load_opts += " %s" % skip_cmd
# skip gerp-bp which slows down loading
load_opts += " --skip-gerp-bp "
num_cores = data["config"]["algorithm"].get("num_cores", 1)
tmpdir = os.path.dirname(tx_gemini_db)
eanns = _get_effects_flag(data)
# Apply custom resource specifications, allowing use of alternative annotation_dir
resources = config_utils.get_resources("gemini", data["config"])
gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
exports = utils.local_path_export()
cmd = ("{exports} {gemini} {gemini_opts} load {load_opts} "
"-v {gemini_vcf} {eanns} --cores {num_cores} "
"--tempdir {tmpdir} {tx_gemini_db}")
cmd = cmd.format(**locals())
do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
if ped_file:
cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
do.run(cmd, "Add PED file to gemini database", data)
return gemini_db
def _get_effects_flag(data):
effects_config = tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
if effects_config == "snpeff":
return "-t snpEff"
elif effects_config == "vep":
return "-t VEP"
else:
return ""
def get_affected_status(data):
"""Retrieve the affected/unaffected status of sample.
Uses unaffected (1), affected (2), unknown (0) coding from PED files:
http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
"""
affected = set(["tumor", "affected", "2"])
unaffected = set(["normal", "unaffected", "1"])
phenotype = str(tz.get_in(["metadata", "phenotype"], data, "")).lower()
if dd.get_svclass(data) == "control":
return 1
elif phenotype in affected:
return 2
elif phenotype in unaffected:
return 1
else:
return 0
def get_gender(data):
"""Retrieve gender from metadata, codified as male/female/unknown.
"""
g = str(dd.get_gender(data))
if g and str(g).lower() in ["male", "m", "1"]:
return "male"
elif g and str(g).lower() in ["female", "f", "2"]:
return "female"
else:
return "unknown"
def create_ped_file(samples, base_vcf):
"""Create a GEMINI-compatible PED file, including gender, family and phenotype information.
Checks for a specified `ped` file in metadata, and will use sample information from this file
before reconstituting from metadata information.
"""
out_file = "%s.ped" % utils.splitext_plus(base_vcf)[0]
sample_ped_lines = {}
header = ["#Family_ID", "Individual_ID", "Paternal_ID", "Maternal_ID", "Sex", "Phenotype", "Ethnicity"]
for md_ped in list(set([x for x in [tz.get_in(["metadata", "ped"], data)
for data in samples] if x is not None])):
with open(md_ped) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
for parts in reader:
if parts[0].startswith("#") and len(parts) > len(header):
header = header + parts[len(header):]
else:
sample_ped_lines[parts[1]] = parts
if not utils.file_exists(out_file):
with file_transaction(samples[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(header)
batch = _find_shared_batch(samples)
for data in samples:
gender = {"male": 1, "female": 2, "unknown": 0}.get(get_gender(data))
sname = dd.get_sample_name(data)
if sname in sample_ped_lines:
writer.writerow(sample_ped_lines[sname])
else:
writer.writerow([batch, sname, "-9", "-9",
gender, get_affected_status(data), "-9"])
return out_file
def _find_shared_batch(samples):
for data in samples:
batch = tz.get_in(["metadata", "batch"], data, dd.get_sample_name(data))
if not isinstance(batch, (list, tuple)):
return batch
def _is_small_vcf(vcf_file):
"""Check for small VCFs which we want to analyze quicker.
"""
count = 0
small_thresh = 250
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
count += 1
if count > small_thresh:
return False
return True
def get_multisample_vcf(fnames, name, caller, data):
"""Retrieve a multiple sample VCF file in a standard location.
Handles inputs with multiple repeated input files from batches.
"""
unique_fnames = []
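# deduplicate while preserving input order (batched runs can repeat files)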
for f in fnames:
if f not in unique_fnames:
unique_fnames.append(f)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
if len(unique_fnames) > 1:
gemini_vcf = os.path.join(out_dir, "%s-%s.vcf.gz" % (name, caller))
vrn_file_batch = None
for variant in data.get("variants", []):
if variant["variantcaller"] == caller and variant.get("vrn_file_batch"):
vrn_file_batch = variant["vrn_file_batch"]
if vrn_file_batch:
utils.symlink_plus(vrn_file_batch, gemini_vcf)
return gemini_vcf
else:
return vcfutils.merge_variant_files(unique_fnames, gemini_vcf, dd.get_ref_file(data),
data["config"])
else:
gemini_vcf = os.path.join(out_dir, "%s-%s%s" % (name, caller, utils.splitext_plus(unique_fnames[0])[1]))
utils.symlink_plus(unique_fnames[0], gemini_vcf)
return gemini_vcf
def has_gemini_data(data):
"""Use gemini if we installed required data for hg19, hg38.
Other organisms don't have special data targets.
"""
if support_gemini_orig(data, all_human=True):
from bcbio import install
return "gemini" in install.get_defaults().get("datatarget", [])
else:
return True
def do_db_build(samples, need_bam=True):
"""Confirm we should build a gemini database: need gemini and not in tools_off.
"""
genomes = set()
for data in samples:
if not need_bam or data.get("align_bam") or _has_precalled(data):
genomes.add(data["genome_build"])
if "gemini" in dd.get_tools_off(data):
return False
if len(genomes) == 1:
return has_gemini_data(samples[0])
else:
return False
def support_gemini_orig(data, all_human=False):
support_gemini = ["hg19", "GRCh37"]
if all_human:
support_gemini += ["hg38"]
return dd.get_genome_build(data) in set(support_gemini)
def get_gemini_files(data):
"""Enumerate available gemini data files in a standard installation.
"""
try:
from gemini import annotations, config
except ImportError:
return {}
return {"base": config.read_gemini_config()["annotation_dir"],
"files": annotations.get_anno_files().values()}
def _group_by_batches(samples, check_fn):
"""Group data items into batches, providing details to retrieve results.
"""
batch_groups = collections.defaultdict(list)
singles = []
out_retrieve = []
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch = tz.get_in(["metadata", "batch"], data)
name = str(dd.get_sample_name(data))
if batch:
out_retrieve.append((str(batch), data))
else:
out_retrieve.append((name, data))
for vrn in data["variants"]:
if vrn.get("population", True):
if batch:
batch_groups[(str(batch), vrn["variantcaller"])].append((vrn["vrn_file"], data))
else:
singles.append((name, vrn["variantcaller"], data, vrn["vrn_file"]))
else:
extras.append(data)
return batch_groups, singles, out_retrieve, extras
def _has_precalled(data):
return any(v.get("variantcaller") in ["precalled"] for v in data.get("variants", []))
def _has_variant_calls(data):
for vrn in data["variants"]:
if (vrn.get("vrn_file") and vcfutils.vcf_has_variants(vrn["vrn_file"]) and
(_has_precalled(data) or data.get("align_bam"))):
return True
return False
def prep_db_parallel(samples, parallel_fn):
"""Prepares gemini databases in parallel, handling jointly called populations.
"""
batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
to_process = []
has_batches = False
for (name, caller), info in batch_groups.items():
fnames = [x[0] for x in info]
to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
has_batches = True
for name, caller, data, fname in singles:
to_process.append([[fname], (str(name), caller, False), [data], extras])
if (len(samples) > 0 and not do_db_build([x[0] for x in samples])
and not has_batches and not any(dd.get_vcfanno(x[0]) for x in samples)):
return samples
output = parallel_fn("prep_gemini_db", to_process)
out_fetch = {}
for batch_id, out_file in output:
out_fetch[tuple(batch_id)] = out_file
out = []
for batch_name, data in out_retrieve:
out_variants = []
for vrn in data["variants"]:
use_population = vrn.pop("population", True)
if use_population:
vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
out_variants.append(vrn)
data["variants"] = out_variants
out.append([data])
for x in extras:
out.append([x])
return out
|
|
"""Philips Hue lights platform tests."""
import asyncio
from collections import deque
import logging
from unittest.mock import Mock
import aiohue
from aiohue.lights import Lights
from aiohue.groups import Groups
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.components.hue import light as hue_light
from homeassistant.util import color
_LOGGER = logging.getLogger(__name__)
HUE_LIGHT_NS = "homeassistant.components.light.hue."
GROUP_RESPONSE = {
"1": {
"name": "Group 1",
"lights": ["1", "2"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 254,
"hue": 10000,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
"2": {
"name": "Group 2",
"lights": ["3", "4", "5"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 153,
"hue": 4345,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
}
LIGHT_1_ON = {
"state": {
"on": True,
"bri": 144,
"hue": 13088,
"sat": 212,
"xy": [0.5128, 0.4147],
"ct": 467,
"alert": "none",
"effect": "none",
"colormode": "xy",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": "Extended color light",
"name": "Hue Lamp 1",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "456",
}
LIGHT_1_OFF = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "xy",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": "Extended color light",
"name": "Hue Lamp 1",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "456",
}
LIGHT_2_OFF = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": "Extended color light",
"name": "Hue Lamp 2",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
LIGHT_2_ON = {
"state": {
"on": True,
"bri": 100,
"hue": 13088,
"sat": 210,
"xy": [0.5, 0.4],
"ct": 420,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": "Extended color light",
"name": "Hue Lamp 2 new",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
LIGHT_RESPONSE = {"1": LIGHT_1_ON, "2": LIGHT_2_OFF}
LIGHT_RAW = {
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"swversion": "66009461",
}
LIGHT_GAMUT = color.GamutType(
color.XYPoint(0.704, 0.296),
color.XYPoint(0.2151, 0.7106),
color.XYPoint(0.138, 0.08),
)
LIGHT_GAMUT_TYPE = "A"
@pytest.fixture
def mock_bridge(hass):
"""Mock a Hue bridge."""
bridge = Mock(
available=True,
allow_unreachable=False,
allow_groups=False,
api=Mock(),
spec=hue.HueBridge,
)
bridge.mock_requests = []
# We're using a deque so we can schedule multiple responses; it also
# means that `popleft()` will blow up if we get more updates than
# expected.
bridge.mock_light_responses = deque()
bridge.mock_group_responses = deque()
async def mock_request(method, path, **kwargs):
kwargs["method"] = method
kwargs["path"] = path
bridge.mock_requests.append(kwargs)
if path == "lights":
return bridge.mock_light_responses.popleft()
if path == "groups":
return bridge.mock_group_responses.popleft()
return None
bridge.api.config.apiversion = "9.9.9"
bridge.api.lights = Lights({}, mock_request)
bridge.api.groups = Groups({}, mock_request)
return bridge
async def setup_bridge(hass, mock_bridge):
"""Load the Hue light platform with the provided bridge."""
hass.config.components.add(hue.DOMAIN)
hass.data[hue.DOMAIN] = {"mock-host": mock_bridge}
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": "mock-host"},
"test",
config_entries.CONN_CLASS_LOCAL_POLL,
system_options={},
)
await hass.config_entries.async_forward_entry_setup(config_entry, "light")
# To flush out the service call to update the group
await hass.async_block_till_done()
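# Test pattern used below: queue light/group responses on the deques first,
# then call setup_bridge() or a service; each poll pops exactly one response.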
async def test_not_load_groups_if_old_bridge(hass, mock_bridge):
"""Test that we don't try to load gorups if bridge runs old software."""
mock_bridge.api.config.apiversion = "1.12.0"
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 0
async def test_no_lights_or_groups(hass, mock_bridge):
"""Test the update_lights function when no lights are found."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append({})
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 0
async def test_lights(hass, mock_bridge):
"""Test the update_lights function with some lights."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 1 All Lights group, 2 lights
assert len(hass.states.async_all()) == 3
lamp_1 = hass.states.get("light.hue_lamp_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 144
assert lamp_1.attributes["hs_color"] == (36.067, 69.804)
lamp_2 = hass.states.get("light.hue_lamp_2")
assert lamp_2 is not None
assert lamp_2.state == "off"
async def test_lights_color_mode(hass, mock_bridge):
"""Test that lights only report appropriate color mode."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
lamp_1 = hass.states.get("light.hue_lamp_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 144
assert lamp_1.attributes["hs_color"] == (36.067, 69.804)
assert "color_temp" not in lamp_1.attributes
new_light1_on = LIGHT_1_ON.copy()
new_light1_on["state"] = new_light1_on["state"].copy()
new_light1_on["state"]["colormode"] = "ct"
mock_bridge.mock_light_responses.append({"1": new_light1_on})
mock_bridge.mock_group_responses.append({})
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_2"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
lamp_1 = hass.states.get("light.hue_lamp_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 144
assert lamp_1.attributes["color_temp"] == 467
assert "hs_color" not in lamp_1.attributes
async def test_groups(hass, mock_bridge):
"""Test the update_lights function with some lights."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
# 1 all lights group, 2 hue group lights
assert len(hass.states.async_all()) == 3
lamp_1 = hass.states.get("light.group_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 254
assert lamp_1.attributes["color_temp"] == 250
lamp_2 = hass.states.get("light.group_2")
assert lamp_2 is not None
assert lamp_2.state == "on"
async def test_new_group_discovered(hass, mock_bridge):
"""Test if 2nd update has a new group."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 3
new_group_response = dict(GROUP_RESPONSE)
new_group_response["3"] = {
"name": "Group 3",
"lights": ["3", "4", "5"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 153,
"hue": 4345,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
}
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(new_group_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.group_1"}, blocking=True
)
# 2x group update, 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 5
assert len(hass.states.async_all()) == 4
new_group = hass.states.get("light.group_3")
assert new_group is not None
assert new_group.state == "on"
assert new_group.attributes["brightness"] == 153
assert new_group.attributes["color_temp"] == 250
async def test_new_light_discovered(hass, mock_bridge):
"""Test if 2nd update has a new light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 3
new_light_response = dict(LIGHT_RESPONSE)
new_light_response["3"] = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": "Extended color light",
"name": "Hue Lamp 3",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "789",
}
mock_bridge.mock_light_responses.append(new_light_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 4
light = hass.states.get("light.hue_lamp_3")
assert light is not None
assert light.state == "off"
async def test_group_removed(hass, mock_bridge):
"""Test if 2nd update has removed group."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 3
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append({"1": GROUP_RESPONSE["1"]})
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.group_1"}, blocking=True
)
# 2x group update, 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 5
assert len(hass.states.async_all()) == 2
group = hass.states.get("light.group_1")
assert group is not None
removed_group = hass.states.get("light.group_2")
assert removed_group is None
async def test_light_removed(hass, mock_bridge):
"""Test if 2nd update has removed light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 3
mock_bridge.mock_light_responses.clear()
mock_bridge.mock_light_responses.append({"1": LIGHT_RESPONSE.get("1")})
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 2
light = hass.states.get("light.hue_lamp_1")
assert light is not None
removed_light = hass.states.get("light.hue_lamp_2")
assert removed_light is None
async def test_other_group_update(hass, mock_bridge):
"""Test changing one group that will impact the state of other light."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 3
group_2 = hass.states.get("light.group_2")
assert group_2 is not None
assert group_2.name == "Group 2"
assert group_2.state == "on"
assert group_2.attributes["brightness"] == 153
assert group_2.attributes["color_temp"] == 250
updated_group_response = dict(GROUP_RESPONSE)
updated_group_response["2"] = {
"name": "Group 2 new",
"lights": ["3", "4", "5"],
"type": "LightGroup",
"action": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"effect": "none",
"xy": [0, 0],
"ct": 0,
"alert": "none",
"colormode": "ct",
},
"state": {"any_on": False, "all_on": False},
}
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(updated_group_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.group_1"}, blocking=True
)
# 2x group update, 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 5
assert len(hass.states.async_all()) == 3
group_2 = hass.states.get("light.group_2")
assert group_2 is not None
assert group_2.name == "Group 2 new"
assert group_2.state == "off"
async def test_other_light_update(hass, mock_bridge):
"""Test changing one light that will impact state of other light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 3
lamp_2 = hass.states.get("light.hue_lamp_2")
assert lamp_2 is not None
assert lamp_2.name == "Hue Lamp 2"
assert lamp_2.state == "off"
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response["2"] = {
"state": {
"on": True,
"bri": 100,
"hue": 13088,
"sat": 210,
"xy": [0.5, 0.4],
"ct": 420,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": "Extended color light",
"name": "Hue Lamp 2 new",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
mock_bridge.mock_light_responses.append(updated_light_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 3
lamp_2 = hass.states.get("light.hue_lamp_2")
assert lamp_2 is not None
assert lamp_2.name == "Hue Lamp 2 new"
assert lamp_2.state == "on"
assert lamp_2.attributes["brightness"] == 100
async def test_update_timeout(hass, mock_bridge):
"""Test bridge marked as not available if timeout error during update."""
mock_bridge.api.lights.update = Mock(side_effect=asyncio.TimeoutError)
mock_bridge.api.groups.update = Mock(side_effect=asyncio.TimeoutError)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
assert mock_bridge.available is False
async def test_update_unauthorized(hass, mock_bridge):
"""Test bridge marked as not available if unauthorized during update."""
mock_bridge.api.lights.update = Mock(side_effect=aiohue.Unauthorized)
mock_bridge.api.groups.update = Mock(side_effect=aiohue.Unauthorized)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
assert mock_bridge.available is False
async def test_light_turn_on_service(hass, mock_bridge):
"""Test calling the turn on service on a light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
light = hass.states.get("light.hue_lamp_2")
assert light is not None
assert light.state == "off"
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response["2"] = LIGHT_2_ON
mock_bridge.mock_light_responses.append(updated_light_response)
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": "light.hue_lamp_2", "brightness": 100, "color_temp": 300},
blocking=True,
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert mock_bridge.mock_requests[1]["json"] == {
"bri": 100,
"on": True,
"ct": 300,
"alert": "none",
}
assert len(hass.states.async_all()) == 3
light = hass.states.get("light.hue_lamp_2")
assert light is not None
assert light.state == "on"
# test hue gamut in turn_on service
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": "light.hue_lamp_2", "rgb_color": [0, 0, 255]},
blocking=True,
)
assert len(mock_bridge.mock_requests) == 5
assert mock_bridge.mock_requests[3]["json"] == {
"on": True,
"xy": (0.138, 0.08),
"alert": "none",
}
async def test_light_turn_off_service(hass, mock_bridge):
"""Test calling the turn on service on a light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
light = hass.states.get("light.hue_lamp_1")
assert light is not None
assert light.state == "on"
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response["1"] = LIGHT_1_OFF
mock_bridge.mock_light_responses.append(updated_light_response)
await hass.services.async_call(
"light", "turn_off", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn off request
assert len(mock_bridge.mock_requests) == 3
assert mock_bridge.mock_requests[1]["json"] == {"on": False, "alert": "none"}
assert len(hass.states.async_all()) == 3
light = hass.states.get("light.hue_lamp_1")
assert light is not None
assert light.state == "off"
def test_available():
"""Test available property."""
light = hue_light.HueLight(
light=Mock(
state={"reachable": False},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
request_bridge_update=None,
bridge=Mock(allow_unreachable=False),
is_group=False,
)
assert light.available is False
light = hue_light.HueLight(
light=Mock(
state={"reachable": False},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
request_bridge_update=None,
bridge=Mock(allow_unreachable=True),
is_group=False,
)
assert light.available is True
light = hue_light.HueLight(
light=Mock(
state={"reachable": False},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
request_bridge_update=None,
bridge=Mock(allow_unreachable=False),
is_group=True,
)
assert light.available is True
def test_hs_color():
"""Test hs_color property."""
light = hue_light.HueLight(
light=Mock(
state={"colormode": "ct", "hue": 1234, "sat": 123},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
request_bridge_update=None,
bridge=Mock(),
is_group=False,
)
assert light.hs_color is None
light = hue_light.HueLight(
light=Mock(
state={"colormode": "hs", "hue": 1234, "sat": 123},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
request_bridge_update=None,
bridge=Mock(),
is_group=False,
)
assert light.hs_color is None
light = hue_light.HueLight(
light=Mock(
state={"colormode": "xy", "hue": 1234, "sat": 123, "xy": [0.4, 0.5]},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
request_bridge_update=None,
bridge=Mock(),
is_group=False,
)
assert light.hs_color == color.color_xy_to_hs(0.4, 0.5, LIGHT_GAMUT)
|
|
#
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
"""
Main window, root notebook, navigation history
"""
import gtk
import gobject
import pango
from os.path import join
from general_dialogs import Approve_Deny_Dialog_2
from pathname import ICON_DIR, get_dir
from plugins import get_plugin_by_type
from support import debug, normal_mode, set_debug_mode, warning, get_version, die
from proximateprotocol import PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_NOTIFICATION, \
PLUGIN_TYPE_SETTINGS
import proximatestate
from utils import pretty_line, str_to_int
have_hildon = True
try:
import hildon
except ImportError:
have_hildon = False
import splash
APP_NAME = 'Proximate'
STATUSBAR_ICON_SIZE = 64
MIN_WIDTH = gtk.gdk.screen_width()/2
MIN_HEIGHT = gtk.gdk.screen_height() * 36 / 100
class Proximate_GUI:
""" Main GUI component for Proximate.
This class implements all the main components of GUI and offers
actions for GUI parts of other plugins. Also, guihandler initializes
all plugin GUI's after initializing itself. """
#images
BACK_BUTTON1_IMG = '48px-Go-previous.png'
def __init__(self):
"""Constructor for Proximate_GUI."""
self.community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
self.notification = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION)
settings = get_plugin_by_type(PLUGIN_TYPE_SETTINGS)
self.width_setting = settings.register('gui.width', int, None, 0)
self.height_setting = settings.register('gui.height', int, None, 0)
self.page_history = []
self.main_window = None
self.initialize_menu()
try:
# This seems to exist only on Maemo
gtk.set_application_name(APP_NAME)
except AttributeError:
pass
button = gtk.Button()
button.get_settings().set_property('gtk-button-images', True)
if have_hildon:
self.initialize_hildon_program()
else:
self.initialize_gtk_program()
self.main_loop = gobject.MainLoop()
self.fullscreen_mode = False
self.keybindings = []
self.add_key_binding(None, gtk.keysyms.F6, self.key_pressed_F6)
self.add_key_binding(gtk.gdk.CONTROL_MASK, gtk.keysyms.q, self.key_pressed_ctrl_q)
self.popup_timeout_id = None
self.event_clicked_at = None
self.statusbar_timeout_id = None
self.tbitems = {}
# Signal Ids for connecting communities' userlists
self.userlist_drag_data_get_signal_ids = {}
self.userlists_button_press_signal_ids = {}
# Initialize menu
self.connect_default_menu_signals()
self.notification.register_progress_update(self.handle_progress_update)
def add_key_binding(self, mask, key, callback, ctx=None):
self.keybindings.append((mask, key, callback, ctx))
def key_pressed_F6(self, target, ctx):
# if fullscreen key is pressed
if self.fullscreen_mode:
self.main_window.unfullscreen()
debug('GUI: Fullscreen mode OFF\n')
else:
self.main_window.fullscreen()
debug('GUI: Fullscreen mode ON\n')
def key_pressed_ctrl_q(self, target, ctx):
self.quit()
def run(self):
try:
self.main()
except KeyboardInterrupt:
pass
def main(self):
""" Starts GObject's main loop. """
splash.splash_hide()
self.main_loop.run()
def load_config(self):
# make sure that dimensions are something sane
window_width = max(300, self.width_setting.value)
window_height = max(200, self.height_setting.value)
# set size
debug('Proximate GUI: Resizing main window to %ix%i\n' %(window_width, window_height))
self.main_window.set_default_size(window_width, window_height)
def quit(self):
""" Quits GObject's main loop. """
debug('GUI: Exiting application\n')
self.main_loop.quit()
def quit_clicked(self, widget, *args):
self.main_window.destroy()
def initialize_widgets(self):
self.proximate_main_vbox = gtk.VBox()
self.root_notebook = gtk.Notebook()
self.root_notebook.set_show_tabs(False)
self.root_notebook.set_show_border(False)
# Status hbox: on the bottom of GUI
self.status_hbox = gtk.HBox()
self.back_button_eb = gtk.EventBox()
self.main_progress_bar = gtk.ProgressBar()
self.main_progress_bar.modify_font(pango.FontDescription('normal 20'))
self.main_progress_bar.set_ellipsize(pango.ELLIPSIZE_END)
self.status_icons_hbox = gtk.HBox()
self.status_hbox.pack_start(self.back_button_eb, False, False)
self.status_hbox.pack_start(self.main_progress_bar, True, True)
self.status_hbox.pack_start(self.status_icons_hbox, False, True)
# Add menubar at the top..
self.proximate_main_vbox.pack_start(self.main_menu, False, False)
# Put elements inside main_vbox
self.proximate_main_vbox.pack_end(self.status_hbox, False, False)
self.proximate_main_vbox.pack_end(self.root_notebook, True, True)
button_img = gtk.Image()
button_img.set_from_file(join(get_dir(ICON_DIR), self.BACK_BUTTON1_IMG))
self.back_button_eb.add(button_img)
self.back_button_eb.connect("button-press-event", self.back_button_cb)
self.main_window.add(self.proximate_main_vbox)
def handle_progress_update(self, indicators):
""" Called when some progress indicator changes. This has the effect
on animated progress on the status bar at the bottom. """
if len(indicators) > 0:
self.main_progress_bar.set_text(indicators[0].name + ': ' + indicators[0].msg)
if self.statusbar_timeout_id == None:
self.statusbar_timeout_id = gobject.timeout_add(100, self.statusbar_update_handler)
else:
self.main_progress_bar.set_text('')
self.main_progress_bar.set_fraction(0.0)
if self.statusbar_timeout_id != None:
gobject.source_remove(self.statusbar_timeout_id)
self.statusbar_timeout_id = None
def statusbar_update_handler(self):
self.main_progress_bar.pulse()
return True
def initialize_menu(self):
self.version_menu_item = gtk.MenuItem("Version")
self.plugins_menu_item = gtk.MenuItem("Plugins")
self.plugins_menu = gtk.Menu()
self.plugins_menu_item.set_submenu(self.plugins_menu)
self.preferences_menu_item = gtk.MenuItem("Preferences")
self.preferences_menu = gtk.Menu()
self.preferences_menu_item.set_submenu(self.preferences_menu)
self.quit_menu_item = gtk.MenuItem("Quit")
for (name, function) in (('Debug mode', self.debug_mode_clicked),
('Normal mode', self.normal_mode_clicked),
):
item = gtk.MenuItem(name)
item.connect('activate', function)
self.add_preferences_item(item)
def debug_mode_clicked(self, menu, data=None):
set_debug_mode(True)
self.notification.notify('Entering debug mode (AYEE!)')
def normal_mode_clicked(self, menu, data=None):
normal_mode()
self.notification.notify('Leaving debug mode (fix it later..)')
def initialize_hildon_program(self):
""" Function creates hildon program and window from
UI main window (proximate). Function also connects required events
for using tablet's fullscreen button. """
# Creates hildon Program
self.program = hildon.Program()
# Create the menu
self.main_menu = gtk.Menu()
self.main_menu.append(self.version_menu_item)
self.main_menu.append(self.plugins_menu_item)
self.main_menu.append(self.preferences_menu_item)
self.main_menu.append(self.quit_menu_item)
self.program.set_common_menu(self.main_menu)
self.main_progress_bar = gtk.ProgressBar()
self.main_progress_bar.modify_font(pango.FontDescription('normal 20'))
self.main_progress_bar.set_ellipsize(pango.ELLIPSIZE_END)
self.tb = gtk.Toolbar()
item = gtk.ToolItem()
item.add(self.main_progress_bar)
item.set_expand(True)
self.tb.insert(item, -1)
self.tb.show()
self.program.set_common_toolbar(self.tb)
# fix Maemo's treeview appearance inside a PannableArea
gtk.rc_parse_string('''style "fremantle-touchlist" {
GtkTreeView::row-height = -1 }''')
# fullscreen
#self.main_window.connect("window-state-event", self.on_window_state_change)
def window_configure(self, window, event):
self.width_setting.set(event.width)
self.height_setting.set(event.height)
def initialize_gtk_program(self):
self.main_window = gtk.Window()
self.main_window.set_title(APP_NAME)
self.main_window.connect('configure-event', self.window_configure)
self.load_config() # this must be done here
self.main_window.set_icon_from_file(join(get_dir(ICON_DIR), "proximate_task_icon.png"))
# Create the menubar
self.main_menu = gtk.MenuBar()
self.app_menu_item = gtk.MenuItem("Application")
self.app_menu = gtk.Menu()
self.app_menu_item.set_submenu(self.app_menu)
self.main_menu.append(self.app_menu_item)
self.main_menu.append(self.preferences_menu_item)
self.app_menu.append(self.version_menu_item)
self.main_menu.append(self.plugins_menu_item)
self.app_menu.append(self.quit_menu_item)
self.initialize_widgets()
self.main_window.connect("delete-event", self.close_proximate)
self.main_window.connect("key-press-event", self.on_key_press)
self.main_window.show_all()
def on_key_press(self, widget, event, *args):
for (mask, keyval, callback, ctx) in self.keybindings:
if mask != None and (event.state & mask) == 0:
continue
if keyval == event.keyval:
target = self.community.get_default_community()
callback(target, ctx)
return True
return False
def on_window_state_change(self, widget, event, *args):
if event.new_window_state & gtk.gdk.WINDOW_STATE_FULLSCREEN:
self.fullscreen_mode = True
else:
self.fullscreen_mode = False
def add_menu(self, name, menu):
new_plugin_menu = gtk.MenuItem(name)
new_plugin_menu.set_submenu(menu)
self.plugins_menu.append(new_plugin_menu)
new_plugin_menu.show_all()
def add_preferences_item(self, menuitem):
self.preferences_menu.append(menuitem)
menuitem.show_all()
def add_statusbar_icon(self, icon, tooltip, callback):
""" Adds new icon to statusbar. By clicking the icon,
the given callback is called."""
eventbox = gtk.EventBox()
# Maemo's PyGTK 2.12 doesn't seem to have set_tooltip_text(), though it should
try:
eventbox.set_tooltip_text(tooltip)
except AttributeError:
pass
statusbar_image = gtk.Image()
statusbar_image.set_from_pixbuf(icon)
eventbox.add(statusbar_image)
if have_hildon:
item = gtk.ToolItem()
item.add(eventbox)
self.tb.insert(item, -1)
self.tb.show_all()
self.tbitems[eventbox] = item
else:
self.status_icons_hbox.pack_end(eventbox)
self.status_icons_hbox.show_all()
if callback != None:
eventbox.connect("button-press-event", self.status_icon_clicked, callback)
return eventbox
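# Plugins are expected to keep the returned eventbox and pass it back to
# remove_statusbar_icon() below when the icon is no longer needed.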
def remove_statusbar_icon(self, widget):
""" Removes icon from statusbar. """
if have_hildon:
# hackhackhack
self.tb.remove(self.tbitems[widget])
self.tbitems.pop(widget)
else:
self.status_icons_hbox.remove(widget)
def display_version(self, widget):
headline = 'Proximate %s' % get_version()
msg = headline + """
Department of Computer Systems (2008-2010),
Tampere University of Technology.
Authors:
Timo Heinonen <timo.heinonen@tut.fi>
Tero Huttunen <tero.huttunen@tut.fi>
Janne Kulmala <janne.t.kulmala@tut.fi>
Antti Laine <antti.a.laine@tut.fi>
Jussi Nieminen <jussi.v.nieminen@tut.fi>
Heikki Orsila <heikki.orsila@tut.fi>
More information about copyrights and credits in
/usr/share/doc/proximate/AUTHORS
"""
self.notification.ok_dialog(headline, msg)
def status_icon_clicked(self, widget, event, callback):
callback()
def connect_default_menu_signals(self):
self.version_menu_item.connect("activate", self.display_version)
self.quit_menu_item.connect("activate", self.menu_close_proximate)
def menu_close_proximate(self, widget):
self.close_proximate(None)
def close_proximate(self, widget, data=None):
dlg = Approve_Deny_Dialog_2(widget, 'Close Proximate?', 'Do you really want to close Proximate?', modal=True)
if dlg.run():
self.quit()
return True
def back_button_cb(self, widget, event):
self.go_back_one_page()
def go_back_one_page(self):
""" Go to previous page in root notebook navigation history. Can be
called from plugins, for ex. when plugin needs to hide a page."""
if len(self.page_history) > 0:
self.go_back_page(self.page_history[-1])
def delete_window(self, widget, event, page):
self.go_back_page(page)
return True
def go_back_page(self, page):
if not page.is_visible:
return
if not page.back_action():
# default action: just hide the page
self.hide_page(page)
def add_page(self, page):
if have_hildon:
page.hwindow.connect('delete_event', self.delete_window, page)
page.hwindow.connect("key-press-event", self.on_key_press)
self.program.add_window(page.hwindow)
else:
self.root_notebook.append_page(page)
def remove_page(self, page):
if page.is_visible:
self.hide_page(page)
if have_hildon:
self.program.remove_window(page.hwindow)
else:
self.root_notebook.remove(page)
def has_focus(self):
""" Returns true if Proximate window has focus """
if have_hildon:
return self.page_history[-1].hwindow.get_property('has-toplevel-focus')
else:
return self.main_window.get_property('has-toplevel-focus')
def show_page(self, page):
""" Display the given GUI page to the user """
if have_hildon:
if page.is_visible:
page.hwindow.hide()
page.hwindow.show()
# Use first shown page as the main window
if self.main_window is None:
self.main_window = page.hwindow
else:
self.set_visible_page(page)
if page.is_visible:
self.page_history.remove(page)
self.page_history.append(page)
page.is_visible = True
def hide_page(self, page):
""" Hide the given GUI page by removing it from the navigation history """
assert page.is_visible
if have_hildon:
page.hwindow.hide()
else:
if page == self.page_history[-1]:
# currently visible page, go to the previous one
self.set_visible_page(self.page_history[-2])
self.page_history.remove(page)
page.is_visible = False
def get_current_page(self):
return self.page_history[-1]
def set_visible_page(self, page):
""" For internal use """
self.root_notebook.set_current_page(self.root_notebook.page_num(page))
title = '%s - %s' % (APP_NAME, page.get_page_title())
self.main_window.set_title(title)
def get_main_window(self):
return self.main_window
def pretty_line(self, msg):
n = 80
if have_hildon:
n = 60
return pretty_line(msg, n)
def set_user_double_clicking(self, doubleclick):
self.user_double_clicking = doubleclick
return False # timeout shouldn't call this again
def run_gui():
""" Start Graphical User Interface """
main_gui = Proximate_GUI()
for modulename in ['community_gui', 'messaging_gui',
'notification_gui', 'filesharing_gui',
'messageboard_gui', 'filetransfergui',
'radar', 'keymanagement_gui', 'settings_gui']:
module = __import__(modulename)
try:
module.init_ui(main_gui)
except TypeError:
die('GUI module %s init_ui() called with invalid arguments\n' % modulename)
proximatestate.load_external_plugins(ui=main_gui)
main_gui.run()
|
|
"""
A script to set a matrix for the cross version tests for MLflow Models / autologging integrations.
# How to run:
```
# ===== Include all items =====
python dev/set_matrix.py
# ===== Include only `ml-package-versions.yml` updates =====
REF_VERSIONS_YAML="https://raw.githubusercontent.com/mlflow/mlflow/master/ml-package-versions.yml"
python dev/set_matrix.py --ref-versions-yaml $REF_VERSIONS_YAML
# ===== Include only flavor file updates =====
CHANGED_FILES="
mlflow/keras.py
mlflow/tensorflow/__init__.py
"
python dev/set_matrix.py --changed-files $CHANGED_FILES
# ===== Include both `ml-package-versions.yml` & flavor file updates =====
python dev/set_matrix.py --ref-versions-yaml $REF_VERSIONS_YAML --changed-files $CHANGED_FILES
```
# How to run doctests:
```
pytest dev/set_matrix.py --doctest-modules
```
"""
import sys
import argparse
from packaging.version import Version
import json
import operator
import os
import re
import shutil
import urllib.request
import functools
import yaml
VERSIONS_YAML_PATH = "mlflow/ml-package-versions.yml"
DEV_VERSION = "dev"
def read_yaml(location, if_error=None):
"""
Reads a YAML file.
Examples
--------
>>> read_yaml("https://raw.githubusercontent.com/mlflow/mlflow/master/.circleci/config.yml")
{...}
>>> read_yaml(".circleci/config.yml")
{...}
>>> read_yaml("non_existent.yml", if_error={})
Failed to read ...
{}
"""
try:
if re.search("^https?://", location):
with urllib.request.urlopen(location) as f:
return yaml.load(f, Loader=yaml.SafeLoader)
else:
with open(location) as f:
return yaml.load(f, Loader=yaml.SafeLoader)
except Exception as e:
if if_error is not None:
print("Failed to read '{}' due to: `{}`".format(location, e))
return if_error
raise
@functools.lru_cache()
def get_released_versions(package_name):
"""
Fetches the released versions & datetimes of the specified Python package.
Examples
--------
>>> get_released_versions("scikit-learn")
{'0.10': '2012-01-11T14:42:25', '0.11': '2012-05-08T00:40:14', ...}
"""
url = "https://pypi.python.org/pypi/{}/json".format(package_name)
data = json.load(urllib.request.urlopen(url))
versions = {
# We can actually select any element in `dist_files` because all the distribution files
# should have almost the same upload time.
version: dist_files[0]["upload_time"]
for version, dist_files in data["releases"].items()
# If len(dist_files) = 0, this release is unavailable.
# Example: https://pypi.org/project/xgboost/0.7
#
# > pip install 'xgboost==0.7'
# ERROR: Could not find a version that satisfies the requirement xgboost==0.7
if len(dist_files) > 0 and (not dist_files[0].get("yanked", False))
}
return versions
def select_latest_micro_versions(versions):
"""
Selects the latest micro version in each minor version.
Examples
--------
>>> versions = {
... "1.3.0": "2020-01-01T00:00:00",
... "1.3.1": "2020-02-01T00:00:00", # latest in 1.3
... "1.4.0": "2020-03-01T00:00:00",
... "1.4.1": "2020-04-01T00:00:00",
... "1.4.2": "2020-05-01T00:00:00", # latest in 1.4
... }
>>> select_latest_micro_versions(versions)
['1.3.1', '1.4.2']
"""
seen_minors = set()
res = []
for ver, _ in sorted(
versions.items(),
# Sort by (minor_version, upload_time) in descending order
key=lambda x: (Version(x[0]).release[:2], x[1]),
reverse=True,
):
minor_ver = Version(ver).release[:2]
if minor_ver not in seen_minors:
seen_minors.add(minor_ver)
res.insert(0, ver)
return res
def filter_versions(versions, min_ver, max_ver, excludes=None, allow_unreleased_max_version=False):
"""
Filter versions that satisfy the following conditions:
1. is a final or post release that PEP 440 defines
2. is newer than or equal to `min_ver`
3. has a major version no greater than that of `max_ver`
4. (Optional) is not in `excludes`
Examples
--------
>>> versions = {
... "0.1.0": "2020-01-01T00:00:01",
... "0.2.0": "2020-01-01T00:00:02",
... "1.0.0": "2020-01-01T00:00:00",
... "1.1.0": "2020-01-01T00:01:00",
... }
>>> filter_versions(versions, "0.1.0", "0.2.0") # fetch up to the latest in 0.x.y
{'0.1.0': ..., '0.2.0': ...}
>>> filter_versions(versions, "0.1.0", "1.0.0") # fetch up to the latest in 1.x.y
{'0.1.0': ..., '0.2.0': ..., '1.0.0': ..., '1.1.0': ...}
>>> filter_versions(versions, "0.1.0", "1.0.0", excludes=["0.2.0"])
{'0.1.0': ..., '1.0.0': ..., '1.1.0': ...}
"""
if excludes is None:
excludes = []
# Prevent specifying non-existent versions
assert min_ver in versions
assert max_ver in versions or allow_unreleased_max_version
assert all(v in versions for v in excludes)
versions = {Version(v): t for v, t in versions.items() if v not in excludes}
def _is_final_or_post_release(v):
# final release: https://www.python.org/dev/peps/pep-0440/#final-releases
# post release: https://www.python.org/dev/peps/pep-0440/#post-releases
return (v.base_version == v.public) or (v.is_postrelease)
versions = {v: t for v, t in versions.items() if _is_final_or_post_release(v)}
versions = {v: t for v, t in versions.items() if v.major <= Version(max_ver).major}
versions = {str(v): t for v, t in versions.items() if v >= Version(min_ver)}
return versions
def get_changed_flavors(changed_files, flavors):
"""
Detects changed flavors from a list of changed files.
Examples
--------
>>> flavors = ["pytorch", "xgboost"]
>>> get_changed_flavors(["mlflow/pytorch/__init__.py", "mlflow/xgboost.py"], flavors)
['pytorch', 'xgboost']
>>> get_changed_flavors(["mlflow/xgboost.py"], flavors)
['xgboost']
>>> get_changed_flavors(["tests/xgboost/test_xxx.py"], flavors)
['xgboost']
>>> get_changed_flavors(["tests/xgboost_autolog/test_xxx.py"], flavors)
['xgboost']
>>> get_changed_flavors(["tests/xgboost_autologging/test_xxx.py"], flavors)
['xgboost']
>>> get_changed_flavors(["README.rst"], flavors)
[]
>>> get_changed_flavors([], flavors)
[]
"""
changed_flavors = []
for f in changed_files:
pattern = r"^(mlflow|tests)/(.+?)(_autolog(ging)?)?(\.py|/)"
# The second capture group, `(.+?)`, extracts the flavor name.
match = re.search(pattern, f)
if (match is not None) and (match.group(2) in flavors):
changed_flavors.append(match.group(2))
return changed_flavors
def str_to_operator(s):
"""
Turns a string into the corresponding operator.
Examples
--------
>>> str_to_operator("<")(1, 2) # equivalent to '1 < 2'
True
"""
return {
# https://docs.python.org/3/library/operator.html#mapping-operators-to-functions
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}[s]
def get_operator_and_version(ver_spec):
"""
Converts a version specifier (e.g. "< 3") to a tuple of (operator, version).
Examples
--------
>>> get_operator_and_version("< 3")
(<built-in function lt>, '3')
>>> get_operator_and_version("!= dev")
(<built-in function ne>, 'dev')
"""
regexp = r"([<>=!]+)([\w\.]+)"
m = re.search(regexp, ver_spec.replace(" ", ""))
if m is None:
raise ValueError(
"Invalid value for `ver_spec`: '{}'. Must match this regular expression: '{}'".format(
ver_spec,
regexp,
)
)
return str_to_operator(m.group(1)), m.group(2)
def process_requirements(requirements, version=None):
"""
Examples
--------
>>> process_requirements(None)
[]
>>> process_requirements(["foo"])
['foo']
>>> process_requirements({"== 0.1": ["foo"]}, "0.1")
['foo']
>>> process_requirements({"< 0.2": ["foo"]}, "0.1")
['foo']
>>> process_requirements({"> 0.1, != 0.2": ["foo"]}, "0.3")
['foo']
>>> process_requirements({"== 0.1": ["foo"], "== 0.2": ["bar"]}, "0.2")
['bar']
>>> process_requirements({"== dev": ["foo"]}, "0.1")
[]
>>> process_requirements({"< dev": ["foo"]}, "0.1")
['foo']
>>> process_requirements({"> 0.1": ["foo"]}, "dev")
['foo']
>>> process_requirements({"== dev": ["foo"]}, "dev")
['foo']
>>> process_requirements({"> 0.1, != dev": ["foo"]}, "dev")
[]
"""
if requirements is None:
return []
if isinstance(requirements, list):
return requirements
if isinstance(requirements, dict):
# The version "dev" should always compare as greater than any exisiting versions.
dev_numeric = "9999.9999.9999"
if version == DEV_VERSION:
version = dev_numeric
for ver_spec, packages in requirements.items():
op_and_ver_pairs = map(get_operator_and_version, ver_spec.split(","))
match_all = all(
comp_op(
Version(version),
Version(dev_numeric if req_ver == DEV_VERSION else req_ver),
)
for comp_op, req_ver in op_and_ver_pairs
)
if match_all:
return packages
return []
raise TypeError("Invalid object type for `requirements`: '{}'".format(type(requirements)))
def remove_comments(s):
"""
Examples
--------
>>> code = '''
... # comment 1
... # comment 2
... echo foo
... '''
>>> remove_comments(code)
'echo foo'
"""
return "\n".join(l for l in s.strip().split("\n") if not l.strip().startswith("#"))
def make_pip_install_command(packages):
"""
Examples
--------
>>> make_pip_install_command(["foo", "bar"])
"pip install 'foo' 'bar'"
"""
return "pip install " + " ".join("'{}'".format(x) for x in packages)
def divider(title, length=None):
r"""
Examples
--------
>>> divider("1234", 20)
'\n======= 1234 ======='
"""
length = shutil.get_terminal_size(fallback=(80, 24))[0] if length is None else length
rest = length - len(title) - 2
left = rest // 2  # both parities floor to the same value; the right side gets any extra "="
return "\n{} {} {}".format("=" * left, title, "=" * (rest - left))
def split_by_comma(x):
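"""
Examples
--------
>>> split_by_comma("sklearn, xgboost")
['sklearn', 'xgboost']
>>> split_by_comma(" ")
[]
"""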
stripped = x.strip()
return list(map(str.strip, stripped.split(","))) if stripped != "" else []
def parse_args(args):
parser = argparse.ArgumentParser(description="Set a test matrix for the cross version tests")
parser.add_argument(
"--versions-yaml",
required=False,
default="mlflow/ml-package-versions.yml",
help=(
"URL or local file path of the config yaml. Defaults to "
"'mlflow/ml-package-versions.yml' on the branch where this script is running."
),
)
parser.add_argument(
"--ref-versions-yaml",
required=False,
default=None,
help=(
"URL or local file path of the reference config yaml which will be compared with the "
"config specified by `--versions-yaml` in order to identify the config updates."
),
)
parser.add_argument(
"--changed-files",
type=lambda x: [] if x.strip() == "" else x.strip().split("\n"),
required=False,
default=None,
help=("A string that represents a list of changed files"),
)
parser.add_argument(
"--flavors",
required=False,
type=split_by_comma,
help=(
"Comma-separated string specifying which flavors to test (e.g. 'sklearn, xgboost'). "
"If unspecified, all flavors are tested."
),
)
parser.add_argument(
"--versions",
required=False,
type=split_by_comma,
help=(
"Comma-separated string specifying which versions to test (e.g. '1.2.3, 4.5.6'). "
"If unspecified, all versions are tested."
),
)
parser.add_argument(
"--exclude-dev-versions",
action="store_true",
required=False,
default=False,
help="If True, exclude dev versions from the test matrix.",
)
return parser.parse_args(args)
class Hashabledict(dict):
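"""
A dict that can be a member of a set. Hashing on the key set alone is
sufficient: equality still compares full items, so entries that share keys
but differ in values remain distinct set members.

Examples
--------
>>> a = Hashabledict(flavor="sklearn", version="1.0.0")
>>> b = Hashabledict(flavor="sklearn", version="1.0.0")
>>> {a} == {b}
True
"""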
def __hash__(self):
return hash(frozenset(self))
def expand_config(config):
matrix = []
for flavor_key, cfgs in config.items():
flavor = flavor_key.split("-")[0]
package_info = cfgs.pop("package_info")
all_versions = get_released_versions(package_info["pip_release"])
for key, cfg in cfgs.items():
# Released versions
min_ver = cfg["minimum"]
max_ver = cfg["maximum"]
versions = filter_versions(
all_versions,
min_ver,
max_ver,
cfg.get("unsupported"),
allow_unreleased_max_version=cfg.get("allow_unreleased_max_version", False),
)
versions = select_latest_micro_versions(versions)
# Explicitly include the minimum supported version
if min_ver not in versions:
versions.append(min_ver)
pip_release = package_info["pip_release"]
for ver in versions:
job_name = " / ".join([flavor_key, ver, key])
requirements = ["{}=={}".format(pip_release, ver)]
requirements.extend(process_requirements(cfg.get("requirements"), ver))
install = make_pip_install_command(requirements)
run = remove_comments(cfg["run"])
matrix.append(
Hashabledict(
flavor=flavor,
job_name=job_name,
install=install,
run=run,
package=pip_release,
version=ver,
supported=Version(ver) <= Version(max_ver),
)
)
# Development version
if "install_dev" in package_info:
job_name = " / ".join([flavor_key, DEV_VERSION, key])
requirements = process_requirements(cfg.get("requirements"), DEV_VERSION)
install = (
make_pip_install_command(requirements) + "\n" if requirements else ""
) + remove_comments(package_info["install_dev"])
run = remove_comments(cfg["run"])
matrix.append(
Hashabledict(
flavor=flavor,
job_name=job_name,
install=install,
run=run,
package=pip_release,
version=DEV_VERSION,
supported=False,
)
)
return matrix
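# For reference, a minimal config entry in the shape `expand_config` consumes,
# inferred from the lookups above (package names and versions here are
# illustrative, not taken from the real ml-package-versions.yml):
#
#   sklearn:
#     package_info:
#       pip_release: "scikit-learn"
#       install_dev: "pip install git+https://github.com/scikit-learn/scikit-learn.git"
#     models:
#       minimum: "0.24.0"
#       maximum: "1.0.0"
#       unsupported: ["0.24.1"]
#       requirements:
#         ">= 0.24.2": ["pandas"]
#       run: "pytest tests/sklearn/test_sklearn_model_export.py"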
def process_ref_versions_yaml(ref_versions_yaml, matrix_base):
if ref_versions_yaml is None:
return set()
config_ref = read_yaml(ref_versions_yaml, if_error={})
matrix_ref = set(expand_config(config_ref))
return matrix_base.difference(matrix_ref)
def process_changed_files(changed_files, matrix_base):
if changed_files is None:
return set()
flavors = set(x["flavor"] for x in matrix_base)
changed_flavors = (
# If this file (`dev/set_matrix.py`) has been changed, re-run all tests
flavors
if (__file__ in changed_files)
else get_changed_flavors(changed_files, flavors)
)
return set(filter(lambda x: x["flavor"] in changed_flavors, matrix_base))
def generate_matrix(args):
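"""
Builds the final test matrix: expands the base config, optionally narrows it
to the entries affected by config or flavor-file changes, and then applies
the command-line filters.
"""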
args = parse_args(args)
config_base = read_yaml(args.versions_yaml)
matrix_base = set(expand_config(config_base))
# If both `--ref-versions-yaml` and `--changed-files` are unspecified, no further processing is
# required.
if args.ref_versions_yaml is None and args.changed_files is None:
matrix_final = matrix_base
else:
# Matrix entries for changes on `ml-package-versions.yml`
matrix_diff_config = process_ref_versions_yaml(args.ref_versions_yaml, matrix_base)
# Matrix entries for changes on python scripts under `mlflow` and `tests`
matrix_diff_flavors = process_changed_files(args.changed_files, matrix_base)
# Merge `matrix_diff_config` and `matrix_diff_flavors`
matrix_final = matrix_diff_config.union(matrix_diff_flavors)
# Apply the filtering arguments
if args.exclude_dev_versions:
matrix_final = filter(lambda x: x["version"] != DEV_VERSION, matrix_final)
if args.flavors:
matrix_final = filter(lambda x: x["flavor"] in args.flavors, matrix_final)
if args.versions:
matrix_final = filter(lambda x: x["version"] in args.versions, matrix_final)
return set(matrix_final)
def main(args):
print(divider("Parameters"))
print(json.dumps(args, indent=2))
matrix = generate_matrix(args)
matrix = sorted(matrix, key=lambda x: x["job_name"])
job_names = [x["job_name"] for x in matrix]
matrix = {"job_name": job_names, "include": matrix}
print(divider("Result"))
print(json.dumps(matrix, indent=2))
if "GITHUB_ACTIONS" in os.environ:
# `::set-output` is a special syntax for GitHub Actions to set an action's output parameter.
# https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter
# Note that this actually doesn't print anything to the console.
print("::set-output name=matrix::{}".format(json.dumps(matrix)))
# Set a flag that indicates whether or not the matrix is empty. If this flag is 'true',
# skip the subsequent jobs.
print("::set-output name=is_matrix_empty::{}".format("false" if job_names else "true"))
if __name__ == "__main__":
main(sys.argv[1:])
|
|
from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db.models import F, Q, Avg, Count, Max, StdDev, Sum, Variance
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
# Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
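# (roughly: GROUP BY publisher HAVING COUNT(book.id) < num_awards / 2,
# though the exact SQL is backend-dependent)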
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(name="Jonno's House of Books").annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
# Check that there is just one GROUP BY clause (zero commas means at
# most one clause)
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# 'age' is a field on Author, so it would normally not be allowed as an
# annotation name. But since 'age' isn't included in values(), it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for #10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for #10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
# Regression for #10766 - Shouldn't be able to reference aggregate
# fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
).order_by("num_awards")
self.assertQuerysetEqual(
qs, [
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann"
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
# The name of the explicitly provided annotation name in this case
# poses no problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_annotate_joins(self):
"""
Test that the base table's join isn't promoted to LOUTER. This could
cause the query generation to fail if there is an exclude() for fk-field
in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn('id', group_by[0][0])
self.assertNotIn('name', group_by[0][0])
self.assertNotIn('age', group_by[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('age', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('contact', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
Regression test for #10870: Aggregates with joins ignore extra
filters provided by setup_joins
tests aggregations with generic reverse relations
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book))
ItemTag.objects.create(object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book))
# Assign a tag to model with same PK as the book above. If the JOIN
# used in aggregation doesn't have content type as part of the
# condition the annotation will also count the 'hi mom' tag for b.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk))
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book))
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
Check that an F() object referring to related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
def test_annotate_on_relation(self):
book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 30)
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, an already-promoted join is demoted back to INNER when a
        # later filter restricts that relation.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, its first use by annotate() alone
        # results in a LEFT OUTER join.
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
|
|
#!/usr/bin/env python3
# Python library for Adafruit RGB-backlit LCD plate for Raspberry Pi.
# Written by Adafruit Industries. MIT license.
# This is essentially a complete rewrite, but the calling syntax
# and constants are based on code from lrvick and LiquidCrystal.
# lrvick - https://github.com/lrvick/raspi-hd44780/blob/master/hd44780.py
# LiquidCrystal - https://github.com/arduino/Arduino/blob/master/libraries/LiquidCrystal/LiquidCrystal.cpp
from Adafruit_I2C import Adafruit_I2C
from time import sleep
class Adafruit_CharLCDPlate(Adafruit_I2C):
# ----------------------------------------------------------------------
# Constants
# Port expander registers
MCP23017_IOCON_BANK0 = 0x0A # IOCON when Bank 0 active
MCP23017_IOCON_BANK1 = 0x15 # IOCON when Bank 1 active
# These are register addresses when in Bank 1 only:
MCP23017_GPIOA = 0x09
MCP23017_IODIRB = 0x10
MCP23017_GPIOB = 0x19
# Port expander input pin definitions
SELECT = 0
RIGHT = 1
DOWN = 2
UP = 3
LEFT = 4
# LED colors
OFF = 0x00
RED = 0x01
GREEN = 0x02
BLUE = 0x04
YELLOW = RED + GREEN
TEAL = GREEN + BLUE
VIOLET = RED + BLUE
WHITE = RED + GREEN + BLUE
ON = RED + GREEN + BLUE
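    # The colors are additive bitmasks over the three backlight LEDs, so
    # combinations can also be built on the fly, e.g. (illustrative):
    #   lcd.backlight(lcd.RED | lcd.BLUE)   # same as lcd.VIOLET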
# LCD Commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# Flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# Flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# Flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# ----------------------------------------------------------------------
# Constructor
def __init__(self, busnum=-1, addr=0x20, debug=False):
self.i2c = Adafruit_I2C(addr, busnum, debug)
# I2C is relatively slow. MCP output port states are cached
# so we don't need to constantly poll-and-change bit states.
self.porta, self.portb, self.ddrb = 0, 0, 0b00010000
# Set MCP23017 IOCON register to Bank 0 with sequential operation.
# If chip is already set for Bank 0, this will just write to OLATB,
# which won't seriously bother anything on the plate right now
# (blue backlight LED will come on, but that's done in the next
# step anyway).
self.i2c.bus.write_byte_data(
self.i2c.address, self.MCP23017_IOCON_BANK1, 0)
# Brute force reload ALL registers to known state. This also
# sets up all the input pins, pull-ups, etc. for the Pi Plate.
self.i2c.bus.write_i2c_block_data(
self.i2c.address, 0,
[ 0b00111111, # IODIRA R+G LEDs=outputs, buttons=inputs
self.ddrb , # IODIRB LCD D7=input, Blue LED=output
0b00111111, # IPOLA Invert polarity on button inputs
0b00000000, # IPOLB
0b00000000, # GPINTENA Disable interrupt-on-change
0b00000000, # GPINTENB
0b00000000, # DEFVALA
0b00000000, # DEFVALB
0b00000000, # INTCONA
0b00000000, # INTCONB
0b00000000, # IOCON
0b00000000, # IOCON
0b00111111, # GPPUA Enable pull-ups on buttons
0b00000000, # GPPUB
0b00000000, # INTFA
0b00000000, # INTFB
0b00000000, # INTCAPA
0b00000000, # INTCAPB
self.porta, # GPIOA
self.portb, # GPIOB
self.porta, # OLATA 0 on all outputs; side effect of
self.portb ]) # OLATB turning on R+G+B backlight LEDs.
# Switch to Bank 1 and disable sequential operation.
# From this point forward, the register addresses do NOT match
# the list immediately above. Instead, use the constants defined
# at the start of the class. Also, the address register will no
# longer increment automatically after this -- multi-byte
# operations must be broken down into single-byte calls.
self.i2c.bus.write_byte_data(
self.i2c.address, self.MCP23017_IOCON_BANK0, 0b10100000)
self.displayshift = (self.LCD_CURSORMOVE |
self.LCD_MOVERIGHT)
self.displaymode = (self.LCD_ENTRYLEFT |
self.LCD_ENTRYSHIFTDECREMENT)
self.displaycontrol = (self.LCD_DISPLAYON |
self.LCD_CURSOROFF |
self.LCD_BLINKOFF)
        self.write(0x33) # Init: force 8-bit mode (reset by instruction)
        self.write(0x32) # Finish reset, then switch to 4-bit mode
        self.write(0x28) # Function set: 4-bit bus, 2 lines, 5x8 dots
self.write(self.LCD_CLEARDISPLAY)
self.write(self.LCD_CURSORSHIFT | self.displayshift)
self.write(self.LCD_ENTRYMODESET | self.displaymode)
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
self.write(self.LCD_RETURNHOME)
# ----------------------------------------------------------------------
# Write operations
# The LCD data pins (D4-D7) connect to MCP pins 12-9 (PORTB4-1), in
# that order. Because this sequence is 'reversed,' a direct shift
# won't work. This table remaps 4-bit data values to MCP PORTB
# outputs, incorporating both the reverse and shift.
flip = ( 0b00000000, 0b00010000, 0b00001000, 0b00011000,
0b00000100, 0b00010100, 0b00001100, 0b00011100,
0b00000010, 0b00010010, 0b00001010, 0b00011010,
0b00000110, 0b00010110, 0b00001110, 0b00011110 )
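    # For reference, each flip[] entry follows from the wiring rule "LCD
    # data bit n maps to PORTB bit 4-n". A minimal derivation sketch (not
    # used at runtime, shown only to document the table above):
    #
    #   def _derive_flip(nybble):
    #       out = 0
    #       for n in range(4):          # LCD D4+n -> PORTB bit 4-n
    #           if nybble & (1 << n):
    #               out |= 1 << (4 - n)
    #       return out
    #
    #   assert tuple(_derive_flip(v) for v in range(16)) == flip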
# Low-level 4-bit interface for LCD output. This doesn't actually
# write data, just returns a byte array of the PORTB state over time.
# Can concatenate the output of multiple calls (up to 8) for more
# efficient batch write.
def out4(self, bitmask, value):
hi = bitmask | self.flip[value >> 4]
lo = bitmask | self.flip[value & 0x0F]
return [hi | 0b00100000, hi, lo | 0b00100000, lo]
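    # Illustrative batched use of out4() (hypothetical values): the four
    # PORTB states per byte can be concatenated and sent as one I2C block
    # write, e.g.
    #   seq = self.out4(bitmask, 0x41) + self.out4(bitmask, 0x42)
    #   self.i2c.bus.write_i2c_block_data(self.i2c.address,
    #                                     self.MCP23017_GPIOB, seq)
    # which is exactly what write() below does for strings and lists.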
# The speed of LCD accesses is inherently limited by I2C through the
# port expander. A 'well behaved program' is expected to poll the
# LCD to know that a prior instruction completed. But the timing of
    # most instructions is a known uniform 37 us. The enable strobe
# can't even be twiddled that fast through I2C, so it's a safe bet
# with these instructions to not waste time polling (which requires
# several I2C transfers for reconfiguring the port direction).
# The D7 pin is set as input when a potentially time-consuming
# instruction has been issued (e.g. screen clear), as well as on
# startup, and polling will then occur before more commands or data
# are issued.
pollables = ( LCD_CLEARDISPLAY, LCD_RETURNHOME )
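    # (On a stock HD44780, clear and home take about 1.52 ms versus roughly
    # 37 us for other instructions, which is why only these two trigger
    # polling.)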
# Write byte, list or string value to LCD
def write(self, value, char_mode=False):
""" Send command/data to LCD """
# If pin D7 is in input state, poll LCD busy flag until clear.
if self.ddrb & 0b00010000:
lo = (self.portb & 0b00000001) | 0b01000000
hi = lo | 0b00100000 # E=1 (strobe)
self.i2c.bus.write_byte_data(
self.i2c.address, self.MCP23017_GPIOB, lo)
while True:
# Strobe high (enable)
self.i2c.bus.write_byte(self.i2c.address, hi)
# First nybble contains busy state
bits = self.i2c.bus.read_byte(self.i2c.address)
# Strobe low, high, low. Second nybble (A3) is ignored.
self.i2c.bus.write_i2c_block_data(
self.i2c.address, self.MCP23017_GPIOB, [lo, hi, lo])
if (bits & 0b00000010) == 0: break # D7=0, not busy
self.portb = lo
# Polling complete, change D7 pin to output
self.ddrb &= 0b11101111
self.i2c.bus.write_byte_data(self.i2c.address,
self.MCP23017_IODIRB, self.ddrb)
bitmask = self.portb & 0b00000001 # Mask out PORTB LCD control bits
if char_mode: bitmask |= 0b10000000 # Set data bit if not a command
# If string or list, iterate through multiple write ops
if isinstance(value, str):
last = len(value) - 1 # Last character in string
data = [] # Start with blank list
for i, v in enumerate(value): # For each character...
# Append 4 bytes to list representing PORTB over time.
# First the high 4 data bits with strobe (enable) set
# and unset, then same with low 4 data bits (strobe 1/0).
data.extend(self.out4(bitmask, ord(v)))
# I2C block data write is limited to 32 bytes max.
# If limit reached, write data so far and clear.
# Also do this on last byte if not otherwise handled.
if (len(data) >= 32) or (i == last):
self.i2c.bus.write_i2c_block_data(
self.i2c.address, self.MCP23017_GPIOB, data)
self.portb = data[-1] # Save state of last byte out
data = [] # Clear list for next iteration
elif isinstance(value, list):
# Same as above, but for list instead of string
last = len(value) - 1
data = []
for i, v in enumerate(value):
data.extend(self.out4(bitmask, v))
if (len(data) >= 32) or (i == last):
self.i2c.bus.write_i2c_block_data(
self.i2c.address, self.MCP23017_GPIOB, data)
self.portb = data[-1]
data = []
else:
# Single byte
data = self.out4(bitmask, value)
self.i2c.bus.write_i2c_block_data(
self.i2c.address, self.MCP23017_GPIOB, data)
self.portb = data[-1]
# If a poll-worthy instruction was issued, reconfigure D7
# pin as input to indicate need for polling on next call.
if (not char_mode) and (value in self.pollables):
self.ddrb |= 0b00010000
self.i2c.bus.write_byte_data(self.i2c.address,
self.MCP23017_IODIRB, self.ddrb)
# ----------------------------------------------------------------------
# Utility methods
def begin(self, cols, lines):
self.currline = 0
self.numlines = lines
self.numcols = cols
self.clear()
# Puts the MCP23017 back in Bank 0 + sequential write mode so
# that other code using the 'classic' library can still work.
    # Any code using this newer version of the library should consider
    # registering this method as an atexit handler.
def stop(self):
self.porta = 0b11000000 # Turn off LEDs on the way out
self.portb = 0b00000001
sleep(0.0015)
self.i2c.bus.write_byte_data(
self.i2c.address, self.MCP23017_IOCON_BANK1, 0)
self.i2c.bus.write_i2c_block_data(
self.i2c.address, 0,
[ 0b00111111, # IODIRA
self.ddrb , # IODIRB
0b00000000, # IPOLA
0b00000000, # IPOLB
0b00000000, # GPINTENA
0b00000000, # GPINTENB
0b00000000, # DEFVALA
0b00000000, # DEFVALB
0b00000000, # INTCONA
0b00000000, # INTCONB
0b00000000, # IOCON
0b00000000, # IOCON
0b00111111, # GPPUA
0b00000000, # GPPUB
0b00000000, # INTFA
0b00000000, # INTFB
0b00000000, # INTCAPA
0b00000000, # INTCAPB
self.porta, # GPIOA
self.portb, # GPIOB
self.porta, # OLATA
self.portb ]) # OLATB
def clear(self):
self.write(self.LCD_CLEARDISPLAY)
def home(self):
self.write(self.LCD_RETURNHOME)
row_offsets = ( 0x00, 0x40, 0x14, 0x54 )
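    # DDRAM start address of each row: row 1 begins at 0x40, and on 20x4
    # panels rows 2 and 3 are logical continuations of rows 0 and 1 (hence
    # 0x14 and 0x54). E.g. setCursor(0, 1) issues LCD_SETDDRAMADDR | 0x40.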
def setCursor(self, col, row):
        if row >= self.numlines: row = self.numlines - 1
elif row < 0: row = 0
self.write(self.LCD_SETDDRAMADDR | (col + self.row_offsets[row]))
def display(self):
""" Turn the display on (quickly) """
self.displaycontrol |= self.LCD_DISPLAYON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def noDisplay(self):
""" Turn the display off (quickly) """
self.displaycontrol &= ~self.LCD_DISPLAYON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def cursor(self):
""" Underline cursor on """
self.displaycontrol |= self.LCD_CURSORON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def noCursor(self):
""" Underline cursor off """
self.displaycontrol &= ~self.LCD_CURSORON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def ToggleCursor(self):
""" Toggles the underline cursor On/Off """
self.displaycontrol ^= self.LCD_CURSORON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def blink(self):
""" Turn on the blinking cursor """
self.displaycontrol |= self.LCD_BLINKON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def noBlink(self):
""" Turn off the blinking cursor """
self.displaycontrol &= ~self.LCD_BLINKON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def ToggleBlink(self):
""" Toggles the blinking cursor """
self.displaycontrol ^= self.LCD_BLINKON
self.write(self.LCD_DISPLAYCONTROL | self.displaycontrol)
def scrollDisplayLeft(self):
""" These commands scroll the display without changing the RAM """
self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVELEFT
self.write(self.LCD_CURSORSHIFT | self.displayshift)
def scrollDisplayRight(self):
""" These commands scroll the display without changing the RAM """
self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVERIGHT
self.write(self.LCD_CURSORSHIFT | self.displayshift)
def leftToRight(self):
""" This is for text that flows left to right """
self.displaymode |= self.LCD_ENTRYLEFT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def rightToLeft(self):
""" This is for text that flows right to left """
self.displaymode &= ~self.LCD_ENTRYLEFT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def autoscroll(self):
""" This will 'right justify' text from the cursor """
self.displaymode |= self.LCD_ENTRYSHIFTINCREMENT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def noAutoscroll(self):
""" This will 'left justify' text from the cursor """
self.displaymode &= ~self.LCD_ENTRYSHIFTINCREMENT
self.write(self.LCD_ENTRYMODESET | self.displaymode)
def createChar(self, location, bitmap):
self.write(self.LCD_SETCGRAMADDR | ((location & 7) << 3))
self.write(bitmap, True)
self.write(self.LCD_SETDDRAMADDR)
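    # Illustrative use of createChar() (hypothetical glyph): define a 5x8
    # bitmap, store it in CGRAM slot 0, then print it as character 0:
    #   smiley = [0b00000, 0b01010, 0b01010, 0b00000,
    #             0b10001, 0b01110, 0b00000, 0b00000]
    #   lcd.createChar(0, smiley)
    #   lcd.message(chr(0))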
def message(self, text, limitMode = 0):
""" Send string to LCD. Newline wraps to next line"""
lines = str(text).split('\n') # Split at newline(s)
for i, line in enumerate(lines): # For each substring...
if i == 1: # If newline(s),
self.write(0xC0) # set DDRAM address to 2nd line
elif i == 2:
self.write(0x94)
elif i >= 3:
self.write(0xD4)
"""Now depending on the limit mode set by the function call this will handle """
lineLength = len(line)
limit = self.numcols
if limitMode <= 0:
self.write(line, True)
            elif lineLength >= limit and limitMode == 1:
                # Limit mode 1: truncate the line at the number of columns
                # available on the display.
                limitedLine = line[0:self.numcols]
                self.write(limitedLine, True)
            elif lineLength >= limit and limitMode == 2:
                # Limit mode 2: truncate at the number of columns minus 3
                # and append an ellipsis.
                limitedLine = line[0:self.numcols-3] + '...'
                self.write(limitedLine, True)
            elif lineLength >= limit and limitMode >= 3:
                # Future todo: add a proper "line after line" carriage return.
                pass
            else:
                self.write(line, True)
def backlight(self, color):
c = ~color
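        # The backlight LEDs are active-low, hence the inversion: R and G
        # sit on PORTA bits 6-7, B on PORTB bit 0. E.g. color=RED clears
        # PORTA bit 6 (red on) while leaving the green and blue bits set.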
self.porta = (self.porta & 0b00111111) | ((c & 0b011) << 6)
self.portb = (self.portb & 0b11111110) | ((c & 0b100) >> 2)
# Has to be done as two writes because sequential operation is off.
self.i2c.bus.write_byte_data(
self.i2c.address, self.MCP23017_GPIOA, self.porta)
self.i2c.bus.write_byte_data(
self.i2c.address, self.MCP23017_GPIOB, self.portb)
# Read state of single button
def buttonPressed(self, b):
return (self.i2c.readU8(self.MCP23017_GPIOA) >> b) & 1
# Read and return bitmask of combined button state
def buttons(self):
return self.i2c.readU8(self.MCP23017_GPIOA) & 0b11111
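    # Illustrative decoding of the buttons() bitmask (hypothetical usage):
    #   state = lcd.buttons()
    #   pressed = [name for bit, name in
    #              ((lcd.SELECT, 'select'), (lcd.RIGHT, 'right'),
    #               (lcd.DOWN, 'down'), (lcd.UP, 'up'), (lcd.LEFT, 'left'))
    #              if state & (1 << bit)]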
# ----------------------------------------------------------------------
# Test code
if __name__ == '__main__':
lcd = Adafruit_CharLCDPlate()
lcd.begin(16, 4)
lcd.clear()
lcd.message("Adafruit RGB LCD\nPlate w/Key\npa\nd!")
sleep(1)
col = (('Red' , lcd.RED) , ('Yellow', lcd.YELLOW), ('Green' , lcd.GREEN),
('Teal', lcd.TEAL), ('Blue' , lcd.BLUE) , ('Violet', lcd.VIOLET),
('Off' , lcd.OFF) , ('On' , lcd.ON))
print("Cycle thru backlight colors")
for c in col:
print(c[0])
lcd.clear()
lcd.message(c[0])
lcd.backlight(c[1])
sleep(0.5)
btn = ((lcd.SELECT, 'Select', lcd.ON),
(lcd.LEFT , 'Left' , lcd.RED),
(lcd.UP , 'Up' , lcd.BLUE),
(lcd.DOWN , 'Down' , lcd.GREEN),
(lcd.RIGHT , 'Right' , lcd.VIOLET))
print("Try buttons on plate")
lcd.clear()
lcd.message("Try buttons")
prev = -1
while True:
for b in btn:
if lcd.buttonPressed(b[0]):
if b is not prev:
print(b[1])
lcd.clear()
lcd.message(b[1])
lcd.backlight(b[2])
prev = b
break
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test simulates the first time a database has to be split.
- we start with a keyspace with a single shard and a single table
- we add and populate the sharding key
- we set the sharding key in the topology
- we clone into 2 instances
- we enable filtered replication
- we move all serving types
- we remove the source tablets
- we remove the original shard
"""
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
# use_l2vtgate is set if we want to use l2vtgate processes.
# We'll set them up to have:
# l2vtgate1: covers the initial shard, and -80
# l2vtgate2: covers 80-
use_l2vtgate = False
# the l2vtgate processes, if applicable
l2vtgate1 = None
l2vtgate2 = None
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
all_tablets = [shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in all_tablets]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestInitialSharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
def _create_schema(self):
    # Note that the primary key columns are deliberately not defined first,
    # to test that a reordered column list is correctly used everywhere in
    # vtworker.
create_table_template = '''create table %s(
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
def _add_sharding_key_to_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s add custom_ksid_col ' + t
utils.run_vtctl(['ApplySchema',
'-sql=' + sql % ('resharding1'),
'test_keyspace'],
auto_log=True)
def _mark_sharding_key_not_null(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s modify custom_ksid_col ' + t + ' not null'
utils.run_vtctl(['ApplySchema',
'-sql=' + sql % ('resharding1'),
'test_keyspace'],
auto_log=True)
# _insert_startup_value inserts a value in the MySQL database before it
# is sharded
def _insert_startup_value(self, tablet_obj, table, mid, msg):
tablet_obj.mquery('vt_test_keyspace', [
'begin',
'insert into %s(parent_id, id, msg) values(%d, %d, "%s")' %
(table, base_sharding.fixed_parent_id, mid, msg),
'commit'
], write=True)
def _insert_startup_values(self):
self._insert_startup_value(shard_master, 'resharding1', 1, 'msg1')
self._insert_startup_value(shard_master, 'resharding1', 2, 'msg2')
self._insert_startup_value(shard_master, 'resharding1', 3, 'msg3')
def _backfill_keyspace_id(self, tablet_obj):
tablet_obj.mquery('vt_test_keyspace', [
'begin',
'update resharding1 set custom_ksid_col=0x1000000000000000 where id=1',
'update resharding1 set custom_ksid_col=0x9000000000000000 where id=2',
'update resharding1 set custom_ksid_col=0xD000000000000000 where id=3',
'commit'
], write=True)
def _check_startup_values(self):
# check first value is in the left shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 1, 'msg1', 0x1000000000000000)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 1, 'msg1',
0x1000000000000000, should_be_here=False)
# check second value is in the right shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
# check third value is in the right shard too
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
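  # Both inserted ranges use keyspace ids at or above 0x8000000000000000, so
  # every row belongs to the 80- shard; _check_lots below therefore only
  # inspects shard_1_replica, while _check_lots_not_present checks
  # shard_0_replica.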
  # _check_lots returns the percentage of the expected values found.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('enough data went through', timeout)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_0_replica, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def test_resharding(self):
global l2vtgate1, l2vtgate2
# create the keyspace with just one shard
shard_master.init_tablet(
'replica',
keyspace='test_keyspace',
shard='0',
tablet_index=0)
shard_replica.init_tablet(
'replica',
keyspace='test_keyspace',
shard='0',
tablet_index=1)
shard_rdonly1.init_tablet(
'rdonly',
keyspace='test_keyspace',
shard='0',
tablet_index=2)
for t in [shard_master, shard_replica, shard_rdonly1]:
t.create_db('vt_test_keyspace')
shard_master.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
shard_replica.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
shard_rdonly1.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_master, shard_replica, shard_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
shard_master.tablet_alias], auto_log=True)
utils.wait_for_tablet_type(shard_replica.tablet_alias, 'replica')
utils.wait_for_tablet_type(shard_rdonly1.tablet_alias, 'rdonly')
for t in [shard_master, shard_replica, shard_rdonly1]:
t.wait_for_vttablet_state('SERVING')
# create the tables and add startup values
self._create_schema()
self._insert_startup_values()
# reload schema on all tablets so we can query them
for t in [shard_master, shard_replica, shard_rdonly1]:
utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)
    # We must start vtgate after the tablets are up, or else wait for the
    # 1-minute refresh (the tablet_refresh_interval parameter of the
    # discovery gateway). We want cache_ttl at zero so we re-read the
    # topology for every test query.
if use_l2vtgate:
l2vtgate1 = utils.L2VtGate()
l2vtgate1.start(tablets=
[shard_master, shard_replica, shard_rdonly1])
l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1)
_, addr = l2vtgate1.rpc_endpoint()
l2vtgate1_param = '%s|test_keyspace|0' % addr
utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_param,])
else:
utils.VtGate().start(cache_ttl='0', tablets=[
shard_master, shard_replica, shard_rdonly1])
utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1)
# check the Map Reduce API works correctly, should use ExecuteShards,
# as we're not sharded yet.
# we have 3 values in the database, asking for 4 splits will get us
# a single query.
sql = 'select id, msg from resharding1'
s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
self.assertEqual(len(s), 1)
self.assertEqual(s[0]['shard_part']['shards'][0], '0')
# change the schema, backfill keyspace_id, and change schema again
self._add_sharding_key_to_schema()
self._backfill_keyspace_id(shard_master)
self._mark_sharding_key_not_null()
# now we can be a sharded keyspace (and propagate to SrvKeyspace)
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'custom_ksid_col', base_sharding.keyspace_id_type])
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
# run a health check on source replica so it responds to discovery
utils.run_vtctl(['RunHealthCheck', shard_replica.tablet_alias])
# create the split shards
shard_0_master.init_tablet(
'replica',
keyspace='test_keyspace',
shard='-80',
tablet_index=0)
shard_0_replica.init_tablet(
'replica',
keyspace='test_keyspace',
shard='-80',
tablet_index=1)
shard_0_rdonly1.init_tablet(
'rdonly',
keyspace='test_keyspace',
shard='-80',
tablet_index=2)
shard_1_master.init_tablet(
'replica',
keyspace='test_keyspace',
shard='80-',
tablet_index=0)
shard_1_replica.init_tablet(
'replica',
keyspace='test_keyspace',
shard='80-',
tablet_index=1)
shard_1_rdonly1.init_tablet(
'rdonly',
keyspace='test_keyspace',
shard='80-',
tablet_index=2)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
for t in [shard_0_replica, shard_1_replica]:
utils.wait_for_tablet_type(t.tablet_alias, 'replica')
for t in [shard_0_rdonly1, shard_1_rdonly1]:
utils.wait_for_tablet_type(t.tablet_alias, 'rdonly')
sharded_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]
for t in sharded_tablets:
t.wait_for_vttablet_state('SERVING')
# must restart vtgate after tablets are up, or else wait until 1min refresh
# we want cache_ttl at zero so we re-read the topology for every test query.
utils.vtgate.kill()
if use_l2vtgate:
l2vtgate1.kill()
l2vtgate1 = utils.L2VtGate()
l2vtgate1.start(tablets=
[shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica, shard_0_rdonly1],
tablet_filters='test_keyspace|0,test_keyspace|-80')
l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.-80.master', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.-80.replica', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
l2vtgate1.verify_no_endpoint('test_keyspace.80-.master')
l2vtgate1.verify_no_endpoint('test_keyspace.80-.replica')
l2vtgate1.verify_no_endpoint('test_keyspace.80-.rdonly')
      # FIXME(alainjobart) we clear tablet_types_to_wait, as this
      # l2vtgate2 doesn't serve the current test_keyspace shard, which
      # is test_keyspace.0. This is not ideal; we should re-work which
      # keyspace/shards an l2vtgate waits for, to match the ones
      # selected by tablet_filters.
l2vtgate2 = utils.L2VtGate()
l2vtgate2.start(tablets=
[shard_1_master, shard_1_replica, shard_1_rdonly1],
tablet_filters='test_keyspace|80-',
tablet_types_to_wait='')
l2vtgate2.wait_for_endpoints('test_keyspace.80-.master', 1)
l2vtgate2.wait_for_endpoints('test_keyspace.80-.replica', 1)
l2vtgate2.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
l2vtgate2.verify_no_endpoint('test_keyspace.0.master')
l2vtgate2.verify_no_endpoint('test_keyspace.0.replica')
l2vtgate2.verify_no_endpoint('test_keyspace.0.rdonly')
l2vtgate2.verify_no_endpoint('test_keyspace.-80.master')
l2vtgate2.verify_no_endpoint('test_keyspace.-80.replica')
l2vtgate2.verify_no_endpoint('test_keyspace.-80.rdonly')
_, addr1 = l2vtgate1.rpc_endpoint()
_, addr2 = l2vtgate2.rpc_endpoint()
l2vtgate1_param1 = '%s|test_keyspace|0' % addr1
l2vtgate1_param2 = '%s|test_keyspace|-80' % addr1
l2vtgate2_param = '%s|test_keyspace|80-' % addr2
utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_param1,
l2vtgate1_param2,
l2vtgate2_param,])
else:
utils.VtGate().start(cache_ttl='0', tablets=[
shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1])
utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.-80.replica', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
# check the Map Reduce API works correctly, should use ExecuteKeyRanges now,
# as we are sharded (with just one shard).
# again, we have 3 values in the database, asking for 4 splits will get us
# a single query.
sql = 'select id, msg from resharding1'
s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
self.assertEqual(len(s), 1)
self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
# There must be one empty KeyRange which represents the full keyspace.
self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
self.assertEqual(s[0]['key_range_part']['key_ranges'][0], {})
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -\n'
'Partitions(replica): -\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
utils.run_vtctl(['CopySchemaShard',
'--exclude_tables', 'unrelated',
shard_rdonly1.tablet_alias,
keyspace_shard],
auto_log=True)
utils.run_vtctl(['RunHealthCheck', shard_rdonly1.tablet_alias])
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
3, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 1 (provokes an insert).
shard_0_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=1', write=True)
# Delete row 2 (provokes an insert).
shard_1_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=2', write=True)
# Update row 3 (provokes an update).
shard_1_master.mquery('vt_test_keyspace',
"update resharding1 set msg='msg-not-3' where id=3",
write=True)
# Insert row 4 (provokes a delete).
self._insert_value(shard_1_master, 'resharding1', 4, 'msg4',
0xD000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 1, 1, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 3)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
# check the binlog players are running
logging.debug('Waiting for binlog players to start on new masters...')
self.check_destination_master(shard_0_master, ['test_keyspace/0'])
self.check_destination_master(shard_1_master, ['test_keyspace/0'])
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_replica, horizontal=True)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 5)
if v != 100:
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
self.check_binlog_player_vars(shard_0_master, ['test_keyspace/0'],
seconds_behind_master_max=30)
self.check_binlog_player_vars(shard_1_master, ['test_keyspace/0'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data
logging.debug('Running vtworker SplitDiff for -80')
for t in [shard_0_rdonly1, shard_1_rdonly1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
auto_log=True)
logging.debug('Running vtworker SplitDiff for 80-')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/80-'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# get status for the destination master tablet, make sure we have it all
self.check_running_binlog_player(shard_0_master, 2000, 2000)
self.check_running_binlog_player(shard_1_master, 6000, 2000)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
expect_fail=True)
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# make sure rdonly tablets are back to serving before hitting vtgate.
for t in [shard_0_rdonly1, shard_1_rdonly1]:
t.wait_for_vttablet_state('SERVING')
if use_l2vtgate:
l2vtgate1.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
l2vtgate2.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
else:
utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
# check the Map Reduce API works correctly, should use ExecuteKeyRanges
# on both destination shards now.
# we ask for 2 splits to only have one per shard
sql = 'select id, msg from resharding1'
s = utils.vtgate.split_query(sql, 'test_keyspace', 2)
self.assertEqual(len(s), 2)
self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
self.assertEqual(s[1]['key_range_part']['keyspace'], 'test_keyspace')
self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
self.assertEqual(len(s[1]['key_range_part']['key_ranges']), 1)
# then serve replica from the split shards
source_tablet = shard_replica
destination_tablets = [shard_0_replica, shard_1_replica]
utils.run_vtctl(
['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, source_tablet, True, False)
utils.check_tablet_query_services(self, destination_tablets, False, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, source_tablet, False, True)
utils.check_tablet_query_services(self, destination_tablets, True, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# then serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# check the binlog players are gone now
self.check_no_binlog_player(shard_0_master)
self.check_no_binlog_player(shard_1_master)
# make sure we can't delete a shard with tablets
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], expect_fail=True)
# remove the original tablets in the original shard
tablet.kill_tablets([shard_master, shard_replica, shard_rdonly1])
for t in [shard_replica, shard_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
utils.run_vtctl(['DeleteTablet', '-allow_master',
shard_master.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1])
if __name__ == '__main__':
utils.main()
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import unittest
from pyreach import core # pylint: disable=unused-import
from pyreach import text_instruction
from pyreach.common.python import types_gen
from pyreach.impl import test_utils
from pyreach.impl import text_instruction_impl
from pyreach.impl import thread_util
class TestPyReachTextInstruction(unittest.TestCase):
def test_test_text_instruction(self) -> None:
test_utils.run_test_client_test([TestTextInstructions()], [
test_utils.TestResponderStep(
types_gen.CommandData(
ts=1, device_type="color-camera", data_type="frame-request"),
(types_gen.DeviceData(
ts=1,
device_type="instruction-generator",
data_type="text-instruction",
text_instruction=types_gen.TextInstruction(
intent="pick",
success_type="test-type",
instruction="test instruction",
success_detection="test-detection",
uid="test-uid")),)),
test_utils.TestResponderStep(
types_gen.CommandData(
                ts=2, device_type="color-camera", data_type="frame-request"),
()),
test_utils.TestResponderStep(
types_gen.CommandData(
ts=3,
tag="test-tag",
device_type="instruction-generator",
data_type="text-instruction-request"), (
types_gen.DeviceData(
ts=3,
tag="test-tag",
device_type="instruction-generator",
data_type="text-instruction",
text_instruction=types_gen.TextInstruction(
intent="pick",
success_type="test-type",
instruction="test instruction",
success_detection="test-detection",
uid="test-uid")),
types_gen.DeviceData(
ts=3,
tag="test-tag",
device_type="instruction-generator",
data_type="cmd-status",
status="done"),
)),
test_utils.TestResponderStep(
types_gen.CommandData(
ts=3,
device_type="instruction-generator",
data_type="text-instruction-request"), (types_gen.DeviceData(
ts=3,
device_type="instruction-generator",
data_type="text-instruction",
text_instruction=types_gen.TextInstruction(
intent="pick",
success_type="test-type",
instruction="test instruction",
success_detection="test-detection",
uid="test-uid")),))
])
def test_text_instructions(self) -> None:
    ## Setup: ensure there is no cached instruction and that tagged
    ## requests will be used.
rdev, dev = text_instruction_impl.TextInstructionDevice().get_wrapper()
with test_utils.TestDevice(rdev) as test_device:
global_callbacks: ("thread_util.CallbackCapturer["
"text_instruction.TextInstruction]") = (
thread_util.CallbackCapturer())
stop_callback = dev.add_update_callback(
global_callbacks.callback_false, global_callbacks.finished_callback)
self.assertIsNone(dev.text_instruction)
test_device.set_responder(TestTextInstructions())
test_device.send_cmd(
types_gen.CommandData(
ts=1, device_type="color-camera", data_type="frame-request"))
frame_0 = dev.text_instruction
self._verify_frame(frame_0)
frame_1 = dev.fetch_text_instruction()
self._verify_frame(frame_1)
self.assertNotEqual(frame_0, frame_1)
callbacks: (
"thread_util.DoubleCallbackCapturer["
"text_instruction.TextInstruction,"
" core.PyReachStatus]") = thread_util.DoubleCallbackCapturer()
dev.async_fetch_text_instruction(
callback=callbacks.first_callback_finish,
error_callback=callbacks.second_callback_finish)
frames = callbacks.wait()
self.assertEqual(len(frames), 1)
self.assertIsNone(frames[0][1])
frame_2 = frames[0][0]
self._verify_frame(frame_2)
test_device.set_callback(None)
self.assertIsNone(dev.fetch_text_instruction(timeout=0))
dev.async_fetch_text_instruction(
callback=callbacks.first_callback_finish,
error_callback=callbacks.second_callback_finish,
timeout=0)
frames = callbacks.wait()
self.assertEqual(len(frames), 1)
self.assertIsNone(frames[0][0])
status = frames[0][1]
self.assertIsNotNone(status)
if status:
self.assertEqual(status.status, "done")
self.assertEqual(status.error, "timeout")
test_device.set_responder(test_utils.RejectResponder())
self.assertIsNone(dev.fetch_text_instruction())
dev.async_fetch_text_instruction(
callback=callbacks.first_callback_finish,
error_callback=callbacks.second_callback_finish)
frames = callbacks.wait()
self.assertEqual(len(frames), 1)
self.assertIsNone(frames[0][0])
status = frames[0][1]
self.assertIsNotNone(status)
if status:
self.assertEqual(status.status, "rejected")
self.assertEqual(status.error, "")
stop_callback()
global_frames = global_callbacks.wait()
self.assertEqual(len(global_frames), 3)
self.assertEqual(global_frames[0], frame_0)
self.assertEqual(global_frames[1], frame_1)
self.assertEqual(global_frames[2], frame_2)
def _verify_frame(self,
frame: Optional[text_instruction.TextInstruction]) -> None:
self.assertIsNotNone(frame)
if frame:
self.assertEqual(frame.intent, "pick")
self.assertEqual(frame.success_type, "test-type")
self.assertEqual(frame.instruction, "test instruction")
self.assertEqual(frame.success_detection, "test-detection")
self.assertEqual(frame.uid, "test-uid")
class TestTextInstructions(test_utils.TestResponder):
"""A test text instructions for the test suite."""
_sent: bool
def __init__(self) -> None:
"""Init a TestTextInstructions."""
self._sent = False
def _generate_message(self, ts: int, tag: str) -> types_gen.DeviceData:
return types_gen.DeviceData(
device_type="instruction-generator",
tag=tag,
ts=ts,
data_type="text-instruction",
text_instruction=types_gen.TextInstruction(
intent="pick",
success_type="test-type",
instruction="test instruction",
success_detection="test-detection",
uid="test-uid"))
def step(self, cmd: types_gen.CommandData) -> List[types_gen.DeviceData]:
"""Test step, generates a response for testing framework data."""
output: List[types_gen.DeviceData] = []
if not self._sent:
self._sent = True
output.append(self._generate_message(cmd.ts, ""))
if (cmd.device_type == "instruction-generator" and not cmd.device_name and
cmd.data_type == "text-instruction-request"):
output.append(self._generate_message(cmd.ts, cmd.tag))
if cmd.tag:
output.append(
types_gen.DeviceData(
device_type="instruction-generator",
tag=cmd.tag,
ts=cmd.ts,
data_type="cmd-status",
status="done"))
return output
def start(self) -> List[types_gen.DeviceData]:
return []
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from ambari_agent.ActionQueue import ActionQueue
import ambari_agent.ActionQueue as AQM
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_agent.UpgradeExecutor import UpgradeExecutor
from ambari_agent.PuppetExecutor import PuppetExecutor
from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
from ambari_agent.ActualConfigHandler import ActualConfigHandler
import os, errno, time, pprint, tempfile, threading
import TestStackVersionsFileHandler
from mock.mock import patch, MagicMock, call
class TestActionQueue(TestCase):
def test_ActionQueueStartStop(self):
actionQueue = ActionQueue(AmbariConfig().getConfig())
actionQueue.IDLE_SLEEP_TIME = 0.01
actionQueue.start()
actionQueue.stop()
actionQueue.join()
self.assertEqual(actionQueue.stopped(), True, 'Action queue is not stopped.')
def test_command_in_progress(self):
config = AmbariConfig().getConfig()
tmpfile = tempfile.gettempdir()
config.set('agent', 'prefix', tmpfile)
actionQueue = ActionQueue(config)
actionQueue.IDLE_SLEEP_TIME = 0.01
executor_started_event = threading.Event()
end_executor_event = threading.Event()
actionQueue.puppetExecutor = FakeExecutor(executor_started_event, end_executor_event)
before_start_result = actionQueue.result()
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'status' : 'IN_PROGRESS',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
'roleCommand': "roleCommand",
      'commandType': "EXECUTION_COMMAND",
      'configurations': {'global' : {}}
}
actionQueue.put(command)
actionQueue.start()
executor_started_event.wait()
#print ("ii: " + pprint.pformat(actionQueue.commandInProgress))
in_progress_result = actionQueue.result()
end_executor_event.set()
actionQueue.stop()
actionQueue.join()
after_start_result = actionQueue.result()
self.assertEquals(len(before_start_result['componentStatus']), 0)
self.assertEquals(len(before_start_result['reports']), 0)
self.assertEquals(len(in_progress_result['componentStatus']), 0)
self.assertEquals(len(in_progress_result['reports']), 1)
self.assertEquals(in_progress_result['reports'][0]['status'], "IN_PROGRESS")
self.assertEquals(in_progress_result['reports'][0]['stdout'], "Dummy output")
self.assertEquals(in_progress_result['reports'][0]['exitCode'], 777)
self.assertEquals(in_progress_result['reports'][0]['stderr'], 'Dummy err')
self.assertEquals(len(after_start_result['componentStatus']), 0)
self.assertEquals(len(after_start_result['reports']), 1)
self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
#print("tmpout: " + pprint.pformat(actionQueue.tmpdir))
#print("before: " + pprint.pformat(before_start_result))
#print("in_progress: " + pprint.pformat(in_progress_result))
#print("after: " + pprint.pformat(after_start_result))
def test_configtags(self):
config = AmbariConfig().getConfig()
tmpfile = tempfile.gettempdir()
config.set('agent', 'prefix', tmpfile)
actionQueue = ActionQueue(config)
actionQueue.IDLE_SLEEP_TIME = 0.01
executor_started_event = threading.Event()
end_executor_event = threading.Event()
actionQueue.puppetExecutor = FakeExecutor(executor_started_event, end_executor_event)
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'status' : 'IN_PROGRESS',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
'roleCommand': "roleCommand",
      'commandType': "EXECUTION_COMMAND",
      'configurations': {'global' : {}},
'configurationTags':{'global' : { 'tag': 'v1' }}
}
actionQueue.put(command)
actionQueue.start()
executor_started_event.wait()
end_executor_event.set()
actionQueue.stop()
actionQueue.join()
after_start_result = actionQueue.result()
configname = os.path.join(tmpfile, 'config.json')
self.assertEquals(len(after_start_result['componentStatus']), 0)
self.assertEquals(len(after_start_result['reports']), 1)
self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
self.assertEquals(len(after_start_result['reports'][0]['configurationTags']), 1)
self.assertEquals(True, os.path.isfile(configname))
os.remove(configname)
@patch.object(ActionQueue, "executeCommand")
@patch.object(ActionQueue, "stopped")
def test_upgradeCommand_dispatching(self, stopped_method, executeCommand_method):
queue = ActionQueue(config = MagicMock())
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'roleCommand' : 'UPGRADE',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
      'commandType': "EXECUTION_COMMAND",
      'configurations': {'global' : {}},
'roleParams': {},
'commandParams' : {
'source_stack_version' : 'HDP-1.2.1',
'target_stack_version' : 'HDP-1.3.0'
}
}
result = [{
'exitcode' : 0,
'stdout' : 'abc',
'stderr' : 'def'
}]
executeCommand_method.return_value = result
stopped_method.side_effect = [False, False, True, True, True]
queue.stopped = stopped_method
queue.IDLE_SLEEP_TIME = 0.001
queue.put(command)
queue.run()
self.assertTrue(executeCommand_method.called)
self.assertEquals(queue.resultQueue.qsize(), 1)
returned_result = queue.resultQueue.get()
self.assertTrue(returned_result[1] is result[0])
@patch.object(UpgradeExecutor, "perform_stack_upgrade")
@patch.object(PuppetExecutor, "runCommand")
@patch.object(ActualConfigHandler, "findRunDir")
def test_upgradeCommand_executeCommand(self, action_conf_handler_findRunDir_method,
puppet_executor_run_command_method, perform_stack_upgrade_method):
queue = ActionQueue(config = MagicMock())
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'roleCommand' : 'UPGRADE',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
      'commandType': "EXECUTION_COMMAND",
      'configurations': {'global' : {}},
'roleParams': {},
'commandParams' : {
'source_stack_version' : 'HDP-1.2.1',
'target_stack_version' : 'HDP-1.3.0'
}
}
upgrade_method_return_value = {'exitcode' : 0,
'stdout' : 'abc',
'stderr' : 'def'}
perform_stack_upgrade_method.return_value = upgrade_method_return_value
result = queue.executeCommand(command)
expected_result = [{'actionId': 17,
'clusterName': 'clusterName',
'exitCode': 0,
'role': 'role',
'serviceName': 'serviceName',
'status': 'COMPLETED',
'stderr': 'def',
'stdout': 'abc',
'taskId': 'taskId',
'roleCommand': 'UPGRADE'}]
self.assertEquals(result, expected_result)
puppet_executor_run_command_method.return_value = {'exitcode' : 0,
'stdout' : 'abc',
'stderr' : 'def'}
command['roleCommand'] = 'START'
action_conf_handler_findRunDir_method.return_value = AmbariConfig().getConfig().get("stack", "installprefix")
expected_result[0]['configurationTags'] = None
expected_result[0]['roleCommand'] = 'START'
result = queue.executeCommand(command)
self.assertEquals(result, expected_result)
#--------------------------------------------
command['roleCommand'] = 'UPGRADE'
upgrade_method_return_value['exitcode'] = 1
upgrade_method_return_value['stdout'] = ''
upgrade_method_return_value['stderr'] = ''
perform_stack_upgrade_method.return_value = upgrade_method_return_value
result = queue.executeCommand(command)
expected_result[0]['roleCommand'] = 'UPGRADE'
del expected_result[0]['configurationTags']
expected_result[0]['exitCode'] = 1
expected_result[0]['stderr'] = 'None'
expected_result[0]['stdout'] = 'None'
expected_result[0]['status'] = 'FAILED'
self.assertEquals(result, expected_result)
@patch.object(ActionQueue, "stopped")
@patch.object(AQM.logger, "warn")
def test_run_unrecognized_command(self, logger_method, stopped_method):
config = AmbariConfig().getConfig()
actionQueue = ActionQueue(config)
command = {
"serviceName" : 'HDFS',
"commandType" : "SOME_UNRECOGNIZED_COMMAND",
"clusterName" : "",
"componentName" : "DATANODE",
'configurations':{}
}
actionQueue.commandQueue.put(command)
actionQueue.stopped = stopped_method
stopped_method.side_effect = [False, False, True, True, True]
actionQueue.IDLE_SLEEP_TIME = 0.001
actionQueue.run()
self.assertTrue(logger_method.call_args[0][0].startswith('Unrecognized command'))
@patch.object(StackVersionsFileHandler, "read_stack_version")
@patch.object(ActionQueue, "stopped")
def test_status_command_without_globals_section(self, stopped_method,
read_stack_version_method):
config = AmbariConfig().getConfig()
config.set('agent', 'prefix', TestStackVersionsFileHandler.dummyVersionsFile)
queue = ActionQueue(config)
statusCommand = {
"serviceName" : 'HDFS',
"commandType" : "STATUS_COMMAND",
"clusterName" : "",
"componentName" : "DATANODE",
'configurations':{}
}
queue.stopped = stopped_method
stopped_method.side_effect = [False, False, True, True, True]
    read_stack_version_method.return_value = "1.3.0"
queue.IDLE_SLEEP_TIME = 0.001
queue.put(statusCommand)
queue.run()
returned_result = queue.resultQueue.get()
returned_result[1]['status'] = 'INSTALLED' # Patch live value
self.assertEquals(returned_result, ('STATUS_COMMAND',
{'clusterName': '',
'componentName': 'DATANODE',
'msg': '',
'serviceName': 'HDFS',
'stackVersion': '1.3.0',
'status': 'INSTALLED'}))
class FakeExecutor():
def __init__(self, executor_started_event, end_executor_event):
self.executor_started_event = executor_started_event
self.end_executor_event = end_executor_event
def runCommand(self, command, tmpoutpath, tmperrpath):
        tmpout = open(tmpoutpath, 'w')
        tmpout.write("Dummy output")
        tmpout.flush()
        tmperr = open(tmperrpath, 'w')
        tmperr.write("Dummy err")
        tmperr.flush()
self.executor_started_event.set()
self.end_executor_event.wait()
return {
"exitcode": 0,
"stdout": "returned stdout",
"stderr": "returned stderr"
}
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon_lib import tables
from horizon_lib.utils.memoized import memoized # noqa
from openstack_horizon import api
from openstack_horizon.api import base
NOT_LAUNCHABLE_FORMATS = ['aki', 'ari']
class LaunchImage(tables.LinkAction):
name = "launch_image"
verbose_name = _("Launch")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
if get_image_type(datum) == "image":
source_type = "image_id"
else:
source_type = "instance_snapshot_id"
params = urlencode({"source_type": source_type,
"source_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
def allowed(self, request, image=None):
if image and image.container_format not in NOT_LAUNCHABLE_FORMATS:
return image.status in ("active",)
return False
class DeleteImage(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Image",
u"Delete Images",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Image",
u"Deleted Images",
count
)
policy_rules = (("image", "delete_image"),)
def allowed(self, request, image=None):
        # Protected images cannot be deleted.
if image and image.protected:
return False
if image:
return image.owner == request.user.tenant_id
# Return True to allow table-level bulk delete action to appear.
return True
def delete(self, request, obj_id):
api.glance.image_delete(request, obj_id)
class CreateImage(tables.LinkAction):
name = "create"
verbose_name = _("Create Image")
url = "horizon:project:images:images:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("image", "add_image"),)
class EditImage(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:project:images:images:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("image", "modify_image"),)
def allowed(self, request, image=None):
if image:
return image.status in ("active",) and \
image.owner == request.user.tenant_id
# We don't have bulk editing, so if there isn't an image that's
# authorized, don't allow the action.
return False
class CreateVolumeFromImage(tables.LinkAction):
name = "create_volume_from_image"
verbose_name = _("Create Volume")
url = "horizon:project:volumes:volumes:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("volume", "volume:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"image_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
def allowed(self, request, image=None):
if (image and image.container_format not in NOT_LAUNCHABLE_FORMATS
and base.is_service_enabled(request, 'volume')):
return image.status == "active"
return False
def filter_tenants():
return getattr(settings, 'IMAGES_LIST_FILTER_TENANTS', [])
@memoized
def filter_tenant_ids():
    # Return a list (not a map iterator) so the memoized result can be
    # iterated more than once.
    return [ft['tenant'] for ft in filter_tenants()]
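# For reference, a minimal sketch of the setting shape this code assumes
# (the names and ids below are hypothetical, not from the original project):
#
#     IMAGES_LIST_FILTER_TENANTS = [
#         {'text': 'Official', 'tenant': 'abc123', 'icon': 'icon-ok'},
#     ]
#
# Each entry needs 'text', 'tenant' and 'icon': categorize() matches images
# on 'tenant', and OwnerFilter copies it into the filter button's 'value'.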
class OwnerFilter(tables.FixedFilterAction):
def get_fixed_buttons(self):
def make_dict(text, tenant, icon):
return dict(text=text, value=tenant, icon=icon)
buttons = [make_dict(_('Project'), 'project', 'icon-home')]
for button_dict in filter_tenants():
new_dict = button_dict.copy()
new_dict['value'] = new_dict['tenant']
buttons.append(new_dict)
buttons.append(make_dict(_('Shared with Me'), 'shared', 'icon-share'))
buttons.append(make_dict(_('Public'), 'public', 'icon-fire'))
return buttons
def categorize(self, table, images):
user_tenant_id = table.request.user.tenant_id
tenants = defaultdict(list)
for im in images:
categories = get_image_categories(im, user_tenant_id)
for category in categories:
tenants[category].append(im)
return tenants
def get_image_categories(im, user_tenant_id):
categories = []
if im.is_public:
categories.append('public')
if im.owner == user_tenant_id:
categories.append('project')
elif im.owner in filter_tenant_ids():
categories.append(im.owner)
elif not im.is_public:
categories.append('shared')
return categories
def get_image_name(image):
return getattr(image, "name", None) or image.id
def get_image_type(image):
return getattr(image, "properties", {}).get("image_type", "image")
def get_format(image):
format = getattr(image, "disk_format", "")
    # The "disk_format" attribute can actually be set to None,
    # which will raise an error if you call upper() on it.
if format is not None:
return format.upper()
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, image_id):
image = api.glance.image_get(request, image_id)
return image
def load_cells(self, image=None):
super(UpdateRow, self).load_cells(image)
# Tag the row with the image category for client-side filtering.
image = self.datum
my_tenant_id = self.table.request.user.tenant_id
image_categories = get_image_categories(image, my_tenant_id)
for category in image_categories:
self.classes.append('category-' + category)
class ImagesTable(tables.DataTable):
STATUS_CHOICES = (
("active", True),
("saving", None),
("queued", None),
("pending_delete", None),
("killed", False),
("deleted", False),
)
name = tables.Column(get_image_name,
link=("horizon:project:images:images:detail"),
verbose_name=_("Image Name"))
image_type = tables.Column(get_image_type,
verbose_name=_("Type"),
filters=(filters.title,))
status = tables.Column("status",
filters=(filters.title,),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES)
public = tables.Column("is_public",
verbose_name=_("Public"),
empty_value=False,
filters=(filters.yesno, filters.capfirst))
protected = tables.Column("protected",
verbose_name=_("Protected"),
empty_value=False,
filters=(filters.yesno, filters.capfirst))
disk_format = tables.Column(get_format, verbose_name=_("Format"))
size = tables.Column("size",
filters=(filters.filesizeformat,),
verbose_name=_("Size"))
class Meta:
name = "images"
row_class = UpdateRow
status_columns = ["status"]
verbose_name = _("Images")
table_actions = (OwnerFilter, CreateImage, DeleteImage,)
row_actions = (LaunchImage, CreateVolumeFromImage,
EditImage, DeleteImage,)
pagination_param = "image_marker"
|
|
#
# PluginManager.py -- Simple class to manage plugins.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import threading
import traceback
from ginga.gw import Widgets
from ginga.misc import Bunch, Callback
from ginga.util.six.moves import filter
class PluginManagerError(Exception):
pass
class PluginManager(Callback.Callbacks):
"""
A PluginManager manages the start and stop of plugins.
"""
def __init__(self, logger, fitsview, ds, mm):
super(PluginManager, self).__init__()
self.logger = logger
self.fv = fitsview
self.ds = ds
self.mm = mm
self.lock = threading.RLock()
self.plugin = Bunch.caselessDict()
self.active = {}
self.focus = set([])
self.exclusive = set([])
for name in ('activate-plugin', 'deactivate-plugin',
'focus-plugin', 'unfocus-plugin'):
self.enable_callback(name)
def load_plugin(self, name, spec, chinfo=None):
try:
module = self.mm.get_module(spec.module)
className = spec.get('klass', spec.module)
klass = getattr(module, className)
if chinfo is None:
                # global plugin
obj = klass(self.fv)
fitsimage = None
else:
# local plugin
fitsimage = chinfo.fitsimage
obj = klass(self.fv, fitsimage)
# Prepare configuration for module. This becomes the p_info
# object referred to in later code.
opname = name.lower()
self.plugin[opname] = Bunch.Bunch(klass=klass, obj=obj,
widget=None, name=name,
is_toplevel=False,
spec=spec,
fitsimage=fitsimage,
chinfo=chinfo)
self.logger.info("Plugin '%s' loaded." % name)
except Exception as e:
self.logger.error("Failed to load plugin '%s': %s" % (
name, str(e)))
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception as e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
#raise PluginManagerError(e)
def reload_plugin(self, plname, chinfo=None):
p_info = self.get_plugin_info(plname)
return self.load_plugin(p_info.name, p_info.spec, chinfo=chinfo)
def has_plugin(self, plname):
plname = plname.lower()
return plname in self.plugin
def get_plugin_info(self, plname):
plname = plname.lower()
p_info = self.plugin[plname]
return p_info
def get_plugin(self, name):
p_info = self.get_plugin_info(name)
return p_info.obj
def get_names(self):
return list(self.plugin.keys())
def deactivate_focused(self):
names = self.get_focus()
for name in names:
self.deactivate(name)
def get_active(self):
return list(self.active.keys())
def is_active(self, key):
lname = key.lower()
return lname in self.get_active()
def get_focus(self):
return list(self.focus)
def has_focus(self, name):
lname = name.lower()
names = self.get_focus()
return lname in names
def get_info(self, name):
lname = name.lower()
return self.active[lname]
def activate(self, p_info, exclusive=True):
name = p_info.tabname
lname = p_info.name.lower()
if lname not in self.active:
bnch = Bunch.Bunch(pInfo=p_info, lblname=None, widget=None,
exclusive=exclusive)
if p_info.chinfo is not None:
# local plugin
tup = name.split(':')
bnch.lblname = ' ' + tup[0] + ':\n' + tup[1] + ' '
self.make_callback('activate-plugin', bnch)
else:
# global plugin
bnch.exclusive = False
self.active[lname] = bnch
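            # Exclusive plugins are tracked in a separate registry;
            # set_focus() uses it to defocus competing plugins.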
if bnch.exclusive:
self.exclusive.add(lname)
def deactivate(self, name):
self.logger.debug("deactivating %s" % (name))
lname = name.lower()
if lname in self.focus:
self.clear_focus(lname)
if lname in self.active:
self.logger.debug("removing from task bar: %s" % (lname))
bnch = self.active[lname]
self.make_callback('deactivate-plugin', bnch)
del self.active[lname]
try:
self.stop_plugin(bnch.pInfo)
except Exception as e:
self.logger.error("Error deactivating plugin: %s" % (str(e)))
# TODO: log traceback
# Set focus to another plugin if one is running
active = list(self.active.keys())
if len(active) > 0:
name = active[0]
self.logger.debug("focusing: %s" % (name))
self.set_focus(name)
def set_focus(self, name):
self.logger.debug("Focusing plugin '%s'" % (name))
lname = name.lower()
bnch = self.active[lname]
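        # Exclusive plugins cannot share focus: drop focus from any other
        # exclusive plugin before granting it to this one.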
if bnch.exclusive:
self.logger.debug("focus=%s exclusive=%s" % (
self.focus, self.exclusive))
defocus = list(filter(lambda x: x in self.exclusive, self.focus))
self.logger.debug("defocus: %s" % (str(defocus)))
for xname in defocus:
self.clear_focus(xname)
p_info = bnch.pInfo
        # If this is a local plugin, raise the channel associated with the
        # plugin
if p_info.chinfo is not None:
itab = p_info.chinfo.name
self.logger.debug("raising channel tab %s" % (itab))
self.ds.raise_tab(itab)
self.logger.debug("resuming plugin %s" % (name))
p_info.obj.resume()
self.make_callback('focus-plugin', bnch)
self.focus.add(lname)
if p_info.widget is not None:
self.logger.debug("raising plugin tab %s" % (p_info.tabname))
if p_info.is_toplevel:
p_info.widget.raise_()
else:
self.ds.raise_tab(p_info.tabname)
def clear_focus(self, name):
self.logger.debug("Unfocusing plugin '%s'" % (name))
lname = name.lower()
bnch = self.active[lname]
p_info = bnch.pInfo
try:
self.focus.remove(lname)
if p_info.chinfo is not None:
p_info.obj.pause()
self.make_callback('unfocus-plugin', bnch)
        except Exception:
            # plugin was not focused, or pause()/callback failed; either
            # way there is nothing more to clean up here
            pass
def start_plugin(self, chname, opname, alreadyOpenOk=False):
return self.start_plugin_future(chname, opname, None,
alreadyOpenOk=alreadyOpenOk)
def start_plugin_future(self, chname, opname, future,
alreadyOpenOk=True):
try:
p_info = self.get_plugin_info(opname)
except KeyError:
self.fv.show_error("No plugin information for plugin '%s'" % (
opname))
return
if chname is not None:
# local plugin
plname = chname.upper() + ': ' + p_info.name
else:
# global plugin
plname = p_info.name
lname = p_info.name.lower()
if lname in self.active:
if alreadyOpenOk:
self.set_focus(p_info.name)
return
raise PluginManagerError("Plugin %s is already active." % (
plname))
# Raise tab with GUI
p_info.tabname = p_info.spec.get('tab', plname)
vbox = None
had_error = False
try:
if hasattr(p_info.obj, 'build_gui'):
vbox = Widgets.VBox()
in_ws = p_info.spec.ws
if in_ws.startswith('in:'):
# TODO: how to set this size appropriately
# Which plugins are actually using this attribute?
vbox.size = (400, 900)
else:
# attach size of workspace to container so plugin
# can plan for how to configure itself
wd, ht = self.ds.get_ws_size(in_ws)
vbox.size = (wd, ht)
if future:
p_info.obj.build_gui(vbox, future=future)
else:
p_info.obj.build_gui(vbox)
        except Exception as e:
            # remember the failure so we don't try to start a half-built
            # plugin below
            had_error = True
            errstr = "Plugin UI failed to initialize: %s" % (
                str(e))
self.logger.error(errstr)
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception as e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
self.plugin_build_error(vbox, errstr + '\n' + tb_str)
#raise PluginManagerError(e)
if not had_error:
try:
if future:
p_info.obj.start(future=future)
else:
p_info.obj.start()
except Exception as e:
had_error = True
errstr = "Plugin failed to start correctly: %s" % (
str(e))
self.logger.error(errstr)
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception as e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
self.plugin_build_error(vbox, errstr + '\n' + tb_str)
#raise PluginManagerError(e)
if vbox is not None:
self.finish_gui(p_info, vbox)
self.activate(p_info)
self.set_focus(p_info.name)
else:
            # If this is a local plugin, raise the channel associated with
            # the plugin
if p_info.chinfo is not None:
itab = p_info.chinfo.name
self.ds.raise_tab(itab)
def stop_plugin(self, p_info):
self.logger.debug("stopping plugin %s" % (str(p_info)))
        wasError = False
        err = None
try:
p_info.obj.stop()
        except Exception as e:
            wasError = True
            # keep our own reference: Python 3 unbinds 'e' when the except
            # block ends, and we re-raise it below
            err = e
            self.logger.error("Plugin failed to stop correctly: %s" % (
                str(e)))
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
self.logger.error("Traceback information unavailable.")
if p_info.widget is not None:
self.dispose_gui(p_info)
self.ds.remove_tab(p_info.tabname)
        if wasError:
            raise PluginManagerError(err)
    def stop_all_plugins(self):
        for plugin_name in self.plugin:
            try:
                # stop_plugin() expects the plugin info bunch, not its name
                self.stop_plugin(self.get_plugin_info(plugin_name))
            except Exception as e:
                self.logger.error('Exception while calling stop for plugin '
                                  '%s: %s' % (plugin_name, str(e)))
        return True
def plugin_build_error(self, box, text):
textw = Widgets.TextArea(editable=False, wrap=True)
textw.append_text(text)
box.add_widget(textw, stretch=1)
def finish_gui(self, p_info, vbox):
# add container to workspace
# TODO: how to figure out the appropriate size for top-levels?
wd, ht = vbox.get_size()
try:
in_ws = p_info.spec.ws
if in_ws == 'in:toplevel':
topw = vbox.get_app().make_window()
topw.add_callback('close',
lambda *args: self.deactivate(p_info.name))
topw.resize(wd, ht)
topw.set_widget(vbox)
p_info.widget = topw
p_info.is_toplevel = True
topw.show()
elif in_ws == 'in:dialog':
dialog = Widgets.Dialog(title=p_info.name,
flags=0,
buttons=[],
parent=self.fv.w.root)
dialog.resize(wd, ht)
box = dialog.get_content_area()
box.add_widget(vbox, stretch=1)
p_info.widget = dialog
p_info.is_toplevel = True
# TODO: need to add callback to remove from Desktop
# dialog list?
self.ds.show_dialog(dialog)
else:
self.ds.add_tab(in_ws, vbox, 2, p_info.tabname, p_info.tabname)
ws_w = self.ds.get_nb(in_ws)
ws_w.add_callback('page-switch', self.tab_switched_cb)
p_info.widget = vbox
p_info.is_toplevel = False
except Exception as e:
self.fv.show_error("Error finishing plugin UI for '%s': %s" % (
p_info.name, str(e)))
def tab_switched_cb(self, tab_w, widget):
# A tab in a workspace in which we started a plugin has been
# raised. Check for this widget and focus the plugin
title = widget.extdata.get('tab_title', None)
if title is not None:
# is this a local plugin tab?
if ':' in title:
chname, plname = title.split(':')
plname = plname.strip()
try:
info = self.get_info(plname)
except KeyError:
                    # no registered plugin by that name; ignore
return
p_info = info.pInfo
# important: make sure channel matches ours!
if p_info.tabname == title:
if self.is_active(p_info.name):
if not self.has_focus(p_info.name):
self.set_focus(p_info.name)
elif p_info.chinfo is not None:
# raise the channel associated with the plugin
itab = p_info.chinfo.name
self.ds.raise_tab(itab)
def dispose_gui(self, p_info):
self.logger.debug("disposing of gui")
vbox = p_info.widget
p_info.widget = None
vbox.hide()
vbox.delete()
########################################################
### NON-PEP8 PREDECESSORS: TO BE DEPRECATED
loadPlugin = load_plugin
reloadPlugin = reload_plugin
getPluginInfo = get_plugin_info
getPlugin = get_plugin
getNames = get_names
#END
|
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
       training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
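            # majority vote among the k neighbors for this output column,
            # weighted by inverse distance when distance weights are in use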
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
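            # uniform weighting: every neighbor casts one equal vote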
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
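                # idx holds, for every sample, the class index predicted by
                # neighbor i; fancy indexing adds that neighbor's weight to
                # the matching class column of each row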
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    outlier_label : int, optional (default = None)
        Label given to outlier samples (samples with no neighbors within the
        given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
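            # substitute a dummy distance for outliers so that the weight
            # computation below does not fail on their empty neighbor lists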
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
|