repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
Lx37/pyqtgraph | pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate_pyside.py | 50 | 12205 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate.ui'
#
# Created: Mon Dec 23 10:10:52 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(481, 840)
self.averageGroup = QtGui.QGroupBox(Form)
self.averageGroup.setGeometry(QtCore.QRect(0, 640, 242, 182))
self.averageGroup.setCheckable(True)
self.averageGroup.setChecked(False)
self.averageGroup.setObjectName("averageGroup")
self.gridLayout_5 = QtGui.QGridLayout(self.averageGroup)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setSpacing(0)
self.gridLayout_5.setObjectName("gridLayout_5")
self.avgParamList = QtGui.QListWidget(self.averageGroup)
self.avgParamList.setObjectName("avgParamList")
self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1)
self.decimateGroup = QtGui.QFrame(Form)
self.decimateGroup.setGeometry(QtCore.QRect(10, 140, 191, 171))
self.decimateGroup.setObjectName("decimateGroup")
self.gridLayout_4 = QtGui.QGridLayout(self.decimateGroup)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setSpacing(0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.clipToViewCheck = QtGui.QCheckBox(self.decimateGroup)
self.clipToViewCheck.setObjectName("clipToViewCheck")
self.gridLayout_4.addWidget(self.clipToViewCheck, 7, 0, 1, 3)
self.maxTracesCheck = QtGui.QCheckBox(self.decimateGroup)
self.maxTracesCheck.setObjectName("maxTracesCheck")
self.gridLayout_4.addWidget(self.maxTracesCheck, 8, 0, 1, 2)
self.downsampleCheck = QtGui.QCheckBox(self.decimateGroup)
self.downsampleCheck.setObjectName("downsampleCheck")
self.gridLayout_4.addWidget(self.downsampleCheck, 0, 0, 1, 3)
self.peakRadio = QtGui.QRadioButton(self.decimateGroup)
self.peakRadio.setChecked(True)
self.peakRadio.setObjectName("peakRadio")
self.gridLayout_4.addWidget(self.peakRadio, 6, 1, 1, 2)
self.maxTracesSpin = QtGui.QSpinBox(self.decimateGroup)
self.maxTracesSpin.setObjectName("maxTracesSpin")
self.gridLayout_4.addWidget(self.maxTracesSpin, 8, 2, 1, 1)
self.forgetTracesCheck = QtGui.QCheckBox(self.decimateGroup)
self.forgetTracesCheck.setObjectName("forgetTracesCheck")
self.gridLayout_4.addWidget(self.forgetTracesCheck, 9, 0, 1, 3)
self.meanRadio = QtGui.QRadioButton(self.decimateGroup)
self.meanRadio.setObjectName("meanRadio")
self.gridLayout_4.addWidget(self.meanRadio, 3, 1, 1, 2)
self.subsampleRadio = QtGui.QRadioButton(self.decimateGroup)
self.subsampleRadio.setObjectName("subsampleRadio")
self.gridLayout_4.addWidget(self.subsampleRadio, 2, 1, 1, 2)
self.autoDownsampleCheck = QtGui.QCheckBox(self.decimateGroup)
self.autoDownsampleCheck.setChecked(True)
self.autoDownsampleCheck.setObjectName("autoDownsampleCheck")
self.gridLayout_4.addWidget(self.autoDownsampleCheck, 1, 2, 1, 1)
spacerItem = QtGui.QSpacerItem(30, 20, QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem, 2, 0, 1, 1)
self.downsampleSpin = QtGui.QSpinBox(self.decimateGroup)
self.downsampleSpin.setMinimum(1)
self.downsampleSpin.setMaximum(100000)
self.downsampleSpin.setProperty("value", 1)
self.downsampleSpin.setObjectName("downsampleSpin")
self.gridLayout_4.addWidget(self.downsampleSpin, 1, 1, 1, 1)
self.transformGroup = QtGui.QFrame(Form)
self.transformGroup.setGeometry(QtCore.QRect(0, 0, 154, 79))
self.transformGroup.setObjectName("transformGroup")
self.gridLayout = QtGui.QGridLayout(self.transformGroup)
self.gridLayout.setObjectName("gridLayout")
self.fftCheck = QtGui.QCheckBox(self.transformGroup)
self.fftCheck.setObjectName("fftCheck")
self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
self.logXCheck = QtGui.QCheckBox(self.transformGroup)
self.logXCheck.setObjectName("logXCheck")
self.gridLayout.addWidget(self.logXCheck, 1, 0, 1, 1)
self.logYCheck = QtGui.QCheckBox(self.transformGroup)
self.logYCheck.setObjectName("logYCheck")
self.gridLayout.addWidget(self.logYCheck, 2, 0, 1, 1)
self.pointsGroup = QtGui.QGroupBox(Form)
self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
self.pointsGroup.setCheckable(True)
self.pointsGroup.setObjectName("pointsGroup")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.pointsGroup)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.autoPointsCheck = QtGui.QCheckBox(self.pointsGroup)
self.autoPointsCheck.setChecked(True)
self.autoPointsCheck.setObjectName("autoPointsCheck")
self.verticalLayout_5.addWidget(self.autoPointsCheck)
self.gridGroup = QtGui.QFrame(Form)
self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
self.gridGroup.setObjectName("gridGroup")
self.gridLayout_2 = QtGui.QGridLayout(self.gridGroup)
self.gridLayout_2.setObjectName("gridLayout_2")
self.xGridCheck = QtGui.QCheckBox(self.gridGroup)
self.xGridCheck.setObjectName("xGridCheck")
self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
self.yGridCheck = QtGui.QCheckBox(self.gridGroup)
self.yGridCheck.setObjectName("yGridCheck")
self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
self.gridAlphaSlider = QtGui.QSlider(self.gridGroup)
self.gridAlphaSlider.setMaximum(255)
self.gridAlphaSlider.setProperty("value", 128)
self.gridAlphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.gridAlphaSlider.setObjectName("gridAlphaSlider")
self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
self.label = QtGui.QLabel(self.gridGroup)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
self.alphaGroup = QtGui.QGroupBox(Form)
self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
self.alphaGroup.setCheckable(True)
self.alphaGroup.setObjectName("alphaGroup")
self.horizontalLayout = QtGui.QHBoxLayout(self.alphaGroup)
self.horizontalLayout.setObjectName("horizontalLayout")
self.autoAlphaCheck = QtGui.QCheckBox(self.alphaGroup)
self.autoAlphaCheck.setChecked(False)
self.autoAlphaCheck.setObjectName("autoAlphaCheck")
self.horizontalLayout.addWidget(self.autoAlphaCheck)
self.alphaSlider = QtGui.QSlider(self.alphaGroup)
self.alphaSlider.setMaximum(1000)
self.alphaSlider.setProperty("value", 1000)
self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.alphaSlider.setObjectName("alphaSlider")
self.horizontalLayout.addWidget(self.alphaSlider)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.averageGroup.setToolTip(QtGui.QApplication.translate("Form", "Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available).", None, QtGui.QApplication.UnicodeUTF8))
self.averageGroup.setTitle(QtGui.QApplication.translate("Form", "Average", None, QtGui.QApplication.UnicodeUTF8))
self.clipToViewCheck.setToolTip(QtGui.QApplication.translate("Form", "Plot only the portion of each curve that is visible. This assumes X values are uniformly spaced.", None, QtGui.QApplication.UnicodeUTF8))
self.clipToViewCheck.setText(QtGui.QApplication.translate("Form", "Clip to View", None, QtGui.QApplication.UnicodeUTF8))
self.maxTracesCheck.setToolTip(QtGui.QApplication.translate("Form", "If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed.", None, QtGui.QApplication.UnicodeUTF8))
self.maxTracesCheck.setText(QtGui.QApplication.translate("Form", "Max Traces:", None, QtGui.QApplication.UnicodeUTF8))
self.downsampleCheck.setText(QtGui.QApplication.translate("Form", "Downsample", None, QtGui.QApplication.UnicodeUTF8))
self.peakRadio.setToolTip(QtGui.QApplication.translate("Form", "Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower.", None, QtGui.QApplication.UnicodeUTF8))
self.peakRadio.setText(QtGui.QApplication.translate("Form", "Peak", None, QtGui.QApplication.UnicodeUTF8))
self.maxTracesSpin.setToolTip(QtGui.QApplication.translate("Form", "If multiple curves are displayed in this plot, check \"Max Traces\" and set this value to limit the number of traces that are displayed.", None, QtGui.QApplication.UnicodeUTF8))
self.forgetTracesCheck.setToolTip(QtGui.QApplication.translate("Form", "If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden).", None, QtGui.QApplication.UnicodeUTF8))
self.forgetTracesCheck.setText(QtGui.QApplication.translate("Form", "Forget hidden traces", None, QtGui.QApplication.UnicodeUTF8))
self.meanRadio.setToolTip(QtGui.QApplication.translate("Form", "Downsample by taking the mean of N samples.", None, QtGui.QApplication.UnicodeUTF8))
self.meanRadio.setText(QtGui.QApplication.translate("Form", "Mean", None, QtGui.QApplication.UnicodeUTF8))
self.subsampleRadio.setToolTip(QtGui.QApplication.translate("Form", "Downsample by taking the first of N samples. This method is fastest and least accurate.", None, QtGui.QApplication.UnicodeUTF8))
self.subsampleRadio.setText(QtGui.QApplication.translate("Form", "Subsample", None, QtGui.QApplication.UnicodeUTF8))
self.autoDownsampleCheck.setToolTip(QtGui.QApplication.translate("Form", "Automatically downsample data based on the visible range. This assumes X values are uniformly spaced.", None, QtGui.QApplication.UnicodeUTF8))
self.autoDownsampleCheck.setText(QtGui.QApplication.translate("Form", "Auto", None, QtGui.QApplication.UnicodeUTF8))
self.downsampleSpin.setToolTip(QtGui.QApplication.translate("Form", "Downsample data before plotting. (plot every Nth sample)", None, QtGui.QApplication.UnicodeUTF8))
self.downsampleSpin.setSuffix(QtGui.QApplication.translate("Form", "x", None, QtGui.QApplication.UnicodeUTF8))
self.fftCheck.setText(QtGui.QApplication.translate("Form", "Power Spectrum (FFT)", None, QtGui.QApplication.UnicodeUTF8))
self.logXCheck.setText(QtGui.QApplication.translate("Form", "Log X", None, QtGui.QApplication.UnicodeUTF8))
self.logYCheck.setText(QtGui.QApplication.translate("Form", "Log Y", None, QtGui.QApplication.UnicodeUTF8))
self.pointsGroup.setTitle(QtGui.QApplication.translate("Form", "Points", None, QtGui.QApplication.UnicodeUTF8))
self.autoPointsCheck.setText(QtGui.QApplication.translate("Form", "Auto", None, QtGui.QApplication.UnicodeUTF8))
self.xGridCheck.setText(QtGui.QApplication.translate("Form", "Show X Grid", None, QtGui.QApplication.UnicodeUTF8))
self.yGridCheck.setText(QtGui.QApplication.translate("Form", "Show Y Grid", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Opacity", None, QtGui.QApplication.UnicodeUTF8))
self.alphaGroup.setTitle(QtGui.QApplication.translate("Form", "Alpha", None, QtGui.QApplication.UnicodeUTF8))
self.autoAlphaCheck.setText(QtGui.QApplication.translate("Form", "Auto", None, QtGui.QApplication.UnicodeUTF8))
| mit |
tensorflow/agents | tf_agents/networks/network_test.py | 1 | 14557 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents import specs
from tf_agents.distributions import utils as distribution_utils
from tf_agents.keras_layers import rnn_wrapper
from tf_agents.networks import network
from tf_agents.utils import common
tfd = tfp.distributions
class BaseNetwork(network.Network):
# pylint: disable=useless-super-delegation
def __init__(self, v1, **kwargs):
super(BaseNetwork, self).__init__(v1, **kwargs)
# pylint: enable=useless-super-delegation
class NetworkNoExtraKeywordsInCallSignature(network.Network):
def call(self, inputs):
return inputs, ()
class MockNetwork(BaseNetwork):
def __init__(self, param1, param2, kwarg1=2, kwarg2=3):
self.param1 = param1
self.param2 = param2
self.kwarg1 = kwarg1
self.kwarg2 = kwarg2
super(MockNetwork, self).__init__(param1,
state_spec=(),
name='mock')
def build(self, *args, **kwargs):
self.var1 = common.create_variable(
'variable', dtype=tf.float32, trainable=False)
self.var2 = common.create_variable(
'trainable_variable', dtype=tf.float32, trainable=True)
def call(self, observations, step_type, network_state=None):
return self.var1 + self.var2 + observations, ()
class NoInitNetwork(MockNetwork):
pass
class GnarlyNetwork(network.Network):
def __init__(self):
k1 = tf.keras.Sequential([
tf.keras.layers.Dense(
32,
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=1e-5, l2=1e-4),
bias_regularizer=tf.keras.regularizers.l2(1e-4),
),
tf.keras.layers.Dense(64),
tf.keras.layers.BatchNormalization()
], name='a')
k2 = tf.keras.layers.Dense(12, name='b')
super(GnarlyNetwork, self).__init__(
input_tensor_spec=tf.TensorSpec(dtype=tf.float32, shape=(2,)),
state_spec=(), name=None)
self._k1 = k1
self._k2 = k2
def call(self, observations, step_type, network_state=None):
return self._k2(self._k1(observations)), network_state
class NetworkTest(tf.test.TestCase):
def test_copy_works(self):
network1 = MockNetwork((), 1)
network2 = network1.copy()
self.assertNotEqual(network1, network2)
self.assertEqual((), network2.param1)
self.assertEqual(1, network2.param2)
self.assertEqual(2, network2.kwarg1)
self.assertEqual(3, network2.kwarg2)
def test_noinit_copy_works(self):
network1 = NoInitNetwork((), 1)
network2 = network1.copy()
self.assertNotEqual(network1, network2)
self.assertEqual((), network2.param1)
self.assertEqual(1, network2.param2)
self.assertEqual(2, network2.kwarg1)
self.assertEqual(3, network2.kwarg2)
def test_too_many_args_raises_appropriate_error(self):
with self.assertRaisesRegexp(TypeError, '__init__.*given'):
# pylint: disable=too-many-function-args
MockNetwork(0, 1, 2, 3, 4, 5, 6) # pytype: disable=wrong-arg-count
def test_assert_input_spec(self):
spec = specs.TensorSpec([], tf.int32, 'action')
net = MockNetwork(spec, 1)
with self.assertRaises(ValueError):
net((1, 2), 2)
def test_create_variables(self):
observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
action_spec = specs.TensorSpec([2], tf.float32, 'action')
net = MockNetwork(observation_spec, action_spec)
self.assertFalse(net.built)
with self.assertRaises(ValueError):
net.variables # pylint: disable=pointless-statement
output_spec = net.create_variables()
# MockNetwork adds some variables to observation, which has shape [bs, 1]
self.assertEqual(output_spec, tf.TensorSpec([1], dtype=tf.float32))
self.assertTrue(net.built)
self.assertLen(net.variables, 2)
self.assertLen(net.trainable_variables, 1)
def test_create_variables_distribution(self):
observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
action_spec = specs.TensorSpec([2], tf.float32, 'action')
net = MockNetwork(observation_spec, action_spec)
self.assertFalse(net.built)
with self.assertRaises(ValueError):
net.variables # pylint: disable=pointless-statement
output_spec = net.create_variables()
# MockNetwork adds some variables to observation, which has shape [bs, 1]
self.assertEqual(output_spec, tf.TensorSpec([1], dtype=tf.float32))
self.assertTrue(net.built)
self.assertLen(net.variables, 2)
self.assertLen(net.trainable_variables, 1)
def test_summary_no_exception(self):
"""Tests that Network.summary() does not throw an exception."""
observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
action_spec = specs.TensorSpec([2], tf.float32, 'action')
net = MockNetwork(observation_spec, action_spec)
net.create_variables()
net.summary()
def test_access_deep_layers_weights_and_losses(self):
net = GnarlyNetwork()
net.create_variables(training=True)
layer_names = sorted([l.name for l in net.layers])
losses = net.losses
trainable_weight_names = sorted([w.name for w in net.trainable_weights])
non_trainable_weight_names = sorted(
[w.name for w in net.non_trainable_weights])
self.assertEqual(layer_names, ['a', 'b'])
self.assertLen(losses, 2)
for loss in losses:
self.assertEqual(loss.dtype, tf.float32)
self.assertEqual(loss.shape, ())
self.assertEqual(
[x.lstrip('gnarly_network/') for x in trainable_weight_names],
['batch_normalization/beta:0',
'batch_normalization/gamma:0',
'dense/bias:0',
'dense/kernel:0',
'dense_1/bias:0',
'dense_1/kernel:0',
'b/bias:0',
'b/kernel:0'])
self.assertEqual(
[x.lstrip('gnarly_network/') for x in non_trainable_weight_names],
['batch_normalization/moving_mean:0',
'batch_normalization/moving_variance:0'])
def test_dont_complain_if_no_network_state_in_call_signature(self):
net = NetworkNoExtraKeywordsInCallSignature()
out, _ = net(1, network_state=None) # This shouldn't complain.
self.assertAllEqual(out, 1)
out, _ = net(1, step_type=3, network_state=None) # This shouldn't complain.
self.assertAllEqual(out, 1)
class CreateVariablesTest(parameterized.TestCase, tf.test.TestCase):
def testNetworkCreate(self):
observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
action_spec = specs.TensorSpec([2], tf.float32, 'action')
net = MockNetwork(observation_spec, action_spec)
self.assertFalse(net.built)
with self.assertRaises(ValueError):
net.variables # pylint: disable=pointless-statement
output_spec = network.create_variables(net)
# MockNetwork adds some variables to observation, which has shape [bs, 1]
self.assertEqual(output_spec, tf.TensorSpec([1], dtype=tf.float32))
self.assertTrue(net.built)
self.assertLen(net.variables, 2)
self.assertLen(net.trainable_variables, 1)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
(
'Dense',
lambda: tf.keras.layers.Dense(3),
tf.TensorSpec((5,), tf.float32), # input_spec
tf.TensorSpec((3,), tf.float32), # expected_output_spec
(), # expected_state_spec
),
(
'LSTMCell',
lambda: tf.keras.layers.LSTMCell(3),
tf.TensorSpec((5,), tf.float32),
tf.TensorSpec((3,), tf.float32),
[tf.TensorSpec((3,), tf.float32),
tf.TensorSpec((3,), tf.float32)],
),
(
'LSTMCellInRNN',
lambda: rnn_wrapper.RNNWrapper(
tf.keras.layers.RNN(
tf.keras.layers.LSTMCell(3),
return_state=True,
return_sequences=True)
),
tf.TensorSpec((5,), tf.float32),
tf.TensorSpec((3,), tf.float32),
[tf.TensorSpec((3,), tf.float32),
tf.TensorSpec((3,), tf.float32)],
),
(
'LSTM',
lambda: rnn_wrapper.RNNWrapper(
tf.keras.layers.LSTM(
3,
return_state=True,
return_sequences=True)
),
tf.TensorSpec((5,), tf.float32),
tf.TensorSpec((3,), tf.float32),
[tf.TensorSpec((3,), tf.float32),
tf.TensorSpec((3,), tf.float32)],
),
(
'TimeDistributed',
lambda: tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(3)),
tf.TensorSpec((5,), tf.float32),
tf.TensorSpec((3,), tf.float32),
()
),
(
'Conv2D',
lambda: tf.keras.layers.Conv2D(2, 3),
tf.TensorSpec((28, 28, 5), tf.float32),
tf.TensorSpec((26, 26, 2), tf.float32),
()
),
(
'SequentialOfDense',
lambda: tf.keras.Sequential([tf.keras.layers.Dense(3)] * 2),
tf.TensorSpec((5,), tf.float32),
tf.TensorSpec((3,), tf.float32),
()
),
(
'NormalDistribution',
lambda: tf.keras.Sequential(
[tf.keras.layers.Dense(3),
tf.keras.layers.Lambda(
lambda x: tfd.Normal(loc=x, scale=x**2))]),
tf.TensorSpec((5,), tf.float32),
distribution_utils.DistributionSpecV2(
event_shape=tf.TensorShape(()),
dtype=tf.float32,
parameters=distribution_utils.Params(
type_=tfd.Normal,
params=dict(
loc=tf.TensorSpec((3,), tf.float32),
scale=tf.TensorSpec((3,), tf.float32),
))),
()
),
)
# pylint: enable=g-long-lambda
def testKerasLayerCreate(self, layer_fn, input_spec, expected_output_spec,
expected_state_spec):
layer = layer_fn()
with self.assertRaisesRegex(ValueError, 'an input_spec is required'):
network.create_variables(layer)
output_spec = network.create_variables(layer, input_spec)
self.assertTrue(layer.built)
self.assertEqual(
output_spec, expected_output_spec,
'\n{}\nvs.\n{}\n'.format(output_spec, expected_output_spec))
output_spec_2 = network.create_variables(layer, input_spec)
self.assertEqual(output_spec_2, expected_output_spec)
state_spec = getattr(layer, '_network_state_spec', None)
self.assertEqual(state_spec, expected_state_spec)
class MockStateFullNetwork(BaseNetwork):
def __init__(self, input_spec, state_spec):
super(MockStateFullNetwork, self).__init__(input_spec,
state_spec=state_spec,
name='statefullmock')
def build(self, *args, **kwargs):
self.var = common.create_variable(
'trainable_variable', dtype=tf.float32, trainable=True)
self.state = common.create_variable(
'state', dtype=tf.float32, trainable=False)
def call(self, observations, network_state=None):
return self.var + observations, self.state + network_state
class StateFullNetworkTest(tf.test.TestCase):
def test_specs(self):
input_spec = tf.TensorSpec([], tf.float32, 'inputs')
state_spec = tf.TensorSpec([], tf.float32, 'state')
net = MockStateFullNetwork(input_spec, state_spec)
self.assertEqual(input_spec, net.input_tensor_spec)
self.assertEqual(state_spec, net.state_spec)
def test_empty_state(self):
input_spec = tf.TensorSpec([], tf.float32, 'inputs')
net = MockStateFullNetwork(input_spec, ())
self.assertEqual(input_spec, net.input_tensor_spec)
self.assertEqual((), net.state_spec)
net.create_variables()
def test_wrong_new_state(self):
input_spec = tf.TensorSpec([], tf.float32, 'inputs')
net = MockStateFullNetwork(input_spec, ((), ()))
self.assertEqual(input_spec, net.input_tensor_spec)
self.assertEqual(((), ()), net.state_spec)
with self.assertRaises(ValueError):
net.create_variables()
def test_copy_works(self):
input_spec = tf.TensorSpec([], tf.float32, 'inputs')
state_spec = tf.TensorSpec([], tf.float32, 'state')
network1 = MockStateFullNetwork(input_spec, state_spec)
network2 = network1.copy()
self.assertNotEqual(network1, network2)
self.assertEqual(network1.input_tensor_spec, network2.input_tensor_spec)
self.assertEqual(network1.state_spec, network2.state_spec)
def test_create_variables(self):
observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
action_spec = specs.TensorSpec([1], tf.float32, 'action')
input_spec = (observation_spec, action_spec)
state_spec = tf.TensorSpec([], tf.float32, 'state')
net = MockStateFullNetwork(input_spec, state_spec)
self.assertFalse(net.built)
with self.assertRaises(ValueError):
net.variables # pylint: disable=pointless-statement
output_spec = net.create_variables()
self.assertEqual(output_spec, tf.TensorSpec([1, 1], dtype=tf.float32))
self.assertTrue(net.built)
self.assertLen(net.variables, 2)
self.assertLen(net.trainable_variables, 1)
def test_call(self):
observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
state_spec = tf.TensorSpec([], tf.float32, 'state')
net = MockStateFullNetwork(observation_spec, state_spec)
initial_state = net._get_initial_state(batch_size=1)
observation = tf.constant([1.0])
outputs, new_state = net(observation, initial_state)
# Only needed for TF1
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(outputs), 1.0)
self.assertEqual(self.evaluate(new_state), 0.0)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
Eficent/odoomrp-wip | sale_order_recalculate_prices/__init__.py | 61 | 1629 | # -*- encoding: utf-8 -*-
##############################################################################
# #
# OpenERP, Open Source Management Solution. #
# #
# @author Carlos Sánchez Cifuentes <csanchez@grupovermon.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from . import models
| agpl-3.0 |
ZhangChuann/awesome-python-webapp | www/models.py | 1 | 1538 | #!/usr/bin/env python
# coding=utf-8
__author__ = 'ZHang Chuan'
'''
Models for user, blog, comment.
'''
import time, uuid
from transwarp.db import next_id
from transwarp.orm import Model, StringField, BooleanField, FloatField, TextField
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(updatable=False, ddl='varchar(50)')
password = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(updatable=False, default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
user_id = StringField(updatable=False, ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(200)')
content = TextField()
created_at = FloatField(updatable=False, default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(updatable=False, ddl='varchar(50)')
user_id = StringField(updatable=False, ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
content = TextField()
created_at = FloatField(updatable=False, default=time.time)
| gpl-3.0 |
marissazhou/django | django/utils/six.py | 408 | 30194 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
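# A minimal usage sketch for the six.moves machinery registered above; the bare
# "six" import prefix is an assumption (for this vendored copy it would be
# django.utils.six), and the names resolve lazily through the meta path importer:
#
#   from six.moves import range, zip
#   from six.moves.urllib.parse import urlparse
#
#   # Each name maps to the correct stdlib location on both Python 2 and 3.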
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
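# A minimal usage sketch for with_metaclass, assuming a hypothetical metaclass
# named Meta; the dummy metaclass created above is used for one level of class
# instantiation and then replaced by the real one:
#
#   class Meta(type):
#       pass
#
#   class Base(with_metaclass(Meta, object)):
#       pass
#
#   # type(Base) is Meta on both Python 2 and Python 3.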
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
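# A minimal usage sketch for add_metaclass, assuming the same hypothetical Meta
# metaclass; the decorator rebuilds the class through the metaclass without any
# __metaclass__ attribute or metaclass= keyword:
#
#   @add_metaclass(Meta)
#   class Widget(object):
#       pass
#
#   # type(Widget) is Meta.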
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
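# A minimal usage sketch, assuming a hypothetical Tag class; the decorator only
# rewrites the methods under Python 2 and is a no-op under Python 3:
#
#   @python_2_unicode_compatible
#   class Tag(object):
#       def __init__(self, name):
#           self.name = name
#       def __str__(self):
#           return self.name  # text on py3, encoded to utf-8 for __str__ on py2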
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
if PY3:
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview)
| bsd-3-clause |
dawran6/zulip | zerver/views/realm_emoji.py | 12 | 1372 | from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text
from zerver.models import UserProfile
from zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name, check_valid_emoji
from zerver.lib.request import JsonableError, REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji
def list_emoji(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
# We don't call check_emoji_admin here because the list of realm
# emoji is public.
return json_success({'emoji': user_profile.realm.get_emoji()})
@has_request_variables
def upload_emoji(request, user_profile, emoji_name, url=REQ()):
# type: (HttpRequest, UserProfile, Text, Text) -> HttpResponse
check_valid_emoji_name(emoji_name)
check_emoji_admin(user_profile)
check_add_realm_emoji(user_profile.realm, emoji_name, url, author=user_profile)
return json_success()
def delete_emoji(request, user_profile, emoji_name):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
check_emoji_admin(user_profile)
check_valid_emoji(user_profile.realm, emoji_name)
do_remove_realm_emoji(user_profile.realm, emoji_name)
return json_success()
| apache-2.0 |
absoludity/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/skipping.py | 168 | 12742 | """ support for skip/xfail functions and markers. """
import os
import sys
import traceback
import py
import pytest
from _pytest.mark import MarkInfo, MarkDecorator
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--runxfail',
action="store_true", dest="runxfail", default=False,
help="run tests even if they are marked xfail")
parser.addini("xfail_strict", "default for the strict parameter of xfail "
"markers when not given explicitly (default: "
"False)",
default=False,
type="bool")
def pytest_configure(config):
if config.option.runxfail:
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = XFailed
setattr(pytest, "xfail", nop)
config.addinivalue_line("markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True, raises=None): mark the the test function "
"as an expected failure if eval(condition) has a True value. "
"Optionally specify a reason for better reporting and run=False if "
"you don't even want to execute the test function. If only specific "
"exception(s) are expected, you can list them in raises, and if the test fails "
"in other ways, it will be reported as a true failure. "
"See http://pytest.org/latest/skipping.html"
)
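# A minimal sketch of the marker usage described by the help strings above,
# assuming a hypothetical module-level CONFIG object; string conditions are
# evaluated in the test module's global namespace:
#
#   @pytest.mark.skipif('sys.platform == "win32"', reason="POSIX only")
#   @pytest.mark.xfail('CONFIG.broken', reason="known regression", run=True)
#   def test_something():
#       ...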
def pytest_namespace():
return dict(xfail=xfail)
class XFailed(pytest.fail.Exception):
""" raised from an explicit call to pytest.xfail() """
def xfail(reason=""):
""" xfail an executing test or setup functions with the given reason."""
__tracebackhide__ = True
raise XFailed(reason)
xfail.Exception = XFailed
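# A minimal usage sketch for the imperative xfail helper, assuming a
# hypothetical backend_supports_feature() helper; calling pytest.xfail() raises
# XFailed and the test is reported as an expected failure:
#
#   def test_new_backend():
#       if not backend_supports_feature():
#           pytest.xfail("backend lacks the feature")
#       ...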
class MarkEvaluator:
def __init__(self, item, name):
self.item = item
self.name = name
@property
def holder(self):
return self.item.keywords.get(self.name)
def __bool__(self):
return bool(self.holder)
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, 'exc')
def invalidraise(self, exc):
raises = self.get('raises')
if not raises:
return
return not isinstance(exc, raises)
def istrue(self):
try:
return self._istrue()
except Exception:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^",]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
pytest.fail("Error evaluating %r expression\n"
" %s\n"
"%s"
%(self.name, self.expr, "\n".join(msg)),
pytrace=False)
def _getglobals(self):
d = {'os': os, 'sys': sys, 'config': self.item.config}
func = self.item.obj
try:
d.update(func.__globals__)
except AttributeError:
d.update(func.func_globals)
return d
def _istrue(self):
if hasattr(self, 'result'):
return self.result
if self.holder:
d = self._getglobals()
if self.holder.args:
self.result = False
# "holder" might be a MarkInfo or a MarkDecorator; only
# MarkInfo keeps track of all parameters it received in an
# _arglist attribute
if hasattr(self.holder, '_arglist'):
arglist = self.holder._arglist
else:
arglist = [(self.holder.args, self.holder.kwargs)]
for args, kwargs in arglist:
for expr in args:
self.expr = expr
if isinstance(expr, py.builtin._basestring):
result = cached_eval(self.item.config, expr, d)
else:
if "reason" not in kwargs:
# XXX better be checked at collection time
msg = "you need to specify reason=STRING " \
"when using booleans as conditions."
pytest.fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = kwargs.get('reason', None)
self.expr = expr
return self.result
else:
self.result = True
return getattr(self, 'result', False)
def get(self, attr, default=None):
return self.holder.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, 'reason', None) or self.get('reason', None)
if not expl:
if not hasattr(self, 'expr'):
return ""
else:
return "condition: " + str(self.expr)
return expl
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
skipif_info = item.keywords.get('skipif')
if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
eval_skipif = MarkEvaluator(item, 'skipif')
if eval_skipif.istrue():
item._evalskip = eval_skipif
pytest.skip(eval_skipif.getexplanation())
skip_info = item.keywords.get('skip')
if isinstance(skip_info, (MarkInfo, MarkDecorator)):
item._evalskip = True
if 'reason' in skip_info.kwargs:
pytest.skip(skip_info.kwargs['reason'])
elif skip_info.args:
pytest.skip(skip_info.args[0])
else:
pytest.skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, 'xfail')
check_xfail_no_run(item)
@pytest.mark.hookwrapper
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get('run', True):
pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini('xfail_strict')
is_strict_xfail = evalxfail.get('strict', strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, '_evalxfail', None)
evalskip = getattr(item, '_evalskip', None)
# unittest special case, see setting of _unexpectedsuccess
if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
# we need to translate into how pytest encodes xpass
rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
rep.outcome = "failed"
elif item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
rep.outcome = "failed" # xpass outcome
rep.wasxfail = evalxfail.getexplanation()
elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest
filename, line, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "xfail"
elif report.failed:
return "xpassed", "X", ("XPASS", {'yellow': True})
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
#for name in "xfailed skipped failed xpassed":
# if not tr.stats.get(name, 0):
# tr.write_line("HINT: use '-r' option to see extra "
# "summary info about tests")
# break
return
lines = []
for char in tr.reportchars:
if char == "x":
show_xfailed(terminalreporter, lines)
elif char == "X":
show_xpassed(terminalreporter, lines)
elif char in "fF":
show_simple(terminalreporter, lines, 'failed', "FAIL %s")
elif char in "sS":
show_skipped(terminalreporter, lines)
elif char == "E":
show_simple(terminalreporter, lines, 'error', "ERROR %s")
elif char == 'p':
show_simple(terminalreporter, lines, 'passed', "PASSED %s")
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat, format):
failed = terminalreporter.stats.get(stat)
if failed:
for rep in failed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
lines.append(format %(pos,))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
for rep in xfailed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XFAIL %s" % (pos,))
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
for rep in xpassed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XPASS %s %s" %(pos, reason))
def cached_eval(config, expr, d):
if not hasattr(config, '_evalcache'):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
def folded_skips(skipped):
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
d.setdefault(key, []).append(event)
l = []
for key, events in d.items():
l.append((len(events),) + key)
return l
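# Shape of the folded result (illustrative): one tuple per distinct skip
# reason, (count,) + the (fspath, lineno, reason) longrepr key, e.g.
#   [(3, 'tests/test_net.py', 12, 'Skipped: no network')]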
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get('skipped', [])
if skipped:
#if not tr.hasopt('skipped'):
# tr.write_line(
# "%d skipped tests, specify -rs for more info" %
# len(skipped))
# return
fskips = folded_skips(skipped)
if fskips:
#tr.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
lines.append("SKIP [%d] %s:%d: %s" %
(num, fspath, lineno, reason))
| mpl-2.0 |
neo1691/scorer.py | scorer/app.py | 1 | 1926 | import logging
from time import sleep
import scorer.fetch_scores as fs
import scorer.notification as notify
from scorer.system import exitApp
from scorer.ui import getUserInput
logger = logging.getLogger("scorer.app")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("scorer.log")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -\
%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
NO_LIVE_MATCHES = "No Match in progress"
SLEEP_INTERVAL = 60
def main():
while True:
logger.debug("Getting the xml and matches list")
xml, matches = fs.findMatchesAvailable()
if matches[0] == NO_LIVE_MATCHES:
print "No Live matches are available now:"
exitApp()
matches.append("Quit the scorer app")
try:
matchChoice = getUserInput(matches)
except KeyboardInterrupt:
exitApp()
if matchChoice == len(matches) - 1:
logger.debug("User chose quit")
exitApp()
logger.debug("User's choice: {} {}".format(matchChoice,
matches[matchChoice - 1]))
logger.debug("Getting the latest score for the selected match")
matchID = fs.getMatchID(matchChoice, xml)
jsonurl = fs.getJsonURL(matchID)
playingTeams = fs.getPlayingTeamNames(jsonurl)
while True:
try:
title, score = fs.getLastestScore(jsonurl, playingTeams)
logger.debug("Sending notification for: title:{} score:\
{}".format(title, score))
notify.popUpMessage(title, score)
sleep(SLEEP_INTERVAL)
except KeyboardInterrupt:
break
if __name__ == '__main__':
main()
| gpl-2.0 |
AstroHuntsman/POCS | pocs/tests/test_config.py | 2 | 4469 | import os
import pytest
import uuid
import yaml
from astropy import units as u
from pocs.utils.config import load_config
from pocs.utils.config import save_config
def test_load_simulator(config):
assert 'camera' in config['simulator']
assert 'mount' in config['simulator']
assert 'weather' in config['simulator']
assert 'night' in config['simulator']
def test_no_clobber(config):
with pytest.warns(UserWarning):
save_config('pocs', config, clobber=False)
def test_clobber(config):
config01 = {
'foo': 'bar'
}
config02 = {
'bar': 'foo'
}
assert config01 != config02
save_config('foo', config01)
config03 = load_config('foo')
assert config01 == config03
save_config('foo', config02)
config04 = load_config('foo')
assert config02 == config04
assert config01 != config04
conf_fn = '{}/conf_files/foo.yaml'.format(os.getenv('POCS'))
os.remove(conf_fn)
assert os.path.exists(conf_fn) is False
def test_full_path():
temp_config_path = '/tmp/{}.yaml'.format(uuid.uuid4())
temp_config = {'foo': 42}
save_config(temp_config_path, temp_config)
c = load_config(temp_config_path)
assert c == temp_config
os.remove(temp_config_path)
def test_local_config():
_local_config_file = '{}/conf_files/pocs_local.yaml'.format(os.getenv('POCS'))
if not os.path.exists(_local_config_file):
conf = load_config(ignore_local=True)
assert conf['name'] == 'Generic PANOPTES Unit'
local_yaml = {
'name': 'ConfTestName'
}
with open(_local_config_file, 'w') as f:
f.write(yaml.dump(local_yaml))
conf = load_config()
assert conf['name'] != 'Generic PANOPTES Unit'
os.remove(_local_config_file)
else:
conf = load_config()
assert conf['name'] != 'Generic PANOPTES Unit'
def test_multiple_config():
config01 = {'foo': 1}
config02 = {'foo': 2, 'bar': 42}
config03 = {'bam': 'boo'}
assert config01 != config02
f01 = str(uuid.uuid4())
f02 = str(uuid.uuid4())
f03 = str(uuid.uuid4())
save_config(f01, config01)
save_config(f02, config02)
save_config(f03, config03)
config04 = load_config(f01)
config05 = load_config(f02)
config06 = load_config(f03)
assert config01 == config04
assert config02 == config05
assert config03 == config06
config07 = load_config([f01, f02], ignore_local=True)
config08 = load_config([f02, f01], ignore_local=True)
assert config07 != config01
assert config07 == config02
assert config08 != config01
assert config08 != config02
assert config08 != config05
assert 'foo' not in config06
assert 'bar' not in config06
assert 'foo' in config05
assert 'foo' in config07
assert 'foo' in config08
assert 'bar' in config05
assert 'bar' in config07
assert 'bar' in config08
assert 'bam' in config06
assert config07['foo'] == 2
assert config08['foo'] == 1
os.remove('{}/conf_files/{}.yaml'.format(os.getenv('POCS'), f01))
os.remove('{}/conf_files/{}.yaml'.format(os.getenv('POCS'), f02))
os.remove('{}/conf_files/{}.yaml'.format(os.getenv('POCS'), f03))
def test_no_config():
# Move existing config to temp
_config_file = '{}/conf_files/pocs.yaml'.format(os.getenv('POCS'))
_config_file_temp = '{}/conf_files/pocs_temp.yaml'.format(os.getenv('POCS'))
os.rename(_config_file, _config_file_temp)
config = load_config(ignore_local=True)
assert len(config.keys()) == 0
os.rename(_config_file_temp, _config_file)
def test_parse(config):
lat = config['location']['latitude']
assert isinstance(lat, u.Quantity)
def test_no_parse():
config = load_config(parse=False, ignore_local=True)
lat = config['location']['latitude']
assert isinstance(lat, u.Quantity) is False
assert isinstance(lat, float)
def test_location_latitude(config):
lat = config['location']['latitude']
assert lat >= -90 * u.degree and lat <= 90 * u.degree
def test_location_longitude(config):
lat = config['location']['longitude']
assert lat >= -360 * u.degree and lat <= 360 * u.degree
def test_location_positive_elevation(config):
elev = config['location']['elevation']
assert elev >= 0.0 * u.meter
def test_directories(config):
assert config['directories']['data'] == '{}/data'.format(os.getenv('PANDIR'))
| mit |
simonwydooghe/ansible | lib/ansible/modules/network/check_point/cp_mgmt_exception_group.py | 20 | 6632 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_exception_group
short_description: Manages exception-group objects on Check Point over Web Services API
description:
- Manages exception-group objects on Check Point devices including creating, updating and removing objects.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
applied_profile:
description:
- The threat profile to apply this group to in the case of apply-on threat-rules-with-specific-profile.
type: str
applied_threat_rules:
description:
- The threat rules to apply this group on in the case of apply-on manually-select-threat-rules.
type: dict
suboptions:
add:
description:
- Adds to collection of values
type: list
suboptions:
layer:
description:
- The layer of the threat rule to which the group is to be attached.
type: str
name:
description:
- The name of the threat rule to which the group is to be attached.
type: str
rule_number:
description:
- The rule-number of the threat rule to which the group is to be attached.
type: str
position:
description:
- Position in the rulebase.
type: str
apply_on:
description:
- An exception group can be set to apply on all threat rules, all threat rules which have a specific profile, or those rules manually chosen by the user.
type: str
choices: ['all-threat-rules', 'all-threat-rules-with-specific-profile', 'manually-select-threat-rules']
tags:
description:
- Collection of tag identifiers.
type: list
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored.
type: bool
extends_documentation_fragment: checkpoint_objects
"""
EXAMPLES = """
- name: add-exception-group
cp_mgmt_exception_group:
applied_threat_rules.0.layer: MyLayer
applied_threat_rules.0.name: MyThreatRule
apply_on: manually-select-threat-rules
name: exception_group_2
state: present
- name: set-exception-group
cp_mgmt_exception_group:
apply_on: all-threat-rules
name: exception_group_2
state: present
tags: tag3
- name: delete-exception-group
cp_mgmt_exception_group:
name: exception_group_2
state: absent
"""
RETURN = """
cp_mgmt_exception_group:
description: The checkpoint object created or updated.
returned: always, except when deleting the object.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_objects, api_call
def main():
argument_spec = dict(
name=dict(type='str', required=True),
applied_profile=dict(type='str'),
applied_threat_rules=dict(type='dict', options=dict(
add=dict(type='list', options=dict(
layer=dict(type='str'),
name=dict(type='str'),
rule_number=dict(type='str'),
position=dict(type='str')
))
)),
apply_on=dict(type='str', choices=['all-threat-rules', 'all-threat-rules-with-specific-profile', 'manually-select-threat-rules']),
tags=dict(type='list'),
color=dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green',
'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon', 'coral', 'sea green',
'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna',
'yellow']),
comments=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
ignore_warnings=dict(type='bool'),
ignore_errors=dict(type='bool')
)
argument_spec.update(checkpoint_argument_spec_for_objects)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
api_call_object = 'exception-group'
result = api_call(module, api_call_object)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kneufeld/crossbarexamples | rest/needs_cleanup/python/example/signed/test.py | 12 | 1213 | ###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import crossbarconnect
if __name__ == '__main__':
## create a new Crossbar.io push client (once), providing key/secret
## for signed requests
##
client = crossbarconnect.Client("http://127.0.0.1:8080/push",
key = "foobar", secret = "secret")
## publish an event with positional payload arguments
##
event_id = client.publish("com.myapp.topic1", "Hello, world!", 23)
print("event published with ID {0}".format(event_id))
| apache-2.0 |
marctc/wagtail | wagtail/wagtailadmin/tests/test_password_reset.py | 25 | 1304 | from django.test import TestCase, override_settings
from django.core import mail
from wagtail.tests.utils import WagtailTestUtils
class TestUserPasswordReset(TestCase, WagtailTestUtils):
fixtures = ['test.json']
# need to clear urlresolver caches before/after tests, because we override ROOT_URLCONF
# in some tests here
def setUp(self):
from django.core.urlresolvers import clear_url_caches
clear_url_caches()
def tearDown(self):
from django.core.urlresolvers import clear_url_caches
clear_url_caches()
@override_settings(ROOT_URLCONF="wagtail.wagtailadmin.urls")
def test_email_found_default_url(self):
response = self.client.post('/password_reset/', {'email': 'siteeditor@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("testserver", mail.outbox[0].body)
@override_settings(ROOT_URLCONF="wagtail.wagtailadmin.urls", BASE_URL='http://mysite.com')
def test_email_found_base_url(self):
response = self.client.post('/password_reset/', {'email': 'siteeditor@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("mysite.com", mail.outbox[0].body)
| bsd-3-clause |
ssbarnea/ansible | test/support/windows-integration/plugins/action/win_copy.py | 87 | 23564 | # This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import os.path
import shutil
import tempfile
import traceback
import zipfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
"""
Walk a filesystem tree returning enough information to copy the files.
This is similar to the _walk_dirs function in ``copy.py`` but returns
a dict instead of a tuple for each entry and includes the checksum of
a local file if wanted.
:arg topdir: The directory that the filesystem tree is rooted at
:arg loader: The self._loader object from ActionBase
:kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False (the default here), no symlinks are dereferenced. When set to True,
the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
:kwarg checksum_check: whether to get the checksum of the local file and add it to the dict
:returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
This separates all the files, directories, and symlinks along with
important information about each::
{
'files': [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to',
checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
}, ...],
'directories': [{
src: '/absolute/path/to/copy/from',
dest: 'relative/path/to/copy/to'
}, ...],
'symlinks': [{
src: '/symlink/target/path',
dest: 'relative/path/to/copy/to'
}, ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
to None if checksum_check=False.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
"""
This is a closure (function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
# Dereference the symlink
real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
else:
# Just a normal file
real_file = loader.get_real_file(filepath, decrypt=decrypt)
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
# This was a circular symlink. So add it as
# a symlink
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents),
rel_base=dest_dirpath,
checksum_check=checksum_check)
else:
# Add the symlink to the destination
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Just a normal directory
r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents, checksum_check=checksum_check)
return r_files
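# Rough usage sketch (assumes an Ansible DataLoader-like object for `loader`;
# illustrative only, not part of the original plugin):
#   tree = _walk_dirs('/tmp/payload/', loader, local_follow=False, checksum_check=True)
#   for f in tree['files']:
#       print(f['src'], '->', f['dest'], f['checksum'])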
def _get_local_checksum(get_checksum, local_path):
if get_checksum:
return checksum(local_path)
else:
return None
class ActionModule(ActionBase):
WIN_PATH_SEPARATOR = "\\"
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _create_zip_tempfile(self, files, directories):
tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
zip_file_path = os.path.join(tmpdir, "win_copy.zip")
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True)
# encoding the file/dir name with base64 so Windows can unzip a unicode
# filename and get the right name, Windows doesn't handle unicode names
# very well
for directory in directories:
directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
for file in files:
file_path = to_bytes(file['src'], errors='surrogate_or_strict')
archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
return zip_file_path
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp, backup):
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
# copy the file across to the server
tmp_src = self._connection._shell.join_path(tmp, 'source')
self._transfer_file(local_file, tmp_src)
copy_args = self._task.args.copy()
copy_args.update(
dict(
dest=dest,
src=tmp_src,
_original_basename=source_rel,
_copy_mode="single",
backup=backup,
)
)
copy_args.pop('content', None)
copy_result = self._execute_module(module_name="copy",
module_args=copy_args,
task_vars=task_vars)
return copy_result
def _copy_zip_file(self, dest, files, directories, task_vars, tmp, backup):
# create local zip file containing all the files and directories that
# need to be copied to the server
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
try:
zip_file = self._create_zip_tempfile(files, directories)
except Exception as e:
module_return = dict(
changed=False,
failed=True,
msg="failed to create tmp zip file: %s" % to_text(e),
exception=traceback.format_exc()
)
return module_return
zip_path = self._loader.get_real_file(zip_file)
# send zip file to remote, file must end in .zip so
# COM Shell.Application works
tmp_src = self._connection._shell.join_path(tmp, 'source.zip')
self._transfer_file(zip_path, tmp_src)
# run the explode operation of win_copy on remote
copy_args = self._task.args.copy()
copy_args.update(
dict(
src=tmp_src,
dest=dest,
_copy_mode="explode",
backup=backup,
)
)
copy_args.pop('content', None)
module_return = self._execute_module(module_name='copy',
module_args=copy_args,
task_vars=task_vars)
shutil.rmtree(os.path.dirname(zip_path))
return module_return
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
force = boolean(self._task.args.get('force', True), strict=False)
decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
backup = boolean(self._task.args.get('backup', False), strict=False)
result['src'] = source
result['dest'] = dest
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and (
dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# If content is defined make a temp file and write the content into it
content_tempfile = None
if content is not None:
try:
# if content comes to us as a dict it should be decoded json.
# We need to encode it back into a string and write it out
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content tmp file: %s" % to_native(err)
return result
# all actions should occur on the remote server, run win_copy module
elif remote_src:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
_copy_mode="remote",
dest=dest,
src=source,
force=force,
backup=backup,
)
)
new_module_args.pop('content', None)
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
return result
# find_needle returns a path that may not have a trailing slash on a
# directory so we need to find that out first and append at the end
else:
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
result['operation'] = 'folder_copy'
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
checksum_check=force)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - win_copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
check_dest = dest
# Source is a file, add details to source_files dict
else:
result['operation'] = 'file_copy'
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
return result
original_basename = os.path.basename(source)
result['original_basename'] = original_basename
# check if dest ends with / or \ and append source filename to dest
if self._connection._shell.path_has_trailing_slash(dest):
check_dest = dest
filename = original_basename
result['dest'] = self._connection._shell.join_path(dest, filename)
else:
# replace \\ with / so we can use os.path to get the filename or dirname
unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
filename = os.path.basename(unix_path)
check_dest = os.path.dirname(unix_path)
file_checksum = _get_local_checksum(force, source_full)
source_files['files'].append(
dict(
src=source_full,
dest=filename,
checksum=file_checksum
)
)
result['checksum'] = file_checksum
result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
# find out the files/directories/symlinks that we need to copy to the server
query_args = self._task.args.copy()
query_args.update(
dict(
_copy_mode="query",
dest=check_dest,
force=force,
files=source_files['files'],
directories=source_files['directories'],
symlinks=source_files['symlinks'],
)
)
# src is not required for query, will fail path validation if src has unix allowed chars
query_args.pop('src', None)
query_args.pop('content', None)
query_return = self._execute_module(module_args=query_args,
task_vars=task_vars)
if query_return.get('failed') is True:
result.update(query_return)
return result
if len(query_return['files']) > 0 or len(query_return['directories']) > 0 and self._connection._shell.tmpdir is None:
self._connection._shell.tmpdir = self._make_tmp_path()
if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
# we only need to copy 1 file, don't mess around with zips
file_src = query_return['files'][0]['src']
file_dest = query_return['files'][0]['dest']
result.update(self._copy_single_file(file_src, dest, file_dest,
task_vars, self._connection._shell.tmpdir, backup))
if result.get('failed') is True:
result['msg'] = "failed to copy file %s: %s" % (file_src, result['msg'])
result['changed'] = True
elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
# either multiple files or directories need to be copied, compress
# to a zip and 'explode' the zip on the server
# TODO: handle symlinks
result.update(self._copy_zip_file(dest, source_files['files'],
source_files['directories'],
task_vars, self._connection._shell.tmpdir, backup))
result['changed'] = True
else:
# no operations need to occur
result['failed'] = False
result['changed'] = False
# remove the content tmp file and remote tmp file if it was created
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
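# A task that would exercise this action plugin might look like the following
# YAML (assumed example; the paths are placeholders):
#   - name: copy a local tree to the Windows target
#     win_copy:
#       src: files/payload/
#       dest: C:\payload\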
| gpl-3.0 |
botioni/aml_linux_kernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
andela-ifageyinbo/django | django/template/backends/django.py | 53 | 3918 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
from importlib import import_module
from pkgutil import walk_packages
from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.context import make_context
from django.template.engine import Engine
from django.template.library import InvalidTemplateLibrary
from django.utils import six
from .base import BaseEngine
class DjangoTemplates(BaseEngine):
app_dirname = 'templates'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
options.setdefault('debug', settings.DEBUG)
options.setdefault('file_charset', settings.FILE_CHARSET)
libraries = options.get('libraries', {})
options['libraries'] = self.get_templatetag_libraries(libraries)
super(DjangoTemplates, self).__init__(params)
self.engine = Engine(self.dirs, self.app_dirs, **options)
def from_string(self, template_code):
return Template(self.engine.from_string(template_code), self)
def get_template(self, template_name):
try:
return Template(self.engine.get_template(template_name), self)
except TemplateDoesNotExist as exc:
reraise(exc, self)
def get_templatetag_libraries(self, custom_libraries):
"""
Return a collation of template tag libraries from installed
applications and the supplied custom_libraries argument.
"""
libraries = get_installed_libraries()
libraries.update(custom_libraries)
return libraries
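# Minimal usage sketch (the params dict mirrors a TEMPLATES settings entry and
# django.conf.settings must already be configured; assumed example):
#   engine = DjangoTemplates({'NAME': 'django', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {}})
#   html = engine.from_string('Hello {{ name }}!').render({'name': 'world'})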
class Template(object):
def __init__(self, template, backend):
self.template = template
self.backend = backend
@property
def origin(self):
return self.template.origin
def render(self, context=None, request=None):
context = make_context(context, request)
try:
return self.template.render(context)
except TemplateDoesNotExist as exc:
reraise(exc, self.backend)
def reraise(exc, backend):
"""
Reraise TemplateDoesNotExist while maintaining template debug information.
"""
new = exc.__class__(*exc.args, tried=exc.tried, backend=backend)
if hasattr(exc, 'template_debug'):
new.template_debug = exc.template_debug
six.reraise(exc.__class__, new, sys.exc_info()[2])
def get_installed_libraries():
"""
Return the built-in template tag libraries and those from installed
applications. Libraries are stored in a dictionary where keys are the
individual module names, not the full module paths. Example:
django.templatetags.i18n is stored as i18n.
"""
libraries = {}
candidates = ['django.templatetags']
candidates.extend(
'%s.templatetags' % app_config.name
for app_config in apps.get_app_configs())
for candidate in candidates:
try:
pkg = import_module(candidate)
except ImportError:
# No templatetags package defined. This is safe to ignore.
continue
if hasattr(pkg, '__path__'):
for name in get_package_libraries(pkg):
libraries[name[len(candidate) + 1:]] = name
return libraries
def get_package_libraries(pkg):
"""
Recursively yield template tag libraries defined in submodules of a
package.
"""
for entry in walk_packages(pkg.__path__, pkg.__name__ + '.'):
try:
module = import_module(entry[1])
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (entry[1], e)
)
if hasattr(module, 'register'):
yield entry[1]
| bsd-3-clause |
AdrianHuang/rt-thread | bsp/tm4c129x/rtconfig.py | 21 | 2501 | # BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LaunchPad (REV D)
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
PART_TYPE = 'PART_TM4C129XNCZAD'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'D:\ArdaArmTools\Sourcery_Lite\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:\Keil_v5'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# tool-chains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-tm4c129x.map,-cref,-u,Reset_Handler -T tm4c_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter tm4c_rom.sct --info sizes --info totals --info unused --info veneers --list rtthread-tm4c129x.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'
EXEC_PATH += '/arm/armcc/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
| gpl-2.0 |
Backflipz/plugin.video.excubed | resources/lib/js2py/legecy_translators/translator.py | 96 | 5676 | from flow import translate_flow
from constants import remove_constants, recover_constants
from objects import remove_objects, remove_arrays, translate_object, translate_array, set_func_translator
from functions import remove_functions, reset_inline_count
from jsparser import inject_before_lval, indent, dbg
TOP_GLOBAL = '''from js2py.pyjs import *\nvar = Scope( JS_BUILTINS )\nset_global_object(var)\n'''
def translate_js(js, top=TOP_GLOBAL):
"""js has to be a javascript source code.
returns equivalent python code."""
# Remove constant literals
no_const, constants = remove_constants(js)
#print 'const count', len(constants)
# Remove object literals
no_obj, objects, obj_count = remove_objects(no_const)
#print 'obj count', len(objects)
# Remove arrays
no_arr, arrays, arr_count = remove_arrays(no_obj)
#print 'arr count', len(arrays)
# Here remove and replace functions
reset_inline_count()
no_func, hoisted, inline = remove_functions(no_arr)
#translate flow and expressions
py_seed, to_register = translate_flow(no_func)
# register variables and hoisted functions
#top += '# register variables\n'
top += 'var.registers(%s)\n' % str(to_register + hoisted.keys())
#Recover functions
# hoisted functions recovery
defs = ''
#defs += '# define hoisted functions\n'
#print len(hoisted) , 'HH'*40
for nested_name, nested_info in hoisted.iteritems():
nested_block, nested_args = nested_info
new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args)
new_code += 'PyJsLvalTempHoisted.func_name = %s\n' %repr(nested_name)
defs += new_code +'\nvar.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name)
#defs += '# Everything ready!\n'
# inline functions recovery
for nested_name, nested_info in inline.iteritems():
nested_block, nested_args = nested_info
new_code = translate_func(nested_name, nested_block, nested_args)
py_seed = inject_before_lval(py_seed, nested_name.split('@')[0], new_code)
# add hoisted definitiond - they have literals that have to be recovered
py_seed = defs + py_seed
#Recover arrays
for arr_lval, arr_code in arrays.iteritems():
translation, obj_count, arr_count = translate_array(arr_code, arr_lval, obj_count, arr_count)
py_seed = inject_before_lval(py_seed, arr_lval, translation)
#Recover objects
for obj_lval, obj_code in objects.iteritems():
translation, obj_count, arr_count = translate_object(obj_code, obj_lval, obj_count, arr_count)
py_seed = inject_before_lval(py_seed, obj_lval, translation)
#Recover constants
py_code = recover_constants(py_seed, constants)
return top + py_code
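# Illustrative call (same pattern as the commented self-test near the bottom
# of this module; assumed example):
#   py_src = translate_js('var x = 1 + 2; console.log(x);')
#   exec(py_src)  # runs the translated code against the js2py runtime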
def translate_func(name, block, args):
"""Translates functions and all nested functions to Python code.
name - name of that function (global functions will be available under var while
inline will be available directly under this name )
block - code of the function (*with* brackets {} )
args - arguments that this function takes"""
inline = name.startswith('PyJsLvalInline')
real_name = ''
if inline:
name, real_name = name.split('@')
arglist = ', '.join(args) +', ' if args else ''
code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist)
# register local variables
scope = "'this':this, 'arguments':arguments" #it will be a simple dictionary
for arg in args:
scope += ', %s:%s' %(repr(arg), arg)
if real_name:
scope += ', %s:%s' % (repr(real_name), name)
code += indent('var = Scope({%s}, var)\n' % scope)
block, nested_hoisted, nested_inline = remove_functions(block)
py_code, to_register = translate_flow(block)
#register variables declared with var and names of hoisted functions.
to_register += nested_hoisted.keys()
if to_register:
code += indent('var.registers(%s)\n'% str(to_register))
for nested_name, info in nested_hoisted.iteritems():
nested_block, nested_args = info
new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args)
# Now put definition of hoisted function on the top
code += indent(new_code)
code += indent('PyJsLvalTempHoisted.func_name = %s\n' %repr(nested_name))
code += indent('var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name))
for nested_name, info in nested_inline.iteritems():
nested_block, nested_args = info
new_code = translate_func(nested_name, nested_block, nested_args)
# Inject definitions of inline functions just before usage
# nested inline names have this format : LVAL_NAME@REAL_NAME
py_code = inject_before_lval(py_code, nested_name.split('@')[0], new_code)
if py_code.strip():
code += indent(py_code)
return code
set_func_translator(translate_func)
#print inject_before_lval(' chuj\n moj\n lval\nelse\n', 'lval', 'siema\njestem piter\n')
import time
#print time.time()
#print translate_js('if (1) console.log("Hello, World!"); else if (5) console.log("Hello world?");')
#print time.time()
t = """
var x = [1,2,3,4,5,6];
for (var e in x) {console.log(e); delete x[3];}
console.log(5 in [1,2,3,4,5]);
"""
SANDBOX ='''
import traceback
try:
%s
except:
print traceback.format_exc()
print
raw_input('Press Enter to quit')
'''
if __name__=='__main__':
# test with jq: if it works then it really works :)
#with open('jq.js', 'r') as f:
#jq = f.read()
#res = translate_js(jq)
res = translate_js(t)
dbg(SANDBOX% indent(res))
print 'Done' | gpl-2.0 |
pokowaka/atreus-firmware | tmk/tmk_core/tool/mbed/mbed-sdk/workspace_tools/host_tests/tcpecho_client_auto.py | 101 | 3319 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
from sys import stdout
from SocketServer import BaseRequestHandler, TCPServer
class TCPEchoClient_Handler(BaseRequestHandler):
def handle(self):
""" One handle per connection
"""
print "HOST: Connection received...",
count = 1;
while True:
data = self.request.recv(1024)
if not data: break
self.request.sendall(data)
if '{{end}}' in str(data):
print
print str(data)
else:
if not count % 10:
sys.stdout.write('.')
count += 1
stdout.flush()
class TCPEchoClientTest():
def send_server_ip_port(self, selftest, ip_address, port_no):
""" Set up network host. Reset target and and send server IP via serial to Mbed
"""
c = selftest.mbed.serial_readline() # 'TCPCllient waiting for server IP and port...'
if c is None:
self.print_result(selftest.RESULT_IO_SERIAL)
return
selftest.notify(c.strip())
selftest.notify("HOST: Sending server IP Address to target...")
connection_str = ip_address + ":" + str(port_no) + "\n"
selftest.mbed.serial_write(connection_str)
selftest.notify(connection_str)
# Two more strings about connection should be sent by MBED
for i in range(0, 2):
c = selftest.mbed.serial_readline()
if c is None:
selftest.print_result(self.RESULT_IO_SERIAL)
return
selftest.notify(c.strip())
def test(self, selftest):
# We need to discover SERVEP_IP and set up SERVER_PORT
# Note: Port 7 is Echo Protocol:
#
# Port number rationale:
#
# The Echo Protocol is a service in the Internet Protocol Suite defined
# in RFC 862. It was originally proposed for testing and measurement
# of round-trip times[citation needed] in IP networks.
#
# A host may connect to a server that supports the Echo Protocol using
# the Transmission Control Protocol (TCP) or the User Datagram Protocol
# (UDP) on the well-known port number 7. The server sends back an
# identical copy of the data it received.
SERVER_IP = str(socket.gethostbyname(socket.getfqdn()))
SERVER_PORT = 7
# Returning none will suppress host test from printing success code
server = TCPServer((SERVER_IP, SERVER_PORT), TCPEchoClient_Handler)
print "HOST: Listening for TCP connections: " + SERVER_IP + ":" + str(SERVER_PORT)
self.send_server_ip_port(selftest, SERVER_IP, SERVER_PORT)
server.serve_forever()
| gpl-3.0 |
anryko/ansible | lib/ansible/modules/storage/vexata/vexata_eg.py | 25 | 5824 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vexata_eg
version_added: 2.9
short_description: Manage export groups on Vexata VX100 storage arrays
description:
- Create or delete export groups on a Vexata VX100 array.
- An export group is a tuple of a volume group, initiator group and port
group that allows a set of volumes to be exposed to one or more hosts
through specific array ports.
author:
- Sandeep Kasargod (@vexata)
options:
name:
description:
- Export group name.
required: true
type: str
state:
description:
- Creates export group when present or delete when absent.
default: present
choices: [ present, absent ]
type: str
vg:
description:
- Volume group name.
type: str
ig:
description:
- Initiator group name.
type: str
pg:
description:
- Port group name.
type: str
extends_documentation_fragment:
- vexata.vx100
'''
EXAMPLES = r'''
- name: Create export group named db_export.
vexata_eg:
name: db_export
vg: dbvols
ig: dbhosts
pg: pg1
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Delete export group named db_export
vexata_eg:
name: db_export
state: absent
array: vx100_ultra.test.com
user: admin
password: secret
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vexata import (
argument_spec, get_array, required_together)
def get_eg(module, array):
"""Retrieve a named vg if it exists, None if absent."""
name = module.params['name']
try:
egs = array.list_egs()
eg = filter(lambda eg: eg['name'] == name, egs)
if len(eg) == 1:
return eg[0]
else:
return None
except Exception:
module.fail_json(msg='Error while attempting to retrieve export groups.')
def get_vg_id(module, array):
"""Retrieve a named vg's id if it exists, error if absent."""
name = module.params['vg']
try:
vgs = array.list_vgs()
vg = filter(lambda vg: vg['name'] == name, vgs)
if len(vg) == 1:
return vg[0]['id']
else:
module.fail_json(msg='Volume group {0} was not found.'.format(name))
except Exception:
module.fail_json(msg='Error while attempting to retrieve volume groups.')
def get_ig_id(module, array):
"""Retrieve a named ig's id if it exists, error if absent."""
name = module.params['ig']
try:
igs = array.list_igs()
ig = filter(lambda ig: ig['name'] == name, igs)
if len(ig) == 1:
return ig[0]['id']
else:
module.fail_json(msg='Initiator group {0} was not found.'.format(name))
except Exception:
module.fail_json(msg='Error while attempting to retrieve initiator groups.')
def get_pg_id(module, array):
"""Retrieve a named pg's id if it exists, error if absent."""
name = module.params['pg']
try:
pgs = array.list_pgs()
pg = filter(lambda pg: pg['name'] == name, pgs)
if len(pg) == 1:
return pg[0]['id']
else:
module.fail_json(msg='Port group {0} was not found.'.format(name))
except Exception:
module.fail_json(msg='Error while attempting to retrieve port groups.')
def create_eg(module, array):
""""Create a new export group."""
changed = False
eg_name = module.params['name']
vg_id = get_vg_id(module, array)
ig_id = get_ig_id(module, array)
pg_id = get_pg_id(module, array)
if module.check_mode:
module.exit_json(changed=changed)
try:
eg = array.create_eg(
eg_name,
'Ansible export group',
(vg_id, ig_id, pg_id))
if eg:
module.log(msg='Created export group {0}'.format(eg_name))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
module.exit_json(changed=changed)
def delete_eg(module, array, eg):
changed = False
eg_name = eg['name']
if module.check_mode:
module.exit_json(changed=changed)
try:
ok = array.delete_eg(
eg['id'])
if ok:
module.log(msg='Export group {0} deleted.'.format(eg_name))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
module.exit_json(changed=changed)
def main():
arg_spec = argument_spec()
arg_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
vg=dict(type='str'),
ig=dict(type='str'),
pg=dict(type='str')
)
)
module = AnsibleModule(arg_spec,
supports_check_mode=True,
required_together=required_together())
state = module.params['state']
array = get_array(module)
eg = get_eg(module, array)
if state == 'present' and not eg:
create_eg(module, array)
elif state == 'absent' and eg:
delete_eg(module, array, eg)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
Trinak/PyHopeEngine | src/pyHopeEngine/graphics/sprite.py | 1 | 1261 | '''
Created on May 29, 2013
@author: Devon
Defines a game sprite
'''
import pygame
from pyHopeEngine import engineCommon as ECOM
class GameSprite(pygame.sprite.Sprite):
'''Base game sprite image'''
def __init__(self, file):
pygame.sprite.Sprite.__init__(self)
file = ECOM.engine.resourceManager.getFile(file)
self.image = pygame.image.load(file)
self.oriImage = pygame.image.load(file)
class SpriteSheet(object):
'''A SpriteSheet object'''
def __init__(self, image, rect, numImages):
self.image = image
self.rect = rect
self.numImages = numImages
self.imageList = []
self.createImageList()
def createImageList(self):
self.imageList =[self.splitSpriteSheet(self.rect, i) for i in range(0, self.numImages)]
return
def splitSpriteSheet(self, rect, index):
tempRect = pygame.Rect(rect)
tempRect.left *= index
surface = pygame.Surface(rect.size)
surface.blit(self.image, (0, 0), tempRect)
return surface
def returnImageList(self):
return self.imageList
def returnImageAtIndex(self, index):
return self.imageList[index] | gpl-3.0 |
espadrine/opera | chromium/src/third_party/trace-viewer/third_party/pywebsocket/src/example/bench_wsh.py | 495 | 2322 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. The handler then starts sending a total of <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
| bsd-3-clause |
h3biomed/ansible-modules-core | network/cumulus/cl_interface_policy.py | 34 | 5248 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_interface_policy
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure interface enforcement policy on Cumulus Linux
description:
- This module affects the configuration files located in the interfaces
folder defined by ifupdown2. Interface ports and port ranges listed in the
"allowed" parameter define what interfaces will be available on the
switch. If the user runs this module and has an interface configured on
the switch, but not found in the "allowed" list, this interface will be
unconfigured. By default this is `/etc/network/interface.d`
For more details go the Configuring Interfaces at
U(http://docs.cumulusnetworks.com).
notes:
- lo must be included in the allowed list.
- eth0 must be in allowed list if out of band management is done
options:
allowed:
description:
- List of allowed interface names and port ranges.
required: true
location:
description:
- Directory to store interface files.
default: '/etc/network/interfaces.d/'
'''
EXAMPLES = '''
# Example playbook entries using the cl_interface_policy module.
- name: shows types of interface ranges supported
cl_interface_policy:
allowed: "lo eth0 swp1-9, swp11, swp12-13s0, swp12-30s1, swp12-30s2, bond0-12"
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# get list of interface files that are currently "configured".
# doesn't mean actually applied to the system, but most likely are
def read_current_int_dir(module):
module.custom_currentportlist = os.listdir(module.params.get('location'))
# take the allowed list and convert it into a list
# of ports.
def convert_allowed_list_to_port_range(module):
allowedlist = module.params.get('allowed')
for portrange in allowedlist:
module.custom_allowedportlist += breakout_portrange(portrange)
def breakout_portrange(prange):
_m0 = re.match(r'(\w+[a-z.])(\d+)?-?(\d+)?(\w+)?', prange.strip())
# no range defined
if _m0.group(3) is None:
return [_m0.group(0)]
else:
portarray = []
intrange = range(int(_m0.group(2)), int(_m0.group(3)) + 1)
for _int in intrange:
portarray.append(''.join([_m0.group(1),
str(_int),
str(_m0.group(4) or '')
]
)
)
return portarray
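# For example (illustrative, derived from the regex above):
#   breakout_portrange('swp1-3') -> ['swp1', 'swp2', 'swp3']
#   breakout_portrange('swp12-13s0') -> ['swp12s0', 'swp13s0']
#   breakout_portrange('lo') -> ['lo']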
# deletes the interface files
def unconfigure_interfaces(module):
currentportset = set(module.custom_currentportlist)
allowedportset = set(module.custom_allowedportlist)
remove_list = currentportset.difference(allowedportset)
fileprefix = module.params.get('location')
module.msg = "remove config for interfaces %s" % (', '.join(remove_list))
for _file in remove_list:
os.unlink(fileprefix + _file)
# check to see if policy should be enforced
# returns true if policy needs to be enforced
# that is delete interface files
def int_policy_enforce(module):
currentportset = set(module.custom_currentportlist)
allowedportset = set(module.custom_allowedportlist)
return not currentportset.issubset(allowedportset)
def main():
module = AnsibleModule(
argument_spec=dict(
allowed=dict(type='list', required=True),
location=dict(type='str', default='/etc/network/interfaces.d/')
),
)
module.custom_currentportlist = []
module.custom_allowedportlist = []
module.changed = False
module.msg = 'configured port list is part of allowed port list'
read_current_int_dir(module)
convert_allowed_list_to_port_range(module)
if int_policy_enforce(module):
module.changed = True
unconfigure_interfaces(module)
module.exit_json(changed=module.changed, msg=module.msg)
# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
import os
import shutil
if __name__ == '__main__':
main()
| gpl-3.0 |
louisdijkstra/gonl-sv | back-up/version4/postprocess_tag_snps.py | 1 | 4919 | #!/usr/bin/env python
"""
Copyright (C) 2015 Louis Dijkstra
This file is part of gonl-sv
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
import math
import random
import scipy as sp
import scipy.stats
from scipy.stats.stats import pearsonr
from SNPReader import *
__author__ = "Louis Dijkstra"
usage = """%prog [options] <tag_snp_file>
Adds the reference and alternative allele to the tag SNP data.
The output is presented as a table. Every row is for one deletion. The columns (seperated by a tab) represent:
CHR - autosome (integer)
POS - position of the deletion
LENGTH - length of the deletion
SNP_POS - position of the tag SNP
ALLELE #1 - the allele of the tag SNP with the higher allele frequency
ALLELE #2 - the allele of the tag SNP with the lower allele frequency
A - entry of the 2x2 contingency table (see below)
B - entry of the 2x2 contingency table (see below)
C - entry of the 2x2 contingency table (see below)
D - entry of the 2x2 contingency table (see below)
R - Pearson's R computed on the basis of the contingency table
p - p-value of Fisher's exact test the basis of the contingency table
P(DEL|ALLELE #1)- the estimated probability of observing a deletion given the presence of allele #1
P(DEL|ALLELE #2)- the estimated probability of observing a deletion given the presence of allele #2
The columns A, B, C and D provide all information about the 2x2 contigency table:
Deletion
| Present | Absent | total
----------------------------------------------------------------------------------------
allele #1 | A | C | A + C
SNP allele #2 | B | D | B + D
----------------------------------------------------------------------------------------
total | A + B | C + D | A+B+C+D
"""
class DeletionTAGSNPPair:
def __init__(self, line):
self.line = line # store original
values = line.split('\t') # all values are stored here
self.chromosome = int(values[0])
self.pos = int(values[1])
self.length = int(values[2])
self.snp_pos = int(values[3])
self.a = int(values[4])
self.b = int(values[5])
self.c = int(values[6])
self.d = int(values[7])
self.R = float(values[8])
self.p = float(values[9])
def addReferenceAlternativeAllele(self, ref, alt):
self.ref = ref
self.alt = alt
def update(self):
if self.b + self.d > self.a + self.c: # turn reference and alternative allele around
self.R = -1.0 * self.R
temp = self.ref
self.ref = self.alt
self.alt = temp
temp = self.b
self.b = self.a
self.a = temp
temp = self.d
self.d = self.c
self.c = temp
def print(self):
# print data related to the deletion:
print(self.chromosome, '\t', self.pos, '\t', self.length, '\t', end = '')
# print data related to the SNP:
print(self.snp_pos, '\t', self.ref, '\t', self.alt, '\t', end = '')
# print data about the table
print(self.R, '\t', self.p, '\t', self.a, '\t', self.b, '\t', self.c, '\t', self.d, '\t', end = '')
# compute conditional probabilities and print them
print(float(self.a) / float(self.a + self.c), '\t', float(self.b) / float(self.b + self.d))
def main():
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
list_of_autosomes = range(1,23)
for autosome in list_of_autosomes:
# read tag SNP file
tag_snp_file = open("Results/tags_chr" + str(autosome) + ".txt")
tag_snp_file.next() # discard header
#print('Processing tag SNP file...')
del_snp_pairs, snp_positions = [], []
for line in tag_snp_file:
del_snp_pairs.append(DeletionTAGSNPPair(line))
snp_positions.append(del_snp_pairs[-1].snp_pos)
#print('DONE processing tag SNP file...')
# read VCF file
#print('Processing VCF file...')
snp_reader = SNPReader(1, max_major_af = .96)
snp_reader.readRawList(snp_positions)
#print('DONE Processing VCF file...')
# Postprocess every tag SNP-deletion pair
for del_snp_pair in del_snp_pairs:
for snp in snp_reader.snps:
if del_snp_pair.snp_pos == snp.position:
del_snp_pair.addReferenceAlternativeAllele(snp.vcf_record.REF, snp.vcf_record.ALT[0])
del_snp_pair.update()
del_snp_pair.print()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
kelvin13/Knockout | pygments/lexers/fortran.py | 23 | 9748 | # -*- coding: utf-8 -*-
"""
pygments.lexers.fortran
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Fortran languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, words, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['FortranLexer', 'FortranFixedLexer']
class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
.. versionadded:: 0.10
"""
name = 'Fortran'
aliases = ['fortran']
filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
mimetypes = ['text/x-fortran']
flags = re.IGNORECASE | re.MULTILINE
# Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
# Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
# Logical (?): NOT, AND, OR, EQV, NEQV
# Builtins:
# http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
tokens = {
'root': [
(r'^#.*\n', Comment.Preproc),
(r'!.*\n', Comment),
include('strings'),
include('core'),
(r'[a-z][\w$]*', Name),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(words((
'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
'CODIMENSION', 'COMMON', 'CONCURRENT', 'CONTIGUOUS', 'CONTAINS',
'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS',
'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT',
'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL',
'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE',
'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'(::)', Keyword.Declaration),
(r'[()\[\],:&%;.]', Punctuation),
# Intrinsics
(words((
'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
# Booleans
(r'\.(true|false)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
],
'nums': [
(r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
(r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
(r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
],
}
class FortranFixedLexer(RegexLexer):
"""
Lexer for fixed format Fortran.
.. versionadded:: 2.1
"""
name = 'FortranFixed'
aliases = ['fortranfixed']
filenames = ['*.f', '*.F']
flags = re.IGNORECASE
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
tokens = {
'root': [
(r'[C*].*\n', Comment),
(r'#.*\n', Comment.Preproc),
(r' {0,4}!.*\n', Comment),
(r'(.{5})', Name.Label, 'cont-char'),
(r'.*\n', using(FortranLexer)),
],
'cont-char': [
(' ', Text, 'code'),
('0', Comment, 'code'),
('.', Generic.Strong, 'code')
],
'code': [
(r'(.{66})(.*)(\n)',
bygroups(_lex_fortran, Comment, Text), 'root'),
(r'(.*)(\n)', bygroups(_lex_fortran, Text), 'root'),
(r'', Text, 'root')]
}
| gpl-3.0 |
murygin/pebble | src/main/webapp/FCKeditor/fckeditor.py | 86 | 4371 | """
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=string.replace):
"""Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as &lt; &gt; and &amp; respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
text = replace(text, '&', '&amp;') # must be done 1st
text = replace(text, '<', '&lt;')
text = replace(text, '>', '&gt;')
text = replace(text, '"', '&quot;')
text = replace(text, "'", '&#39;')
return text
# The FCKeditor class
class FCKeditor(object):
def __init__(self, instanceName):
self.InstanceName = instanceName
self.BasePath = '/fckeditor/'
self.Width = '100%'
self.Height = '200'
self.ToolbarSet = 'Default'
self.Value = '';
self.Config = {}
def Create(self):
return self.CreateHtml()
def CreateHtml(self):
HtmlValue = escape(self.Value)
Html = ""
if (self.IsCompatible()):
File = "fckeditor.html"
Link = "%seditor/%s?InstanceName=%s" % (
self.BasePath,
File,
self.InstanceName
)
if (self.ToolbarSet is not None):
Link += "&Toolbar=%s" % self.ToolbarSet
# Render the linked hidden field
Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
self.InstanceName,
self.InstanceName,
HtmlValue
)
# Render the configurations hidden field
Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
self.InstanceName,
self.GetConfigFieldString()
)
# Render the editor iframe
Html += "<iframe id=\"%s\__Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
self.InstanceName,
Link,
self.Width,
self.Height
)
else:
if (self.Width.find("%%") < 0):
WidthCSS = "%spx" % self.Width
else:
WidthCSS = self.Width
if (self.Height.find("%%") < 0):
HeightCSS = "%spx" % self.Height
else:
HeightCSS = self.Height
Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
self.InstanceName,
WidthCSS,
HeightCSS,
HtmlValue
)
return Html
def IsCompatible(self):
if (os.environ.has_key("HTTP_USER_AGENT")):
sAgent = os.environ.get("HTTP_USER_AGENT", "")
else:
sAgent = ""
if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
i = sAgent.find("MSIE")
iVersion = float(sAgent[i+5:i+5+3])
if (iVersion >= 5.5):
return True
return False
elif (sAgent.find("Gecko/") >= 0):
i = sAgent.find("Gecko/")
iVersion = int(sAgent[i+6:i+6+8])
if (iVersion >= 20030210):
return True
return False
elif (sAgent.find("Opera/") >= 0):
i = sAgent.find("Opera/")
iVersion = float(sAgent[i+6:i+6+4])
if (iVersion >= 9.5):
return True
return False
elif (sAgent.find("AppleWebKit/") >= 0):
p = re.compile('AppleWebKit\/(\d+)', re.IGNORECASE)
m = p.search(sAgent)
if (int(m.group(1)) >= 522):
return True
return False
else:
return False
def GetConfigFieldString(self):
sParams = ""
bFirst = True
for sKey in self.Config.keys():
sValue = self.Config[sKey]
if (not bFirst):
sParams += "&"
else:
bFirst = False
if (sValue):
k = escape(sKey)
v = escape(sValue)
if (sValue == "true"):
sParams += "%s=true" % k
elif (sValue == "false"):
sParams += "%s=false" % k
else:
sParams += "%s=%s" % (k, v)
return sParams
| bsd-3-clause |
Atlas-Sailed-Co/oppia | core/storage/config/gae_models.py | 35 | 1632 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models relating to configuration properties."""
__author__ = 'Sean Lip'
import core.storage.base_model.gae_models as base_models
from google.appengine.ext import ndb
class ConfigPropertySnapshotMetadataModel(
base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a config property snapshot."""
pass
class ConfigPropertySnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content for a config property snapshot."""
pass
class ConfigPropertyModel(base_models.VersionedModel):
"""A class that represents a named configuration property.
The id is the name of the property.
"""
SNAPSHOT_METADATA_CLASS = ConfigPropertySnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = ConfigPropertySnapshotContentModel
# The property value.
value = ndb.JsonProperty(indexed=False)
def commit(self, committer_id, commit_cmds):
super(ConfigPropertyModel, self).commit(committer_id, '', commit_cmds)
| apache-2.0 |
Fuzion24/mitmproxy | libmproxy/contrib/wbxml/InvalidDataException.py | 47 | 1333 | #!/usr/bin/env python
'''
@author: David Shaw, david.shaw.aw@gmail.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: InvalidDataException.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class InvalidDataException(Exception):
pass | mit |
roboogle/gtkmvc3 | gtkmvco/examples/converter/src/views/application.py | 1 | 1745 | # Author: Roberto Cavada <roboogle@gmail.com>
#
# Copyright (C) 2006-2015 by Roberto Cavada
#
# gtkmvc3 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# gtkmvc3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110, USA.
#
# For more information on gtkmvc3 see <https://github.com/roboogle/gtkmvc3>
# or email to the author <roboogle@gmail.com>.
# Please report bugs to <https://github.com/roboogle/gtkmvc3/issues>
# or to <roboogle@gmail.com>.
import utils._importer
import utils.globals
from converter import ConverterView
from gtkmvc3 import View
import os.path
# ----------------------------------------------------------------------
class ApplicationView (View):
"""A view for the top level window (application)"""
glade = os.path.join(utils.globals.GLADE_DIR, "app.glade")
top = 'window_app'
def __init__(self):
View.__init__(self)
self.converter = ConverterView(False) # not a top level
vbox = self['vbox_top']
vbox.pack_start(self.converter.get_top_widget())
return
pass # end of class
# ----------------------------------------------------------------------
| lgpl-2.1 |
nvoron23/hue | desktop/core/ext-py/python-openid-2.2.5/openid/yadis/discover.py | 143 | 4445 | # -*- test-case-name: openid.test.test_yadis_discover -*-
__all__ = ['discover', 'DiscoveryResult', 'DiscoveryFailure']
from cStringIO import StringIO
from openid import fetchers
from openid.yadis.constants import \
YADIS_HEADER_NAME, YADIS_CONTENT_TYPE, YADIS_ACCEPT_HEADER
from openid.yadis.parsehtml import MetaNotFound, findHTMLMeta
class DiscoveryFailure(Exception):
"""Raised when a YADIS protocol error occurs in the discovery process"""
identity_url = None
def __init__(self, message, http_response):
Exception.__init__(self, message)
self.http_response = http_response
class DiscoveryResult(object):
"""Contains the result of performing Yadis discovery on a URI"""
# The URI that was passed to the fetcher
request_uri = None
# The result of following redirects from the request_uri
normalized_uri = None
# The URI from which the response text was returned (set to
# None if there was no XRDS document found)
xrds_uri = None
# The content-type returned with the response_text
content_type = None
# The document returned from the xrds_uri
response_text = None
def __init__(self, request_uri):
"""Initialize the state of the object
sets all attributes to None except the request_uri
"""
self.request_uri = request_uri
def usedYadisLocation(self):
"""Was the Yadis protocol's indirection used?"""
return self.normalized_uri != self.xrds_uri
def isXRDS(self):
"""Is the response text supposed to be an XRDS document?"""
return (self.usedYadisLocation() or
self.content_type == YADIS_CONTENT_TYPE)
def discover(uri):
"""Discover services for a given URI.
@param uri: The identity URI as a well-formed http or https
URI. The well-formedness and the protocol are not checked, but
the results of this function are undefined if those properties
do not hold.
@return: DiscoveryResult object
@raises Exception: Any exception that can be raised by fetching a URL with
the given fetcher.
@raises DiscoveryFailure: When the HTTP response does not have a 200 code.
"""
result = DiscoveryResult(uri)
resp = fetchers.fetch(uri, headers={'Accept': YADIS_ACCEPT_HEADER})
if resp.status not in (200, 206):
raise DiscoveryFailure(
'HTTP Response status from identity URL host is not 200. '
'Got status %r' % (resp.status,), resp)
# Note the URL after following redirects
result.normalized_uri = resp.final_url
# Attempt to find out where to go to discover the document
# or if we already have it
result.content_type = resp.headers.get('content-type')
result.xrds_uri = whereIsYadis(resp)
if result.xrds_uri and result.usedYadisLocation():
resp = fetchers.fetch(result.xrds_uri)
if resp.status not in (200, 206):
exc = DiscoveryFailure(
'HTTP Response status from Yadis host is not 200. '
'Got status %r' % (resp.status,), resp)
exc.identity_url = result.normalized_uri
raise exc
result.content_type = resp.headers.get('content-type')
result.response_text = resp.body
return result
def whereIsYadis(resp):
"""Given a HTTPResponse, return the location of the Yadis document.
May be the URL just retrieved, another URL, or None, if I can't
find any.
[non-blocking]
@returns: str or None
"""
# Attempt to find out where to go to discover the document
# or if we already have it
content_type = resp.headers.get('content-type')
# According to the spec, the content-type header must be an exact
# match, or else we have to look for an indirection.
if (content_type and
content_type.split(';', 1)[0].lower() == YADIS_CONTENT_TYPE):
return resp.final_url
else:
# Try the header
yadis_loc = resp.headers.get(YADIS_HEADER_NAME.lower())
if not yadis_loc:
# Parse as HTML if the header is missing.
#
# XXX: do we want to do something with content-type, like
# have a whitelist or a blacklist (for detecting that it's
# HTML)?
try:
yadis_loc = findHTMLMeta(StringIO(resp.body))
except MetaNotFound:
pass
return yadis_loc
| apache-2.0 |
GeoLabs/QgsWPSClient | __init__.py | 1 | 1589 | # -*- coding: utf-8 -*-
"""
***************************************************************************
qgswps.py QGIS Web Processing Service Plugin
-------------------------------------------------------------------
Date : 09 November 2009
Copyright : (C) 2009 by Dr. Horst Duester
email : horst dot duester at kappasys dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
def name():
return "QgsWPSClient"
def description():
return "Client for Web Processing Services"
def version():
return "2.0.17"
def qgisMinimumVersion():
return "1.5"
def qgisMaximumVersion():
return "2.99"
def date():
return '2016-05-16'
def email():
return 'horst.duester@sourcepole.ch'
def author():
return "Dr. Horst Duester / Sourcepole AG Zurich"
def icon():
return "images/icon.png"
def homepage():
return "http://www.geolabs.fr"
def classFactory(iface):
from qgswps import QgsWPSClient
return QgsWPSClient(iface)
| gpl-2.0 |
adamchainz/ansible | test/units/plugins/connection/test_network_cli.py | 57 | 5561 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from io import StringIO
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleConnectionFailure
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import network_cli
class TestConnectionClass(unittest.TestCase):
@patch("ansible.plugins.connection.network_cli._Connection._connect")
def test_network_cli__connect_error(self, mocked_super):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
conn.ssh = MagicMock()
conn.receive = MagicMock()
conn._terminal = MagicMock()
pc.network_os = None
self.assertRaises(AnsibleConnectionFailure, conn._connect)
@patch("ansible.plugins.connection.network_cli._Connection._connect")
def test_network_cli__invalid_os(self, mocked_super):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
conn.ssh = MagicMock()
conn.receive = MagicMock()
conn._terminal = MagicMock()
pc.network_os = None
self.assertRaises(AnsibleConnectionFailure, conn._connect)
@patch("ansible.plugins.connection.network_cli.terminal_loader")
@patch("ansible.plugins.connection.network_cli._Connection._connect")
def test_network_cli__connect(self, mocked_super, mocked_terminal_loader):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
pc.network_os = 'ios'
conn.ssh = MagicMock()
conn.receive = MagicMock()
mock_terminal = MagicMock()
conn._terminal = mock_terminal
conn._connect()
self.assertTrue(conn._terminal.on_open_shell.called)
self.assertFalse(conn._terminal.on_authorize.called)
conn._play_context.become = True
conn._play_context.become_pass = 'password'
conn._connect()
conn._terminal.on_authorize.assert_called_with(passwd='password')
@patch("ansible.plugins.connection.network_cli._Connection.close")
def test_network_cli_close(self, mocked_super):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
terminal = MagicMock(supports_multiplexing=False)
conn._terminal = terminal
conn.close()
conn._shell = MagicMock()
conn.close()
self.assertTrue(terminal.on_close_shell.called)
terminal.supports_multiplexing = True
conn.close()
self.assertIsNone(conn._shell)
@patch("ansible.plugins.connection.network_cli._Connection._connect")
def test_network_cli_exec_command(self, mocked_super):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
mock_send = MagicMock(return_value=b'command response')
conn.send = mock_send
# test sending a single command and converting to dict
rc, out, err = conn.exec_command('command')
self.assertEqual(out, b'command response')
mock_send.assert_called_with(b'command', None, None, None)
# test sending a json string
rc, out, err = conn.exec_command(json.dumps({'command': 'command'}))
self.assertEqual(out, b'command response')
mock_send.assert_called_with(b'command', None, None, None)
conn._shell = MagicMock()
# test _shell already open
rc, out, err = conn.exec_command('command')
self.assertEqual(out, b'command response')
mock_send.assert_called_with(b'command', None, None, None)
def test_network_cli_send(self):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
mock__terminal = MagicMock()
mock__terminal.terminal_stdout_re = [re.compile(b'device#')]
mock__terminal.terminal_stderr_re = [re.compile(b'^ERROR')]
conn._terminal = mock__terminal
mock__shell = MagicMock()
conn._shell = mock__shell
response = b"""device#command
command response
device#
"""
mock__shell.recv.return_value = response
output = conn.send(b'command', None, None, None)
mock__shell.sendall.assert_called_with(b'command\r')
self.assertEqual(output, b'command response')
mock__shell.reset_mock()
mock__shell.recv.return_value = b"ERROR: error message device#"
with self.assertRaises(AnsibleConnectionFailure) as exc:
conn.send(b'command', None, None, None)
self.assertEqual(str(exc.exception), 'ERROR: error message device#')
| gpl-3.0 |
siosio/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_set_literal.py | 326 | 1699 | """
Optional fixer to transform set() calls to set literals.
"""
# Author: Benjamin Peterson
from lib2to3 import fixer_base, pytree
from lib2to3.fixer_util import token, syms
class FixSetLiteral(fixer_base.BaseFix):
BM_compatible = True
explicit = True
PATTERN = """power< 'set' trailer< '('
(atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
|
single=any) ']' >
|
atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
)
')' > >
"""
def transform(self, node, results):
single = results.get("single")
if single:
# Make a fake listmaker
fake = pytree.Node(syms.listmaker, [single.clone()])
single.replace(fake)
items = fake
else:
items = results["items"]
# Build the contents of the literal
literal = [pytree.Leaf(token.LBRACE, u"{")]
literal.extend(n.clone() for n in items.children)
literal.append(pytree.Leaf(token.RBRACE, u"}"))
# Set the prefix of the right brace to that of the ')' or ']'
literal[-1].prefix = items.next_sibling.prefix
maker = pytree.Node(syms.dictsetmaker, literal)
maker.prefix = node.prefix
# If the original was a one tuple, we need to remove the extra comma.
if len(maker.children) == 4:
n = maker.children[2]
n.remove()
maker.children[-1].prefix = n.prefix
# Finally, replace the set call with our shiny new literal.
return maker
| apache-2.0 |
hejunbok/apm_planner | libs/mavlink/share/pyshared/pymavlink/tools/mavplayback.py | 28 | 8476 | #!/usr/bin/env python
'''
play back a mavlink log as a FlightGear FG NET stream, and as a
realtime mavlink stream
Useful for visualising flights
'''
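# Illustrative invocation (the log file name is a placeholder):
#   mavplayback.py --out 127.0.0.1:14550 --fgout 127.0.0.1:5503 flight.tlog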
import sys, time, os, struct
import Tkinter
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import fgFDM
from optparse import OptionParser
parser = OptionParser("mavplayback.py [options]")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by condition")
parser.add_option("--gpsalt", action='store_true', default=False, help="Use GPS altitude")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--out", help="MAVLink output port (IP:port)",
action='append', default=['127.0.0.1:14550'])
parser.add_option("--fgout", action='append', default=['127.0.0.1:5503'],
help="flightgear FDM NET output (IP:port)")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavutil
if len(args) < 1:
parser.print_help()
sys.exit(1)
filename = args[0]
def LoadImage(filename):
'''return an image from the images/ directory'''
app_dir = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(app_dir, 'files/images', filename)
return Tkinter.PhotoImage(file=path)
class App():
def __init__(self, filename):
self.root = Tkinter.Tk()
self.filesize = os.path.getsize(filename)
self.filepos = 0.0
self.mlog = mavutil.mavlink_connection(filename, planner_format=opts.planner,
robust_parsing=True)
self.mout = []
for m in opts.out:
self.mout.append(mavutil.mavlink_connection(m, input=False, baud=opts.baudrate))
self.fgout = []
for f in opts.fgout:
self.fgout.append(mavutil.mavudp(f, input=False))
self.fdm = fgFDM.fgFDM()
self.msg = self.mlog.recv_match(condition=opts.condition)
if self.msg is None:
sys.exit(1)
self.last_timestamp = getattr(self.msg, '_timestamp')
self.paused = False
self.topframe = Tkinter.Frame(self.root)
self.topframe.pack(side=Tkinter.TOP)
self.frame = Tkinter.Frame(self.root)
self.frame.pack(side=Tkinter.LEFT)
self.slider = Tkinter.Scale(self.topframe, from_=0, to=1.0, resolution=0.01,
orient=Tkinter.HORIZONTAL, command=self.slew)
self.slider.pack(side=Tkinter.LEFT)
self.clock = Tkinter.Label(self.topframe,text="")
self.clock.pack(side=Tkinter.RIGHT)
self.playback = Tkinter.Spinbox(self.topframe, from_=0, to=20, increment=0.1, width=3)
self.playback.pack(side=Tkinter.BOTTOM)
self.playback.delete(0, "end")
self.playback.insert(0, 1)
self.buttons = {}
self.button('quit', 'gtk-quit.gif', self.frame.quit)
self.button('pause', 'media-playback-pause.gif', self.pause)
self.button('rewind', 'media-seek-backward.gif', self.rewind)
self.button('forward', 'media-seek-forward.gif', self.forward)
self.button('status', 'Status', self.status)
self.flightmode = Tkinter.Label(self.frame,text="")
self.flightmode.pack(side=Tkinter.RIGHT)
self.next_message()
self.root.mainloop()
def button(self, name, filename, command):
'''add a button'''
try:
img = LoadImage(filename)
b = Tkinter.Button(self.frame, image=img, command=command)
b.image = img
except Exception:
b = Tkinter.Button(self.frame, text=filename, command=command)
b.pack(side=Tkinter.LEFT)
self.buttons[name] = b
def pause(self):
'''pause playback'''
self.paused = not self.paused
def rewind(self):
'''rewind 10%'''
pos = int(self.mlog.f.tell() - 0.1*self.filesize)
if pos < 0:
pos = 0
self.mlog.f.seek(pos)
self.find_message()
def forward(self):
'''forward 10%'''
pos = int(self.mlog.f.tell() + 0.1*self.filesize)
if pos > self.filesize:
pos = self.filesize - 2048
self.mlog.f.seek(pos)
self.find_message()
def status(self):
'''show status'''
for m in sorted(self.mlog.messages.keys()):
print(str(self.mlog.messages[m]))
def find_message(self):
'''find the next valid message'''
while True:
self.msg = self.mlog.recv_match(condition=opts.condition)
if self.msg is not None and self.msg.get_type() != 'BAD_DATA':
break
if self.mlog.f.tell() > self.filesize - 10:
self.paused = True
break
self.last_timestamp = getattr(self.msg, '_timestamp')
def slew(self, value):
'''move to a given position in the file'''
if float(value) != self.filepos:
pos = float(value) * self.filesize
self.mlog.f.seek(int(pos))
self.find_message()
def next_message(self):
'''called as each msg is ready'''
msg = self.msg
if msg is None:
self.paused = True
if self.paused:
self.root.after(100, self.next_message)
return
speed = float(self.playback.get())
timestamp = getattr(msg, '_timestamp')
now = time.strftime("%H:%M:%S", time.localtime(timestamp))
self.clock.configure(text=now)
if speed == 0.0:
self.root.after(200, self.next_message)
else:
self.root.after(int(1000*(timestamp - self.last_timestamp) / speed), self.next_message)
self.last_timestamp = timestamp
while True:
self.msg = self.mlog.recv_match(condition=opts.condition)
if self.msg is None and self.mlog.f.tell() > self.filesize - 10:
self.paused = True
return
if self.msg is not None and self.msg.get_type() != "BAD_DATA":
break
pos = float(self.mlog.f.tell()) / self.filesize
self.slider.set(pos)
self.filepos = self.slider.get()
if msg.get_type() != "BAD_DATA":
for m in self.mout:
m.write(msg.get_msgbuf().tostring())
if msg.get_type() == "GPS_RAW":
self.fdm.set('latitude', msg.lat, units='degrees')
self.fdm.set('longitude', msg.lon, units='degrees')
if opts.gpsalt:
self.fdm.set('altitude', msg.alt, units='meters')
if msg.get_type() == "VFR_HUD":
if not opts.gpsalt:
self.fdm.set('altitude', msg.alt, units='meters')
self.fdm.set('num_engines', 1)
self.fdm.set('vcas', msg.airspeed, units='mps')
if msg.get_type() == "ATTITUDE":
self.fdm.set('phi', msg.roll, units='radians')
self.fdm.set('theta', msg.pitch, units='radians')
self.fdm.set('psi', msg.yaw, units='radians')
self.fdm.set('phidot', msg.rollspeed, units='rps')
self.fdm.set('thetadot', msg.pitchspeed, units='rps')
self.fdm.set('psidot', msg.yawspeed, units='rps')
if msg.get_type() == "RC_CHANNELS_SCALED":
self.fdm.set("right_aileron", msg.chan1_scaled*0.0001)
self.fdm.set("left_aileron", -msg.chan1_scaled*0.0001)
self.fdm.set("rudder", msg.chan4_scaled*0.0001)
self.fdm.set("elevator", msg.chan2_scaled*0.0001)
self.fdm.set('rpm', msg.chan3_scaled*0.01)
if msg.get_type() == 'STATUSTEXT':
print("APM: %s" % msg.text)
if msg.get_type() == 'SYS_STATUS':
self.flightmode.configure(text=self.mlog.flightmode)
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
if self.fdm.get('latitude') != 0:
for f in self.fgout:
f.write(self.fdm.pack())
app=App(filename)
| agpl-3.0 |
ryuunosukeyoshi/PartnerPoi-Bot | lib/youtube_dl/extractor/ruutu.py | 33 | 5321 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:ruutu|supla)\.fi/(?:video|supla)/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 114,
'age_limit': 0,
},
},
{
'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
'description': 'md5:bfb7336df2a12dc21d18fa696c9f8f23',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
},
},
{
'url': 'http://www.supla.fi/supla/2231370',
'md5': 'df14e782d49a2c0df03d3be2a54ef949',
'info_dict': {
'id': '2231370',
'ext': 'mp4',
'title': 'Osa 1: Mikael Jungner',
'description': 'md5:7d90f358c47542e3072ff65d7b1bcffe',
'thumbnail': r're:^https?://.*\.jpg$',
'age_limit': 0,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video_xml = self._download_xml(
'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
def extract_formats(node):
for child in node:
if child.tag.endswith('Files'):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
if (not video_url or video_url in processed_urls or
any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
return
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds', fatal=False))
elif ext == 'mpd':
# video-only and audio-only streams are of different
# duration resulting in out of sync issue
continue
formats.extend(self._extract_mpd_formats(
video_url, video_id, mpd_id='dash', fatal=False))
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
continue
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
if not self._is_valid_url(video_url, video_id, format_id):
continue
width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': format_id,
'url': video_url,
'width': width,
'height': height,
'tbr': tbr,
'preference': preference,
})
extract_formats(video_xml.find('./Clip'))
drm = xpath_text(video_xml, './Clip/DRM', default=None)
if not formats and drm:
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
return {
'id': video_id,
'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
}
| gpl-3.0 |
barachka/odoo | addons/purchase/__openerp__.py | 49 | 3896 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Management',
'version': '1.1',
'category': 'Purchase Management',
'sequence': 19,
'summary': 'Purchase Orders, Receptions, Supplier Invoices',
'description': """
Manage goods requirement by Purchase Orders easily
==================================================
Purchase management enables you to track your suppliers' price quotations and convert them into purchase orders if necessary.
OpenERP has several methods of monitoring invoices and tracking the receipt of ordered goods. You can handle partial deliveries in OpenERP, so you can keep track of items that are still to be delivered in your orders, and you can issue reminders automatically.
OpenERP’s replenishment management rules enable the system to generate draft purchase orders automatically, or you can configure it to run a lean process driven entirely by current production needs.
Dashboard / Reports for Purchase Management will include:
---------------------------------------------------------
* Request for Quotations
* Purchase Orders Waiting Approval
* Monthly Purchases by Category
* Receptions Analysis
* Purchase Analysis
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/purchase_order.jpeg', 'images/purchase_analysis.jpeg', 'images/request_for_quotation.jpeg'],
'depends': ['stock_account', 'report'],
'data': [
'security/purchase_security.xml',
'security/ir.model.access.csv',
'purchase_workflow.xml',
'purchase_sequence.xml',
'company_view.xml',
'purchase_data.xml',
'purchase_data.yml',
'wizard/purchase_order_group_view.xml',
'wizard/purchase_line_invoice_view.xml',
'purchase_report.xml',
'purchase_view.xml',
'stock_view.xml',
'partner_view.xml',
'report/purchase_report_view.xml',
'edi/purchase_order_action_data.xml',
'res_config_view.xml',
'views/report_purchaseorder.xml',
'views/report_purchasequotation.xml',
],
'test': [
'test/ui/purchase_users.yml',
'test/process/run_scheduler.yml',
'test/fifo_price.yml',
'test/fifo_returns.yml',
#'test/costmethodchange.yml',
'test/process/cancel_order.yml',
'test/process/rfq2order2done.yml',
'test/process/generate_invoice_from_reception.yml',
'test/process/merge_order.yml',
'test/process/edi_purchase_order.yml',
'test/process/invoice_on_poline.yml',
'test/ui/duplicate_order.yml',
'test/ui/delete_order.yml',
'test/average_price.yml',
],
'demo': [
'purchase_order_demo.yml',
'purchase_demo.xml',
'purchase_stock_demo.yml',
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HuayraLinux/python-apt | doc/examples/versiontest.py | 4 | 1134 | #!/usr/bin/python
# This is a simple clone of tests/versiontest.cc
import apt_pkg
import sys
import re
apt_pkg.init_config()
apt_pkg.init_system()
TestFile = apt_pkg.parse_commandline(apt_pkg.config, [], sys.argv)
if len(TestFile) != 1:
print "Must have exactly 1 file name"
sys.exit(0)
# Go over the file.
list = open(TestFile[0], "r")
CurLine = 0
while(1):
Line = list.readline()
CurLine = CurLine + 1
if Line == "":
break
Line = Line.strip()
if len(Line) == 0 or Line[0] == '#':
continue
Split = re.split("[ \n]", Line)
# Check forward
if apt_pkg.version_compare(Split[0], Split[1]) != int(Split[2]):
print "Comparision failed on line %u. '%s' ? '%s' %i != %i" % (CurLine,
Split[0], Split[1], apt_pkg.version_compare(Split[0], Split[1]),
int(Split[2]))
# Check reverse
if apt_pkg.version_compare(Split[1], Split[0]) != -1 * int(Split[2]):
print "Comparision failed on line %u. '%s' ? '%s' %i != %i" % (CurLine,
Split[1], Split[0], apt_pkg.version_compare(Split[1], Split[0]),
-1 * int(Split[2]))
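# Illustrative sketch appended for documentation; not part of the original
# example script. apt_pkg.version_compare() returns a negative, zero or
# positive integer, which is what the checks above rely on. The version
# strings used here are arbitrary.
def _version_compare_example():
    import apt_pkg
    apt_pkg.init_config()
    apt_pkg.init_system()
    result = apt_pkg.version_compare("1.0-1", "1.0-2")
    if result < 0:
        print("1.0-1 sorts before 1.0-2")
    elif result > 0:
        print("1.0-1 sorts after 1.0-2")
    else:
        print("the two versions are equal")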
| gpl-2.0 |
cwmartin/rez | src/rez/vendor/pygraph/classes/exceptions.py | 23 | 2371 | # Copyright (c) 2008-2009 Pedro Matiello <pmatiello@gmail.com>
# Salim Fadhley <sal@stodge.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Exceptions.
"""
# Graph errors
class GraphError(RuntimeError):
"""
    A base-class for the various kinds of errors that occur in the python-graph class.
"""
pass
class AdditionError(GraphError):
"""
This error is raised when trying to add a node or edge already added to the graph or digraph.
"""
pass
class NodeUnreachable(GraphError):
"""
Goal could not be reached from start.
"""
def __init__(self, start, goal):
msg = "Node %s could not be reached from node %s" % ( repr(goal), repr(start) )
        GraphError.__init__(self, msg)
self.start = start
self.goal = goal
class InvalidGraphType(GraphError):
"""
Invalid graph type.
"""
pass
# Algorithm errors
class AlgorithmError(RuntimeError):
"""
    A base-class for the various kinds of errors that occur in the
algorithms package.
"""
pass
class NegativeWeightCycleError(AlgorithmError):
"""
Algorithms like the Bellman-Ford algorithm can detect and raise an exception
when they encounter a negative weight cycle.
@see: pygraph.algorithms.shortest_path_bellman_ford
"""
pass
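# Illustrative sketch added for documentation; not part of python-graph itself.
# It shows how calling code would typically raise NodeUnreachable and recover
# the offending nodes from the caught exception; the node values are arbitrary.
def _node_unreachable_example(start, goal):
    try:
        raise NodeUnreachable(start, goal)
    except GraphError as err:
        return err.start, err.goal, str(err)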
| lgpl-3.0 |
dysya92/monkeys | flask/lib/python2.7/site-packages/wtforms/validators.py | 35 | 16995 | from __future__ import unicode_literals
import re
import warnings
from wtforms.compat import string_types, text_type
__all__ = (
'DataRequired', 'data_required', 'Email', 'email', 'EqualTo', 'equal_to',
'IPAddress', 'ip_address', 'InputRequired', 'input_required', 'Length',
'length', 'NumberRange', 'number_range', 'Optional', 'optional',
'Required', 'required', 'Regexp', 'regexp', 'URL', 'url', 'AnyOf',
'any_of', 'NoneOf', 'none_of', 'MacAddress', 'mac_address', 'UUID'
)
class ValidationError(ValueError):
"""
Raised when a validator fails to validate its input.
"""
def __init__(self, message='', *args, **kwargs):
ValueError.__init__(self, message, *args, **kwargs)
class StopValidation(Exception):
"""
Causes the validation chain to stop.
If StopValidation is raised, no more validators in the validation chain are
called. If raised with a message, the message will be added to the errors
list.
"""
def __init__(self, message='', *args, **kwargs):
Exception.__init__(self, message, *args, **kwargs)
class EqualTo(object):
"""
Compares the values of two fields.
:param fieldname:
The name of the other field to compare to.
:param message:
Error message to raise in case of a validation error. Can be
interpolated with `%(other_label)s` and `%(other_name)s` to provide a
more helpful error.
"""
def __init__(self, fieldname, message=None):
self.fieldname = fieldname
self.message = message
def __call__(self, form, field):
try:
other = form[self.fieldname]
except KeyError:
raise ValidationError(field.gettext("Invalid field name '%s'.") % self.fieldname)
if field.data != other.data:
d = {
'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
'other_name': self.fieldname
}
message = self.message
if message is None:
message = field.gettext('Field must be equal to %(other_name)s.')
raise ValidationError(message % d)
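# Illustrative sketch added for documentation; not part of WTForms itself. It
# shows the usual password-confirmation pattern for EqualTo. The form, field
# names and data values are hypothetical; the import is local so the module
# still loads normally.
def _equal_to_example():
    from wtforms import Form, PasswordField
    class RegistrationForm(Form):
        password = PasswordField('Password')
        confirm = PasswordField('Repeat password',
                                [EqualTo('password', message='Passwords must match.')])
    form = RegistrationForm(password='secret', confirm='not-secret')
    assert not form.validate()
    assert 'Passwords must match.' in form.confirm.errors
    return form.errors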
class Length(object):
"""
Validates the length of a string.
:param min:
The minimum required length of the string. If not provided, minimum
length will not be checked.
:param max:
The maximum length of the string. If not provided, maximum length
will not be checked.
:param message:
Error message to raise in case of a validation error. Can be
interpolated using `%(min)d` and `%(max)d` if desired. Useful defaults
are provided depending on the existence of min and max.
"""
def __init__(self, min=-1, max=-1, message=None):
assert min != -1 or max != -1, 'At least one of `min` or `max` must be specified.'
assert max == -1 or min <= max, '`min` cannot be more than `max`.'
self.min = min
self.max = max
self.message = message
def __call__(self, form, field):
l = field.data and len(field.data) or 0
if l < self.min or self.max != -1 and l > self.max:
message = self.message
if message is None:
if self.max == -1:
message = field.ngettext('Field must be at least %(min)d character long.',
'Field must be at least %(min)d characters long.', self.min)
elif self.min == -1:
message = field.ngettext('Field cannot be longer than %(max)d character.',
'Field cannot be longer than %(max)d characters.', self.max)
else:
message = field.gettext('Field must be between %(min)d and %(max)d characters long.')
raise ValidationError(message % dict(min=self.min, max=self.max, length=l))
class NumberRange(object):
"""
Validates that a number is of a minimum and/or maximum value, inclusive.
This will work with any comparable number type, such as floats and
decimals, not just integers.
:param min:
The minimum required value of the number. If not provided, minimum
value will not be checked.
:param max:
The maximum value of the number. If not provided, maximum value
will not be checked.
:param message:
Error message to raise in case of a validation error. Can be
interpolated using `%(min)s` and `%(max)s` if desired. Useful defaults
are provided depending on the existence of min and max.
"""
def __init__(self, min=None, max=None, message=None):
self.min = min
self.max = max
self.message = message
def __call__(self, form, field):
data = field.data
if data is None or (self.min is not None and data < self.min) or \
(self.max is not None and data > self.max):
message = self.message
if message is None:
# we use %(min)s interpolation to support floats, None, and
# Decimals without throwing a formatting exception.
if self.max is None:
message = field.gettext('Number must be at least %(min)s.')
elif self.min is None:
message = field.gettext('Number must be at most %(max)s.')
else:
message = field.gettext('Number must be between %(min)s and %(max)s.')
raise ValidationError(message % dict(min=self.min, max=self.max))
class Optional(object):
"""
Allows empty input and stops the validation chain from continuing.
If input is empty, also removes prior errors (such as processing errors)
from the field.
:param strip_whitespace:
If True (the default) also stop the validation chain on input which
consists of only whitespace.
"""
field_flags = ('optional', )
def __init__(self, strip_whitespace=True):
if strip_whitespace:
self.string_check = lambda s: s.strip()
else:
self.string_check = lambda s: s
def __call__(self, form, field):
if not field.raw_data or isinstance(field.raw_data[0], string_types) and not self.string_check(field.raw_data[0]):
field.errors[:] = []
raise StopValidation()
class DataRequired(object):
"""
Checks the field's data is 'truthy' otherwise stops the validation chain.
This validator checks that the ``data`` attribute on the field is a 'true'
value (effectively, it does ``if field.data``.) Furthermore, if the data
is a string type, a string containing only whitespace characters is
considered false.
If the data is empty, also removes prior errors (such as processing errors)
from the field.
**NOTE** this validator used to be called `Required` but the way it behaved
(requiring coerced data, not input data) meant it functioned in a way
which was not symmetric to the `Optional` validator and furthermore caused
confusion with certain fields which coerced data to 'falsey' values like
``0``, ``Decimal(0)``, ``time(0)`` etc. Unless a very specific reason
exists, we recommend using the :class:`InputRequired` instead.
:param message:
Error message to raise in case of a validation error.
"""
field_flags = ('required', )
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if not field.data or isinstance(field.data, string_types) and not field.data.strip():
if self.message is None:
message = field.gettext('This field is required.')
else:
message = self.message
field.errors[:] = []
raise StopValidation(message)
class Required(DataRequired):
"""
Legacy alias for DataRequired.
This is needed over simple aliasing for those who require that the
class-name of required be 'Required.'
"""
def __init__(self, *args, **kwargs):
super(Required, self).__init__(*args, **kwargs)
warnings.warn('Required is going away in WTForms 3.0, use DataRequired', DeprecationWarning)
class InputRequired(object):
"""
Validates that input was provided for this field.
Note there is a distinction between this and DataRequired in that
InputRequired looks that form-input data was provided, and DataRequired
looks at the post-coercion data.
"""
field_flags = ('required', )
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if not field.raw_data or not field.raw_data[0]:
if self.message is None:
message = field.gettext('This field is required.')
else:
message = self.message
field.errors[:] = []
raise StopValidation(message)
class Regexp(object):
"""
Validates the field against a user provided regexp.
:param regex:
The regular expression string to use. Can also be a compiled regular
expression pattern.
:param flags:
The regexp flags to use, for example re.IGNORECASE. Ignored if
`regex` is not a string.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, regex, flags=0, message=None):
if isinstance(regex, string_types):
regex = re.compile(regex, flags)
self.regex = regex
self.message = message
def __call__(self, form, field, message=None):
if not self.regex.match(field.data or ''):
if message is None:
if self.message is None:
message = field.gettext('Invalid input.')
else:
message = self.message
raise ValidationError(message)
class Email(Regexp):
"""
Validates an email address. Note that this uses a very primitive regular
expression and should only be used in instances where you later verify by
other means, such as email activation or lookups.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE, message)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid email address.')
super(Email, self).__call__(form, field, message)
class IPAddress(object):
"""
Validates an IP address.
:param ipv4:
If True, accept IPv4 addresses as valid (default True)
:param ipv6:
If True, accept IPv6 addresses as valid (default False)
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, ipv4=True, ipv6=False, message=None):
if not ipv4 and not ipv6:
raise ValueError('IP Address Validator must have at least one of ipv4 or ipv6 enabled.')
self.ipv4 = ipv4
self.ipv6 = ipv6
self.message = message
def __call__(self, form, field):
value = field.data
valid = False
if value:
valid = (self.ipv4 and self.check_ipv4(value)) or (self.ipv6 and self.check_ipv6(value))
if not valid:
message = self.message
if message is None:
message = field.gettext('Invalid IP address.')
raise ValidationError(message)
def check_ipv4(self, value):
parts = value.split('.')
if len(parts) == 4 and all(x.isdigit() for x in parts):
numbers = list(int(x) for x in parts)
return all(num >= 0 and num < 256 for num in numbers)
return False
def check_ipv6(self, value):
parts = value.split(':')
if len(parts) > 8:
return False
num_blank = 0
for part in parts:
if not part:
num_blank += 1
else:
try:
value = int(part, 16)
except ValueError:
return False
else:
if value < 0 or value >= 65536:
return False
if num_blank < 2:
return True
elif num_blank == 2 and not parts[0] and not parts[1]:
return True
return False
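# Illustrative sketch added for documentation; not part of WTForms itself. The
# check_ipv4/check_ipv6 helpers above operate on plain strings, so they can be
# exercised without a form or field; the sample addresses are arbitrary.
def _ip_address_example():
    v4_only = IPAddress()                         # ipv4=True, ipv6=False by default
    both = IPAddress(ipv4=True, ipv6=True)
    assert v4_only.check_ipv4('192.168.0.1')
    assert not v4_only.check_ipv4('999.168.0.1')  # octet out of range
    assert both.check_ipv6('2001:db8::1')
    assert not both.check_ipv6('2001:db8::1::2')  # malformed: two '::' groups
    return True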
class MacAddress(Regexp):
"""
Validates a MAC address.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
pattern = r'^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$'
super(MacAddress, self).__init__(pattern, message=message)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid Mac address.')
super(MacAddress, self).__call__(form, field, message)
class URL(Regexp):
"""
Simple regexp based url validation. Much like the email validator, you
probably want to validate the url later by other means if the url must
resolve.
:param require_tld:
If true, then the domain-name portion of the URL must contain a .tld
suffix. Set this to false if you want to allow domains like
`localhost`.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, require_tld=True, message=None):
tld_part = (require_tld and r'\.[a-z]{2,10}' or '')
regex = r'^[a-z]+://([^/:]+%s|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$' % tld_part
super(URL, self).__init__(regex, re.IGNORECASE, message)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid URL.')
super(URL, self).__call__(form, field, message)
class UUID(Regexp):
"""
Validates a UUID.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
pattern = r'^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$'
super(UUID, self).__init__(pattern, message=message)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid UUID.')
super(UUID, self).__call__(form, field, message)
class AnyOf(object):
"""
Compares the incoming data to a sequence of valid inputs.
:param values:
A sequence of valid inputs.
:param message:
Error message to raise in case of a validation error. `%(values)s`
contains the list of values.
:param values_formatter:
Function used to format the list of values in the error message.
"""
def __init__(self, values, message=None, values_formatter=None):
self.values = values
self.message = message
if values_formatter is None:
values_formatter = self.default_values_formatter
self.values_formatter = values_formatter
def __call__(self, form, field):
if field.data not in self.values:
message = self.message
if message is None:
message = field.gettext('Invalid value, must be one of: %(values)s.')
raise ValidationError(message % dict(values=self.values_formatter(self.values)))
@staticmethod
def default_values_formatter(values):
return ', '.join(text_type(x) for x in values)
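# Illustrative sketch added for documentation; not part of WTForms itself. The
# stub field below is a simplified stand-in for a real wtforms field: it only
# provides the `data` attribute and `gettext()` hook that AnyOf.__call__ uses.
def _any_of_example():
    class _StubField(object):
        def __init__(self, data):
            self.data = data
        def gettext(self, string):
            return string
    validator = AnyOf(['red', 'green', 'blue'])
    validator(None, _StubField('green'))         # allowed value, passes silently
    try:
        validator(None, _StubField('purple'))    # not in the allowed values
    except ValidationError as exc:
        return str(exc)  # 'Invalid value, must be one of: red, green, blue.'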
class NoneOf(object):
"""
Compares the incoming data to a sequence of invalid inputs.
:param values:
A sequence of invalid inputs.
:param message:
Error message to raise in case of a validation error. `%(values)s`
contains the list of values.
:param values_formatter:
Function used to format the list of values in the error message.
"""
def __init__(self, values, message=None, values_formatter=None):
self.values = values
self.message = message
if values_formatter is None:
values_formatter = lambda v: ', '.join(text_type(x) for x in v)
self.values_formatter = values_formatter
def __call__(self, form, field):
if field.data in self.values:
message = self.message
if message is None:
message = field.gettext('Invalid value, can\'t be any of: %(values)s.')
raise ValidationError(message % dict(values=self.values_formatter(self.values)))
email = Email
equal_to = EqualTo
ip_address = IPAddress
mac_address = MacAddress
length = Length
number_range = NumberRange
optional = Optional
required = Required
input_required = InputRequired
data_required = DataRequired
regexp = Regexp
url = URL
any_of = AnyOf
none_of = NoneOf
| bsd-3-clause |
Darkmoth/python-django-4 | Thing/env/Lib/site-packages/django/contrib/admin/options.py | 66 | 84355 | import copy
import operator
import warnings
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, validation, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, flatten_fieldsets, get_deleted_objects,
lookup_needs_distinct, model_format_dict, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core import checks
from django.core.exceptions import (
FieldDoesNotExist, FieldError, ImproperlyConfigured, PermissionDenied,
ValidationError,
)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat, ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
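# Illustrative sketch, not part of Django: a project's ModelAdmin subclass
# usually overrides individual entries rather than this whole mapping, e.g.
#
#     class ArticleAdmin(admin.ModelAdmin):
#         formfield_overrides = {
#             models.TextField: {'widget': widgets.AdminTextareaWidget(attrs={'rows': 4})},
#         }
#
# ArticleAdmin is hypothetical. The per-class dict is merged over the defaults
# above in BaseModelAdmin.__init__, so unlisted field types keep their defaults.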
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
# Validation of ModelAdmin definitions
# Old, deprecated style:
validator_class = None
default_validator_class = validation.BaseValidator
# New style:
checks_class = BaseModelAdminChecks
@classmethod
def validate(cls, model):
warnings.warn(
'ModelAdmin.validate() is deprecated. Use "check()" instead.',
RemovedInDjango19Warning)
if cls.validator_class:
validator = cls.validator_class()
else:
validator = cls.default_validator_class()
validator.validate(cls, model)
@classmethod
def check(cls, model, **kwargs):
if cls.validator_class:
warnings.warn(
'ModelAdmin.validator_class is deprecated. '
'ModelAdmin validators must be converted to use '
'the system check framework.',
RemovedInDjango19Warning)
validator = cls.validator_class()
try:
validator.validate(cls, model)
except ImproperlyConfigured as e:
return [checks.Error(e.args[0], hint=None, obj=cls)]
else:
return []
else:
return cls.checks_class().check(cls, model, **kwargs)
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.rel, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(returns None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.rel.to, None)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.rel.to._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel,
self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.rel.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel,
self.admin_site, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
@property
def declared_fieldsets(self):
warnings.warn(
"ModelAdmin.declared_fieldsets is deprecated and "
"will be removed in Django 1.9.",
RemovedInDjango19Warning, stacklevel=2
)
if self.fieldsets:
return self.fieldsets
elif self.fields:
return [(None, {'fields': self.fields})]
return None
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
# We access the property and check if it triggers a warning.
# If it does, then it's ours and we can safely ignore it, but if
# it doesn't then it has been overridden so we must warn about the
# deprecation.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
declared_fieldsets = self.declared_fieldsets
if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
warnings.warn(
"ModelAdmin.declared_fieldsets is deprecated and "
"will be removed in Django 1.9.",
RemovedInDjango19Warning
)
if declared_fieldsets:
return declared_fieldsets
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(l):
l = l()
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
if field.rel is None:
# This property or relation doesn't exist, but it's allowed
# since it's ignored in ChangeList.get_filters().
return True
model = field.rel.to
if hasattr(field.rel, 'get_related_field'):
rel_name = field.rel.get_related_field().name
else:
rel_name = None
elif isinstance(field, ForeignObjectRel):
model = field.related_model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
valid_lookups = [self.date_hierarchy]
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.append(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.append(filter_item[0])
else:
valid_lookups.append(filter_item)
return clean_lookup in valid_lookups
def to_field_allowed(self, request, to_field):
"""
Returns True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
if (any(issubclass(model, related_model) for model in registered_models) and
related_object.field.rel.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
# validation
# Old, deprecated style:
default_validator_class = validation.ModelAdminValidator
# New style:
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info),
]
return urlpatterns
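    # Illustrative sketch, not part of Django: subclasses commonly extend these
    # patterns, prepending their own so the catch-all change view stays last:
    #
    #     def get_urls(self):
    #         custom = [
    #             url(r'^statistics/$',
    #                 self.admin_site.admin_view(self.statistics_view),
    #                 name='myapp_mymodel_statistics'),
    #         ]
    #         return custom + super(ArticleAdmin, self).get_urls()
    #
    # ArticleAdmin, statistics_view and the URL name are hypothetical.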
def urls(self):
return self.get_urls()
urls = property(urls)
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'admin/RelatedObjectLookups.js',
'jquery%s.js' % extra,
'jquery.init.js'
]
if self.actions is not None:
js.append('actions%s.js' % extra)
if self.prepopulated_fields:
js.extend(['urlify.js', 'prepopulate%s.js' % extra])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
defaults = {
"form": self.form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__))
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
Returns an instance matching the field and value provided, the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if (defaults.get('fields') is None
and not modelform_defines_fields(defaults.get('form'))):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def _get_formsets(self, request, obj):
"""
Helper function that exists to allow the deprecation warning to be
executed while this function continues to return a generator.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj)
def get_formsets(self, request, obj=None):
warnings.warn(
"ModelAdmin.get_formsets() is deprecated and will be removed in "
"Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
RemovedInDjango19Warning, stacklevel=2
)
return self._get_formsets(request, obj)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yields formsets and the corresponding inlines.
"""
# We call get_formsets() [deprecated] and check if it triggers a
# warning. If it does, then it's ours and we can safely ignore it, but
# if it doesn't then it has been overridden so we must warn about the
# deprecation.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
formsets = self.get_formsets(request, obj)
if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
warnings.warn(
"ModelAdmin.get_formsets() is deprecated and will be removed in "
"Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
RemovedInDjango19Warning, stacklevel=2
)
if formsets:
zipped = zip(formsets, self.get_inline_instances(request, None))
for formset, inline in zipped:
yield formset, inline
else:
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return OrderedDict()
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend(self.get_action(action) for action in class_actions)
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into an OrderedDict keyed by name.
actions = OrderedDict(
(name, (func, name, desc))
for func, name, desc in actions
)
return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
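    # Illustrative sketch, not part of Django: the three lookup paths above mean
    # an action may be a plain callable, a ModelAdmin method, or a site-wide
    # action. A hypothetical method-style action looks like:
    #
    #     class ArticleAdmin(ModelAdmin):
    #         actions = ['make_published']
    #
    #         def make_published(self, request, queryset):
    #             updated = queryset.update(status='published')
    #             self.message_user(request, "%d articles published." % updated)
    #         make_published.short_description = "Mark selected articles as published"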
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Returns a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_search_fields(self, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
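    # Illustrative sketch, not part of Django: construct_search() above maps the
    # search_fields prefixes onto ORM lookups. For a hypothetical admin such as
    #
    #     class PersonAdmin(ModelAdmin):
    #         search_fields = ('=id', '^last_name', 'biography', '@notes')
    #
    # '=id' becomes id__iexact, '^last_name' becomes last_name__istartswith,
    # 'biography' becomes biography__icontains and '@notes' becomes notes__search
    # (the last one requires a backend with full-text search support).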
def get_preserved_filters(self, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def construct_change_message(self, request, form, formsets):
"""
Construct a change message from a changed object.
"""
change_message = []
if form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': force_text(added_object._meta.verbose_name),
'object': force_text(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': force_text(changed_object._meta.verbose_name),
'object': force_text(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': force_text(deleted_object._meta.verbose_name),
'object': force_text(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError('Bad message level string: `%s`. '
'Possible values are: %s' % (level, levels_repr))
messages.add_message(request, level, message, extra_tags=extra_tags,
fail_silently=fail_silently)
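    # Illustrative sketch, not part of Django: because `level` may be passed as a
    # string, the two calls below are equivalent inside a ModelAdmin method
    # (the message text is hypothetical):
    #
    #     self.message_user(request, "Import finished.", level=messages.WARNING)
    #     self.message_user(request, "Import finished.", level='WARNING')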
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'pk_value': escape(pk_value), # for possible backwards-compatibility
'value': escape(value),
'obj': escapejs(obj)
})
elif "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name)
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'change',
'value': escape(value),
'obj': escapejs(obj),
'new_value': escape(new_value),
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
            # If we didn't get an action from the chosen form, the POST data
            # is invalid; leaving 'action' unset makes the validation check
            # below fail, so there is nothing more to do here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
            # Get the list of selected PKs. If nothing is selected, bail out,
            # unless select_across was requested, in which case the action is
            # performed on every object in the queryset.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
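    # Illustrative sketch of an action compatible with the dispatch above,
    # which calls func(self, request, queryset); the "status" field and the
    # admin class are hypothetical, while short_description and the actions
    # list follow the usual admin conventions.
    #
    #   def make_published(modeladmin, request, queryset):
    #       queryset.update(status='published')
    #   make_published.short_description = "Mark selected items as published"
    #
    #   class ArticleAdmin(ModelAdmin):
    #       actions = [make_published]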
def response_delete(self, request, obj_display, obj_id):
"""
Determines the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'delete',
'value': escape(obj_id),
})
self.message_user(request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display),
}, messages.SUCCESS)
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
)
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
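    # Illustrative sketch of the behaviour above, assuming a hypothetical
    # model with a CharField "title" and a ManyToManyField "tags":
    #
    #   /admin/myapp/article/add/?title=Draft&tags=1,2,3
    #
    # produces initial data roughly equivalent to
    #
    #   {'title': 'Draft', 'tags': ['1', '2', '3']}
    #
    # because ManyToMany values are split on commas.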
@csrf_protect_m
@transaction.atomic
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url=reverse('admin:%s_%s_add' % (
opts.app_label, opts.model_name),
current_app=self.admin_site.name))
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
if add:
self.log_addition(request, new_object)
return self.response_add(request, new_object)
else:
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, self.model(), change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
# Add the action checkboxes if there are any actions available.
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
search_fields, self.list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if (request.method == "POST" and cl.list_editable and
'_save' in request.POST and not action_failed):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = dict(
self.admin_site.each_context(request),
module_name=force_text(opts.verbose_name_plural),
selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
selection_note_all=selection_note_all % {'total_count': cl.result_count},
title=cl.title,
is_popup=cl.is_popup,
to_field=cl.to_field,
cl=cl,
media=media,
has_add_permission=self.has_add_permission(request),
opts=cl.opts,
action_form=action_form,
actions_on_top=self.actions_on_top,
actions_on_bottom=self.actions_on_bottom,
actions_selection_counter=self.actions_selection_counter,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST,
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra]
if self.prepopulated_fields:
js.extend(['urlify.js', 'prepopulate%s.js' % extra])
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super(DeleteProtectedModelForm, self).is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super(InlineModelAdmin, self).get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super(InlineModelAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
            # The model was auto-created as an intermediary for a
            # ManyToMany relationship; find the target model.
for field in opts.fields:
if field.rel and field.rel.to != self.parent_model:
opts = field.rel.to._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
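# Illustrative sketch of the fk_name behaviour described in the
# InlineModelAdmin docstring, assuming hypothetical models where Link has two
# ForeignKeys to Page ("source" and "target"); fk_name tells the inline which
# of them points at the parent.
#
#   class OutgoingLinkInline(TabularInline):
#       model = Link
#       fk_name = 'source'  # required: Link has more than one FK to Page
#       extra = 1
#
#   class PageAdmin(ModelAdmin):
#       inlines = [OutgoingLinkInline]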
| gpl-2.0 |
nicoboss/Floatmotion | OpenGL/GL/ARB/cl_event.py | 9 | 1133 | '''OpenGL extension ARB.cl_event
This module customises the behaviour of the
OpenGL.raw.GL.ARB.cl_event to provide a more
Python-friendly API
Overview (from the spec)
This extension allows creating OpenGL sync objects linked to OpenCL
event objects, potentially improving efficiency of sharing images
and buffers between the two APIs. The companion cl_khr_gl_event
OpenCL extension provides the complementary functionality of
creating an OpenCL event object from an OpenGL fence sync object.
That extension is located in the OpenCL API Registry.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/cl_event.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.cl_event import *
from OpenGL.raw.GL.ARB.cl_event import _EXTENSION_NAME
def glInitClEventARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | agpl-3.0 |
trungnt13/dnntoolkit | tests/benchmark_dataset.py | 1 | 5679 | # python -m memory_profiler
# dataset used for benchmark: X=(None, 500, 120)
# dataset used for benchmark: y=(None, 2)
from __future__ import print_function, division
import os
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"
import theano
from theano import tensor
import numpy as np
import scipy as sp
import dnntoolkit
from memory_profiler import profile
import time
from itertools import izip
# ======================================================================
# Test
# ======================================================================
@profile
def main():
t = dnntoolkit.dataset(
'/volumes/backup/data/logmel_500_100_50_fre', 'r')
# print(t)
# print()
X = t['X_train', 'X_valid']
y = t['y_train', 'y_valid']
it = izip(X.iter(512, shuffle=True, mode=1),
y.iter(512, shuffle=True, mode=1))
n_it = 0
size = 0
start = time.time()
for i, j in it:
if i.shape[0] != j.shape[0]:
print('Shit happened')
n_it += 1
size += i.shape[0]
end = time.time() - start
print('Total %d iterations' % n_it)
print('Time:', end)
print('Avr/batch:', end / n_it)
print('Avr/sample:', end / size)
t.close()
pass
if __name__ == '__main__':
main()
# ======================================================================
# batch=512, block=20
# ======================================================================
# Total 27 iterations
# Time: 49.5420119762
# Avr/batch: 1.83488933245
# Avr/sample: 0.00398536014611
# Total 27 iterations
# Time: 43.8584640026
# Avr/batch: 1.62438755565
# Avr/sample: 0.00352815252213
# Total 27 iterations
# Time: 49.1226298809
# Avr/batch: 1.81935666226
# Avr/sample: 0.00395162335137
# Line # Mem usage Increment Line Contents
# ================================================
# 19 92.2 MiB 0.0 MiB @profile
# 20 def main():
# 21 92.2 MiB 0.0 MiB t = dnntoolkit.dataset(
# 22 93.3 MiB 1.1 MiB '/volumes/backup/data/logmel_500_100_50_fre', 'r')
# 23 93.4 MiB 0.0 MiB print(t)
# 24
# 25 93.4 MiB 0.1 MiB X = t['X_train', 'X_valid']
# 26 93.4 MiB 0.0 MiB y = t['y_train', 'y_valid']
# 27 93.4 MiB 0.0 MiB it = izip(X.iter(512, block_size=20, shuffle=True, mode=0),
# 28 93.4 MiB 0.0 MiB y.iter(512, block_size=20, shuffle=True, mode=0))
# 29 93.4 MiB 0.0 MiB n_it = 0
# 30 93.4 MiB 0.0 MiB size = 0
# 31 93.4 MiB 0.0 MiB start = time.time()
# 32 1678.8 MiB 1585.4 MiB for i, j in it:
# 33 1678.8 MiB 0.0 MiB if i.shape[0] != j.shape[0]:
# 34 print('Shit happened')
# 35 1678.8 MiB 0.0 MiB n_it += 1
# 36 1678.8 MiB 0.0 MiB size += i.shape[0]
# 37 305.6 MiB -1373.2 MiB end = time.time() - start
# 38 305.6 MiB 0.0 MiB print('Total %d iterations' % n_it)
# 39 305.6 MiB 0.0 MiB print('Time:', end)
# 40 305.6 MiB 0.0 MiB print('Avr/batch:', end / n_it)
# 41 305.6 MiB 0.0 MiB print('Avr/sample:', end / size)
# 42 307.4 MiB 1.7 MiB t.close()
# 43 307.4 MiB 0.0 MiB pass
# ======================================================================
# batch=512, block=10
# ======================================================================
# Total 27 iterations
# Time: 35.2815492153
# Avr/batch: 1.30672404501
# Avr/sample: 0.00283819075017
# Total 27 iterations
# Time: 41.0986790657
# Avr/batch: 1.52217329873
# Avr/sample: 0.00330614424147
# Total 27 iterations
# Time: 44.3804409504
# Avr/batch: 1.6437200352
# Avr/sample: 0.00357014246242
# Line # Mem usage Increment Line Contents
# ================================================
# 20 92.1 MiB 0.0 MiB @profile
# 21 def main():
# 22 92.1 MiB 0.0 MiB t = dnntoolkit.dataset(
# 23 93.2 MiB 1.1 MiB '/volumes/backup/data/logmel_500_100_50_fre', 'r')
# 24 # print(t)
# 25 # print()
# 26
# 27 93.3 MiB 0.1 MiB X = t['X_train', 'X_valid']
# 28 93.3 MiB 0.0 MiB y = t['y_train', 'y_valid']
# 29 93.3 MiB 0.0 MiB it = izip(X.iter(512, block_size=10, shuffle=True, mode=0),
# 30 93.3 MiB 0.0 MiB y.iter(512, block_size=10, shuffle=True, mode=0))
# 31 93.3 MiB 0.0 MiB n_it = 0
# 32 93.3 MiB 0.0 MiB size = 0
# 33 93.3 MiB 0.0 MiB start = time.time()
# 34 1332.7 MiB 1239.4 MiB for i, j in it:
# 35 1332.7 MiB 0.0 MiB if i.shape[0] != j.shape[0]:
# 36 print('Shit happened')
# 37 1332.7 MiB 0.0 MiB n_it += 1
# 38 1332.7 MiB 0.0 MiB size += i.shape[0]
# 39 491.1 MiB -841.6 MiB end = time.time() - start
# 40 491.1 MiB 0.0 MiB print('Total %d iterations' % n_it)
# 41 491.2 MiB 0.0 MiB print('Time:', end)
# 42 491.2 MiB 0.0 MiB print('Avr/batch:', end / n_it)
# 43 491.2 MiB 0.0 MiB print('Avr/sample:', end / size)
# 44 493.5 MiB 2.4 MiB t.close()
# 45 493.5 MiB 0.0 MiB pass
| apache-2.0 |
suoto/hdlcc | hdl_checker/types.py | 1 | 3057 | # This file is part of HDL Checker.
#
# Copyright (c) 2015 - 2019 suoto (Andre Souto)
#
# HDL Checker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HDL Checker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDL Checker. If not, see <http://www.gnu.org/licenses/>.
"Common type definitions for type hinting"
from collections import namedtuple
from enum import Enum
from typing import NamedTuple, Optional, Tuple, Union
from hdl_checker.exceptions import UnknownTypeExtension
from hdl_checker.parsers.elements.identifier import Identifier
from hdl_checker.path import Path
class DesignUnitType(str, Enum):
"Specifies tracked design unit types"
package = "package"
entity = "entity"
context = "context"
BuildFlags = Tuple[str, ...]
LibraryAndUnit = namedtuple("LibraryAndUnit", ["library", "unit"])
RebuildUnit = NamedTuple(
"RebuildUnit", (("name", Identifier), ("type_", DesignUnitType))
)
RebuildLibraryUnit = NamedTuple(
"RebuildLibraryUnit", (("name", Identifier), ("library", Identifier))
)
RebuildPath = NamedTuple("RebuildPath", (("path", Path),))
RebuildInfo = Union[RebuildUnit, RebuildLibraryUnit, RebuildPath]
class FileType(Enum):
"RTL file types"
vhdl = "vhdl"
verilog = "verilog"
systemverilog = "systemverilog"
@staticmethod
def fromPath(path):
# type: (Path) -> FileType
"Extracts FileType from the given path's extension"
ext = path.name.split(".")[-1].lower()
if ext in ("vhd", "vhdl"):
return FileType.vhdl
if ext in ("v", "vh"):
return FileType.verilog
if ext in ("sv", "svh"):
return FileType.systemverilog
raise UnknownTypeExtension(path)
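    # Illustrative sketch of the extension mapping above; the paths are
    # hypothetical.
    #
    #   >>> FileType.fromPath(Path("rtl/counter.vhd"))
    #   <FileType.vhdl: 'vhdl'>
    #   >>> FileType.fromPath(Path("tb/counter_tb.sv"))
    #   <FileType.systemverilog: 'systemverilog'>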
def __jsonEncode__(self):
"""
Gets a dict that describes the current state of this object
"""
return {"value": self.name}
@classmethod
def __jsonDecode__(cls, state):
"""Returns an object of cls based on a given state"""
return cls(state["value"])
class BuildFlagScope(Enum):
"""
Scopes of a given set of flags. Values of the items control the actual
fields extracted from the JSON config
"""
single = "single"
dependencies = "dependencies"
all = "global"
class MarkupKind(Enum):
"LSP Markup kinds"
PlainText = "plaintext"
Markdown = "markdown"
# A location on a source file
Location = NamedTuple("Location", (("line", Optional[int]), ("column", Optional[int])))
# A location range within a source file
Range = NamedTuple("Range", (("start", Location), ("end", Optional[Location])))
| gpl-3.0 |
dplorimer/osf | website/addons/osfstorage/tests/test_utils.py | 13 | 2418 | #!/usr/bin/env python
# encoding: utf-8
from nose.tools import * # noqa
from framework import sessions
from framework.flask import request
from website.models import Session
from website.addons.osfstorage.tests import factories
from website.addons.osfstorage import utils
from website.addons.osfstorage.tests.utils import StorageTestCase
class TestSerializeRevision(StorageTestCase):
def setUp(self):
super(TestSerializeRevision, self).setUp()
self.path = 'kind-of-magic.webm'
self.record = self.node_settings.root_node.append_file(self.path)
self.versions = [
factories.FileVersionFactory(creator=self.user)
for __ in range(3)
]
self.record.versions = self.versions
self.record.save()
def test_serialize_revision(self):
sessions.sessions[request._get_current_object()] = Session()
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 2)
expected = {
'index': 1,
'user': {
'name': self.user.fullname,
'url': self.user.url,
},
'date': self.versions[0].date_created.isoformat(),
'downloads': 2,
}
observed = utils.serialize_revision(
self.project,
self.record,
self.versions[0],
0,
)
assert_equal(expected, observed)
assert_equal(self.record.get_download_count(), 3)
assert_equal(self.record.get_download_count(version=2), 1)
assert_equal(self.record.get_download_count(version=0), 2)
def test_anon_revisions(self):
sessions.sessions[request._get_current_object()] = Session()
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 2)
expected = {
'index': 2,
'user': None,
'date': self.versions[0].date_created.isoformat(),
'downloads': 0,
}
observed = utils.serialize_revision(
self.project,
self.record,
self.versions[0],
1,
anon=True
)
assert_equal(expected, observed)
| apache-2.0 |
mrmoss/exorcist | exorcist.py | 1 | 2875 | #!/usr/bin/python2
import hashlib
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import os
from scapy.all import *
import sys
# Returns a list of (session, carving) tuples, where carving is an HTTP
# message body extracted from the stream.
def carve_http(streams):
ret=[]
requests=[]
for stream in streams:
start_pos=0
raw=stream[1]+"\r\n\r\n"
carving=""
header=""
while raw.find("\r\n\r\n",start_pos)>=0:
end_pos=raw.index("\r\n\r\n",start_pos)
header=raw[start_pos:end_pos]
end_pos+=4
if header[:4]=="HTTP":
try:
header=dict(re.findall(r'(?P<name>.*?):(?P<value>.*?)\r\n',header))
header=dict((key.lower(),value) for key,value in header.iteritems())
if "transfer-encoding" in header and header["transfer-encoding"].strip()=="chunked":
while raw.find("\r\n",end_pos)>=0:
chunk_size=raw[end_pos:raw.find("\r\n",end_pos)]
if len(chunk_size)<=0:
break
chunk_size=int(chunk_size,16)
end_pos=raw.find("\r\n",end_pos)+2
carving+=raw[end_pos:end_pos+chunk_size]
end_pos+=chunk_size+2
elif "content-length" in header:
size=int(header["content-length"].strip())
carving=raw[end_pos:end_pos+size]
end_pos+=size
except Exception:
pass
			if len(carving)>0 and carving!="\r\n\r\n":
				ret.append((stream,carving))
				# Reset so the trailing empty block at the end of the stream
				# does not append the same carving a second time.
				carving=""
			start_pos=end_pos
return ret
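# Illustrative sketch of the data shapes involved (the session string and the
# payload are hypothetical): carve_http() takes the [(session, payload)] list
# produced by get_streams() below and pairs each stream with its carved body,
# roughly as follows.
#
#   streams = [("TCP 10.0.0.2:49152 > 93.184.216.34:80",
#               "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n"
#               "Connection: close\r\n\r\nhello")]
#   carve_http(streams)  # -> [(streams[0], "hello")]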
# Expects carvings as a list of (session, carving) tuples; writes each carving
# under out/<session>/ and returns the running count of files written.
def save_carvings(carvings,out,count_start=0):
count=count_start
try:
for carving in carvings:
file_folder=str(carving[0][0])
file_folder=file_folder.replace(" ","_")
file_folder=file_folder.replace(">","TO")
file_path=out+"/"+file_folder+"/"
if not os.path.isdir(file_path):
os.makedirs(file_path)
full_path=file_path+hashlib.sha1(carving[1]).hexdigest()
print("\tSaving \""+full_path+"\"")
file=open(full_path,'w')
file.write(carving[1])
file.close()
count+=1
except Exception as error:
print(error)
raise Exception("Error saving files.")
	# Returning from a "finally" block would swallow the exception raised
	# above, so return normally instead.
	return count
# Returns a list of (session, payload) tuples, where payload is the
# concatenated TCP payload of that session.
def get_streams(filename):
try:
cap=rdpcap(filename)
sessions=cap.sessions()
ret=[]
for session in sessions:
payload=""
payload_chunked=""
for packet in sessions[session]:
if TCP in packet and type(packet[TCP].payload)==Raw:
packet_payload=str(packet[TCP].payload)
payload+=packet_payload
ret.append((session,payload));
return ret
except Exception:
raise Exception("Error opening pcap \""+filename+"\".")
if __name__=="__main__":
if len(sys.argv)<=1:
print("Usage: ./exorcist.py file.pcap ...")
exit(1)
files_wrote=0
for ii in range(1,len(sys.argv)):
filename=str(sys.argv[ii])
print("Processing \""+filename+"\"")
try:
streams=get_streams(filename)
carvings=carve_http(streams)
files_wrote=save_carvings(carvings,"out/"+filename,files_wrote)
except Exception as error:
print(error)
| unlicense |
rushiagr/keystone | keystone/tests/unit/test_v2.py | 2 | 52189 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import uuid
from keystoneclient.common import cms
from oslo_config import cfg
import six
from testtools import matchers
from keystone.common import extension as keystone_extension
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import rest
CONF = cfg.CONF
class CoreApiTests(object):
def assertValidError(self, error):
self.assertIsNotNone(error.get('code'))
self.assertIsNotNone(error.get('title'))
self.assertIsNotNone(error.get('message'))
def assertValidVersion(self, version):
self.assertIsNotNone(version)
self.assertIsNotNone(version.get('id'))
self.assertIsNotNone(version.get('status'))
self.assertIsNotNone(version.get('updated'))
def assertValidExtension(self, extension):
self.assertIsNotNone(extension)
self.assertIsNotNone(extension.get('name'))
self.assertIsNotNone(extension.get('namespace'))
self.assertIsNotNone(extension.get('alias'))
self.assertIsNotNone(extension.get('updated'))
def assertValidExtensionLink(self, link):
self.assertIsNotNone(link.get('rel'))
self.assertIsNotNone(link.get('type'))
self.assertIsNotNone(link.get('href'))
def assertValidTenant(self, tenant):
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
def assertValidUser(self, user):
self.assertIsNotNone(user.get('id'))
self.assertIsNotNone(user.get('name'))
def assertValidRole(self, tenant):
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
def test_public_not_found(self):
r = self.public_request(
path='/%s' % uuid.uuid4().hex,
expected_status=404)
self.assertValidErrorResponse(r)
def test_admin_not_found(self):
r = self.admin_request(
path='/%s' % uuid.uuid4().hex,
expected_status=404)
self.assertValidErrorResponse(r)
def test_public_multiple_choice(self):
r = self.public_request(path='/', expected_status=300)
self.assertValidMultipleChoiceResponse(r)
def test_admin_multiple_choice(self):
r = self.admin_request(path='/', expected_status=300)
self.assertValidMultipleChoiceResponse(r)
def test_public_version(self):
r = self.public_request(path='/v2.0/')
self.assertValidVersionResponse(r)
def test_admin_version(self):
r = self.admin_request(path='/v2.0/')
self.assertValidVersionResponse(r)
def test_public_extensions(self):
r = self.public_request(path='/v2.0/extensions')
self.assertValidExtensionListResponse(
r, keystone_extension.PUBLIC_EXTENSIONS)
def test_admin_extensions(self):
r = self.admin_request(path='/v2.0/extensions')
self.assertValidExtensionListResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
def test_admin_extensions_404(self):
self.admin_request(path='/v2.0/extensions/invalid-extension',
expected_status=404)
def test_public_osksadm_extension_404(self):
self.public_request(path='/v2.0/extensions/OS-KSADM',
expected_status=404)
def test_admin_osksadm_extension(self):
r = self.admin_request(path='/v2.0/extensions/OS-KSADM')
self.assertValidExtensionResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
def test_authenticate(self):
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
'tenantId': self.tenant_bar['id'],
},
},
expected_status=200)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_authenticate_unscoped(self):
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
},
},
expected_status=200)
self.assertValidAuthenticationResponse(r)
def test_get_tenants_for_token(self):
r = self.public_request(path='/v2.0/tenants',
token=self.get_scoped_token())
self.assertValidTenantListResponse(r)
def test_validate_token(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
'token_id': token,
},
token=token)
self.assertValidAuthenticationResponse(r)
def test_invalid_token_404(self):
token = self.get_scoped_token()
self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
'token_id': 'invalid',
},
token=token,
expected_status=404)
def test_validate_token_service_role(self):
self.md_foobar = self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
token = self.get_scoped_token(tenant_id='service')
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
self.assertValidAuthenticationResponse(r)
def test_remove_role_revokes_token(self):
self.md_foobar = self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
token = self.get_scoped_token(tenant_id='service')
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
self.assertValidAuthenticationResponse(r)
self.assignment_api.remove_role_from_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token,
expected_status=401)
def test_validate_token_belongs_to(self):
token = self.get_scoped_token()
path = ('/v2.0/tokens/%s?belongsTo=%s' % (token,
self.tenant_bar['id']))
r = self.admin_request(path=path, token=token)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_validate_token_no_belongs_to_still_returns_catalog(self):
token = self.get_scoped_token()
path = ('/v2.0/tokens/%s' % token)
r = self.admin_request(path=path, token=token)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_validate_token_head(self):
"""The same call as above, except using HEAD.
There's no response to validate here, but this is included for the
sake of completely covering the core API.
"""
token = self.get_scoped_token()
self.admin_request(
method='HEAD',
path='/v2.0/tokens/%(token_id)s' % {
'token_id': token,
},
token=token,
expected_status=200)
def test_endpoints(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tokens/%(token_id)s/endpoints' % {
'token_id': token,
},
token=token)
self.assertValidEndpointListResponse(r)
def test_get_tenant(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tenants/%(tenant_id)s' % {
'tenant_id': self.tenant_bar['id'],
},
token=token)
self.assertValidTenantResponse(r)
def test_get_tenant_by_name(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tenants?name=%(tenant_name)s' % {
'tenant_name': self.tenant_bar['name'],
},
token=token)
self.assertValidTenantResponse(r)
def test_get_user_roles_with_tenant(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
'tenant_id': self.tenant_bar['id'],
'user_id': self.user_foo['id'],
},
token=token)
self.assertValidRoleListResponse(r)
def test_get_user(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
},
token=token)
self.assertValidUserResponse(r)
def test_get_user_by_name(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/users?name=%(user_name)s' % {
'user_name': self.user_foo['name'],
},
token=token)
self.assertValidUserResponse(r)
def test_create_update_user_invalid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'enabled': "False",
},
},
token=token,
expected_status=400)
self.assertValidErrorResponse(r)
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
# In JSON, 0|1 are not booleans
'enabled': 0,
},
},
token=token,
expected_status=400)
self.assertValidErrorResponse(r)
# Test UPDATE request
path = '/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
}
r = self.admin_request(
method='PUT',
path=path,
body={
'user': {
'enabled': "False",
},
},
token=token,
expected_status=400)
self.assertValidErrorResponse(r)
r = self.admin_request(
method='PUT',
path=path,
body={
'user': {
# In JSON, 0|1 are not booleans
'enabled': 1,
},
},
token=token,
expected_status=400)
self.assertValidErrorResponse(r)
def test_create_update_user_valid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
self.admin_request(method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'enabled': False,
},
},
token=token,
expected_status=200)
def test_error_response(self):
"""This triggers assertValidErrorResponse by convention."""
self.public_request(path='/v2.0/tenants', expected_status=401)
def test_invalid_parameter_error_response(self):
token = self.get_scoped_token()
bad_body = {
'OS-KSADM:service%s' % uuid.uuid4().hex: {
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
},
}
res = self.admin_request(method='POST',
path='/v2.0/OS-KSADM/services',
body=bad_body,
token=token,
expected_status=400)
self.assertValidErrorResponse(res)
res = self.admin_request(method='POST',
path='/v2.0/users',
body=bad_body,
token=token,
expected_status=400)
self.assertValidErrorResponse(res)
def _get_user_id(self, r):
"""Helper method to return user ID from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def _get_role_id(self, r):
"""Helper method to return a role ID from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def _get_role_name(self, r):
"""Helper method to return role NAME from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def _get_project_id(self, r):
"""Helper method to return project ID from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def assertNoRoles(self, r):
"""Helper method to assert No Roles
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def test_update_user_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'tenantId': self.tenant_bar['id'],
'enabled': True,
},
},
token=token,
expected_status=200)
user_id = self._get_user_id(r.result)
# Check if member_role is in tenant_bar
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=200)
self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
# Create a new tenant
r = self.admin_request(
method='POST',
path='/v2.0/tenants',
body={
'tenant': {
'name': 'test_update_user',
'description': 'A description ...',
'enabled': True,
},
},
token=token,
expected_status=200)
project_id = self._get_project_id(r.result)
# Update user's tenant
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': project_id,
},
},
token=token,
expected_status=200)
# 'member_role' should be in new_tenant
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': project_id,
'user_id': user_id
},
token=token,
expected_status=200)
self.assertEqual('_member_', self._get_role_name(r.result))
# 'member_role' should not be in tenant_bar any more
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=200)
self.assertNoRoles(r.result)
def test_update_user_with_invalid_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': 'test_invalid_tenant',
'password': uuid.uuid4().hex,
'tenantId': self.tenant_bar['id'],
'enabled': True,
},
},
token=token,
expected_status=200)
user_id = self._get_user_id(r.result)
# Update user with an invalid tenant
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': 'abcde12345heha',
},
},
token=token,
expected_status=404)
def test_update_user_with_invalid_tenant_no_prev_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': 'test_invalid_tenant',
'password': uuid.uuid4().hex,
'enabled': True,
},
},
token=token,
expected_status=200)
user_id = self._get_user_id(r.result)
# Update user with an invalid tenant
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': 'abcde12345heha',
},
},
token=token,
expected_status=404)
def test_update_user_with_old_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'tenantId': self.tenant_bar['id'],
'enabled': True,
},
},
token=token,
expected_status=200)
user_id = self._get_user_id(r.result)
# Check if member_role is in tenant_bar
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=200)
self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
# Update user's tenant with old tenant id
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': self.tenant_bar['id'],
},
},
token=token,
expected_status=200)
# 'member_role' should still be in tenant_bar
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=200)
self.assertEqual('_member_', self._get_role_name(r.result))
def test_authenticating_a_user_with_no_password(self):
token = self.get_scoped_token()
username = uuid.uuid4().hex
# create the user
self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': username,
'enabled': True,
},
},
token=token)
# fail to authenticate
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': username,
'password': 'password',
},
},
},
expected_status=401)
self.assertValidErrorResponse(r)
def test_www_authenticate_header(self):
r = self.public_request(
path='/v2.0/tenants',
expected_status=401)
self.assertEqual('Keystone uri="http://localhost"',
r.headers.get('WWW-Authenticate'))
def test_www_authenticate_header_host(self):
test_url = 'http://%s:4187' % uuid.uuid4().hex
self.config_fixture.config(public_endpoint=test_url)
r = self.public_request(
path='/v2.0/tenants',
expected_status=401)
self.assertEqual('Keystone uri="%s"' % test_url,
r.headers.get('WWW-Authenticate'))
class LegacyV2UsernameTests(object):
"""Tests to show the broken username behavior in V2.
    The V2 API is documented to use `username` instead of `name`. The
    API nevertheless forced clients to use `name`, letting `username`
    fall into the `extra` field.
These tests ensure this behavior works so fixes to `username`/`name`
will be backward compatible.
"""
def create_user(self, **user_attrs):
"""Creates a users and returns the response object.
:param user_attrs: attributes added to the request body (optional)
"""
token = self.get_scoped_token()
body = {
'user': {
'name': uuid.uuid4().hex,
'enabled': True,
},
}
body['user'].update(user_attrs)
return self.admin_request(
method='POST',
path='/v2.0/users',
token=token,
body=body,
expected_status=200)
def test_create_with_extra_username(self):
"""The response for creating a user will contain the extra fields."""
fake_username = uuid.uuid4().hex
r = self.create_user(username=fake_username)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(fake_username, user.get('username'))
def test_get_returns_username_from_extra(self):
"""The response for getting a user will contain the extra fields."""
token = self.get_scoped_token()
fake_username = uuid.uuid4().hex
r = self.create_user(username=fake_username)
id_ = self.get_user_attribute_from_response(r, 'id')
r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(fake_username, user.get('username'))
def test_update_returns_new_username_when_adding_username(self):
"""The response for updating a user will contain the extra fields.
This is specifically testing for updating a username when a value
was not previously set.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'username': 'new_username',
'enabled': enabled,
},
},
expected_status=200)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual('new_username', user.get('username'))
def test_update_returns_new_username_when_updating_username(self):
"""The response for updating a user will contain the extra fields.
This tests updating a username that was previously set.
"""
token = self.get_scoped_token()
r = self.create_user(username='original_username')
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'username': 'new_username',
'enabled': enabled,
},
},
expected_status=200)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual('new_username', user.get('username'))
def test_username_is_always_returned_create(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
r = self.create_user()
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_is_always_returned_get(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_is_always_returned_get_by_name(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
name = self.get_user_attribute_from_response(r, 'name')
r = self.admin_request(path='/v2.0/users?name=%s' % name, token=token)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_is_always_returned_update_no_username_provided(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'enabled': enabled,
},
},
expected_status=200)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_updated_username_is_returned(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'enabled': enabled,
},
},
expected_status=200)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_can_be_used_instead_of_name_create(self):
token = self.get_scoped_token()
r = self.admin_request(
method='POST',
path='/v2.0/users',
token=token,
body={
'user': {
'username': uuid.uuid4().hex,
'enabled': True,
},
},
expected_status=200)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_can_be_used_instead_of_name_update(self):
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
new_username = uuid.uuid4().hex
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'username': new_username,
'enabled': enabled,
},
},
expected_status=200)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(new_username, user.get('name'))
self.assertEqual(user.get('name'), user.get('username'))
class RestfulTestCase(rest.RestfulTestCase):
def setUp(self):
super(RestfulTestCase, self).setUp()
# TODO(termie): add an admin user to the fixtures and use that user
# override the fixtures, for now
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_admin['id'])
class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
def _get_user_id(self, r):
return r['user']['id']
def _get_role_name(self, r):
return r['roles'][0]['name']
def _get_role_id(self, r):
return r['roles'][0]['id']
def _get_project_id(self, r):
return r['tenant']['id']
def _get_token_id(self, r):
return r.result['access']['token']['id']
def assertNoRoles(self, r):
self.assertEqual([], r['roles'])
def assertValidErrorResponse(self, r):
self.assertIsNotNone(r.result.get('error'))
self.assertValidError(r.result['error'])
self.assertEqual(r.result['error']['code'], r.status_code)
def assertValidExtension(self, extension, expected):
super(V2TestCase, self).assertValidExtension(extension)
descriptions = [ext['description'] for ext in six.itervalues(expected)]
description = extension.get('description')
self.assertIsNotNone(description)
self.assertIn(description, descriptions)
self.assertIsNotNone(extension.get('links'))
self.assertNotEmpty(extension.get('links'))
for link in extension.get('links'):
self.assertValidExtensionLink(link)
def assertValidExtensionListResponse(self, r, expected):
self.assertIsNotNone(r.result.get('extensions'))
self.assertIsNotNone(r.result['extensions'].get('values'))
self.assertNotEmpty(r.result['extensions'].get('values'))
for extension in r.result['extensions']['values']:
self.assertValidExtension(extension, expected)
def assertValidExtensionResponse(self, r, expected):
self.assertValidExtension(r.result.get('extension'), expected)
def assertValidUser(self, user):
super(V2TestCase, self).assertValidUser(user)
self.assertNotIn('default_project_id', user)
if 'tenantId' in user:
# NOTE(morganfainberg): tenantId should never be "None", it gets
# filtered out of the object if it is there. This is suspenders
# and a belt check to avoid unintended regressions.
self.assertIsNotNone(user.get('tenantId'))
def assertValidAuthenticationResponse(self, r,
require_service_catalog=False):
self.assertIsNotNone(r.result.get('access'))
self.assertIsNotNone(r.result['access'].get('token'))
self.assertIsNotNone(r.result['access'].get('user'))
# validate token
self.assertIsNotNone(r.result['access']['token'].get('id'))
self.assertIsNotNone(r.result['access']['token'].get('expires'))
tenant = r.result['access']['token'].get('tenant')
if tenant is not None:
# validate tenant
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
# validate user
self.assertIsNotNone(r.result['access']['user'].get('id'))
self.assertIsNotNone(r.result['access']['user'].get('name'))
if require_service_catalog:
# roles are only provided with a service catalog
roles = r.result['access']['user'].get('roles')
self.assertNotEmpty(roles)
for role in roles:
self.assertIsNotNone(role.get('name'))
serviceCatalog = r.result['access'].get('serviceCatalog')
# validate service catalog
if require_service_catalog:
self.assertIsNotNone(serviceCatalog)
if serviceCatalog is not None:
self.assertIsInstance(serviceCatalog, list)
if require_service_catalog:
self.assertNotEmpty(serviceCatalog)
for service in r.result['access']['serviceCatalog']:
# validate service
self.assertIsNotNone(service.get('name'))
self.assertIsNotNone(service.get('type'))
# services contain at least one endpoint
self.assertIsNotNone(service.get('endpoints'))
self.assertNotEmpty(service['endpoints'])
for endpoint in service['endpoints']:
# validate service endpoint
self.assertIsNotNone(endpoint.get('publicURL'))
def assertValidTenantListResponse(self, r):
self.assertIsNotNone(r.result.get('tenants'))
self.assertNotEmpty(r.result['tenants'])
for tenant in r.result['tenants']:
self.assertValidTenant(tenant)
self.assertIsNotNone(tenant.get('enabled'))
self.assertIn(tenant.get('enabled'), [True, False])
def assertValidUserResponse(self, r):
self.assertIsNotNone(r.result.get('user'))
self.assertValidUser(r.result['user'])
def assertValidTenantResponse(self, r):
self.assertIsNotNone(r.result.get('tenant'))
self.assertValidTenant(r.result['tenant'])
def assertValidRoleListResponse(self, r):
self.assertIsNotNone(r.result.get('roles'))
self.assertNotEmpty(r.result['roles'])
for role in r.result['roles']:
self.assertValidRole(role)
def assertValidVersion(self, version):
super(V2TestCase, self).assertValidVersion(version)
self.assertIsNotNone(version.get('links'))
self.assertNotEmpty(version.get('links'))
for link in version.get('links'):
self.assertIsNotNone(link.get('rel'))
self.assertIsNotNone(link.get('href'))
self.assertIsNotNone(version.get('media-types'))
self.assertNotEmpty(version.get('media-types'))
for media in version.get('media-types'):
self.assertIsNotNone(media.get('base'))
self.assertIsNotNone(media.get('type'))
def assertValidMultipleChoiceResponse(self, r):
self.assertIsNotNone(r.result.get('versions'))
self.assertIsNotNone(r.result['versions'].get('values'))
self.assertNotEmpty(r.result['versions']['values'])
for version in r.result['versions']['values']:
self.assertValidVersion(version)
def assertValidVersionResponse(self, r):
self.assertValidVersion(r.result.get('version'))
def assertValidEndpointListResponse(self, r):
self.assertIsNotNone(r.result.get('endpoints'))
self.assertNotEmpty(r.result['endpoints'])
for endpoint in r.result['endpoints']:
self.assertIsNotNone(endpoint.get('id'))
self.assertIsNotNone(endpoint.get('name'))
self.assertIsNotNone(endpoint.get('type'))
self.assertIsNotNone(endpoint.get('publicURL'))
self.assertIsNotNone(endpoint.get('internalURL'))
self.assertIsNotNone(endpoint.get('adminURL'))
def get_user_from_response(self, r):
return r.result.get('user')
def get_user_attribute_from_response(self, r, attribute_name):
return r.result['user'][attribute_name]
def test_service_crud_requires_auth(self):
"""Service CRUD should 401 without an X-Auth-Token (bug 1006822)."""
# values here don't matter because we should 401 before they're checked
service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex
service_body = {
'OS-KSADM:service': {
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
},
}
r = self.admin_request(method='GET',
path='/v2.0/OS-KSADM/services',
expected_status=401)
self.assertValidErrorResponse(r)
r = self.admin_request(method='POST',
path='/v2.0/OS-KSADM/services',
body=service_body,
expected_status=401)
self.assertValidErrorResponse(r)
r = self.admin_request(method='GET',
path=service_path,
expected_status=401)
self.assertValidErrorResponse(r)
r = self.admin_request(method='DELETE',
path=service_path,
expected_status=401)
self.assertValidErrorResponse(r)
def test_user_role_list_requires_auth(self):
"""User role list should 401 without an X-Auth-Token (bug 1006815)."""
# values here don't matter because we should 401 before they're checked
path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
'tenant_id': uuid.uuid4().hex,
'user_id': uuid.uuid4().hex,
}
r = self.admin_request(path=path, expected_status=401)
self.assertValidErrorResponse(r)
def test_fetch_revocation_list_nonadmin_fails(self):
self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
expected_status=401)
def test_fetch_revocation_list_admin_200(self):
token = self.get_scoped_token()
r = self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
token=token,
expected_status=200)
self.assertValidRevocationListResponse(r)
def assertValidRevocationListResponse(self, response):
self.assertIsNotNone(response.result['signed'])
def _fetch_parse_revocation_list(self):
token1 = self.get_scoped_token()
# TODO(morganfainberg): Because this is making a restful call to the
# app a change to UTCNOW via mock.patch will not affect the returned
# token. The only surefire way to ensure there is not a transient bug
# based upon when the second token is issued is with a sleep. This
# issue all stems from the limited resolution (no microseconds) on the
        # expiry time of tokens and the way revocation events utilize token
# expiry to revoke individual tokens. This is a stop-gap until all
# associated issues with resolution on expiration and revocation events
# are resolved.
time.sleep(1)
token2 = self.get_scoped_token()
self.admin_request(method='DELETE',
path='/v2.0/tokens/%s' % token2,
token=token1)
r = self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
token=token1,
expected_status=200)
signed_text = r.result['signed']
data_json = cms.cms_verify(signed_text, CONF.signing.certfile,
CONF.signing.ca_certs)
data = json.loads(data_json)
return (data, token2)
def test_fetch_revocation_list_md5(self):
"""If the server is configured for md5, then the revocation list has
tokens hashed with MD5.
"""
# The default hash algorithm is md5.
hash_algorithm = 'md5'
(data, token) = self._fetch_parse_revocation_list()
token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
def test_fetch_revocation_list_sha256(self):
"""If the server is configured for sha256, then the revocation list has
tokens hashed with SHA256
"""
hash_algorithm = 'sha256'
self.config_fixture.config(group='token',
hash_algorithm=hash_algorithm)
(data, token) = self._fetch_parse_revocation_list()
token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
def test_create_update_user_invalid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
# In JSON, "true|false" are not boolean
'enabled': "true",
},
},
token=token,
expected_status=400)
self.assertValidErrorResponse(r)
# Test UPDATE request
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
},
body={
'user': {
# In JSON, "true|false" are not boolean
'enabled': "true",
},
},
token=token,
expected_status=400)
self.assertValidErrorResponse(r)
def test_authenticating_a_user_with_an_OSKSADM_password(self):
token = self.get_scoped_token()
username = uuid.uuid4().hex
password = uuid.uuid4().hex
# create the user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': username,
'OS-KSADM:password': password,
'enabled': True,
},
},
token=token)
# successfully authenticate
self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': username,
'password': password,
},
},
},
expected_status=200)
# ensure password doesn't leak
user_id = r.result['user']['id']
r = self.admin_request(
method='GET',
path='/v2.0/users/%s' % user_id,
token=token,
expected_status=200)
self.assertNotIn('OS-KSADM:password', r.result['user'])
def test_updating_a_user_with_an_OSKSADM_password(self):
token = self.get_scoped_token()
user_id = self.user_foo['id']
password = uuid.uuid4().hex
# update the user
self.admin_request(
method='PUT',
path='/v2.0/users/%s/OS-KSADM/password' % user_id,
body={
'user': {
'password': password,
},
},
token=token,
expected_status=200)
# successfully authenticate
self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': password,
},
},
},
expected_status=200)
class RevokeApiTestCase(V2TestCase):
def config_overrides(self):
super(RevokeApiTestCase, self).config_overrides()
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.kvs.Revoke')
self.config_fixture.config(
group='token',
provider='keystone.token.providers.pki.Provider',
revoke_by_id=False)
def test_fetch_revocation_list_admin_200(self):
self.skipTest('Revoke API disables revocation_list.')
def test_fetch_revocation_list_md5(self):
self.skipTest('Revoke API disables revocation_list.')
def test_fetch_revocation_list_sha256(self):
self.skipTest('Revoke API disables revocation_list.')
class TestFernetTokenProviderV2(RestfulTestCase):
def setUp(self):
super(TestFernetTokenProviderV2, self).setUp()
self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
# Used by RestfulTestCase
def _get_token_id(self, r):
return r.result['access']['token']['id']
def new_project_ref(self):
return {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': 'default',
'enabled': True}
def config_overrides(self):
super(TestFernetTokenProviderV2, self).config_overrides()
self.config_fixture.config(
group='token',
provider='keystone.token.providers.fernet.Provider')
def test_authenticate_unscoped_token(self):
unscoped_token = self.get_unscoped_token()
        # Fernet token must be of length less than 255 per usability requirements
self.assertLess(len(unscoped_token), 255)
def test_validate_unscoped_token(self):
# Grab an admin token to validate with
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
unscoped_token = self.get_unscoped_token()
path = ('/v2.0/tokens/%s' % unscoped_token)
self.admin_request(
method='GET',
path=path,
token=admin_token,
expected_status=200)
def test_authenticate_scoped_token(self):
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project_ref['id'], self.role_service['id'])
token = self.get_scoped_token(tenant_id=project_ref['id'])
        # Fernet token must be of length less than 255 per usability requirements
self.assertLess(len(token), 255)
def test_validate_scoped_token(self):
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
project2_ref = self.new_project_ref()
self.resource_api.create_project(project2_ref['id'], project2_ref)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project2_ref['id'], self.role_member['id'])
admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
member_token = self.get_scoped_token(tenant_id=project2_ref['id'])
path = ('/v2.0/tokens/%s?belongsTo=%s' % (member_token,
project2_ref['id']))
# Validate token belongs to project
self.admin_request(
method='GET',
path=path,
token=admin_token,
expected_status=200)
def test_token_authentication_and_validation(self):
"""Test token authentication for Fernet token provider.
        Verify that token authentication returns a valid response code and
        that the resulting token belongs to the project.
"""
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
unscoped_token = self.get_unscoped_token()
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'tenantName': project_ref['name'],
'token': {
'id': unscoped_token.encode('ascii')
}
}
},
expected_status=200)
token_id = self._get_token_id(r)
path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id']))
# Validate token belongs to project
self.admin_request(
method='GET',
path=path,
token=CONF.admin_token,
expected_status=200)
| apache-2.0 |
iabdalkader/micropython | ports/esp8266/modules/flashbdev.py | 14 | 1414 | import esp
class FlashBdev:
SEC_SIZE = 4096
def __init__(self, start_sec, blocks):
self.start_sec = start_sec
self.blocks = blocks
def readblocks(self, n, buf, off=0):
# print("readblocks(%s, %x(%d), %d)" % (n, id(buf), len(buf), off))
esp.flash_read((n + self.start_sec) * self.SEC_SIZE + off, buf)
def writeblocks(self, n, buf, off=None):
# print("writeblocks(%s, %x(%d), %d)" % (n, id(buf), len(buf), off))
# assert len(buf) <= self.SEC_SIZE, len(buf)
if off is None:
esp.flash_erase(n + self.start_sec)
off = 0
esp.flash_write((n + self.start_sec) * self.SEC_SIZE + off, buf)
def ioctl(self, op, arg):
# print("ioctl(%d, %r)" % (op, arg))
if op == 4: # MP_BLOCKDEV_IOCTL_BLOCK_COUNT
return self.blocks
if op == 5: # MP_BLOCKDEV_IOCTL_BLOCK_SIZE
return self.SEC_SIZE
if op == 6: # MP_BLOCKDEV_IOCTL_BLOCK_ERASE
esp.flash_erase(arg + self.start_sec)
return 0
size = esp.flash_size()
if size < 1024 * 1024:
bdev = None
else:
start_sec = esp.flash_user_start() // FlashBdev.SEC_SIZE
if start_sec < 256:
start_sec += 1 # Reserve space for native code
# 20K at the flash end is reserved for SDK params storage
bdev = FlashBdev(start_sec, (size - 20480) // FlashBdev.SEC_SIZE - start_sec)
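
# Usage sketch (not part of the original module, shown for context only; it
# assumes a standard ESP8266 boot sequence in which the port's setup code
# formats and mounts this block device as the root filesystem):
#
#   import uos, flashbdev
#   if flashbdev.bdev:
#       vfs = uos.VfsLfs2(flashbdev.bdev)  # or uos.VfsFat on older FAT-based builds
#       uos.mount(vfs, "/")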
| mit |
oscarolar/odoo | addons/crm_project_issue/project_issue.py | 380 | 2373 |
from openerp.osv import osv, fields
class crm_lead_to_project_issue_wizard(osv.TransientModel):
""" wizard to convert a Lead into a Project Issue and move the Mail Thread """
_name = "crm.lead2projectissue.wizard"
_inherit = 'crm.partner.binding'
_columns = {
"lead_id": fields.many2one("crm.lead", "Lead", domain=[("type", "=", "lead")]),
"project_id": fields.many2one("project.project", "Project", domain=[("use_issues", "=", True)])
}
_defaults = {
"lead_id": lambda self, cr, uid, context=None: context.get('active_id')
}
def action_lead_to_project_issue(self, cr, uid, ids, context=None):
# get the wizards and models
wizards = self.browse(cr, uid, ids, context=context)
Lead = self.pool["crm.lead"]
Issue = self.pool["project.issue"]
for wizard in wizards:
# get the lead to transform
lead = wizard.lead_id
partner = self._find_matching_partner(cr, uid, context=context)
if not partner and (lead.partner_name or lead.contact_name):
partner_ids = Lead.handle_partner_assignation(cr, uid, [lead.id], context=context)
partner = partner_ids[lead.id]
# create new project.issue
vals = {
"name": lead.name,
"description": lead.description,
"email_from": lead.email_from,
"project_id": wizard.project_id.id,
"partner_id": partner,
"user_id": None
}
issue_id = Issue.create(cr, uid, vals, context=None)
# move the mail thread
Lead.message_change_thread(cr, uid, lead.id, issue_id, "project.issue", context=context)
# delete the lead
Lead.unlink(cr, uid, [lead.id], context=None)
# return the action to go to the form view of the new Issue
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('model', '=', 'project.issue'), ('name', '=', 'project_issue_form_view')])
return {
'name': 'Issue created',
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'res_model': 'project.issue',
'type': 'ir.actions.act_window',
'res_id': issue_id,
'context': context
}
| agpl-3.0 |
bminchew/PySAR | pysar/polsar/decomp.py | 1 | 6649 | """
PySAR
Polarimetric SAR decomposition
Contents
--------
decomp_fd(hhhh,vvvv,hvhv,hhvv,numthrd=None) : Freeman-Durden 3-component decomposition
"""
from __future__ import print_function, division
import sys,os
import numpy as np
###===========================================================================================
def decomp_fd(hhhh,vvvv,hvhv,hhvv,null=None,numthrd=None,maxthrd=8):
"""
Freeman-Durden 3-component decomposition
Parameters
----------
hhhh : ndarray
horizontally polarized power
vvvv : ndarray
vertically polarized power
hvhv : ndarray
cross-polarized power
hhvv : ndarray
co-polarized cross product (complex-valued)
null : float or None
null value to exclude from decomposition
numthrd : int or None
number of pthreads; None sets numthrd based on the data array size [None]
maxthrd : int or None
maximum allowable numthrd [8]
Returns
-------
ps : ndarray
surface-scattered power
pd : ndarray
double-bounce power
pv : ndarray
volume-scattered power
Notes
-----
* arrays are returned with the same type as hhhh data
Reference
---------
1. Freeman, A. and Durden, S., "A three-component scattering model for polarimetric SAR data", *IEEE Trans. Geosci. Remote Sensing*, vol. 36, no. 3, pp. 963-973, May 1998.
"""
from pysar.polsar._decomp_modc import free_durden
if not numthrd:
numthrd = np.max([len(hhhh)//1e5, 1])
if numthrd > maxthrd: numthrd = maxthrd
elif numthrd < 1:
raise ValueError('numthrd must be >= 1')
if null:
nullmask = np.abs(hhhh-null) < 1.e-7
nullmask += np.abs(vvvv-null) < 1.e-7
nullmask += np.abs(hvhv-null) < 1.e-7
nullmask += np.abs(hhvv-null) < 1.e-7
hhvv[nullmask] = 0.
hhhhtype = None
if hhhh.dtype != np.float32:
hhhhtype = hhhh.dtype
hhhh = hhhh.astype(np.float32)
vvvv = vvvv.astype(np.float32)
hvhv = hvhv.astype(np.float32)
hhvv = hhvv.astype(np.complex64)
if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhvv.ndim]}):
hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
P = free_durden(hhhh, vvvv, hvhv, hhvv, numthrd)
if hhhhtype: P = P.astype(hhhhtype)
P = P.reshape(3,-1)
if null: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
return P[0,:], P[1,:], P[2,:]
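
# Usage sketch for decomp_fd (illustrative only; assumes the compiled
# _decomp_modc extension is built and that the four covariance terms were
# already formed from the scattering matrix -- the random inputs below are
# placeholders, not real SAR data):
#
#   import numpy as np
#   from pysar.polsar.decomp import decomp_fd
#
#   npix = 1000
#   hhhh = np.random.rand(npix).astype(np.float32)
#   vvvv = np.random.rand(npix).astype(np.float32)
#   hvhv = np.random.rand(npix).astype(np.float32)
#   hhvv = (np.random.rand(npix) + 1j*np.random.rand(npix)).astype(np.complex64)
#   ps, pd, pv = decomp_fd(hhhh, vvvv, hvhv, hhvv, numthrd=2)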
###---------------------------------------------------------------------------------
def decomp_haa(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
"""
Cloude-Pottier H/A/alpha polarimetric decomposition
Parameters
----------
hhhh : ndarray
horizontal co-polarized power (or 0.5|HH + VV|^2 if matform = 'T')
vvvv : ndarray
vertical co-polarized power (or 0.5|HH - VV|^2 if matform = 'T')
hvhv : ndarray
cross-polarized power (2|HV|^2 for matform = 'T')
hhhv : ndarray
HH.HV* cross-product (or 0.5(HH+VV)(HH-VV)* for matform = 'T')
hhvv : ndarray
HH.VV* cross-product (or HV(HH+VV)* for matform = 'T')
hvvv : ndarray
HV.VV* cross-product (or HV(HH-VV)* for matform = 'T')
matform : str {'C' or 'T'}
form of input matrix entries: 'C' for covariance matrix and
'T' for coherency matrix ['C'] (see ref. 1)
null : float or None
null value to exclude from decomposition
numthrd : int or None
number of pthreads; None sets numthrd based on the data array size [None]
maxthrd : int or None
maximum allowable numthrd [8]
Returns
-------
H : ndarray
entropy (H = -(p1*log_3(p1) + p2*log_3(p2) + p3*log_3(p3))
where pi = lam_i/(hhhh+vvvv+hvhv)) and lam is an eigenvalue
A : ndarray
        anisotropy (A = (lam_2-lam_3)/(lam_2+lam_3) --> lam_1 >= lam_2 >= lam_3)
alpha : ndarray
alpha angle in degrees (see ref. 1)
Notes
-----
* arrays are returned with the same type as hhhh data
* if covariance matrix form is used, do not multiply entries by any constants
Reference
---------
1. Cloude, S. and Pottier, E., "An entropy based classification scheme for land applications of polarimetric SAR", *IEEE Trans. Geosci. Remote Sensing*, vol. 35, no. 1, pp. 68-78, Jan. 1997.
"""
from pysar.polsar._decomp_modc import cloude_pot
if matform == 'C' or matform == 'c':
mtf = 1
elif matform == 'T' or matform == 't':
mtf = 0
else:
raise ValueError("matform must be 'C' or 'T'")
if not numthrd:
numthrd = np.max([len(hhhh)//1e5, 1])
if numthrd > maxthrd: numthrd = maxthrd
elif numthrd < 1:
raise ValueError('numthrd must be >= 1')
if null:
nullmask = np.abs(hhhh-null) < 1.e-7
nullmask += np.abs(vvvv-null) < 1.e-7
nullmask += np.abs(hvhv-null) < 1.e-7
nullmask += np.abs(hhhv-null) < 1.e-7
nullmask += np.abs(hhvv-null) < 1.e-7
nullmask += np.abs(hvvv-null) < 1.e-7
hhhh[nullmask], vvvv[nullmask] = 0., 0.
hvhv[nullmask] = 0.
hhhhtype = None
if hhhh.dtype != np.float32:
hhhhtype = hhhh.dtype
hhhh = hhhh.astype(np.float32)
vvvv = vvvv.astype(np.float32)
hvhv = hvhv.astype(np.float32)
hhhv = hhhv.astype(np.complex64)
hhvv = hhvv.astype(np.complex64)
hvvv = hvvv.astype(np.complex64)
if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhhv.ndim, hhvv.ndim, hvvv.ndim]}):
hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
hhhv, hvvv = hhhv.flatten(), hvvv.flatten()
P = cloude_pot(hhhh, vvvv, hvhv, hhhv, hhvv, hvvv, mtf, numthrd)
if hhhhtype: P = P.astype(hhhhtype)
P = P.reshape(3,-1)
if null: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
return P[0,:], P[1,:], P[2,:]
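
# Usage sketch for decomp_haa (illustrative only; assumes the compiled
# _decomp_modc extension is available and that the six cross-products follow
# the covariance-matrix convention described in the docstring):
#
#   H, A, alpha = decomp_haa(hhhh, vvvv, hvhv, hhhv, hhvv, hvvv,
#                            matform='C', numthrd=2)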
def decomp_cp(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
__doc__ = decomp_haa.__doc__
return decomp_haa(hhhh=hhhh,vvvv=vvvv,hvhv=hvhv,hhhv=hhhv,hhvv=hhvv,hvvv=hvvv,
matform=matform,null=null,numthrd=numthrd,maxthrd=maxthrd)
| gpl-3.0 |
rx2130/Leetcode | python/286 Walls and Gates.py | 1 | 3439 | from collections import deque
class Solution(object):
# Op1: DFS
def wallsAndGates(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: void Do not return anything, modify rooms in-place instead.
"""
for i in range(len(rooms)):
for j in range(len(rooms[0])):
if rooms[i][j] == 0:
self.helper(rooms, i, j, 0)
def helper(self, rooms, i, j, d):
if i < 0 or i >= len(rooms) or j < 0 or j >= len(rooms[0]) or rooms[i][j] < d:
return
rooms[i][j] = d
self.helper(rooms, i - 1, j, d + 1)
self.helper(rooms, i + 1, j, d + 1)
self.helper(rooms, i, j - 1, d + 1)
self.helper(rooms, i, j + 1, d + 1)
# Op1.1
def wallsAndGates(self, rooms):
self.direction = [[1, 0], [-1, 0], [0, 1], [0, -1]]
for i in range(len(rooms)):
for j in range(len(rooms[0])):
if rooms[i][j] == 0:
self.helper(rooms, i, j)
def helper(self, rooms, i, j):
for d in self.direction:
p, q = i + d[0], j + d[1]
if 0 <= p < len(rooms) and 0 <= q < len(rooms[0]) and rooms[p][q] > rooms[i][j] + 1:
rooms[p][q] = rooms[i][j] + 1
self.helper(rooms, p, q)
# Op2: Naive BFS
def wallsAndGates(self, rooms):
self.direction = [[1, 0], [-1, 0], [0, 1], [0, -1]]
for i in range(len(rooms)):
for j in range(len(rooms[0])):
if rooms[i][j] == 0:
self.helper(rooms, i, j)
def helper(self, rooms, i, j):
queue = deque()
queue.append([i, j])
while queue:
x = queue.popleft()
i, j = x[0], x[1]
for d in self.direction:
p, q = i + d[0], j + d[1]
if 0 <= p < len(rooms) and 0 <= q < len(rooms[0]) and rooms[p][q] > rooms[i][j] + 1:
rooms[p][q] = rooms[i][j] + 1
queue.append([p, q])
# Op2.1: Multi End BFS
def wallsAndGates(self, rooms):
inf = (1 << 31) - 1
direction = [[1, 0], [-1, 0], [0, 1], [0, -1]]
queue = deque()
for i in range(len(rooms)):
for j in range(len(rooms[0])):
if rooms[i][j] == 0:
queue.append([i, j])
while queue:
x = queue.popleft()
i, j = x[0], x[1]
for d in direction:
p, q = i + d[0], j + d[1]
if 0 <= p < len(rooms) and 0 <= q < len(rooms[0]) and rooms[p][q] == inf:
# if 0 <= p < len(rooms) and 0 <= q < len(rooms[0]) and rooms[p][q] > rooms[i][j] + 1:
rooms[p][q] = rooms[i][j] + 1
queue.append([p, q])
# Op3: Ultimate Python
def wallsAndGates(self, rooms):
q = [(i, j) for i, row in enumerate(rooms) for j, r in enumerate(row) if not r]
for i, j in q:
for I, J in (i+1, j), (i-1, j), (i, j+1), (i, j-1):
if 0 <= I < len(rooms) and 0 <= J < len(rooms[0]) and rooms[I][J] > 1<<30:
rooms[I][J] = rooms[i][j] + 1
q += (I, J),
rooms = [[2147483647, -1, 0, 2147483647], [2147483647, 2147483647, 2147483647, -1],
[2147483647, -1, 2147483647, -1], [0, -1, 2147483647, 2147483647]]
test = Solution()
test.wallsAndGates(rooms)
print(rooms)
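# Note: since the class defines wallsAndGates several times, only the last
# definition (the multi-end BFS, "Op3") is actually bound when the class body
# executes. For the sample grid above the expected output is:
# [[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1], [0, -1, 3, 4]]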
| apache-2.0 |
flijloku/livestreamer | win32/build-bbfreeze.py | 21 | 1442 | #!/usr/bin/env python
import os
import shutil
import sys
import bbfreeze.recipes
from itertools import ifilter
from bbfreeze import Freezer
from livestreamer import __version__
def recipe_pycparser(mf):
m = mf.findNode("pycparser")
if not m:
return
mf.import_hook("pycparser", m, ['*'])
return True
bbfreeze.recipes.recipe_pycparser = recipe_pycparser
build_version = __version__
python_path = sys.prefix
script = os.path.join(python_path, "Scripts\\livestreamer-script.py")
script_exe = os.path.join(python_path, "Scripts\\livestreamer.py")
shutil.copy(script, script_exe)
includes = ("requests", "re", "xml", "xml.dom.minidom",
"zlib", "ctypes", "argparse", "hmac", "tempfile",
"os", "sys", "subprocess", "getpass", "msvcrt",
"urllib", "urlparse", "pkgutil", "imp", "ast",
"singledispatch", "cffi", "Crypto", "concurrent.futures")
manual_copy = ("librtmp", "librtmp_config", "librtmp_ffi")
freezer_path = os.path.dirname(os.path.abspath(__file__))
dst = "{0}\\..\\build-win32\\livestreamer-{1}-win32\\".format(freezer_path, build_version)
site_packages = next(ifilter(lambda p: p.endswith("site-packages"), sys.path))
f = Freezer(dst, includes=includes)
f.include_py = False
f.addScript(script_exe, gui_only=False)
f()
for pkg in manual_copy:
src = os.path.join(site_packages, pkg)
pkgdst = os.path.join(dst, pkg)
shutil.copytree(src, pkgdst)
| bsd-2-clause |
sudheesh001/oh-mainline | vendor/packages/python-social-auth/social/store.py | 90 | 2744 | import time
try:
import cPickle as pickle
except ImportError:
import pickle
from openid.store.interface import OpenIDStore as BaseOpenIDStore
from openid.store.nonce import SKEW
class OpenIdStore(BaseOpenIDStore):
"""Storage class"""
def __init__(self, strategy):
"""Init method"""
super(OpenIdStore, self).__init__()
self.strategy = strategy
self.storage = strategy.storage
self.assoc = self.storage.association
self.nonce = self.storage.nonce
self.max_nonce_age = 6 * 60 * 60 # Six hours
def storeAssociation(self, server_url, association):
"""Store new assocition if doesn't exist"""
self.assoc.store(server_url, association)
def removeAssociation(self, server_url, handle):
"""Remove association"""
associations_ids = list(dict(self.assoc.oids(server_url,
handle)).keys())
if associations_ids:
self.assoc.remove(associations_ids)
def expiresIn(self, assoc):
if hasattr(assoc, 'getExpiresIn'):
return assoc.getExpiresIn()
else: # python3-openid 3.0.2
return assoc.expiresIn
def getAssociation(self, server_url, handle=None):
"""Return stored assocition"""
associations, expired = [], []
for assoc_id, association in self.assoc.oids(server_url, handle):
expires = self.expiresIn(association)
if expires > 0:
associations.append(association)
elif expires == 0:
expired.append(assoc_id)
if expired: # clear expired associations
self.assoc.remove(expired)
        if associations:  # return most recent association
return associations[0]
def useNonce(self, server_url, timestamp, salt):
"""Generate one use number and return *if* it was created"""
if abs(timestamp - time.time()) > SKEW:
return False
return self.nonce.use(server_url, timestamp, salt)
class OpenIdSessionWrapper(dict):
pickle_instances = (
'_yadis_services__openid_consumer_',
'_openid_consumer_last_token'
)
def __getitem__(self, name):
value = super(OpenIdSessionWrapper, self).__getitem__(name)
if name in self.pickle_instances:
value = pickle.loads(value)
return value
def __setitem__(self, name, value):
if name in self.pickle_instances:
value = pickle.dumps(value, 0)
super(OpenIdSessionWrapper, self).__setitem__(name, value)
def get(self, name, default=None):
try:
return self[name]
except KeyError:
return default
| agpl-3.0 |
hungtt57/matchmaker | lib/python2.7/site-packages/django/core/management/commands/runfcgi.py | 120 | 1073 | import argparse
import warnings
from django.core.management.base import BaseCommand
from django.utils.deprecation import RemovedInDjango19Warning
class Command(BaseCommand):
help = "Runs this project as a FastCGI application. Requires flup."
def add_arguments(self, parser):
parser.add_argument('args', nargs=argparse.REMAINDER,
help='Various KEY=val options.')
def handle(self, *args, **options):
warnings.warn(
"FastCGI support has been deprecated and will be removed in Django 1.9.",
RemovedInDjango19Warning)
from django.conf import settings
from django.utils import translation
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
from django.core.servers.fastcgi import runfastcgi
runfastcgi(args)
def usage(self, subcommand):
from django.core.servers.fastcgi import FASTCGI_HELP
return FASTCGI_HELP
| mit |
markeldigital/design-system | vendor/ruby/2.0.0/gems/pygments.rb-0.6.3/vendor/simplejson/simplejson/tests/test_encode_basestring_ascii.py | 95 | 2301 | from unittest import TestCase
import simplejson.encoder
CASES = [
(u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
(u'controls', '"controls"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
(u' s p a c e d ', '" s p a c e d "'),
(u'\U0001d120', '"\\ud834\\udd20"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBaseStringAscii(TestCase):
def test_py_encode_basestring_ascii(self):
self._test_encode_basestring_ascii(simplejson.encoder.py_encode_basestring_ascii)
def test_c_encode_basestring_ascii(self):
if not simplejson.encoder.c_encode_basestring_ascii:
return
self._test_encode_basestring_ascii(simplejson.encoder.c_encode_basestring_ascii)
def _test_encode_basestring_ascii(self, encode_basestring_ascii):
fname = encode_basestring_ascii.__name__
for input_string, expect in CASES:
result = encode_basestring_ascii(input_string)
#self.assertEquals(result, expect,
# '{0!r} != {1!r} for {2}({3!r})'.format(
# result, expect, fname, input_string))
self.assertEquals(result, expect,
'%r != %r for %s(%r)' % (result, expect, fname, input_string))
def test_sorted_dict(self):
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = simplejson.dumps(dict(items), sort_keys=True)
self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
| mit |
vitan/hue | desktop/core/ext-py/South-1.0.2/south/db/firebird.py | 93 | 14445 | # firebird
from __future__ import print_function
import datetime
from django.db import connection, models
from django.core.management.color import no_style
from django.db.utils import DatabaseError
from south.db import generic
from south.utils.py3 import string_types
class DatabaseOperations(generic.DatabaseOperations):
backend_name = 'firebird'
alter_string_set_type = 'ALTER %(column)s TYPE %(type)s'
alter_string_set_default = 'ALTER %(column)s SET DEFAULT %(default)s;'
alter_string_drop_null = ''
add_column_string = 'ALTER TABLE %s ADD %s;'
delete_column_string = 'ALTER TABLE %s DROP %s;'
rename_table_sql = ''
# Features
allows_combined_alters = False
has_booleans = False
def _fill_constraint_cache(self, db_name, table_name):
self._constraint_cache.setdefault(db_name, {})
self._constraint_cache[db_name][table_name] = {}
rows = self.execute("""
SELECT
rc.RDB$CONSTRAINT_NAME,
rc.RDB$CONSTRAINT_TYPE,
cc.RDB$TRIGGER_NAME
FROM rdb$relation_constraints rc
JOIN rdb$check_constraints cc
ON rc.rdb$constraint_name = cc.rdb$constraint_name
WHERE rc.rdb$constraint_type = 'NOT NULL'
AND rc.rdb$relation_name = '%s'
""" % table_name)
for constraint, kind, column in rows:
self._constraint_cache[db_name][table_name].setdefault(column, set())
self._constraint_cache[db_name][table_name][column].add((kind, constraint))
return
def _alter_column_set_null(self, table_name, column_name, is_null):
sql = """
UPDATE RDB$RELATION_FIELDS SET RDB$NULL_FLAG = %(null_flag)s
WHERE RDB$FIELD_NAME = '%(column)s'
AND RDB$RELATION_NAME = '%(table_name)s'
"""
null_flag = 'NULL' if is_null else '1'
return sql % {
'null_flag': null_flag,
'column': column_name.upper(),
'table_name': table_name.upper()
}
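    # For illustration (not part of the original module): a call like
    # self._alter_column_set_null('player', 'team_id', True) returns roughly
    #   UPDATE RDB$RELATION_FIELDS SET RDB$NULL_FLAG = NULL
    #   WHERE RDB$FIELD_NAME = 'TEAM_ID' AND RDB$RELATION_NAME = 'PLAYER'
    # while is_null=False sets RDB$NULL_FLAG = 1 instead.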
def _column_has_default(self, params):
sql = """
SELECT a.RDB$DEFAULT_VALUE
FROM RDB$RELATION_FIELDS a
WHERE a.RDB$FIELD_NAME = '%(column)s'
AND a.RDB$RELATION_NAME = '%(table_name)s'
"""
value = self.execute(sql % params)
return True if value else False
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# Historically, we used to set defaults here.
# But since South 0.8, we don't ever set defaults on alter-column -- we only
# use database-level defaults as scaffolding when adding columns.
# However, we still sometimes need to remove defaults in alter-column.
if self._column_has_default(params):
sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), []))
@generic.invalidate_table_constraints
def create_table(self, table_name, fields):
columns = []
autoinc_sql = ''
for field_name, field in fields:
# avoid default values in CREATE TABLE statements (#925)
field._suppress_default = True
col = self.column_sql(table_name, field_name, field)
if not col:
continue
columns.append(col)
if isinstance(field, models.AutoField):
field_name = field.db_column or field.column
autoinc_sql = connection.ops.autoinc_sql(table_name, field_name)
self.execute(self.create_table_sql % {
"table": self.quote_name(table_name),
"columns": ', '.join([col for col in columns if col]),
})
if autoinc_sql:
self.execute(autoinc_sql[0])
self.execute(autoinc_sql[1])
def rename_table(self, old_table_name, table_name):
"""
        Renaming a table is not supported by Firebird.
        It would involve recreating all related objects (stored procedures, views, triggers, etc.)
"""
pass
@generic.invalidate_table_constraints
def delete_table(self, table_name, cascade=False):
"""
Deletes the table 'table_name'.
Firebird will also delete any triggers associated with the table.
"""
super(DatabaseOperations, self).delete_table(table_name, cascade=False)
# Also, drop sequence if exists
sql = connection.ops.drop_sequence_sql(table_name)
if sql:
try:
self.execute(sql)
except:
pass
def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False):
"""
Creates the SQL snippet for a column. Used by add_column and add_table.
"""
# If the field hasn't already been told its attribute name, do so.
if not field_prepared:
field.set_attributes_from_name(field_name)
# hook for the field to do any resolution prior to it's attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL)
field = self._field_sanity(field)
try:
sql = field.db_type(connection=self._get_connection())
except TypeError:
sql = field.db_type()
if sql:
# Some callers, like the sqlite stuff, just want the extended type.
if with_name:
field_output = [self.quote_name(field.column), sql]
else:
field_output = [sql]
if field.primary_key:
field_output.append('NOT NULL PRIMARY KEY')
elif field.unique:
# Just use UNIQUE (no indexes any more, we have delete_unique)
field_output.append('UNIQUE')
sql = ' '.join(field_output)
sqlparams = ()
# if the field is "NOT NULL" and a default value is provided, create the column with it
# this allows the addition of a NOT NULL field to a table with existing rows
if not getattr(field, '_suppress_default', False):
if field.has_default():
default = field.get_default()
# If the default is actually None, don't add a default term
if default is not None:
# If the default is a callable, then call it!
if callable(default):
default = default()
# Now do some very cheap quoting. TODO: Redesign return values to avoid this.
if isinstance(default, string_types):
default = "'%s'" % default.replace("'", "''")
elif isinstance(default, (datetime.date, datetime.time, datetime.datetime)):
default = "'%s'" % default
elif isinstance(default, bool):
default = int(default)
# Escape any % signs in the output (bug #317)
if isinstance(default, string_types):
default = default.replace("%", "%%")
# Add it in
sql += " DEFAULT %s"
sqlparams = (default)
elif (not field.null and field.blank) or (field.get_default() == ''):
if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls:
sql += " DEFAULT ''"
# Error here would be nice, but doesn't seem to play fair.
#else:
# raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.")
            # Firebird needs NOT NULL to come after the default value keyword
if not field.primary_key and not field.null:
sql += ' NOT NULL'
if field.rel and self.supports_foreign_keys:
self.add_deferred_sql(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
# Things like the contrib.gis module fields have this in 1.1 and below
if hasattr(field, 'post_create_sql'):
for stmt in field.post_create_sql(no_style(), table_name):
self.add_deferred_sql(stmt)
# Avoid double index creation (#1317)
        # Firebird creates an index implicitly for each foreign key field
# sql_indexes_for_field tries to create an index for that field too
if not field.rel:
# In 1.2 and above, you have to ask the DatabaseCreation stuff for it.
# This also creates normal indexes in 1.1.
if hasattr(self._get_connection().creation, "sql_indexes_for_field"):
# Make a fake model to pass in, with only db_table
model = self.mock_model("FakeModelForGISCreation", table_name)
for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
self.add_deferred_sql(stmt)
if sql:
return sql % sqlparams
else:
return None
def _drop_constraints(self, table_name, name, field):
if self.has_check_constraints:
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
# Drop or add UNIQUE constraint
unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE"))
if field.unique and not unique_constraint:
self.create_unique(table_name, [name])
elif not field.unique and unique_constraint:
self.delete_unique(table_name, [name])
# Drop all foreign key constraints
try:
self.delete_foreign_key(table_name, name)
except ValueError:
# There weren't any
pass
@generic.invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
        Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
if self.dry_run:
if self.debug:
print(' - no dry run output for alter_column() due to dynamic DDL, sorry')
return
# hook for the field to do any resolution prior to it's attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
else:
field.column = name
if not ignore_constraints:
# Drop all check constraints. Note that constraints will be added back
# with self.alter_string_set_type and self.alter_string_drop_null.
self._drop_constraints(table_name, name, field)
# First, change the type
params = {
"column": self.quote_name(name),
"type": self._db_type_for_alter_column(field),
"table_name": table_name
}
# SQLs is a list of (SQL, values) pairs.
sqls = []
sqls_extra = []
# Only alter the column if it has a type (Geometry ones sometimes don't)
if params["type"] is not None:
sqls.append((self.alter_string_set_type % params, []))
# Add any field- and backend- specific modifications
self._alter_add_column_mods(field, name, params, sqls)
# Next, nullity: modified, firebird doesn't support DROP NOT NULL
sqls_extra.append(self._alter_column_set_null(table_name, name, field.null))
# Next, set any default
self._alter_set_defaults(field, name, params, sqls)
# Finally, actually change the column
if self.allows_combined_alters:
sqls, values = list(zip(*sqls))
self.execute(
"ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)),
generic.flatten(values),
)
else:
# Databases like e.g. MySQL don't like more than one alter at once.
for sql, values in sqls:
try:
self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values)
except DatabaseError as e:
print(e)
# Execute extra sql, which don't need ALTER TABLE statement
for sql in sqls_extra:
self.execute(sql)
if not ignore_constraints:
# Add back FK constraints if needed
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
@generic.copy_column_constraints
@generic.delete_column_constraints
def rename_column(self, table_name, old, new):
if old == new:
# Short-circuit out
return []
self.execute('ALTER TABLE %s ALTER %s TO %s;' % (
self.quote_name(table_name),
self.quote_name(old),
self.quote_name(new),
))
| apache-2.0 |
exogen/nose-achievements | tests/test_achievement.py | 1 | 1430 | # -*- coding: utf-8 -*-
import unittest
from noseachievements.achievements.base import Achievement
from noseachievements.compat import unicode
from helpers import (PASS, TestPlugin, NeverUnlockedAchievement,
AlwaysUnlockedAchievement)
class TestAchievement(TestPlugin):
achievement = NeverUnlockedAchievement()
achievements = [achievement]
def test_achievement_is_loaded(self):
self.assert_(self.achievement in self.plugin.achievements)
def test_no_achievements_are_printed(self):
self.assert_("Ran 1 test" in self.output)
self.assert_("Achievement unlocked" not in self.output)
def test_announcement_returns_unlocked_string(self):
self.assertEqual(self.achievement.announcement(), unicode("""
/.–==*==–.\\
( | #| ) Achievement unlocked!
): ':(
`·…_…·´ Test Achievement
`H´ Test Subtitle
_.U._ Test Message
[_____]""", 'utf-8'))
class TestUnlockedAchievement(TestPlugin):
def setUp(self):
self.achievement = AlwaysUnlockedAchievement()
self.achievements = [self.achievement]
TestPlugin.setUp(self)
def test_achievement_is_printed(self):
self.assert_("""
/.–==*==–.\\
( | #| ) Achievement unlocked!
): ':(
`·…_…·´ Test Achievement
`H´ Test Subtitle
_.U._ Test Message
[_____]""" in self.output)
| bsd-3-clause |
hpcugent/easybuild-framework | easybuild/framework/easyconfig/constants.py | 1 | 2040 | #
# Copyright 2013-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
Easyconfig constants module that provides all constants that can
be used within an Easyconfig file.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import os
import platform
from vsc.utils import fancylogger
from easybuild.tools.systemtools import get_shared_lib_ext, get_os_name, get_os_type, get_os_version
_log = fancylogger.getLogger('easyconfig.constants', fname=False)
EXTERNAL_MODULE_MARKER = 'EXTERNAL_MODULE'
# constants that can be used in easyconfig
EASYCONFIG_CONSTANTS = {
'EXTERNAL_MODULE': (EXTERNAL_MODULE_MARKER, "External module marker"),
'HOME': (os.path.expanduser('~'), "Home directory ($HOME)"),
'OS_TYPE': (get_os_type(), "System type (e.g. 'Linux' or 'Darwin')"),
'OS_NAME': (get_os_name(), "System name (e.g. 'fedora' or 'RHEL')"),
'OS_VERSION': (get_os_version(), "System version"),
'SYS_PYTHON_VERSION': (platform.python_version(), "System Python version (platform.python_version())"),
}
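
# Illustrative sketch (not part of the original module): these constants are
# exposed to easyconfig files, so an easyconfig can reference them directly,
# e.g. (hypothetical parameter values):
#
#   description = "Example tool built on %s %s" % (OS_NAME, OS_VERSION)
#   modextravars = {'MY_TOOL_HOME': HOME}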
| gpl-2.0 |
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | data/05 citi/parseJSONdata.py | 26 | 6754 | import json;
import time;
from datetime import datetime, timedelta
from parseJSON import getSocialData
def parseData(negativeFileName, postiveFileName, neutralFileName, numNegativ, numPositive, numNeutral):
days_back = 30
date_days_ago = datetime.now() - timedelta(days=days_back)
day = datetime.today().day
day = 7
# array to track number of positive articles by day
ap = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
ap_social = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
ap_trump = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
ap_clinton = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
ap_election = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
	# array to track number of negative articles by day
an = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
an_social = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
an_trump = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
an_clinton = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
an_election = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
	# array to track number of neutral articles by day
aa = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
aa_social = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
aa_trump = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
aa_clinton = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
aa_election = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
for x in range(1, numNegativ+1):
fileName = negativeFileName + str(x) + '.json'
print fileName
# Negative News Articles from 1 to 9 JSON files
with open(fileName) as json_data:
d = json.load(json_data)
rn = d["posts"]
# go through all posts
for post in rn:
p = post["published"]
m = int(p[5:7])
if m == 11:
d = int(p[8:10])
else:
d = int(p[8:10])+30
an[d-day] += 1
social_impact = getSocialData(post)
an_social[d-day] += social_impact
# Get text from news object
text = post["text"]
text = text.encode('ascii','ignore')
# Search for Trump
foundindx = text.find("Trump")
if foundindx > 0:
an_trump[d-day] += 1
# Search for Election
foundindx = text.find("election")
if foundindx > 0:
an_election[d-day] += 1
# Search for Election
foundindx = text.find("Clinton")
if foundindx > 0:
an_clinton[d-day] += 1
print "Found Trump: " + str(an_trump)
print "Found Election: " + str(an_election)
print "Found Election: " + str(an_clinton)
out0 = open('negative.txt', 'w')
out0.truncate()
for i in an:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('negative_social.txt', 'w')
out0.truncate()
for i in an_social:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('negative_election.txt', 'w')
out0.truncate()
for i in an_election:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('negative_trump.txt', 'w')
out0.truncate()
for i in an_trump:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('negative_clinton.txt', 'w')
out0.truncate()
for i in an_clinton:
out0.write(str(i))
out0.write("\n")
out0.close()
for x in range(1, numNeutral+1):
fileName = neutralFileName + str(x) + '.json'
print fileName
# calculate total number of neutral articles by day
with open(fileName) as json_data:
d = json.load(json_data)
ra = d["posts"]
for post in ra:
p = post["published"]
m = int(p[5:7])
if m == 11:
d = int(p[8:10])
else:
d = int(p[8:10])+30
aa[d-day] += 1
social_impact = getSocialData(post)
aa_social[d-day] += social_impact
# Get text from news object
text = post["text"]
text = text.encode('ascii','ignore')
# Search for Trump
foundindx = text.find("Trump")
if foundindx > 0:
aa_trump[d-day] += 1
# Search for Election
foundindx = text.find("election")
if foundindx > 0:
aa_election[d-day] += 1
# Search for Clinton
foundindx = text.find("Clinton")
if foundindx > 0:
aa_clinton[d-day] += 1
print "Found Trump: " + str(aa_trump)
print "Found Election: " + str(aa_election)
print "Found Clinton: " + str(aa_clinton)
out0 = open('all.txt', 'w')
out0.truncate()
for i in aa:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('all_social.txt', 'w')
out0.truncate()
for i in aa_social:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('all_election.txt', 'w')
out0.truncate()
for i in aa_election:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('all_trump.txt', 'w')
out0.truncate()
for i in aa_trump:
out0.write(str(i))
out0.write("\n")
out0.close()
out0 = open('all_clinton.txt', 'w')
out0.truncate()
for i in aa_clinton:
out0.write(str(i))
out0.write("\n")
out0.close()
for x in range(1, numPositive+1):
fileName = positiveFileName + str(x) + '.json'
print fileName
# calculates number of positive articles per day, from the last 30 days
with open(fileName) as json_data:
d = json.load(json_data)
rp = d["posts"]
for post in rp:
p = post["published"]
m = int(p[5:7])
if m == 11:
d = int(p[8:10])
else:
d = int(p[8:10])+30
ap[d-day] += 1
social_impact = getSocialData(post)
ap_social[d-day] += social_impact
# Get text from news object
text = post["text"]
text = text.encode('ascii','ignore')
# Search for Trump
foundindx = text.find("Trump")
if foundindx > 0:
ap_trump[d-day] += 1
# Search for Election
foundindx = text.find("election")
if foundindx > 0:
ap_election[d-day] += 1
# Search for Clinton
foundindx = text.find("Clinton")
if foundindx > 0:
ap_clinton[d-day] += 1
print "Found Trump: " + str(ap_trump)
print "Found Election: " + str(ap_election)
print "Found Clinton: " + str(ap_clinton)
out1 = open('positive.txt', 'w')
out1.truncate()
for i in ap:
out1.write(str(i))
out1.write("\n")
out1.close()
out1 = open('positive_social.txt', 'w')
out1.truncate()
for i in ap_social:
out1.write(str(i))
out1.write("\n")
out1.close()
out1 = open('positive_election.txt', 'w')
out1.truncate()
for i in ap_election:
out1.write(str(i))
out1.write("\n")
out1.close()
out1 = open('positive_trump.txt', 'w')
out1.truncate()
for i in ap_trump:
out1.write(str(i))
out1.write("\n")
out1.close()
out1 = open('positive_clinton.txt', 'w')
out1.truncate()
for i in ap_clinton:
out1.write(str(i))
out1.write("\n")
out1.close() | mit |
oskar456/youtube-dl | youtube_dl/extractor/periscope.py | 10 | 5763 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
unescapeHTML,
)
class PeriscopeBaseIE(InfoExtractor):
def _call_api(self, method, query, item_id):
return self._download_json(
'https://api.periscope.tv/api/v2/%s' % method,
item_id, query=query)
class PeriscopeIE(PeriscopeBaseIE):
IE_DESC = 'Periscope'
IE_NAME = 'periscope'
_VALID_URL = r'https?://(?:www\.)?(?:periscope|pscp)\.tv/[^/]+/(?P<id>[^/?#]+)'
# Alive example URLs can be found here http://onperiscope.com/
_TESTS = [{
'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==',
'md5': '65b57957972e503fcbbaeed8f4fa04ca',
'info_dict': {
'id': '56102209',
'ext': 'mp4',
'title': 'Bec Boop - 🚠✈️🇬🇧 Fly above #London in Emirates Air Line cable car at night 🇬🇧✈️🚠 #BoopScope 🎀💗',
'timestamp': 1438978559,
'upload_date': '20150807',
'uploader': 'Bec Boop',
'uploader_id': '1465763',
},
'skip': 'Expires in 24 hours',
}, {
'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv',
'only_matching': True,
}, {
'url': 'https://www.periscope.tv/bastaakanoggano/1OdKrlkZZjOJX',
'only_matching': True,
}, {
'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=([\'"])(?P<url>(?:https?:)?//(?:www\.)?(?:periscope|pscp)\.tv/(?:(?!\1).)+)\1', webpage)
if mobj:
return mobj.group('url')
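# Illustrative note (added for clarity; the markup below is a hypothetical page
# fragment, not taken from a real site): the regex above is meant to pick up
# embedded players such as
#   <iframe src="https://www.periscope.tv/w/1ZkKzPbMVggJv"></iframe>
# and _extract_url() returns that src value so the embedded broadcast can be
# handed over to this extractor.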
def _real_extract(self, url):
token = self._match_id(url)
broadcast_data = self._call_api(
'getBroadcastPublic', {'broadcast_id': token}, token)
broadcast = broadcast_data['broadcast']
status = broadcast['status']
user = broadcast_data.get('user', {})
uploader = broadcast.get('user_display_name') or user.get('display_name')
uploader_id = (broadcast.get('username') or user.get('username') or
broadcast.get('user_id') or user.get('id'))
title = '%s - %s' % (uploader, status) if uploader else status
state = broadcast.get('state').lower()
if state == 'running':
title = self._live_title(title)
timestamp = parse_iso8601(broadcast.get('created_at'))
thumbnails = [{
'url': broadcast[image],
} for image in ('image_url', 'image_url_small') if broadcast.get(image)]
stream = self._call_api(
'getAccessPublic', {'broadcast_id': token}, token)
video_urls = set()
formats = []
for format_id in ('replay', 'rtmp', 'hls', 'https_hls', 'lhls', 'lhlsweb'):
video_url = stream.get(format_id + '_url')
if not video_url or video_url in video_urls:
continue
video_urls.add(video_url)
if format_id != 'rtmp':
formats.extend(self._extract_m3u8_formats(
video_url, token, 'mp4',
entry_protocol='m3u8_native'
if state in ('ended', 'timed_out') else 'm3u8',
m3u8_id=format_id, fatal=False))
continue
formats.append({
'url': video_url,
'ext': 'flv' if format_id == 'rtmp' else 'mp4',
})
self._sort_formats(formats)
return {
'id': broadcast.get('id') or token,
'title': title,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'thumbnails': thumbnails,
'formats': formats,
}
class PeriscopeUserIE(PeriscopeBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?:periscope|pscp)\.tv/(?P<id>[^/]+)/?$'
IE_DESC = 'Periscope user videos'
IE_NAME = 'periscope:user'
_TEST = {
'url': 'https://www.periscope.tv/LularoeHusbandMike/',
'info_dict': {
'id': 'LularoeHusbandMike',
'title': 'LULAROE HUSBAND MIKE',
'description': 'md5:6cf4ec8047768098da58e446e82c82f0',
},
# Periscope only shows videos in the last 24 hours, so it's possible to
# get 0 videos
'playlist_mincount': 0,
}
def _real_extract(self, url):
user_name = self._match_id(url)
webpage = self._download_webpage(url, user_name)
data_store = self._parse_json(
unescapeHTML(self._search_regex(
r'data-store=(["\'])(?P<data>.+?)\1',
webpage, 'data store', default='{}', group='data')),
user_name)
user = list(data_store['UserCache']['users'].values())[0]['user']
user_id = user['id']
session_id = data_store['SessionToken']['public']['broadcastHistory']['token']['session_id']
broadcasts = self._call_api(
'getUserBroadcastsPublic',
{'user_id': user_id, 'session_id': session_id},
user_name)['broadcasts']
broadcast_ids = [
broadcast['id'] for broadcast in broadcasts if broadcast.get('id')]
title = user.get('display_name') or user.get('username') or user_name
description = user.get('description')
entries = [
self.url_result(
'https://www.periscope.tv/%s/%s' % (user_name, broadcast_id))
for broadcast_id in broadcast_ids]
return self.playlist_result(entries, user_id, title, description)
| unlicense |
MountainWei/nova | nova/tests/functional/libvirt/test_numa_servers.py | 45 | 6782 | # Copyright (C) 2015 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova.tests.functional.test_servers import ServersTestBase
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NumaHostInfo(fakelibvirt.HostInfo):
def __init__(self, **kwargs):
super(NumaHostInfo, self).__init__(**kwargs)
self.numa_mempages_list = []
def get_numa_topology(self):
if self.numa_topology:
return self.numa_topology
topology = self._gen_numa_topology(self.cpu_nodes, self.cpu_sockets,
self.cpu_cores, self.cpu_threads,
self.kB_mem)
self.numa_topology = topology
# update number of active cpus
cpu_count = len(topology.cells) * len(topology.cells[0].cpus)
self.cpus = cpu_count - len(self.disabled_cpus_list)
return topology
def set_custom_numa_toplogy(self, topology):
self.numa_topology = topology
class NUMAServersTest(ServersTestBase):
def setUp(self):
super(NUMAServersTest, self).setUp()
# Replace libvirt with fakelibvirt
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(fakelibvirt.FakeLibvirtFixture())
def _setup_compute_service(self):
pass
def _setup_scheduler_service(self):
self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')
self.flags(scheduler_driver='nova.scheduler.'
'filter_scheduler.FilterScheduler')
self.flags(scheduler_default_filters=CONF.scheduler_default_filters
+ ['NUMATopologyFilter'])
return self.start_service('scheduler')
def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'):
self.compute = self.start_service('compute', host='test_compute0')
fake_network.set_stub_network_methods(self.stubs)
# Create server
good_server = self._build_server(flavor_id)
post = {'server': good_server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Validate that the server has been created
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
# Validate that NUMATopologyFilter has been called
self.assertTrue(filter_mock.called)
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual(end_status, found_server['status'])
self._delete_server(created_server_id)
def _get_topology_filter_spy(self):
host_manager = self.scheduler.manager.driver.host_manager
numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
return host_pass_mock
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_numa_topology(self, img_mock):
host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
cpu_threads=2, kB_mem=15740000)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
hv_version=2001000,
host_info=host_info)
# Create a flavor
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
host_pass_mock = self._get_topology_filter_spy()
with contextlib.nested(
mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection),
mock.patch('nova.scheduler.filters'
'.numa_topology_filter.NUMATopologyFilter.host_passes',
side_effect=host_pass_mock)) as (conn_mock,
filter_mock):
self._run_build_test(flavor_id, filter_mock)
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_numa_fails(self, img_mock):
host_info = NumaHostInfo(cpu_nodes=1, cpu_sockets=1, cpu_cores=2,
kB_mem=15740000)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
host_info=host_info)
# Create a flavor
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
host_pass_mock = self._get_topology_filter_spy()
with contextlib.nested(
mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection),
mock.patch('nova.scheduler.filters'
'.numa_topology_filter.NUMATopologyFilter.host_passes',
side_effect=host_pass_mock)) as (conn_mock,
filter_mock):
self._run_build_test(flavor_id, filter_mock, end_status='ERROR')
| apache-2.0 |
gunzy83/ansible-modules-extras | cloud/cloudstack/cs_vmsnapshot.py | 44 | 8582 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_vmsnapshot
short_description: Manages VM snapshots on Apache CloudStack based clouds.
description:
- Create, remove and revert VM from snapshots.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Unique Name of the snapshot. In CloudStack terms display name.
required: true
aliases: ['display_name']
vm:
description:
- Name of the virtual machine.
required: true
description:
description:
- Description of the snapshot.
required: false
default: null
snapshot_memory:
description:
- Snapshot memory if set to true.
required: false
default: false
zone:
description:
- Name of the zone in which the VM is in. If not set, default zone is used.
required: false
default: null
project:
description:
- Name of the project the VM is assigned to.
required: false
default: null
state:
description:
- State of the snapshot.
required: false
default: 'present'
choices: [ 'present', 'absent', 'revert' ]
domain:
description:
- Domain the VM snapshot is related to.
required: false
default: null
account:
description:
- Account the VM snapshot is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a VM snapshot of disk and memory before an upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
snapshot_memory: yes
# Revert a VM to a snapshot after a failed upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: revert
# Remove a VM snapshot after successful upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the snapshot.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the snapshot.
returned: success
type: string
sample: snapshot before update
display_name:
description: Display name of the snapshot.
returned: success
type: string
sample: snapshot before update
created:
description: date of the snapshot.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
current:
description: true if snapshot is current
returned: success
type: boolean
sample: True
state:
description: state of the vm snapshot
returned: success
type: string
sample: Allocated
type:
description: type of vm snapshot
returned: success
type: string
sample: DiskAndMemory
description:
description: description of vm snapshot
returned: success
type: string
sample: snapshot brought to you by Ansible
domain:
description: Domain the vm snapshot is related to.
returned: success
type: string
sample: example domain
account:
description: Account the vm snapshot is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the vm snapshot is related to.
returned: success
type: string
sample: Production
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackVmSnapshot, self).__init__(module)
self.returns = {
'type': 'type',
'current': 'current',
}
def get_snapshot(self):
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
args['projectid'] = self.get_project('id')
args['name'] = self.module.params.get('name')
snapshots = self.cs.listVMSnapshot(**args)
if snapshots:
return snapshots['vmSnapshot'][0]
return None
def create_snapshot(self):
snapshot = self.get_snapshot()
if not snapshot:
self.result['changed'] = True
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['name'] = self.module.params.get('name')
args['description'] = self.module.params.get('description')
args['snapshotmemory'] = self.module.params.get('snapshot_memory')
if not self.module.check_mode:
res = self.cs.createVMSnapshot(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
snapshot = self.poll_job(res, 'vmsnapshot')
return snapshot
def remove_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
def revert_vm_to_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if snapshot['state'] != "Ready":
self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])
if not self.module.check_mode:
res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
self.module.fail_json(msg="snapshot not found, could not revert VM")
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True, aliases=['display_name']),
vm = dict(required=True),
description = dict(default=None),
zone = dict(default=None),
snapshot_memory = dict(type='bool', default=False),
state = dict(choices=['present', 'absent', 'revert'], default='present'),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
['icmp_type', 'icmp_code'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
try:
acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
state = module.params.get('state')
if state in ['revert']:
snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
elif state in ['absent']:
snapshot = acs_vmsnapshot.remove_snapshot()
else:
snapshot = acs_vmsnapshot.create_snapshot()
result = acs_vmsnapshot.get_result(snapshot)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
laurenweinstein1/ardupilot-lw | mk/PX4/Tools/genmsg/src/genmsg/gentools.py | 214 | 6644 | #! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for supporting message and service generation for all ROS
client libraries. This is mainly responsible for calculating the
md5sums and message definitions of classes.
"""
# NOTE: this should not contain any rospy-specific code. The rospy
# generator library is rospy.genpy.
import sys
import hashlib
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . import msgs
from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin
from .msg_loader import load_depends
from .srvs import SrvSpec
from . import names
from . import base
def compute_md5_text(msg_context, spec):
"""
Compute the text used for md5 calculation. The MD5 spec states that we
remove comments and non-meaningful whitespace. We also strip
package names from type names. For convenience's sake, constants are
reordered ahead of other declarations, in the order that they were
originally defined.
:returns: text for ROS MD5-processing, ``str``
"""
package = spec.package
buff = StringIO()
for c in spec.constants:
buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
msg_type = bare_msg_type(type_)
# md5 spec strips package names
if is_builtin(msg_type):
buff.write("%s %s\n"%(type_, name))
else:
# recursively generate md5 for subtype. have to build up
# dependency representation for subtype in order to
# generate md5
sub_pkg, _ = names.package_resource_name(msg_type)
sub_pkg = sub_pkg or package
sub_spec = msg_context.get_registered(msg_type)
sub_md5 = compute_md5(msg_context, sub_spec)
buff.write("%s %s\n"%(sub_md5, name))
return buff.getvalue().strip() # remove trailing new line
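# Illustrative sketch (added; the message definition is hypothetical): for a
# spec 'demo/Point' declared as
#   int32 COUNT=1
#   float64 x
#   geometry_msgs/Vector3 v
# the text built above keeps the constant and the builtin field as-is and
# replaces the embedded type's name by the md5 of that type's own spec:
#   int32 COUNT=1
#   float64 x
#   <md5 of geometry_msgs/Vector3> v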
def _compute_hash(msg_context, spec, hash):
"""
subroutine of compute_md5()
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute hash for.
:param hash: hash instance
"""
# accumulate the hash
# - root file
if isinstance(spec, MsgSpec):
hash.update(compute_md5_text(msg_context, spec).encode())
elif isinstance(spec, SrvSpec):
hash.update(compute_md5_text(msg_context, spec.request).encode())
hash.update(compute_md5_text(msg_context, spec.response).encode())
else:
raise Exception("[%s] is not a message or service"%spec)
return hash.hexdigest()
def compute_md5(msg_context, spec):
"""
Compute md5 hash for message/service
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute md5 for.
:returns: md5 hash, ``str``
"""
return _compute_hash(msg_context, spec, hashlib.md5())
## alias
compute_md5_v2 = compute_md5
def _unique_deps(dep_list):
uniques = []
for d in dep_list:
if d not in uniques:
uniques.append(d)
return uniques
def compute_full_text(msg_context, spec):
"""
Compute full text of message/service, including text of embedded
types. The text of the main msg/srv is listed first. Embedded
msg/srv files are denoted first by an 80-character '=' separator,
followed by a type declaration line,'MSG: pkg/type', followed by
the text of the embedded type.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute full text for.
:returns: concatenated text for msg/srv file and embedded msg/srv types, ``str``
"""
buff = StringIO()
sep = '='*80+'\n'
# write the text of the top-level type
buff.write(spec.text)
buff.write('\n')
# append the text of the dependencies (embedded types). Can't use set() as we have to preserve order.
for d in _unique_deps(msg_context.get_all_depends(spec.full_name)):
buff.write(sep)
buff.write("MSG: %s\n"%d)
buff.write(msg_context.get_registered(d).text)
buff.write('\n')
# #1168: remove the trailing \n separator that is added by the concatenation logic
return buff.getvalue()[:-1]
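# Illustrative layout (added) of the concatenated text described in the
# docstring, for a hypothetical type with one embedded dependency:
#   <text of the top-level msg/srv file>
#   ======... (a separator line of 80 '=' characters)
#   MSG: geometry_msgs/Vector3
#   <text of the embedded type>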
def compute_full_type_name(package_name, file_name):
"""
Compute the full type name of message/service 'pkg/type'.
:param package_name: name of package file is in, ``str``
:file_name: name of the msg or srv file, ``str``
:returns: typename in format 'pkg/type'
:raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension
"""
# strip extension
for ext in (base.EXT_MSG, base.EXT_SRV):
if file_name.endswith(ext):
short_name = file_name[:-len(ext)]
break
else:
raise base.MsgGenerationException("Processing file: '%s' - unknown file extension"% (file_name))
return "%s/%s"%(package_name, short_name)
| gpl-3.0 |
2uller/LotF | App/Lib/test/test_bsddb.py | 10 | 12031 | #! /usr/bin/env python
"""Test script for the bsddb C module by Roger E. Masse
Adapted to unittest format and expanded scope by Raymond Hettinger
"""
import os, sys
import unittest
from test import test_support
# Skip test if _bsddb wasn't built.
test_support.import_module('_bsddb')
bsddb = test_support.import_module('bsddb', deprecated=True)
# Just so we know it's imported:
test_support.import_module('dbhash', deprecated=True)
class TestBSDDB(unittest.TestCase):
openflag = 'c'
def setUp(self):
self.f = self.openmethod[0](self.fname, self.openflag, cachesize=32768)
self.d = dict(q='Guido', w='van', e='Rossum', r='invented', t='Python', y='')
for k, v in self.d.iteritems():
self.f[k] = v
def tearDown(self):
self.f.sync()
self.f.close()
if self.fname is None:
return
try:
os.remove(self.fname)
except os.error:
pass
def test_getitem(self):
for k, v in self.d.iteritems():
self.assertEqual(self.f[k], v)
def test_len(self):
self.assertEqual(len(self.f), len(self.d))
def test_change(self):
self.f['r'] = 'discovered'
self.assertEqual(self.f['r'], 'discovered')
self.assertIn('r', self.f.keys())
self.assertIn('discovered', self.f.values())
def test_close_and_reopen(self):
if self.fname is None:
# if we're using an in-memory only db, we can't reopen it
# so finish here.
return
self.f.close()
self.f = self.openmethod[0](self.fname, 'w')
for k, v in self.d.iteritems():
self.assertEqual(self.f[k], v)
def assertSetEquals(self, seqn1, seqn2):
self.assertEqual(set(seqn1), set(seqn2))
def test_mapping_iteration_methods(self):
f = self.f
d = self.d
self.assertSetEquals(d, f)
self.assertSetEquals(d.keys(), f.keys())
self.assertSetEquals(d.values(), f.values())
self.assertSetEquals(d.items(), f.items())
self.assertSetEquals(d.iterkeys(), f.iterkeys())
self.assertSetEquals(d.itervalues(), f.itervalues())
self.assertSetEquals(d.iteritems(), f.iteritems())
def test_iter_while_modifying_values(self):
di = iter(self.d)
while 1:
try:
key = di.next()
self.d[key] = 'modified '+key
except StopIteration:
break
# it should behave the same as a dict. modifying values
# of existing keys should not break iteration. (adding
# or removing keys should)
loops_left = len(self.f)
fi = iter(self.f)
while 1:
try:
key = fi.next()
self.f[key] = 'modified '+key
loops_left -= 1
except StopIteration:
break
self.assertEqual(loops_left, 0)
self.test_mapping_iteration_methods()
def test_iter_abort_on_changed_size(self):
def DictIterAbort():
di = iter(self.d)
while 1:
try:
di.next()
self.d['newkey'] = 'SPAM'
except StopIteration:
break
self.assertRaises(RuntimeError, DictIterAbort)
def DbIterAbort():
fi = iter(self.f)
while 1:
try:
fi.next()
self.f['newkey'] = 'SPAM'
except StopIteration:
break
self.assertRaises(RuntimeError, DbIterAbort)
def test_iteritems_abort_on_changed_size(self):
def DictIteritemsAbort():
di = self.d.iteritems()
while 1:
try:
di.next()
self.d['newkey'] = 'SPAM'
except StopIteration:
break
self.assertRaises(RuntimeError, DictIteritemsAbort)
def DbIteritemsAbort():
fi = self.f.iteritems()
while 1:
try:
key, value = fi.next()
del self.f[key]
except StopIteration:
break
self.assertRaises(RuntimeError, DbIteritemsAbort)
def test_iteritems_while_modifying_values(self):
di = self.d.iteritems()
while 1:
try:
k, v = di.next()
self.d[k] = 'modified '+v
except StopIteration:
break
# it should behave the same as a dict. modifying values
# of existing keys should not break iteration. (adding
# or removing keys should)
loops_left = len(self.f)
fi = self.f.iteritems()
while 1:
try:
k, v = fi.next()
self.f[k] = 'modified '+v
loops_left -= 1
except StopIteration:
break
self.assertEqual(loops_left, 0)
self.test_mapping_iteration_methods()
def test_first_next_looping(self):
items = [self.f.first()]
for i in xrange(1, len(self.f)):
items.append(self.f.next())
self.assertSetEquals(items, self.d.items())
def test_previous_last_looping(self):
items = [self.f.last()]
for i in xrange(1, len(self.f)):
items.append(self.f.previous())
self.assertSetEquals(items, self.d.items())
def test_first_while_deleting(self):
# Test for bug 1725856
self.assertTrue(len(self.d) >= 2, "test requires >=2 items")
for _ in self.d:
key = self.f.first()[0]
del self.f[key]
self.assertEqual([], self.f.items(), "expected empty db after test")
def test_last_while_deleting(self):
# Test for bug 1725856's evil twin
self.assertTrue(len(self.d) >= 2, "test requires >=2 items")
for _ in self.d:
key = self.f.last()[0]
del self.f[key]
self.assertEqual([], self.f.items(), "expected empty db after test")
def test_set_location(self):
self.assertEqual(self.f.set_location('e'), ('e', self.d['e']))
def test_contains(self):
for k in self.d:
self.assertIn(k, self.f)
self.assertNotIn('not here', self.f)
def test_has_key(self):
for k in self.d:
self.assertTrue(self.f.has_key(k))
self.assertTrue(not self.f.has_key('not here'))
def test_clear(self):
self.f.clear()
self.assertEqual(len(self.f), 0)
def test__no_deadlock_first(self, debug=0):
# do this so that testers can see what function we're in in
# verbose mode when we deadlock.
sys.stdout.flush()
# in pybsddb's _DBWithCursor this causes an internal DBCursor
# object is created. Other test_ methods in this class could
# inadvertently cause the deadlock but an explicit test is needed.
if debug: print "A"
k,v = self.f.first()
if debug: print "B", k
self.f[k] = "deadlock. do not pass go. do not collect $200."
if debug: print "C"
# if the bsddb implementation leaves the DBCursor open during
# the database write and locking+threading support is enabled
# the cursor's read lock will deadlock the write lock request..
# test the iterator interface
if True:
if debug: print "D"
i = self.f.iteritems()
k,v = i.next()
if debug: print "E"
self.f[k] = "please don't deadlock"
if debug: print "F"
while 1:
try:
k,v = i.next()
except StopIteration:
break
if debug: print "F2"
i = iter(self.f)
if debug: print "G"
while i:
try:
if debug: print "H"
k = i.next()
if debug: print "I"
self.f[k] = "deadlocks-r-us"
if debug: print "J"
except StopIteration:
i = None
if debug: print "K"
# test the legacy cursor interface mixed with writes
self.assertIn(self.f.first()[0], self.d)
k = self.f.next()[0]
self.assertIn(k, self.d)
self.f[k] = "be gone with ye deadlocks"
self.assertTrue(self.f[k], "be gone with ye deadlocks")
def test_for_cursor_memleak(self):
# do the bsddb._DBWithCursor iterator internals leak cursors?
nc1 = len(self.f._cursor_refs)
# create iterator
i = self.f.iteritems()
nc2 = len(self.f._cursor_refs)
# use the iterator (should run to the first yield, creating the cursor)
k, v = i.next()
nc3 = len(self.f._cursor_refs)
# destroy the iterator; this should cause the weakref callback
# to remove the cursor object from self.f._cursor_refs
del i
nc4 = len(self.f._cursor_refs)
self.assertEqual(nc1, nc2)
self.assertEqual(nc1, nc4)
self.assertTrue(nc3 == nc1+1)
def test_popitem(self):
k, v = self.f.popitem()
self.assertIn(k, self.d)
self.assertIn(v, self.d.values())
self.assertNotIn(k, self.f)
self.assertEqual(len(self.d)-1, len(self.f))
def test_pop(self):
k = 'w'
v = self.f.pop(k)
self.assertEqual(v, self.d[k])
self.assertNotIn(k, self.f)
self.assertNotIn(v, self.f.values())
self.assertEqual(len(self.d)-1, len(self.f))
def test_get(self):
self.assertEqual(self.f.get('NotHere'), None)
self.assertEqual(self.f.get('NotHere', 'Default'), 'Default')
self.assertEqual(self.f.get('q', 'Default'), self.d['q'])
def test_setdefault(self):
self.assertEqual(self.f.setdefault('new', 'dog'), 'dog')
self.assertEqual(self.f.setdefault('r', 'cat'), self.d['r'])
def test_update(self):
new = dict(y='life', u='of', i='brian')
self.f.update(new)
self.d.update(new)
for k, v in self.d.iteritems():
self.assertEqual(self.f[k], v)
def test_keyordering(self):
if self.openmethod[0] is not bsddb.btopen:
return
keys = self.d.keys()
keys.sort()
self.assertEqual(self.f.first()[0], keys[0])
self.assertEqual(self.f.next()[0], keys[1])
self.assertEqual(self.f.last()[0], keys[-1])
self.assertEqual(self.f.previous()[0], keys[-2])
self.assertEqual(list(self.f), keys)
class TestBTree(TestBSDDB):
fname = test_support.TESTFN
openmethod = [bsddb.btopen]
class TestBTree_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.btopen]
class TestBTree_InMemory_Truncate(TestBSDDB):
fname = None
openflag = 'n'
openmethod = [bsddb.btopen]
class TestHashTable(TestBSDDB):
fname = test_support.TESTFN
openmethod = [bsddb.hashopen]
class TestHashTable_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.hashopen]
## # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85
## # appears broken... at least on
## # Solaris Intel - rmasse 1/97
def test_main(verbose=None):
test_support.run_unittest(
TestBTree,
TestHashTable,
TestBTree_InMemory,
TestHashTable_InMemory,
TestBTree_InMemory_Truncate,
)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-2.0 |
hdinsight/hue | desktop/core/ext-py/ndg_httpsclient-0.4.0/setup.py | 22 | 4836 | try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
_long_description = '''
This is a HTTPS client implementation for httplib and urllib2 based on
PyOpenSSL. PyOpenSSL provides a more fully featured SSL implementation over the
default provided with Python and importantly enables full verification of the
SSL peer.
Releases
========
0.4.0
-----
* Made dual compatible with Python 2 / 3.
0.3.3
-----
* Fix to add in AnotherName for ``subjectAltNames`` field - added for support for CACert issued
certs (thanks to Gu1).
* Fix to HTTP Basic Auth option for ``ndg.httpsclient.utils.main``
* Fix to ``ServerSSLCertVerification`` so that it can pass a function-based callback instead of using ``__call__``. In newer versions of OpenSSL (>= 0.14) the latter failed because of a request for ``__name__`` attribute.
0.3.2
-----
* Fix to SubjectAltNames support check - should only be enabled if pyasn1 is
installed.
* Fix to open_url: HTTP Request object was being created inside if headers is
None block - now corrected to create regardless.
* Added http basic auth support to script. (Thanks to Willem van Engen)
0.3.1
-----
* extended utils functions to support keyword for passing additional urllib2
handlers.
0.3.0
-----
* Added ndg.httpsclient.utils.fetch_stream_from_url function and added
parameter for data to post in open_url and fetch_* methods.
* fix to ndg.httpsclient.utils module _should_use_proxy and open_url functions
0.2.0
-----
* added support for SSL verification with subjectAltNames using pyasn1
* fixed minor bug - SSL cert DN prefix matching
0.1.0
-----
Initial release
Prerequisites
=============
This has been developed and tested for Python 2.6 and 2.7 with pyOpenSSL 0.13 and 0.14.
Version 0.4.0 tested with pyOpenSSL 0.15.1 and Python 2.7 and 3.4. Note that proxy support
is only available from Python 2.6.2 onwards. pyasn1 is required for correct SSL
verification with subjectAltNames.
Installation
============
Installation can be performed using easy_install or pip.
Running ndg_httpclient
======================
A simple script for fetching data using HTTP or HTTPS GET from a specified URL.
Parameter:
``url``
The URL of the resource to be fetched
Options:
``-h, --help``
Show help message and exit.
``-c FILE, --certificate=FILE``
Certificate file - defaults to ``$HOME/credentials.pem``
``-k FILE, --private-key=FILE``
Private key file - defaults to the certificate file
``-t DIR, --ca-certificate-dir=DIR``
Trusted CA certificate file directory.
``-d, --debug``
Print debug information - this may be useful in solving problems with HTTP or
HTTPS access to a server.
``-p FILE, --post-data-file=FILE``
POST data file
``-f FILE, --fetch=FILE``
Output file
``-n, --no-verify-peer``
Skip verification of peer certificate.
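Example
=======

An illustrative invocation using the options above (certificate and CA
locations are placeholders and should be adapted to your environment)::

    ndg_httpclient -c $HOME/credentials.pem -t /etc/grid-security/certificates \
        -f response.html https://host.example.org/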
'''
setup(
name='ndg_httpsclient',
version="0.4.0",
description='Provides enhanced HTTPS support for httplib and urllib2 using '
'PyOpenSSL',
author='Richard Wilkinson and Philip Kershaw',
author_email='Philip.Kershaw@stfc.ac.uk',
url='https://github.com/cedadev/ndg_httpsclient/',
long_description=_long_description,
license='BSD - See LICENCE file for details',
namespace_packages=['ndg'],
packages=find_packages(),
package_dir={'ndg.httpsclient': 'ndg/httpsclient'},
package_data={
'ndg.httpsclient': [
'test/README',
'test/scripts/*.sh',
'test/pki/localhost.*',
'test/pki/ca/*.0'
],
},
install_requires = ['PyOpenSSL'],
extras_require = {'subjectAltName_support': 'pyasn1'},
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Security',
'Topic :: Internet',
'Topic :: Scientific/Engineering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Systems Administration :: Authentication/Directory',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe = False,
entry_points = {
'console_scripts': ['ndg_httpclient = ndg.httpsclient.utils:main',
],
}
)
| apache-2.0 |
id0tfqh/statistic | check-ip.py | 1 | 1236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import threading
pattern = r'^[0-9.]+$'
try:
addr = raw_input('input ip address: \n')
if re.match(pattern, addr):
pass
else:
print('An incorrect address')
addr = '192.168.1.1'
except ValueError: print "An incorrect address"
command = 'ipset -L | grep '
command = command + addr
os.system('ssh -24xCt tech@base221 sudo {0}'.format(command))
os.system('ssh -24xCt tech@base222 sudo {0}'.format(command))
os.system('ssh -24xCt tech@base241 sudo {0}'.format(command))
def check(command):
os.system('ssh -24xCt denis@{0} {1} {2}'.format('echo21','sudo',command))
p1 = threading.Thread(target=check, name="t1", args=(command,))
p1.start()
#def check_ip(server, command):
# os.system('ssh -24xCt denis@{0} {1} {2}'.format(server,'sudo',command))
#check1 = threading.Thread(target=check_ip, args = ('echo21', command))
#check1 = threading.Thread(target=check_ip, args = ('echo22', command))
#check1 = threading.Thread(target=check_ip, args = ('echo41', command))
#check1 = threading.Thread(target=check_ip, args = ('delta91', command))
#check1 = threading.Thread(target=check_ip, args = ('delta92', command))
#check1 = threading.Thread(target=check_ip, args = ('delta8', command))
| gpl-3.0 |
ojake/django | tests/fixtures_model_package/tests.py | 312 | 2204 | from __future__ import unicode_literals
import warnings
from django.core import management
from django.test import TestCase
from .models import Article
class SampleTestCase(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Test cases can load fixture objects into models defined in packages"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline
)
class FixtureTestCase(TestCase):
def test_loaddata(self):
"Fixtures can load data into models defined in packages"
# Load fixture 1. Single JSON file, with two objects
management.call_command("loaddata", "fixture1.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Time to reform copyright",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load fixture 2. JSON file imported by default. Overwrites some
# existing objects
management.call_command("loaddata", "fixture2.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load a fixture that doesn't exist
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
management.call_command("loaddata", "unknown.json", verbosity=0)
self.assertEqual(len(w), 1)
self.assertTrue(w[0].message, "No fixture named 'unknown' found.")
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
| bsd-3-clause |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/Lib/test/test_bufio.py | 64 | 2654 | import unittest
from test import support
import io # C implementation.
import _pyio as pyio # Python implementation.
# Simple test to ensure that optimizations in the IO library deliver the
# expected results. For best testing, run this under a debug-build Python too
# (to exercise asserts in the C code).
lengths = list(range(1, 257)) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
16384, 32768, 65536, 1000000]
class BufferSizeTest(unittest.TestCase):
def try_one(self, s):
# Write s + "\n" + s to file, then open it and ensure that successive
# .readline()s deliver what we wrote.
# Ensure we can open TESTFN for writing.
support.unlink(support.TESTFN)
# Since C doesn't guarantee we can write/read arbitrary bytes in text
# files, use binary mode.
f = self.open(support.TESTFN, "wb")
try:
# write once with \n and once without
f.write(s)
f.write(b"\n")
f.write(s)
f.close()
f = open(support.TESTFN, "rb")
line = f.readline()
self.assertEqual(line, s + b"\n")
line = f.readline()
self.assertEqual(line, s)
line = f.readline()
self.assertTrue(not line) # Must be at EOF
f.close()
finally:
support.unlink(support.TESTFN)
def drive_one(self, pattern):
for length in lengths:
# Repeat string 'pattern' as often as needed to reach total length
# 'length'. Then call try_one with that string, a string one larger
# than that, and a string one smaller than that. Try this with all
# small sizes and various powers of 2, so we exercise all likely
# stdio buffer sizes, and "off by one" errors on both sides.
q, r = divmod(length, len(pattern))
teststring = pattern * q + pattern[:r]
self.assertEqual(len(teststring), length)
self.try_one(teststring)
self.try_one(teststring + b"x")
self.try_one(teststring[:-1])
def test_primepat(self):
# A pattern with prime length, to avoid simple relationships with
# stdio buffer sizes.
self.drive_one(b"1234567890\00\01\02\03\04\05\06")
def test_nullpat(self):
self.drive_one(bytes(1000))
class CBufferSizeTest(BufferSizeTest):
open = io.open
class PyBufferSizeTest(BufferSizeTest):
open = staticmethod(pyio.open)
def test_main():
support.run_unittest(CBufferSizeTest, PyBufferSizeTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
TylerTemp/tomorrow | lib/hdlr/tomorrow/blog/edit.py | 1 | 2586 | import tornado.web
import logging
import json
try:
from urllib.parse import unquote
from urllib.parse import quote
except ImportError:
from urllib import unquote
from urllib import quote
from .base import BaseHandler
from ..base import EnsureUser
from lib.db.tomorrow import Article, User
class EditHandler(BaseHandler):
logger = logging.getLogger('tomorrow.blog.edit')
@EnsureUser(level=User.ROOT, active=True)
def get(self, urlslug=None):
if self.get_argument('test', False):
return self.check_slug(self.get_argument('slug'), urlslug)
user = self.current_user
source = self.get_argument('source', 'zh')
if urlslug is not None:
article = Article(urlslug, source)
if not article:
raise tornado.web.HTTPError(404, '%s not found' % urlslug)
else:
article = Article(urlslug, source)
return self.render(
'tomorrow/blog/edit.html',
article=article,
user=user
)
def check_slug(self, slug, this_slug):
if slug == this_slug:
result = -1
else:
result = int(not bool(Article(slug)))
return self.write(str(result))
@EnsureUser(level=User.ROOT, active=True)
def post(self, urlslug=None):
title = self.get_argument('title')
slug = self.get_argument('slug')
content = self.get_argument('content')
tag = []
for each_tag in self.get_argument('tag', '').split(','):
each = each_tag.strip()
if each and each not in tag:
tag.append(each)
description = self.get_argument('description', None)
lang = self.get_argument('language', 'zh')
assert lang in ('zh', 'en')
if description is not None:
description = description.strip() or None
if urlslug:
article = Article(unquote(urlslug), lang=lang)
if not article:
raise tornado.web.HTTPError(404, '%s not found' % urlslug)
else:
article = Article(lang=lang)
article.slug = slug
article.title = title
article.content = content
article.description = description
article.author = self.current_user.name
article.tag = tag
article.banner = self.get_argument('banner', None)
article.cover = self.get_argument('cover', None)
article.save()
return self.write(json.dumps(
{'error': 0, 'redirect': '/blog/%s/' % quote(slug)}))
| gpl-3.0 |
sexroute/commandergenius | project/jni/python/src/Doc/includes/minidom-example.py | 37 | 1564 | import xml.dom.minidom
document = """\
<slideshow>
<title>Demo slideshow</title>
<slide><title>Slide title</title>
<point>This is a demo</point>
<point>Of a program for processing slides</point>
</slide>
<slide><title>Another demo slide</title>
<point>It is important</point>
<point>To have more than</point>
<point>one slide</point>
</slide>
</slideshow>
"""
dom = xml.dom.minidom.parseString(document)
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def handleSlideshow(slideshow):
print "<html>"
handleSlideshowTitle(slideshow.getElementsByTagName("title")[0])
slides = slideshow.getElementsByTagName("slide")
handleToc(slides)
handleSlides(slides)
print "</html>"
def handleSlides(slides):
for slide in slides:
handleSlide(slide)
def handleSlide(slide):
handleSlideTitle(slide.getElementsByTagName("title")[0])
handlePoints(slide.getElementsByTagName("point"))
def handleSlideshowTitle(title):
print "<title>%s</title>" % getText(title.childNodes)
def handleSlideTitle(title):
print "<h2>%s</h2>" % getText(title.childNodes)
def handlePoints(points):
print "<ul>"
for point in points:
handlePoint(point)
print "</ul>"
def handlePoint(point):
print "<li>%s</li>" % getText(point.childNodes)
def handleToc(slides):
for slide in slides:
title = slide.getElementsByTagName("title")[0]
print "<p>%s</p>" % getText(title.childNodes)
handleSlideshow(dom)
| lgpl-2.1 |
istellartech/OpenGoddard | examples/11_Polar_TSTO_Taiki.py | 1 | 19117 | # -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Rocket:
# Atmosphere Parameter
# Use US Standard Atmosphere 1976
stdAtmo = np.loadtxt("./11_Polar_TSTO_Taiki/US_standard_atmosphere.csv",delimiter=",",skiprows=2)
stdAltitude = stdAtmo[:,0] * 1000.0 # convert km -> m
stdPressure= stdAtmo[:,2] # [Pa]
stdDensity= stdAtmo[:,3] # [kg/m3]
stdSoundSpeed = stdAtmo[:,4] # [m/s]
# Linear interpolation helpers
# Outside the tabulated altitude range (< 0 km, > 86 km), fill_value supplies the out-of-range value
airPressure = interpolate.interp1d(stdAltitude, stdPressure, bounds_error = False, fill_value = (stdPressure[0], 0.0))
airDensity = interpolate.interp1d(stdAltitude, stdDensity, bounds_error = False, fill_value = (stdDensity[0], 0.0))
airSound = interpolate.interp1d(stdAltitude, stdSoundSpeed, bounds_error = False, fill_value = (stdSoundSpeed[0], stdSoundSpeed[-1]))
# Drag Coefficient
CdLog = np.loadtxt("./11_Polar_TSTO_Taiki/Cd.csv", delimiter=",", skiprows=1)
Cd = interpolate.interp1d(CdLog[:,0], CdLog[:,1],fill_value="extrapolate")
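# Sanity note on the interpolators above (comment added; numbers are only
# indicative): queries outside the tabulated altitude range fall back to
# fill_value, e.g. airDensity(-10.0) returns the sea-level table value while
# airDensity(200.0e3) returns 0.0, so aerodynamic drag vanishes above ~86 km.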
def __init__(self):
# Earth Parameter
self.GMe = 3.986004418 * 10**14 # Earth gravitational constant [m^3/s^2]
self.Re = 6371.0 * 1000 # Earth Radius [m]
self.g0 = 9.80665 # Gravitational acceleration on Earth surface [m/s^2]
# Target Parameter
self.Htarget = 561.0 * 1000 # Altitude [m]
self.Rtarget = self.Re + self.Htarget # Orbit Radius [m]
self.Vtarget = np.sqrt(self.GMe / self.Rtarget) # [m/s]
# Launch Site Parameter
self.lat_taiki = 42.506167 # [deg]
self.Vt_equator = 1674.36 # [km/h]
self.Vt_taiki = self.Vt_equator * np.cos(self.lat_taiki * np.pi / 180.0) * 1000.0 / 3600.0 # Radial Velocity of Earth Surface [m/s]
self.inclination = 96.7 # [deg]
self.V0 = self.Vt_taiki * np.cos(-self.inclination * np.pi / 180.0) # [m/s]
self.H0 = 10.0 # Initial Altitude [m]
# Structure Parameter
# Mdry is the design parameter
self.Mdry = [1300.0, 220.0] # Dry Mass [kg], [1st stage, 2nd stage]
self.beta = [10.0, 15.0] # Structure Efficiency [%], [1st stage, 2nd stage]
self.Mpayload = 100.0 # Payload Mass [kg]
self.M0 = [self.Mdry[0] / self.beta[0] * 100.0, self.Mdry[1] / self.beta[1] * 100.0] # Initial Stage Mass [kg], [1st stage, 2nd stage]
self.Mp = [self.M0[0] - self.Mdry[0], self.M0[1] - self.Mdry[1]] # Propellant Mass [kg], [1st stage, 2nd stage]
self.M0[1] = self.M0[1] + self.Mpayload
self.Minit = self.M0[0] + self.M0[1] # Initial Total Mass [kg]
self.d = [1.8, 1.8] # Diameter [m], [1st stage, 2nd stage]
self.A = [0.25 * self.d[0] ** 2 * np.pi, 0.25 * self.d[1] ** 2 * np.pi] # Projected Area [m^2], [1st stage, 2nd stage]
# Engine Parameter
self.Cluster = 9
self.Isp = [261.0 + 0.0, 322.0 + 0.0] # Specific Impulse [s], [1st stage at SL, 2nd stage at vac]
self.dth = [53.9, 53.9] # Throat Diameter [mm], [1st stage, 2nd stage]
self.Ath = [0.25 * (self.dth[0] / 1000.0) ** 2 * np.pi, 0.25 * (self.dth[1] / 1000.0) ** 2 * np.pi] # Throat Area [m^2], [1st stage, 2nd stage]
self.AR = [20.0, 140.0] # Area Ratio, [1st stage, 2nd stage]
self.Ae = [self.Ath[0] * self.AR[0] * self.Cluster, self.Ath[1] * self.AR[1]] # Exit Area [m^2], [1st stage, 2nd stage]
# =======
self.ThrustMax = [33.3, 4.2] # Maximum Thrust [ton], [1st stage at SL, 2nd stage at vac]
self.ThrustMax = [self.ThrustMax[0] * self.g0 * 1000.0, self.ThrustMax[1] * self.g0 * 1000.0] # [N]
# self.ThrustLevel = 1.8 # [G] M0[0] * n G
# self.ThrustMax = [self.M0[0] * self.ThrustLevel * self.g0, self.M0[0] * self.ThrustLevel / self.Cluster * self.g0 + self.airPressure(self.Htarget) * self.Ae[1]] # Maximum Thrust [N], [1st stage at SL, 2nd stage at vac]
# =======
self.refMdot = [self.ThrustMax[0] / (self.Isp[0] * self.g0), self.ThrustMax[1] / (self.Isp[1] * self.g0)] # reference mass flow rates for the Isp correction
self.MaxQ = 500000.0 # Pa
self.MaxG = 20.0 # G
def dynamics(prob, obj, section):
R = prob.states(0, section) # Orbit Radius [m]
theta = prob.states(1, section) #
Vr = prob.states(2, section)
Vt = prob.states(3, section)
m = prob.states(4, section)
Tr = prob.controls(0, section)
Tt = prob.controls(1, section)
g0 = obj.g0
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
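# Descriptive comment (added): dThrust expresses the ambient-pressure effect on
# the nozzle exit area -- (p(H0) - p(h)) * Ae for the sea-level-rated first
# stage, p(h) * Ae for the vacuum-rated second stage -- and it is folded into
# an effective Isp below via dThrust / (refMdot * g0).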
dThrust = [(obj.airPressure(obj.H0) - obj.airPressure(R - obj.Re)) * obj.Ae[0], obj.airPressure(R - obj.Re) * obj.Ae[1]]
Isp = obj.Isp[section] + dThrust[section] / (obj.refMdot[section] * g0)
# In the US standard atmosphere, rho = 0 above 86 km, so drag = 0
Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[section] # [N]
Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[section] # [N]
dx = Dynamics(prob, section)
dx[0] = Vr
dx[1] = Vt / R
dx[2] = Tr / m - Dr / m - g + Vt**2 / R
dx[3] = Tt / m - Dt / m - (Vr * Vt) / R
dx[4] = - np.sqrt(Tr**2 + Tt**2) / (Isp * g0)
return dx()
def equality(prob, obj):
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
tf = prob.time_final(-1)
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
theta0 = prob.states(1, 0)
theta1 = prob.states(1, 1)
Vr0 = prob.states(2, 0)
Vr1 = prob.states(2, 1)
Vt0 = prob.states(3, 0)
Vt1 = prob.states(3, 1)
m0 = prob.states(4, 0)
m1 = prob.states(4, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
unit_R = prob.unit_states[0][0]
unit_V = prob.unit_states[0][2]
unit_m = prob.unit_states[0][4]
result = Condition()
# event condition
result.equal(R0[0], obj.Re + obj.H0, unit=unit_R) # start at the launch-site ground level
result.equal(theta0[0], 0.0)
result.equal(Vr0[0], 0.0, unit=unit_V)
result.equal(Vt0[0], obj.V0 , unit=unit_V)
result.equal(m0[0], obj.Minit, unit=unit_m) # (1st stage and 2nd stage and Payload) initial
# knotting condition
result.equal(m1[0], obj.M0[1], unit=unit_m) # (2nd stage + Payload) initial
result.equal(R1[0], R0[-1], unit=unit_R)
result.equal(theta1[0], theta0[-1])
result.equal(Vr1[0], Vr0[-1], unit=unit_V)
result.equal(Vt1[0], Vt0[-1], unit=unit_V)
# Target Condition
result.equal(R1[-1], obj.Rtarget, unit=unit_R) # Radius
result.equal(Vr[-1], 0.0, unit=unit_V) # Radius Velocity
result.equal(Vt[-1], obj.Vtarget, unit=unit_V)
return result()
def inequality(prob, obj):
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
tf = prob.time_final(-1)
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
theta0 = prob.states(1, 0)
theta1 = prob.states(1, 1)
Vr0 = prob.states(2, 0)
Vr1 = prob.states(2, 1)
Vt0 = prob.states(3, 0)
Vt1 = prob.states(3, 1)
m0 = prob.states(4, 0)
m1 = prob.states(4, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
Dr0 = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dt0 = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dr1 = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[1] # [N]
Dt1 = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[1] # [N]
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
# dynamic pressure
q = 0.5 * rho * (Vr**2 + Vt**2) # [Pa]
# acceleration
a_r0 = (Tr - Dr0) / m
a_t0 = (Tt - Dt0) / m
a_mag0 = np.sqrt(a_r0**2 + a_t0**2) # [m/s2]
a_r1 = (Tr - Dr1) / m
a_t1 = (Tt - Dt1) / m
a_mag1 = np.sqrt(a_r1**2 + a_t1**2) # [m/s2]
# Thrust
T0 = np.sqrt(Tr0**2 + Tt0**2)
T1 = np.sqrt(Tr1**2 + Tt1**2)
dThrust0 = (obj.airPressure(obj.H0) - obj.airPressure(R0 - obj.Re)) * obj.Ae[0]
dThrust1 = obj.airPressure(R1 - obj.Re) * obj.Ae[1]
result = Condition()
# lower bounds
    result.lower_bound(R, obj.Re, unit=prob.unit_states[0][0])  # never below the surface
    result.lower_bound(m0, obj.Mdry[0] + obj.M0[1], unit=prob.unit_states[0][4])  # never below dry mass (plus upper stage)
result.lower_bound(m1, obj.Mdry[1], unit=prob.unit_states[0][4])
result.lower_bound(Tr, -obj.ThrustMax[1], unit=prob.unit_controls[0][0])
result.lower_bound(Tt, -obj.ThrustMax[1], unit=prob.unit_controls[0][0])
# upper bounds
    result.upper_bound(m0, obj.Minit, unit=prob.unit_states[0][4])  # never above initial mass
result.upper_bound(m1, obj.M0[1], unit=prob.unit_states[0][4])
result.upper_bound(Tr0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(Tt0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(T0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(Tr1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(Tt1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(T1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(q, obj.MaxQ, unit = prob.unit_states[0][0])
result.upper_bound(a_mag0, obj.MaxG * obj.g0)
result.upper_bound(a_mag1, obj.MaxG * obj.g0)
return result()
def cost(prob, obj):
m1 = prob.states(4, 1)
return -m1[-1] / prob.unit_states[1][4]
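# Note: the objective maximizes the final mass of the upper stage (section 1)
# by minimizing its negative, non-dimensionalized by that section's mass unit.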
# ========================
# Program Starting Point
time_init = [0.0, 200, 800]
n = [20, 30]
num_states = [5, 5]
num_controls = [2, 2]
max_iteration = 90
flag_savefig = True
savefig_file = "./11_Polar_TSTO_Taiki/TSTO_"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
# ------------------------
# create instance of operating object
obj = Rocket()
unit_R = obj.Re
unit_theta = 1
unit_V = np.sqrt(obj.GMe / obj.Re)
unit_m = obj.M0[0]
unit_t = unit_R / unit_V
unit_T = unit_m * unit_R / unit_t ** 2
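# Note on scaling: the problem is non-dimensionalized before optimization.
# unit_V = sqrt(GMe / Re) is the circular-orbit speed at the surface,
# unit_t = unit_R / unit_V is the matching time scale, and
# unit_T = unit_m * unit_R / unit_t**2 has units of mass * acceleration = force [N],
# which is why it scales the thrust controls.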
prob.set_unit_states_all_section(0, unit_R)
prob.set_unit_states_all_section(1, unit_theta)
prob.set_unit_states_all_section(2, unit_V)
prob.set_unit_states_all_section(3, unit_V)
prob.set_unit_states_all_section(4, unit_m)
prob.set_unit_controls_all_section(0, unit_T)
prob.set_unit_controls_all_section(1, unit_T)
prob.set_unit_time(unit_t)
# ========================
# Initial parameter guess
# altitude profile
R_init = Guess.cubic(prob.time_all_section, obj.Re, 0.0, obj.Rtarget, 0.0)
# Guess.plot(prob.time_all_section, R_init, "Altitude", "time", "Altitude")
# if(flag_savefig):plt.savefig(savefig_file + "guess_alt" + ".png")
# theta
theta_init = Guess.cubic(prob.time_all_section, 0.0, 0.0, np.deg2rad(25.0), 0.0)
# velocity
Vr_init = Guess.linear(prob.time_all_section, 0.0, 0.0)
Vt_init = Guess.linear(prob.time_all_section, obj.V0, obj.Vtarget)
# Guess.plot(prob.time_all_section, V_init, "Velocity", "time", "Velocity")
# mass profile
M_init0 = Guess.cubic(prob.time_all_section, obj.Minit, 0.0, obj.Mdry[0] + obj.M0[1], 0.0)
M_init1 = Guess.cubic(prob.time_all_section, obj.M0[1], 0.0, obj.Mdry[1], 0.0)
M_init = np.hstack((M_init0, M_init1))
# Guess.plot(prob.time_all_section, M_init, "Mass", "time", "Mass")
# if(flag_savefig):plt.savefig(savefig_file + "guess_mass" + ".png")
# thrust profile
# T_init = Guess.zeros(prob.time_all_section)
Tr_init0 = Guess.cubic(prob.time[0], obj.ThrustMax[0]*9/10, 0.0, 0.0, 0.0)
Tr_init1 = Guess.cubic(prob.time[1], obj.ThrustMax[1]*9/10, 0.0, 0.0, 0.0)
Tr_init = np.hstack((Tr_init0, Tr_init1))
# Tt_init = Guess.cubic(prob.time_all_section, 0.0, 0.0, 0.0, 0.0)
Tt_init0 = Guess.cubic(prob.time[0], obj.ThrustMax[0]/10, 0.0, 0.0, 0.0)
Tt_init1 = Guess.cubic(prob.time[1], obj.ThrustMax[1]/10, 0.0, 0.0, 0.0)
Tt_init = np.hstack((Tt_init0, Tt_init1))
# Guess.plot(prob.time_all_section, T_init, "Thrust Guess", "time", "Thrust")
# if(flag_savefig):plt.savefig(savefig_file + "guess_thrust" + ".png")
# plt.show()
# ========================
# Substitute the initial guess values into the parameter vector to be optimized
# (non-dimensional values: divide by the scale factors)
prob.set_states_all_section(0, R_init)
prob.set_states_all_section(1, theta_init)
prob.set_states_all_section(2, Vr_init)
prob.set_states_all_section(3, Vt_init)
prob.set_states_all_section(4, M_init)
prob.set_controls_all_section(0, Tr_init)
prob.set_controls_all_section(1, Tt_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics, dynamics]
prob.knot_states_smooth = [False]
prob.cost = cost
# prob.cost_derivative = cost_derivative
prob.equality = equality
prob.inequality = inequality
def display_func():
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
m = prob.states_all_section(4)
ts = prob.time_knots()
tf = prob.time_final(-1)
print("m0 : {0:.5f}".format(m[0]))
print("mf : {0:.5f}".format(m[-1]))
print("mdry : {0:.5f}".format(obj.Mdry[0]))
print("payload : {0:.5f}".format(m[-1] - obj.Mdry[1]))
print("max altitude: {0:.5f}".format(R[-1] - obj.Re))
print("MECO time : {0:.3f}".format(ts[1]))
print("final time : {0:.3f}".format(tf))
prob.solve(obj, display_func, ftol=1e-8)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
time = prob.time_update()
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
# ------------------------
# Calculate necessary variables
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
# dynamic pressure
q = 0.5 * rho * (Vr**2 + Vt**2) # [Pa]
# acceleration
a_r = (Tr - Dr) / m / obj.g0  # [G]
a_t = (Tt - Dt) / m / obj.g0  # [G]
a_mag = np.sqrt(a_r**2 + a_t**2)  # [G] (a_r and a_t are already divided by g0)
# Thrust
T = np.sqrt(Tr**2 + Tt**2)
dThrust0 = (obj.airPressure(obj.H0) - obj.airPressure(R0 - obj.Re)) * obj.Ae[0]
dThrust1 = obj.airPressure(R1 - obj.Re) * obj.Ae[1]
Isp0 = obj.Isp[0] + dThrust0 / (obj.refMdot[0] * obj.g0)
Isp1 = obj.Isp[1] + dThrust1 / (obj.refMdot[1] * obj.g0)
Thrust_SL = T - np.append(dThrust0, dThrust1)
np.savetxt(savefig_file + "Thrust_Log" + ".csv", np.hstack((time, Thrust_SL, T, Tr, Tt)), delimiter=',')
# ------------------------
# Visualization
plt.close("all")
plt.figure()
plt.title("Altitude profile")
plt.plot(time, (R - obj.Re) / 1000, marker="o", label="Altitude")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Altitude [km]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "altitude" + ".png")
np.savetxt(savefig_file + "Altitude_Log" + ".csv", np.hstack((time, (R - obj.Re))), delimiter=',')
plt.figure()
plt.title("Velocity")
plt.plot(time, Vr, marker="o", label="Vr")
plt.plot(time, Vt, marker="o", label="Vt")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Velocity [m/s]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "velocity" + ".png")
np.savetxt(savefig_file + "Velocity_Log" + ".csv", np.hstack((time, Vr, Vt)), delimiter=',')
plt.figure()
plt.title("Mass")
plt.plot(time, m, marker="o", label="Mass")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Mass [kg]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "mass" + ".png")
np.savetxt(savefig_file + "Mass_Log" + ".csv", np.hstack((time, m)), delimiter=',')
plt.figure()
plt.title("Acceleration")
plt.plot(time, a_r, marker="o", label="Acc r")
plt.plot(time, a_t, marker="o", label="Acc t")
plt.plot(time, a_mag, marker="o", label="Acc")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Acceleration [G]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "acceleration" + ".png")
plt.figure()
plt.title("Thrust profile")
plt.plot(time, Tr / 1000, marker="o", label="Tr")
plt.plot(time, Tt / 1000, marker="o", label="Tt")
plt.plot(time, T / 1000, marker="o", label="Thrust")
plt.plot(time, Dr / 1000, marker="o", label="Dr")
plt.plot(time, Dt / 1000, marker="o", label="Dt")
plt.plot(time, m * g / 1000, marker="o", label="Gravity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Thrust [kN]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "force" + ".png")
plt.figure()
plt.title("Flight trajectory")
plt.plot(theta * obj.Re / 1000, (R - obj.Re) / 1000, marker="o", label="trajectory")
plt.grid()
plt.xlabel("Downrange [km]")
plt.ylabel("Altitude [km]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "trajectory" + ".png")
plt.figure()
plt.title("DeltaThrust profile")
plt.plot(time, np.append(dThrust0, dThrust1), marker="o", label="dThrust")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("dThrust [N]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "dforce" + ".png")
plt.figure()
plt.title("Isp profile")
plt.plot(time, np.append(Isp0, Isp1), marker="o", label="Isp")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Isp [s]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "Isp" + ".png") | mit |
hellhovnd/django | tests/m2m_through/tests.py | 117 | 12908 | from __future__ import absolute_import
from datetime import datetime
from operator import attrgetter
from django.test import TestCase
from .models import (Person, Group, Membership, CustomMembership,
PersonSelfRefM2M, Friendship)
class M2mThroughTests(TestCase):
def setUp(self):
self.bob = Person.objects.create(name='Bob')
self.jim = Person.objects.create(name='Jim')
self.jane = Person.objects.create(name='Jane')
self.rock = Group.objects.create(name='Rock')
self.roll = Group.objects.create(name='Roll')
def test_m2m_through(self):
# We start out by making sure that the Group 'rock' has no members.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# To make Jim a member of Group Rock, simply create a Membership object.
m1 = Membership.objects.create(person=self.jim, group=self.rock)
# We can do the same for Jane and Rock.
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Let's check to make sure that it worked. Jane and Jim should be members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(), [
'Jane',
'Jim'
],
attrgetter("name")
)
# Now we can add a bunch more Membership objects to test with.
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
# We can get Jim's Group membership as with any ForeignKey.
self.assertQuerysetEqual(
self.jim.group_set.all(), [
'Rock',
'Roll'
],
attrgetter("name")
)
# Querying the intermediary model works like normal.
self.assertEqual(
repr(Membership.objects.get(person=self.jane, group=self.rock)),
'<Membership: Jane is a member of Rock>'
)
# It's not only get that works. Filter works like normal as well.
self.assertQuerysetEqual(
Membership.objects.filter(person=self.jim), [
'<Membership: Jim is a member of Rock>',
'<Membership: Jim is a member of Roll>'
]
)
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_forward_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.rock.members.add(self.bob))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.rock.members.create(name='Anne'))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.rock.members.remove(self.jim))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Here we back up the list of all members of Rock.
backup = list(self.rock.members.all())
# ...and we verify that it has worked.
self.assertEqual(
[p.name for p in backup],
['Jane', 'Jim']
)
# The clear function should still work.
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.rock, "members", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.rock.members.all(),[
'Jane',
'Jim'
],
attrgetter("name")
)
def test_reverse_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.bob.group_set.add(self.rock))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.bob.group_set.create(name="funk"))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.jim.group_set.remove(self.rock))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jim, group=self.roll)
# Here we back up the list of all of Jim's groups.
backup = list(self.jim.group_set.all())
self.assertEqual(
[g.name for g in backup],
['Rock', 'Roll']
)
# The clear function should still work.
self.jim.group_set.clear()
# Now Jim will be in no groups.
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.jim, "group_set", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.jim.group_set.all(),[
'Rock',
'Roll'
],
attrgetter("name")
)
def test_custom_tests(self):
# Let's see if we can query through our second relationship.
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
# We can query in the opposite direction as well.
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If we get the number of people in Rock, it should be both Bob and Jim.
self.assertQuerysetEqual(
self.rock.custom_members.all(),[
'Bob',
'Jim'
],
attrgetter("name")
)
# Bob should only be in one custom group.
self.assertQuerysetEqual(
self.bob.custom.all(),[
'Rock'
],
attrgetter("name")
)
# Let's make sure our new descriptors don't conflict with the FK related_name.
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),[
'<CustomMembership: Bob is a member of Rock>'
]
)
def test_self_referential_tests(self):
# Let's first create a person who has no friends.
tony = PersonSelfRefM2M.objects.create(name="Tony")
self.assertQuerysetEqual(
tony.friends.all(),
[]
)
chris = PersonSelfRefM2M.objects.create(name="Chris")
f = Friendship.objects.create(first=tony, second=chris, date_friended=datetime.now())
# Tony should now show that Chris is his friend.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
# But we haven't established that Chris is Tony's Friend.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
f2 = Friendship.objects.create(first=chris, second=tony, date_friended=datetime.now())
# Having added Chris as a friend, let's make sure that his friend set reflects
# that addition.
self.assertQuerysetEqual(
chris.friends.all(),[
'Tony'
],
attrgetter("name")
)
# Chris gets mad and wants to get rid of all of his friends.
chris.friends.clear()
# Now he should not have any more friends.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
# Since this isn't a symmetrical relation, Tony's friend link still exists.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
def test_query_tests(self):
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
m2.invite_reason = "She was just awesome."
m2.date_joined = datetime(2006, 1, 1)
m2.save()
m3.date_joined = datetime(2004, 1, 1)
m3.save()
m5.date_joined = datetime(2004, 1, 1)
m5.save()
# We can query for the related model by using its attribute name (members, in
# this case).
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),[
'Roll'
],
attrgetter("name")
)
# To query through the intermediary model, we specify its model name.
# In this case, membership.
self.assertQuerysetEqual(
Group.objects.filter(membership__invite_reason="She was just awesome."),[
'Rock'
],
attrgetter("name")
)
# If we want to query in the reverse direction by the related model, use its
# model name (group, in this case).
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),[
'Jane',
'Jim'
],
attrgetter("name")
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If the m2m field has specified a related_name, using that will work.
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),[
'Bob',
'Jim'
],
attrgetter("name")
)
# To query through the intermediary model in the reverse direction, we again
# specify its model name (membership, in this case).
self.assertQuerysetEqual(
Person.objects.filter(membership__invite_reason="She was just awesome."),[
'Jane'
],
attrgetter("name")
)
# Let's see all of the groups that Jane joined after 1 Jan 2005:
self.assertQuerysetEqual(
Group.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__person=self.jane),[
'Rock'
],
attrgetter("name")
)
# Queries also work in the reverse direction: Now let's see all of the people
# that have joined Rock since 1 Jan 2005:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__group=self.rock),[
'Jane',
'Jim'
],
attrgetter("name")
)
# Conceivably, queries through membership could return correct, but non-unique
# querysets. To demonstrate this, we query for all people who have joined a
# group after 2004:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)),[
'Jane',
'Jim',
'Jim'
],
attrgetter("name")
)
# Jim showed up twice, because he joined two groups ('Rock', and 'Roll'):
self.assertEqual(
[(m.person.name, m.group.name) for m in Membership.objects.filter(date_joined__gt=datetime(2004, 1, 1))],
[('Jane', 'Rock'), ('Jim', 'Rock'), ('Jim', 'Roll')]
)
# QuerySet's distinct() method can correct this problem.
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)).distinct(),[
'Jane',
'Jim'
],
attrgetter("name")
)
| bsd-3-clause |
johnnygaddarr/zulip | zerver/views/webhooks.py | 102 | 42964 | # Webhooks for external integrations.
from __future__ import absolute_import
from django.conf import settings
from zerver.models import UserProfile, get_client, get_user_profile_by_email
from zerver.lib.actions import check_send_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_dict
from zerver.decorator import authenticated_api_view, REQ, \
has_request_variables, authenticated_rest_api_view, \
api_key_only_webhook_view, to_non_negative_int, flexible_boolean
from zerver.views.messages import send_message_backend
from django.db.models import Q
from defusedxml.ElementTree import fromstring as xml_fromstring
import pprint
import base64
import logging
import re
import ujson
from functools import wraps
def github_generic_subject(noun, topic_focus, blob):
# issue and pull_request objects have the same fields we're interested in
return "%s: %s %d: %s" % (topic_focus, noun, blob['number'], blob['title'])
def github_generic_content(noun, payload, blob):
action = payload['action']
if action == 'synchronize':
action = 'synchronized'
# issue and pull_request objects have the same fields we're interested in
content = ("%s %s [%s %s](%s)"
% (payload['sender']['login'],
action,
noun,
blob['number'],
blob['html_url']))
if payload['action'] in ('opened', 'reopened'):
content += "\n\n~~~ quote\n%s\n~~~" % (blob['body'],)
return content
def api_github_v1(user_profile, event, payload, branches, stream, **kwargs):
"""
processes github payload with version 1 field specification
`payload` comes in unmodified from github
`stream` is set to 'commits' if otherwise unset
"""
commit_stream = stream
issue_stream = 'issues'
return api_github_v2(user_profile, event, payload, branches, stream, commit_stream, issue_stream, **kwargs)
def api_github_v2(user_profile, event, payload, branches, default_stream, commit_stream, issue_stream, topic_focus = None):
"""
processes github payload with version 2 field specification
`payload` comes in unmodified from github
`default_stream` is set to what `stream` is in v1 above
`commit_stream` and `issue_stream` fall back to `default_stream` if they are empty
This and allowing alternative endpoints is what distinguishes v1 from v2 of the github configuration
"""
if not commit_stream:
commit_stream = default_stream
if not issue_stream:
issue_stream = default_stream
target_stream = commit_stream
repository = payload['repository']
if not topic_focus:
topic_focus = repository['name']
# Event Handlers
if event == 'pull_request':
pull_req = payload['pull_request']
subject = github_generic_subject('pull request', topic_focus, pull_req)
content = github_generic_content('pull request', payload, pull_req)
elif event == 'issues':
# in v1, we assume that this stream exists since it is
# deprecated and the few realms that use it already have the
# stream
target_stream = issue_stream
issue = payload['issue']
subject = github_generic_subject('issue', topic_focus, issue)
content = github_generic_content('issue', payload, issue)
elif event == 'issue_comment':
# Comments on both issues and pull requests come in as issue_comment events
issue = payload['issue']
if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None:
# It's an issues comment
target_stream = issue_stream
noun = 'issue'
else:
# It's a pull request comment
noun = 'pull request'
subject = github_generic_subject(noun, topic_focus, issue)
comment = payload['comment']
content = ("%s [commented](%s) on [%s %d](%s)\n\n~~~ quote\n%s\n~~~"
% (comment['user']['login'],
comment['html_url'],
noun,
issue['number'],
issue['html_url'],
comment['body']))
elif event == 'push':
subject, content = build_message_from_gitlog(user_profile, topic_focus,
payload['ref'], payload['commits'],
payload['before'], payload['after'],
payload['compare'],
payload['pusher']['name'],
forced=payload['forced'],
created=payload['created'])
elif event == 'commit_comment':
comment = payload['comment']
subject = "%s: commit %s" % (topic_focus, comment['commit_id'])
content = ("%s [commented](%s)"
% (comment['user']['login'],
comment['html_url']))
if comment['line'] is not None:
content += " on `%s`, line %d" % (comment['path'], comment['line'])
content += "\n\n~~~ quote\n%s\n~~~" % (comment['body'],)
return (target_stream, subject, content)
@authenticated_api_view
@has_request_variables
def api_github_landing(request, user_profile, event=REQ,
payload=REQ(validator=check_dict([])),
branches=REQ(default=''),
stream=REQ(default=''),
version=REQ(converter=to_non_negative_int, default=1),
commit_stream=REQ(default=''),
issue_stream=REQ(default=''),
exclude_pull_requests=REQ(converter=flexible_boolean, default=False),
exclude_issues=REQ(converter=flexible_boolean, default=False),
exclude_commits=REQ(converter=flexible_boolean, default=False),
emphasize_branch_in_topic=REQ(converter=flexible_boolean, default=False),
):
repository = payload['repository']
# Special hook for capturing event data. If we see our special test repo, log the payload from github.
try:
if repository['name'] == 'zulip-test' and repository['id'] == 6893087 and settings.PRODUCTION:
with open('/var/log/zulip/github-payloads', 'a') as f:
f.write(ujson.dumps({'event': event,
'payload': payload,
'branches': branches,
'stream': stream,
'version': version,
'commit_stream': commit_stream,
'issue_stream': issue_stream,
'exclude_pull_requests': exclude_pull_requests,
'exclude_issues': exclude_issues,
'exclude_commits': exclude_commits,
'emphasize_branch_in_topic': emphasize_branch_in_topic,
}))
f.write("\n")
except Exception:
logging.exception("Error while capturing Github event")
if not stream:
stream = 'commits'
short_ref = re.sub(r'^refs/heads/', '', payload.get('ref', ""))
kwargs = dict()
if emphasize_branch_in_topic and short_ref:
kwargs['topic_focus'] = short_ref
allowed_events = set()
if not exclude_pull_requests:
allowed_events.add('pull_request')
if not exclude_issues:
allowed_events.add("issues")
allowed_events.add("issue_comment")
if not exclude_commits:
allowed_events.add("push")
allowed_events.add("commit_comment")
if event not in allowed_events:
return json_success()
    # Only pass along issue_comment events for newly created comments
if event == 'issue_comment' and payload['action'] != 'created':
return json_success()
if event == 'push':
# If we are given a whitelist of branches, then we silently ignore
# any push notification on a branch that is not in our whitelist.
if branches and short_ref not in re.split('[\s,;|]+', branches):
return json_success()
# Map payload to the handler with the right version
if version == 2:
target_stream, subject, content = api_github_v2(user_profile, event, payload, branches, stream, commit_stream, issue_stream, **kwargs)
else:
target_stream, subject, content = api_github_v1(user_profile, event, payload, branches, stream, **kwargs)
request.client = get_client("ZulipGitHubWebhook")
return send_message_backend(request, user_profile,
message_type_name="stream",
message_to=[target_stream],
forged=False, subject_name=subject,
message_content=content)
def build_commit_list_content(commits, branch, compare_url, pusher):
if compare_url is not None:
push_text = "[pushed](%s)" % (compare_url,)
else:
push_text = "pushed"
content = ("%s %s to branch %s\n\n"
% (pusher,
push_text,
branch))
num_commits = len(commits)
max_commits = 10
truncated_commits = commits[:max_commits]
for commit in truncated_commits:
short_id = commit['id'][:7]
(short_commit_msg, _, _) = commit['message'].partition("\n")
content += "* [%s](%s): %s\n" % (short_id, commit['url'],
short_commit_msg)
if (num_commits > max_commits):
content += ("\n[and %d more commits]"
% (num_commits - max_commits,))
return content
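# Illustrative example (hypothetical commit data, not from a real payload):
#   build_commit_list_content(
#       [{'id': 'abc1234def56', 'url': 'https://example.com/c/abc1234',
#         'message': 'Fix bug\n\nLonger details'}],
#       'master', 'https://example.com/compare', 'alice')
# returns roughly:
#   alice [pushed](https://example.com/compare) to branch master
#
#   * [abc1234](https://example.com/c/abc1234): Fix bug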
def build_message_from_gitlog(user_profile, name, ref, commits, before, after, url, pusher, forced=None, created=None):
short_ref = re.sub(r'^refs/heads/', '', ref)
subject = name
if re.match(r'^0+$', after):
content = "%s deleted branch %s" % (pusher,
short_ref)
# 'created' and 'forced' are github flags; the second check is for beanstalk
elif (forced and not created) or (forced is None and len(commits) == 0):
content = ("%s [force pushed](%s) to branch %s. Head is now %s"
% (pusher,
url,
short_ref,
after[:7]))
else:
content = build_commit_list_content(commits, short_ref, url, pusher)
return (subject, content)
def guess_zulip_user_from_jira(jira_username, realm):
try:
# Try to find a matching user in Zulip
# We search a user's full name, short name,
# and beginning of email address
user = UserProfile.objects.filter(
Q(full_name__iexact=jira_username) |
Q(short_name__iexact=jira_username) |
Q(email__istartswith=jira_username),
is_active=True,
realm=realm).order_by("id")[0]
return user
except IndexError:
return None
def convert_jira_markup(content, realm):
# Attempt to do some simplistic conversion of JIRA
# formatting to Markdown, for consumption in Zulip
# Jira uses *word* for bold, we use **word**
content = re.sub(r'\*([^\*]+)\*', r'**\1**', content)
# Jira uses {{word}} for monospacing, we use `word`
content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content)
# Starting a line with bq. block quotes that line
content = re.sub(r'bq\. (.*)', r'> \1', content)
# Wrapping a block of code in {quote}stuff{quote} also block-quotes it
quote_re = re.compile(r'{quote}(.*?){quote}', re.DOTALL)
content = re.sub(quote_re, r'~~~ quote\n\1\n~~~', content)
# {noformat}stuff{noformat} blocks are just code blocks with no
# syntax highlighting
noformat_re = re.compile(r'{noformat}(.*?){noformat}', re.DOTALL)
content = re.sub(noformat_re, r'~~~\n\1\n~~~', content)
# Code blocks are delineated by {code[: lang]} {code}
code_re = re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL)
content = re.sub(code_re, r'~~~\n\1\n~~~', content)
# Links are of form: [https://www.google.com] or [Link Title|https://www.google.com]
# In order to support both forms, we don't match a | in bare links
content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content)
# Full links which have a | are converted into a better markdown link
full_link_re = re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]')
content = re.sub(full_link_re, r'[\g<title>](\g<url>)', content)
# Try to convert a JIRA user mention of format [~username] into a
# Zulip user mention. We don't know the email, just the JIRA username,
# so we naively guess at their Zulip account using this
if realm:
mention_re = re.compile(r'\[~(.*?)\]')
for username in mention_re.findall(content):
# Try to look up username
user_profile = guess_zulip_user_from_jira(username, realm)
if user_profile:
replacement = "@**%s**" % (user_profile.full_name,)
else:
replacement = "**%s**" % (username,)
content = content.replace("[~%s]" % (username,), replacement)
return content
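# Illustrative conversions performed above (hypothetical input strings):
#   '*important*'                     -> '**important**'
#   '{{some_code}}'                   -> '`some_code`'
#   '[Search|https://www.google.com]' -> '[Search](https://www.google.com)'
#   '[~alice]'                        -> '@**Alice Smith**' if a matching Zulip
#                                        user is found, otherwise '**alice**'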
@api_key_only_webhook_view
def api_jira_webhook(request, user_profile):
try:
payload = ujson.loads(request.body)
except ValueError:
return json_error("Malformed JSON input")
try:
stream = request.GET['stream']
except (AttributeError, KeyError):
stream = 'jira'
def get_in(payload, keys, default=''):
try:
for key in keys:
payload = payload[key]
except (AttributeError, KeyError, TypeError):
return default
return payload
event = payload.get('webhookEvent')
author = get_in(payload, ['user', 'displayName'])
issueId = get_in(payload, ['issue', 'key'])
# Guess the URL as it is not specified in the payload
# We assume that there is a /browse/BUG-### page
# from the REST url of the issue itself
baseUrl = re.match("(.*)\/rest\/api/.*", get_in(payload, ['issue', 'self']))
if baseUrl and len(baseUrl.groups()):
issue = "[%s](%s/browse/%s)" % (issueId, baseUrl.group(1), issueId)
else:
issue = issueId
title = get_in(payload, ['issue', 'fields', 'summary'])
priority = get_in(payload, ['issue', 'fields', 'priority', 'name'])
assignee = get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one')
assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
assignee_mention = ''
if assignee_email != '':
try:
assignee_profile = get_user_profile_by_email(assignee_email)
assignee_mention = "@**%s**" % (assignee_profile.full_name,)
except UserProfile.DoesNotExist:
assignee_mention = "**%s**" % (assignee_email,)
subject = "%s: %s" % (issueId, title)
if event == 'jira:issue_created':
content = "%s **created** %s priority %s, assigned to **%s**:\n\n> %s" % \
(author, issue, priority, assignee, title)
elif event == 'jira:issue_deleted':
content = "%s **deleted** %s!" % \
(author, issue)
elif event == 'jira:issue_updated':
# Reassigned, commented, reopened, and resolved events are all bundled
# into this one 'updated' event type, so we try to extract the meaningful
# event that happened
if assignee_mention != '':
assignee_blurb = " (assigned to %s)" % (assignee_mention,)
else:
assignee_blurb = ''
content = "%s **updated** %s%s:\n\n" % (author, issue, assignee_blurb)
changelog = get_in(payload, ['changelog',])
comment = get_in(payload, ['comment', 'body'])
if changelog != '':
# Use the changelog to display the changes, whitelist types we accept
items = changelog.get('items')
for item in items:
field = item.get('field')
# Convert a user's target to a @-mention if possible
targetFieldString = "**%s**" % (item.get('toString'),)
if field == 'assignee' and assignee_mention != '':
targetFieldString = assignee_mention
fromFieldString = item.get('fromString')
if targetFieldString or fromFieldString:
content += "* Changed %s from **%s** to %s\n" % (field, fromFieldString, targetFieldString)
if comment != '':
comment = convert_jira_markup(comment, user_profile.realm)
content += "\n%s\n" % (comment,)
elif event in ['jira:worklog_updated']:
# We ignore these event types
return json_success()
elif 'transition' in payload:
from_status = get_in(payload, ['transition', 'from_status'])
to_status = get_in(payload, ['transition', 'to_status'])
content = "%s **transitioned** %s from %s to %s" % (author, issue, from_status, to_status)
else:
# Unknown event type
if not settings.TEST_SUITE:
if event is None:
logging.warning("Got JIRA event with None event type: %s" % (payload,))
else:
logging.warning("Got JIRA event type we don't understand: %s" % (event,))
return json_error("Unknown JIRA event type")
check_send_message(user_profile, get_client("ZulipJIRAWebhook"), "stream",
[stream], subject, content)
return json_success()
def api_pivotal_webhook_v3(request, user_profile, stream):
payload = xml_fromstring(request.body)
def get_text(attrs):
start = payload
try:
for attr in attrs:
start = start.find(attr)
return start.text
except AttributeError:
return ""
event_type = payload.find('event_type').text
description = payload.find('description').text
project_id = payload.find('project_id').text
story_id = get_text(['stories', 'story', 'id'])
# Ugh, the URL in the XML data is not a clickable url that works for the user
# so we try to build one that the user can actually click on
url = "https://www.pivotaltracker.com/s/projects/%s/stories/%s" % (project_id, story_id)
# Pivotal doesn't tell us the name of the story, but it's usually in the
# description in quotes as the first quoted string
name_re = re.compile(r'[^"]+"([^"]+)".*')
match = name_re.match(description)
if match and len(match.groups()):
name = match.group(1)
else:
name = "Story changed" # Failed for an unknown reason, show something
more_info = " [(view)](%s)" % (url,)
if event_type == 'story_update':
subject = name
content = description + more_info
elif event_type == 'note_create':
subject = "Comment added"
content = description + more_info
elif event_type == 'story_create':
issue_desc = get_text(['stories', 'story', 'description'])
issue_type = get_text(['stories', 'story', 'story_type'])
issue_status = get_text(['stories', 'story', 'current_state'])
estimate = get_text(['stories', 'story', 'estimate'])
if estimate != '':
estimate = " worth %s story points" % (estimate,)
subject = name
content = "%s (%s %s%s):\n\n~~~ quote\n%s\n~~~\n\n%s" % (description,
issue_status,
issue_type,
estimate,
issue_desc,
more_info)
return subject, content
def api_pivotal_webhook_v5(request, user_profile, stream):
payload = ujson.loads(request.body)
event_type = payload["kind"]
project_name = payload["project"]["name"]
project_id = payload["project"]["id"]
primary_resources = payload["primary_resources"][0]
story_url = primary_resources["url"]
story_type = primary_resources["story_type"]
story_id = primary_resources["id"]
story_name = primary_resources["name"]
performed_by = payload.get("performed_by", {}).get("name", "")
story_info = "[%s](https://www.pivotaltracker.com/s/projects/%s): [%s](%s)" % (project_name, project_id, story_name, story_url)
changes = payload.get("changes", [])
content = ""
subject = "#%s: %s" % (story_id, story_name)
def extract_comment(change):
if change.get("kind") == "comment":
return change.get("new_values", {}).get("text", None)
return None
if event_type == "story_update_activity":
        # Find the changed values and build a message
content += "%s updated %s:\n" % (performed_by, story_info)
for change in changes:
old_values = change.get("original_values", {})
new_values = change["new_values"]
if "current_state" in old_values and "current_state" in new_values:
content += "* state changed from **%s** to **%s**\n" % (old_values["current_state"], new_values["current_state"])
if "estimate" in old_values and "estimate" in new_values:
old_estimate = old_values.get("estimate", None)
if old_estimate is None:
estimate = "is now"
else:
estimate = "changed from %s to" % (old_estimate,)
new_estimate = new_values["estimate"] if new_values["estimate"] is not None else "0"
content += "* estimate %s **%s points**\n" % (estimate, new_estimate)
if "story_type" in old_values and "story_type" in new_values:
content += "* type changed from **%s** to **%s**\n" % (old_values["story_type"], new_values["story_type"])
comment = extract_comment(change)
if comment is not None:
content += "* Comment added:\n~~~quote\n%s\n~~~\n" % (comment,)
elif event_type == "comment_create_activity":
for change in changes:
comment = extract_comment(change)
if comment is not None:
content += "%s added a comment to %s:\n~~~quote\n%s\n~~~" % (performed_by, story_info, comment)
elif event_type == "story_create_activity":
content += "%s created %s: %s\n" % (performed_by, story_type, story_info)
for change in changes:
new_values = change.get("new_values", {})
if "current_state" in new_values:
content += "* State is **%s**\n" % (new_values["current_state"],)
if "description" in new_values:
content += "* Description is\n\n> %s" % (new_values["description"],)
elif event_type == "story_move_activity":
content = "%s moved %s" % (performed_by, story_info)
for change in changes:
old_values = change.get("original_values", {})
new_values = change["new_values"]
if "current_state" in old_values and "current_state" in new_values:
content += " from **%s** to **%s**" % (old_values["current_state"], new_values["current_state"])
elif event_type in ["task_create_activity", "comment_delete_activity",
"task_delete_activity", "task_update_activity",
"story_move_from_project_activity", "story_delete_activity",
"story_move_into_project_activity"]:
# Known but unsupported Pivotal event types
pass
else:
logging.warning("Unknown Pivotal event type: %s" % (event_type,))
return subject, content
@api_key_only_webhook_view
def api_pivotal_webhook(request, user_profile):
try:
stream = request.GET['stream']
except (AttributeError, KeyError):
return json_error("Missing stream parameter.")
subject = content = None
try:
subject, content = api_pivotal_webhook_v3(request, user_profile, stream)
except AttributeError:
return json_error("Failed to extract data from Pivotal XML response")
except:
# Attempt to parse v5 JSON payload
try:
subject, content = api_pivotal_webhook_v5(request, user_profile, stream)
except AttributeError:
return json_error("Failed to extract data from Pivotal V5 JSON response")
if subject is None or content is None:
return json_error("Unable to handle Pivotal payload")
check_send_message(user_profile, get_client("ZulipPivotalWebhook"), "stream",
[stream], subject, content)
return json_success()
# Beanstalk's webhook UI rejects URLs with an @ in the username section,
# so we ask the user to replace it with %40.
# We manually fix the username here before passing it along to @authenticated_rest_api_view
def beanstalk_decoder(view_func):
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
try:
auth_type, encoded_value = request.META['HTTP_AUTHORIZATION'].split()
if auth_type.lower() == "basic":
email, api_key = base64.b64decode(encoded_value).split(":")
email = email.replace('%40', '@')
request.META['HTTP_AUTHORIZATION'] = "Basic %s" % (base64.b64encode("%s:%s" % (email, api_key)))
except:
pass
return view_func(request, *args, **kwargs)
return _wrapped_view_func
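# Illustrative example (hypothetical credentials): with the decorator above, a
# header of the form
#     Authorization: Basic base64("bob%40example.com:some-api-key")
# is rewritten before authentication so the username reads "bob@example.com",
# matching the account email format the usual Basic-auth path expects.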
@beanstalk_decoder
@authenticated_rest_api_view
@has_request_variables
def api_beanstalk_webhook(request, user_profile,
payload=REQ(validator=check_dict([]))):
# Beanstalk supports both SVN and git repositories
# We distinguish between the two by checking for a
# 'uri' key that is only present for git repos
git_repo = 'uri' in payload
if git_repo:
# To get a linkable url,
subject, content = build_message_from_gitlog(user_profile, payload['repository']['name'],
payload['ref'], payload['commits'],
payload['before'], payload['after'],
payload['repository']['url'],
payload['pusher_name'])
else:
author = payload.get('author_full_name')
url = payload.get('changeset_url')
revision = payload.get('revision')
(short_commit_msg, _, _) = payload.get('message').partition("\n")
subject = "svn r%s" % (revision,)
content = "%s pushed [revision %s](%s):\n\n> %s" % (author, revision, url, short_commit_msg)
check_send_message(user_profile, get_client("ZulipBeanstalkWebhook"), "stream",
["commits"], subject, content)
return json_success()
# Desk.com's integrations all make the user supply a template, where it fills
# in stuff like {{customer.name}} and posts the result as a "data" parameter.
# There's no raw JSON for us to work from. Thus, it makes sense to just write
# a template Zulip message within Desk.com and have the webhook extract that
# from the "data" param and post it, which this does.
@authenticated_rest_api_view
@has_request_variables
def api_deskdotcom_webhook(request, user_profile, data=REQ(),
topic=REQ(default="Desk.com notification"),
stream=REQ(default="desk.com")):
check_send_message(user_profile, get_client("ZulipDeskWebhook"), "stream",
[stream], topic, data)
return json_success()
@api_key_only_webhook_view
@has_request_variables
def api_newrelic_webhook(request, user_profile, alert=REQ(validator=check_dict([]), default=None),
deployment=REQ(validator=check_dict([]), default=None)):
try:
stream = request.GET['stream']
except (AttributeError, KeyError):
return json_error("Missing stream parameter.")
if alert:
# Use the message as the subject because it stays the same for
# "opened", "acknowledged", and "closed" messages that should be
# grouped.
subject = alert['message']
content = "%(long_description)s\n[View alert](%(alert_url)s)" % (alert)
elif deployment:
subject = "%s deploy" % (deployment['application_name'])
content = """`%(revision)s` deployed by **%(deployed_by)s**
%(description)s
%(changelog)s""" % (deployment)
else:
return json_error("Unknown webhook request")
check_send_message(user_profile, get_client("ZulipNewRelicWebhook"), "stream",
[stream], subject, content)
return json_success()
@authenticated_rest_api_view
@has_request_variables
def api_bitbucket_webhook(request, user_profile, payload=REQ(validator=check_dict([])),
stream=REQ(default='commits')):
repository = payload['repository']
commits = [{'id': commit['raw_node'], 'message': commit['message'],
'url': '%s%scommits/%s' % (payload['canon_url'],
repository['absolute_url'],
commit['raw_node'])}
for commit in payload['commits']]
subject = repository['name']
if len(commits) == 0:
# Bitbucket doesn't give us enough information to really give
# a useful message :/
content = ("%s [force pushed](%s)"
% (payload['user'],
payload['canon_url'] + repository['absolute_url']))
else:
branch = payload['commits'][-1]['branch']
content = build_commit_list_content(commits, branch, None, payload['user'])
subject += '/%s' % (branch,)
check_send_message(user_profile, get_client("ZulipBitBucketWebhook"), "stream",
[stream], subject, content)
return json_success()
@authenticated_rest_api_view
@has_request_variables
def api_stash_webhook(request, user_profile, stream=REQ(default='')):
try:
payload = ujson.loads(request.body)
except ValueError:
return json_error("Malformed JSON input")
# We don't get who did the push, or we'd try to report that.
try:
repo_name = payload["repository"]["name"]
project_name = payload["repository"]["project"]["name"]
branch_name = payload["refChanges"][0]["refId"].split("/")[-1]
commit_entries = payload["changesets"]["values"]
commits = [(entry["toCommit"]["displayId"],
entry["toCommit"]["message"].split("\n")[0]) for \
entry in commit_entries]
head_ref = commit_entries[-1]["toCommit"]["displayId"]
except KeyError, e:
return json_error("Missing key %s in JSON" % (e.message,))
try:
stream = request.GET['stream']
except (AttributeError, KeyError):
stream = 'commits'
subject = "%s/%s: %s" % (project_name, repo_name, branch_name)
content = "`%s` was pushed to **%s** in **%s/%s** with:\n\n" % (
head_ref, branch_name, project_name, repo_name)
content += "\n".join("* `%s`: %s" % (
commit[0], commit[1]) for commit in commits)
check_send_message(user_profile, get_client("ZulipStashWebhook"), "stream",
[stream], subject, content)
return json_success()
class TicketDict(dict):
"""
A helper class to turn a dictionary with ticket information into
an object where each of the keys is an attribute for easy access.
"""
def __getattr__(self, field):
if "_" in field:
return self.get(field)
else:
return self.get("ticket_" + field)
def property_name(property, index):
# The Freshdesk API is currently pretty broken: statuses are customizable
# but the API will only tell you the number associated with the status, not
# the name. While we engage the Freshdesk developers about exposing this
# information through the API, since only FlightCar uses this integration,
# hardcode their statuses.
statuses = ["", "", "Open", "Pending", "Resolved", "Closed",
"Waiting on Customer", "Job Application", "Monthly"]
priorities = ["", "Low", "Medium", "High", "Urgent"]
if property == "status":
return statuses[index] if index < len(statuses) else str(index)
elif property == "priority":
return priorities[index] if index < len(priorities) else str(index)
else:
raise ValueError("Unknown property")
def parse_freshdesk_event(event_string):
# These are always of the form "{ticket_action:created}" or
# "{status:{from:4,to:6}}". Note the lack of string quoting: this isn't
# valid JSON so we have to parse it ourselves.
data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":")
if len(data) == 2:
# This is a simple ticket action event, like
# {ticket_action:created}.
return data
else:
# This is a property change event, like {status:{from:4,to:6}}. Pull out
# the property, from, and to states.
property, _, from_state, _, to_state = data
return (property, property_name(property, int(from_state)),
property_name(property, int(to_state)))
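# Illustrative examples of the parsing above (hypothetical event strings in the
# format Freshdesk sends):
#   parse_freshdesk_event('{ticket_action:created}')
#       -> ['ticket_action', 'created']
#   parse_freshdesk_event('{status:{from:4,to:6}}')
#       -> ('status', 'Resolved', 'Waiting on Customer')
# where the numeric states are mapped through property_name() and its
# hardcoded status/priority lists above.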
def format_freshdesk_note_message(ticket, event_info):
# There are public (visible to customers) and private note types.
note_type = event_info[1]
content = "%s <%s> added a %s note to [ticket #%s](%s)." % (
ticket.requester_name, ticket.requester_email, note_type,
ticket.id, ticket.url)
return content
def format_freshdesk_property_change_message(ticket, event_info):
# Freshdesk will only tell us the first event to match our webhook
# configuration, so if we change multiple properties, we only get the before
# and after data for the first one.
content = "%s <%s> updated [ticket #%s](%s):\n\n" % (
ticket.requester_name, ticket.requester_email, ticket.id, ticket.url)
# Why not `"%s %s %s" % event_info`? Because the linter doesn't like it.
content += "%s: **%s** => **%s**" % (
event_info[0].capitalize(), event_info[1], event_info[2])
return content
def format_freshdesk_ticket_creation_message(ticket):
# They send us the description as HTML.
cleaned_description = convert_html_to_markdown(ticket.description)
content = "%s <%s> created [ticket #%s](%s):\n\n" % (
ticket.requester_name, ticket.requester_email, ticket.id, ticket.url)
content += """~~~ quote
%s
~~~\n
""" % (cleaned_description,)
content += "Type: **%s**\nPriority: **%s**\nStatus: **%s**" % (
ticket.type, ticket.priority, ticket.status)
return content
@authenticated_rest_api_view
@has_request_variables
def api_freshdesk_webhook(request, user_profile, stream=REQ(default='')):
try:
payload = ujson.loads(request.body)
ticket_data = payload["freshdesk_webhook"]
except ValueError:
return json_error("Malformed JSON input")
required_keys = [
"triggered_event", "ticket_id", "ticket_url", "ticket_type",
"ticket_subject", "ticket_description", "ticket_status",
"ticket_priority", "requester_name", "requester_email",
]
for key in required_keys:
if ticket_data.get(key) is None:
logging.warning("Freshdesk webhook error. Payload was:")
logging.warning(request.body)
return json_error("Missing key %s in JSON" % (key,))
try:
stream = request.GET['stream']
except (AttributeError, KeyError):
stream = 'freshdesk'
ticket = TicketDict(ticket_data)
subject = "#%s: %s" % (ticket.id, ticket.subject)
try:
event_info = parse_freshdesk_event(ticket.triggered_event)
except ValueError:
return json_error("Malformed event %s" % (ticket.triggered_event,))
if event_info[1] == "created":
content = format_freshdesk_ticket_creation_message(ticket)
elif event_info[0] == "note_type":
content = format_freshdesk_note_message(ticket, event_info)
elif event_info[0] in ("status", "priority"):
content = format_freshdesk_property_change_message(ticket, event_info)
else:
        # Not an event we know how to handle; do nothing.
return json_success()
check_send_message(user_profile, get_client("ZulipFreshdeskWebhook"), "stream",
[stream], subject, content)
return json_success()
def truncate(string, length):
if len(string) > length:
string = string[:length-3] + '...'
return string
@authenticated_rest_api_view
def api_zendesk_webhook(request, user_profile):
"""
    Zendesk uses triggers with message templates. This webhook uses the
    ticket_id and ticket_title to create a subject, and passes the Zendesk
    user's configured message along to Zulip.
"""
try:
ticket_title = request.POST['ticket_title']
ticket_id = request.POST['ticket_id']
message = request.POST['message']
stream = request.POST.get('stream', 'zendesk')
except KeyError as e:
return json_error('Missing post parameter %s' % (e.message,))
subject = truncate('#%s: %s' % (ticket_id, ticket_title), 60)
check_send_message(user_profile, get_client('ZulipZenDeskWebhook'), 'stream',
[stream], subject, message)
return json_success()
PAGER_DUTY_EVENT_NAMES = {
'incident.trigger': 'triggered',
'incident.acknowledge': 'acknowledged',
'incident.unacknowledge': 'unacknowledged',
'incident.resolve': 'resolved',
'incident.assign': 'assigned',
'incident.escalate': 'escalated',
    'incident.delegate': 'delegated',
}
def build_pagerduty_formatdict(message):
    # Normalize the message dict; after this, all keys will exist. I would
    # rather produce some strange-looking messages than drop pages.
format_dict = {}
format_dict['action'] = PAGER_DUTY_EVENT_NAMES[message['type']]
format_dict['incident_id'] = message['data']['incident']['id']
format_dict['incident_num'] = message['data']['incident']['incident_number']
format_dict['incident_url'] = message['data']['incident']['html_url']
format_dict['service_name'] = message['data']['incident']['service']['name']
format_dict['service_url'] = message['data']['incident']['service']['html_url']
    # This key can be missing or null
if message['data']['incident'].get('assigned_to_user', None):
format_dict['assigned_to_email'] = message['data']['incident']['assigned_to_user']['email']
format_dict['assigned_to_username'] = message['data']['incident']['assigned_to_user']['email'].split('@')[0]
format_dict['assigned_to_url'] = message['data']['incident']['assigned_to_user']['html_url']
else:
format_dict['assigned_to_email'] = 'nobody'
format_dict['assigned_to_username'] = 'nobody'
format_dict['assigned_to_url'] = ''
    # This key can be missing or null
if message['data']['incident'].get('resolved_by_user', None):
format_dict['resolved_by_email'] = message['data']['incident']['resolved_by_user']['email']
format_dict['resolved_by_username'] = message['data']['incident']['resolved_by_user']['email'].split('@')[0]
format_dict['resolved_by_url'] = message['data']['incident']['resolved_by_user']['html_url']
else:
format_dict['resolved_by_email'] = 'nobody'
format_dict['resolved_by_username'] = 'nobody'
format_dict['resolved_by_url'] = ''
trigger_message = []
trigger_subject = message['data']['incident']['trigger_summary_data'].get('subject', '')
if trigger_subject:
trigger_message.append(trigger_subject)
trigger_description = message['data']['incident']['trigger_summary_data'].get('description', '')
if trigger_description:
trigger_message.append(trigger_description)
format_dict['trigger_message'] = u'\n'.join(trigger_message)
return format_dict
def send_raw_pagerduty_json(user_profile, stream, message, topic):
subject = topic or 'pagerduty'
body = (
u'Unknown pagerduty message\n'
u'``` py\n'
u'%s\n'
u'```') % (pprint.pformat(message),)
check_send_message(user_profile, get_client('ZulipPagerDutyWebhook'), 'stream',
[stream], subject, body)
def send_formated_pagerduty(user_profile, stream, message_type, format_dict, topic):
if message_type in ('incident.trigger', 'incident.unacknowledge'):
template = (u':unhealthy_heart: Incident '
u'[{incident_num}]({incident_url}) {action} by '
u'[{service_name}]({service_url}) and assigned to '
u'[{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')
elif message_type == 'incident.resolve' and format_dict['resolved_by_url']:
template = (u':healthy_heart: Incident '
u'[{incident_num}]({incident_url}) resolved by '
u'[{resolved_by_username}@]({resolved_by_url})\n\n>{trigger_message}')
elif message_type == 'incident.resolve' and not format_dict['resolved_by_url']:
template = (u':healthy_heart: Incident '
u'[{incident_num}]({incident_url}) resolved\n\n>{trigger_message}')
else:
template = (u':average_heart: Incident [{incident_num}]({incident_url}) '
u'{action} by [{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')
subject = topic or u'incident {incident_num}'.format(**format_dict)
body = template.format(**format_dict)
check_send_message(user_profile, get_client('ZulipPagerDutyWebhook'), 'stream',
[stream], subject, body)
@api_key_only_webhook_view
@has_request_variables
def api_pagerduty_webhook(request, user_profile, stream=REQ(default='pagerduty'), topic=REQ(default=None)):
payload = ujson.loads(request.body)
for message in payload['messages']:
message_type = message['type']
if message_type not in PAGER_DUTY_EVENT_NAMES:
send_raw_pagerduty_json(user_profile, stream, message, topic)
try:
format_dict = build_pagerduty_formatdict(message)
except:
send_raw_pagerduty_json(user_profile, stream, message, topic)
else:
send_formated_pagerduty(user_profile, stream, message_type, format_dict, topic)
return json_success()
| apache-2.0 |
algorithmic-music-exploration/amen | amen/timing.py | 1 | 3596 | #!/usr/bin/env python
'''Timing interface'''
from bisect import bisect_left, bisect_right
import numpy as np
import pandas as pd
import librosa
class TimeSlice(object):
"""
A slice of time: has a start time, a duration, and a reference to an Audio object.
"""
def __init__(self, time, duration, audio, unit='s'):
self.time = pd.to_timedelta(time, unit=unit)
self.duration = pd.to_timedelta(duration, unit=unit)
self.audio = audio
def __repr__(self):
args = self.time.delta * 1e-9, self.duration.delta * 1e-9
return '<TimeSlice, start: {0:.2f}, duration: {1:.2f}>'.format(*args)
def get_samples(self):
"""
Gets the samples corresponding to this TimeSlice from the parent audio object.
"""
start = self.time.delta * 1e-9
duration = self.duration.delta * 1e-9
starting_sample, ending_sample = librosa.time_to_samples(
[start, start + duration], self.audio.sample_rate
)
left_offsets, right_offsets = self._get_offsets(
starting_sample, ending_sample, self.audio.num_channels
)
samples = self._offset_samples(
starting_sample,
ending_sample,
left_offsets,
right_offsets,
self.audio.num_channels,
)
return samples, left_offsets[0], right_offsets[0]
def _get_offsets(self, starting_sample, ending_sample, num_channels):
"""
Find the offset to the next zero-crossing, for each channel.
"""
offsets = []
for zero_index in self.audio.zero_indexes:
index = bisect_left(zero_index, starting_sample) - 1
if index < 0:
starting_offset = 0
else:
starting_crossing = zero_index[index]
starting_offset = starting_crossing - starting_sample
index = bisect_left(zero_index, ending_sample)
if index >= len(zero_index):
ending_offset = 0
else:
zci = min(bisect_right(zero_index, ending_sample), len(zero_index) - 1)
ending_crossing = zero_index[zci]
ending_offset = ending_crossing - ending_sample
offsets.append((starting_offset, ending_offset))
if num_channels == 1:
results = (offsets[0], offsets[0])
elif num_channels == 2:
results = (offsets[0], offsets[1])
return results
def _offset_samples(
self, starting_sample, ending_sample, left_offsets, right_offsets, num_channels
):
"""
        Applies the zero-crossing offsets when slicing the raw samples for each channel.
"""
left_slice = (
0,
slice(starting_sample + left_offsets[0], ending_sample + left_offsets[1]),
)
right_slice = left_slice
if num_channels == 2:
right_slice = (
1,
slice(
starting_sample + right_offsets[0], ending_sample + right_offsets[1]
),
)
left_channel = self.audio.raw_samples[left_slice]
right_channel = self.audio.raw_samples[right_slice]
return np.array([left_channel, right_channel])
class TimingList(list):
"""
A list of TimeSlices.
"""
def __init__(self, name, timings, audio, unit='s'):
super(self.__class__, self).__init__()
self.name = name
for (start, duration) in timings:
time_slice = TimeSlice(start, duration, audio, unit=unit)
self.append(time_slice)
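# Editorial sketch, not part of the original module: minimal use of TimeSlice
# with a stand-in audio object. The real Audio class lives elsewhere in amen;
# the stub below only provides the attributes TimeSlice actually reads.
if __name__ == '__main__':
    class _StubAudio(object):
        sample_rate = 44100
        num_channels = 1
        raw_samples = np.zeros((1, 44100))             # one second of silence, mono
        zero_indexes = [np.arange(0, 44100, 100)]      # fake zero-crossing positions
    beat = TimeSlice(0.5, 0.25, _StubAudio())          # start at 0.5 s, last 0.25 s
    samples, left_offset, right_offset = beat.get_samples()
    print('%r -> samples shape %s, offsets (%d, %d)'
          % (beat, samples.shape, left_offset, right_offset))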
| bsd-2-clause |
akionakamura/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
nwiizo/workspace_2017 | ansible-modules-extras/cloud/amazon/cloudtrail.py | 23 | 8625 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: cloudtrail
short_description: manage CloudTrail creation and deletion
description:
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- "Ansible Core Team"
- "Ted Timmons"
requirements:
- "boto >= 2.21"
options:
state:
description:
- add or remove CloudTrail configuration.
required: true
choices: ['enabled', 'disabled']
name:
description:
- name for given CloudTrail configuration.
- This is a primary key and is used to identify the configuration.
  s3_bucket_name:
description:
- bucket to place CloudTrail in.
- this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- required when state=enabled.
required: false
s3_key_prefix:
description:
- prefix to keys in bucket. A trailing slash is not necessary and will be removed.
required: false
include_global_events:
description:
      - whether to record API calls from global services such as IAM and STS.
required: false
default: false
choices: ["true", "false"]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
version_added: "1.5"
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
version_added: "1.5"
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
version_added: "1.5"
extends_documentation_fragment: aws
"""
EXAMPLES = """
- name: enable cloudtrail
local_action: cloudtrail
state: enabled
name: main
s3_bucket_name: ourbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: enable cloudtrail with different configuration
local_action: cloudtrail
state: enabled
name: main
s3_bucket_name: ourbucket2
s3_key_prefix: ''
region: us-east-1
- name: remove cloudtrail
local_action: cloudtrail
state: disabled
name: main
region: us-east-1
"""
HAS_BOTO = False
try:
import boto
import boto.cloudtrail
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_ec2_creds
class CloudTrailManager:
"""Handles cloudtrail configuration"""
def __init__(self, module, region=None, **aws_connect_params):
self.module = module
self.region = region
self.aws_connect_params = aws_connect_params
self.changed = False
try:
self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
self.module.fail_json(msg=str(e))
def view_status(self, name):
return self.conn.get_trail_status(name)
def view(self, name):
ret = self.conn.describe_trails(trail_name_list=[name])
trailList = ret.get('trailList', [])
if len(trailList) == 1:
return trailList[0]
return None
def exists(self, name=None):
ret = self.view(name)
if ret:
return True
return False
def enable_logging(self, name):
'''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
self.conn.start_logging(name)
def enable(self, **create_args):
return self.conn.create_trail(**create_args)
def update(self, **create_args):
return self.conn.update_trail(**create_args)
def delete(self, name):
        '''Delete a given cloudtrail configuration. Throws Exception on error.'''
self.conn.delete_trail(name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['enabled', 'disabled']},
name={'required': True, 'type': 'str'},
s3_bucket_name={'required': False, 'type': 'str'},
s3_key_prefix={'default': '', 'required': False, 'type': 'str'},
include_global_events={'default': True, 'required': False, 'type': 'bool'},
))
required_together = (['state', 's3_bucket_name'])
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
ec2_url, access_key, secret_key, region = get_ec2_creds(module)
aws_connect_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ct_name = module.params['name']
s3_bucket_name = module.params['s3_bucket_name']
    # remove trailing slash from the key prefix; a trailing slash really messes up the key structure.
s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
include_global_events = module.params['include_global_events']
#if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
# module.fail_json(msg="ELBs are required for registration or viewing")
cf_man = CloudTrailManager(module, region=region, **aws_connect_params)
results = { 'changed': False }
if module.params['state'] == 'enabled':
results['exists'] = cf_man.exists(name=ct_name)
if results['exists']:
results['view'] = cf_man.view(ct_name)
# only update if the values have changed.
if results['view']['S3BucketName'] != s3_bucket_name or \
results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
results['view']['IncludeGlobalServiceEvents'] != include_global_events:
if not module.check_mode:
results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
results['changed'] = True
else:
if not module.check_mode:
# doesn't exist. create it.
results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
results['changed'] = True
# given cloudtrail should exist now. Enable the logging.
results['view_status'] = cf_man.view_status(ct_name)
results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
if not results['was_logging_enabled']:
if not module.check_mode:
cf_man.enable_logging(ct_name)
results['logging_enabled'] = True
results['changed'] = True
    # delete the cloudtrail
elif module.params['state'] == 'disabled':
# check to see if it exists before deleting.
results['exists'] = cf_man.exists(name=ct_name)
if results['exists']:
# it exists, so we should delete it and mark changed.
if not module.check_mode:
cf_man.delete(ct_name)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| mit |
flibbertigibbet/open-transit-indicators | python/django/datasources/tasks/__init__.py | 2 | 1091 | """Thin wrappers for celery tasks so that autodiscovery works without having a giant file."""
from datasources.tasks.shapefile import (run_shapefile_to_boundary, run_get_shapefile_fields,
run_load_shapefile_data)
from datasources.tasks.osm import run_osm_import
from datasources.tasks.gtfs import run_validate_gtfs
from datasources.tasks.realtime import run_realtime_import
from transit_indicators.celery_settings import app
@app.task
def validate_gtfs(gtfsfeed_id):
run_validate_gtfs(gtfsfeed_id)
@app.task
def shapefile_to_boundary(boundary_id):
run_shapefile_to_boundary(boundary_id)
@app.task
def get_shapefile_fields(demographicdata_id):
run_get_shapefile_fields(demographicdata_id)
@app.task
def load_shapefile_data(demographicdata_id, pop1_field, pop2_field, dest1_field):
run_load_shapefile_data(demographicdata_id, pop1_field, pop2_field, dest1_field)
@app.task
def import_osm_data(osmdata_id):
run_osm_import(osmdata_id)
@app.task
def import_real_time_data(realtime_id):
run_realtime_import(realtime_id)
| gpl-3.0 |
ikool/metact06 | lib/werkzeug/formparser.py | 295 | 21205 | # -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
from io import BytesIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper
from werkzeug._compat import to_native, text_type
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import make_line_iter, \
get_input_stream, get_content_length
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header
#: an iterator that yields empty strings
_empty_string_iter = repeat('')
#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def default_stream_factory(total_content_length, filename, content_type,
content_length=None):
"""The stream factory that is used per default."""
if total_content_length > 1024 * 500:
return TemporaryFile('wb+')
return BytesIO()
def parse_form_data(environ, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
"""Parse the form data in the environ and return it as tuple in the form
``(stream, form, files)``. You should only call this method if the
transport method is `POST`, `PUT`, or `PATCH`.
If the mimetype of the data transmitted is `multipart/form-data` the
files multidict will be filled with `FileStorage` objects. If the
mimetype is unknown the input stream is wrapped and returned as first
argument, else the stream is empty.
This is a shortcut for the common usage of :class:`FormDataParser`.
Have a look at :ref:`dealing-with-request-data` for more details.
.. versionadded:: 0.5
The `max_form_memory_size`, `max_content_length` and
`cls` parameters were added.
.. versionadded:: 0.5.1
The optional `silent` flag was added.
:param environ: the WSGI environment to be used for parsing.
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
:return: A tuple in the form ``(stream, form, files)``.
"""
return FormDataParser(stream_factory, charset, errors,
max_form_memory_size, max_content_length,
cls, silent).parse_from_environ(environ)
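# Editorial sketch, not part of the original module: typical use of
# parse_form_data from WSGI code. The field names 'name' and 'file' are
# hypothetical; any WSGI-compliant server supplies the environ dict.
def _parse_form_data_example(environ):
    # Only meaningful for POST, PUT or PATCH requests.
    stream, form, files = parse_form_data(environ)
    name = form.get('name', u'')      # a url-encoded or multipart form field
    upload = files.get('file')        # a FileStorage instance, or None
    filename = upload.filename if upload is not None else None
    return name, filename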
def exhaust_stream(f):
"""Helper decorator for methods that exhausts the stream on return."""
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
exhaust = getattr(stream, 'exhaust', None)
if exhaust is not None:
exhaust()
else:
while 1:
chunk = stream.read(1024 * 64)
if not chunk:
break
return update_wrapper(wrapper, f)
class FormDataParser(object):
"""This class implements parsing of form data for Werkzeug. By itself
it can parse multipart and url encoded form data. It can be subclassed
and extended but for most mimetypes it is a better idea to use the
untouched stream and expose it as separate attributes on a request
object.
.. versionadded:: 0.8
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
"""
def __init__(self, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
if stream_factory is None:
stream_factory = default_stream_factory
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
self.max_content_length = max_content_length
if cls is None:
cls = MultiDict
self.cls = cls
self.silent = silent
def get_parse_func(self, mimetype, options):
return self.parse_functions.get(mimetype)
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get('CONTENT_TYPE', '')
content_length = get_content_length(environ)
mimetype, options = parse_options_header(content_type)
return self.parse(get_input_stream(environ), mimetype,
content_length, options)
def parse(self, stream, mimetype, content_length, options=None):
"""Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``.
"""
if self.max_content_length is not None and \
content_length is not None and \
content_length > self.max_content_length:
raise exceptions.RequestEntityTooLarge()
if options is None:
options = {}
parse_func = self.get_parse_func(mimetype, options)
if parse_func is not None:
try:
return parse_func(self, stream, mimetype,
content_length, options)
except ValueError:
if not self.silent:
raise
return stream, self.cls(), self.cls()
@exhaust_stream
def _parse_multipart(self, stream, mimetype, content_length, options):
parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls)
boundary = options.get('boundary')
if boundary is None:
raise ValueError('Missing boundary')
if isinstance(boundary, text_type):
boundary = boundary.encode('ascii')
form, files = parser.parse(stream, boundary, content_length)
return stream, form, files
@exhaust_stream
def _parse_urlencoded(self, stream, mimetype, content_length, options):
if self.max_form_memory_size is not None and \
content_length is not None and \
content_length > self.max_form_memory_size:
raise exceptions.RequestEntityTooLarge()
form = url_decode_stream(stream, self.charset,
errors=self.errors, cls=self.cls)
return stream, form, self.cls()
#: mapping of mimetypes to parsing functions
parse_functions = {
'multipart/form-data': _parse_multipart,
'application/x-www-form-urlencoded': _parse_urlencoded,
'application/x-url-encoded': _parse_urlencoded
}
def is_valid_multipart_boundary(boundary):
"""Checks if the string given is a valid multipart boundary."""
return _multipart_boundary_re.match(boundary) is not None
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ['\r\n', b'\r\n']:
return line[:-2], True
elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
"""Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated
"""
result = []
for line in iterable:
line = to_native(line)
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError('unexpected end of line in multipart header')
if not line:
break
elif line[0] in ' \t' and result:
key, value = result[-1]
result[-1] = (key, value + '\n ' + line[1:])
else:
parts = line.split(':', 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
# we link the list to the headers, no need to create a copy, the
# list was not shared anyways.
return Headers(result)
_begin_form = 'begin_form'
_begin_file = 'begin_file'
_cont = 'cont'
_end = 'end'
class MultiPartParser(object):
def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
max_form_memory_size=None, cls=None, buffer_size=64 * 1024):
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
if stream_factory is None:
stream_factory = default_stream_factory
if cls is None:
cls = MultiDict
self.cls = cls
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
self.buffer_size = buffer_size
def _fix_ie_filename(self, filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ':\\' or filename[:2] == '\\\\':
return filename.split('\\')[-1]
return filename
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return b''
def fail(self, message):
raise ValueError(message)
def get_part_encoding(self, headers):
transfer_encoding = headers.get('content-transfer-encoding')
if transfer_encoding is not None and \
transfer_encoding in _supported_multipart_encodings:
return transfer_encoding
def get_part_charset(self, headers):
# Figure out input charset for current part
content_type = headers.get('content-type')
if content_type:
mimetype, ct_params = parse_options_header(content_type)
return ct_params.get('charset', self.charset)
return self.charset
def start_file_streaming(self, filename, headers, total_content_length):
if isinstance(filename, bytes):
filename = filename.decode(self.charset, self.errors)
filename = self._fix_ie_filename(filename)
content_type = headers.get('content-type')
try:
content_length = int(headers['content-length'])
except (KeyError, ValueError):
content_length = 0
container = self.stream_factory(total_content_length, content_type,
filename, content_length)
return filename, container
def in_memory_threshold_reached(self, bytes):
raise exceptions.RequestEntityTooLarge()
def validate_boundary(self, boundary):
if not boundary:
self.fail('Missing boundary')
if not is_valid_multipart_boundary(boundary):
self.fail('Invalid boundary: %s' % boundary)
if len(boundary) > self.buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happens is for non debug builds where
# the assert is skipped.
self.fail('Boundary longer than buffer size')
def parse_lines(self, file, boundary, content_length):
"""Generate parts of
``('begin_form', (headers, name))``
``('begin_file', (headers, name, filename))``
``('cont', bytestring)``
``('end', None)``
Always obeys the grammar
parts = ( begin_form cont* end |
begin_file cont* end )*
"""
next_part = b'--' + boundary
last_part = next_part + b'--'
iterator = chain(make_line_iter(file, limit=content_length,
buffer_size=self.buffer_size),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator == last_part:
return
elif terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
yield _begin_form, (headers, name)
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
yield _begin_file, (headers, name, filename)
buf = b''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == b'--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
if transfer_encoding == 'base64':
transfer_encoding = 'base64_codec'
try:
line = codecs.decode(line, transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
yield _cont, buf
buf = b''
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
                # if something other than a newline is in there we write it
# out.
if line[-2:] == b'\r\n':
buf = b'\r\n'
cutoff = -2
else:
buf = line[-1:]
cutoff = -1
yield _cont, line[:cutoff]
else: # pragma: no cover
raise ValueError('unexpected end of part')
# if we have a leftover in the buffer that is not a newline
# character we have to flush it, otherwise we will chop of
# certain values.
if buf not in (b'', b'\r', b'\n', b'\r\n'):
yield _cont, buf
yield _end, None
def parse_parts(self, file, boundary, content_length):
"""Generate ``('file', (name, val))`` and
``('form', (name, val))`` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield ('file',
(name, FileStorage(container, filename, name,
headers=headers)))
else:
part_charset = self.get_part_charset(headers)
yield ('form',
(name, b''.join(container).decode(
part_charset, self.errors)))
def parse(self, file, boundary, content_length):
formstream, filestream = tee(
self.parse_parts(file, boundary, content_length), 2)
form = (p[1] for p in formstream if p[0] == 'form')
files = (p[1] for p in filestream if p[0] == 'file')
return self.cls(form), self.cls(files)
from werkzeug import exceptions
| apache-2.0 |
laborautonomo/poedit | deps/boost/tools/build/v2/test/copy_time.py | 44 | 1949 | #!/usr/bin/python
#
# Copyright (c) 2008 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that the common.copy rule sets the modification date of the new file to
# the current time.
import BoostBuild
tester = BoostBuild.Tester(use_test_config=False)
tester.write("test1.cpp", """\
template<bool, int M, class Next>
struct time_waster {
typedef typename time_waster<true, M-1, time_waster>::type type1;
typedef typename time_waster<false, M-1, time_waster>::type type2;
typedef void type;
};
template<bool B, class Next>
struct time_waster<B, 0, Next> {
typedef void type;
};
typedef time_waster<true, 10, void>::type type;
int f() { return 0; }
""")
tester.write("test2.cpp", """\
template<bool, int M, class Next>
struct time_waster {
typedef typename time_waster<true, M-1, time_waster>::type type1;
typedef typename time_waster<false, M-1, time_waster>::type type2;
typedef void type;
};
template<bool B, class Next>
struct time_waster<B, 0, Next> {
typedef void type;
};
typedef time_waster<true, 10, void>::type type;
int g() { return 0; }
""")
tester.write("jamroot.jam", """\
obj test2 : test2.cpp ;
obj test1 : test1.cpp : <dependency>test2 ;
install test2i : test2 : <dependency>test1 ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test2.obj")
tester.expect_addition("bin/$toolset/debug/test1.obj")
tester.expect_addition("test2i/test2.obj")
tester.expect_nothing_more()
test2src = tester.read("test2i/test2.obj")
test2dest = tester.read("bin/$toolset/debug/test2.obj")
if test2src != test2dest:
BoostBuild.annotation("failure", "The object file was not copied "
"correctly")
tester.fail_test(1)
tester.run_build_system(["-d1"])
tester.expect_output_lines("common.copy*", False)
tester.expect_nothing_more()
tester.cleanup()
| mit |
napkindrawing/ansible | lib/ansible/modules/network/cloudengine/ce_reboot.py | 39 | 4217 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_reboot
version_added: 2.4
short_description: Reboot HUAWEI CloudEngine switches.
description:
    - Reboot HUAWEI CloudEngine switches.
author: Gong Jianjun (@CloudEngine-Ansible)
requirements: ["ncclient"]
options:
confirm:
description:
- Safeguard boolean. Set to true if you're sure you want to reboot.
type: bool
default: false
save_config:
description:
- Flag indicating whether to save the configuration.
required: false
type: bool
default: false
'''
EXAMPLES = '''
- name: reboot module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Reboot the device
ce_reboot:
confirm: true
save_config: true
provider: "{{ cli }}"
'''
RETURN = '''
rebooted:
description: Whether the device was instructed to reboot.
returned: success
type: boolean
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import execute_nc_action, ce_argument_spec
try:
from ncclient.operations.errors import TimeoutExpiredError
HAS_NCCLIENT = True
except ImportError:
HAS_NCCLIENT = False
CE_NC_XML_EXECUTE_REBOOT = """
<action>
<devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<reboot>
<saveConfig>%s</saveConfig>
</reboot>
</devm>
</action>
"""
class Reboot(object):
""" Reboot a network device """
def __init__(self, **kwargs):
""" __init___ """
self.network_module = None
self.netconf = None
self.init_network_module(**kwargs)
self.confirm = self.network_module.params['confirm']
self.save_config = self.network_module.params['save_config']
def init_network_module(self, **kwargs):
""" init network module """
self.network_module = AnsibleModule(**kwargs)
def netconf_set_action(self, xml_str):
""" netconf execute action """
try:
execute_nc_action(self.network_module, xml_str)
except TimeoutExpiredError:
pass
def work(self):
""" start to work """
if not self.confirm:
self.network_module.fail_json(
msg='Error: Confirm must be set to true for this module to work.')
xml_str = CE_NC_XML_EXECUTE_REBOOT % str(self.save_config).lower()
self.netconf_set_action(xml_str)
def main():
""" main """
argument_spec = dict(
confirm=dict(required=True, type='bool', default='false'),
save_config=dict(required=False, type='bool', default='false')
)
argument_spec.update(ce_argument_spec)
module = Reboot(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_NCCLIENT:
module.network_module.fail_json(msg='Error: The ncclient library is required.')
changed = False
rebooted = False
module.work()
changed = True
rebooted = True
results = dict()
results['changed'] = changed
results['rebooted'] = rebooted
module.network_module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
ovnicraft/odoo | addons/l10n_sg/__openerp__.py | 331 | 2380 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Singapore - Accounting',
'version': '1.0',
'author': 'Tech Receptives',
'website': 'http://www.techreceptives.com',
'category': 'Localization/Account Charts',
'description': """
Singapore accounting chart and localization.
=======================================================
After installing this module, the Configuration wizard for accounting is launched.
* The Chart of Accounts consists of the list of all the general ledger accounts
required to maintain the transactions of Singapore.
* On that particular wizard, you will be asked to pass the name of the company,
the chart template to follow, the no. of digits to generate, the code for your
account and bank account, currency to create journals.
* The Chart of Taxes would display the different types/groups of taxes such as
Standard Rates, Zeroed, Exempted, MES and Out of Scope.
* The tax codes are specified considering the Tax Group and for easy accessibility of
submission of GST Tax Report.
""",
'depends': ['base', 'account', 'account_chart'],
'demo': [ ],
'data': [
'l10n_sg_chart_tax_code.xml',
'l10n_sg_chart.xml',
'l10n_sg_chart_tax.xml',
'l10n_sg_wizard.xml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sYnfo/samba-1 | buildtools/wafsamba/samba_optimisation.py | 12 | 8758 | # This file contains waf optimisations for Samba
# most of these optimisations are possible because of the restricted build environment
# that Samba has. For example, Samba doesn't attempt to cope with Win32 paths during the
# build, and Samba doesn't need build variants
# overall this makes some build tasks quite a bit faster
import os
import Build, Utils, Node
from TaskGen import feature, after, before
import preproc
@feature('cc', 'cxx')
@after('apply_type_vars', 'apply_lib_vars', 'apply_core')
def apply_incpaths(self):
lst = []
try:
kak = self.bld.kak
except AttributeError:
kak = self.bld.kak = {}
# TODO move the uselib processing out of here
for lib in self.to_list(self.uselib):
for path in self.env['CPPPATH_' + lib]:
if not path in lst:
lst.append(path)
if preproc.go_absolute:
for path in preproc.standard_includes:
if not path in lst:
lst.append(path)
for path in self.to_list(self.includes):
if not path in lst:
if preproc.go_absolute or path[0] != '/': # os.path.isabs(path):
lst.append(path)
else:
self.env.prepend_value('CPPPATH', path)
for path in lst:
node = None
if path[0] == '/': # os.path.isabs(path):
if preproc.go_absolute:
node = self.bld.root.find_dir(path)
elif path[0] == '#':
node = self.bld.srcnode
if len(path) > 1:
try:
node = kak[path]
except KeyError:
kak[path] = node = node.find_dir(path[1:])
else:
try:
node = kak[(self.path.id, path)]
except KeyError:
kak[(self.path.id, path)] = node = self.path.find_dir(path)
if node:
self.env.append_value('INC_PATHS', node)
@feature('cc')
@after('apply_incpaths')
def apply_obj_vars_cc(self):
"""after apply_incpaths for INC_PATHS"""
env = self.env
app = env.append_unique
cpppath_st = env['CPPPATH_ST']
lss = env['_CCINCFLAGS']
try:
cac = self.bld.cac
except AttributeError:
cac = self.bld.cac = {}
# local flags come first
# set the user-defined includes paths
for i in env['INC_PATHS']:
try:
lss.extend(cac[i.id])
except KeyError:
cac[i.id] = [cpppath_st % i.bldpath(env), cpppath_st % i.srcpath(env)]
lss.extend(cac[i.id])
env['_CCINCFLAGS'] = lss
# set the library include paths
for i in env['CPPPATH']:
app('_CCINCFLAGS', cpppath_st % i)
import Node, Environment
def vari(self):
return "default"
Environment.Environment.variant = vari
def variant(self, env):
if not env: return 0
elif self.id & 3 == Node.FILE: return 0
else: return "default"
Node.Node.variant = variant
import TaskGen, Task
def create_task(self, name, src=None, tgt=None):
task = Task.TaskBase.classes[name](self.env, generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
return task
TaskGen.task_gen.create_task = create_task
def hash_constraints(self):
a = self.attr
sum = hash((str(a('before', '')),
str(a('after', '')),
str(a('ext_in', '')),
str(a('ext_out', '')),
self.__class__.maxjobs))
return sum
Task.TaskBase.hash_constraints = hash_constraints
def hash_env_vars(self, env, vars_lst):
idx = str(id(env)) + str(vars_lst)
try:
return self.cache_sig_vars[idx]
except KeyError:
pass
m = Utils.md5()
m.update(''.join([str(env[a]) for a in vars_lst]))
ret = self.cache_sig_vars[idx] = m.digest()
return ret
Build.BuildContext.hash_env_vars = hash_env_vars
def store_fast(self, filename):
file = open(filename, 'wb')
data = self.get_merged_dict()
try:
Build.cPickle.dump(data, file, -1)
finally:
file.close()
Environment.Environment.store_fast = store_fast
def load_fast(self, filename):
file = open(filename, 'rb')
try:
data = Build.cPickle.load(file)
finally:
file.close()
self.table.update(data)
Environment.Environment.load_fast = load_fast
def is_this_a_static_lib(self, name):
try:
cache = self.cache_is_this_a_static_lib
except AttributeError:
cache = self.cache_is_this_a_static_lib = {}
try:
return cache[name]
except KeyError:
ret = cache[name] = 'cstaticlib' in self.bld.name_to_obj(name, self.env).features
return ret
TaskGen.task_gen.is_this_a_static_lib = is_this_a_static_lib
def shared_ancestors(self):
try:
cache = self.cache_is_this_a_static_lib
except AttributeError:
cache = self.cache_is_this_a_static_lib = {}
try:
return cache[id(self)]
except KeyError:
ret = []
if 'cshlib' in self.features: # or 'cprogram' in self.features:
if getattr(self, 'uselib_local', None):
lst = self.to_list(self.uselib_local)
ret = [x for x in lst if not self.is_this_a_static_lib(x)]
cache[id(self)] = ret
return ret
TaskGen.task_gen.shared_ancestors = shared_ancestors
@feature('cc', 'cxx')
@after('apply_link', 'init_cc', 'init_cxx', 'apply_core')
def apply_lib_vars(self):
"""after apply_link because of 'link_task'
after default_cc because of the attribute 'uselib'"""
# after 'apply_core' in case if 'cc' if there is no link
env = self.env
app = env.append_value
seen_libpaths = set([])
# OPTIMIZATION 1: skip uselib variables already added (700ms)
seen_uselib = set([])
# 1. the case of the libs defined in the project (visit ancestors first)
# the ancestors external libraries (uselib) will be prepended
self.uselib = self.to_list(self.uselib)
names = self.to_list(self.uselib_local)
seen = set([])
tmp = Utils.deque(names) # consume a copy of the list of names
while tmp:
lib_name = tmp.popleft()
# visit dependencies only once
if lib_name in seen:
continue
y = self.name_to_obj(lib_name)
if not y:
raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
y.post()
seen.add(lib_name)
# OPTIMIZATION 2: pre-compute ancestors shared libraries (100ms)
tmp.extend(y.shared_ancestors())
# link task and flags
if getattr(y, 'link_task', None):
link_name = y.target[y.target.rfind('/') + 1:]
if 'cstaticlib' in y.features:
app('STATICLIB', link_name)
elif 'cshlib' in y.features or 'cprogram' in y.features:
# WARNING some linkers can link against programs
app('LIB', link_name)
# the order
self.link_task.set_run_after(y.link_task)
# for the recompilation
dep_nodes = getattr(self.link_task, 'dep_nodes', [])
self.link_task.dep_nodes = dep_nodes + y.link_task.outputs
# OPTIMIZATION 3: reduce the amount of function calls
# add the link path too
par = y.link_task.outputs[0].parent
if id(par) not in seen_libpaths:
seen_libpaths.add(id(par))
tmp_path = par.bldpath(self.env)
if not tmp_path in env['LIBPATH']:
env.prepend_value('LIBPATH', tmp_path)
# add ancestors uselib too - but only propagate those that have no staticlib
for v in self.to_list(y.uselib):
if v not in seen_uselib:
seen_uselib.add(v)
if not env['STATICLIB_' + v]:
if not v in self.uselib:
self.uselib.insert(0, v)
# 2. the case of the libs defined outside
for x in self.uselib:
for v in self.p_flag_vars:
val = self.env[v + '_' + x]
if val:
self.env.append_value(v, val)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_lib_vars')
@before('apply_obj_vars')
def samba_before_apply_obj_vars(self):
"""before apply_obj_vars for uselib, this removes the standard pathes"""
def is_standard_libpath(env, path):
for _path in env.STANDARD_LIBPATH:
if _path == os.path.normpath(path):
return True
return False
v = self.env
for i in v['RPATH']:
if is_standard_libpath(v, i):
v['RPATH'].remove(i)
for i in v['LIBPATH']:
if is_standard_libpath(v, i):
v['LIBPATH'].remove(i)
| gpl-3.0 |
skycucumber/restful | python/venv/lib/python2.7/site-packages/wtforms/ext/csrf/session.py | 177 | 2627 | """
A provided CSRF implementation which puts CSRF data in a session.
This can be used fairly comfortably with many `request.session` type
objects, including the Werkzeug/Flask session store, Django sessions, and
potentially other similar objects which use a dict-like API for storing
session keys.
The basic concept is a randomly generated value is stored in the user's
session, and an hmac-sha1 of it (along with an optional expiration time,
for extra security) is used as the value of the csrf_token. If this token
validates with the hmac of the random value + expiration time, and the
expiration time is not passed, the CSRF validation will pass.
"""
from __future__ import unicode_literals
import hmac
import os
from hashlib import sha1
from datetime import datetime, timedelta
from ...validators import ValidationError
from .form import SecureForm
__all__ = ('SessionSecureForm', )
class SessionSecureForm(SecureForm):
TIME_FORMAT = '%Y%m%d%H%M%S'
TIME_LIMIT = timedelta(minutes=30)
SECRET_KEY = None
def generate_csrf_token(self, csrf_context):
if self.SECRET_KEY is None:
raise Exception('must set SECRET_KEY in a subclass of this form for it to work')
if csrf_context is None:
raise TypeError('Must provide a session-like object as csrf context')
session = getattr(csrf_context, 'session', csrf_context)
if 'csrf' not in session:
session['csrf'] = sha1(os.urandom(64)).hexdigest()
self.csrf_token.csrf_key = session['csrf']
if self.TIME_LIMIT:
expires = (datetime.now() + self.TIME_LIMIT).strftime(self.TIME_FORMAT)
csrf_build = '%s%s' % (session['csrf'], expires)
else:
expires = ''
csrf_build = session['csrf']
hmac_csrf = hmac.new(self.SECRET_KEY, csrf_build.encode('utf8'), digestmod=sha1)
return '%s##%s' % (expires, hmac_csrf.hexdigest())
def validate_csrf_token(self, field):
if not field.data or '##' not in field.data:
raise ValidationError(field.gettext('CSRF token missing'))
expires, hmac_csrf = field.data.split('##')
check_val = (field.csrf_key + expires).encode('utf8')
hmac_compare = hmac.new(self.SECRET_KEY, check_val, digestmod=sha1)
if hmac_compare.hexdigest() != hmac_csrf:
raise ValidationError(field.gettext('CSRF failed'))
if self.TIME_LIMIT:
now_formatted = datetime.now().strftime(self.TIME_FORMAT)
if now_formatted > expires:
raise ValidationError(field.gettext('CSRF token expired'))
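# Editorial sketch, not part of the original module: the minimal subclass that
# the SECRET_KEY check in generate_csrf_token requires. The secret key below is
# a placeholder; real applications must supply their own long random value.
class _ExampleSessionForm(SessionSecureForm):
    SECRET_KEY = b'change-me-to-a-long-random-value'
    TIME_LIMIT = timedelta(minutes=20)
# Typical flow (framework-specific details omitted):
#   form = _ExampleSessionForm(request.form, csrf_context=request.session)
#   if form.validate():   # runs validate_csrf_token() along with other validators
#       ...handle the submission...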
| gpl-2.0 |
Mattze96/youtube-dl | youtube_dl/extractor/cnet.py | 101 | 3246 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class CNETIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
'info_dict': {
'id': '56f4ea68-bd21-4852-b08c-4de5b8354c60',
'ext': 'flv',
'title': 'Hands-on with Microsoft Windows 8.1 Update',
'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
'thumbnail': 're:^http://.*/flmswindows8.jpg$',
'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
'uploader': 'Sarah Mitroff',
},
'params': {
'skip_download': 'requires rtmpdump',
}
}, {
'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
'info_dict': {
'id': '56527b93-d25d-44e3-b738-f989ce2e49ba',
'ext': 'flv',
'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. #TDPothole',
'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
'uploader': 'Ashley Esqueda',
'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
},
'params': {
'skip_download': True, # requires rtmpdump
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
data_json = self._html_search_regex(
r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
webpage, 'data json')
data = json.loads(data_json)
vdata = data['video']
if not vdata:
vdata = data['videos'][0]
if not vdata:
raise ExtractorError('Cannot find video data')
mpx_account = data['config']['players']['default']['mpx_account']
vid = vdata['files'].get('rtmp', vdata['files']['hds'])
tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
video_id = vdata['id']
title = vdata.get('headline')
if title is None:
title = vdata.get('title')
if title is None:
raise ExtractorError('Cannot find title!')
thumbnail = vdata.get('image', {}).get('path')
author = vdata.get('author')
if author:
uploader = '%s %s' % (author['firstName'], author['lastName'])
uploader_id = author.get('id')
else:
uploader = None
uploader_id = None
return {
'_type': 'url_transparent',
'url': tp_link,
'id': video_id,
'display_id': display_id,
'title': title,
'uploader': uploader,
'uploader_id': uploader_id,
'thumbnail': thumbnail,
}
| unlicense |
NickelMedia/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py | 119 | 9544 | #!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.w3c.test_parser import TestParser
options = {'all': False, 'no_overwrite': False}
class TestParserTest(unittest.TestCase):
def test_analyze_test_reftest_one_match(self):
test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
</head>
"""
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html)
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
def test_analyze_test_reftest_multiple_matches(self):
test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="match" href="orange-box-ref.xht" />
</head>
"""
oc = OutputCapture()
oc.capture_output()
try:
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html)
finally:
_, _, logs = oc.restore_output()
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
def test_analyze_test_reftest_match_and_mismatch(self):
test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="orange-box-notref.xht" />
</head>
"""
oc = OutputCapture()
oc.capture_output()
try:
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html)
finally:
_, _, logs = oc.restore_output()
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
def test_analyze_test_reftest_with_ref_support_Files(self):
""" Tests analyze_test() using a reftest that has refers to a reference file outside of the tests directory and the reference file has paths to other support files """
test_html = """<html>
<head>
<link rel="match" href="../reference/green-box-ref.xht" />
</head>
"""
ref_html = """<head>
<link href="support/css/ref-stylesheet.css" rel="stylesheet" type="text/css">
<style type="text/css">
background-image: url("../../support/some-image.png")
</style>
</head>
<body>
<div><img src="../support/black96x96.png" alt="Image download support must be enabled" /></div>
</body>
</html>
"""
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html, ref_contents=ref_html)
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertTrue('refsupport' in test_info.keys(), 'there should be refsupport files for this test')
self.assertEqual(len(test_info['refsupport']), 3, 'there should be 3 support files in this reference')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
def test_analyze_jstest(self):
""" Tests analyze_test() using a jstest """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html)
self.assertNotEqual(test_info, None, 'test_info is None')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertTrue('jstest' in test_info.keys(), 'test should be a jstest')
def test_analyze_pixel_test_all_true(self):
""" Tests analyze_test() using a test that is neither a reftest or jstest with all=False """
test_html = """<html>
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR" />
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
# Set options to 'all' so this gets found
options['all'] = True
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html)
self.assertNotEqual(test_info, None, 'test_info is None')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not be a jstest')
def test_analyze_pixel_test_all_false(self):
""" Tests analyze_test() using a test that is neither a reftest or jstest, with -all=False """
test_html = """<html>
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR" />
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
# Set all to false so this gets skipped
options['all'] = False
test_path = '/some/madeup/path/'
parser = TestParser(options, test_path + 'somefile.html')
test_info = parser.analyze_test(test_contents=test_html)
self.assertEqual(test_info, None, 'test should have been skipped')
def test_analyze_non_html_file(self):
""" Tests analyze_test() with a file that has no html"""
# FIXME: use a mock filesystem
parser = TestParser(options, os.path.join(os.path.dirname(__file__), 'test_parser.py'))
test_info = parser.analyze_test()
self.assertEqual(test_info, None, 'no tests should have been found in this file')
| bsd-3-clause |
EiSandi/greetingslack | greetingslack/lib/python2.7/site-packages/setuptools/command/install_egg_info.py | 412 | 2203 | from distutils import log, dir_util
import os
from setuptools import Command
from setuptools import namespaces
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(namespaces.Installer, Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',
('install_dir', 'install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name() + '.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = []
def run(self):
self.run_command('egg_info')
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink, (self.target,), "Removing " + self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(
self.copytree, (), "Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src, dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/', 'CVS/':
if src.startswith(skip) or '/' + skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
| mit |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
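# Minimal usage sketch (an assumption about the surrounding html5lib API, not
# part of this module): the filter wraps any token stream, e.g. one produced
# by an html5lib tree walker, and collapses runs of whitespace in Characters
# tokens while leaving <pre>, <textarea> and RCDATA content untouched:
#
#   import html5lib
#   from html5lib.filters.whitespace import Filter
#   doc = html5lib.parse("<p>a    b</p><pre>a    b</pre>")
#   walker = html5lib.getTreeWalker("etree")
#   tokens = Filter(walker(doc))
#   # serializing `tokens` yields "a b" inside <p> but keeps "a    b" in <pre>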
| artistic-2.0 |
syphar/django | django/contrib/contenttypes/admin.py | 114 | 5253 | from __future__ import unicode_literals
from functools import partial
from django.contrib.admin.checks import InlineModelAdminChecks
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.forms import (
BaseGenericInlineFormSet, generic_inlineformset_factory,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.forms import ALL_FIELDS
from django.forms.models import modelform_defines_fields
class GenericInlineModelAdminChecks(InlineModelAdminChecks):
def _check_exclude_of_parent_model(self, obj, parent_model):
# There's no FK to exclude, so no exclusion checks are required.
return []
def _check_relation(self, obj, parent_model):
# There's no FK, but we do need to confirm that the ct_field and ct_fk_field are valid,
# and that they are part of a GenericForeignKey.
gfks = [
f for f in obj.model._meta.private_fields
if isinstance(f, GenericForeignKey)
]
if len(gfks) == 0:
return [
checks.Error(
"'%s.%s' has no GenericForeignKey." % (
obj.model._meta.app_label, obj.model._meta.object_name
),
obj=obj.__class__,
id='admin.E301'
)
]
else:
# Check that the ct_field and ct_fk_fields exist
try:
obj.model._meta.get_field(obj.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"'ct_field' references '%s', which is not a field on '%s.%s'." % (
obj.ct_field, obj.model._meta.app_label, obj.model._meta.object_name
),
obj=obj.__class__,
id='admin.E302'
)
]
try:
obj.model._meta.get_field(obj.ct_fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"'ct_fk_field' references '%s', which is not a field on '%s.%s'." % (
obj.ct_fk_field, obj.model._meta.app_label, obj.model._meta.object_name
),
obj=obj.__class__,
id='admin.E303'
)
]
# There's one or more GenericForeignKeys; make sure that one of them
# uses the right ct_field and ct_fk_field.
for gfk in gfks:
if gfk.ct_field == obj.ct_field and gfk.fk_field == obj.ct_fk_field:
return []
return [
checks.Error(
"'%s.%s' has no GenericForeignKey using content type field '%s' and object ID field '%s'." % (
obj.model._meta.app_label, obj.model._meta.object_name, obj.ct_field, obj.ct_fk_field
),
obj=obj.__class__,
id='admin.E304'
)
]
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
checks_class = GenericInlineModelAdminChecks
def get_formset(self, request, obj=None, **kwargs):
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# GenericInlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"formset": self.formset,
"extra": self.get_extra(request, obj),
"can_delete": can_delete,
"can_order": False,
"fields": fields,
"min_num": self.get_min_num(request, obj),
"max_num": self.get_max_num(request, obj),
"exclude": exclude
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = ALL_FIELDS
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
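# Illustrative usage sketch (editor's addition; TaggedItem and BookmarkAdmin
# are hypothetical): a model holding a GenericForeignKey can be edited inline
# on another model's admin page via GenericTabularInline:
#
#   from django.contrib import admin
#   from django.contrib.contenttypes.admin import GenericTabularInline
#
#   class TaggedItemInline(GenericTabularInline):
#       model = TaggedItem  # model with content_type / object_id fields
#       extra = 1
#
#   class BookmarkAdmin(admin.ModelAdmin):
#       inlines = [TaggedItemInline]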
| bsd-3-clause |
Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/Week_3/playground/env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
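# For illustration (editor's addition, not in the original source): after
#   od = OrderedDict(); od['a'] = 1; od['b'] = 2
# the sentinel and the key map look roughly like
#   root   = [link_b, link_a, None]      # [PREV, NEXT, KEY]
#   link_a = [root,   link_b, 'a']
#   link_b = [link_a, root,   'b']
#   self.__map == {'a': link_a, 'b': link_b}
# so iteration starts at root[1] (yielding 'a' then 'b') and reversed
# iteration starts at root[0] (yielding 'b' then 'a').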
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| apache-2.0 |
dwightgunning/django | tests/update/tests.py | 325 | 5121 | from __future__ import unicode_literals
from django.test import TestCase
from .models import A, B, D, Bar, DataPoint, Foo, RelatedPoint
class SimpleTest(TestCase):
def setUp(self):
self.a1 = A.objects.create()
self.a2 = A.objects.create()
for x in range(20):
B.objects.create(a=self.a1)
D.objects.create(a=self.a1)
def test_nonempty_update(self):
"""
Test that update changes the right number of rows for a nonempty queryset
"""
num_updated = self.a1.b_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update(self):
"""
Test that update changes the right number of rows for an empty queryset
"""
num_updated = self.a2.b_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_nonempty_update_with_inheritance(self):
"""
Test that update changes the right number of rows for a nonempty queryset
when the update affects only a base table
"""
num_updated = self.a1.d_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update_with_inheritance(self):
"""
Test that update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a2.d_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_foreign_key_update_with_id(self):
"""
Test that update works using <field>_id for foreign keys
"""
num_updated = self.a1.d_set.update(a_id=self.a2)
self.assertEqual(num_updated, 20)
self.assertEqual(self.a2.d_set.count(), 20)
class AdvancedTests(TestCase):
def setUp(self):
self.d0 = DataPoint.objects.create(name="d0", value="apple")
self.d2 = DataPoint.objects.create(name="d2", value="banana")
self.d3 = DataPoint.objects.create(name="d3", value="banana")
self.r1 = RelatedPoint.objects.create(name="r1", data=self.d3)
def test_update(self):
"""
Objects are updated by first filtering the candidates into a queryset
and then calling the update() method. It executes immediately and
returns the number of rows matched by the query.
"""
resp = DataPoint.objects.filter(value="apple").update(name="d1")
self.assertEqual(resp, 1)
resp = DataPoint.objects.filter(value="apple")
self.assertEqual(list(resp), [self.d0])
def test_update_multiple_objects(self):
"""
We can update multiple objects at once.
"""
resp = DataPoint.objects.filter(value="banana").update(
value="pineapple")
self.assertEqual(resp, 2)
self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')
def test_update_fk(self):
"""
Foreign key fields can also be updated, although you can only update
the object referred to, not anything inside the related object.
"""
resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
self.assertEqual(resp, 1)
resp = RelatedPoint.objects.filter(data__name="d0")
self.assertEqual(list(resp), [self.r1])
def test_update_multiple_fields(self):
"""
Multiple fields can be updated at once
"""
resp = DataPoint.objects.filter(value="apple").update(
value="fruit", another_value="peach")
self.assertEqual(resp, 1)
d = DataPoint.objects.get(name="d0")
self.assertEqual(d.value, 'fruit')
self.assertEqual(d.another_value, 'peach')
def test_update_all(self):
"""
In the rare case you want to update every instance of a model, update()
is also a manager method.
"""
self.assertEqual(DataPoint.objects.update(value='thing'), 3)
resp = DataPoint.objects.values('value').distinct()
self.assertEqual(list(resp), [{'value': 'thing'}])
def test_update_slice_fail(self):
"""
We do not support update on already sliced query sets.
"""
method = DataPoint.objects.all()[:2].update
self.assertRaises(AssertionError, method,
another_value='another thing')
def test_update_respects_to_field(self):
"""
Update of an FK field which specifies a to_field works.
"""
a_foo = Foo.objects.create(target='aaa')
b_foo = Foo.objects.create(target='bbb')
bar = Bar.objects.create(foo=a_foo)
self.assertEqual(bar.foo_id, a_foo.target)
bar_qs = Bar.objects.filter(pk=bar.pk)
self.assertEqual(bar_qs[0].foo_id, a_foo.target)
bar_qs.update(foo=b_foo)
self.assertEqual(bar_qs[0].foo_id, b_foo.target)
| bsd-3-clause |
BondAnthony/ansible | test/lib/ansible_test/_internal/coverage/combine.py | 18 | 7768 | """Combine code coverage files."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..target import (
walk_compile_targets,
walk_powershell_targets,
)
from ..io import (
read_text_file,
)
from ..util import (
display,
)
from ..util_common import (
ResultType,
write_json_test_results,
)
from . import (
enumerate_python_arcs,
enumerate_powershell_lines,
get_collection_path_regexes,
get_python_coverage_files,
get_python_modules,
get_powershell_coverage_files,
initialize_coverage,
COVERAGE_OUTPUT_FILE_NAME,
COVERAGE_GROUPS,
CoverageConfig,
PathChecker,
)
def command_coverage_combine(args):
"""Patch paths in coverage files and merge into a single file.
:type args: CoverageConfig
:rtype: list[str]
"""
paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args)
for path in paths:
display.info('Generated combined output: %s' % path, verbosity=1)
return paths
def _command_coverage_combine_python(args):
"""
:type args: CoverageConfig
:rtype: list[str]
"""
coverage = initialize_coverage(args)
modules = get_python_modules()
coverage_files = get_python_coverage_files()
counter = 0
sources = _get_coverage_targets(args, walk_compile_targets)
groups = _build_stub_groups(args, sources, lambda line_count: set())
collection_search_re, collection_sub_re = get_collection_path_regexes()
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
if group not in groups:
groups[group] = {}
arc_data = groups[group]
if filename not in arc_data:
arc_data[filename] = set()
arc_data[filename].update(arcs)
output_files = []
coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
path_checker = PathChecker(args, collection_search_re)
for group in sorted(groups):
arc_data = groups[group]
updated = coverage.CoverageData()
for filename in arc_data:
if not path_checker.check_path(filename):
continue
updated.add_arcs({filename: list(arc_data[filename])})
if args.all:
updated.add_arcs(dict((source[0], []) for source in sources))
if not args.explain:
output_file = coverage_file + group
updated.write_file(output_file) # always write files to make sure stale files do not exist
if updated:
# only report files which are non-empty to prevent coverage from reporting errors
output_files.append(output_file)
path_checker.report()
return sorted(output_files)
def _command_coverage_combine_powershell(args):
"""
:type args: CoverageConfig
:rtype: list[str]
"""
coverage_files = get_powershell_coverage_files()
def _default_stub_value(lines):
val = {}
for line in range(lines):
val[line] = 0
return val
counter = 0
sources = _get_coverage_targets(args, walk_powershell_targets)
groups = _build_stub_groups(args, sources, _default_stub_value)
collection_search_re, collection_sub_re = get_collection_path_regexes()
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
if group not in groups:
groups[group] = {}
coverage_data = groups[group]
if filename not in coverage_data:
coverage_data[filename] = {}
file_coverage = coverage_data[filename]
for line_no, hit_count in hits.items():
file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count
output_files = []
path_checker = PathChecker(args)
for group in sorted(groups):
coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
if args.all:
# Add 0 line entries for files not in coverage_data
for source, source_line_count in sources:
if source in coverage_data:
continue
coverage_data[source] = _default_stub_value(source_line_count)
if not args.explain:
output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
write_json_test_results(ResultType.COVERAGE, output_file, coverage_data)
output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
path_checker.report()
return sorted(output_files)
def _get_coverage_targets(args, walk_func):
"""
:type args: CoverageConfig
:type walk_func: Func
:rtype: list[tuple[str, int]]
"""
sources = []
if args.all or args.stub:
# excludes symlinks of regular files to avoid reporting on the same file multiple times
# in the future it would be nice to merge any coverage for symlinks into the real files
for target in walk_func(include_symlinks=False):
target_path = os.path.abspath(target.path)
target_lines = len(read_text_file(target_path).splitlines())
sources.append((target_path, target_lines))
sources.sort()
return sources
def _build_stub_groups(args, sources, default_stub_value):
"""
:type args: CoverageConfig
:type sources: List[tuple[str, int]]
:type default_stub_value: Func[int]
:rtype: dict
"""
groups = {}
if args.stub:
stub_group = []
stub_groups = [stub_group]
stub_line_limit = 500000
stub_line_count = 0
for source, source_line_count in sources:
stub_group.append((source, source_line_count))
stub_line_count += source_line_count
if stub_line_count > stub_line_limit:
stub_line_count = 0
stub_group = []
stub_groups.append(stub_group)
for stub_index, stub_group in enumerate(stub_groups):
if not stub_group:
continue
groups['=stub-%02d' % (stub_index + 1)] = dict((source, default_stub_value(line_count))
for source, line_count in stub_group)
return groups
def get_coverage_group(args, coverage_file):
"""
:type args: CoverageConfig
:type coverage_file: str
:rtype: str
"""
parts = os.path.basename(coverage_file).split('=', 4)
# noinspection PyTypeChecker
if len(parts) != 5 or not parts[4].startswith('coverage.'):
return None
names = dict(
command=parts[0],
target=parts[1],
environment=parts[2],
version=parts[3],
)
group = ''
for part in COVERAGE_GROUPS:
if part in args.group_by:
group += '=%s' % names[part]
return group
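# Worked example (the file name is hypothetical, derived only from the parsing
# above): a coverage file named
#   'integration=ping=docker=3.8=coverage.python.1234'
# splits into command='integration', target='ping', environment='docker',
# version='3.8'; if args.group_by selects 'command' and 'target', the group
# string is built from one '=<value>' fragment per selected part (in
# COVERAGE_GROUPS order), e.g. '=integration=ping'.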
| gpl-3.0 |
aYukiSekiguchi/ACCESS-Chromium | chrome/common/extensions/docs/examples/apps/hello-python/oauth2/clients/smtp.py | 884 | 1680 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
"""SMTP wrapper for smtplib.SMTP that implements XOAUTH."""
def authenticate(self, url, consumer, token):
if consumer is not None and not isinstance(consumer, oauth2.Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, oauth2.Token):
raise ValueError("Invalid token.")
self.docmd('AUTH', 'XOAUTH %s' % \
base64.b64encode(oauth2.build_xoauth_string(url, consumer, token)))
| bsd-3-clause |
mstreatfield/anim-studio-tools | grenade/sources/grenade/translators/version.py | 5 | 2030 | #
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios), its
# affiliates and/or its licensors.
#
from .entity import EntityTranslator
from ..converters.default import convert_asset, convert_link, convert_links, convert_project, convert_user
class VersionTranslator(EntityTranslator):
"""
Version property translator.
Assigning this translator to a Version model will cause inbound property values to be converted.
.. versionadded:: 1.4.0
"""
def __init__(self, session=None):
"""
Setup (register converters, etc) the new translator instance.
:param session:
An active Shotgun session.
.. versionadded:: 1.4.0
"""
EntityTranslator.__init__(self, session)
self.register('entity', convert_link)
self.register('notes', convert_links)
self.register('open_notes', convert_links)
self.register('project', convert_project)
self.register('sg_assets', convert_links)
self.register('sg_assigned_to', convert_user)
self.register('sg_tank_revision', convert_link)
self.register('sg_tasks', convert_links)
self.register('user', convert_user)
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 |
benschulz/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/constants.py | 963 | 87346 | from __future__ import absolute_import, division, unicode_literals
import string
import gettext
_ = gettext.gettext
EOF = None
E = {
"null-character":
_("Null character in input stream, replaced with U+FFFD."),
"invalid-codepoint":
_("Invalid codepoint in stream."),
"incorrectly-placed-solidus":
_("Solidus (/) incorrectly placed in tag."),
"incorrect-cr-newline-entity":
_("Incorrect CR newline entity, replaced with LF."),
"illegal-windows-1252-entity":
_("Entity used with illegal number (windows-1252 reference)."),
"cant-convert-numeric-entity":
_("Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x)."),
"illegal-codepoint-for-numeric-entity":
_("Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x."),
"numeric-entity-without-semicolon":
_("Numeric entity didn't end with ';'."),
"expected-numeric-entity-but-got-eof":
_("Numeric entity expected. Got end of file instead."),
"expected-numeric-entity":
_("Numeric entity expected but none found."),
"named-entity-without-semicolon":
_("Named entity didn't end with ';'."),
"expected-named-entity":
_("Named entity expected. Got none."),
"attributes-in-end-tag":
_("End tag contains unexpected attributes."),
'self-closing-flag-on-end-tag':
_("End tag contains unexpected self-closing flag."),
"expected-tag-name-but-got-right-bracket":
_("Expected tag name. Got '>' instead."),
"expected-tag-name-but-got-question-mark":
_("Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)"),
"expected-tag-name":
_("Expected tag name. Got something else instead"),
"expected-closing-tag-but-got-right-bracket":
_("Expected closing tag. Got '>' instead. Ignoring '</>'."),
"expected-closing-tag-but-got-eof":
_("Expected closing tag. Unexpected end of file."),
"expected-closing-tag-but-got-char":
_("Expected closing tag. Unexpected character '%(data)s' found."),
"eof-in-tag-name":
_("Unexpected end of file in the tag name."),
"expected-attribute-name-but-got-eof":
_("Unexpected end of file. Expected attribute name instead."),
"eof-in-attribute-name":
_("Unexpected end of file in attribute name."),
"invalid-character-in-attribute-name":
_("Invalid character in attribute name"),
"duplicate-attribute":
_("Dropped duplicate attribute on tag."),
"expected-end-of-tag-name-but-got-eof":
_("Unexpected end of file. Expected = or end of tag."),
"expected-attribute-value-but-got-eof":
_("Unexpected end of file. Expected attribute value."),
"expected-attribute-value-but-got-right-bracket":
_("Expected attribute value. Got '>' instead."),
'equals-in-unquoted-attribute-value':
_("Unexpected = in unquoted attribute"),
'unexpected-character-in-unquoted-attribute-value':
_("Unexpected character in unquoted attribute"),
"invalid-character-after-attribute-name":
_("Unexpected character after attribute name."),
"unexpected-character-after-attribute-value":
_("Unexpected character after attribute value."),
"eof-in-attribute-value-double-quote":
_("Unexpected end of file in attribute value (\")."),
"eof-in-attribute-value-single-quote":
_("Unexpected end of file in attribute value (')."),
"eof-in-attribute-value-no-quotes":
_("Unexpected end of file in attribute value."),
"unexpected-EOF-after-solidus-in-tag":
_("Unexpected end of file in tag. Expected >"),
"unexpected-character-after-solidus-in-tag":
_("Unexpected character after / in tag. Expected >"),
"expected-dashes-or-doctype":
_("Expected '--' or 'DOCTYPE'. Not found."),
"unexpected-bang-after-double-dash-in-comment":
_("Unexpected ! after -- in comment"),
"unexpected-space-after-double-dash-in-comment":
_("Unexpected space after -- in comment"),
"incorrect-comment":
_("Incorrect comment."),
"eof-in-comment":
_("Unexpected end of file in comment."),
"eof-in-comment-end-dash":
_("Unexpected end of file in comment (-)"),
"unexpected-dash-after-double-dash-in-comment":
_("Unexpected '-' after '--' found in comment."),
"eof-in-comment-double-dash":
_("Unexpected end of file in comment (--)."),
"eof-in-comment-end-space-state":
_("Unexpected end of file in comment."),
"eof-in-comment-end-bang-state":
_("Unexpected end of file in comment."),
"unexpected-char-in-comment":
_("Unexpected character in comment found."),
"need-space-after-doctype":
_("No space after literal string 'DOCTYPE'."),
"expected-doctype-name-but-got-right-bracket":
_("Unexpected > character. Expected DOCTYPE name."),
"expected-doctype-name-but-got-eof":
_("Unexpected end of file. Expected DOCTYPE name."),
"eof-in-doctype-name":
_("Unexpected end of file in DOCTYPE name."),
"eof-in-doctype":
_("Unexpected end of file in DOCTYPE."),
"expected-space-or-right-bracket-in-doctype":
_("Expected space or '>'. Got '%(data)s'"),
"unexpected-end-of-doctype":
_("Unexpected end of DOCTYPE."),
"unexpected-char-in-doctype":
_("Unexpected character in DOCTYPE."),
"eof-in-innerhtml":
_("XXX innerHTML EOF"),
"unexpected-doctype":
_("Unexpected DOCTYPE. Ignored."),
"non-html-root":
_("html needs to be the first start tag."),
"expected-doctype-but-got-eof":
_("Unexpected End of file. Expected DOCTYPE."),
"unknown-doctype":
_("Erroneous DOCTYPE."),
"expected-doctype-but-got-chars":
_("Unexpected non-space characters. Expected DOCTYPE."),
"expected-doctype-but-got-start-tag":
_("Unexpected start tag (%(name)s). Expected DOCTYPE."),
"expected-doctype-but-got-end-tag":
_("Unexpected end tag (%(name)s). Expected DOCTYPE."),
"end-tag-after-implied-root":
_("Unexpected end tag (%(name)s) after the (implied) root element."),
"expected-named-closing-tag-but-got-eof":
_("Unexpected end of file. Expected end tag (%(name)s)."),
"two-heads-are-not-better-than-one":
_("Unexpected start tag head in existing head. Ignored."),
"unexpected-end-tag":
_("Unexpected end tag (%(name)s). Ignored."),
"unexpected-start-tag-out-of-my-head":
_("Unexpected start tag (%(name)s) that can be in head. Moved."),
"unexpected-start-tag":
_("Unexpected start tag (%(name)s)."),
"missing-end-tag":
_("Missing end tag (%(name)s)."),
"missing-end-tags":
_("Missing end tags (%(name)s)."),
"unexpected-start-tag-implies-end-tag":
_("Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s)."),
"unexpected-start-tag-treated-as":
_("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
"deprecated-tag":
_("Unexpected start tag %(name)s. Don't use it!"),
"unexpected-start-tag-ignored":
_("Unexpected start tag %(name)s. Ignored."),
"expected-one-end-tag-but-got-another":
_("Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s)."),
"end-tag-too-early":
_("End tag (%(name)s) seen too early. Expected other end tag."),
"end-tag-too-early-named":
_("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
"end-tag-too-early-ignored":
_("End tag (%(name)s) seen too early. Ignored."),
"adoption-agency-1.1":
_("End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm."),
"adoption-agency-1.2":
_("End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm."),
"adoption-agency-1.3":
_("End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm."),
"adoption-agency-4.4":
_("End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm."),
"unexpected-end-tag-treated-as":
_("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
"no-end-tag":
_("This element (%(name)s) has no end tag."),
"unexpected-implied-end-tag-in-table":
_("Unexpected implied end tag (%(name)s) in the table phase."),
"unexpected-implied-end-tag-in-table-body":
_("Unexpected implied end tag (%(name)s) in the table body phase."),
"unexpected-char-implies-table-voodoo":
_("Unexpected non-space characters in "
"table context caused voodoo mode."),
"unexpected-hidden-input-in-table":
_("Unexpected input with type hidden in table context."),
"unexpected-form-in-table":
_("Unexpected form in table context."),
"unexpected-start-tag-implies-table-voodoo":
_("Unexpected start tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-end-tag-implies-table-voodoo":
_("Unexpected end tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-cell-in-table-body":
_("Unexpected table cell start tag (%(name)s) "
"in the table body phase."),
"unexpected-cell-end-tag":
_("Got table cell end tag (%(name)s) "
"while required end tags are missing."),
"unexpected-end-tag-in-table-body":
_("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
"unexpected-implied-end-tag-in-table-row":
_("Unexpected implied end tag (%(name)s) in the table row phase."),
"unexpected-end-tag-in-table-row":
_("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
"unexpected-select-in-select":
_("Unexpected select start tag in the select phase "
"treated as select end tag."),
"unexpected-input-in-select":
_("Unexpected input start tag in the select phase."),
"unexpected-start-tag-in-select":
_("Unexpected start tag token (%(name)s in the select phase. "
"Ignored."),
"unexpected-end-tag-in-select":
_("Unexpected end tag (%(name)s) in the select phase. Ignored."),
"unexpected-table-element-start-tag-in-select-in-table":
_("Unexpected table element start tag (%(name)s) in the select in table phase."),
"unexpected-table-element-end-tag-in-select-in-table":
_("Unexpected table element end tag (%(name)s) in the select in table phase."),
"unexpected-char-after-body":
_("Unexpected non-space characters in the after body phase."),
"unexpected-start-tag-after-body":
_("Unexpected start tag token (%(name)s)"
" in the after body phase."),
"unexpected-end-tag-after-body":
_("Unexpected end tag token (%(name)s)"
" in the after body phase."),
"unexpected-char-in-frameset":
_("Unexpected characters in the frameset phase. Characters ignored."),
"unexpected-start-tag-in-frameset":
_("Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-frameset-in-frameset-innerhtml":
_("Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML)."),
"unexpected-end-tag-in-frameset":
_("Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-char-after-frameset":
_("Unexpected non-space characters in the "
"after frameset phase. Ignored."),
"unexpected-start-tag-after-frameset":
_("Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-frameset":
_("Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-body-innerhtml":
_("Unexpected end tag after body(innerHtml)"),
"expected-eof-but-got-char":
_("Unexpected non-space characters. Expected end of file."),
"expected-eof-but-got-start-tag":
_("Unexpected start tag (%(name)s)"
". Expected end of file."),
"expected-eof-but-got-end-tag":
_("Unexpected end tag (%(name)s)"
". Expected end of file."),
"eof-in-table":
_("Unexpected end of file. Expected table content."),
"eof-in-select":
_("Unexpected end of file. Expected select content."),
"eof-in-frameset":
_("Unexpected end of file. Expected frameset content."),
"eof-in-script-in-script":
_("Unexpected end of file. Expected script content."),
"eof-in-foreign-lands":
_("Unexpected end of file. Expected foreign content"),
"non-void-element-with-trailing-solidus":
_("Trailing solidus not allowed on element %(name)s"),
"unexpected-html-element-in-foreign-content":
_("Element %(name)s not allowed in a non-html context"),
"unexpected-end-tag-before-html":
_("Unexpected end tag (%(name)s) before html."),
"XXX-undefined-error":
_("Undefined error (this sucks and should be fixed)"),
}
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset((
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
))
formattingElements = frozenset((
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
))
specialElements = frozenset((
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
))
htmlIntegrationPointElements = frozenset((
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
))
mathmlTextIntegrationPointElements = frozenset((
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
))
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
spaceCharacters = frozenset((
"\t",
"\n",
"\u000C",
" ",
"\r"
))
tableInsertModeElements = frozenset((
"table",
"tbody",
"tfoot",
"thead",
"tr"
))
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset((
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
))
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
))
booleanAttributes = {
"": frozenset(("irrelevant",)),
"style": frozenset(("scoped",)),
"img": frozenset(("ismap",)),
"audio": frozenset(("autoplay", "controls")),
"video": frozenset(("autoplay", "controls")),
"script": frozenset(("defer", "async")),
"details": frozenset(("open",)),
"datagrid": frozenset(("multiple", "disabled")),
"command": frozenset(("hidden", "disabled", "checked", "default")),
"hr": frozenset(("noshade")),
"menu": frozenset(("autosubmit",)),
"fieldset": frozenset(("disabled", "readonly")),
"option": frozenset(("disabled", "readonly", "selected")),
"optgroup": frozenset(("disabled", "readonly")),
"button": frozenset(("disabled", "autofocus")),
"input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
"select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
"output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
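# Full HTML5 named character reference table. Names without a trailing semicolon
# are the legacy forms that are only recognized in certain parsing contexts.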
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
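# Substitutions applied to numeric character references: NULL becomes U+FFFD and
# code points in the 0x80-0x9F range are reinterpreted as their Windows-1252 characters.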
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
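# Integer codes for the token types passed from the tokenizer to the tree builders.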
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]))
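# Reverse of the namespaces mapping (URI -> prefix); the MathML entry is
# overridden to the "math" prefix below.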
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    pass


class ReparseException(Exception):
    pass
| mpl-2.0 |
drakuna/odoo | addons/website_blog/tests/common.py | 46 | 1479 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestWebsiteBlogCommon(common.TransactionCase):
def setUp(self):
super(TestWebsiteBlogCommon, self).setUp()
Users = self.env['res.users']
group_blog_manager_id = self.ref('base.group_website_designer')
group_employee_id = self.ref('base.group_user')
group_public_id = self.ref('base.group_public')
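# Create three test users with different access levels: a regular employee,
# a blog manager (website designer) and a public/portal user.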
self.user_employee = Users.with_context({'no_reset_password': True}).create({
'name': 'Armande Employee',
'login': 'armande',
'alias_name': 'armande',
'email': 'armande.employee@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_employee_id])]
})
self.user_blogmanager = Users.with_context({'no_reset_password': True}).create({
'name': 'Bastien BlogManager',
'login': 'bastien',
'alias_name': 'bastien',
'email': 'bastien.blogmanager@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_blog_manager_id, group_employee_id])]
})
self.user_public = Users.with_context({'no_reset_password': True}).create({
'name': 'Cedric Public',
'login': 'cedric',
'alias_name': 'cedric',
'email': 'cedric.public@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_public_id])]
})
| gpl-3.0 |
bankonme/cjdns | node_build/dependencies/libuv/build/gyp/test/variables/commands/gyptest-commands-repeated.py | 330 | 1313 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands where they are evaluated
more than once.
"""
import TestGyp
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands-repeated.gyp.stdout').replace('\r\n', '\n')
test.run_gyp('commands-repeated.gyp',
'--debug', 'variables',
stdout=expect, ignore_line_numbers=True)
# Verify the commands-repeated.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands-repeated.gypd').replace('\r\n', '\n')
expect = test.read('commands-repeated.gypd.golden').replace('\r\n', '\n')
if not test.match(contents, expect):
print "Unexpected contents of `commands-repeated.gypd'"
test.diff(expect, contents, 'commands-repeated.gypd ')
test.fail_test()
test.pass_test()
| gpl-3.0 |
Glottotopia/aagd | moin/local/moin/MoinMoin/support/xappy/schema.py | 2 | 1162 | #!/usr/bin/env python
#
# Copyright (C) 2008 Lemur Consulting Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
r"""schema.py: xdefinitions and implementations of field actions.
"""
__docformat__ = "restructuredtext en"
import errors as _errors
from replaylog import log as _log
import parsedate as _parsedate
class Schema(object):
def __init__(self):
pass
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| mit |
ndingwall/scikit-learn | sklearn/datasets/_california_housing.py | 11 | 6174 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/datasets/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from os.path import dirname, exists, join
from os import makedirs, remove
import tarfile
import numpy as np
import logging
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import _pkl_filepath
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils.validation import _deprecate_positional_args
# The original data can be found at:
# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz
ARCHIVE = RemoteFileMetadata(
filename='cal_housing.tgz',
url='https://ndownloader.figshare.com/files/5976036',
checksum=('aaa5c9a6afe2225cc2aed2723682ae40'
'3280c4a3695a2ddda4ffb5d8215ea681'))
logger = logging.getLogger(__name__)
@_deprecate_positional_args
def fetch_california_housing(*, data_home=None, download_if_missing=True,
return_X_y=False, as_frame=False):
"""Load the California housing dataset (regression).
============== ==============
Samples total 20640
Dimensionality 8
Features real
Target real 0.15 - 5.
============== ==============
Read more in the :ref:`User Guide <california_housing_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
.. versionadded:: 0.23
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray, shape (20640, 8)
Each row corresponding to the 8 feature values in order.
If ``as_frame`` is True, ``data`` is a pandas object.
target : numpy array of shape (20640,)
Each value corresponds to the average
house value in units of 100,000.
If ``as_frame`` is True, ``target`` is a pandas object.
feature_names : list of length 8
Array of ordered feature names used in the dataset.
DESCR : string
Description of the California housing dataset.
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
.. versionadded:: 0.23
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
Notes
-----
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, 'cal_housing.pkz')
if not exists(filepath):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info('Downloading Cal. housing from {} to {}'.format(
ARCHIVE.url, data_home))
archive_path = _fetch_remote(ARCHIVE, dirname=data_home)
with tarfile.open(mode="r:gz", name=archive_path) as f:
cal_housing = np.loadtxt(
f.extractfile('CaliforniaHousing/cal_housing.data'),
delimiter=',')
# Columns are not in the same order compared to the previous
# URL resource on lib.stat.cmu.edu
columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
cal_housing = cal_housing[:, columns_index]
joblib.dump(cal_housing, filepath, compress=6)
remove(archive_path)
else:
cal_housing = joblib.load(filepath)
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'california_housing.rst')) as dfile:
descr = dfile.read()
X = data
y = target
frame = None
target_names = ["MedHouseVal", ]
if as_frame:
frame, X, y = _convert_data_dataframe("fetch_california_housing",
data,
target,
feature_names,
target_names)
if return_X_y:
return X, y
return Bunch(data=X,
target=y,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=descr)
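# Usage sketch, illustrative only and not part of the shipped module: how a
# caller might fetch the data and read the derived per-household features
# documented above. Wrapped in a helper so importing this file never triggers
# a download.
def _california_housing_usage_sketch():
    X, y = fetch_california_housing(return_X_y=True)
    # X has 8 columns in the order given by `feature_names`; column 2 is
    # AveRooms (total rooms / households) and y is MedHouseVal in units of
    # 100,000 dollars, as computed above.
    print(X.shape, y.shape)   # expected: (20640, 8) (20640,)
    print(X[0, 2], y[0])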
| bsd-3-clause |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/bsddb/test/test_compare.py | 72 | 15142 | """
Test cases for the Python Berkeley DB duplicate and Btree key comparison functions.
"""
import sys, os, re
import test_all
from cStringIO import StringIO
import unittest
from test_all import db, dbshelve, test_support, \
get_new_environment_path, get_new_database_path
# Needed for python 3. "cmp" vanished in 3.0.1
def cmp(a, b) :
if a==b : return 0
if a<b : return -1
return 1
lexical_cmp = cmp
def lowercase_cmp(left, right) :
return cmp(left.lower(), right.lower())
def make_reverse_comparator(cmp) :
def reverse(left, right, delegate=cmp) :
return - delegate(left, right)
return reverse
_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
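# Illustrative sketch (not one of the original tests): how a comparator such
# as lowercase_cmp would be installed on a Btree database via set_bt_compare(),
# mirroring what the test cases below do. Nothing here runs at import time;
# `db` is the bsddb module imported above and `env_home` is a caller-supplied
# directory.
def _bt_compare_usage_sketch(env_home):
    env = db.DBEnv()
    env.open(env_home, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK)
    database = db.DB(env)
    database.set_bt_compare(lowercase_cmp)   # must be set before open()
    database.open('sketch.db', 'example', db.DB_BTREE, db.DB_CREATE)
    for key in ('b', 'A', 'c'):
        database.put(key, 'value')
    # Cursor iteration now yields keys in lowercase_cmp order: 'A', 'b', 'c'.
    database.close()
    env.close()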
class ComparatorTests(unittest.TestCase) :
def comparator_test_helper(self, comparator, expected_data) :
data = expected_data[:]
import sys
if sys.version_info < (2, 6) :
data.sort(cmp=comparator)
else : # Insertion Sort. Please, improve
data2 = []
for i in data :
for j, k in enumerate(data2) :
r = comparator(k, i)
if r == 1 :
data2.insert(j, i)
break
else :
data2.append(i)
data = data2
self.assertEqual(data, expected_data,
"comparator `%s' is not right: %s vs. %s"
% (comparator, expected_data, data))
def test_lexical_comparator(self) :
self.comparator_test_helper(lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_comparator(self) :
rev = _expected_lexical_test_data[:]
rev.reverse()
self.comparator_test_helper(make_reverse_comparator(lexical_cmp),
rev)
def test_lowercase_comparator(self) :
self.comparator_test_helper(lowercase_cmp,
_expected_lowercase_test_data)
class AbstractBtreeKeyCompareTestCase(unittest.TestCase) :
env = None
db = None
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3,0)) and
(sys.version_info < (3, 2))) :
def assertLess(self, a, b, msg=None) :
return self.assertTrue(a<b, msg=msg)
def setUp(self) :
self.filename = self.__class__.__name__ + '.db'
self.homeDir = get_new_environment_path()
env = db.DBEnv()
env.open(self.homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
def tearDown(self) :
self.closeDB()
if self.env is not None:
self.env.close()
self.env = None
test_support.rmtree(self.homeDir)
def addDataToDB(self, data) :
i = 0
for item in data:
self.db.put(item, str(i))
i = i + 1
def createDB(self, key_comparator) :
self.db = db.DB(self.env)
self.setupDB(key_comparator)
self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)
def setupDB(self, key_comparator) :
self.db.set_bt_compare(key_comparator)
def closeDB(self) :
if self.db is not None:
self.db.close()
self.db = None
def startTest(self) :
pass
def finishTest(self, expected = None) :
if expected is not None:
self.check_results(expected)
self.closeDB()
def check_results(self, expected) :
curs = self.db.cursor()
try:
index = 0
rec = curs.first()
while rec:
key, ignore = rec
self.assertLess(index, len(expected),
"to many values returned from cursor")
self.assertEqual(expected[index], key,
"expected value `%s' at %d but got `%s'"
% (expected[index], index, key))
index = index + 1
rec = curs.next()
self.assertEqual(index, len(expected),
"not enough values returned from cursor")
finally:
curs.close()
class BtreeKeyCompareTestCase(AbstractBtreeKeyCompareTestCase) :
def runCompareTest(self, comparator, data) :
self.startTest()
self.createDB(comparator)
self.addDataToDB(data)
self.finishTest(data)
def test_lexical_ordering(self) :
self.runCompareTest(lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_ordering(self) :
expected_rev_data = _expected_lexical_test_data[:]
expected_rev_data.reverse()
self.runCompareTest(make_reverse_comparator(lexical_cmp),
expected_rev_data)
def test_compare_function_useless(self) :
self.startTest()
def socialist_comparator(l, r) :
return 0
self.createDB(socialist_comparator)
self.addDataToDB(['b', 'a', 'd'])
# all things being equal the first key will be the only key
# in the database... (with the last key's value fwiw)
self.finishTest(['b'])
class BtreeExceptionsTestCase(AbstractBtreeKeyCompareTestCase) :
def test_raises_non_callable(self) :
self.startTest()
self.assertRaises(TypeError, self.createDB, 'abc')
self.assertRaises(TypeError, self.createDB, None)
self.finishTest()
def test_set_bt_compare_with_function(self) :
self.startTest()
self.createDB(lexical_cmp)
self.finishTest()
def check_results(self, results) :
pass
def test_compare_function_incorrect(self) :
self.startTest()
def bad_comparator(l, r) :
return 1
# verify that set_bt_compare checks that comparator('', '') == 0
self.assertRaises(TypeError, self.createDB, bad_comparator)
self.finishTest()
def verifyStderr(self, method, successRe) :
"""
Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.
"""
stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if not successRe.search(errorOut) :
self.fail("unexpected stderr output:\n"+errorOut)
if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
sys.exc_traceback = sys.last_traceback = None
def _test_compare_function_exception(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_bt_compare test
return 0
raise RuntimeError, "i'm a naughty comparison function"
self.createDB(bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_exception(self) :
self.verifyStderr(
self._test_compare_function_exception,
re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
)
def _test_compare_function_bad_return(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_bt_compare test
return 0
return l
self.createDB(bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_bad_return(self) :
self.verifyStderr(
self._test_compare_function_bad_return,
re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
)
def test_cannot_assign_twice(self) :
def my_compare(a, b) :
return 0
self.startTest()
self.createDB(my_compare)
self.assertRaises(RuntimeError, self.db.set_bt_compare, my_compare)
class AbstractDuplicateCompareTestCase(unittest.TestCase) :
env = None
db = None
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3,0)) and
(sys.version_info < (3, 2))) :
def assertLess(self, a, b, msg=None) :
return self.assertTrue(a<b, msg=msg)
def setUp(self) :
self.filename = self.__class__.__name__ + '.db'
self.homeDir = get_new_environment_path()
env = db.DBEnv()
env.open(self.homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
def tearDown(self) :
self.closeDB()
if self.env is not None:
self.env.close()
self.env = None
test_support.rmtree(self.homeDir)
def addDataToDB(self, data) :
for item in data:
self.db.put("key", item)
def createDB(self, dup_comparator) :
self.db = db.DB(self.env)
self.setupDB(dup_comparator)
self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)
def setupDB(self, dup_comparator) :
self.db.set_flags(db.DB_DUPSORT)
self.db.set_dup_compare(dup_comparator)
def closeDB(self) :
if self.db is not None:
self.db.close()
self.db = None
def startTest(self) :
pass
def finishTest(self, expected = None) :
if expected is not None:
self.check_results(expected)
self.closeDB()
def check_results(self, expected) :
curs = self.db.cursor()
try:
index = 0
rec = curs.first()
while rec:
ignore, data = rec
self.assertLess(index, len(expected),
"to many values returned from cursor")
self.assertEqual(expected[index], data,
"expected value `%s' at %d but got `%s'"
% (expected[index], index, data))
index = index + 1
rec = curs.next()
self.assertEqual(index, len(expected),
"not enough values returned from cursor")
finally:
curs.close()
class DuplicateCompareTestCase(AbstractDuplicateCompareTestCase) :
def runCompareTest(self, comparator, data) :
self.startTest()
self.createDB(comparator)
self.addDataToDB(data)
self.finishTest(data)
def test_lexical_ordering(self) :
self.runCompareTest(lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_ordering(self) :
expected_rev_data = _expected_lexical_test_data[:]
expected_rev_data.reverse()
self.runCompareTest(make_reverse_comparator(lexical_cmp),
expected_rev_data)
class DuplicateExceptionsTestCase(AbstractDuplicateCompareTestCase) :
def test_raises_non_callable(self) :
self.startTest()
self.assertRaises(TypeError, self.createDB, 'abc')
self.assertRaises(TypeError, self.createDB, None)
self.finishTest()
def test_set_dup_compare_with_function(self) :
self.startTest()
self.createDB(lexical_cmp)
self.finishTest()
def check_results(self, results) :
pass
def test_compare_function_incorrect(self) :
self.startTest()
def bad_comparator(l, r) :
return 1
# verify that set_dup_compare checks that comparator('', '') == 0
self.assertRaises(TypeError, self.createDB, bad_comparator)
self.finishTest()
def test_compare_function_useless(self) :
self.startTest()
def socialist_comparator(l, r) :
return 0
self.createDB(socialist_comparator)
# DUPSORT does not allow "duplicate duplicates"
self.assertRaises(db.DBKeyExistError, self.addDataToDB, ['b', 'a', 'd'])
self.finishTest()
def verifyStderr(self, method, successRe) :
"""
Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.
"""
stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if not successRe.search(errorOut) :
self.fail("unexpected stderr output:\n"+errorOut)
if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
sys.exc_traceback = sys.last_traceback = None
def _test_compare_function_exception(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_dup_compare test
return 0
raise RuntimeError, "i'm a naughty comparison function"
self.createDB(bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_exception(self) :
self.verifyStderr(
self._test_compare_function_exception,
re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
)
def _test_compare_function_bad_return(self) :
self.startTest()
def bad_comparator(l, r) :
if l == r:
# pass the set_dup_compare test
return 0
return l
self.createDB(bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
self.finishTest()
def test_compare_function_bad_return(self) :
self.verifyStderr(
self._test_compare_function_bad_return,
re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
)
def test_cannot_assign_twice(self) :
def my_compare(a, b) :
return 0
self.startTest()
self.createDB(my_compare)
self.assertRaises(RuntimeError, self.db.set_dup_compare, my_compare)
def test_suite() :
res = unittest.TestSuite()
res.addTest(unittest.makeSuite(ComparatorTests))
res.addTest(unittest.makeSuite(BtreeExceptionsTestCase))
res.addTest(unittest.makeSuite(BtreeKeyCompareTestCase))
res.addTest(unittest.makeSuite(DuplicateExceptionsTestCase))
res.addTest(unittest.makeSuite(DuplicateCompareTestCase))
return res
if __name__ == '__main__':
unittest.main(defaultTest = 'suite')
| mit |
artwr/airflow | tests/contrib/operators/test_sagemaker_tuning_operator.py | 6 | 7482 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
from airflow import configuration
from airflow.contrib.hooks.sagemaker_hook import SageMakerHook
from airflow.contrib.operators.sagemaker_tuning_operator \
import SageMakerTuningOperator
from airflow.exceptions import AirflowException
role = 'arn:aws:iam:role/test-role'
bucket = 'test-bucket'
key = 'test/data'
data_url = 's3://{}/{}'.format(bucket, key)
job_name = 'test-job-name'
image = 'test-image'
output_url = 's3://{}/test/output'.format(bucket)
create_tuning_params = {'HyperParameterTuningJobName': job_name,
'HyperParameterTuningJobConfig': {
'Strategy': 'Bayesian',
'HyperParameterTuningJobObjective': {
'Type': 'Maximize',
'MetricName': 'test_metric'
},
'ResourceLimits': {
'MaxNumberOfTrainingJobs': '123',
'MaxParallelTrainingJobs': '123'
},
'ParameterRanges': {
'IntegerParameterRanges': [
{
'Name': 'k',
'MinValue': '2',
'MaxValue': '10'
},
]
}
},
'TrainingJobDefinition': {
'StaticHyperParameters':
{
'k': '10',
'feature_dim': '784',
'mini_batch_size': '500',
'force_dense': 'True'
},
'AlgorithmSpecification':
{
'TrainingImage': image,
'TrainingInputMode': 'File'
},
'RoleArn': role,
'InputDataConfig':
[
{
'ChannelName': 'train',
'DataSource': {
'S3DataSource': {
'S3DataType': 'S3Prefix',
'S3Uri': data_url,
'S3DataDistributionType':
'FullyReplicated'
}
},
'CompressionType': 'None',
'RecordWrapperType': 'None'
}
],
'OutputDataConfig':
{
'S3OutputPath': output_url
},
'ResourceConfig':
{
'InstanceCount': '2',
'InstanceType': 'ml.c4.8xlarge',
'VolumeSizeInGB': '50'
},
'StoppingCondition': dict(MaxRuntimeInSeconds=60 * 60)
}
}
class TestSageMakerTuningOperator(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
self.sagemaker = SageMakerTuningOperator(
task_id='test_sagemaker_operator',
aws_conn_id='sagemaker_test_conn',
config=create_tuning_params,
wait_for_completion=False,
check_interval=5
)
def test_parse_config_integers(self):
self.sagemaker.parse_config_integers()
self.assertEqual(self.sagemaker.config['TrainingJobDefinition']['ResourceConfig']
['InstanceCount'],
int(self.sagemaker.config['TrainingJobDefinition']['ResourceConfig']
['InstanceCount']))
self.assertEqual(self.sagemaker.config['TrainingJobDefinition']['ResourceConfig']
['VolumeSizeInGB'],
int(self.sagemaker.config['TrainingJobDefinition']['ResourceConfig']
['VolumeSizeInGB']))
self.assertEqual(self.sagemaker.config['HyperParameterTuningJobConfig']['ResourceLimits']
['MaxNumberOfTrainingJobs'],
int(self.sagemaker.config['HyperParameterTuningJobConfig']['ResourceLimits']
['MaxNumberOfTrainingJobs']))
self.assertEqual(self.sagemaker.config['HyperParameterTuningJobConfig']['ResourceLimits']
['MaxParallelTrainingJobs'],
int(self.sagemaker.config['HyperParameterTuningJobConfig']['ResourceLimits']
['MaxParallelTrainingJobs']))
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, 'create_tuning_job')
def test_execute(self, mock_tuning, mock_client):
mock_tuning.return_value = {'TrainingJobArn': 'testarn',
'ResponseMetadata':
{'HTTPStatusCode': 200}}
self.sagemaker.execute(None)
mock_tuning.assert_called_once_with(create_tuning_params,
wait_for_completion=False,
check_interval=5,
max_ingestion_time=None
)
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, 'create_tuning_job')
def test_execute_with_failure(self, mock_tuning, mock_client):
mock_tuning.return_value = {'TrainingJobArn': 'testarn',
'ResponseMetadata':
{'HTTPStatusCode': 404}}
self.assertRaises(AirflowException, self.sagemaker.execute, None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/test/test_weakref.py | 23 | 50681 | import gc
import sys
import unittest
import UserList
import weakref
import operator
import contextlib
import copy
from test import test_support
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
def create_unbound_method():
return C.method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __ne__(self, other):
if isinstance(other, Object):
return self.arg != other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
self.check_basic_ref(create_unbound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
self.check_basic_callback(create_unbound_method)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
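# Standalone sketch of the property exercised above (not one of the original
# tests): by the time a weakref callback runs on CPython, every weak
# reference to the dead object already returns None.
def _callback_invalidation_sketch():
    import weakref
    class Target(object):
        pass
    seen = []
    def cb(ref):
        # Both refs are already invalidated when either callback fires.
        seen.append([r() for r in all_refs])
    t = Target()
    all_refs = [weakref.ref(t, cb), weakref.ref(t, cb)]
    del t
    return seen   # expected: [[None, None], [None, None]]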
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(weakref.ReferenceError, check, ref1)
self.assertRaises(weakref.ReferenceError, check, ref2)
self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = UserList.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
with test_support.check_py3k_warnings():
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = UserList.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = UserList.UserList(range(10))
p3 = weakref.proxy(L3)
with test_support.check_py3k_warnings():
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __unicode__(self):
return u"unicode"
instance = C()
self.assertIn("__unicode__", dir(weakref.proxy(instance)))
self.assertEqual(unicode(weakref.proxy(instance)), u"unicode")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that both old-style classes and new-style classes
# are weakrefable.
class A(object):
pass
class B:
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
b = weakref.ref(B, l.append)
B = None
gc.collect()
self.assertEqual(b(), None)
self.assertEqual(l, [a, b])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
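# Condensed illustration (not an original test) of the two comparison regimes
# described above: live weakrefs delegate equality to their referents, dead
# weakrefs fall back to identity.
def _weakref_equality_sketch():
    import gc, weakref
    class Box(object):
        def __init__(self, v):
            self.v = v
        def __eq__(self, other):
            return isinstance(other, Box) and self.v == other.v
        def __hash__(self):
            return hash(self.v)
    x, y = Box(1), Box(1)
    a, b = weakref.ref(x), weakref.ref(y)
    assert a == b            # alive: compares the referents
    del x, y
    gc.collect()
    assert not (a == b)      # dead: distinct ref objects are unequal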
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
# - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C(object):
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super(MyRef, self).__init__(ob, callback)
def __call__(self):
self.called = True
return super(MyRef, self).__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
# Bug #3110
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(i, o) for i, o in enumerate(items))
# Keep an iterator alive
it = dct.iteritems()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
list(it)
del it
gc.collect()
n2 = len(dct)
# iteration should prevent garbage collection here
# Note that this is a test on an implementation detail. The requirement
# is only to provide stable iteration, not that the size of the container
# stay fixed.
self.assertEqual(n1, 20)
#self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda n, k: (k, n))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda n, k: (n, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.iteritems()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
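# Minimal illustration (not an original test) of the behaviour exercised
# above: a WeakValueDictionary entry disappears as soon as the last strong
# reference to its value goes away (immediately so on CPython).
def _weak_value_dict_sketch():
    import weakref
    class Value(object):
        pass
    d = weakref.WeakValueDictionary()
    v = Value()
    d['k'] = v
    assert 'k' in d          # value still strongly referenced
    del v
    assert 'k' not in d      # entry vanished with its referent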
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.iterkeyrefs())), len(objects))
for wr in dict.iterkeyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = dict.items()
for item in dict.iteritems():
items.remove(item)
self.assertEqual(len(items), 0, "iteritems() did not touch all items")
# key iterator, via __iter__():
keys = dict.keys()
for k in dict:
keys.remove(k)
self.assertEqual(len(keys), 0, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = dict.keys()
for k in dict.iterkeys():
keys.remove(k)
self.assertEqual(len(keys), 0, "iterkeys() did not touch all keys")
# value iterator:
values = dict.values()
for v in dict.itervalues():
values.remove(v)
self.assertEqual(len(values), 0,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertFalse(k in dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertFalse(k in dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertFalse(k in dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.iteritems())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.iteritems())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = map(Object, range(self.COUNT))
for o in objects:
dict[o] = o.arg
return dict, objects
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = map(Object, range(self.COUNT))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict,
"mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict,
"original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(d.keys(), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(d.items(), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = d.keys()
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time thru the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print r() is obj
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print r()
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super(ExtendedRef, self).__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.iteritems():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super(ExtendedRef, self).__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print 'OK'
... else:
... print 'WeakValueDictionary error'
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
test_support.run_unittest(
ReferencesTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
)
test_support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
| gpl-2.0 |
michaelBenin/sqlalchemy | lib/sqlalchemy/ext/declarative/clsregistry.py | 80 | 10339 | # ext/declarative/clsregistry.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`.relationship` using strings.
"""
from ...orm.properties import ColumnProperty, RelationshipProperty, \
SynonymProperty
from ...schema import _get_table_key
from ...orm import class_mapper, interfaces
from ... import util
from ... import inspection
from ... import exc
import weakref
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
_registries = set()
def add_class(classname, cls):
"""Add a class to the _decl_class_registry associated with the
given declarative class.
"""
if classname in cls._decl_class_registry:
# class already exists.
existing = cls._decl_class_registry[classname]
if not isinstance(existing, _MultipleClassMarker):
existing = \
cls._decl_class_registry[classname] = \
_MultipleClassMarker([cls, existing])
else:
cls._decl_class_registry[classname] = cls
try:
root_module = cls._decl_class_registry['_sa_module_registry']
except KeyError:
cls._decl_class_registry['_sa_module_registry'] = \
root_module = _ModuleMarker('_sa_module_registry', None)
tokens = cls.__module__.split(".")
# build up a tree like this:
# modulename: myapp.snacks.nuts
#
# myapp->snack->nuts->(classes)
# snack->nuts->(classes)
# nuts->(classes)
#
# this allows partial token paths to be used.
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.add_class(classname, cls)
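# A rough illustration of the tree built above, reusing the comment's
# "myapp.snacks.nuts" module path and a hypothetical class named "Nut":
# because add_class() registers the class under every trailing portion of its
# module path, any of the following string lookups would resolve to it:
#
#     relationship("myapp.snacks.nuts.Nut")
#     relationship("snacks.nuts.Nut")
#     relationship("nuts.Nut")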
class _MultipleClassMarker(object):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
def __init__(self, classes, on_remove=None):
self.on_remove = on_remove
self.contents = set([
weakref.ref(item, self._remove_item) for item in classes])
_registries.add(self)
def __iter__(self):
return (ref() for ref in self.contents)
def attempt_get(self, path, key):
if len(self.contents) > 1:
raise exc.InvalidRequestError(
"Multiple classes found for path \"%s\" "
"in the registry of this declarative "
"base. Please use a fully module-qualified path." %
(".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref):
self.contents.remove(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item):
modules = set([cls().__module__ for cls in self.contents])
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table." % (
item.__module__,
item.__name__
)
)
self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
""""refers to a module name within
_decl_class_registry.
"""
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.contents = {}
self.mod_ns = _ModNS(self)
if self.parent:
self.path = self.parent.path + [self.name]
else:
self.path = []
_registries.add(self)
def __contains__(self, name):
return name in self.contents
def __getitem__(self, name):
return self.contents[name]
def _remove_item(self, name):
self.contents.pop(name, None)
if not self.contents and self.parent is not None:
self.parent._remove_item(self.name)
_registries.discard(self)
def resolve_attr(self, key):
return getattr(self.mod_ns, key)
def get_module(self, name):
if name not in self.contents:
marker = _ModuleMarker(name, self)
self.contents[name] = marker
else:
marker = self.contents[name]
return marker
def add_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.add_item(cls)
else:
existing = self.contents[name] = \
_MultipleClassMarker([cls],
on_remove=lambda: self._remove_item(name))
class _ModNS(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise AttributeError("Module %r has no mapped classes "
"registered under the name %r" % (self.__parent.name, key))
class _GetColumns(object):
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise exc.InvalidRequestError(
"Class %r does not have a mapped column named %r"
% (self.cls, key))
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NOT_EXTENSION:
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key)
return getattr(self.cls, key)
inspection._inspects(_GetColumns)(
lambda target: inspection.inspect(target.cls))
class _GetTable(object):
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[
_get_table_key(key, self.key)
]
def _determine_container(key, value):
if isinstance(value, _MultipleClassMarker):
value = value.attempt_get([], key)
return _GetColumns(value)
class _class_resolver(object):
def __init__(self, cls, prop, fallback, arg):
self.cls = cls
self.prop = prop
self.arg = self._declarative_arg = arg
self.fallback = fallback
self._dict = util.PopulateDict(self._access_cls)
self._resolvers = ()
def _access_cls(self, key):
cls = self.cls
if key in cls._decl_class_registry:
return _determine_container(key, cls._decl_class_registry[key])
elif key in cls.metadata.tables:
return cls.metadata.tables[key]
elif key in cls.metadata._schemas:
return _GetTable(key, cls.metadata)
elif '_sa_module_registry' in cls._decl_class_registry and \
key in cls._decl_class_registry['_sa_module_registry']:
registry = cls._decl_class_registry['_sa_module_registry']
return registry.resolve_attr(key)
elif self._resolvers:
for resolv in self._resolvers:
value = resolv(key)
if value is not None:
return value
return self.fallback[key]
def __call__(self):
try:
x = eval(self.arg, globals(), self._dict)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError as n:
raise exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
(self.prop.parent, self.arg, n.args[0], self.cls)
)
def _resolver(cls, prop):
import sqlalchemy
from sqlalchemy.orm import foreign, remote
fallback = sqlalchemy.__dict__.copy()
fallback.update({'foreign': foreign, 'remote': remote})
def resolve_arg(arg):
return _class_resolver(cls, prop, fallback, arg)
return resolve_arg
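# A rough usage sketch (class and column names hypothetical): for a property
# declared as relationship("Child", primaryjoin="Parent.id == Child.parent_id"),
# _deferred_relationship() below wraps each string-valued argument with
# resolve_arg(); when the mapper later calls the resulting _class_resolver, it
# eval()s the expression against the class registry, falling back to
# metadata.tables and ultimately to the sqlalchemy module namespace.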
def _deferred_relationship(cls, prop):
if isinstance(prop, RelationshipProperty):
resolve_arg = _resolver(cls, prop)
for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side'):
v = getattr(prop, attr)
if isinstance(v, util.string_types):
setattr(prop, attr, resolve_arg(v))
if prop.backref and isinstance(prop.backref, tuple):
key, kwargs = prop.backref
for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
'foreign_keys', 'remote_side', 'order_by'):
if attr in kwargs and isinstance(kwargs[attr], str):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
| mit |