repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
sencha/chromium-spacewalk | tools/perf/page_sets/gmail_alt_threadlist_conversation.py | 33 | 2558 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
def _CreateXpathFunction(xpath):
return ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue' % re.escape(xpath))
def _GetCurrentLocation(action_runner):
return action_runner.EvaluateJavaScript('document.location.href')
def _WaitForLocationChange(action_runner, old_href):
action_runner.WaitForJavaScriptCondition(
'document.location.href != "%s"' % old_href)
class GmailAltThreadlistConversationPage(
    page_module.Page):

  """ Why: Alternate between Inbox and the first email conversation. """

  def __init__(self, page_set):
    super(GmailAltThreadlistConversationPage, self).__init__(
        url='https://mail.google.com/mail/',
        page_set=page_set,
        name='gmail_alt_threadlist_conversation')
    # Static configuration consumed by the telemetry harness: credentials
    # file, desktop UA, and the recorded archive used during replay.
    self.credentials_path = 'data/credentials.json'
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/gmail_alt_threadlist_conversation.json'
    self.credentials = 'google'

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # Wait until GMail's JS hook object and the "gb" top-bar element exist,
    # i.e. the app has finished its initial load.
    action_runner.WaitForJavaScriptCondition(
        'window.gmonkey !== undefined && '
        'document.getElementById("gb") !== null')

  def RunEndure(self, action_runner):
    # One endure iteration: open the first conversation (clicking any sender
    # <span email=...>), then click back to the inbox link.  Each click is
    # confirmed by waiting for the URL to change, plus a 1 s settle pause.
    old_href = _GetCurrentLocation(action_runner)
    action_runner.ClickElement(
        element_function=_CreateXpathFunction('//span[@email]'))
    _WaitForLocationChange(action_runner, old_href)
    action_runner.Wait(1)
    old_href = _GetCurrentLocation(action_runner)
    action_runner.ClickElement(
        'a[href="https://mail.google.com/mail/u/0/?shva=1#inbox"]')
    _WaitForLocationChange(action_runner, old_href)
    action_runner.Wait(1)
class GmailAltThreadlistConversationPageSet(page_set_module.PageSet):

  """ Chrome Endure test for GMail. """

  def __init__(self):
    super(GmailAltThreadlistConversationPageSet, self).__init__(
        credentials_path='data/credentials.json',
        user_agent_type='desktop',
        archive_data_file='data/gmail_alt_threadlist_conversation.json',
        bucket=page_set_module.PUBLIC_BUCKET)
    # Single-page set: the whole endure scenario lives in one page object.
    self.AddPage(GmailAltThreadlistConversationPage(self))
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/numpy/lib/tests/test_type_check.py | 103 | 10247 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
)
def assert_all(x):
    """Assert that every element of *x* is truthy, reporting *x* on failure."""
    everything_true = np.all(x)
    assert_(everything_true, x)
class TestCommonType(TestCase):
    """common_type: minimal inexact scalar type covering an array's dtype."""

    def test_basic(self):
        real_values = [[1, 2], [3, 4]]
        complex_values = [[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]]
        cases = (
            (real_values, np.int32, np.float64),
            (real_values, np.float16, np.float16),
            (real_values, np.float32, np.float32),
            (real_values, np.float64, np.float64),
            (complex_values, np.csingle, np.csingle),
            (complex_values, np.cdouble, np.cdouble),
        )
        for values, dtype, expected in cases:
            arr = np.array(values, dtype=dtype)
            assert_(common_type(arr) == expected)
class TestMintypecode(TestCase):
    """mintypecode: smallest typecode from 'GDFgdf' covering the inputs."""

    # Integer-ish legacy typecodes; none are in the default typeset, so they
    # always fall back to the default 'd'.
    _int_codes = '1bcsuwil'

    def test_default_1(self):
        for itype in self._int_codes:
            assert_equal(mintypecode(itype), 'd')
        # The four inexact codes map to themselves.
        for code in 'fdFD':
            assert_equal(mintypecode(code), code)

    def test_default_2(self):
        # An integer code paired with an inexact code yields the inexact one.
        for itype in self._int_codes:
            for code in 'fdFD':
                assert_equal(mintypecode(itype + code), code)
        # All pairings of inexact codes; mixing double precision with a
        # complex code promotes to 'D'.  (Historically, savespace=1 kept
        # 'dF'/'Fd' at 'F'.)
        pair_cases = {
            'ff': 'f', 'fd': 'd', 'fF': 'F', 'fD': 'D',
            'df': 'd', 'dd': 'd', 'dF': 'D', 'dD': 'D',
            'Ff': 'F', 'Fd': 'D', 'FF': 'F', 'FD': 'D',
            'Df': 'D', 'Dd': 'D', 'DF': 'D', 'DD': 'D',
        }
        for codes, expected in sorted(pair_cases.items()):
            assert_equal(mintypecode(codes), expected)

    def test_default_3(self):
        # Triples, including integer codes mixed in.  (Historically,
        # savespace=1 kept 'fdF'/'idF' at 'F'.)
        triple_cases = {
            'fdF': 'D', 'fdD': 'D', 'fFD': 'D', 'dFD': 'D',
            'ifd': 'd', 'ifF': 'F', 'ifD': 'D',
            'idF': 'D', 'idD': 'D',
        }
        for codes, expected in sorted(triple_cases.items()):
            assert_equal(mintypecode(codes), expected)
class TestIsscalar(TestCase):
    """np.isscalar: numeric scalars vs. single-element containers."""

    def test_basic(self):
        # Plain ints, complex, longs and floats are all scalars...
        for scalar in (3, 3j, long(10), 4.0):
            assert_(np.isscalar(scalar))
        # ...but one-element sequences are not.
        for container in ([3], (3,)):
            assert_(not np.isscalar(container))
class TestReal(TestCase):
    """np.real: identity for real arrays, .real for complex arrays."""

    def test_real(self):
        data = np.random.rand(10,)
        assert_array_equal(data, np.real(data))

    def test_cmplx(self):
        data = np.random.rand(10,) + 1j * np.random.rand(10,)
        assert_array_equal(data.real, np.real(data))
class TestImag(TestCase):
    """np.imag: zeros for real arrays, .imag for complex arrays."""

    def test_real(self):
        data = np.random.rand(10,)
        assert_array_equal(0, np.imag(data))

    def test_cmplx(self):
        data = np.random.rand(10,) + 1j * np.random.rand(10,)
        assert_array_equal(data.imag, np.imag(data))
class TestIscomplex(TestCase):
    """iscomplex: elementwise 'has nonzero imaginary part' predicate."""

    def test_fail(self):
        """A purely real array has no complex entries."""
        z = np.array([-1, 0, 1])
        res = iscomplex(z)
        # np.sometrue was a long-deprecated alias of np.any and was removed
        # in NumPy 2.0; np.any is the exact equivalent.
        assert_(not np.any(res, axis=0))

    def test_pass(self):
        """Only the entry with a nonzero imaginary part is flagged."""
        z = np.array([-1j, 1, 0])
        res = iscomplex(z)
        assert_array_equal(res, [1, 0, 0])
class TestIsreal(TestCase):
    """isreal: elementwise 'has zero imaginary part' predicate."""

    def test_pass(self):
        flags = isreal(np.array([-1, 0, 1j]))
        assert_array_equal(flags, [1, 1, 0])

    def test_fail(self):
        flags = isreal(np.array([-1j, 1, 0]))
        assert_array_equal(flags, [0, 1, 1])
class TestIscomplexobj(TestCase):
    """iscomplexobj: dtype-level complexness, regardless of the values."""

    def test_basic(self):
        real_array = np.array([-1, 0, 1])
        assert_(not iscomplexobj(real_array))
        complex_array = np.array([-1j, 0, -1])
        assert_(iscomplexobj(complex_array))
class TestIsrealobj(TestCase):
    """isrealobj: true only for arrays with a non-complex dtype."""

    def test_basic(self):
        real_array = np.array([-1, 0, 1])
        assert_(isrealobj(real_array))
        complex_array = np.array([-1j, 0, -1])
        assert_(not isrealobj(complex_array))
class TestIsnan(TestCase):
    """np.isnan over finite, infinite, NaN, integer and complex inputs."""

    def _check(self, flags):
        # Inlined equivalent of the module-level assert_all() helper.
        assert_(np.all(flags), flags)

    def test_goodvalues(self):
        values = np.array((-1., 0., 1.))
        clean = np.isnan(values) == 0
        self._check(np.all(clean, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore'):
            self._check(np.isnan(np.array((1.,)) / 0.) == 0)

    def test_neginf(self):
        with np.errstate(divide='ignore'):
            self._check(np.isnan(np.array((-1.,)) / 0.) == 0)

    def test_ind(self):
        # 0/0 produces NaN ("indeterminate").
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isnan(np.array((0.,)) / 0.) == 1)

    def test_integer(self):
        self._check(np.isnan(1) == 0)

    def test_complex(self):
        self._check(np.isnan(1 + 1j) == 0)

    def test_complex1(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isnan(np.array(0 + 0j) / 0.) == 1)
class TestIsfinite(TestCase):
    """np.isfinite over finite values, infinities and NaN."""
    # Fixme, wrong place, isfinite now ufunc

    def _check(self, flags):
        # Inlined equivalent of the module-level assert_all() helper.
        assert_(np.all(flags), flags)

    def test_goodvalues(self):
        values = np.array((-1., 0., 1.))
        finite = np.isfinite(values) == 1
        self._check(np.all(finite, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isfinite(np.array((1.,)) / 0.) == 0)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isfinite(np.array((-1.,)) / 0.) == 0)

    def test_ind(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isfinite(np.array((0.,)) / 0.) == 0)

    def test_integer(self):
        self._check(np.isfinite(1) == 1)

    def test_complex(self):
        self._check(np.isfinite(1 + 1j) == 1)

    def test_complex1(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isfinite(np.array(1 + 1j) / 0.) == 0)
class TestIsinf(TestCase):
    """np.isinf over finite values, +/-inf (array and scalar) and NaN."""
    # Fixme, wrong place, isinf now ufunc

    def _check(self, flags):
        # Inlined equivalent of the module-level assert_all() helper.
        assert_(np.all(flags), flags)

    def test_goodvalues(self):
        values = np.array((-1., 0., 1.))
        not_inf = np.isinf(values) == 0
        self._check(np.all(not_inf, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isinf(np.array((1.,)) / 0.) == 1)

    def test_posinf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isinf(np.array(1.) / 0.) == 1)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isinf(np.array((-1.,)) / 0.) == 1)

    def test_neginf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isinf(np.array(-1.) / 0.) == 1)

    def test_ind(self):
        # NaN is not infinite.
        with np.errstate(divide='ignore', invalid='ignore'):
            self._check(np.isinf(np.array((0.,)) / 0.) == 0)
class TestIsposinf(TestCase):
    """isposinf flags exactly the +inf entries."""

    def test_generic(self):
        # (-1, 0, 1)/0 -> (-inf, nan, +inf); only the last is +inf.
        with np.errstate(divide='ignore', invalid='ignore'):
            flags = isposinf(np.array((-1., 0, 1)) / 0.)
        for got, expected in zip(flags, (0, 0, 1)):
            assert_(got == expected)
class TestIsneginf(TestCase):
    """isneginf flags exactly the -inf entries."""

    def test_generic(self):
        # (-1, 0, 1)/0 -> (-inf, nan, +inf); only the first is -inf.
        with np.errstate(divide='ignore', invalid='ignore'):
            flags = isneginf(np.array((-1., 0, 1)) / 0.)
        for got, expected in zip(flags, (1, 0, 0)):
            assert_(got == expected)
class TestNanToNum(TestCase):
    """nan_to_num: NaN -> 0 and +/-inf -> huge finite values of matching sign."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.)
        # -inf must become a very large *finite* negative number.  The
        # original chained the two checks with `and`, which short-circuited
        # on the first assert helper's None return, so the isfinite
        # assertions never actually ran.
        assert_(np.all(vals[0] < -1e10), vals)
        assert_(np.all(np.isfinite(vals[0])), vals)
        # NaN maps to exactly zero.
        assert_(vals[1] == 0)
        # +inf must become a very large finite positive number.
        assert_(np.all(vals[2] > 1e10), vals)
        assert_(np.all(np.isfinite(vals[2])), vals)

    def test_integer(self):
        # Integers pass through unchanged.
        vals = nan_to_num(1)
        assert_(np.all(vals == 1), vals)
        vals = nan_to_num([1])
        # np.int was a deprecated alias of the builtin int (removed in
        # NumPy 1.24); the builtin is what was always meant here.
        assert_array_equal(vals, np.array([1], int))

    def test_complex_good(self):
        vals = nan_to_num(1+1j)
        assert_(np.all(vals == 1+1j), vals)

    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0+1.j)/0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_(np.all(np.isfinite(vals)), vals)

    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1+1.j)/0.
        vals = nan_to_num(v)
        assert_(np.all(np.isfinite(vals)), vals)
        # Fixme
        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
        # !! This is actually (unexpectedly) positive
        # !! inf. Comment out for now, and see if it
        # !! changes
        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
class TestRealIfClose(TestCase):
    """real_if_close: drop tiny imaginary parts, honoring the tol argument."""

    def test_basic(self):
        base = np.random.rand(10)
        # A ~1e-15 imaginary part is within the default tolerance.
        nearly_real = real_if_close(base + 1e-15j)
        assert_(np.all(isrealobj(nearly_real)), nearly_real)
        assert_array_equal(base, nearly_real)
        # A ~1e-7 imaginary part is too large by default...
        still_complex = real_if_close(base + 1e-7j)
        assert_(np.all(iscomplexobj(still_complex)), still_complex)
        # ...but is dropped with an explicitly relaxed tolerance.
        forced_real = real_if_close(base + 1e-7j, tol=1e-6)
        assert_(np.all(isrealobj(forced_real)), forced_real)
class TestArrayConversion(TestCase):
    """asfarray should return an ndarray with some floating-point dtype."""

    def test_asfarray(self):
        a = asfarray(np.array([1, 2, 3]))
        assert_equal(a.__class__, np.ndarray)
        # np.float was a deprecated alias of the builtin float (removed in
        # NumPy 1.24); np.floating is the proper abstract scalar type for
        # an issubdtype check.
        assert_(np.issubdtype(a.dtype, np.floating))
if __name__ == "__main__":
    # Allow running this test module directly via numpy's legacy test runner.
    run_module_suite()
| apache-2.0 |
cryptickp/heat | heat_integrationtests/functional/test_default_parameters.py | 8 | 3035 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from heat_integrationtests.functional import functional_base
class DefaultParametersTest(functional_base.FunctionalTestsBase):
    """Check parameter-default precedence across a nested stack.

    Each scenario varies (a) the explicit ``parameters`` value, (b) the
    ``parameter_defaults`` value and (c) whether the nested template keeps
    its own default, then verifies the lengths of the two generated random
    strings match the expected precedence: parameters beat template
    defaults, and ``parameter_defaults`` propagate into nested stacks.
    """

    template = '''
heat_template_version: 2013-05-23
parameters:
  length:
    type: string
    default: 40
resources:
  random1:
    type: nested_random.yaml
  random2:
    type: OS::Heat::RandomString
    properties:
      length: {get_param: length}
outputs:
  random1:
    value: {get_attr: [random1, random1_value]}
  random2:
    value: {get_resource: random2}
'''
    nested_template = '''
heat_template_version: 2013-05-23
parameters:
  length:
    type: string
    default: 50
resources:
  random1:
    type: OS::Heat::RandomString
    properties:
      length: {get_param: length}
outputs:
  random1_value:
    value: {get_resource: random1}
'''

    # testscenarios-style matrix: (parameters, parameter_defaults,
    # nested-template-keeps-default, expected nested length, expected
    # top-level length).
    scenarios = [
        ('none', dict(param=None, default=None, temp_def=True,
                      expect1=50, expect2=40)),
        ('default', dict(param=None, default=12, temp_def=True,
                         expect1=12, expect2=12)),
        ('both', dict(param=15, default=12, temp_def=True,
                      expect1=12, expect2=15)),
        ('no_temp_default', dict(param=None, default=12, temp_def=False,
                                 expect1=12, expect2=12)),
    ]

    def setUp(self):
        super(DefaultParametersTest, self).setUp()

    def test_defaults(self):
        """Create the stack for one scenario and check both output lengths."""
        env = {'parameters': {}, 'parameter_defaults': {}}
        if self.param:
            env['parameters'] = {'length': self.param}
        if self.default:
            env['parameter_defaults'] = {'length': self.default}

        if not self.temp_def:
            # remove the default from the parameter in the nested template.
            # safe_load avoids constructing arbitrary Python objects;
            # yaml.load without an explicit Loader is unsafe and deprecated,
            # and the plain mapping produced by safe_load is all that is
            # needed here.
            ntempl = yaml.safe_load(self.nested_template)
            del ntempl['parameters']['length']['default']
            nested_template = yaml.dump(ntempl)
        else:
            nested_template = self.nested_template

        stack_identifier = self.stack_create(
            template=self.template,
            files={'nested_random.yaml': nested_template},
            environment=env
        )

        stack = self.client.stacks.get(stack_identifier)
        for out in stack.outputs:
            if out['output_key'] == 'random1':
                self.assertEqual(self.expect1, len(out['output_value']))
            if out['output_key'] == 'random2':
                self.assertEqual(self.expect2, len(out['output_value']))
| apache-2.0 |
jch1/models | object_detection/anchor_generators/grid_anchor_generator_test.py | 21 | 2950 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.grid_anchor_generator."""
import tensorflow as tf
from object_detection.anchor_generators import grid_anchor_generator
class GridAnchorGeneratorTest(tf.test.TestCase):

  def test_construct_single_anchor(self):
    """Builds a 1x1 anchor grid to test the size of the output boxes."""
    scales = [0.5, 1.0, 2.0]
    aspect_ratios = [0.25, 1.0, 4.0]
    anchor_offset = [7, -3]
    # Expected corners for all 9 scale/aspect-ratio combinations, centered
    # on the offset.  NOTE(review): presumably [ymin, xmin, ymax, xmax] —
    # confirm against the project's box-list convention.
    exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
                          [-505, -131, 519, 125], [-57, -67, 71, 61],
                          [-121, -131, 135, 125], [-249, -259, 263, 253],
                          [-25, -131, 39, 125], [-57, -259, 71, 253],
                          [-121, -515, 135, 509]]
    anchor_generator = grid_anchor_generator.GridAnchorGenerator(
        scales, aspect_ratios,
        anchor_offset=anchor_offset)
    anchors = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
    anchor_corners = anchors.get()
    # TF1-style evaluation: materialize the corner tensor in a session.
    with self.test_session():
      anchor_corners_out = anchor_corners.eval()
      self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_construct_anchor_grid(self):
    """Checks all anchors of a 2x2 grid with explicit stride and offset."""
    base_anchor_size = [10, 10]
    anchor_stride = [19, 19]
    anchor_offset = [0, 0]
    scales = [0.5, 1.0, 2.0]
    aspect_ratios = [1.0]
    # 2x2 grid positions x 3 scales = 12 expected boxes.
    exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
                          [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
                          [-5., 14., 5, 24], [-10., 9., 10, 29],
                          [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
                          [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
                          [14., 14., 24, 24], [9., 9., 29, 29]]
    anchor_generator = grid_anchor_generator.GridAnchorGenerator(
        scales,
        aspect_ratios,
        base_anchor_size=base_anchor_size,
        anchor_stride=anchor_stride,
        anchor_offset=anchor_offset)
    anchors = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
    anchor_corners = anchors.get()
    with self.test_session():
      anchor_corners_out = anchor_corners.eval()
      self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
  # Hand off to TensorFlow's unittest-based test runner when run directly.
  tf.test.main()
| apache-2.0 |
oe-alliance/oe-alliance-enigma2 | lib/python/Screens/Timershift.py | 16 | 11636 | from Screens.Screen import Screen
from Screens.Setup import setupdom
from Screens.LocationBox import TimeshiftLocationBox
from Screens.MessageBox import MessageBox
from Components.Label import Label
from Components.config import config, configfile, ConfigYesNo, ConfigNothing, ConfigSelection, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.Pixmap import Pixmap
from Tools.Directories import fileExists
from Components.Sources.Boolean import Boolean
from Components.Sources.StaticText import StaticText
from Components.SystemInfo import SystemInfo
class SetupSummary(Screen):
    """Summary (LCD) screen mirroring the parent setup screen's selection.

    Shows the parent's title plus the entry name and value under the cursor,
    and keeps them in sync while the parent's selection or values change.
    """

    def __init__(self, session, parent):
        Screen.__init__(self, session, parent = parent)
        self["SetupTitle"] = StaticText(_(parent.setup_title))
        self["SetupEntry"] = StaticText("")
        self["SetupValue"] = StaticText("")
        # Attach/detach the change listeners together with screen visibility
        # so a hidden summary does not keep stale callbacks registered.
        self.onShow.append(self.addWatcher)
        self.onHide.append(self.removeWatcher)

    def addWatcher(self):
        # Follow both value changes and cursor movement in the parent, and
        # render the current state immediately.
        self.parent.onChangedEntry.append(self.selectionChanged)
        self.parent["config"].onSelectionChanged.append(self.selectionChanged)
        self.selectionChanged()

    def removeWatcher(self):
        self.parent.onChangedEntry.remove(self.selectionChanged)
        self.parent["config"].onSelectionChanged.remove(self.selectionChanged)

    def selectionChanged(self):
        self["SetupEntry"].text = self.parent.getCurrentEntry()
        self["SetupValue"].text = self.parent.getCurrentValue()
        # Only some parents provide a description; update it when available.
        if hasattr(self.parent,"getCurrentDescription"):
            self.parent["description"].text = self.parent.getCurrentDescription()
class TimeshiftSettings(Screen,ConfigListScreen):
    """Setup screen for timeshift options.

    Built from the <setup key="timeshift"> node of setup.xml, with an extra
    "Timeshift location" entry whose value is validated to be a writable
    EXT2/EXT3/EXT4 or NFS mount point before it can be saved.
    """

    def removeNotifier(self):
        # Undo the setup-level notifier registered lazily in addItems().
        config.usage.setup_level.notifiers.remove(self.levelChanged)

    def levelChanged(self, configElement):
        # Rebuild the list when the user's setup level changes, since items
        # above the current level are filtered out in addItems().
        list = []
        self.refill(list)
        self["config"].setList(list)

    def refill(self, list):
        """Append all applicable entries of the 'timeshift' setup node to
        *list* and pick up the node's title and separation attributes."""
        xmldata = setupdom().getroot()
        for x in xmldata.findall("setup"):
            if x.get("key") != self.setup:
                continue
            self.addItems(list, x)
            self.setup_title = x.get("title", "").encode("UTF-8")
            self.seperation = int(x.get('separation', '0'))

    def __init__(self, session):
        Screen.__init__(self, session)
        # Reuse the generic Setup skin for this screen.
        self.skinName = "Setup"
        self['footnote'] = Label()
        self["HelpWindow"] = Pixmap()
        self["HelpWindow"].hide()
        self["VKeyIcon"] = Boolean(False)

        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Save"))
        self["description"] = Label(_(""))

        # Callbacks fired whenever an entry changes (used by the summary).
        self.onChangedEntry = [ ]
        self.setup = "timeshift"
        list = []
        ConfigListScreen.__init__(self, list, session = session, on_change = self.changedEntry)
        self.createSetup()

        self["setupActions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
            {
                "green": self.keySave,
                "red": self.keyCancel,
                "cancel": self.keyCancel,
                "ok": self.ok,
                "menu": self.closeRecursive,
            }, -2)
        self.onLayoutFinish.append(self.layoutFinished)

    # for summary:
    def changedEntry(self):
        self.item = self["config"].getCurrent()
        # Re-validate the location whenever the location entry is touched.
        if self["config"].getCurrent()[0] == _("Timeshift location"):
            self.checkReadWriteDir(self["config"].getCurrent()[1])
        for x in self.onChangedEntry:
            x()
        # Selection/YesNo entries may enable or disable other entries, so
        # rebuild the list.  NOTE(review): the bare except silently hides
        # any error raised while rebuilding.
        try:
            if isinstance(self["config"].getCurrent()[1], ConfigYesNo) or isinstance(self["config"].getCurrent()[1], ConfigSelection):
                self.createSetup()
        except:
            pass

    def getCurrentEntry(self):
        # Label of the currently selected entry (empty when list is empty).
        return self["config"].getCurrent() and self["config"].getCurrent()[0] or ""

    def getCurrentValue(self):
        # Textual value of the currently selected entry.
        return self["config"].getCurrent() and str(self["config"].getCurrent()[1].getText()) or ""

    def getCurrentDescription(self):
        # Optional third tuple element carries the description text.
        return self["config"].getCurrent() and len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2] or ""

    def checkReadWriteDir(self, configele):
        """Validate *configele*.value as a writable directory on a supported
        filesystem; on failure revert the value and show an error box.

        Returns True when the directory is acceptable, False otherwise.
        NOTE(review): this mount-point scan is duplicated in
        dirnameSelected() and keySave() and could be factored out.
        """
        import os.path
        import Components.Harddisk
        supported_filesystems = frozenset(('ext4', 'ext3', 'ext2', 'nfs'))
        candidates = []
        mounts = Components.Harddisk.getProcMounts()
        for partition in Components.Harddisk.harddiskmanager.getMountedPartitions(False, mounts):
            if partition.filesystem(mounts) in supported_filesystems:
                candidates.append((partition.description, partition.mountpoint))
        if candidates:
            locations = []
            for validdevice in candidates:
                locations.append(validdevice[1])
            # Accept the path if its real mount point (with or without a
            # trailing slash) is one of the supported mounts.
            if Components.Harddisk.findMountPoint(os.path.realpath(configele.value))+'/' in locations or Components.Harddisk.findMountPoint(os.path.realpath(configele.value)) in locations:
                if fileExists(configele.value, "w"):
                    # Remember the last known-good value for later reverts.
                    configele.last_value = configele.value
                    return True
                else:
                    dir = configele.value
                    configele.value = configele.last_value
                    self.session.open(
                        MessageBox,
                        _("The directory %s is not writable.\nMake sure you select a writable directory instead.")%dir,
                        type = MessageBox.TYPE_ERROR
                    )
                    return False
            else:
                dir = configele.value
                configele.value = configele.last_value
                self.session.open(
                    MessageBox,
                    _("The directory %s is not a EXT2, EXT3, EXT4 or NFS partition.\nMake sure you select a valid partition type.")%dir,
                    type = MessageBox.TYPE_ERROR
                )
                return False
        else:
            # No supported partitions mounted at all.
            dir = configele.value
            configele.value = configele.last_value
            self.session.open(
                MessageBox,
                _("The directory %s is not a EXT2, EXT3, EXT4 or NFS partition.\nMake sure you select a valid partition type.")%dir,
                type = MessageBox.TYPE_ERROR
            )
            return False

    def createSetup(self):
        """(Re)build the config list: the location chooser plus all entries
        from setup.xml."""
        default = config.usage.timeshift_path.value
        tmp = config.usage.allowed_timeshift_paths.value
        # Make sure the current path is offered even if it is not in the
        # allowed list (copy before appending to avoid mutating the config).
        if default not in tmp:
            tmp = tmp[:]
            tmp.append(default)
        # print "TimeshiftPath: ", default, tmp
        self.timeshift_dirname = ConfigSelection(default = default, choices = tmp)
        self.timeshift_dirname.addNotifier(self.checkReadWriteDir, initial_call=False, immediate_feedback=False)
        list = []
        self.timeshift_entry = getConfigListEntry(_("Timeshift location"), self.timeshift_dirname, _("Set the default location for your timeshift-files. Press 'OK' to add new locations, select left/right to select an existing location."))
        list.append(self.timeshift_entry)
        self.refill(list)
        self["config"].setList(list)
        if config.usage.sort_settings.value:
            self["config"].list.sort()

    def layoutFinished(self):
        self.setTitle(_(self.setup_title))

    def ok(self):
        # OK on the location entry opens the location browser; the result
        # arrives in dirnameSelected().
        currentry = self["config"].getCurrent()
        self.lastvideodirs = config.movielist.videodirs.value
        self.lasttimeshiftdirs = config.usage.allowed_timeshift_paths.value
        if currentry == self.timeshift_entry:
            self.entrydirname = self.timeshift_dirname
            config.usage.timeshift_path.value = self.timeshift_dirname.value
            self.session.openWithCallback(
                self.dirnameSelected,
                TimeshiftLocationBox
            )

    def dirnameSelected(self, res):
        """Callback from TimeshiftLocationBox; *res* is the chosen path or
        None when the dialog was cancelled."""
        if res is not None:
            import os.path
            import Components.Harddisk
            supported_filesystems = frozenset(('ext4', 'ext3', 'ext2', 'nfs'))
            candidates = []
            mounts = Components.Harddisk.getProcMounts()
            for partition in Components.Harddisk.harddiskmanager.getMountedPartitions(False, mounts):
                if partition.filesystem(mounts) in supported_filesystems:
                    candidates.append((partition.description, partition.mountpoint))
            if candidates:
                locations = []
                for validdevice in candidates:
                    locations.append(validdevice[1])
                if Components.Harddisk.findMountPoint(os.path.realpath(res))+'/' in locations or Components.Harddisk.findMountPoint(os.path.realpath(res)) in locations:
                    self.entrydirname.value = res
                    # If the browser added new allowed paths, refresh the
                    # selection's choice list to include them.
                    if config.usage.allowed_timeshift_paths.value != self.lasttimeshiftdirs:
                        tmp = config.usage.allowed_timeshift_paths.value
                        default = self.timeshift_dirname.value
                        if default not in tmp:
                            tmp = tmp[:]
                            tmp.append(default)
                        self.timeshift_dirname.setChoices(tmp, default=default)
                        self.entrydirname.value = res
                else:
                    self.session.open(
                        MessageBox,
                        _("The directory %s is not a EXT2, EXT3, EXT4 or NFS partition.\nMake sure you select a valid partition type.")%res,
                        type = MessageBox.TYPE_ERROR
                    )
            else:
                self.session.open(
                    MessageBox,
                    _("The directory %s is not a EXT2, EXT3, EXT4 or NFS partition.\nMake sure you select a valid partition type.")%res,
                    type = MessageBox.TYPE_ERROR
                )

    def saveAll(self):
        # Persist every entry of the list and flush the config file.
        for x in self["config"].list:
            x[1].save()
        configfile.save()

    # keySave and keyCancel are just provided in case you need them.
    # you have to call them by yourself.
    def keySave(self):
        """Green/save: persist settings, but only accept the timeshift path
        if it still points at a supported partition; otherwise disable the
        timeshift autostart delay before saving."""
        import os.path
        import Components.Harddisk
        supported_filesystems = frozenset(('ext4', 'ext3', 'ext2', 'nfs'))
        candidates = []
        mounts = Components.Harddisk.getProcMounts()
        for partition in Components.Harddisk.harddiskmanager.getMountedPartitions(False, mounts):
            if partition.filesystem(mounts) in supported_filesystems:
                candidates.append((partition.description, partition.mountpoint))
        if candidates:
            locations = []
            for validdevice in candidates:
                locations.append(validdevice[1])
            if Components.Harddisk.findMountPoint(os.path.realpath(config.usage.timeshift_path.value))+'/' in locations or Components.Harddisk.findMountPoint(os.path.realpath(config.usage.timeshift_path.value)) in locations:
                config.usage.timeshift_path.value = self.timeshift_dirname.value
                config.usage.timeshift_path.save()
                self.saveAll()
                self.close()
            else:
                # Path invalid: only block saving when timeshift autostart
                # is actually enabled, otherwise reset the delay and save.
                if int(config.timeshift.startdelay.value) > 0:
                    self.session.open(
                        MessageBox,
                        _("The directory %s is not a EXT2, EXT3, EXT4 or NFS partition.\nMake sure you select a valid partition type.")%config.usage.timeshift_path.value,
                        type = MessageBox.TYPE_ERROR
                    )
                else:
                    config.timeshift.startdelay.setValue(0)
                    self.saveAll()
                    self.close()
        else:
            # NOTE(review): this branch duplicates the one above verbatim.
            if int(config.timeshift.startdelay.value) > 0:
                self.session.open(
                    MessageBox,
                    _("The directory %s is not a EXT2, EXT3, EXT4 or NFS partition.\nMake sure you select a valid partition type.")%config.usage.timeshift_path.value,
                    type = MessageBox.TYPE_ERROR
                )
            else:
                config.timeshift.startdelay.setValue(0)
                self.saveAll()
                self.close()

    def cancelConfirm(self, result):
        # Discard all pending changes when the user confirmed the cancel.
        if not result:
            return
        for x in self["config"].list:
            x[1].cancel()
        self.close()

    def keyCancel(self):
        # Ask for confirmation only when something was actually changed.
        if self["config"].isChanged():
            self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), default = False)
        else:
            self.close()

    def createSummary(self):
        return SetupSummary

    def addItems(self, list, parentNode):
        """Translate <item> children of *parentNode* (setup.xml) into config
        list entries, honoring setup level and 'requires' conditions."""
        for x in parentNode:
            if not x.tag:
                continue
            if x.tag == 'item':
                item_level = int(x.get("level", 0))

                # Lazily register for setup-level changes so the list can be
                # refiltered; removed again via onClose.
                if not self.levelChanged in config.usage.setup_level.notifiers:
                    config.usage.setup_level.notifiers.append(self.levelChanged)
                    self.onClose.append(self.removeNotifier)

                if item_level > config.usage.setup_level.index:
                    continue

                requires = x.get("requires")
                # A 'requires' starting with "config." is evaluated live and
                # cached in SystemInfo; otherwise SystemInfo is consulted
                # directly.  NOTE(review): eval() runs text from setup.xml —
                # acceptable only because that file ships with the image.
                if requires and requires.startswith('config.'):
                    item = eval(requires or "")
                    if item.value and not item.value == "0":
                        SystemInfo[requires] = True
                    else:
                        SystemInfo[requires] = False

                if requires and not SystemInfo.get(requires, False):
                    continue

                item_text = _(x.get("text", "??").encode("UTF-8"))
                item_description = _(x.get("description", " ").encode("UTF-8"))
                # The element text is a Python expression yielding the config
                # element for this entry.
                b = eval(x.text or "")
                if b == "":
                    continue
                #add to configlist
                item = b
                # the first b is the item itself, ignored by the configList.
                # the second one is converted to string.
                if not isinstance(item, ConfigNothing):
                    list.append((item_text, item, item_description))
| gpl-2.0 |
HyperBaton/ansible | lib/ansible/plugins/action/fail.py | 122 | 1477 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    # This action never copies files to the managed host.
    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('msg',))

    def run(self, tmp=None, task_vars=None):
        """Always mark the task failed, using the user's msg when given."""
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        failure_message = 'Failed as requested from task'
        args = self._task.args
        if args and 'msg' in args:
            failure_message = args['msg']

        result['failed'] = True
        result['msg'] = failure_message
        return result
| gpl-3.0 |
#!/usr/bin/python3
# Build the "super_index": for every non-stop word, the list of
# representative documents containing it, each paired with a term frequency
# normalized by the number of versions the document represents.
import pickle
import sys

path_to_target_dir = "./target/"
path_to_utils_dir = "./src/utils/"

## change your whole dataset path here
path_data = sys.argv[1]

# dic: version id -> representative document id
# dv_dic: representative document id -> number of versions it represents
# (both parsed from convert_table.txt: "<did> <count> <vid> <vid> ...")
dic = {}
dv_dic = {}
with open(path_to_target_dir + 'convert_table.txt', 'r') as fin_vd:
    for line_dv in fin_vd:
        line_dv_list = line_dv.split()
        did = int(line_dv_list[0])
        dv_dic[did] = int(line_dv_list[1])
        for i in range(2, len(line_dv_list)):
            dic[int(line_dv_list[i])] = did

# read stop words
stop_words = set()
with open(path_to_utils_dir + 'stop_words.txt', 'r') as f:
    for line in f:
        stop_words.add(line.strip())

# create a hash for each did a hash of word => frequency
print('Creating word unions ...')
# dict_d_tf: a dictionary, key is did, value is a dictionary dict_tf;
# dict_tf: a dictionary, key is word, value is frequency
dict_d_tf = {}
with open(path_data, 'r') as fin:
    # BUG FIX: the original incremented vid manually at the bottom of the
    # loop, but `continue` on a missing version skipped the increment and
    # desynchronized every subsequent line's version id.  enumerate() keeps
    # the counter in lockstep with the lines.
    for vid, line_data in enumerate(fin):
        did = dic.get(vid)
        if did is None:
            print("version {} not found!".format(str(vid)))
            continue
        assert isinstance(line_data, str)
        words = line_data.split()
        dict_tf = dict_d_tf.setdefault(did, {})
        for word in words:
            dict_tf[word] = dict_tf.get(word, 0) + 1
        if vid > 0 and vid % 10000 == 0:
            print(vid, 'lines processed.')

# Invert dict_d_tf into word -> list of document ids containing the word.
index = {}
for doc_id in dict_d_tf:
    for word in dict_d_tf[doc_id]:
        # BUG FIX: the original created an *empty* list on first sight of a
        # word and only appended on later documents, silently dropping the
        # first document from every posting list.
        index.setdefault(word, []).append(doc_id)

print('Writing super-version ...')
# `with` guarantees the output file is flushed and closed (the original
# never closed it).
with open(path_to_target_dir + 'super_index', 'w') as f:
    for word in index:
        if len(word) > 0 and len(index[word]) > 0 and word not in stop_words:
            # The stop-word check above already excludes stop words, so the
            # per-posting re-check from the original is unnecessary.
            tf_list = ['{did}:{tf:.2f}'.format(
                did=doc_id,
                tf=dict_d_tf[doc_id][word] / float(dv_dic.get(doc_id, 1)))
                for doc_id in index[word]]
            f.write('{w}\t{tf}'.format(w=word, tf=' '.join(tf_list)))
            f.write('\n')
ivan73/smarthome | plugins/dlms/__init__.py | 4 | 7029 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2013 KNX-User-Forum e.V. http://knx-user-forum.de/
#########################################################################
# DLMS plugin for SmartHome.py. http://mknx.github.io/smarthome/
#
# This plugin is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This plugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this plugin. If not, see <http://www.gnu.org/licenses/>.
#########################################################################
import logging
import time
import serial
import re
logger = logging.getLogger('DLMS')
class DLMS():
    """Read out a DLMS / IEC 62056-21 ("mode C") smart meter over a serial line.

    The meter is woken with the standard ``/?!`` identification request at
    300 Baud (7E1); the meter's baudrate capability is parsed from byte 4 of
    the identification response, the data readout is requested (optionally
    switching to the negotiated higher baudrate), and the OBIS ``code(value)``
    lines of the readout are dispatched to every item configured with
    ``dlms_obis_code``.
    """

    def __init__(self, smarthome, serialport, baudrate="auto", update_cycle="60", use_checksum=True, reset_baudrate=True, no_waiting=False):
        """Open the serial port and prepare the protocol messages.

        smarthome      -- SmartHome.py core object (scheduler, helpers)
        serialport     -- device name of the serial port, e.g. /dev/ttyUSB0
        baudrate       -- 'auto' to use the rate offered by the meter,
                          otherwise a fixed rate as string/int
        update_cycle   -- polling interval in seconds
        use_checksum   -- verify STX/ETX framing and the XOR block checksum
        reset_baudrate -- drop back to 300 Baud before every readout
        no_waiting     -- skip the protocol guard delays around the request
        """
        self._sh = smarthome
        # obis code -> {'items': [items to update], 'logics': []}
        self._obis_codes = {}
        self._init_seq = bytes('/?!\r\n', 'ascii')          # IEC 62056-21 request message
        self._request = bytearray('\x06000\r\n', 'ascii')   # ACK / option select message
        if (baudrate.lower() == 'auto'):
            self._baudrate = -1   # negotiate from the meter's identification
        else:
            self._baudrate = int(baudrate)
            # Encode the rate as the protocol's baudrate character:
            # '0' = 300 Baud, each increment doubles the rate.
            pow2 = int(self._baudrate / 600)
            self._request[2] = 0x30
            while (pow2 > 0):
                pow2 >>= 1
                self._request[2] += 1
        self._update_cycle = int(update_cycle)
        self._use_checksum = smarthome.string2bool(use_checksum)
        self._reset_baudrate = smarthome.string2bool(reset_baudrate)
        self._no_waiting = smarthome.string2bool(no_waiting)
        # Mode C always starts the handshake at 300 Baud, 7 data bits, even parity.
        self._serial = serial.Serial(serialport, 300, bytesize=serial.SEVENBITS, parity=serial.PARITY_EVEN, timeout=2)

    def run(self):
        """Start cyclic polling via the SmartHome.py scheduler."""
        self.alive = True
        self._sh.scheduler.add('DLMS', self._update_values, prio=5, cycle=self._update_cycle)

    def stop(self):
        """Stop polling and release the serial port."""
        self.alive = False
        self._serial.close()
        self._sh.scheduler.remove('DLMS')

    def _update_values(self):
        """Perform one complete readout cycle and dispatch the values."""
        logger.debug("dlms: update")
        start = time.time()
        # --- phase 1: identification handshake -------------------------------
        try:
            if self._reset_baudrate:
                self._serial.baudrate = 300
                logger.debug("dlms: (re)set baudrate to 300 Baud")
            self._serial.write(self._init_seq)
            self._serial.drainOutput()
            self._serial.flushInput()
            response = bytes()
            prev_length = 0
            while self.alive:
                response += self._serial.read()
                length = len(response)
                # break if timeout or newline-character
                if (response[-1] == 0x0a):
                    break
                if (length == prev_length):
                    logger.warning("dlms: read timeout! - response={}".format(response))
                    return
                prev_length = length
        except Exception as e:
            logger.warning("dlms: {0}".format(e))
            # BUG FIX: abort the cycle here. 'response' may not even be bound
            # yet (e.g. write() failed), which made the check below raise a
            # NameError; the second exchange further down already returns in
            # the equivalent situation.
            return
        # Byte 4 of the identification carries the baudrate character ('0'..'5').
        if (len(response) < 5) or ((response[4] - 0x30) not in range(6)):
            logger.warning("dlms: malformed response to init seq={}".format(response))
            return
        if (self._baudrate == -1):
            self._baudrate = 300 * (1 << (response[4] - 0x30))
            logger.debug("dlms: meter returned capability for {} Baud".format(self._baudrate))
            self._request[2] = response[4]
        # --- phase 2: request and read the data message ----------------------
        try:
            if not self._no_waiting:
                time.sleep(0.5)
            self._serial.write(self._request)
            if not self._no_waiting:
                time.sleep(0.25)
            self._serial.drainOutput()
            self._serial.flushInput()
            if (self._baudrate != self._serial.baudrate):
                # change request to set higher baudrate
                logger.debug("dlms: switching to {} Baud".format(self._baudrate))
                self._serial.baudrate = self._baudrate
            response = bytes()
            prev_length = 0
            while self.alive:
                response += self._serial.read()
                length = len(response)
                # ETX (0x03) terminates the message; with checksum enabled one
                # BCC byte follows the ETX.
                if (not self._use_checksum and (response[-1] == 0x03)) or ((length > 1) and (response[-2] == 0x03)):
                    break
                if (length == prev_length):
                    logger.warning("dlms: read timeout! - response={}".format(response))
                    return
                prev_length = length
        except Exception as e:
            logger.warning("dlms: {0}".format(e))
            return
        logger.debug("dlms: reading took: {:.2f}s".format(time.time() - start))
        if self._use_checksum:
            # perform checks (start with STX, end with ETX, checksum match)
            # BCC is the XOR over everything after STX, including itself -> 0.
            checksum = 0
            for i in response[1:]:
                checksum ^= i
            if (len(response) < 5) or (response[0] != 0x02) or (response[-2] != 0x03) or (checksum != 0x00):
                logger.warning("dlms: checksum/protocol error: response={} checksum={}".format(' '.join(hex(i) for i in response), checksum))
                return
        # --- phase 3: parse 'code(value[*unit])' lines and update items ------
        for line in re.split('\r\n', str(response[1:-4], 'ascii')):
            # if re.match('[0-9]+\.[0-9]\.[0-9](.+)', line): # allows only
            # x.y.z(foo)
            if re.match('[0-9]+\.[0-9].+(.+)', line):  # allows also x.y(foo)
                try:
                    data = line.split('(')
                    data[1:3] = data[1].strip(')').split('*')
                    if (len(data) == 2):
                        logger.debug("dlms: {} = {}".format(data[0], data[1]))
                    else:
                        logger.debug("dlms: {} = {} {}".format(data[0], data[1], data[2]))
                    if data[0] in self._obis_codes:
                        for item in self._obis_codes[data[0]]['items']:
                            item(data[1], 'DLMS', 'OBIS {}'.format(data[0]))
                except Exception as e:
                    logger.warning("dlms: line={} exception={}".format(line, e))

    def parse_item(self, item):
        """Register items carrying a 'dlms_obis_code' attribute.

        Returns None: values are pushed to the item, no update method needed.
        """
        if 'dlms_obis_code' in item.conf:
            logger.debug("parse item: {0}".format(item))
            obis_code = item.conf['dlms_obis_code']
            if obis_code not in self._obis_codes:
                self._obis_codes[obis_code] = {'items': [item], 'logics': []}
            else:
                self._obis_codes[obis_code]['items'].append(item)
        return None
# license: gpl-3.0
# source: ocefpaf/iris | lib/iris/fileformats/um_cf_map.py
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
#
# DO NOT EDIT: AUTO-GENERATED
# Created on 29 November 2019 14:11 from
# http://www.metarelate.net/metOcean
# at commit 448f2ef5e676edaaa27408b9f3ddbecbf05e3289
#
# https://github.com/metarelate/metOcean/commit/448f2ef5e676edaaa27408b9f3ddbecbf05e3289
"""
Provides UM/CF phenomenon translations.
"""
from collections import namedtuple
CFName = namedtuple('CFName', 'standard_name long_name units')
# Mapping from UM PP field code (LBFC) to the CF phenomenon identity
# (standard_name, long_name, units) used when loading UM fields.
# NOTE: auto-generated from the metarelate metOcean store (see the header
# of this file) -- do not edit individual entries by hand.
LBFC_TO_CF = {
    5: CFName('atmosphere_boundary_layer_thickness', None, 'm'),
    16: CFName('air_temperature', None, 'K'),
    23: CFName('soil_temperature', None, 'K'),
    27: CFName('air_density', None, 'kg m-3'),
    36: CFName('land_area_fraction', None, '1'),
    37: CFName('sea_ice_area_fraction', None, '1'),
    50: CFName('wind_speed', None, 'm s-1'),
    56: CFName('x_wind', None, 'm s-1'),
    57: CFName('y_wind', None, 'm s-1'),
    73: CFName('atmosphere_relative_vorticity', None, 's-1'),
    74: CFName('divergence_of_wind', None, 's-1'),
    83: CFName('potential_vorticity_of_atmosphere_layer', None, 'Pa-1 s-1'),
    94: CFName('convective_rainfall_amount', None, 'kg m-2'),
    97: CFName('rainfall_flux', None, 'kg m-2 s-1'),
    102: CFName('stratiform_rainfall_amount', None, 'kg m-2'),
    108: CFName('snowfall_flux', None, 'kg m-2 s-1'),
    111: CFName('surface_runoff_amount', None, 'kg m-2'),
    116: CFName('stratiform_snowfall_amount', None, 'kg m-2'),
    117: CFName('convective_snowfall_amount', None, 'kg m-2'),
    122: CFName('moisture_content_of_soil_layer', None, 'kg m-2'),
    183: CFName('wind_speed', None, 'm s-1'),
    200: CFName('toa_incoming_shortwave_flux', None, 'W m-2'),
    203: CFName('surface_downwelling_shortwave_flux_in_air', None, 'W m-2'),
    206: CFName('toa_outgoing_longwave_flux', None, 'W m-2'),
    208: CFName('surface_downwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
    209: CFName('sea_ice_temperature', None, 'K'),
    253: CFName('tendency_of_air_temperature_due_to_longwave_heating', None, 'K s-1'),
    261: CFName('downward_heat_flux_in_sea_ice', None, 'W m-2'),
    321: CFName('root_depth', None, 'm'),
    326: CFName('vegetation_area_fraction', None, '1'),
    328: CFName('surface_albedo_assuming_deep_snow', None, '1'),
    329: CFName('volume_fraction_of_condensed_water_in_soil_at_wilting_point', None, '1'),
    330: CFName('volume_fraction_of_condensed_water_in_soil_at_critical_point', None, '1'),
    332: CFName('soil_porosity', None, '1'),
    333: CFName('soil_hydraulic_conductivity_at_saturation', None, 'm s-1'),
    335: CFName('soil_thermal_capacity', None, 'J kg-1 K-1'),
    336: CFName('soil_thermal_conductivity', None, 'W m-1 K-1'),
    342: CFName('soil_suction_at_saturation', None, 'Pa'),
    687: CFName('sea_ice_thickness', None, 'm'),
    701: CFName('surface_eastward_sea_water_velocity', None, 'm s-1'),
    702: CFName('surface_northward_sea_water_velocity', None, 'm s-1'),
    1025: CFName('surface_downward_eastward_stress', None, 'Pa'),
    1026: CFName('surface_downward_northward_stress', None, 'Pa'),
    1373: CFName('mass_fraction_of_dimethyl_sulfide_in_air', None, '1'),
    1374: CFName('mass_fraction_of_sulfur_dioxide_in_air', None, '1'),
    1382: CFName('leaf_area_index', None, '1'),
    1383: CFName('canopy_height', None, 'm'),
    1385: CFName('mass_fraction_of_unfrozen_water_in_soil_moisture', None, '1'),
    1386: CFName('mass_fraction_of_frozen_water_in_soil_moisture', None, '1'),
    1392: CFName('leaf_area_index', None, '1'),
    1393: CFName('canopy_height', None, 'm'),
    1395: CFName('soil_albedo', None, '1'),
    1507: CFName('snow_grain_size', None, '1e-6 m'),
    1559: CFName('soil_moisture_content_at_field_capacity', None, 'kg m-2'),
    1720: CFName('cloud_area_fraction_in_atmosphere_layer', None, '1'),
}
STASH_TO_CF = {
'm01s00i001': CFName('surface_air_pressure', None, 'Pa'),
'm01s00i002': CFName('x_wind', None, 'm s-1'),
'm01s00i003': CFName('y_wind', None, 'm s-1'),
'm01s00i004': CFName('air_potential_temperature', None, 'K'),
'm01s00i009': CFName('moisture_content_of_soil_layer', None, 'kg m-2'),
'm01s00i010': CFName('specific_humidity', None, 'kg kg-1'),
'm01s00i012': CFName('mass_fraction_of_cloud_ice_in_air', None, 'kg kg-1'),
'm01s00i013': CFName('convective_cloud_area_fraction', None, '1'),
'm01s00i020': CFName('soil_temperature', None, 'K'),
'm01s00i023': CFName('snowfall_amount', None, 'kg m-2'),
'm01s00i024': CFName('surface_temperature', None, 'K'),
'm01s00i025': CFName('atmosphere_boundary_layer_thickness', None, 'm'),
'm01s00i026': CFName('surface_roughness_length', None, 'm'),
'm01s00i028': CFName('surface_eastward_sea_water_velocity', None, 'm s-1'),
'm01s00i029': CFName('surface_northward_sea_water_velocity', None, 'm s-1'),
'm01s00i030': CFName('land_binary_mask', None, '1'),
'm01s00i031': CFName('sea_ice_area_fraction', None, '1'),
'm01s00i032': CFName('sea_ice_thickness', None, 'm'),
'm01s00i033': CFName('surface_altitude', None, 'm'),
'm01s00i040': CFName('volume_fraction_of_condensed_water_in_soil_at_wilting_point', None, '1'),
'm01s00i041': CFName('volume_fraction_of_condensed_water_in_soil_at_critical_point', None, '1'),
'm01s00i043': CFName('soil_porosity', None, '1'),
'm01s00i044': CFName('soil_hydraulic_conductivity_at_saturation', None, 'm s-1'),
'm01s00i046': CFName('soil_thermal_capacity', None, 'J kg-1 K-1'),
'm01s00i047': CFName('soil_thermal_conductivity', None, 'W m-1 K-1'),
'm01s00i048': CFName('soil_suction_at_saturation', None, 'Pa'),
'm01s00i049': CFName('sea_ice_temperature', None, 'K'),
'm01s00i050': CFName('vegetation_area_fraction', None, '1'),
'm01s00i051': CFName('root_depth', None, 'm'),
'm01s00i052': CFName('surface_albedo_assuming_no_snow', None, '1'),
'm01s00i053': CFName('surface_albedo_assuming_deep_snow', None, '1'),
'm01s00i058': CFName(None, 'tendency_of_atmosphere_mass_content_of_sulfur_dioxide_expressed_as_sulfur_due_to_low_level_emission', 'kg/m2/s'),
'm01s00i059': CFName(None, 'tendency_of_atmosphere_mass_content_of_dimethyl_sulfide_expressed_as_sulfur_due_to_emission', 'kg/m2/s'),
'm01s00i060': CFName('mass_fraction_of_ozone_in_air', None, '1'),
'm01s00i075': CFName(None, 'number_of_cloud_droplets_per_kg_of_air', 'kg-1'),
'm01s00i076': CFName(None, 'number_of_rain_drops_per_kg_of_air', 'kg-1'),
'm01s00i077': CFName(None, 'rain_third_moment', '1'),
'm01s00i078': CFName(None, 'number_of_ice_particles_per_kg_of_air', 'kg-1'),
'm01s00i079': CFName(None, 'number_of_snow_aggregates_per_kg_of_air', 'kg-1'),
'm01s00i080': CFName(None, 'snow_third_moment', '1'),
'm01s00i081': CFName(None, 'number_of_graupel_particles_per_kg_of_air', 'kg-1'),
'm01s00i082': CFName(None, 'graupel_third_moment', '1'),
'm01s00i090': CFName(None, 'visibility_murk_aerosol', '1e-9 kg kg-1'),
'm01s00i091': CFName(None, 'lightning_flash_potential', '1'),
'm01s00i095': CFName(None, 'Snow amount on sea ice', 'kg/m^2'),
'm01s00i099': CFName(None, 'variance_of_vertical_velocity_from_boundary_layer_for_turbulent_mixed_phase_scheme', 'm s-1'),
'm01s00i101': CFName(None, 'mass_fraction_of_sulfur_dioxide_expressed_as_sulfur_in_air', 'kg/kg'),
'm01s00i102': CFName(None, 'mass_fraction_of_dimethyl_sulfide_expressed_as_sulfur_in_air', 'kg/kg'),
'm01s00i103': CFName(None, 'mass_fraction_of_aitken_mode_sulfate_dry_aerosol_expressed_as_sulfur_in_air', 'kg/kg'),
'm01s00i104': CFName(None, 'mass_fraction_of_accumulation_mode_sulfate_dry_aerosol_expressed_as_sulfur_in_air', 'kg/kg'),
'm01s00i105': CFName(None, 'mass_fraction_of_dissolved_sulfate_dry_aerosol_expressed_as_sulfur_in_air', 'kg/kg'),
'm01s00i106': CFName('mass_fraction_of_hydrogen_peroxide_in_air', None, 'kg kg-1'),
'm01s00i107': CFName(None, 'mass_fraction_of_ammonia_expressed_as_nitrogen_in_air', 'kg/kg'),
'm01s00i108': CFName(None, 'mass_fraction_of_fresh_black_carbon_dry_aerosol_in_air', 'kg/kg'),
'm01s00i109': CFName(None, 'mass_fraction_of_aged_black_carbon_dry_aerosol_in_air', 'kg/kg'),
'm01s00i110': CFName(None, 'mass_fraction_of_cloud_black_carbon_dry_aerosol_in_air', 'kg/kg'),
'm01s00i111': CFName(None, 'mass_fraction_of_fresh_biomass_burning_dry_aerosol_in_air', 'kg/kg'),
'm01s00i112': CFName(None, 'mass_fraction_of_aged_biomass_burning_dry_aerosol_in_air', 'kg/kg'),
'm01s00i113': CFName(None, 'mass_fraction_of_cloud_biomass_burning_dry_aerosol_in_air', 'kg/kg'),
'm01s00i114': CFName(None, 'mass_fraction_of_fresh_organic_carbon_from_fossil_fuel_dry_aerosol_in_air', 'kg/kg'),
'm01s00i115': CFName(None, 'mass_fraction_of_aged_organic_carbon_from_fossil_fuel_dry_aerosol_in_air', 'kg/kg'),
'm01s00i116': CFName(None, 'mass_fraction_of_cloud_organic_carbon_from_fossil_fuel_dry_aerosol_in_air', 'kg/kg'),
'm01s00i117': CFName(None, 'mass_fraction_of_accumulation_mode_nitrate_dry_aerosol_expressed_as_nitrogen_in_air', 'kg/kg'),
'm01s00i118': CFName(None, 'mass_fraction_of_dissolved_nitrate_dry_aerosol_expressed_as_nitrogen_in_air', 'kg/kg'),
'm01s00i121': CFName(None, '3D NATURAL SO2 EMISSIONS', 'kg m-2 s-1'),
'm01s00i122': CFName(None, 'molecular_concentration_of_hydroxyl_radical_in_air', 'cm-3'),
'm01s00i123': CFName(None, 'molecular_concentration_of_hydroperoxyl_radical_in_air', 'cm-3'),
'm01s00i124': CFName('mass_fraction_of_hydrogen_peroxide_in_air', None, 'kg kg-1'),
'm01s00i125': CFName('mass_fraction_of_ozone_in_air', None, 'kg kg-1'),
'm01s00i126': CFName(None, 'tendency_of_atmosphere_mass_content_of_sulfur_dioxide_expressed_as_sulfur_due_to_high_level_emission', 'kg/m2/s'),
'm01s00i127': CFName(None, 'tendency_of_atmosphere_mass_content_of_ammonia_expressed_as_nitrogen_due_to_emission', 'kg/m2/s'),
'm01s00i128': CFName(None, 'tendency_of_atmosphere_mass_content_of_black_carbon_dry_aerosol_due_to_low_level_emission', 'kg/m2/s'),
'm01s00i129': CFName(None, 'tendency_of_atmosphere_mass_content_of_black_carbon_dry_aerosol_due_to_high_level_emission', 'kg/m2/s'),
'm01s00i130': CFName(None, 'tendency_of_atmosphere_mass_content_of_biomass_burning_dry_aerosol_due_to_low_level_emission', 'kg/m2/s'),
'm01s00i131': CFName(None, 'tendency_of_atmosphere_mass_content_of_biomass_burning_dry_aerosol_due_to_high_level_emission', 'kg/m2/s'),
'm01s00i132': CFName('mole_concentration_of_dimethyl_sulfide_in_sea_water', None, 'nanomole/l'),
'm01s00i134': CFName(None, 'tendency_of_atmosphere_mass_content_of_organic_carbon_from_fossil_fuel_dry_aerosol_due_to_low_level_emission', 'kg/m2/s'),
'm01s00i135': CFName(None, 'tendency_of_atmosphere_mass_content_of_organic_carbon_from_fossil_fuel_dry_aerosol_due_to_high_level_emission', 'kg/m2/s'),
'm01s00i150': CFName('upward_air_velocity', None, 'm s-1'),
'm01s00i205': CFName('land_area_fraction', None, '1'),
'm01s00i208': CFName('leaf_area_index', None, '1'),
'm01s00i209': CFName('canopy_height', None, 'm'),
'm01s00i211': CFName(None, 'Convective cloud amount with anvil', '1'),
'm01s00i214': CFName('mass_fraction_of_unfrozen_water_in_soil_moisture', None, '1'),
'm01s00i215': CFName('mass_fraction_of_frozen_water_in_soil_moisture', None, '1'),
'm01s00i217': CFName('leaf_area_index', None, '1'),
'm01s00i218': CFName('canopy_height', None, 'm'),
'm01s00i220': CFName('soil_albedo', None, '1'),
'm01s00i223': CFName('soil_carbon_content', None, 'kg m-2'),
'm01s00i231': CFName('snow_grain_size', None, '1e-6 m'),
'm01s00i243': CFName(None, 'surface_diffuse_albedo_assuming_no_snow', '1'),
'm01s00i244': CFName(None, 'surface_diffuse_albedo_of_photosynthetically_active_radiation_assuming_no_snow', '1'),
'm01s00i245': CFName(None, 'surface_diffuse_albedo_of_near_infra_red_radiation_assuming_no_snow', '1'),
'm01s00i252': CFName('mass_fraction_of_carbon_dioxide_in_air', None, '1'),
'm01s00i254': CFName('mass_fraction_of_cloud_liquid_water_in_air', None, 'kg kg-1'),
'm01s00i255': CFName('dimensionless_exner_function', None, '1'),
'm01s00i265': CFName('cloud_area_fraction_in_atmosphere_layer', None, '1'),
'm01s00i266': CFName(None, 'cloud_volume_fraction_in_atmosphere_layer', '1'),
'm01s00i267': CFName(None, 'liquid_cloud_volume_fraction_in_atmosphere_layer', '1'),
'm01s00i268': CFName(None, 'ice_cloud_volume_fraction_in_atmosphere_layer', '1'),
'm01s00i269': CFName('surface_eastward_sea_water_velocity', None, 'm s-1'),
'm01s00i270': CFName('surface_northward_sea_water_velocity', None, 'm s-1'),
'm01s00i271': CFName(None, 'mass_fraction_of_cloud_ice_crystals_in_air', 'kg kg-1'),
'm01s00i272': CFName('mass_fraction_of_rain_in_air', None, 'kg kg-1'),
'm01s00i273': CFName('mass_fraction_of_graupel_in_air', None, 'kg kg-1'),
'm01s00i351': CFName(None, 'mass_concentration_of_biogenic_nmvoc_in_air', 'kg/kg'),
'm01s00i352': CFName(None, 'mass_fraction_of_fresh_biomass_burning_dry_aerosol_in_air', 'kg/kg'),
'm01s00i353': CFName(None, 'mass_fraction_of_aged_biomass_burning_dry_aerosol_in_air', 'kg/kg'),
'm01s00i354': CFName(None, 'mass_fraction_of_cloud_biomass_burning_dry_aerosol_in_air', 'kg/kg'),
'm01s00i355': CFName(None, 'mass_fraction_of_fresh_black_carbon_dry_aerosol_in_air', 'kg/kg'),
'm01s00i356': CFName(None, 'mass_fraction_of_aged_black_carbon_dry_aerosol_in_air', 'kg/kg'),
'm01s00i357': CFName(None, 'atmosphere_number_concentration_of_film_mode_sea_salt_particles', 'kg/kg'),
'm01s00i358': CFName(None, 'atmosphere_number_concentration_of_jet_mode_sea_salt_particles', 'kg/kg'),
'm01s00i359': CFName(None, 'mass_fraction_of_aitken_mode_sulfate_dry_aerosol_in_air_expressed_as_sulfur', 'kg/kg'),
'm01s00i360': CFName(None, 'mass_fraction_of_accumulation_mode_sulfate_dry_aerosol_in_air_expressed_as_sulfur', 'kg/kg'),
'm01s00i361': CFName(None, 'mass_fraction_of_dissolved_sulfate_dry_aerosol_in_air_expressed_as_sulfur', 'kg/kg'),
'm01s00i362': CFName(None, 'mass_fraction_of_dust_ukmo_division_1_dry_aerosol_in_air', 'kg/kg'),
'm01s00i363': CFName(None, 'mass_fraction_of_dust_ukmo_division_2_dry_aerosol_in_air', 'kg/kg'),
'm01s00i364': CFName(None, 'mass_fraction_of_dust_ukmo_division_3_dry_aerosol_in_air', 'kg/kg'),
'm01s00i365': CFName(None, 'mass_fraction_of_dust_ukmo_division_4_dry_aerosol_in_air', 'kg/kg'),
'm01s00i366': CFName(None, 'mass_fraction_of_dust_ukmo_division_5_dry_aerosol_in_air', 'kg/kg'),
'm01s00i367': CFName(None, 'mass_fraction_of_dust_ukmo_division_6_dry_aerosol_in_air', 'kg/kg'),
'm01s00i368': CFName(None, 'mass_fraction_of_fresh_organic_carbon_from_fossil_fuel_dry_aerosol_in_air', 'kg/kg'),
'm01s00i369': CFName(None, 'mass_fraction_of_aged_organic_carbon_from_fossil_fuel_dry_aerosol_in_air', 'kg/kg'),
'm01s00i370': CFName(None, 'mass_fraction_of_cloud_organic_carbon_from_fossil_fuel_dry_aerosol_in_air', 'kg/kg'),
'm01s00i371': CFName(None, 'mass_concentration_of_unspecified_aerosol_in_air', 'kg/kg'),
'm01s00i388': CFName(None, 'virtual_potential_temperature', 'K'),
'm01s00i389': CFName('air_density', None, 'kg m-3'),
'm01s00i391': CFName('humidity_mixing_ratio', None, 'kg kg-1'),
'm01s00i392': CFName('cloud_liquid_water_mixing_ratio', None, 'kg kg-1'),
'm01s00i393': CFName('cloud_ice_mixing_ratio', None, 'kg kg-1'),
'm01s00i394': CFName(None, 'rain_mixing_ratio', 'kg kg-1'),
'm01s00i395': CFName(None, 'graupel_mixing_ratio', 'kg kg-1'),
'm01s00i406': CFName('dimensionless_exner_function', None, '1'),
'm01s00i407': CFName('air_pressure', None, 'Pa'),
'm01s00i408': CFName('air_pressure', None, 'Pa'),
'm01s00i409': CFName('surface_air_pressure', None, 'Pa'),
'm01s00i413': CFName(None, 'Sea ice concentration by categories', '1'),
'm01s00i414': CFName(None, 'Sea ice thickness GBM by categories', 'm'),
'm01s00i415': CFName('sea_ice_surface_temperature', None, 'K'),
'm01s00i416': CFName(None, 'Snow thickness on sea ice', 'm'),
'm01s00i418': CFName('volume_fraction_of_clay_in_soil', None, 'm3 m-3'),
'm01s00i419': CFName('volume_fraction_of_silt_in_soil', None, 'm3 m-3'),
'm01s00i420': CFName('volume_fraction_of_sand_in_soil', None, 'm3 m-3'),
'm01s00i421': CFName(None, 'mass_fraction_of_soil_particles_in_ukmo_division1', 'kg/kg'),
'm01s00i422': CFName(None, 'mass_fraction_of_soil_particles_in_ukmo_division2', 'kg/kg'),
'm01s00i423': CFName(None, 'mass_fraction_of_soil_particles_in_ukmo_division3', 'kg/kg'),
'm01s00i424': CFName(None, 'mass_fraction_of_soil_particles_in_ukmo_division4', 'kg/kg'),
'm01s00i425': CFName(None, 'mass_fraction_of_soil_particles_in_ukmo_division5', 'kg/kg'),
'm01s00i426': CFName(None, 'mass_fraction_of_soil_particles_in_ukmo_division6', 'kg/kg'),
'm01s00i431': CFName(None, 'mass_fraction_of_dust_ukmo_division_1_dry_aerosol_in_air', 'kg/kg'),
'm01s00i432': CFName(None, 'mass_fraction_of_dust_ukmo_division_2_dry_aerosol_in_air', 'kg/kg'),
'm01s00i433': CFName(None, 'mass_fraction_of_dust_ukmo_division_3_dry_aerosol_in_air', 'kg/kg'),
'm01s00i434': CFName(None, 'mass_fraction_of_dust_ukmo_division_4_dry_aerosol_in_air', 'kg/kg'),
'm01s00i435': CFName(None, 'mass_fraction_of_dust_ukmo_division_5_dry_aerosol_in_air', 'kg/kg'),
'm01s00i436': CFName(None, 'mass_fraction_of_dust_ukmo_division_6_dry_aerosol_in_air', 'kg/kg'),
'm01s00i505': CFName('land_area_fraction', None, '1'),
'm01s00i506': CFName('surface_temperature', None, 'K'),
'm01s00i507': CFName('surface_temperature', None, 'K'),
'm01s00i508': CFName('surface_temperature', None, 'K'),
'm01s00i509': CFName(None, 'product_of_sea_ice_albedo_and_sunlit_binary_mask', '1'),
'm01s00i510': CFName(None, 'product_of_land_albedo_and_sunlit_binary_mask', '1'),
'm01s01i004': CFName('air_temperature', None, 'K'),
'm01s01i101': CFName(None, 'northward_horizon_angle_from_zenith', 'rad'),
'm01s01i102': CFName(None, 'northeastward_horizon_angle_from_zenith', 'rad'),
'm01s01i103': CFName(None, 'eastward_horizon_angle_from_zenith', 'rad'),
'm01s01i104': CFName(None, 'southeastward_horizon_angle_from_zenith', 'rad'),
'm01s01i105': CFName(None, 'southward_horizon_angle_from_zenith', 'rad'),
'm01s01i106': CFName(None, 'southwestward_horizon_angle_from_zenith', 'rad'),
'm01s01i107': CFName(None, 'westward_horizon_angle_from_zenith', 'rad'),
'm01s01i108': CFName(None, 'northwestward_horizon_angle_from_zenith', 'rad'),
'm01s01i109': CFName(None, 'northnortheastward_horizon_angle_from_zenith', 'rad'),
'm01s01i110': CFName(None, 'eastnortheastward_horizon_angle_from_zenith', 'rad'),
'm01s01i111': CFName(None, 'eastsoutheastward_horizon_angle_from_zenith', 'rad'),
'm01s01i112': CFName(None, 'southsoutheastward_horizon_angle_from_zenith', 'rad'),
'm01s01i113': CFName(None, 'southsouthwestward_horizon_angle_from_zenith', 'rad'),
'm01s01i114': CFName(None, 'westsouthwestward_horizon_angle_from_zenith', 'rad'),
'm01s01i115': CFName(None, 'westnorthwestward_horizon_angle_from_zenith', 'rad'),
'm01s01i116': CFName(None, 'northnorthwestward_horizon_angle_from_zenith', 'rad'),
'm01s01i181': CFName(None, 'change_over_time_in_air_temperature_due_to_shortwave_heating', 'K'),
'm01s01i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_shortwave_heating', 'kg kg-1'),
'm01s01i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_shortwave_heating', 'kg kg-1'),
'm01s01i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_shortwave_heating', '1'),
'm01s01i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_shortwave_heating', '1'),
'm01s01i201': CFName('surface_net_downward_shortwave_flux', None, 'W m-2'),
'm01s01i203': CFName('surface_net_downward_shortwave_flux', None, 'W m-2'),
'm01s01i205': CFName('toa_outgoing_shortwave_flux', None, 'W m-2'),
'm01s01i207': CFName('toa_incoming_shortwave_flux', None, 'W m-2'),
'm01s01i208': CFName('toa_outgoing_shortwave_flux', None, 'W m-2'),
'm01s01i209': CFName('toa_outgoing_shortwave_flux_assuming_clear_sky', None, 'W m-2'),
'm01s01i210': CFName('surface_downwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s01i211': CFName('surface_upwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s01i217': CFName('upwelling_shortwave_flux_in_air', None, 'W m-2'),
'm01s01i218': CFName('downwelling_shortwave_flux_in_air', None, 'W m-2'),
'm01s01i219': CFName('upwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s01i220': CFName('downwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s01i221': CFName(None, 'product_of_effective_radius_of_stratiform_cloud_liquid_water_particle_and_stratiform_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', 'um'),
'm01s01i223': CFName(None, 'product_of_stratiform_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', '1'),
'm01s01i224': CFName(None, 'product_of_stratiform_cloud_liquid_water_path_and_stratiform_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', 'kg m-2'),
'm01s01i225': CFName(None, 'product_of_effective_radius_of_convective_cloud_liquid_water_particle_and_convective_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', 'um'),
'm01s01i226': CFName(None, 'product_of_convective_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', '1'),
'm01s01i232': CFName('tendency_of_air_temperature_due_to_shortwave_heating', None, 'K s-1'),
'm01s01i233': CFName('tendency_of_air_temperature_due_to_shortwave_heating_assuming_clear_sky', None, 'K s-1'),
'm01s01i235': CFName('surface_downwelling_shortwave_flux_in_air', None, 'W m-2'),
'm01s01i237': CFName('net_downward_shortwave_flux_in_air', None, 'W m-2'),
'm01s01i238': CFName('tropopause_upwelling_shortwave_flux', None, 'W m-2'),
'm01s01i241': CFName(None, 'product_of_number_concentration_of_stratiform_cloud_liquid_water_particles_and_stratiform_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', 'cm-3'),
'm01s01i242': CFName(None, 'product_of_stratiform_cloud_liquid_water_content_and_stratiform_cloud_liquid_water_area_fraction_and_sunlit_binary_mask', 'g cm-3'),
'm01s01i243': CFName(None, 'product_of_mass_concentration_of_sulfate_ion_and_sunlit_binary_mask', 'ug m-3'),
'm01s01i244': CFName(None, 'sunlit_binary_mask_in_atmosphere_layer_below_cloud_top', '1'),
'm01s01i245': CFName(None, 'product_of_effective_radius_of_cloud_liquid_water_particle_and_cloud_liquid_water_area_fraction_exposed_to_space_and_sunlit_binary_mask', 'um'),
'm01s01i246': CFName(None, 'product_of_cloud_liquid_water_area_fraction_exposed_to_space_and_sunlit_binary_mask', '1'),
'm01s01i247': CFName(None, 'atmosphere_number_concentration_of_film_mode_sea_salt_particles', 'm-3'),
'm01s01i248': CFName(None, 'atmosphere_number_concentration_of_jet_mode_sea_salt_particles', 'm-3'),
'm01s01i254': CFName(None, 'product_of_effective_radius_of_warm_cloud_liquid_water_particle_and_warm_cloud_liquid_water_area_fraction_exposed_to_space_and_sunlit_binary_mask', 'um'),
'm01s01i255': CFName(None, 'product_of_warm_cloud_liquid_water_area_fraction_exposed_to_space_and_sunlit_binary_mask', '1'),
'm01s01i268': CFName(None, 'surface_direct_beam_albedo_assuming_no_snow', '1'),
'm01s01i269': CFName(None, 'surface_diffuse_albedo_assuming_no_snow', '1'),
'm01s01i270': CFName(None, 'scaling_factor_for_surface_diffuse_albedo_of_photosynthetically_active_radiation_assuming_no_snow', '1'),
'm01s01i271': CFName(None, 'scaling_factor_for_surface_diffuse_albedo_of_near_infra_red_radiation_assuming_no_snow', '1'),
'm01s01i280': CFName(None, 'product_of_atmosphere_number_content_of_cloud_droplets_and_warm_cloud_area_fraction_and_sunlit_binary_mask', 'm-2'),
'm01s01i281': CFName(None, 'product_of_warm_cloud_area_fraction_and_sunlit_binary_mask', '1'),
'm01s01i294': CFName(None, 'surface_mean_slope_angle', 'rad'),
'm01s01i295': CFName(None, 'orographic_correction_factor_to_surface_direct_downwelling_shortwave_flux', '1'),
'm01s01i410': CFName('surface_downwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s01i435': CFName('surface_downwelling_shortwave_flux_in_air', None, 'W m-2'),
'm01s02i004': CFName('air_temperature', None, 'K'),
'm01s02i101': CFName(None, 'ratio_of_skyview_factor_to_cosine_of_surface_mean_slope_angle', '1'),
'm01s02i181': CFName(None, 'change_over_time_in_air_temperature_due_to_longwave_heating', 'K'),
'm01s02i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_longwave_heating', 'kg kg-1'),
'm01s02i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_longwave_heating', 'kg kg-1'),
'm01s02i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_longwave_heating', '1'),
'm01s02i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_longwave_heating', '1'),
'm01s02i201': CFName('surface_net_downward_longwave_flux', None, 'W m-2'),
'm01s02i203': CFName('surface_net_downward_longwave_flux', None, 'W m-2'),
'm01s02i204': CFName('cloud_area_fraction', None, '1'),
'm01s02i205': CFName('toa_outgoing_longwave_flux', None, 'W m-2'),
'm01s02i206': CFName('toa_outgoing_longwave_flux_assuming_clear_sky', None, 'W m-2'),
'm01s02i207': CFName('surface_downwelling_longwave_flux_in_air', None, 'W m-2'),
'm01s02i208': CFName('surface_downwelling_longwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s02i217': CFName('upwelling_longwave_flux_in_air', None, 'W m-2'),
'm01s02i218': CFName('downwelling_longwave_flux_in_air', None, 'W m-2'),
'm01s02i219': CFName('upwelling_longwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s02i220': CFName('downwelling_longwave_flux_in_air_assuming_clear_sky', None, 'W m-2'),
'm01s02i232': CFName('tendency_of_air_temperature_due_to_longwave_heating', None, 'K s-1'),
'm01s02i233': CFName('tendency_of_air_temperature_due_to_longwave_heating_assuming_clear_sky', None, 'K s-1'),
'm01s02i237': CFName('tropopause_net_downward_longwave_flux', None, 'W m-2'),
'm01s02i238': CFName('tropopause_downwelling_longwave_flux', None, 'W m-2'),
'm01s02i260': CFName('mass_fraction_of_ozone_in_air', None, '1'),
'm01s02i261': CFName('cloud_area_fraction_in_atmosphere_layer', None, '1'),
'm01s02i262': CFName(None, 'product_of_mass_absorption_coefficient_due_to_cloud_and_upwelling_longwave_flux_assuming_clear_sky_and_cloud_area_fraction_in_atmosphere_layer', 'W kg-1'),
'm01s02i263': CFName(None, 'product_of_upwelling_longwave_flux_assuming_clear_sky_and_cloud_area_fraction_in_atmosphere_layer', 'W m-2'),
'm01s02i264': CFName(None, 'product_of_mass_absorption_coefficient_due_to_stratiform_cloud_and_upwelling_longwave_flux_assuming_clear_sky_and_stratiform_cloud_area_fraction_in_atmosphere_layer', 'W kg-1'),
'm01s02i265': CFName(None, 'product_of_upwelling_longwave_flux_assuming_clear_sky_and_stratiform_cloud_area_fraction_in_atmosphere_layer', 'W m-2'),
'm01s02i266': CFName(None, 'product_of_mass_absorption_coefficient_due_to_convective_cloud_and_upwelling_longwave_flux_assuming_clear_sky_and_convective_cloud_area_fraction_in_atmosphere_layer', 'W kg-1'),
'm01s02i267': CFName(None, 'product_of_upwelling_longwave_flux_assuming_clear_sky_and_convective_cloud_area_fraction_in_atmosphere_layer', 'W m-2'),
'm01s02i280': CFName(None, 'model_level_number_at_ozone_tropopause', '1'),
'm01s02i281': CFName(None, 'ozone_tropopause_altitude', 'm'),
'm01s02i282': CFName(None, 'model_level_number_at_thermal_tropopause', '1'),
'm01s02i283': CFName(None, 'thermal_tropopause_altitude', 'm'),
'm01s02i284': CFName(None, 'atmosphere_optical_thickness_due_to_sulphate_ambient_aerosol', '1'),
'm01s02i285': CFName('atmosphere_optical_thickness_due_to_dust_ambient_aerosol', None, '1'),
'm01s02i286': CFName('atmosphere_optical_thickness_due_to_seasalt_ambient_aerosol', None, '1'),
'm01s02i287': CFName('atmosphere_optical_thickness_due_to_black_carbon_ambient_aerosol', None, '1'),
'm01s02i288': CFName(None, 'atmosphere_optical_thickness_due_to_biomass_burning_ambient_aerosol', '1'),
'm01s02i289': CFName(None, 'atmosphere_optical_thickness_due_to_biogenic_aerosol', '1'),
'm01s02i295': CFName(None, 'atmosphere_optical_thickness_due_to_fossil_fuel_organic_carbon_ambient_aerosol', '1'),
'm01s02i296': CFName(None, 'atmosphere_optical_thickness_due_to_unspecified_aerosol', '1'),
'm01s02i297': CFName(None, 'atmosphere_optical_thickness_due_to_ammonium_nitrate_ambient_aerosol', '1'),
'm01s02i298': CFName(None, 'atmosphere_optical_thickness_due_all_ambient_aerosol', '1'),
'm01s02i299': CFName('angstrom_exponent_of_ambient_aerosol_in_air', None, '1'),
'm01s02i300': CFName(None, 'atmosphere_optical_thickness_due_to_soluble_aitken_mode_sulphate_aerosol', '1'),
'm01s02i301': CFName(None, 'atmosphere_optical_thickness_due_to_soluble_accumulation_mode_sulphate_aerosol', '1'),
'm01s02i302': CFName(None, 'atmosphere_optical_thickness_due_to_soluble_coarse_mode_sulphate_aerosol', '1'),
'm01s02i303': CFName(None, 'atmosphere_optical_thickness_due_to_insoluble_aitken_mode_sulphate_aerosol', '1'),
'm01s02i304': CFName(None, 'atmosphere_optical_thickness_due_to_unsoluble_accumulation_mode_sulphate_aerosol', '1'),
'm01s02i305': CFName(None, 'atmosphere_optical_thickness_due_to_unsoluble_coarse_mode_sulphate_aerosol', '1'),
'm01s02i308': CFName('mass_fraction_of_stratiform_cloud_liquid_water_in_air', None, '1'),
'm01s02i309': CFName('mass_fraction_of_stratiform_cloud_ice_in_air', None, '1'),
'm01s02i310': CFName('mass_fraction_of_convective_cloud_liquid_water_in_air', None, '1'),
'm01s02i311': CFName('mass_fraction_of_convective_cloud_ice_in_air', None, '1'),
'm01s02i312': CFName(None, 'stratiform_cloud_liquid_water_area_fraction_in_atmosphere_layer', '1'),
'm01s02i313': CFName(None, 'stratiform_cloud_ice_area_fraction_in_atmosphere_layer', '1'),
'm01s02i314': CFName(None, 'convective_cloud_liquid_water_area_fraction_in_atmosphere_layer', '1'),
'm01s02i315': CFName(None, 'convective_cloud_ice_area_fraction_in_atmosphere_layer', '1'),
'm01s02i348': CFName('toa_bidirectional_reflectance', None, '1'),
'm01s02i351': CFName('equivalent_reflectivity_factor', None, 'dBZ'),
'm01s02i370': CFName('histogram_of_backscattering_ratio_over_height_above_reference_ellipsoid', None, '1'),
'm01s02i372': CFName('histogram_of_equivalent_reflectivity_factor_over_height_above_reference_ellipsoid', None, '1'),
'm01s02i375': CFName('atmosphere_optical_thickness_due_to_stratiform_cloud', None, '1'),
'm01s02i376': CFName('stratiform_cloud_longwave_emissivity', None, '1'),
'm01s02i377': CFName('atmosphere_optical_thickness_due_to_convective_cloud', None, '1'),
'm01s02i378': CFName('convective_cloud_longwave_emissivity', None, '1'),
'm01s02i380': CFName('effective_radius_of_stratiform_cloud_liquid_water_particle', None, 'm'),
'm01s02i381': CFName('effective_radius_of_stratiform_cloud_ice_particle', None, 'm'),
'm01s02i382': CFName('effective_radius_of_stratiform_cloud_rain_particle', None, 'm'),
'm01s02i383': CFName('effective_radius_of_stratiform_cloud_snow_particle', None, 'm'),
'm01s02i384': CFName('effective_radius_of_convective_cloud_liquid_water_particle', None, 'm'),
'm01s02i385': CFName('effective_radius_of_convective_cloud_ice_particle', None, 'm'),
'm01s02i386': CFName('effective_radius_of_convective_cloud_rain_particle', None, 'm'),
'm01s02i387': CFName('effective_radius_of_convective_cloud_snow_particle', None, 'm'),
'm01s02i388': CFName('effective_radius_of_stratiform_cloud_graupel_particle', None, 'm'),
'm01s02i421': CFName(None, 'atmosphere_optical_thickness_due_to_sulphate_ambient_aerosol', '1'),
'm01s02i422': CFName('atmosphere_optical_thickness_due_to_dust_ambient_aerosol', None, '1'),
'm01s02i423': CFName('atmosphere_optical_thickness_due_to_seasalt_ambient_aerosol', None, '1'),
'm01s02i424': CFName('atmosphere_optical_thickness_due_to_black_carbon_ambient_aerosol', None, '1'),
'm01s02i425': CFName(None, 'atmosphere_optical_thickness_due_to_biomass_burning_ambient_aerosol', '1'),
'm01s02i426': CFName(None, 'atmosphere_optical_thickness_due_to_fossil_fuel_organic_carbon_ambient_aerosol', '1'),
'm01s02i427': CFName(None, 'atmosphere_optical_thickness_due_to_ammonium_nitrate_ambient_aerosol', '1'),
'm01s03i004': CFName('air_temperature', None, 'K'),
'm01s03i010': CFName('specific_humidity', None, '1'),
'm01s03i025': CFName('atmosphere_boundary_layer_thickness', None, 'm'),
'm01s03i181': CFName(None, 'change_over_time_in_air_temperature_due_to_boundary_layer_mixing', 'K'),
'm01s03i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_boundary_layer_mixing', 'kg kg-1'),
'm01s03i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_boundary_layer_mixing', 'kg kg-1'),
'm01s03i184': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_boundary_layer_mixing', 'kg kg-1'),
'm01s03i185': CFName(None, 'change_over_time_in_x_wind_due_to_boundary_layer_mixing', 'm s-1'),
'm01s03i186': CFName(None, 'change_over_time_in_y_wind_due_to_boundary_layer_mixing', 'm s-1'),
'm01s03i187': CFName(None, 'change_over_time_in_upward_air_velocity_due_to_boundary_layer_mixing', 'm s-1'),
'm01s03i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_boundary_layer_mixing', '1'),
'm01s03i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_boundary_layer_mixing', '1'),
'm01s03i194': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer_due_to_boundary_layer_mixing', '1'),
'm01s03i201': CFName('downward_heat_flux_in_sea_ice', None, 'W m-2'),
'm01s03i202': CFName('downward_heat_flux_in_soil', None, 'W m-2'),
'm01s03i209': CFName('x_wind', None, 'm s-1'),
'm01s03i210': CFName('y_wind', None, 'm s-1'),
'm01s03i216': CFName('upward_heat_flux_in_air', None, 'W m-2'),
'm01s03i217': CFName('surface_upward_sensible_heat_flux', None, 'W m-2'),
'm01s03i219': CFName(None, 'atmosphere_downward_eastward_stress', 'Pa'),
'm01s03i220': CFName(None, 'atmosphere_downward_northward_stress', 'Pa'),
'm01s03i222': CFName('upward_water_vapor_flux_in_air', None, 'kg m-2 s-1'),
'm01s03i223': CFName('surface_upward_water_flux', None, 'kg m-2 s-1'),
'm01s03i224': CFName('wind_mixing_energy_flux_into_sea_water', None, 'W m-2'),
'm01s03i225': CFName('x_wind', None, 'm s-1'),
'm01s03i226': CFName('y_wind', None, 'm s-1'),
'm01s03i227': CFName('wind_speed', None, 'm s-1'),
'm01s03i228': CFName('surface_upward_sensible_heat_flux', None, 'W m-2'),
'm01s03i230': CFName('wind_speed', None, 'm s-1'),
'm01s03i231': CFName(None, 'water_sublimation_flux_in_timestep', 'kg m-2'),
'm01s03i232': CFName(None, 'Evaporation flux from open sea', 'kg/m^2/s'),
'm01s03i234': CFName('surface_upward_latent_heat_flux', None, 'W m-2'),
'm01s03i235': CFName(None, 'Latent heat flux from sea ice top melt', 'W/m^2'),
'm01s03i236': CFName('air_temperature', None, 'K'),
'm01s03i237': CFName('specific_humidity', None, '1'),
'm01s03i238': CFName('soil_temperature', None, 'K'),
'm01s03i245': CFName('relative_humidity', None, '%'),
'm01s03i247': CFName('visibility_in_air', None, 'm'),
'm01s03i248': CFName('fog_area_fraction', None, '1'),
'm01s03i249': CFName('wind_speed', None, 'm s-1'),
'm01s03i250': CFName('dew_point_temperature', None, 'K'),
'm01s03i256': CFName(None, 'Heat flux through sea ice', 'W/m^2'),
'm01s03i257': CFName(None, 'Heat flux in sea ice surface melt', 'W/m^2'),
'm01s03i258': CFName('surface_snow_melt_heat_flux', None, 'W m-2'),
'm01s03i261': CFName('gross_primary_productivity_of_carbon', None, 'kg m-2 s-1'),
'm01s03i262': CFName('net_primary_productivity_of_carbon', None, 'kg m-2 s-1'),
'm01s03i263': CFName('plant_respiration_carbon_flux', None, 'kg m-2 s-1'),
'm01s03i270': CFName('tendency_of_atmosphere_mass_content_of_sulfur_dioxide_due_to_dry_deposition', None, 'kg m-2 s-1'),
'm01s03i281': CFName('visibility_in_air', None, 'm'),
'm01s03i293': CFName('soil_respiration_carbon_flux', None, 'kg m-2 s-1'),
'm01s03i295': CFName(None, 'surface_snow_area_fraction_where_land', '%'),
'm01s03i296': CFName(None, 'Evaporation from soil surface', 'kg/m^2/s'),
'm01s03i297': CFName(None, 'Evaporation from canopy', 'kg/m^2/s'),
'm01s03i298': CFName('water_sublimation_flux', None, 'kg m-2 s-1'),
'm01s03i300': CFName('tendency_of_atmosphere_mass_content_of_ammonia_due_to_dry_deposition', None, 'kg m-2 s-1'),
'm01s03i304': CFName(None, 'Turbulent mixing height after boundary layer', 'm'),
'm01s03i305': CFName(None, 'Stable boundary layer indicator', '1'),
'm01s03i306': CFName(None, 'Stratocumulus over stable boundary layer indicator', '1'),
'm01s03i307': CFName(None, 'Well-mixed boundary layer indicator', '1'),
'm01s03i308': CFName(None, 'Decoupled stratocumulus not over cumulus indicator', '1'),
'm01s03i309': CFName(None, 'Decoupled stratocumulus over cumulus indicator', '1'),
'm01s03i310': CFName(None, 'Cumulus capped boundary layer indicator', '1'),
'm01s03i313': CFName('soil_moisture_content_at_field_capacity', None, 'kg m-2'),
'm01s03i321': CFName(None, 'Canopy water on tiles', 'kg/m^2'),
'm01s03i331': CFName(None, 'Sublimation moisture flux on tiles', 'kg/m^2/s'),
'm01s03i332': CFName('toa_outgoing_longwave_flux', None, 'W m-2'),
'm01s03i334': CFName('water_potential_evaporation_flux', None, 'kg m-2 s-1'),
'm01s03i337': CFName('downward_heat_flux_in_soil', None, 'W m-2'),
'm01s03i339': CFName(None, 'bulk_richardson_number', '1'),
'm01s03i340': CFName(None, 'Shear driven boundary layer indicator', '1'),
'm01s03i353': CFName(None, 'Sublimation of sea ice meaned over sea portion of grid box', 'kg/m^2/s'),
'm01s03i365': CFName('x_wind', None, 'm s-1'),
'm01s03i366': CFName('y_wind', None, 'm s-1'),
'm01s03i380': CFName('surface_net_downward_radiative_flux', None, 'W m-2'),
'm01s03i390': CFName('wind_speed_shear', None, 'm s-1'),
'm01s03i391': CFName('surface_downward_eastward_stress', None, 'Pa'),
'm01s03i392': CFName('surface_downward_eastward_stress', None, 'Pa'),
'm01s03i393': CFName('surface_downward_northward_stress', None, 'Pa'),
'm01s03i394': CFName('surface_downward_northward_stress', None, 'Pa'),
'm01s03i395': CFName('land_area_fraction', None, '1'),
'm01s03i401': CFName(None, 'Dust emissions division 1', 'kg/m^2/s'),
'm01s03i402': CFName(None, 'Dust emissions division 2', 'kg/m^2/s'),
'm01s03i403': CFName(None, 'Dust emissions division 3', 'kg/m^2/s'),
'm01s03i404': CFName(None, 'Dust emissions division 4', 'kg/m^2/s'),
'm01s03i405': CFName(None, 'Dust emissions division 5', 'kg/m^2/s'),
'm01s03i406': CFName(None, 'Dust emissions division 6', 'kg/m^2/s'),
'm01s03i430': CFName(None, 'Dust friction velocity', 'm/s'),
'm01s03i441': CFName(None, 'Dust dry deposition flux division 1 from level 1', 'kg/m^2/s'),
'm01s03i442': CFName(None, 'Dust dry deposition flux division 2 from level 1', 'kg/m^2/s'),
'm01s03i443': CFName(None, 'Dust dry deposition flux division 3 from level 1', 'kg/m^2/s'),
'm01s03i444': CFName(None, 'Dust dry deposition flux division 4 from level 1', 'kg/m^2/s'),
'm01s03i445': CFName(None, 'Dust dry deposition flux division 5 from level 1', 'kg/m^2/s'),
'm01s03i446': CFName(None, 'Dust dry deposition flux division 6 from level 1', 'kg/m^2/s'),
'm01s03i451': CFName(None, 'Dust dry deposition flux division 1 from level 2', 'kg/m^2/s'),
'm01s03i452': CFName(None, 'Dust dry deposition flux division 2 from level 2', 'kg/m^2/s'),
'm01s03i453': CFName(None, 'Dust dry deposition flux division 3 from level 2', 'kg/m^2/s'),
'm01s03i454': CFName(None, 'Dust dry deposition flux division 4 from level 2', 'kg/m^2/s'),
'm01s03i455': CFName(None, 'Dust dry deposition flux division 5 from level 2', 'kg/m^2/s'),
'm01s03i456': CFName(None, 'Dust dry deposition flux division 6 from level 2', 'kg/m^2/s'),
'm01s03i460': CFName('surface_downward_eastward_stress', None, 'Pa'),
'm01s03i461': CFName('surface_downward_northward_stress', None, 'Pa'),
'm01s03i463': CFName('wind_speed_of_gust', None, 'm s-1'),
'm01s03i471': CFName('atmosphere_momentum_diffusivity', None, 'm2 s-1'),
'm01s03i472': CFName('atmosphere_heat_diffusivity', None, 'm2 s-1'),
'm01s03i491': CFName('surface_carbon_dioxide_mole_flux', None, 'mol m-2 s-1'),
'm01s03i538': CFName('surface_drag_coefficient_for_momentum_in_air', None, '1'),
'm01s03i541': CFName('surface_drag_coefficient_for_heat_in_air', None, '1'),
'm01s04i004': CFName('air_temperature', None, 'K'),
'm01s04i010': CFName('specific_humidity', None, '1'),
'm01s04i100': CFName(None, 'ice_aggregate_fraction', '1'),
'm01s04i101': CFName(None, 'flag_to_indicate_microphysics_code_has_been_run', '1'),
'm01s04i102': CFName(None, 'fall_speed_of_ice_crystals', 'm s-1'),
'm01s04i103': CFName(None, 'fall_speed_of_ice_aggregates', 'm s-1'),
'm01s04i104': CFName(None, 'flag_for_ice_fall_speed_in_use', '1'),
'm01s04i105': CFName(None, 'assumed_fall_speed_of_ice', 'm s-1'),
'm01s04i110': CFName(None, 'radar_reflectivity_due_to_all_hydrometeors_at_the_surface', 'dBZ'),
'm01s04i111': CFName(None, 'maximum_radar_reflectivity_in_the_grid_column_due_to_all_hydrometeors', 'dBZ'),
'm01s04i112': CFName(None, 'radar_reflectivity_due_to_all_hydrometeors_at_1km_altitude', 'dBZ'),
'm01s04i113': CFName(None, 'radar_reflectivity_due_to_graupel_alone', 'dBZ'),
'm01s04i114': CFName(None, 'radar_reflectivity_due_to_ice_aggregates_alone', 'dBZ'),
'm01s04i115': CFName(None, 'radar_reflectivity_due_to_ice_crystals_alone', 'dBZ'),
'm01s04i116': CFName(None, 'radar_reflectivity_due_to_rain_alone', 'dBZ'),
'm01s04i117': CFName(None, 'radar_reflectivity_due_to_cloud_alone', 'dBZ'),
'm01s04i118': CFName(None, 'radar_reflectivity_due_to_all_hydrometeor_species', 'dBZ'),
'm01s04i119': CFName(None, 'cloud_top_altitude_derived_using_radar_echo_top_altitude', 'm'),
'm01s04i141': CFName(None, 'change_over_time_in_air_temperature_due_to_pc2_checks', 'K'),
'm01s04i142': CFName(None, 'change_over_time_in_specific_humidity_due_to_pc2_checks', 'kg kg-1'),
'm01s04i143': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_pc2_checks', 'kg kg-1'),
'm01s04i144': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_pc2_checks', 'kg kg-1'),
'm01s04i152': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_checks', '1'),
'm01s04i153': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_checks', '1'),
'm01s04i154': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_checks', '1'),
'm01s04i181': CFName(None, 'change_over_time_in_air_temperature_due_to_stratiform_precipitation', 'K'),
'm01s04i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_stratiform_precipitation', 'kg kg-1'),
'm01s04i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_stratiform_precipitation', 'kg kg-1'),
'm01s04i184': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_stratiform_precipitation', 'kg kg-1'),
'm01s04i189': CFName(None, 'change_over_time_in_mass_fraction_of_rain_in_air_due_to_stratiform_precipitation', 'kg kg-1'),
'm01s04i190': CFName(None, 'graupel_mixing_ratio_increment_due_to_grid_scale_precipitation', '1'),
'm01s04i191': CFName(None, 'change_over_time_in_mass_fraction_of_graupel_in_air_due_to_stratiform_precipitation', 'kg kg-1'),
'm01s04i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_stratiform_precipitation', '1'),
'm01s04i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_stratiform_precipitation', '1'),
'm01s04i194': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer_due_to_stratiform_precipitation', '1'),
'm01s04i201': CFName('stratiform_rainfall_amount', None, 'kg m-2'),
'm01s04i202': CFName('stratiform_snowfall_amount', None, 'kg m-2'),
'm01s04i203': CFName('stratiform_rainfall_flux', None, 'kg m-2 s-1'),
'm01s04i204': CFName('stratiform_snowfall_flux', None, 'kg m-2 s-1'),
'm01s04i205': CFName('mass_fraction_of_cloud_liquid_water_in_air', None, '1'),
'm01s04i206': CFName('mass_fraction_of_cloud_ice_in_air', None, '1'),
'm01s04i207': CFName(None, 'relative_humidity_with_respect_to_liquid_water', '%'),
'm01s04i208': CFName(None, 'relative_humidity_with_respect_to_water_and_ice', '%'),
'm01s04i209': CFName(None, 'graupel_fall_amount_at_surface', 'kg m-2'),
'm01s04i210': CFName(None, 'cloud_drop_number_concentration_where_cloud_is_present', 'm-3'),
'm01s04i211': CFName(None, 'cloud_drop_number_concentration_ignoring_prescence_of_cloud', 'm-3'),
'm01s04i212': CFName(None, 'graupel_fall_flux_at_surface', 'kg m-2 s-1'),
'm01s04i222': CFName('large_scale_rainfall_flux', None, 'kg m-2 s-1'),
'm01s04i223': CFName('large_scale_snowfall_flux', None, 'kg m-2 s-1'),
'm01s04i224': CFName(None, 'supercooled_liquid_water_content', '1'),
'm01s04i225': CFName(None, 'supercooled_rainfall_flux', 'kg m-2 s-1'),
'm01s04i226': CFName(None, 'graupel_fall_flux_on_model_levels', 'kg m-2 s-1'),
'm01s04i227': CFName(None, 'fraction_of_grid_box_assumed_to_be_rain', '1'),
'm01s04i231': CFName(None, 'Dust wet deposition flux due to large scale precipitation division 1', 'kg/m^2/s'),
'm01s04i232': CFName(None, 'Dust wet deposition flux due to large scale precipitation division 2', 'kg/m^2/s'),
'm01s04i233': CFName(None, 'Dust wet deposition flux due to large scale precipitation division 3', 'kg/m^2/s'),
'm01s04i234': CFName(None, 'Dust wet deposition flux due to large scale precipitation division 4', 'kg/m^2/s'),
'm01s04i235': CFName(None, 'Dust wet deposition flux due to large scale precipitation division 5', 'kg/m^2/s'),
'm01s04i236': CFName(None, 'Dust wet deposition flux due to large scale precipitation division 6', 'kg/m^2/s'),
'm01s04i240': CFName('tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_homogeneous_nucleation', None, 's-1'),
'm01s04i241': CFName('tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_heterogeneous_nucleation_from_water_vapor', None, 's-1'),
'm01s04i242': CFName('tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_heterogeneous_nucleation_from_cloud_liquid', None, 's-1'),
'm01s04i243': CFName(None, 'rate_of_increase_of_ice_mass_due_to_vapour_deposition', 's-1'),
'm01s04i245': CFName(None, 'rate_of_increase_of_snow_mass_due_to_vapour_deposition', 's-1'),
'm01s04i247': CFName('tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_riming_from_cloud_liquid', None, 's-1'),
'm01s04i248': CFName(None, 'rate_of_increase_of_snow_mass_due_to_riming_of_liquid_cloud', 's-1'),
'm01s04i249': CFName(None, 'rate_of_increase_of_ice_mass_due_to_capture_of_raindrops', 's-1'),
'm01s04i250': CFName(None, 'rate_of_increase_of_snow_mass_due_to_capture_of_raindrops', 's-1'),
'm01s04i251': CFName(None, 'rate_of_loss_of_ice_mass_due_to_sublimation', 's-1'),
'm01s04i252': CFName(None, 'rate_of_loss_of_snow_mass_due_to_sublimation', 's-1'),
'm01s04i253': CFName(None, 'rate_of_increase_of_rain_mass_due_to_melting_of_ice_crystals', 's-1'),
'm01s04i254': CFName(None, 'rate_of_increase_of_rain_mass_due_to_melting_of_snow', 's-1'),
'm01s04i255': CFName(None, 'rate_of_increase_of_snow_mass_due_to_autoconversion_from_ice_crystals', 's-1'),
'm01s04i256': CFName(None, 'rate_of_increase_of_snow_mass_due_to_capture_of_ice_crystals', 's-1'),
'm01s04i257': CFName(None, 'rate_of_increase_of_rain_mass_due_to_autoconversion_from_liquid_cloud', 's-1'),
'm01s04i258': CFName(None, 'rate_of_increase_of_rain_mass_due_to_accretion_of_liquid_cloud', 's-1'),
'm01s04i259': CFName(None, 'rate_of_loss_of_rain_mass_due_to_evaporation', 's-1'),
'm01s04i260': CFName(None, 'rate_of_increase_of_graupel_mass_due_to_autoconversion_from_snow', 's-1'),
'm01s04i261': CFName(None, 'rate_of_change_of_graupel_mass_due_to_riming_liquid_water', 's-1'),
'm01s04i262': CFName(None, 'rate_of_change_of_graupel_mass_due_to_capturing_snow', 's-1'),
'm01s04i263': CFName(None, 'melting_rate_of_graupel_mass', 's-1'),
'm01s04i264': CFName(None, 'loss_of_graupel_mass_due_to_sublimation', 's-1'),
'm01s04i265': CFName(None, 'sedimentation_rate_of_ice_crystal_mass', 's-1'),
'm01s04i266': CFName(None, 'sedimentation_rate_of_ice_aggregate_mass', 's-1'),
'm01s04i267': CFName(None, 'sedimentation_rate_of_rain_mass', 's-1'),
'm01s04i268': CFName(None, 'sedimentation_rate_of_graupel_mass', 's-1'),
'm01s04i269': CFName(None, 'rate_of_sedimentation_of_settling_cloud_droplets', 's-1'),
'm01s04i270': CFName(None, 'rate_of_evaporation_of_settling_cloud_droplets', 's-1'),
'm01s04i271': CFName(None, 'rate_of_change_of_ice_mass_due_to_homogeneous_freezing_of_rain', 's-1'),
'm01s04i272': CFName(None, 'rate_of_change_of_ice_mass_due_to_heterogeneous_freezing_of_rain', 's-1'),
'm01s04i275': CFName(None, 'maximum_predicted_hailstone_size_at_surface', 'mm'),
'm01s04i276': CFName(None, 'maximum_predicted_hailstone_size_in_model_vertical_column', 'mm'),
'm01s04i277': CFName(None, 'maximum_predicted_hailstone_size', 'mm'),
'm01s04i294': CFName(None, 'cloud_liquid_content_diagnosed_by_turbulent_mixed_phase_scheme', '1'),
'm01s04i295': CFName(None, 'liquid_cloud_fraction_diagnosed_by_turbulent_mixed_phase_scheme', '1'),
'm01s04i296': CFName(None, 'turbulent_decorrelation_timescale_diagnosed_by_turbulent_mixed_phase_scheme', 's'),
'm01s04i297': CFName(None, 'time_for_in_cloud_air_to_adjust_to_ice_saturation_via_deposition', 's-1'),
'm01s04i298': CFName(None, 'turbulent_dissipation_rate_diagnosed_by_turbulent_mixed_phase_scheme', 'm2 s-3'),
'm01s04i299': CFName(None, 'timescale_for_tubulence_to_mix_cloud_and_environment', 's-1'),
'm01s04i300': CFName(None, 'mean_of_subgrid_pdf_of_supersaturation_with_respect_to_ice', '1'),
'm01s04i301': CFName(None, 'variance_of_subgrid_pdf_of_supersaturation_with_respect_to_ice', '1'),
'm01s04i302': CFName(None, 'surface_snow_amount_ignoring_graupel', 'kg m-2'),
'm01s04i303': CFName(None, 'cloud_liquid_content_increment_by_turbulent_mixed_phase_scheme_and_PC2_scheme', 's-1'),
'm01s04i304': CFName(None, 'surface_snowfall_rate_ignoring_graupel', 'kg m-2 s-1'),
'm01s04i323': CFName(None, 'snowfall_flux_on_model_levels_ignoring_graupel', 'kg m-2 s-1'),
'm01s04i325': CFName(None, 'rate_of_change_of_liquid_cloud_mass_due_to_vapour_condensation_or_evaporation', 's-1'),
'm01s04i336': CFName(None, 'sedimentation_rate_of_ice_cloud_mass', 's-1'),
'm01s04i350': CFName(None, 'rate_of_change_of_ice_number_due_to_homogeneous_freezing_of_cloud', 'kg s-1'),
'm01s04i351': CFName(None, 'rate_of_change_of_ice_number_due_to_homogeneous_freezing_of_rain', 'kg s-1'),
'm01s04i352': CFName(None, 'rate_of_change_of_ice_number_due_to_hallett_mossop_process', 'kg-1 s-1'),
'm01s04i353': CFName(None, 'rate_of_change_of_ice_number_due_to_ice_nucleation', 'kg-1 s-1'),
'm01s04i354': CFName(None, 'rate_of_change_of_ice_number_due_to_snow_sedimentation', 'kg-1 s-1'),
'm01s04i355': CFName(None, 'rate_of_change_of_snow_number_due_to_snow_sedimentation', 'kg-1 s-1'),
'm01s04i356': CFName(None, 'rate_of_change_of_graupel_number_due_to_graupel_sedimentation', 'kg-1 s-1'),
'm01s04i400': CFName(None, 'subgrid_orographic_cloud_mixing_ratio', '1'),
'm01s04i401': CFName(None, 'subgrid_orographic_rain_accretion_rate', 's-1'),
'm01s04i402': CFName(None, 'subgrid_orographic_snow_riming_rate', 's-1'),
'm01s04i982': CFName(None, 'change_over_time_in_specific_humidity_due_to_methane_oxidation', 'kg kg-1'),
'm01s05i010': CFName('specific_humidity', None, '1'),
'm01s05i181': CFName(None, 'change_over_time_in_air_temperature_due_to_convection', 'K'),
'm01s05i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_convection', 'kg kg-1'),
'm01s05i185': CFName(None, 'change_over_time_in_x_wind_due_to_convection', 'm s-1'),
'm01s05i186': CFName(None, 'change_over_time_in_y_wind_due_to_convection', 'm s-1'),
'm01s05i201': CFName('convective_rainfall_amount', None, 'kg m-2'),
'm01s05i202': CFName('convective_snowfall_amount', None, 'kg m-2'),
'm01s05i205': CFName('convective_rainfall_flux', None, 'kg m-2 s-1'),
'm01s05i206': CFName('convective_snowfall_flux', None, 'kg m-2 s-1'),
'm01s05i207': CFName('air_pressure_at_convective_cloud_base', None, 'Pa'),
'm01s05i208': CFName('air_pressure_at_convective_cloud_top', None, 'Pa'),
'm01s05i209': CFName('air_temperature', None, 'K'),
'm01s05i212': CFName('convective_cloud_area_fraction_in_atmosphere_layer', None, '1'),
'm01s05i213': CFName('mass_fraction_of_convective_cloud_liquid_water_in_air', None, '1'),
'm01s05i214': CFName('rainfall_flux', None, 'kg m-2 s-1'),
'm01s05i215': CFName('snowfall_flux', None, 'kg m-2 s-1'),
'm01s05i216': CFName('precipitation_flux', None, 'kg m-2 s-1'),
'm01s05i222': CFName('air_pressure_at_convective_cloud_base', None, 'Pa'),
'm01s05i226': CFName('precipitation_amount', None, 'kg m-2'),
'm01s05i227': CFName('convective_rainfall_flux', None, 'kg m-2 s-1'),
'm01s05i228': CFName('convective_snowfall_flux', None, 'kg m-2 s-1'),
'm01s05i231': CFName(None, 'Cape time scale (deep)', 's'),
'm01s05i232': CFName(None, 'reduced cape time scale indicator', '1'),
'm01s05i233': CFName(None, 'undilute_cape', 'J kg-1'),
'm01s05i269': CFName(None, 'deep convection indicator', '1'),
'm01s05i270': CFName(None, 'shallow convection indicator', '1'),
'm01s05i272': CFName(None, 'mid level convection indicator', '1'),
'm01s05i277': CFName(None, 'deep convective precipitation rate', 'kg/m^2/s'),
'm01s05i278': CFName(None, 'shallow convective precipitation rate', 'kg/m^2/s'),
'm01s05i279': CFName(None, 'mid level convective precipitation rate', 'kg/m^2/s'),
'm01s05i281': CFName(None, 'Dust wet deposition flux due to convective precipitation division 1', 'kg/m^2/s'),
'm01s05i282': CFName(None, 'Dust wet deposition flux due to convective precipitation division 2', 'kg/m^2/s'),
'm01s05i283': CFName(None, 'Dust wet deposition flux due to convective precipitation division 3', 'kg/m^2/s'),
'm01s05i284': CFName(None, 'Dust wet deposition flux due to convective precipitation division 4', 'kg/m^2/s'),
'm01s05i285': CFName(None, 'Dust wet deposition flux due to convective precipitation division 5', 'kg/m^2/s'),
'm01s05i286': CFName(None, 'Dust wet deposition flux due to convective precipitation division 6', 'kg/m^2/s'),
'm01s06i111': CFName('upward_eastward_momentum_flux_in_air_due_to_nonorographic_eastward_gravity_waves', None, 'Pa'),
'm01s06i113': CFName('upward_eastward_momentum_flux_in_air_due_to_nonorographic_westward_gravity_waves', None, 'Pa'),
'm01s06i115': CFName('tendency_of_eastward_wind_due_to_nonorographic_gravity_wave_drag', None, 'm s-2'),
'm01s06i181': CFName(None, 'change_over_time_in_air_temperature_due_to_gravity_wave_drag', 'K'),
'm01s06i185': CFName(None, 'change_over_time_in_x_wind_due_to_gravity_wave_drag', 'm s-1'),
'm01s06i186': CFName(None, 'change_over_time_in_y_wind_due_to_gravity_wave_drag', 'm s-1'),
'm01s06i201': CFName('atmosphere_eastward_stress_due_to_gravity_wave_drag', None, 'Pa'),
'm01s06i202': CFName('atmosphere_northward_stress_due_to_gravity_wave_drag', None, 'Pa'),
'm01s06i241': CFName('upward_eastward_momentum_flux_in_air_due_to_orographic_gravity_waves', None, 'Pa'),
'm01s06i247': CFName('tendency_of_eastward_wind_due_to_orographic_gravity_wave_drag', None, 'm s-2'),
'm01s08i023': CFName('surface_snow_amount', None, 'kg m-2'),
'm01s08i202': CFName(None, 'surface_snow_melt_flux_where_land', 'W m-2'),
'm01s08i204': CFName('surface_runoff_amount', None, 'kg m-2'),
'm01s08i205': CFName('subsurface_runoff_amount', None, 'kg m-2'),
'm01s08i208': CFName('soil_moisture_content', None, 'kg m-2'),
'm01s08i209': CFName('canopy_water_amount', None, 'kg m-2'),
'm01s08i223': CFName('moisture_content_of_soil_layer', None, 'kg m-2'),
'm01s08i225': CFName('soil_temperature', None, 'K'),
'm01s08i229': CFName('mass_fraction_of_unfrozen_water_in_soil_moisture', None, 'kg kg-1'),
'm01s08i230': CFName('mass_fraction_of_frozen_water_in_soil_moisture', None, 'kg kg-1'),
'm01s08i231': CFName(None, 'surface_snow_melt_flux_where_land', 'kg m-2 s-1'),
'm01s08i233': CFName('canopy_throughfall_flux', None, 'kg m-2 s-1'),
'm01s08i234': CFName('surface_runoff_flux', None, 'kg m-2 s-1'),
'm01s08i235': CFName('subsurface_runoff_flux', None, 'kg m-2 s-1'),
'm01s08i245': CFName(None, 'Inland basin flow on atmospheric grid', 'kg/m^2/s'),
'm01s08i258': CFName('surface_runoff_flux', None, 'kg m-2 s-1'),
'm01s09i004': CFName('air_temperature', None, 'K'),
'm01s09i010': CFName('specific_humidity', None, '1'),
'm01s09i201': CFName('stratiform_cloud_area_fraction_in_atmosphere_layer', None, '1'),
'm01s09i203': CFName('low_type_cloud_area_fraction', None, '1'),
'm01s09i204': CFName('medium_type_cloud_area_fraction', None, '1'),
'm01s09i205': CFName('high_type_cloud_area_fraction', None, '1'),
'm01s09i208': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_0p1_oktas', 'kft'),
'm01s09i209': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_1p5_oktas', 'kft'),
'm01s09i210': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_2p5_oktas', 'kft'),
'm01s09i211': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_3p5_oktas', 'kft'),
'm01s09i212': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_4p5_oktas', 'kft'),
'm01s09i213': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_5p5_oktas', 'kft'),
'm01s09i214': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_6p5_oktas', 'kft'),
'm01s09i215': CFName(None, 'cloud_base_altitude_assuming_only_consider_cloud_area_fraction_greater_than_7p5_oktas', 'kft'),
'm01s09i216': CFName(None, 'cloud_area_fraction_assuming_random_overlap', '1'),
'm01s09i217': CFName(None, 'cloud_area_fraction_assuming_maximum_random_overlap', '1'),
'm01s09i218': CFName(None, 'cloud_area_fraction_assuming_only_consider_surface_to_1000_feet_asl', '1'),
'm01s09i219': CFName('cloud_base_altitude', None, 'ft'),
'm01s09i221': CFName(None, 'wet_bulb_freezing_level_altitude', 'm'),
'm01s09i222': CFName('wet_bulb_temperature', None, 'K'),
'm01s09i226': CFName(None, 'binary_mask_where_cloud_area_fraction_in_atmosphere_layer_gt_0', '1'),
'm01s09i228': CFName(None, 'relative_humidity_at_which_cloud_assumed_to_form', '%'),
'm01s09i229': CFName('relative_humidity', None, '%'),
'm01s09i230': CFName(None, 'visibility_in_atmosphere_layer', 'm'),
'm01s10i181': CFName(None, 'change_over_time_in_air_temperature_due_to_pressure_solver', 'K'),
'm01s10i185': CFName(None, 'change_over_time_in_x_wind_due_to_pressure_solver', 'm s-1'),
'm01s10i186': CFName(None, 'change_over_time_in_y_wind_due_to_pressure_solver', 'm s-1'),
'm01s10i187': CFName(None, 'change_over_time_in_upward_air_velocity_due_to_pressure_solver', 'm s-1'),
'm01s12i004': CFName('air_temperature', None, 'K'),
'm01s12i010': CFName('specific_humidity', None, '1'),
'm01s12i012': CFName('mass_fraction_of_cloud_ice_in_air', None, '1'),
'm01s12i181': CFName(None, 'change_over_time_in_air_temperature_due_to_advection', 'K'),
'm01s12i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_advection', 'kg kg-1'),
'm01s12i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_advection', 'kg kg-1'),
'm01s12i184': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_advection', 'kg kg-1'),
'm01s12i185': CFName(None, 'change_over_time_in_x_wind_due_to_advection', 'm s-1'),
'm01s12i186': CFName(None, 'change_over_time_in_y_wind_due_to_advection', 'm s-1'),
'm01s12i187': CFName(None, 'change_over_time_in_upward_air_velocity_due_to_advection', 'm s-1'),
'm01s12i189': CFName(None, 'change_over_time_in_mass_fraction_of_rain_in_air_due_to_advection', 'kg kg-1'),
'm01s12i190': CFName(None, 'change_over_time_in_mass_fraction_of_graupel_in_air_due_to_advection', 'kg kg-1'),
'm01s12i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_advection', '1'),
'm01s12i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_advection', '1'),
'm01s12i194': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer_due_to_advection', '1'),
'm01s12i195': CFName(None, 'change_over_time_in_humidity_mixing_ratio_due_to_advection', 'kg kg-1'),
'm01s12i196': CFName(None, 'change_over_time_in_cloud_liquid_water_mixing_ratio_due_to_advection', 'kg kg-1'),
'm01s12i197': CFName(None, 'change_over_time_in_cloud_ice_mixing_ratio_due_to_advection', 'kg kg-1'),
'm01s12i198': CFName(None, 'change_over_time_in_rain_mixing_ratio_due_to_advection', 'kg kg-1'),
'm01s12i199': CFName(None, 'change_over_time_in_graupel_mixing_ratio_due_to_advection', 'kg kg-1'),
'm01s12i201': CFName('lagrangian_tendency_of_air_pressure', None, 'Pa s-1'),
'm01s12i202': CFName('lagrangian_tendency_of_air_pressure', None, 'Pa s-1'),
'm01s12i381': CFName(None, 'change_over_time_in_air_temperature_due_to_advection_corrections', 'K'),
'm01s12i382': CFName(None, 'change_over_time_in_specific_humidity_due_to_advection_corrections', 'kg kg-1'),
'm01s12i383': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_advection_corrections', 'kg kg-1'),
'm01s12i384': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_advection_corrections', 'kg kg-1'),
'm01s12i389': CFName(None, 'change_over_time_in_mass_fraction_of_rain_in_air_due_to_advection_corrections', 'kg kg-1'),
'm01s12i391': CFName(None, 'change_over_time_in_mass_fraction_of_graupel_in_air_due_to_advection_corrections', 'kg kg-1'),
'm01s12i395': CFName(None, 'change_over_time_in_humidity_mixing_ratio_due_to_advection_corrections', 'kg kg-1'),
'm01s12i396': CFName(None, 'change_over_time_in_cloud_liquid_water_mixing_ratio_due_to_advection_corrections', 'kg kg-1'),
'm01s12i397': CFName(None, 'change_over_time_in_cloud_ice_mixing_ratio_due_to_advection_corrections', 'kg kg-1'),
'm01s12i398': CFName(None, 'change_over_time_in_rain_mixing_ratio_due_to_advection_corrections', 'kg kg-1'),
'm01s12i399': CFName(None, 'change_over_time_in_graupel_mixing_ratio_due_to_advection_corrections', 'kg kg-1'),
'm01s13i002': CFName('eastward_wind', None, 'm s-1'),
'm01s13i003': CFName('northward_wind', None, 'm s-1'),
'm01s13i004': CFName('air_temperature', None, 'K'),
'm01s13i181': CFName(None, 'change_over_time_in_air_temperature_due_to_diffusion', 'K'),
'm01s13i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_diffusion', 'kg kg-1'),
'm01s13i185': CFName(None, 'change_over_time_in_x_wind_due_to_diffusion', 'm s-1'),
'm01s13i186': CFName(None, 'change_over_time_in_y_wind_due_to_diffusion', 'm s-1'),
'm01s13i187': CFName(None, 'change_over_time_in_upward_air_velocity_due_to_diffusion', 'm s-1'),
'm01s14i181': CFName(None, 'change_over_time_in_air_temperature_due_to_energy_correction', 'K'),
'm01s15i101': CFName('height_above_reference_ellipsoid', None, 'm'),
'm01s15i102': CFName('height_above_reference_ellipsoid', None, 'm'),
'm01s15i108': CFName('air_pressure', None, 'Pa'),
'm01s15i119': CFName('air_potential_temperature', None, 'K'),
'm01s15i127': CFName('air_density', None, 'kg m-3'),
'm01s15i142': CFName('upward_air_velocity', None, 'm s-1'),
'm01s15i143': CFName('x_wind', None, 'm s-1'),
'm01s15i144': CFName('y_wind', None, 'm s-1'),
'm01s15i201': CFName('x_wind', None, 'm s-1'),
'm01s15i202': CFName('y_wind', None, 'm s-1'),
'm01s15i212': CFName('x_wind', None, 'm s-1'),
'm01s15i213': CFName('y_wind', None, 'm s-1'),
'm01s15i214': CFName('ertel_potential_vorticity', None, 'K m2 kg-1 s-1'),
'm01s15i215': CFName('air_potential_temperature', None, 'K'),
'm01s15i216': CFName('air_potential_temperature', None, 'K'),
'm01s15i217': CFName('potential_vorticity_of_atmosphere_layer', None, 'Pa-1 s-1'),
'm01s15i218': CFName('potential_vorticity_of_atmosphere_layer', None, 'Pa-1 s-1'),
'm01s15i219': CFName('square_of_air_temperature', None, 'K2'),
'm01s15i220': CFName(None, 'square_of_x_wind', 'm2 s-2'),
'm01s15i221': CFName(None, 'square_of_y_wind', 'm2 s-2'),
'm01s15i222': CFName('lagrangian_tendency_of_air_pressure', None, 'Pa s-1'),
'm01s15i223': CFName('product_of_omega_and_air_temperature', None, 'K Pa s-1'),
'm01s15i224': CFName(None, 'product_of_x_wind_and_omega', 'Pa m s-2'),
'm01s15i225': CFName(None, 'product_of_y_wind_and_omega', 'Pa m s-2'),
'm01s15i226': CFName('specific_humidity', None, 'kg kg-1'),
'm01s15i227': CFName(None, 'product_of_x_wind_and_specific_humidity', 'm s-1'),
'm01s15i228': CFName(None, 'product_of_y_wind_and_specific_humidity', 'm s-1'),
'm01s15i235': CFName('product_of_omega_and_specific_humidity', None, 'Pa s-1'),
'm01s15i238': CFName('geopotential_height', None, 'm'),
'm01s15i239': CFName(None, 'product_of_x_wind_and_geopotential_height', 'm2 s-1'),
'm01s15i240': CFName(None, 'product_of_y_wind_and_geopotential_height', 'm2 s-1'),
'm01s15i242': CFName('upward_air_velocity', None, 'm s-1'),
'm01s15i243': CFName('x_wind', None, 'm s-1'),
'm01s15i244': CFName('y_wind', None, 'm s-1'),
'm01s16i004': CFName('air_temperature', None, 'K'),
'm01s16i161': CFName(None, 'change_over_time_in_air_temperature_due_to_pc2_initialisation', 'K'),
'm01s16i162': CFName(None, 'change_over_time_in_specific_humidity_due_to_pc2_initialisation', 'kg kg-1'),
'm01s16i163': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_pc2_initialisation', 'kg kg-1'),
'm01s16i164': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_pc2_initialisation', 'kg kg-1'),
'm01s16i172': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_initialisation', '1'),
'm01s16i173': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_initialisation', '1'),
'm01s16i174': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_initialisation', '1'),
'm01s16i181': CFName(None, 'change_over_time_in_air_temperature_due_to_pc2_pressure_change', 'K'),
'm01s16i182': CFName(None, 'change_over_time_in_specific_humidity_due_to_pc2_pressure_change', 'kg kg-1'),
'm01s16i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air_due_to_pc2_pressure_change', 'kg kg-1'),
'm01s16i184': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air_due_to_pc2_pressure_change', 'kg kg-1'),
'm01s16i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_pressure_change', '1'),
'm01s16i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_pressure_change', '1'),
'm01s16i194': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer_due_to_pc2_pressure_change', '1'),
'm01s16i201': CFName('geopotential_height', None, 'm'),
'm01s16i202': CFName('geopotential_height', None, 'm'),
'm01s16i203': CFName('air_temperature', None, 'K'),
'm01s16i204': CFName('relative_humidity', None, '%'),
'm01s16i205': CFName('wet_bulb_potential_temperature', None, 'K'),
'm01s16i222': CFName('air_pressure_at_sea_level', None, 'Pa'),
'm01s16i224': CFName(None, 'square_of_height', 'm2'),
'm01s16i255': CFName('geopotential_height', None, 'm'),
'm01s16i256': CFName('relative_humidity', None, '%'),
'm01s17i220': CFName(None, 'mass_concentration_of_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i221': CFName(None, 'mass_concentration_of_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i222': CFName(None, 'mass_concentration_of_ammonium_sulfate_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i223': CFName(None, 'mass_concentration_of_ammonium_sulfate_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i224': CFName(None, 'mass_concentration_of_black_carbon_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i225': CFName(None, 'mass_concentration_of_black_carbon_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i226': CFName(None, 'mass_concentration_of_biomass_burning_aerosol_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i227': CFName(None, 'mass_concentration_of_biomass_burning_aerosol_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i228': CFName(None, 'mass_concentration_of_organic_carbon_from_fossil_fuel_combustion_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i229': CFName(None, 'mass_concentration_of_organic_carbon_from_fossil_fuel_combustion_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i230': CFName(None, 'mass_concentration_of_secondary_particulate_organic_matter_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i231': CFName(None, 'mass_concentration_of_secondary_particulate_organic_matter_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i232': CFName(None, 'mass_concentration_of_seasalt_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i233': CFName(None, 'mass_concentration_of_seasalt_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i234': CFName(None, 'mass_concentration_of_dust_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i235': CFName(None, 'mass_concentration_of_dust_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i236': CFName(None, 'mass_concentration_of_ammonium_nitrate_in_pm10_dry_aerosol_in_air', 'ug m-3'),
'm01s17i237': CFName(None, 'mass_concentration_of_ammonium_nitrate_in_pm2p5_dry_aerosol_in_air', 'ug m-3'),
'm01s17i257': CFName('mass_concentration_of_dust_dry_aerosol_in_air', None, 'ug m-3'),
'm01s19i002': CFName('vegetation_carbon_content', None, 'kg m-2'),
'm01s19i016': CFName('soil_carbon_content', None, 'kg m-2'),
'm01s20i003': CFName('wind_speed', None, 'm s-1'),
'm01s20i004': CFName('wind_speed', None, 'm s-1'),
'm01s20i005': CFName('divergence_of_wind', None, 's-1'),
'm01s20i006': CFName('atmosphere_relative_vorticity', None, 's-1'),
'm01s20i024': CFName('tropopause_air_pressure', None, 'Pa'),
'm01s20i025': CFName('tropopause_air_temperature', None, 'K'),
'm01s20i026': CFName('tropopause_altitude', None, 'm'),
'm01s20i034': CFName('air_pressure_at_freezing_level', None, 'Pa'),
'm01s20i064': CFName('tropopause_air_pressure', None, 'Pa'),
'm01s20i065': CFName('tropopause_air_temperature', None, 'K'),
'm01s20i066': CFName('tropopause_altitude', None, 'm'),
'm01s21i100': CFName(None, 'lightning_flash_rate', 's-1'),
'm01s21i101': CFName(None, 'flag_for_location_of_storms', '1'),
'm01s21i102': CFName(None, 'graupel_water_path', 'kg m-2'),
'm01s21i103': CFName(None, 'total_ice_water_path', 'kg m-2'),
'm01s21i104': CFName(None, 'Number_of_lightning_flashes', '1'),
'm01s21i105': CFName(None, 'lightning_flash_rate_due_to_graupel_flux', 's-1'),
'm01s21i106': CFName(None, 'lightning_flash_rate_due_to_total_ice_water_path', 's-1'),
'm01s26i001': CFName(None, 'river water storage', 'kg'),
'm01s26i002': CFName(None, 'gridbox outflow', 'kg/s'),
'm01s26i003': CFName(None, 'gridbox inflow', 'kg/s'),
'm01s26i004': CFName('water_flux_into_sea_water_from_rivers', None, 'kg m-2 s-1'),
'm01s26i006': CFName(None, 'Inland basin flow on trip grid', 'kg/s'),
'm01s30i003': CFName('upward_air_velocity', None, 'm s-1'),
'm01s30i004': CFName('air_temperature', None, 'K'),
'm01s30i005': CFName('specific_humidity', None, '1'),
'm01s30i007': CFName('specific_kinetic_energy_of_air', None, 'm2 s-2'),
'm01s30i008': CFName('lagrangian_tendency_of_air_pressure', None, 'Pa s-1'),
'm01s30i111': CFName('air_temperature', None, 'K'),
'm01s30i113': CFName('relative_humidity', None, '%'),
'm01s30i181': CFName(None, 'change_over_time_in_air_temperature', 'K'),
'm01s30i182': CFName(None, 'change_over_time_in_specific_humidity', 'kg kg-1'),
'm01s30i183': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_liquid_water_in_air', 'kg kg-1'),
'm01s30i184': CFName(None, 'change_over_time_in_mass_fraction_of_cloud_ice_in_air', 'kg kg-1'),
'm01s30i185': CFName(None, 'change_over_time_in_x_wind', 'm s-1'),
'm01s30i186': CFName(None, 'change_over_time_in_y_wind', 'm s-1'),
'm01s30i187': CFName(None, 'change_over_time_in_upward_air_velocity', 'm s-1'),
'm01s30i188': CFName('tendency_of_air_density', None, 'kg m-3 s-1'),
'm01s30i189': CFName(None, 'change_over_time_in_mass_fraction_of_rain_in_air', 'kg kg-1'),
'm01s30i191': CFName(None, 'change_over_time_in_mass_fraction_of_graupel_in_air', 'kg kg-1'),
'm01s30i192': CFName(None, 'change_over_time_in_cloud_volume_fraction_in_atmosphere_layer', '1'),
'm01s30i193': CFName(None, 'change_over_time_in_liquid_water_cloud_volume_fraction_in_atmosphere_layer', '1'),
'm01s30i194': CFName(None, 'change_over_time_in_ice_cloud_volume_fraction_in_atmosphere_layer', '1'),
'm01s30i195': CFName(None, 'change_over_time_in_humidity_mixing_ratio', 'kg kg-1'),
'm01s30i196': CFName(None, 'change_over_time_in_cloud_liquid_water_mixing_ratio', 'kg kg-1'),
'm01s30i197': CFName(None, 'change_over_time_in_cloud_ice_mixing_ratio', 'kg kg-1'),
'm01s30i198': CFName(None, 'change_over_time_in_rain_mixing_ratio', 'kg kg-1'),
'm01s30i199': CFName(None, 'change_over_time_in_graupel_mixing_ratio', 'kg kg-1'),
'm01s30i201': CFName('x_wind', None, 'm s-1'),
'm01s30i202': CFName('y_wind', None, 'm s-1'),
'm01s30i203': CFName('upward_air_velocity', None, 'm s-1'),
'm01s30i204': CFName('air_temperature', None, 'K'),
'm01s30i205': CFName('specific_humidity', None, '1'),
'm01s30i206': CFName('relative_humidity', None, '%'),
'm01s30i207': CFName('geopotential_height', None, 'm'),
'm01s30i208': CFName('lagrangian_tendency_of_air_pressure', None, 'Pa s-1'),
'm01s30i211': CFName('square_of_eastward_wind', None, 'm2 s-2'),
'm01s30i212': CFName('product_of_eastward_wind_and_northward_wind', None, 'm2 s-2'),
'm01s30i213': CFName('product_of_eastward_wind_and_upward_air_velocity', None, 'm2 s-2'),
'm01s30i214': CFName('product_of_eastward_wind_and_air_temperature', None, 'K m s-1'),
'm01s30i215': CFName('product_of_eastward_wind_and_specific_humidity', None, 'm s-1'),
'm01s30i217': CFName('product_of_eastward_wind_and_geopotential_height', None, 'm2 s-1'),
'm01s30i218': CFName(None, 'product_of_x_wind_and_omega', 'Pa m s-2'),
'm01s30i222': CFName('square_of_northward_wind', None, 'm2 s-2'),
'm01s30i223': CFName('product_of_northward_wind_and_upward_air_velocity', None, 'm2 s-2'),
'm01s30i224': CFName('product_of_northward_wind_and_air_temperature', None, 'K m s-1'),
'm01s30i225': CFName('product_of_northward_wind_and_specific_humidity', None, 'm s-1'),
'm01s30i227': CFName('product_of_northward_wind_and_geopotential_height', None, 'm2 s-1'),
'm01s30i228': CFName(None, 'product_of_y_wind_and_omega', 'Pa m s-2'),
'm01s30i233': CFName('square_of_upward_air_velocity', None, 'm2 s-2'),
'm01s30i234': CFName('product_of_upward_air_velocity_and_air_temperature', None, 'K m s-1'),
'm01s30i235': CFName('product_of_upward_air_velocity_and_specific_humidity', None, 'm s-1'),
'm01s30i244': CFName('square_of_air_temperature', None, 'K2'),
'm01s30i245': CFName('product_of_air_temperature_and_specific_humidity', None, 'K'),
'm01s30i248': CFName('product_of_air_temperature_and_omega', None, 'K Pa s-1'),
'm01s30i258': CFName('product_of_specific_humidity_and_omega', None, 'Pa s-1'),
'm01s30i277': CFName('square_of_geopotential_height', None, 'm2'),
'm01s30i278': CFName('product_of_geopotential_height_and_omega', None, 'Pa m s-1'),
'm01s30i288': CFName('square_of_lagrangian_tendency_of_air_pressure', None, 'Pa2 s-2'),
'm01s30i301': CFName(None, 'Heavyside function on pressure levels', '1'),
'm01s30i302': CFName('virtual_temperature', None, 'K'),
'm01s30i310': CFName('northward_transformed_eulerian_mean_air_velocity', None, 'm s-1'),
'm01s30i311': CFName('northward_transformed_eulerian_mean_air_velocity', None, 'm s-1'),
'm01s30i312': CFName('northward_eliassen_palm_flux_in_air', None, 'kg s-2'),
'm01s30i313': CFName('upward_eliassen_palm_flux_in_air', None, 'kg s-2'),
'm01s30i314': CFName('tendency_of_eastward_wind_due_to_eliassen_palm_flux_divergence', None, 'm s-2'),
'm01s30i401': CFName('atmosphere_kinetic_energy_content', None, 'J m-2'),
'm01s30i404': CFName('atmosphere_mass_per_unit_area', None, 'kg m-2'),
'm01s30i405': CFName('atmosphere_cloud_liquid_water_content', None, 'kg m-2'),
'm01s30i406': CFName('atmosphere_cloud_ice_content', None, 'kg m-2'),
'm01s30i417': CFName('surface_air_pressure', None, 'Pa'),
'm01s30i418': CFName('surface_air_pressure', None, 'Pa'),
'm01s30i451': CFName('tropopause_air_pressure', None, 'Pa'),
'm01s30i452': CFName('tropopause_air_temperature', None, 'K'),
'm01s30i453': CFName('tropopause_altitude', None, 'm'),
'm01s30i901': CFName(None, 'change_over_time_in_air_potential_temperature', 'K'),
'm01s30i902': CFName(None, 'change_over_time_in_virtual_potential_temperature', 'K'),
'm01s30i903': CFName(None, 'change_over_time_in_air_density', 'kg m-3'),
'm01s33i001': CFName('mole_fraction_of_ozone_in_air', None, 'mole mole-1'),
'm01s33i004': CFName(None, 'mole_fraction_of_nitrogen_trioxide_in_air', 'mole mole-1'),
'm01s33i005': CFName('mole_fraction_of_dinitrogen_pentoxide_in_air', None, 'mole mole-1'),
'm01s33i006': CFName('mole_fraction_of_peroxynitric_acid_in_air', None, 'mole mole-1'),
'm01s33i007': CFName('mole_fraction_of_chlorine_nitrate_in_air', None, 'mole mole-1'),
'm01s33i009': CFName('mole_fraction_of_methane_in_air', None, 'mole mole-1'),
'm01s33i041': CFName('mole_fraction_of_atomic_chlorine_in_air', None, '1'),
'm01s33i042': CFName('mole_fraction_of_chlorine_monoxide_in_air', None, '1'),
'm01s33i043': CFName('mole_fraction_of_dichlorine_peroxide_in_air', None, '1'),
'm01s33i044': CFName('mole_fraction_of_chlorine_dioxide_in_air', None, '1'),
'm01s33i047': CFName('mole_fraction_of_bromine_chloride_in_air', None, '1'),
'm01s33i048': CFName('mole_fraction_of_bromine_nitrate_in_air', None, '1'),
'm01s33i049': CFName('mole_fraction_of_nitrous_oxide_in_air', None, '1'),
'm01s33i051': CFName('mole_fraction_of_hypochlorous_acid_in_air', None, '1'),
'm01s33i054': CFName('mole_fraction_of_chlorine_nitrate_in_air', None, '1'),
'm01s33i055': CFName('mole_fraction_of_cfc11_in_air', None, '1'),
'm01s33i056': CFName('mole_fraction_of_cfc12_in_air', None, '1'),
'm01s33i058': CFName('mole_fraction_of_atomic_nitrogen_in_air', None, '1'),
'm01s33i150': CFName('age_of_stratospheric_air', None, 's'),
'm01s34i001': CFName('mass_fraction_of_ozone_in_air', None, 'kg kg-1'),
'm01s34i002': CFName('mass_fraction_of_nitrogen_monoxide_in_air', None, 'kg kg-1'),
'm01s34i003': CFName('mass_fraction_of_nitrate_radical_in_air', None, 'kg kg-1'),
'm01s34i004': CFName('mass_fraction_of_nitrogen_dioxide_in_air', None, 'kg kg-1'),
'm01s34i005': CFName('mass_fraction_of_dinitrogen_pentoxide_in_air', None, 'kg kg-1'),
'm01s34i006': CFName('mass_fraction_of_peroxynitric_acid_in_air', None, 'kg kg-1'),
'm01s34i007': CFName('mass_fraction_of_nitric_acid_in_air', None, 'kg kg-1'),
'm01s34i008': CFName('mass_fraction_of_hydrogen_peroxide_in_air', None, 'kg kg-1'),
'm01s34i009': CFName('mass_fraction_of_methane_in_air', None, 'kg kg-1'),
'm01s34i010': CFName('mass_fraction_of_carbon_monoxide_in_air', None, 'kg kg-1'),
'm01s34i011': CFName('mass_fraction_of_formaldehyde_in_air', None, 'kg kg-1'),
'm01s34i012': CFName('mass_fraction_of_methyl_hydroperoxide_in_air', None, 'kg kg-1'),
'm01s34i013': CFName('mass_fraction_of_nitrous_acid_in_air', None, 'kg kg-1'),
'm01s34i014': CFName('mass_fraction_of_ethane_in_air', None, 'kg kg-1'),
'm01s34i015': CFName(None, 'mass_fraction_of_ethyl_hydroperoxide_in_air', 'kg kg-1'),
'm01s34i016': CFName(None, 'mass_fraction_of_acetaldehyde_in_air', 'kg kg-1'),
'm01s34i017': CFName('mass_fraction_of_peroxyacetyl_nitrate_in_air', None, 'kg kg-1'),
'm01s34i018': CFName('mass_fraction_of_propane_in_air', None, 'kg kg-1'),
'm01s34i019': CFName(None, 'mass_fraction_of_n-propyl_hydroperoxide_in_air', 'kg kg-1'),
'm01s34i020': CFName(None, 'mass_fraction_of_i-propyl_hydroperoxide_in_air', 'kg kg-1'),
'm01s34i021': CFName(None, 'mass_fraction_of_propanal_in_air', 'kg kg-1'),
'm01s34i022': CFName(None, 'mass_fraction_of_acetone_in_air', 'kg kg-1'),
'm01s34i023': CFName(None, 'mass_fraction_of_acetonylhydroperoxide_in_air', 'kg kg-1'),
'm01s34i024': CFName(None, 'mass_fraction_of_peroxypropionyl_nitrate_in_air', 'kg kg-1'),
'm01s34i025': CFName(None, 'mass_fraction_of_methyl_nitrate_in_air', 'kg kg-1'),
'm01s34i026': CFName(None, 'mass_fraction_of_stratospheric_ozone_in_air', 'kg kg-1'),
'm01s34i027': CFName('mass_fraction_of_isoprene_in_air', None, 'kg kg-1'),
'm01s34i028': CFName(None, 'mass_fraction_of_isoprene_hydroperoxide_in_air', 'kg kg-1'),
'm01s34i030': CFName(None, 'mass_fraction_of_methacrolein_in_air', 'kg kg-1'),
'm01s34i031': CFName(None, 'mass_fraction_of_methacroyl_hydroperoxide_in_air', 'kg kg-1'),
'm01s34i032': CFName(None, 'mass_fraction_of_methacryloylperoxy_nitrate_in_air', 'kg kg-1'),
'm01s34i033': CFName(None, 'mass_fraction_of_hydroxyacetone_in_air', 'kg kg-1'),
'm01s34i034': CFName(None, 'mass_fraction_of_methlyglyoxal_in_air', 'kg kg-1'),
'm01s34i035': CFName(None, 'mass_fraction_of_second_generation_isoprene_nitrate_in_air', 'kg kg-1'),
'm01s34i036': CFName('mass_fraction_of_formic_acid_in_air', None, 'kg kg-1'),
'm01s34i037': CFName(None, 'mass_fraction_of_peracetic_acid_in_air', 'kg kg-1'),
'm01s34i038': CFName('mass_fraction_of_acetic_acid_in_air', None, 'kg kg-1'),
'm01s34i041': CFName('mass_fraction_of_atomic_chlorine_in_air', None, 'kg kg-1'),
'm01s34i042': CFName('mass_fraction_of_chlorine_monoxide_in_air', None, 'kg kg-1'),
'm01s34i043': CFName('mass_fraction_of_dichlorine_peroxide_in_air', None, 'kg kg-1'),
'm01s34i044': CFName('mass_fraction_of_chlorine_dioxide_in_air', None, 'kg kg-1'),
'm01s34i045': CFName('mass_fraction_of_atomic_bromine_in_air', None, 'kg kg-1'),
'm01s34i047': CFName('mass_fraction_of_bromine_chloride_in_air', None, 'kg kg-1'),
'm01s34i048': CFName('mass_fraction_of_bromine_nitrate_in_air', None, 'kg kg-1'),
'm01s34i049': CFName('mass_fraction_of_nitrous_oxide_in_air', None, 'kg kg-1'),
'm01s34i051': CFName('mass_fraction_of_hypochlorous_acid_in_air', None, 'kg kg-1'),
'm01s34i052': CFName('mass_fraction_of_hydrogen_bromide_in_air', None, 'kg kg-1'),
'm01s34i053': CFName('mole_fraction_of_hypobromous_acid_in_air', None, 'kg kg-1'),
'm01s34i054': CFName('mass_fraction_of_chlorine_nitrate_in_air', None, 'kg kg-1'),
'm01s34i055': CFName('mass_fraction_of_cfc11_in_air', None, 'kg kg-1'),
'm01s34i056': CFName('mass_fraction_of_cfc12_in_air', None, 'kg kg-1'),
'm01s34i057': CFName('mass_fraction_of_methyl_bromide_in_air', None, 'kg kg-1'),
'm01s34i058': CFName('mass_fraction_of_atomic_nitrogen_in_air', None, 'kg kg-1'),
'm01s34i059': CFName(None, 'mass_fraction_of_ground_state_atomic_oxygen_in_air', 'kg kg-1'),
'm01s34i070': CFName('mass_fraction_of_molecular_hydrogen_in_air', None, 'kg kg-1'),
'm01s34i071': CFName('mass_fraction_of_dimethyl_sulfide_in_air', None, 'kg kg-1'),
'm01s34i072': CFName('mass_fraction_of_sulfur_dioxide_in_air', None, 'kg kg-1'),
'm01s34i073': CFName('mass_fraction_of_sulfuric_acid_in_air', None, 'kg kg-1'),
'm01s34i074': CFName(None, 'mass_fraction_of_methanesulfonic_acid_in_air', 'kg kg-1'),
'm01s34i075': CFName(None, 'mass_fraction_of_dimethyl_sulfoxide', 'kg kg-1'),
'm01s34i076': CFName('mass_fraction_of_ammonia_in_air', None, 'kg kg-1'),
'm01s34i077': CFName(None, 'mass_fraction_of_carbon_disulfide_in_air', 'kg kg-1'),
'm01s34i078': CFName(None, 'mass_fraction_of_carbonyl_sulfide_in_air', 'kg kg-1'),
'm01s34i079': CFName(None, 'mass_fraction_of_hydrogen_sulfide_in_air', 'kg kg-1'),
'm01s34i080': CFName(None, 'mass_fraction_of_atomic_hydrogen_in_air', 'kg kg-1'),
'm01s34i081': CFName('mass_fraction_of_hydroxyl_radical_in_air', None, 'kg kg-1'),
'm01s34i082': CFName('mass_fraction_of_hydroperoxyl_radical_in_air', None, 'kg kg-1'),
'm01s34i083': CFName('mass_fraction_of_methyl_peroxy_radical_in_air', None, 'kg kg-1'),
'm01s34i084': CFName(None, 'mass_fraction_of_ethyl_peroxy_radical_in_air', 'kg kg-1'),
'm01s34i085': CFName(None, 'mass_fraction_of_peroxyacetyl_radical_in_air', 'kg kg-1'),
'm01s34i086': CFName(None, 'mass_fraction_of_n-propylperoxy_radical_in_air', 'kg kg-1'),
'm01s34i087': CFName(None, 'mass_fraction_of_isopropylperoxy_radical_in_air', 'kg kg-1'),
'm01s34i088': CFName(None, 'mass_fraction_of_peroxypropanoyl_radical_in_air', 'kg kg-1'),
'm01s34i089': CFName(None, 'mass_fraction_of_acetonyl_peroxy_radical_in_air', 'kg kg-1'),
'm01s34i093': CFName('mass_fraction_of_propene_in_air', None, 'kg kg-1'),
'm01s34i096': CFName(None, 'mass_fraction_of_methyl_ethyl_ketone_in_air', 'kg kg-1'),
'm01s34i097': CFName('mass_fraction_of_toluene_in_air', None, 'kg kg-1'),
'm01s34i100': CFName(None, 'mass_fraction_of_lumped_chlorine_expressed_as_hydrogen_chloride', 'kg kg-1'),
'm01s34i101': CFName(None, 'number_of_particles_per_air_molecule_of_soluble_nucleation_mode_aerosol_in_air', '1'),
'm01s34i102': CFName(None, 'mass_fraction_of_sulfuric_acid_in_soluble_nucleation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i103': CFName(None, 'number_of_particles_per_air_molecule_of_soluble_aitken_mode_aerosol_in_air', '1'),
'm01s34i104': CFName(None, 'mass_fraction_of_sulfuric_acid_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i105': CFName(None, 'mass_fraction_of_black_carbon_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i106': CFName(None, 'mass_fraction_of_particulate_organic_matter_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i107': CFName(None, 'number_of_particles_per_air_molecule_of_soluble_accumulation_mode_aerosol_in_air', '1'),
'm01s34i108': CFName(None, 'mass_fraction_of_sulfuric_acid_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i109': CFName(None, 'mass_fraction_of_black_carbon_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i110': CFName(None, 'mass_fraction_of_particulate_organic_matter_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i111': CFName(None, 'mass_fraction_of_seasalt_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i112': CFName(None, 'mass_fraction_of_dust_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i113': CFName(None, 'number_of_particles_per_air_molecule_of_soluble_coarse_mode_aerosol_in_air', '1'),
'm01s34i114': CFName(None, 'mass_fraction_of_sulfuric_acid_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i115': CFName(None, 'mass_fraction_of_black_carbon_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i116': CFName(None, 'mass_fraction_of_particulate_organic_matter_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i117': CFName(None, 'mass_fraction_of_seasalt_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i118': CFName(None, 'mass_fraction_of_dust_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i119': CFName(None, 'number_of_particles_per_air_molecule_of_insoluble_aitken_mode_aerosol_in_air', '1'),
'm01s34i120': CFName(None, 'mass_fraction_of_black_carbon_in_insoluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i121': CFName(None, 'mass_fraction_of_particulate_organic_matter_in_insoluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i122': CFName(None, 'number_of_particles_per_air_molecule_of_insoluble_accumulation_mode_aerosol_in_air', '1'),
'm01s34i123': CFName(None, 'mass_fraction_of_dust_in_insoluble_accumulation_mode_aerosol_in_air', 'kg kg-1'),
'm01s34i124': CFName(None, 'number_of_particles_per_air_molecule_of_insoluble_coarse_mode_aerosol_in_air', '1'),
'm01s34i125': CFName(None, 'mass_fraction_of_dust_in_insoluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i126': CFName(None, 'mass_fraction_of_particulate_organic_matter_in_soluble_nucleation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i127': CFName(None, 'mass_fraction_of_seasalt_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i128': CFName(None, 'mass_fraction_of_secondary_particulate_organic_matter_in_soluble_nucleation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i129': CFName(None, 'mass_fraction_of_secondary_particulate_organic_matter_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i130': CFName(None, 'mass_fraction_of_secondary_particulate_organic_matter_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i131': CFName(None, 'mass_fraction_of_secondary_particulate_organic_matter_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i132': CFName(None, 'mass_fraction_of_ammonium_in_soluble_nucleation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i133': CFName(None, 'mass_fraction_of_ammonium_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i134': CFName(None, 'mass_fraction_of_ammonium_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i135': CFName(None, 'mass_fraction_of_ammonium_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i136': CFName(None, 'mass_fraction_of_nitrate_in_soluble_nucleation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i137': CFName(None, 'mass_fraction_of_nitrate_in_soluble_aitken_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i138': CFName(None, 'mass_fraction_of_nitrate_in_soluble_accumulation_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i139': CFName(None, 'mass_fraction_of_nitrate_in_soluble_coarse_mode_dry_aerosol_in_air', 'kg kg-1'),
'm01s34i150': CFName('age_of_stratospheric_air', None, 's'),
'm01s34i159': CFName('equivalent_thickness_at_stp_of_atmosphere_ozone_content', None, 'DU'),
'm01s35i003': CFName(None, 'change_over_time_in_x_wind_due_to_stochastic_kinetic_energy_backscatter', 'm s-1'),
'm01s35i004': CFName(None, 'change_over_time_in_y_wind_due_to_stochastic_kinetic_energy_backscatter', 'm s-1'),
'm01s35i024': CFName(None, 'change_over_time_in_air_potential_temperature_due_to_stochastic_perturbation_of_tendencies', 'K'),
'm01s35i025': CFName(None, 'change_over_time_in_specific_humidity_due_to_stochastic_perturbation_of_tendencies', 'kg kg-1'),
'm01s35i026': CFName(None, 'change_over_time_in_x_wind_due_to_stochastic_perturbation_of_tendencies', 'm s-1'),
'm01s35i027': CFName(None, 'change_over_time_in_y_wind_due_to_stochastic_perturbation_of_tendencies', 'm s-1'),
'm01s35i029': CFName(None, 'change_over_time_in_air_temperature_due_to_stochastic_perturbation_of_tendencies', 'K'),
'm01s50i228': CFName('photolysis_rate_of_ozone_to_1D_oxygen_atom', None, 's-1'),
'm01s50i229': CFName('photolysis_rate_of_nitrogen_dioxide', None, 's-1'),
'm01s50i230': CFName('mass_concentration_of_nmvoc_expressed_as_carbon_in_air', None, 'ug m-3'),
'm02s00i101': CFName('sea_water_potential_temperature', None, 'degC'),
'm02s00i102': CFName('sea_water_salinity', None, '1e3 @0.035'),
'm02s00i121': CFName('baroclinic_eastward_sea_water_velocity', None, 'cm s-1'),
'm02s00i122': CFName('baroclinic_northward_sea_water_velocity', None, 'cm s-1'),
'm02s00i130': CFName('ocean_barotropic_streamfunction', None, 'cm3 s-1'),
'm02s00i131': CFName('ocean_barotropic_streamfunction', None, 'cm3 s-1'),
'm02s00i132': CFName('tendency_of_ocean_barotropic_streamfunction', None, 'cm3 s-2'),
'm02s00i133': CFName('tendency_of_ocean_barotropic_streamfunction', None, 'cm3 s-2'),
'm02s00i134': CFName('surface_air_pressure', None, 'g cm-1 s-2'),
'm02s00i135': CFName('barotropic_eastward_sea_water_velocity', None, 'cm s-1'),
'm02s00i136': CFName('barotropic_northward_sea_water_velocity', None, 'cm s-1'),
'm02s00i137': CFName('ocean_mixed_layer_thickness', None, 'm'),
'm02s00i139': CFName('downward_eastward_stress_at_sea_ice_base', None, 'Pa'),
'm02s00i140': CFName('downward_northward_stress_at_sea_ice_base', None, 'Pa'),
'm02s00i141': CFName('surface_snow_thickness', None, 'm'),
'm02s00i143': CFName('upward_sea_ice_basal_heat_flux', None, 'W m-2'),
'm02s00i146': CFName('sea_ice_area_fraction', None, '1'),
'm02s00i147': CFName('sea_ice_thickness', None, 'm'),
'm02s00i148': CFName('eastward_sea_ice_velocity', None, 'm s-1'),
'm02s00i149': CFName('northward_sea_ice_velocity', None, 'm s-1'),
'm02s00i150': CFName('surface_downward_eastward_stress', None, 'Pa'),
'm02s00i151': CFName('surface_downward_northward_stress', None, 'Pa'),
'm02s00i152': CFName('wind_mixing_energy_flux_into_sea_water', None, 'W m-2'),
'm02s00i166': CFName('water_flux_into_sea_water_from_rivers', None, 'kg m-2 s-1'),
'm02s00i171': CFName('snowfall_flux', None, 'kg m-2 s-1'),
'm02s00i172': CFName('surface_snow_and_ice_sublimation_flux', None, 'kg m-2 s-1'),
'm02s00i180': CFName('sea_surface_temperature', None, 'K'),
'm02s00i181': CFName('sea_surface_salinity', None, '1e3 @0.035'),
'm02s00i182': CFName('air_temperature', None, 'K'),
'm02s00i183': CFName('sea_ice_thickness', None, 'm'),
'm02s00i185': CFName('heat_flux_correction', None, 'W m-2'),
'm02s00i186': CFName('water_flux_correction', None, 'kg m-2 s-1'),
'm02s00i190': CFName('surface_snow_and_ice_melt_heat_flux', None, 'W m-2'),
'm02s00i191': CFName('downward_heat_flux_in_sea_ice', None, 'W m-2'),
'm02s00i192': CFName('water_flux_into_sea_water_due_to_sea_ice_thermodynamics', None, 'kg m-2 s-1'),
'm02s30i201': CFName('upward_sea_water_velocity', None, 'cm s-1'),
'm02s30i202': CFName('ocean_mixed_layer_thickness', None, 'm'),
'm02s30i211': CFName('northward_ocean_heat_transport', None, 'PW'),
'm02s30i212': CFName('northward_ocean_salt_transport', None, '1e7kg s-1'),
'm02s30i320': CFName('eastward_sea_water_velocity', None, 'cm s-1'),
'm02s30i321': CFName('northward_sea_water_velocity', None, 'cm s-1'),
'm02s30i324': CFName('ocean_mixed_layer_thickness', None, 'm'),
'm02s30i406': CFName(None, 'mole_concentration_of_dimethyl_sulphide_in_seawater', 'mol m-3'),
'm02s32i201': CFName('tendency_of_sea_ice_area_fraction_due_to_dynamics', None, 's-1'),
'm02s32i202': CFName('tendency_of_sea_ice_thickness_due_to_dynamics', None, 'm s-1'),
'm02s32i209': CFName('eastward_sea_ice_velocity', None, 'm s-1'),
'm02s32i210': CFName('northward_sea_ice_velocity', None, 'm s-1'),
'm02s32i211': CFName('tendency_of_sea_ice_area_fraction_due_to_thermodynamics', None, 's-1'),
'm02s32i212': CFName('tendency_of_sea_ice_thickness_due_to_thermodynamics', None, 'm s-1'),
'm02s32i215': CFName('snowfall_flux', None, 'kg m-2 s-1'),
'm02s32i219': CFName('downward_eastward_stress_at_sea_ice_base', None, 'Pa'),
'm02s32i220': CFName('downward_northward_stress_at_sea_ice_base', None, 'Pa'),
'm03s00i177': CFName(None, 'prescribed_heat_flux_into_slab_ocean', 'W m-2'),
'm04s06i001': CFName('sea_surface_wind_wave_significant_height', None, 'm'),
}
# Maps a UM STASH code (msii string form) to the height, in metres, that is
# implied by the diagnostic but not recorded in the field itself, as a
# 1-tuple.  E.g. the 10.0 entries look like 10 m wind/gust diagnostics and
# the 1.5 entries like screen-level temperature/humidity diagnostics —
# presumed from the stash sections; confirm against the UM STASH tables.
STASHCODE_IMPLIED_HEIGHTS = {
    'm01s03i209': (10.0,),
    'm01s03i210': (10.0,),
    'm01s03i225': (10.0,),
    'm01s03i226': (10.0,),
    'm01s03i227': (10.0,),
    'm01s03i230': (10.0,),
    'm01s03i236': (1.5,),
    'm01s03i237': (1.5,),
    'm01s03i245': (1.5,),
    'm01s03i247': (1.5,),
    'm01s03i250': (1.5,),
    'm01s03i281': (1.5,),
    'm01s03i365': (10.0,),
    'm01s03i366': (10.0,),
    'm01s03i463': (10.0,),
    'm01s15i212': (50.0,),
    'm01s15i213': (50.0,),
    }
# Maps a CFName (standard_name, long_name, units triple) to the PP/UM LBFC
# field code to use when saving.  Note the mapping is many-to-one: several
# CF names deliberately share an LBFC (e.g. the mole_fraction_* tracers all
# map to 501, and air_temperature variants map to 16).
CF_TO_LBFC = {
    CFName(None, 'stratiform_snowfall_rate', 'kg m-2 s-1'): 118,
    CFName('age_of_stratospheric_air', None, '1'): 501,
    CFName('air_density', None, 'kg m-3'): 27,
    CFName('air_potential_temperature', None, 'K'): 19,
    CFName('air_pressure', None, 'Pa'): 8,
    CFName('air_pressure_at_freezing_level', None, 'Pa'): 8,
    CFName('air_pressure_at_sea_level', None, 'Pa'): 8,
    CFName('air_temperature', None, 'K'): 16,
    CFName('atmosphere_boundary_layer_thickness', None, 'm'): 5,
    CFName('atmosphere_eastward_stress_due_to_gravity_wave_drag', None, 'Pa'): 61,
    CFName('atmosphere_kinetic_energy_content', None, 'J m-2'): 63,
    CFName('atmosphere_northward_stress_due_to_gravity_wave_drag', None, 'Pa'): 62,
    CFName('atmosphere_relative_vorticity', None, 's-1'): 73,
    CFName('cloud_area_fraction', None, '1'): 30,
    CFName('cloud_area_fraction_in_atmosphere_layer', None, '1'): 1720,
    CFName('convective_cloud_area_fraction', None, '1'): 34,
    CFName('convective_rainfall_amount', None, 'kg m-2'): 94,
    CFName('convective_snowfall_amount', None, 'kg m-2'): 117,
    CFName('dimensionless_exner_function', None, '1'): 7,
    CFName('divergence_of_wind', None, 's-1'): 74,
    CFName('downward_heat_flux_in_sea_ice', None, 'W m-2'): 261,
    CFName('downward_heat_flux_in_soil', None, 'W m-2'): 1564,
    CFName('eastward_wind', None, 'm s-1'): 56,
    CFName('ertel_potential_vorticity', None, 'K m2 kg-1 s-1'): 82,
    CFName('geopotential_height', None, 'm'): 1,
    CFName('lagrangian_tendency_of_air_pressure', None, 'Pa s-1'): 40,
    CFName('land_binary_mask', None, '1'): 395,
    CFName('large_scale_rainfall_rate', None, 'm s-1'): 99,
    CFName('mass_fraction_of_carbon_dioxide_in_air', None, '1'): 1564,
    CFName('mass_fraction_of_cloud_liquid_water_in_air', None, '1'): 79,
    CFName('mass_fraction_of_dimethyl_sulfide_in_air', None, '1'): 1373,
    CFName('mass_fraction_of_frozen_water_in_soil_moisture', None, '1'): 1386,
    CFName('mass_fraction_of_ozone_in_air', None, '1'): 453,
    CFName('mass_fraction_of_sulfur_dioxide_in_air', None, '1'): 1374,
    CFName('mass_fraction_of_unfrozen_water_in_soil_moisture', None, '1'): 1385,
    CFName('moisture_content_of_soil_layer', None, 'kg m-2'): 122,
    CFName('mole_fraction_of_atomic_chlorine_in_air', None, '1'): 501,
    CFName('mole_fraction_of_atomic_nitrogen_in_air', None, '1'): 501,
    CFName('mole_fraction_of_bromine_chloride_in_air', None, '1'): 501,
    CFName('mole_fraction_of_bromine_nitrate_in_air', None, '1'): 501,
    CFName('mole_fraction_of_cfc11_in_air', None, '1'): 501,
    CFName('mole_fraction_of_cfc12_in_air', None, '1'): 501,
    CFName('mole_fraction_of_chlorine_dioxide_in_air', None, '1'): 501,
    CFName('mole_fraction_of_chlorine_monoxide_in_air', None, '1'): 501,
    CFName('mole_fraction_of_chlorine_nitrate_in_air', None, '1'): 501,
    CFName('mole_fraction_of_dichlorine_peroxide_in_air', None, '1'): 501,
    CFName('mole_fraction_of_hypochlorous_acid_in_air', None, '1'): 501,
    CFName('mole_fraction_of_nitrous_oxide_in_air', None, '1'): 501,
    CFName('northward_wind', None, 'm s-1'): 57,
    CFName('rainfall_flux', None, 'kg m-2 s-1'): 97,
    CFName('relative_humidity', None, '%'): 88,
    CFName('root_depth', None, 'm'): 321,
    CFName('sea_ice_albedo', None, '1'): 322,
    CFName('sea_ice_area_fraction', None, '1'): 37,
    CFName('sea_ice_temperature', None, 'K'): 209,
    CFName('sea_ice_thickness', None, 'm'): 687,
    CFName('sea_surface_elevation', None, 'm'): 608,
    CFName('snow_grain_size', None, '1e-6 m'): 1507,
    CFName('snowfall_amount', None, 'kg m-2'): 93,
    CFName('snowfall_flux', None, 'kg m-2 s-1'): 108,
    CFName('soil_albedo', None, '1'): 1395,
    CFName('soil_carbon_content', None, 'kg m-2'): 1397,
    CFName('soil_hydraulic_conductivity_at_saturation', None, 'm s-1'): 333,
    CFName('soil_moisture_content_at_field_capacity', None, 'kg m-2'): 1559,
    CFName('soil_porosity', None, '1'): 332,
    CFName('soil_suction_at_saturation', None, 'Pa'): 342,
    CFName('soil_temperature', None, 'K'): 23,
    CFName('soil_thermal_capacity', None, 'J kg-1 K-1'): 335,
    CFName('soil_thermal_conductivity', None, 'W m-1 K-1'): 336,
    CFName('specific_kinetic_energy_of_air', None, 'm2 s-2'): 60,
    CFName('stratiform_cloud_area_fraction_in_atmosphere_layer', None, '1'): 220,
    CFName('stratiform_rainfall_amount', None, 'kg m-2'): 102,
    CFName('stratiform_rainfall_rate', None, 'kg m-2 s-1'): 99,
    CFName('stratiform_snowfall_amount', None, 'kg m-2'): 116,
    CFName('subsurface_runoff_amount', None, 'kg m-2'): 112,
    CFName('subsurface_runoff_flux', None, 'kg m-2 s-1'): 1533,
    CFName('surface_albedo_assuming_deep_snow', None, '1'): 328,
    CFName('surface_albedo_assuming_no_snow', None, '1'): 322,
    CFName('surface_altitude', None, 'm'): 1,
    CFName('surface_downwelling_shortwave_flux_in_air', None, 'W m-2'): 203,
    CFName('surface_downwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'): 208,
    CFName('surface_eastward_sea_water_velocity', None, 'm s-1'): 701,
    CFName('surface_net_downward_longwave_flux', None, 'W m-2'): 187,
    CFName('surface_net_downward_shortwave_flux', None, 'W m-2'): 186,
    CFName('surface_northward_sea_water_velocity', None, 'm s-1'): 702,
    CFName('surface_roughness_length', None, 'm'): 324,
    CFName('surface_runoff_amount', None, 'kg m-2'): 111,
    CFName('surface_runoff_flux', None, 'kg m-2 s-1'): 1532,
    CFName('surface_snow_amount', None, 'kg m-2'): 93,
    CFName('surface_temperature', None, 'K'): 16,
    CFName('surface_upward_sensible_heat_flux', None, 'W m-2'): 178,
    CFName('surface_upward_water_flux', None, 'kg m-2 s-1'): 184,
    CFName('surface_upwelling_shortwave_flux_in_air_assuming_clear_sky', None, 'W m-2'): 207,
    CFName('tendency_of_air_density', None, 'kg m-3 s-1'): 7,
    CFName('tendency_of_air_temperature', None, 'K s-1'): 16,
    CFName('tendency_of_air_temperature_due_to_diffusion', None, 'K s-1'): 16,
    CFName('tendency_of_air_temperature_due_to_longwave_heating', None, 'K s-1'): 253,
    CFName('tendency_of_eastward_wind', None, 'm s-1'): 56,
    CFName('tendency_of_eastward_wind_due_to_diffusion', None, 'm s-1'): 56,
    CFName('tendency_of_mass_fraction_of_cloud_ice_in_air', None, 's-1'): 78,
    CFName('tendency_of_mass_fraction_of_cloud_liquid_water_in_air', None, 's-1'): 79,
    CFName('tendency_of_northward_wind', None, 'm s-1'): 57,
    CFName('tendency_of_northward_wind_due_to_diffusion', None, 'm s-1'): 57,
    CFName('tendency_of_specific_humidity', None, 's-1'): 95,
    CFName('tendency_of_specific_humidity_due_to_diffusion', None, 's-1'): 95,
    CFName('tendency_of_upward_air_velocity', None, 'm s-1'): 42,
    CFName('toa_incoming_shortwave_flux', None, 'W m-2'): 200,
    CFName('toa_outgoing_longwave_flux', None, 'W m-2'): 206,
    CFName('toa_outgoing_longwave_flux_assuming_clear_sky', None, 'W m-2'): 210,
    CFName('toa_outgoing_shortwave_flux', None, 'W m-2'): 201,
    CFName('toa_outgoing_shortwave_flux_assuming_clear_sky', None, 'W m-2'): 207,
    CFName('tropopause_air_pressure', None, 'Pa'): 8,
    CFName('tropopause_air_temperature', None, 'K'): 16,
    CFName('tropopause_altitude', None, 'm'): 1,
    CFName('upward_air_velocity', None, 'm s-1'): 42,
    CFName('vegetation_area_fraction', None, '1'): 326,
    CFName('virtual_temperature', None, 'K'): 16,
    CFName('volume_fraction_of_condensed_water_in_soil_at_critical_point', None, '1'): 330,
    CFName('volume_fraction_of_condensed_water_in_soil_at_wilting_point', None, '1'): 329,
    CFName('water_potential_evaporation_flux', None, 'kg m-2 s-1'): 115,
    CFName('wind_mixing_energy_flux_into_sea_water', None, 'W m-2'): 182,
    CFName('wind_speed', None, 'm s-1'): 50,
    CFName('x_wind', None, 'm s-1'): 56,
    CFName('y_wind', None, 'm s-1'): 57,
    }
"""Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
# Supported year range for date/datetime, and the largest ordinal value.
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# Both tables use a None placeholder at index 0 so months index 1..12.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# _DAYS_BEFORE_MONTH[m] = days in a non-leap year before month m starts.
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim  # keep the module namespace free of the loop temporaries
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
    """Number of days in *month* (1..12) of *year*, honouring leap years."""
    assert 1 <= month <= 12, month
    if month != 2 or not _is_leap(year):
        return _DAYS_IN_MONTH[month]
    return 29
def _days_before_month(year, month):
    """Days of *year* that precede the first day of *month*."""
    assert 1 <= month <= 12, 'month must be in 1..12'
    leap_adjust = 1 if (month > 2 and _is_leap(year)) else 0
    return _DAYS_BEFORE_MONTH[month] + leap_adjust
def _ymd2ord(year, month, day):
    """Proleptic Gregorian ordinal of the date; 01-Jan-0001 is day 1."""
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    ordinal = _days_before_year(year)
    ordinal += _days_before_month(year, month)
    return ordinal + day
# Precomputed cycle lengths used by _ord2ymd below.
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # "    "    "    " 100 "
_DI4Y = _days_before_year(5) # "    "    "    "   4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
    """ordinal -> (year, month, day), considering 01-Jan-0001 as day 1.

    Inverse of _ymd2ord: decomposes the ordinal via nested 400/100/4/1-year
    cycles, then locates the month with an estimate that is at most one too
    large.
    """
    # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
    # repeats exactly every 400 years. The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n. Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    # D M Y n n-1
    # -- --- ---- ---------- ----------------
    # 31 Dec -400 -_DI400Y -_DI400Y -1
    # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
    # ...
    # 30 Dec 000 -1 -2
    # 31 Dec 000 0 -1
    # 1 Jan 001 1 0 400-year boundary
    # 2 Jan 001 2 1
    # 3 Jan 001 3 2
    # ...
    # 31 Dec 400 _DI400Y _DI400Y -1
    # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1 # ..., -399, 1, 401, ...
    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date. Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4! In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)
    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)
    # And now how many single years. Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)
    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        # Dec 31 of a 4- or 400-year cycle: the additions above overshot
        # the year by one.
        assert n == 0
        return year-1, 12, 31
    # Now the year is correct, and n is the offset from January 1. We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n: # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)
    # Now the year and month are correct, and n is the offset from the
    # start of that month: we're done!
    return year, month, n+1
# Month and day names (English abbreviations, index 0 unused so values are
# 1-based). For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    """Assemble a time.struct_time from broken-down date/time values."""
    weekday = (_ymd2ord(y, m, d) + 6) % 7
    yearday = _days_before_month(y, m) + d
    fields = (y, m, d, hh, mm, ss, weekday, yearday, dstflag)
    return _time.struct_time(fields)
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
    """Expand %f, %z and %Z in *format*, then delegate to time.strftime.

    The platform strftime knows nothing about microseconds or tzinfo, so
    those escapes are replaced with literal text before the call.
    """
    # Don't call utcoffset() or tzname() unless actually needed.
    freplace = None # the string to use for %f
    zreplace = None # the string to use for %z
    Zreplace = None # the string to use for %Z
    # Scan format for %z and %Z escapes, replacing as needed.
    newformat = []
    push = newformat.append
    i, n = 0, len(format)
    while i < n:
        ch = format[i]
        i += 1
        if ch == '%':
            if i < n:
                ch = format[i]
                i += 1
                if ch == 'f':
                    # %f: microseconds, zero-padded to 6 digits.
                    if freplace is None:
                        freplace = '%06d' % getattr(object,
                                                    'microsecond', 0)
                    newformat.append(freplace)
                elif ch == 'z':
                    # %z: UTC offset as +HHMM/-HHMM, empty when naive.
                    if zreplace is None:
                        zreplace = ""
                        if hasattr(object, "utcoffset"):
                            offset = object.utcoffset()
                            if offset is not None:
                                sign = '+'
                                if offset.days < 0:
                                    offset = -offset
                                    sign = '-'
                                h, m = divmod(offset, timedelta(hours=1))
                                assert not m % timedelta(minutes=1), "whole minute"
                                m //= timedelta(minutes=1)
                                zreplace = '%c%02d%02d' % (sign, h, m)
                    assert '%' not in zreplace
                    newformat.append(zreplace)
                elif ch == 'Z':
                    # %Z: time zone name, empty when naive.
                    if Zreplace is None:
                        Zreplace = ""
                        if hasattr(object, "tzname"):
                            s = object.tzname()
                            if s is not None:
                                # strftime is going to have at this: escape %
                                Zreplace = s.replace('%', '%%')
                    newformat.append(Zreplace)
                else:
                    # Any other escape passes through untouched.
                    push('%')
                    push(ch)
            else:
                # Trailing lone '%' at end of format.
                push('%')
        else:
            push(ch)
    newformat = "".join(newformat)
    return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
def _check_date_fields(year, month, day):
    """Validate date components; raise TypeError/ValueError on bad values."""
    if not isinstance(year, int):
        raise TypeError('int expected')
    year_ok = MINYEAR <= year <= MAXYEAR
    if not year_ok:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    month_ok = 1 <= month <= 12
    if not month_ok:
        raise ValueError('month must be in 1..12', month)
    month_length = _days_in_month(year, month)
    if not 1 <= day <= month_length:
        raise ValueError('day must be in 1..%d' % month_length, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
    """Represent the difference between two datetime objects.

    Supported operators:
    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds), normalized so that
    0 <= seconds < 24*3600 and 0 <= microseconds < 10**6 (only days may
    be negative).
    """
    __slots__ = '_days', '_seconds', '_microseconds'
    def __new__(cls, days=0, seconds=0, microseconds=0,
                milliseconds=0, minutes=0, hours=0, weeks=0):
        # Doing this efficiently and accurately in C is going to be difficult
        # and error-prone, due to ubiquitous overflow possibilities, and that
        # C double doesn't have enough bits of precision to represent
        # microseconds over 10K years faithfully. The code here tries to make
        # explicit where go-fast assumptions can be relied on, in order to
        # guide the C implementation; it's way more convoluted than speed-
        # ignoring auto-overflow-to-long idiomatic Python could be.
        # XXX Check that all inputs are ints or floats.
        # Final values, all integer.
        # s and us fit in 32-bit signed ints; d isn't bounded.
        d = s = us = 0
        # Normalize everything to days, seconds, microseconds.
        days += weeks*7
        seconds += minutes*60 + hours*3600
        microseconds += milliseconds*1000
        # Get rid of all fractions, and normalize s and us.
        # Take a deep breath <wink>.
        if isinstance(days, float):
            # Split fractional days into whole days plus fractional seconds.
            dayfrac, days = _math.modf(days)
            daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
            assert daysecondswhole == int(daysecondswhole) # can't overflow
            s = int(daysecondswhole)
            assert days == int(days)
            d = int(days)
        else:
            daysecondsfrac = 0.0
            d = days
        assert isinstance(daysecondsfrac, float)
        assert abs(daysecondsfrac) <= 1.0
        assert isinstance(d, int)
        assert abs(s) <= 24 * 3600
        # days isn't referenced again before redefinition
        if isinstance(seconds, float):
            secondsfrac, seconds = _math.modf(seconds)
            assert seconds == int(seconds)
            seconds = int(seconds)
            secondsfrac += daysecondsfrac
            assert abs(secondsfrac) <= 2.0
        else:
            secondsfrac = daysecondsfrac
        # daysecondsfrac isn't referenced again
        assert isinstance(secondsfrac, float)
        assert abs(secondsfrac) <= 2.0
        assert isinstance(seconds, int)
        days, seconds = divmod(seconds, 24*3600)
        d += days
        s += int(seconds) # can't overflow
        assert isinstance(s, int)
        assert abs(s) <= 2 * 24 * 3600
        # seconds isn't referenced again before redefinition
        usdouble = secondsfrac * 1e6
        assert abs(usdouble) < 2.1e6 # exact value not critical
        # secondsfrac isn't referenced again
        if isinstance(microseconds, float):
            microseconds += usdouble
            microseconds = round(microseconds, 0)
            seconds, microseconds = divmod(microseconds, 1e6)
            assert microseconds == int(microseconds)
            assert seconds == int(seconds)
            days, seconds = divmod(seconds, 24.*3600.)
            assert days == int(days)
            assert seconds == int(seconds)
            d += int(days)
            s += int(seconds) # can't overflow
            assert isinstance(s, int)
            assert abs(s) <= 3 * 24 * 3600
        else:
            seconds, microseconds = divmod(microseconds, 1000000)
            days, seconds = divmod(seconds, 24*3600)
            d += days
            s += int(seconds) # can't overflow
            assert isinstance(s, int)
            assert abs(s) <= 3 * 24 * 3600
            microseconds = float(microseconds)
            microseconds += usdouble
            microseconds = round(microseconds, 0)
        assert abs(s) <= 3 * 24 * 3600
        assert abs(microseconds) < 3.1e6
        # Just a little bit of carrying possible for microseconds and seconds.
        assert isinstance(microseconds, float)
        assert int(microseconds) == microseconds
        us = int(microseconds)
        seconds, us = divmod(us, 1000000)
        s += seconds # can't overflow
        assert isinstance(s, int)
        days, s = divmod(s, 24*3600)
        d += days
        # Normalization invariants established above:
        assert isinstance(d, int)
        assert isinstance(s, int) and 0 <= s < 24*3600
        assert isinstance(us, int) and 0 <= us < 1000000
        self = object.__new__(cls)
        self._days = d
        self._seconds = s
        self._microseconds = us
        if abs(d) > 999999999:
            raise OverflowError("timedelta # of days is too large: %d" % d)
        return self
    def __repr__(self):
        # Omit trailing zero fields, matching the constructor's positionals.
        if self._microseconds:
            return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
                                       self._days,
                                       self._seconds,
                                       self._microseconds)
        if self._seconds:
            return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
                                   self._days,
                                   self._seconds)
        return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
    def __str__(self):
        # Format as "[D day[s], ]H:MM:SS[.ffffff]".
        mm, ss = divmod(self._seconds, 60)
        hh, mm = divmod(mm, 60)
        s = "%d:%02d:%02d" % (hh, mm, ss)
        if self._days:
            def plural(n):
                return n, abs(n) != 1 and "s" or ""
            s = ("%d day%s, " % plural(self._days)) + s
        if self._microseconds:
            s = s + ".%06d" % self._microseconds
        return s
    def total_seconds(self):
        """Total seconds in the duration."""
        return ((self.days * 86400 + self.seconds)*10**6 +
                self.microseconds) / 10**6
    # Read-only field accessors
    @property
    def days(self):
        """days"""
        return self._days
    @property
    def seconds(self):
        """seconds"""
        return self._seconds
    @property
    def microseconds(self):
        """microseconds"""
        return self._microseconds
    def __add__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days + other._days,
                             self._seconds + other._seconds,
                             self._microseconds + other._microseconds)
        return NotImplemented
    __radd__ = __add__
    def __sub__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days - other._days,
                             self._seconds - other._seconds,
                             self._microseconds - other._microseconds)
        return NotImplemented
    def __rsub__(self, other):
        if isinstance(other, timedelta):
            return -self + other
        return NotImplemented
    def __neg__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta(-self._days,
                         -self._seconds,
                         -self._microseconds)
    def __pos__(self):
        return self
    def __abs__(self):
        # Only _days can be negative after normalization.
        if self._days < 0:
            return -self
        else:
            return self
    def __mul__(self, other):
        if isinstance(other, int):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days * other,
                             self._seconds * other,
                             self._microseconds * other)
        if isinstance(other, float):
            # Exact rational arithmetic via the float's integer ratio.
            a, b = other.as_integer_ratio()
            return self * a / b
        return NotImplemented
    __rmul__ = __mul__
    def _to_microseconds(self):
        # Collapse the (days, seconds, microseconds) triple to a single int.
        return ((self._days * (24*3600) + self._seconds) * 1000000 +
                self._microseconds)
    def __floordiv__(self, other):
        if not isinstance(other, (int, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec // other._to_microseconds()
        if isinstance(other, int):
            return timedelta(0, 0, usec // other)
    def __truediv__(self, other):
        if not isinstance(other, (int, float, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec / other._to_microseconds()
        if isinstance(other, int):
            return timedelta(0, 0, usec / other)
        if isinstance(other, float):
            a, b = other.as_integer_ratio()
            return timedelta(0, 0, b * usec / a)
    def __mod__(self, other):
        if isinstance(other, timedelta):
            r = self._to_microseconds() % other._to_microseconds()
            return timedelta(0, 0, r)
        return NotImplemented
    def __divmod__(self, other):
        if isinstance(other, timedelta):
            q, r = divmod(self._to_microseconds(),
                          other._to_microseconds())
            return q, timedelta(0, 0, r)
        return NotImplemented
    # Comparisons of timedelta objects with other.
    def __eq__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) == 0
        else:
            return False
    def __ne__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) != 0
        else:
            return True
    def __le__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)
    def __lt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)
    def __ge__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)
    def __gt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)
    def _cmp(self, other):
        # Lexicographic compare of the normalized state tuples.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())
    def __hash__(self):
        return hash(self._getstate())
    def __bool__(self):
        return (self._days != 0 or
                self._seconds != 0 or
                self._microseconds != 0)
    # Pickle support.
    def _getstate(self):
        return (self._days, self._seconds, self._microseconds)
    def __reduce__(self):
        return (self.__class__, self._getstate())
# Class-level bounds and resolution, attached after the class statement
# because they are themselves timedelta instances.
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
                          microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
class date:
    """Concrete date type.

    Constructors:
    __new__()
    fromtimestamp()
    today()
    fromordinal()

    Operators:
    __repr__, __str__
    __cmp__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)

    Methods:
    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()

    Properties (readonly):
    year, month, day
    """
    # Immutable: slots only, all state set once in __new__.
    __slots__ = '_year', '_month', '_day'
    def __new__(cls, year, month=None, day=None):
        """Constructor.

        Arguments:
        year, month, day (required, base 1)
        """
        # A lone 4-byte bytes argument is the pickled state (see
        # __setstate); the byte-2 range check distinguishes it from a
        # plausible year value.
        if (isinstance(year, bytes) and len(year) == 4 and
            1 <= year[2] <= 12 and month is None): # Month is sane
            # Pickle support
            self = object.__new__(cls)
            self.__setstate(year)
            return self
        _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        return self
# Additional constructors
    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        # Time-of-day fields from localtime() are simply discarded.
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)
    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)
    @classmethod
    def fromordinal(cls, n):
        """Contruct a date from a proleptic Gregorian ordinal.

        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)
# Conversions to string
    def __repr__(self):
        """Convert to formal string, for repr().

        NOTE(review): the doctest examples below show datetime (a
        subclass); date itself formats as 'datetime.date(Y, M, D)'.

        >>> dt = datetime(2010, 1, 1)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0)'
        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
        """
        return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
                                   self._year,
                                   self._month,
                                   self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
    def ctime(self):
        "Return ctime() style string, e.g. 'Mon Jan  1 00:00:00 2001'."
        # toordinal() % 7 gives ISO weekday (Mon=1..Sun=7) since day 1 is a Monday.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)
    def strftime(self, fmt):
        "Format using strftime()."
        return _wrap_strftime(self, fmt, self.timetuple())
    def __format__(self, fmt):
        # An empty format spec means str(); anything else is a strftime format.
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)
    def isoformat(self):
        """Return the date formatted according to ISO.

        This is 'YYYY-MM-DD'.

        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        return "%04d-%02d-%02d" % (self._year, self._month, self._day)
    __str__ = isoformat
# Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year
    @property
    def month(self):
        """month (1-12)"""
        return self._month
    @property
    def day(self):
        """day (1-31)"""
        return self._day
# Standard conversions, __cmp__, __hash__ (and helpers)
    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        # Time-of-day is zeroed and DST flag is -1 (unknown).
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)
    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.

        January 1 of year 1 is day 1.  Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)
    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        _check_date_fields(year, month, day)
        # NOTE(review): returns a plain date, not self.__class__ — subclass
        # instances are narrowed to date here; confirm this is intended.
        return date(year, month, day)
    # Comparisons of date objects with other.
    # Returning NotImplemented (rather than raising) lets Python try the
    # reflected operation on the other operand.
    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        return NotImplemented
    def __ne__(self, other):
        if isinstance(other, date):
            return self._cmp(other) != 0
        return NotImplemented
    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        return NotImplemented
    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        return NotImplemented
    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        return NotImplemented
    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        return NotImplemented
    def _cmp(self, other):
        # Three-way comparison helper; delegates to the module-level _cmp
        # function (this method intentionally shares its name).
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))
    def __hash__(self):
        "Hash."
        # Hash the pickle payload so equal dates hash equally.
        return hash(self._getstate())
    # Computations
    def __add__(self, other):
        "Add a date to a timedelta."
        # Only the timedelta's whole-day component matters for a date.
        if isinstance(other, timedelta):
            o = self.toordinal() + other.days
            if 0 < o <= _MAXORDINAL:
                return date.fromordinal(o)
            raise OverflowError("result out of range")
        return NotImplemented
    __radd__ = __add__
    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, timedelta):
            # date - timedelta: reuse __add__ with the negated day count.
            return self + timedelta(-other.days)
        if isinstance(other, date):
            # date - date yields a timedelta of whole days.
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta(days1 - days2)
        return NotImplemented
    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        # Ordinal 1 (1-Jan-0001) is a Monday, hence the +6 bias.
        return (self.toordinal() + 6) % 7
    # Day-of-the-week and week-of-the-year, according to ISO
    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7
    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.
        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.
        The first week is 1; Monday is 1 ... Sunday is 7.
        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Date falls before this year's week 1: it belongs to the
            # last ISO week of the previous year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Late-December dates may already belong to next year's week 1.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1
    # Pickle support.
    def _getstate(self):
        # Pack (year, month, day) into 4 bytes: year as a big-endian pair.
        yhi, ylo = divmod(self._year, 256)
        return bytes([yhi, ylo, self._month, self._day]),
    def __setstate(self, string):
        # Inverse of _getstate(): rebuild fields from the 4-byte payload.
        # NOTE(review): the message is misleading when the month byte is
        # out of range — the length may be fine; the payload is just invalid.
        if len(string) != 4 or not (1 <= string[2] <= 12):
            raise TypeError("not enough arguments")
        yhi, ylo, self._month, self._day = string
        self._year = yhi * 256 + ylo
    def __reduce__(self):
        # Pickle as (class, (4-byte state,)); __new__ detects the bytes arg.
        return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
# Class-level bounds and granularity for date instances.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
    """Abstract base class for time zone info classes.
    Subclasses must override the tzname(), utcoffset() and dst() methods.
    """
    __slots__ = ()
    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")
    def utcoffset(self, dt):
        "datetime -> minutes east of UTC (negative for west of UTC)"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")
    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC.
        Return 0 if DST not in effect. utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")
    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."
        # dt must be an aware datetime whose tzinfo is this object, and the
        # subclass's utcoffset()/dst() must both return non-None.
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")
        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # Shift to the zone's standard time first, then re-query dst() to
        # decide whether a daylight adjustment applies on top.
        delta = dtoff - dtdst
        if delta:
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        return dt + dtdst
    # Pickle support.
    def __reduce__(self):
        # Honor the optional __getinitargs__/__getstate__ hooks a subclass
        # may define, mirroring classic pickle protocol behavior.
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)
# Alias so code with a parameter named "tzinfo" can still reach the class.
_tzinfo_class = tzinfo
class time:
    """Time with time zone.
    Constructors:
    __new__()
    Operators:
    __repr__, __str__
    __cmp__, __hash__
    Methods:
    strftime()
    isoformat()
    utcoffset()
    tzname()
    dst()
    Properties (readonly):
    hour, minute, second, microsecond, tzinfo
    """
    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """Constructor.
        Arguments:
        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        """
        self = object.__new__(cls)
        if isinstance(hour, bytes) and len(hour) == 6:
            # Pickle support: a 6-byte first argument is the packed state
            # produced by _getstate(); "minute" then carries the tzinfo.
            self.__setstate(hour, minute or None)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self
    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour
    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute
    @property
    def second(self):
        """second (0-59)"""
        return self._second
    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond
    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
    # Standard conversions, __hash__ (and helpers)
    # Comparisons of time objects with other.
    # NOTE(review): __eq__/__ne__ return False/True for non-time operands,
    # whereas date/datetime in this file return NotImplemented — confirm
    # whether the asymmetry is intentional.
    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) == 0
        else:
            return False
    def __ne__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) != 0
        else:
            return True
    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)
    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)
    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)
    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)
    def _cmp(self, other, allow_mixed=False):
        # Three-way comparison. With allow_mixed, comparing a naive time to
        # an aware one yields an arbitrary non-zero value (used by ==/!=)
        # instead of raising.
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None
        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff
        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware times")
        # Different offsets: compare UTC-normalized hour/minute values.
        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))
    def __hash__(self):
        """Hash."""
        # Aware times are normalized to UTC first so that equal times
        # (across zones) hash equally.
        tzoff = self.utcoffset()
        if not tzoff: # zero or None
            return hash(self._getstate()[0])
        h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
                      timedelta(hours=1))
        assert not m % timedelta(minutes=1), "whole minute"
        m //= timedelta(minutes=1)
        if 0 <= h < 24:
            return hash(time(h, m, self.second, self.microsecond))
        return hash((h, m, self.second, self.microsecond))
    # Conversion to string
    def _tzstr(self, sep=":"):
        """Return formatted timezone offset (+xx:xx) or None."""
        off = self.utcoffset()
        if off is not None:
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            assert 0 <= hh < 24
            off = "%s%02d%s%02d" % (sign, hh, sep, mm)
        return off
    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero second/microsecond fields are omitted.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
                             self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s
    def isoformat(self):
        """Return the time formatted according to ISO.
        This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
        self.microsecond == 0.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond)
        tz = self._tzstr()
        if tz:
            s += tz
        return s
    __str__ = isoformat
    def strftime(self, fmt):
        """Format using strftime(). The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)
    def __format__(self, fmt):
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)
    # Timezone functions
    def utcoffset(self):
        """Return the timezone offset in minutes east of UTC (negative west of
        UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset
    def tzname(self):
        """Return the timezone name.
        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name
    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.
        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(None)
        _check_utc_offset("dst", offset)
        return offset
    def replace(self, hour=None, minute=None, second=None, microsecond=None,
                tzinfo=True):
        """Return a new time with new values for the specified fields."""
        # True is the sentinel for tzinfo because None is a meaningful
        # value (it strips awareness).
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        _check_time_fields(hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        return time(hour, minute, second, microsecond, tzinfo)
    def __bool__(self):
        # NOTE(review): UTC-equivalent-of-midnight is falsy here; Python 3.5+
        # removed time.__bool__ so all times are truthy — confirm which
        # behavior this module is meant to track.
        if self.second or self.microsecond:
            return True
        offset = self.utcoffset() or timedelta(0)
        return timedelta(hours=self.hour, minutes=self.minute) != offset
    # Pickle support.
    def _getstate(self):
        # Pack h/m/s plus microsecond (3 bytes, big-endian) into 6 bytes.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)
    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(); name-mangled, invoked from __new__.
        if len(string) != 6 or string[0] >= 24:
            raise TypeError("an integer is required")
        (self._hour, self._minute, self._second,
         us1, us2, us3) = string
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)
    def __reduce__(self):
        return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
# Class-level bounds and granularity for time instances.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
    The year, month and day arguments are required. tzinfo may be None, or an
    instance of a tzinfo subclass. The remaining arguments may be ints.
    """
    __slots__ = date.__slots__ + (
        '_hour', '_minute', '_second',
        '_microsecond', '_tzinfo')
    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None):
        if isinstance(year, bytes) and len(year) == 10:
            # Pickle support: a 10-byte first argument is the packed state
            # from _getstate(); "month" then carries the tzinfo.
            self = date.__new__(cls, year[:4])
            self.__setstate(year, month)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self = date.__new__(cls, year, month, day)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self
    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour
    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute
    @property
    def second(self):
        """second (0-59)"""
        return self._second
    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond
    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
    @classmethod
    def fromtimestamp(cls, t, tz=None):
        """Construct a datetime from a POSIX timestamp (like time.time()).
        A timezone info object may be passed in as well.
        """
        _check_tzinfo_arg(tz)
        # Naive result uses local time; aware result is built in UTC and
        # then converted via tz.fromutc().
        converter = _time.localtime if tz is None else _time.gmtime
        t, frac = divmod(t, 1.0)
        us = int(frac * 1e6)
        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000. In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
        ss = min(ss, 59) # clamp out leap seconds if the platform has them
        result = cls(y, m, d, hh, mm, ss, us, tz)
        if tz is not None:
            result = tz.fromutc(result)
        return result
    @classmethod
    def utcfromtimestamp(cls, t):
        "Construct a UTC datetime from a POSIX timestamp (like time.time())."
        t, frac = divmod(t, 1.0)
        us = int(frac * 1e6)
        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000. In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
        ss = min(ss, 59) # clamp out leap seconds if the platform has them
        return cls(y, m, d, hh, mm, ss, us)
    # XXX This is supposed to do better than we *can* do by using time.time(),
    # XXX if the platform supports a more accurate way. The C implementation
    # XXX uses gettimeofday on platforms that have it, but that isn't
    # XXX available from Python. So now() may return different results
    # XXX across the implementations.
    @classmethod
    def now(cls, tz=None):
        "Construct a datetime from time.time() and optional time zone info."
        t = _time.time()
        return cls.fromtimestamp(t, tz)
    @classmethod
    def utcnow(cls):
        "Construct a UTC datetime from time.time()."
        t = _time.time()
        return cls.utcfromtimestamp(t)
    @classmethod
    def combine(cls, date, time):
        "Construct a datetime from a given date and a given time."
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   time.tzinfo)
    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        # Map dst() to struct_time's tm_isdst convention: -1 unknown,
        # 0 not in effect, 1 in effect.
        dst = self.dst()
        if dst is None:
            dst = -1
        elif dst:
            dst = 1
        else:
            dst = 0
        return _build_struct_time(self.year, self.month, self.day,
                                  self.hour, self.minute, self.second,
                                  dst)
    def timestamp(self):
        "Return POSIX timestamp as float"
        # Naive datetimes are interpreted as local time via mktime();
        # aware ones are measured against the UTC epoch directly.
        if self._tzinfo is None:
            return _time.mktime((self.year, self.month, self.day,
                                 self.hour, self.minute, self.second,
                                 -1, -1, -1)) + self.microsecond / 1e6
        else:
            return (self - _EPOCH).total_seconds()
    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
        offset = self.utcoffset()
        if offset:
            # Rebinds the local name only; the instance is not mutated.
            self -= offset
        y, m, d = self.year, self.month, self.day
        hh, mm, ss = self.hour, self.minute, self.second
        return _build_struct_time(y, m, d, hh, mm, ss, 0)
    def date(self):
        "Return the date part."
        return date(self._year, self._month, self._day)
    def time(self):
        "Return the time part, with tzinfo None."
        return time(self.hour, self.minute, self.second, self.microsecond)
    def timetz(self):
        "Return the time part, with same tzinfo."
        return time(self.hour, self.minute, self.second, self.microsecond,
                    self._tzinfo)
    def replace(self, year=None, month=None, day=None, hour=None,
                minute=None, second=None, microsecond=None, tzinfo=True):
        """Return a new datetime with new values for the specified fields."""
        # True is the sentinel for tzinfo because None is a meaningful
        # value (it strips awareness).
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        _check_date_fields(year, month, day)
        _check_time_fields(hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        return datetime(year, month, day, hour, minute, second,
                        microsecond, tzinfo)
    def astimezone(self, tz=None):
        # With tz=None, infer the platform's local zone as a fixed-offset
        # timezone for this instant.
        if tz is None:
            if self.tzinfo is None:
                raise ValueError("astimezone() requires an aware datetime")
            ts = (self - _EPOCH) // timedelta(seconds=1)
            localtm = _time.localtime(ts)
            local = datetime(*localtm[:6])
            try:
                # Extract TZ data if available
                gmtoff = localtm.tm_gmtoff
                zone = localtm.tm_zone
            except AttributeError:
                # Compute UTC offset and compare with the value implied
                # by tm_isdst. If the values match, use the zone name
                # implied by tm_isdst.
                delta = local - datetime(*_time.gmtime(ts)[:6])
                dst = _time.daylight and localtm.tm_isdst > 0
                gmtoff = -(_time.altzone if dst else _time.timezone)
                if delta == timedelta(seconds=gmtoff):
                    tz = timezone(delta, _time.tzname[dst])
                else:
                    tz = timezone(delta)
            else:
                tz = timezone(timedelta(seconds=gmtoff), zone)
        elif not isinstance(tz, tzinfo):
            raise TypeError("tz argument must be an instance of tzinfo")
        mytz = self.tzinfo
        if mytz is None:
            raise ValueError("astimezone() requires an aware datetime")
        if tz is mytz:
            return self
        # Convert self to UTC, and attach the new time zone object.
        myoffset = self.utcoffset()
        if myoffset is None:
            raise ValueError("astimezone() requires an aware datetime")
        utc = (self - myoffset).replace(tzinfo=tz)
        # Convert from UTC to tz's local time.
        return tz.fromutc(utc)
    # Ways to produce a string.
    def ctime(self):
        "Return ctime() style string."
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d %02d:%02d:%02d %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day,
            self._hour, self._minute, self._second,
            self._year)
    def isoformat(self, sep='T'):
        """Return the time formatted according to ISO.
        This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
        self.microsecond == 0.
        If self.tzinfo is not None, the UTC offset is also attached, giving
        'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
        Optional argument sep specifies the separator between date and
        time, default 'T'.
        """
        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
                                   sep) +
             _format_time(self._hour, self._minute, self._second,
                          self._microsecond))
        off = self.utcoffset()
        if off is not None:
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            s += "%s%02d:%02d" % (sign, hh, mm)
        return s
    def __repr__(self):
        """Convert to formal string, for repr()."""
        L = [self._year, self._month, self._day, # These are never zero
             self._hour, self._minute, self._second, self._microsecond]
        # Drop up to two trailing zero fields (microsecond, then second).
        if L[-1] == 0:
            del L[-1]
        if L[-1] == 0:
            del L[-1]
        s = ", ".join(map(str, L))
        s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s
    def __str__(self):
        "Convert to string, for str()."
        return self.isoformat(sep=' ')
    @classmethod
    def strptime(cls, date_string, format):
        'string, format -> new datetime parsed from a string (like time.strptime()).'
        import _strptime
        return _strptime._strptime_datetime(cls, date_string, format)
    def utcoffset(self):
        """Return the timezone offset in minutes east of UTC (negative west of
        UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        _check_utc_offset("utcoffset", offset)
        return offset
    def tzname(self):
        """Return the timezone name.
        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        name = _call_tzinfo_method(self._tzinfo, "tzname", self)
        _check_tzname(name)
        return name
    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.
        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(self)
        _check_utc_offset("dst", offset)
        return offset
    # Comparisons of datetime objects with other.
    # NotImplemented for a plain date lets date's reflected op run; a plain
    # date compared against a datetime ends up unequal / _cmperror.
    def __eq__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other, allow_mixed=True) == 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            return False
    def __ne__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other, allow_mixed=True) != 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            return True
    def __le__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) <= 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)
    def __lt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) < 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)
    def __ge__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) >= 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)
    def __gt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) > 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)
    def _cmp(self, other, allow_mixed=False):
        # Three-way comparison. With allow_mixed, naive-vs-aware yields an
        # arbitrary non-zero value (used by ==/!=) instead of raising.
        assert isinstance(other, datetime)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None
        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff
        if base_compare:
            return _cmp((self._year, self._month, self._day,
                         self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._year, other._month, other._day,
                         other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware datetimes")
        # XXX What follows could be done more efficiently...
        diff = self - other # this will take offsets into account
        if diff.days < 0:
            return -1
        return diff and 1 or 0
    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        # Fold self into one timedelta from ordinal 0, add, then split back
        # into date and time-of-day parts.
        delta = timedelta(self.toordinal(),
                          hours=self._hour,
                          minutes=self._minute,
                          seconds=self._second,
                          microseconds=self._microsecond)
        delta += other
        hour, rem = divmod(delta.seconds, 3600)
        minute, second = divmod(rem, 60)
        if 0 < delta.days <= _MAXORDINAL:
            return datetime.combine(date.fromordinal(delta.days),
                                    time(hour, minute, second,
                                         delta.microseconds,
                                         tzinfo=self._tzinfo))
        raise OverflowError("result out of range")
    __radd__ = __add__
    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                return self + -other
            return NotImplemented
        days1 = self.toordinal()
        days2 = other.toordinal()
        secs1 = self._second + self._minute * 60 + self._hour * 3600
        secs2 = other._second + other._minute * 60 + other._hour * 3600
        base = timedelta(days1 - days2,
                         secs1 - secs2,
                         self._microsecond - other._microsecond)
        if self._tzinfo is other._tzinfo:
            return base
        # Mixed zones: adjust the naive difference by the offset difference.
        myoff = self.utcoffset()
        otoff = other.utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("cannot mix naive and timezone-aware time")
        return base + otoff - myoff
    def __hash__(self):
        # Aware datetimes hash on their UTC-normalized value so equal
        # instants (across zones) hash equally.
        tzoff = self.utcoffset()
        if tzoff is None:
            return hash(self._getstate()[0])
        days = _ymd2ord(self.year, self.month, self.day)
        seconds = self.hour * 3600 + self.minute * 60 + self.second
        return hash(timedelta(days, seconds, self.microsecond) - tzoff)
    # Pickle support.
    def _getstate(self):
        # 10-byte payload: 2-byte year, month, day, h/m/s, 3-byte microsecond.
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([yhi, ylo, self._month, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)
    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(); name-mangled, invoked from __new__.
        (yhi, ylo, self._month, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)
    def __reduce__(self):
        return (self.__class__, self._getstate())
# Class-level bounds and granularity for datetime instances.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the proleptic-Gregorian ordinal of the Monday starting ISO week 1.

    ISO week 1 is the Mon-Sun week containing the year's first Thursday.
    """
    # XXX This could be done more efficiently
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # Monday == 0; see date.weekday()
    monday = jan1 - jan1_weekday
    if jan1_weekday > THURSDAY:
        # Jan 1 falls on Fri/Sat/Sun: that partial week belongs to the
        # previous ISO year, so week 1 starts seven days later.
        monday += 7
    return monday
class timezone(tzinfo):
    # Concrete fixed-offset tzinfo: a whole-minute UTC offset plus an
    # optional display name.
    __slots__ = '_offset', '_name'
    # Sentinel value to disallow None
    _Omitted = object()
    def __new__(cls, offset, name=_Omitted):
        if not isinstance(offset, timedelta):
            raise TypeError("offset must be a timedelta")
        if name is cls._Omitted:
            if not offset:
                # A zero offset with no name is the shared UTC singleton.
                return cls.utc
            name = None
        elif not isinstance(name, str):
            raise TypeError("name must be a string")
        if not cls._minoffset <= offset <= cls._maxoffset:
            raise ValueError("offset must be a timedelta"
                             " strictly between -timedelta(hours=24) and"
                             " timedelta(hours=24).")
        if (offset.microseconds != 0 or
            offset.seconds % 60 != 0):
            raise ValueError("offset must be a timedelta"
                             " representing a whole number of minutes")
        return cls._create(offset, name)
    @classmethod
    def _create(cls, offset, name=None):
        # Internal constructor that skips __new__'s validation (used for
        # the utc/min/max singletons below).
        self = tzinfo.__new__(cls)
        self._offset = offset
        self._name = name
        return self
    def __getinitargs__(self):
        """pickle support"""
        if self._name is None:
            return (self._offset,)
        return (self._offset, self._name)
    def __eq__(self, other):
        # NOTE(review): returns False (not NotImplemented) for non-timezone
        # operands, and the name is deliberately ignored — only the offset
        # determines equality.
        if type(other) != timezone:
            return False
        return self._offset == other._offset
    def __hash__(self):
        return hash(self._offset)
    def __repr__(self):
        """Convert to formal string, for repr().
        >>> tz = timezone.utc
        >>> repr(tz)
        'datetime.timezone.utc'
        >>> tz = timezone(timedelta(hours=-5), 'EST')
        >>> repr(tz)
        "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
        """
        if self is self.utc:
            return 'datetime.timezone.utc'
        if self._name is None:
            return "%s(%r)" % ('datetime.' + self.__class__.__name__,
                               self._offset)
        return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
                               self._offset, self._name)
    def __str__(self):
        return self.tzname(None)
    def utcoffset(self, dt):
        if isinstance(dt, datetime) or dt is None:
            return self._offset
        raise TypeError("utcoffset() argument must be a datetime instance"
                        " or None")
    def tzname(self, dt):
        if isinstance(dt, datetime) or dt is None:
            if self._name is None:
                # Unnamed zones synthesize a 'UTC+HH:MM' style name.
                return self._name_from_offset(self._offset)
            return self._name
        raise TypeError("tzname() argument must be a datetime instance"
                        " or None")
    def dst(self, dt):
        # A fixed-offset zone never observes DST.
        if isinstance(dt, datetime) or dt is None:
            return None
        raise TypeError("dst() argument must be a datetime instance"
                        " or None")
    def fromutc(self, dt):
        # Simple fixed-offset conversion; no DST logic needed.
        if isinstance(dt, datetime):
            if dt.tzinfo is not self:
                raise ValueError("fromutc: dt.tzinfo "
                                 "is not self")
            return dt + self._offset
        raise TypeError("fromutc() argument must be a datetime instance"
                        " or None")
    _maxoffset = timedelta(hours=23, minutes=59)
    _minoffset = -_maxoffset
    @staticmethod
    def _name_from_offset(delta):
        # Render a timedelta offset as 'UTC±HH:MM'.
        if delta < timedelta(0):
            sign = '-'
            delta = -delta
        else:
            sign = '+'
        hours, rest = divmod(delta, timedelta(hours=1))
        minutes = rest // timedelta(minutes=1)
        return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
# Module-level singletons: the UTC zone and the extreme fixed offsets.
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
# Aware UTC epoch; used by timestamp()/astimezone() for epoch arithmetic.
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and that datetime+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ?  z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belonging to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst()). So a breaking case must be
pretty bizarre. (Note: the stray closing parenthesis after "dst()" in the
original text is a typo; read this as "returns a negative dst().")
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
# Brython does not have a _datetime module, so let's comment this out for now.
#try:
# from _datetime import *
#except ImportError:
# pass
#else:
# # Clean up unused names
# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
# _build_struct_time, _call_tzinfo_method, _check_date_fields,
# _check_time_fields, _check_tzinfo_arg, _check_tzname,
# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
# _days_before_year, _days_in_month, _format_time, _is_leap,
# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
# _wrap_strftime, _ymd2ord)
# # XXX Since import * above excludes names that start with _,
# # docstring does not get overwritten. In the future, it may be
# # appropriate to maintain a single module level docstring and
# # remove the following line.
# #from _datetime import __doc__
| gpl-3.0 |
mahinthjoe/bedrock | bedrock/newsletter/tests/test_views.py | 21 | 29540 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import uuid
from django.http import HttpResponse
from django.test.client import RequestFactory
import basket
from bedrock.base.urlresolvers import reverse
from mock import DEFAULT, Mock, patch
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from bedrock.mozorg.tests import TestCase
from bedrock.newsletter.tests import newsletters
from bedrock.newsletter.views import (
general_error,
invalid_email_address,
newsletter_subscribe,
recovery_text,
unknown_address_text,
updated,
)
cache_mock = Mock()
cache_mock.get.return_value = None
def assert_redirect(response, url):
"""
Assert that the response indicates a redirect to the url.
"""
# This is like Django TestCase's assertRedirect, only we're not
# using Django TestCase due to our lack of a database, so we
# need to fake our own.
# Django seems to stick this into the Location header
url = "http://testserver" + url
assert url == response['Location'],\
"Response did not redirect to %s; Location=%s" % \
(url, response['Location'])
@patch('bedrock.newsletter.utils.cache', cache_mock)
class TestViews(TestCase):
def setUp(self):
self.rf = RequestFactory()
def test_hacks_newsletter_frames_allow(self):
"""
Bedrock pages get the 'x-frame-options: DENY' header by default.
The hacks newsletter page is framed, so needs to ALLOW.
"""
with self.activate('en-US'):
resp = self.client.get(reverse('mozorg.hacks_newsletter'))
ok_('x-frame-options' not in resp)
@patch('bedrock.newsletter.views.l10n_utils.render')
def test_updated_allows_good_tokens(self, mock_render):
token = unicode(uuid.uuid4())
req = self.rf.get('/', {'token': token, 'unsub': 1})
updated(req)
self.assertEqual(mock_render.call_args[0][2]['token'], token)
@patch('bedrock.newsletter.views.l10n_utils.render')
def test_updated_disallows_bad_tokens(self, mock_render):
token = 'the-dude'
req = self.rf.get('/', {'token': token, 'unsub': 1})
updated(req)
eq_(mock_render.call_args[0][2]['token'], None)
token = '\'>"><img src=x onerror=alert(1)>'
req = self.rf.get('/', {'token': token, 'unsub': 1})
updated(req)
eq_(mock_render.call_args[0][2]['token'], None)
# Always mock basket.request to be sure we never actually call basket
# during tests.
@patch('basket.base.request')
@patch('bedrock.newsletter.utils.cache', cache_mock)
class TestExistingNewsletterView(TestCase):
def setUp(self):
self.token = unicode(uuid.uuid4())
self.user = {
'newsletters': [u'mozilla-and-you'],
'token': self.token,
'email': u'user@example.com',
'lang': u'pt',
'country': u'br',
'format': u'T',
}
# By default, data matches user's existing data; change it
# in the test as desired. Also, user has accepted privacy
# checkbox.
self.data = {
u'form-MAX_NUM_FORMS': 4,
u'form-INITIAL_FORMS': 4,
u'form-TOTAL_FORMS': 4,
u'email': self.user['email'],
u'lang': self.user['lang'],
u'country': self.user['country'],
u'format': self.user['format'],
u'privacy': u'on',
u'form-0-newsletter': u'mozilla-and-you',
u'form-0-subscribed_radio': u'True',
u'form-1-newsletter': u'mobile',
u'form-1-subscribed_radio': u'False',
u'form-2-newsletter': u'firefox-tips',
u'form-2-subscribed_check': u'False',
u'form-3-newsletter': u'join-mozilla',
u'form-3-subscribed_check': u'False',
u'submit': u'Save Preferences',
}
super(TestExistingNewsletterView, self).setUp()
@patch('bedrock.newsletter.utils.get_newsletters')
def test_get_token(self, get_newsletters, mock_basket_request):
# If user gets page with valid token in their URL, they
# see their data, and no privacy checkbox is presented
get_newsletters.return_value = newsletters
url = reverse('newsletter.existing.token', args=(self.token,))
# noinspection PyUnresolvedReferences
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render') as render:
basket_patches['user'].return_value = self.user
render.return_value = HttpResponse('')
self.client.get(url)
request, template_name, context = render.call_args[0]
form = context['form']
self.assertNotIn('privacy', form.fields)
self.assertEqual(self.user['lang'], form.initial['lang'])
@patch('bedrock.newsletter.utils.get_newsletters')
def test_show(self, get_newsletters, mock_basket_request):
# Newsletters are only listed if the user is subscribed to them,
# or they are marked 'show' and 'active' in the settings
get_newsletters.return_value = newsletters
# Find a newsletter without 'show' and subscribe the user to it
for newsletter, data in newsletters.iteritems():
if not data.get('show', False):
self.user['newsletters'] = [newsletter]
break
url = reverse('newsletter.existing.token', args=(self.token,))
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render') as render:
basket_patches['user'].return_value = self.user
render.return_value = HttpResponse('')
self.client.get(url)
request, template_name, context = render.call_args[0]
forms = context['formset'].initial_forms
shown = set([form.initial['newsletter'] for form in forms])
inactive = set([newsletter for newsletter, data
in newsletters.iteritems()
if not data.get('active', False)])
to_show = set([newsletter for newsletter, data
in newsletters.iteritems()
if data.get('show', False)]) - inactive
subscribed = set(self.user['newsletters'])
# All subscribed newsletters except inactive ones are shown
self.assertEqual(set(), subscribed - inactive - shown)
# All 'show' newsletters are shown
self.assertEqual(set(), to_show - shown)
# No other newsletters are shown
self.assertEqual(set(), shown - subscribed - to_show)
def test_get_no_token(self, mock_basket_request):
# No token in URL - should redirect to recovery
url = reverse('newsletter.existing.token', args=('',))
rsp = self.client.get(url)
self.assertEqual(302, rsp.status_code)
self.assertTrue(rsp['Location'].endswith(reverse('newsletter.recovery')))
def test_get_user_not_found(self, mock_basket_request):
# Token in URL but not a valid token - should redirect to recovery
rand_token = unicode(uuid.uuid4())
url = reverse('newsletter.existing.token', args=(rand_token,))
with patch.multiple('basket',
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render') as render:
render.return_value = HttpResponse('')
with patch('django.contrib.messages.add_message') as add_msg:
basket_patches['user'].side_effect = basket.BasketException
rsp = self.client.get(url)
# Should have given a message
self.assertEqual(1, add_msg.call_count,
msg=repr(add_msg.call_args_list))
# Should have been redirected to recovery page
self.assertEqual(302, rsp.status_code)
self.assertTrue(rsp['Location'].endswith(reverse('newsletter.recovery')))
def test_invalid_token(self, mock_basket_request):
# "Token" in URL is not syntactically a UUID - should redirect to
# recovery *without* calling Exact Target
token = "not a token"
url = reverse('newsletter.existing.token', args=(token,))
with patch.multiple('basket', user=DEFAULT) as basket_patches:
with patch('django.contrib.messages.add_message') as add_msg:
rsp = self.client.get(url, follow=False)
self.assertEqual(0, basket_patches['user'].call_count)
self.assertEqual(1, add_msg.call_count)
self.assertEqual(302, rsp.status_code)
self.assertTrue(rsp['Location'].endswith(reverse('newsletter.recovery')))
def test_post_user_not_found(self, mock_basket_request):
# User submits form and passed token, but no user was found
# Should issue message and redirect to recovery
rand_token = unicode(uuid.uuid4())
url = reverse('newsletter.existing.token', args=(rand_token,))
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render') as render:
render.return_value = HttpResponse('')
with patch('django.contrib.messages.add_message') as add_msg:
basket_patches['user'].side_effect = basket.BasketException
rsp = self.client.post(url, self.data)
# Shouldn't call basket except for the attempt to find the user
self.assertEqual(0, basket_patches['update_user'].call_count)
self.assertEqual(0, basket_patches['unsubscribe'].call_count)
self.assertEqual(0, basket_patches['subscribe'].call_count)
# Should have given a message
self.assertEqual(1, add_msg.call_count,
msg=repr(add_msg.call_args_list))
# Should have been redirected to recovery page
self.assertEqual(302, rsp.status_code)
self.assertTrue(rsp['Location'].endswith(reverse('newsletter.recovery')))
@patch('bedrock.newsletter.utils.get_newsletters')
def test_subscribing(self, get_newsletters, mock_basket_request):
get_newsletters.return_value = newsletters
# They subscribe to firefox-tips
self.data['form-2-subscribed_check'] = u'True'
# in English - and that's their language too
self.user['lang'] = u'en'
self.data['lang'] = u'en'
url = reverse('newsletter.existing.token', args=(self.token,))
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('django.contrib.messages.add_message') as add_msg:
with patch('lib.l10n_utils.render'):
basket_patches['user'].return_value = self.user
rsp = self.client.post(url, self.data)
# Should have given no messages
self.assertEqual(0, add_msg.call_count,
msg=repr(add_msg.call_args_list))
# Should have called update_user with subscription list
self.assertEqual(1, basket_patches['update_user'].call_count)
kwargs = basket_patches['update_user'].call_args[1]
self.assertEqual(
{'newsletters': u'mozilla-and-you,firefox-tips'},
kwargs
)
# Should not have called unsubscribe
self.assertEqual(0, basket_patches['unsubscribe'].call_count)
# Should not have called subscribe
self.assertEqual(0, basket_patches['subscribe'].call_count)
# Should redirect to the 'updated' view
url = reverse('newsletter.updated')
assert_redirect(rsp, url)
@patch('bedrock.newsletter.utils.get_newsletters')
def test_unsubscribing(self, get_newsletters, mock_basket_request):
get_newsletters.return_value = newsletters
# They unsubscribe from the one newsletter they're subscribed to
self.data['form-0-subscribed_radio'] = u'False'
url = reverse('newsletter.existing.token', args=(self.token,))
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render'):
basket_patches['user'].return_value = self.user
rsp = self.client.post(url, self.data)
# Should have called update_user with list of newsletters
self.assertEqual(1, basket_patches['update_user'].call_count)
kwargs = basket_patches['update_user'].call_args[1]
self.assertEqual(
{'newsletters': u''},
kwargs
)
# Should not have called subscribe
self.assertEqual(0, basket_patches['subscribe'].call_count)
# Should not have called unsubscribe
self.assertEqual(0, basket_patches['unsubscribe'].call_count)
# Should redirect to the 'updated' view
url = reverse('newsletter.updated')
assert_redirect(rsp, url)
@patch('bedrock.newsletter.utils.get_newsletters')
def test_remove_all(self, get_newsletters, mock_basket_request):
get_newsletters.return_value = newsletters
self.data['remove_all'] = 'on' # any value should do
url = reverse('newsletter.existing.token', args=(self.token,))
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render'):
basket_patches['user'].return_value = self.user
rsp = self.client.post(url, self.data)
# Should not have updated user details at all
self.assertEqual(0, basket_patches['update_user'].call_count)
# Should have called unsubscribe
self.assertEqual(1, basket_patches['unsubscribe'].call_count)
# and said user opts out
args, kwargs = basket_patches['unsubscribe'].call_args
self.assertEqual((self.token, self.user['email']), args)
self.assertTrue(kwargs['optout'])
# Should redirect to the 'updated' view with unsub=1 and token
url = reverse('newsletter.updated') + "?unsub=1"
url += "&token=%s" % self.token
assert_redirect(rsp, url)
@patch('bedrock.newsletter.utils.get_newsletters')
def test_change_lang_country(self, get_newsletters, mock_basket_request):
get_newsletters.return_value = newsletters
self.data['lang'] = 'en'
self.data['country'] = 'us'
url = reverse('newsletter.existing.token', args=(self.token,))
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render'):
with patch('django.contrib.messages.add_message') as add_msg:
basket_patches['user'].return_value = self.user
rsp = self.client.post(url, self.data)
# We have an existing user with a change to their email data,
# but none to their subscriptions.
# 'subscribe' should not be called
self.assertEqual(0, basket_patches['subscribe'].call_count)
# update_user should be called once
self.assertEqual(1, basket_patches['update_user'].call_count)
# with the new lang and country and the newsletter list
kwargs = basket_patches['update_user'].call_args[1]
self.assertEqual(
{'lang': u'en',
'country': u'us',
'newsletters': u'mozilla-and-you'},
kwargs
)
# No messages should be emitted
self.assertEqual(0, add_msg.call_count,
msg=repr(add_msg.call_args_list))
# Should redirect to the 'updated' view
url = reverse('newsletter.updated')
assert_redirect(rsp, url)
@patch('bedrock.newsletter.utils.get_newsletters')
def test_newsletter_ordering(self, get_newsletters, mock_basket_request):
# Newsletters are listed in 'order' order, if they have an 'order'
# field
get_newsletters.return_value = newsletters
url = reverse('newsletter.existing.token', args=(self.token,))
self.user['newsletters'] = [u'mozilla-and-you', u'firefox-tips',
u'beta']
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render') as render:
basket_patches['user'].return_value = self.user
render.return_value = HttpResponse('')
self.client.get(url)
request, template_name, context = render.call_args[0]
forms = context['formset'].initial_forms
newsletters_in_order = [form.initial['newsletter'] for form in forms]
self.assertEqual([u'firefox-tips', u'mozilla-and-you'],
newsletters_in_order)
@patch('bedrock.newsletter.utils.get_newsletters')
def test_newsletter_no_order(self, get_newsletters, mock_basket_request):
"""Newsletter views should work if we get no order from basket."""
orderless_newsletters = {}
for key, val in newsletters.items():
nl_copy = val.copy()
del nl_copy['order']
orderless_newsletters[key] = nl_copy
get_newsletters.return_value = orderless_newsletters
url = reverse('newsletter.existing.token', args=(self.token,))
self.user['newsletters'] = [u'mozilla-and-you', u'firefox-tips',
u'beta']
with patch.multiple('basket',
update_user=DEFAULT,
subscribe=DEFAULT,
unsubscribe=DEFAULT,
user=DEFAULT) as basket_patches:
with patch('lib.l10n_utils.render') as render:
basket_patches['user'].return_value = self.user
render.return_value = HttpResponse('')
self.client.get(url)
request, template_name, context = render.call_args[0]
forms = context['formset'].initial_forms
newsletters_in_order = [form.initial['newsletter'] for form in forms]
self.assertEqual([u'mozilla-and-you', u'firefox-tips'],
newsletters_in_order)
@patch('bedrock.newsletter.utils.cache', cache_mock)
class TestConfirmView(TestCase):
def setUp(self):
self.token = unicode(uuid.uuid4())
self.url = reverse('newsletter.confirm', kwargs={'token': self.token})
def test_normal(self):
"""Confirm works with a valid token"""
with patch('basket.confirm') as confirm:
confirm.return_value = {'status': 'ok'}
with patch('lib.l10n_utils.render') as mock_render:
mock_render.return_value = HttpResponse('')
rsp = self.client.get(self.url, follow=True)
self.assertEqual(200, rsp.status_code)
confirm.assert_called_with(self.token)
context = mock_render.call_args[0][2]
self.assertTrue(context['success'])
self.assertFalse(context['generic_error'])
self.assertFalse(context['token_error'])
def test_basket_down(self):
"""If basket is down, we report the appropriate error"""
with patch('basket.confirm') as confirm:
confirm.side_effect = basket.BasketException()
with patch('lib.l10n_utils.render') as mock_render:
mock_render.return_value = HttpResponse('')
rsp = self.client.get(self.url, follow=True)
self.assertEqual(200, rsp.status_code)
confirm.assert_called_with(self.token)
context = mock_render.call_args[0][2]
self.assertFalse(context['success'])
self.assertTrue(context['generic_error'])
self.assertFalse(context['token_error'])
def test_bad_token(self):
"""If the token is bad, we report the appropriate error"""
with patch('basket.confirm') as confirm:
confirm.side_effect = basket.BasketException(status_code=403,
code=basket.errors.BASKET_UNKNOWN_TOKEN)
with patch('lib.l10n_utils.render') as mock_render:
mock_render.return_value = HttpResponse('')
rsp = self.client.get(self.url, follow=True)
self.assertEqual(200, rsp.status_code)
confirm.assert_called_with(self.token)
context = mock_render.call_args[0][2]
self.assertFalse(context['success'])
self.assertFalse(context['generic_error'])
self.assertTrue(context['token_error'])
class TestRecoveryView(TestCase):
def setUp(self):
with self.activate('en-US'):
self.url = reverse('newsletter.recovery')
def test_bad_email(self):
"""Email syntax errors are caught"""
data = {'email': 'not_an_email'}
rsp = self.client.post(self.url, data)
self.assertEqual(200, rsp.status_code)
self.assertIn('email', rsp.context['form'].errors)
@patch('basket.send_recovery_message', autospec=True)
def test_unknown_email(self, mock_basket):
"""Unknown email addresses give helpful error message"""
data = {'email': 'unknown@example.com'}
mock_basket.side_effect = basket.BasketException(status_code=404,
code=basket.errors.BASKET_UNKNOWN_EMAIL)
rsp = self.client.post(self.url, data)
self.assertTrue(mock_basket.called)
self.assertEqual(200, rsp.status_code)
form = rsp.context['form']
expected_error = unknown_address_text % \
reverse('newsletter.subscribe')
self.assertIn(expected_error, form.errors['email'])
@patch('django.contrib.messages.add_message', autospec=True)
@patch('basket.send_recovery_message', autospec=True)
def test_good_email(self, mock_basket, add_msg):
"""If basket returns success, don't report errors"""
data = {'email': 'known@example.com'}
mock_basket.return_value = {'status': 'ok'}
rsp = self.client.post(self.url, data)
self.assertTrue(mock_basket.called)
# On successful submit, we redirect
self.assertEqual(302, rsp.status_code)
rsp = self.client.get(rsp['Location'])
self.assertEqual(200, rsp.status_code)
self.assertFalse(rsp.context['form'])
# We also give them a success message
self.assertEqual(1, add_msg.call_count,
msg=repr(add_msg.call_args_list))
self.assertIn(recovery_text, add_msg.call_args[0])
class TestNewsletterSubscribe(TestCase):
def setUp(self):
self.rf = RequestFactory()
def ajax_request(self, data):
return self.request(data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def request(self, data=None, **kwargs):
if data:
req = self.rf.post('/', data, **kwargs)
else:
req = self.rf.get('/', **kwargs)
return newsletter_subscribe(req)
@patch('bedrock.newsletter.views.basket')
def test_returns_ajax_errors(self, basket_mock):
"""Incomplete data should return specific errors in JSON"""
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
}
resp = self.ajax_request(data)
resp_data = json.loads(resp.content)
self.assertFalse(resp_data['success'])
self.assertEqual(len(resp_data['errors']), 1)
self.assertIn('privacy', resp_data['errors'][0])
self.assertFalse(basket_mock.called)
@patch('bedrock.newsletter.views.basket')
def test_returns_sanitized_ajax_errors(self, basket_mock):
"""Error messages should be HTML escaped.
Bug 1116754
"""
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
'privacy': True,
'country': '<svg/onload=alert("NEFARIOUSNESS")>',
}
resp = self.ajax_request(data)
resp_data = json.loads(resp.content)
self.assertFalse(resp_data['success'])
self.assertEqual(len(resp_data['errors']), 1)
self.assertNotIn(data['country'], resp_data['errors'][0])
self.assertIn('NEFARIOUSNESS', resp_data['errors'][0])
self.assertIn('<svg', resp_data['errors'][0])
self.assertFalse(basket_mock.called)
@patch('bedrock.newsletter.views.basket')
def test_returns_ajax_success(self, basket_mock):
"""Good post should return success JSON"""
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
'privacy': True,
}
resp = self.ajax_request(data)
resp_data = json.loads(resp.content)
self.assertDictEqual(resp_data, {'success': True})
basket_mock.subscribe.assert_called_with('fred@example.com', 'flintstones',
format='H')
@patch.object(basket, 'subscribe')
def test_returns_ajax_invalid_email(self, subscribe_mock):
"""Invalid email AJAX post should return proper error."""
subscribe_mock.side_effect = basket.BasketException(
code=basket.errors.BASKET_INVALID_EMAIL)
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
'privacy': True,
}
resp = self.ajax_request(data)
resp_data = json.loads(resp.content)
self.assertFalse(resp_data['success'])
self.assertEqual(resp_data['errors'][0], unicode(invalid_email_address))
@patch.object(basket, 'subscribe')
def test_returns_ajax_basket_error(self, subscribe_mock):
"""Basket error AJAX post should return proper error."""
subscribe_mock.side_effect = basket.BasketException(
code=basket.errors.BASKET_NETWORK_FAILURE)
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
'privacy': True,
}
resp = self.ajax_request(data)
resp_data = json.loads(resp.content)
self.assertFalse(resp_data['success'])
self.assertEqual(resp_data['errors'][0], unicode(general_error))
def test_shows_normal_form(self):
"""A normal GET should show the form."""
resp = self.request()
doc = pq(resp.content)
self.assertTrue(doc('#newsletter-form'))
self.assertTrue(doc('input[value="mozilla-and-you"]'))
@patch('bedrock.newsletter.views.basket')
def test_returns_success(self, basket_mock):
"""Good non-ajax post should return thank-you page."""
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
'privacy': True,
}
resp = self.request(data)
doc = pq(resp.content)
self.assertFalse(doc('#footer_email_submit'))
self.assertFalse(doc('input[value="mozilla-and-you"]'))
self.assertTrue(doc('#email-form').hasClass('thank'))
basket_mock.subscribe.assert_called_with('fred@example.com', 'flintstones',
format='H')
@patch('bedrock.newsletter.views.basket')
def test_returns_failure(self, basket_mock):
"""Bad non-ajax post should return form with errors."""
data = {
'newsletters': 'flintstones',
'email': 'fred@example.com',
'fmt': 'H',
}
resp = self.request(data)
doc = pq(resp.content)
self.assertTrue(doc('#newsletter-form'))
self.assertFalse(doc('input[value="mozilla-and-you"]'))
self.assertTrue(doc('input[value="flintstones"]'))
self.assertFalse(doc('#email-form').hasClass('thank'))
self.assertTrue(doc('.field-privacy').hasClass('form-field-error'))
self.assertIn('privacy', doc('#footer-email-errors .errorlist li').eq(0).text())
self.assertFalse(basket_mock.subscribe.called)
| mpl-2.0 |
geshuning/lib | unit_testing/gtest-1.7.0/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version information for use in the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re


def extract_version(text):
    """Return (major, minor, fix) version strings parsed from an AC_INIT
    macro found in *text*, or None if no version is found.

    The version must be three integers separated by periods and surrounded
    by square brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")".  The
    pattern is non-greedy so it stops at the first ")" after the version
    rather than pulling in everything up to the last ")" in the text.
    """
    version_expression = re.compile(
        r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)", re.DOTALL)
    match = version_expression.search(text)
    if match is None:
        return None
    return match.group(1), match.group(2), match.group(3)


def main(argv):
    """Generate <output_dir>/Version.h from <input_dir>/configure.ac.

    Returns a process exit status (0 on success, 1 on error).
    """
    if len(argv) < 3:
        print("Usage: versiongenerate.py input_dir output_dir")
        return 1
    input_dir = argv[1]
    output_dir = argv[2]
    # Only the first 1024 characters are scanned; AC_INIT is assumed to
    # appear within them (a stated assumption of this script).
    with open("%s/configure.ac" % input_dir, 'r') as config_file:
        opening_string = config_file.read(1024)
    version = extract_version(opening_string)
    if version is None:
        # The original crashed with AttributeError here; fail cleanly instead.
        print("versiongenerate.py: no AC_INIT version found in configure.ac")
        return 1
    major_version, minor_version, fix_version = version
    # Write the version information to a header file to be included in the
    # Info.plist file.  The header is consumed only during Info.plist
    # substitution, so it needs no include guard.
    file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
    with open("%s/Version.h" % output_dir, 'w') as version_file:
        version_file.write(file_data)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| mit |
schlueter/ansible | test/units/modules/network/onyx/test_onyx_linkagg.py | 17 | 4547 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.onyx import onyx_linkagg
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxLinkaggModule(TestOnyxModule):
    """Tests for onyx_linkagg covering plain and MLAG port-channels."""

    module = onyx_linkagg

    def setUp(self):
        super(TestOnyxLinkaggModule, self).setUp()
        # Stub the device config read...
        self.mock_get_config = patch.object(
            onyx_linkagg.OnyxLinkAggModule,
            "_get_port_channels")
        self.get_config = self.mock_get_config.start()
        # ...and the config write, so no real device is contacted.
        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestOnyxLinkaggModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixture(self, config_file):
        # Feed the named fixture back as the device's current configuration.
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None

    def load_port_channel_fixture(self):
        self.load_fixture('onyx_port_channel_show.cfg')

    def load_mlag_port_channel_fixture(self):
        self.load_fixture('onyx_mlag_port_channel_show.cfg')

    def test_port_channel_no_change(self):
        set_module_args({'name': 'Po22', 'state': 'present',
                         'members': ['Eth1/7']})
        self.load_port_channel_fixture()
        self.execute_module(changed=False)

    def test_port_channel_remove(self):
        set_module_args({'name': 'Po22', 'state': 'absent'})
        self.load_port_channel_fixture()
        self.execute_module(changed=True,
                            commands=['no interface port-channel 22'])

    def test_port_channel_add(self):
        set_module_args({'name': 'Po23', 'state': 'present',
                         'members': ['Eth1/8']})
        self.load_port_channel_fixture()
        expected = ['interface port-channel 23', 'exit',
                    'interface ethernet 1/8 channel-group 23 mode on']
        self.execute_module(changed=True, commands=expected)

    def test_port_channel_add_member(self):
        set_module_args({'name': 'Po22', 'state': 'present',
                         'members': ['Eth1/7', 'Eth1/8']})
        self.load_port_channel_fixture()
        expected = ['interface ethernet 1/8 channel-group 22 mode on']
        self.execute_module(changed=True, commands=expected)

    def test_port_channel_remove_member(self):
        set_module_args({'name': 'Po22', 'state': 'present'})
        self.load_port_channel_fixture()
        expected = ['interface ethernet 1/7 no channel-group']
        self.execute_module(changed=True, commands=expected)

    def test_mlag_port_channel_no_change(self):
        set_module_args({'name': 'Mpo33', 'state': 'present',
                         'members': ['Eth1/8']})
        self.load_mlag_port_channel_fixture()
        self.execute_module(changed=False)

    def test_mlag_port_channel_remove(self):
        set_module_args({'name': 'Mpo33', 'state': 'absent'})
        self.load_mlag_port_channel_fixture()
        self.execute_module(changed=True,
                            commands=['no interface mlag-port-channel 33'])

    def test_mlag_port_channel_add(self):
        set_module_args({'name': 'Mpo34', 'state': 'present',
                         'members': ['Eth1/9']})
        self.load_mlag_port_channel_fixture()
        expected = ['interface mlag-port-channel 34', 'exit',
                    'interface ethernet 1/9 mlag-channel-group 34 mode on']
        self.execute_module(changed=True, commands=expected)

    def test_mlag_port_channel_add_member(self):
        set_module_args({'name': 'Mpo33', 'state': 'present',
                         'members': ['Eth1/8', 'Eth1/9']})
        self.load_mlag_port_channel_fixture()
        expected = ['interface ethernet 1/9 mlag-channel-group 33 mode on']
        self.execute_module(changed=True, commands=expected)

    def test_mlag_port_channel_remove_member(self):
        set_module_args({'name': 'Mpo33', 'state': 'present'})
        self.load_mlag_port_channel_fixture()
        expected = ['interface ethernet 1/8 no mlag-channel-group']
        self.execute_module(changed=True, commands=expected)
| gpl-3.0 |
viruxel/ansible-modules-extras | windows/win_acl.py | 63 | 4336 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
# Copyright 2015, Trond Hindenes
# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_acl
version_added: "2.0"
short_description: Set file/directory permissions for a system user or group.
description:
- Add or remove rights/permissions for a given user or group for the specified src file or folder.
options:
path:
description:
- File or Directory
required: yes
user:
description:
- User or Group to add specified rights to act on src file/folder
required: yes
default: none
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified access rule
required: no
choices:
- present
- absent
default: present
type:
description:
- Specify whether to allow or deny the rights specified
required: yes
choices:
- allow
- deny
default: none
rights:
description:
- The rights/permissions that are to be allowed/denyed for the specified user or group for the given src file or directory. Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights Enumeration.
required: yes
choices:
- AppendData
- ChangePermissions
- Delete
- DeleteSubdirectoriesAndFiles
- ExecuteFile
- FullControl
- ListDirectory
- Modify
- Read
- ReadAndExecute
- ReadAttributes
- ReadData
- ReadExtendedAttributes
- ReadPermissions
- Synchronize
- TakeOwnership
- Traverse
- Write
- WriteAttributes
- WriteData
- WriteExtendedAttributes
default: none
inherit:
description:
- Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on the choices see MSDN InheritanceFlags Enumeration.
required: no
choices:
- ContainerInherit
- ObjectInherit
- None
default: For Leaf File, None; For Directory, ContainerInherit, ObjectInherit;
propagation:
description:
- Propagation flag on the ACL rules. For more information on the choices see MSDN PropagationFlags Enumeration.
required: no
choices:
- None
- NoPropagateInherit
- InheritOnly
default: "None"
author: Phil Schwartz (@schwartzmx), Trond Hindenes (@trondhindenes), Hans-Joachim Kliemeck (@h0nIg)
'''
# Usage examples for ansible-doc.  The last two tasks were previously missing
# the "win_acl:" module key, which made them invalid playbook syntax.
EXAMPLES = '''
# Restrict write,execute access to User Fed-Phil
$ ansible -i hosts -m win_acl -a "user=Fed-Phil path=C:\Important\Executable.exe type=deny rights='ExecuteFile,Write'" all

# Playbook example
# Add access rule to allow IIS_IUSRS FullControl to MySite
---
- name: Add IIS_IUSRS allow rights
  win_acl:
    path: 'C:\inetpub\wwwroot\MySite'
    user: 'IIS_IUSRS'
    rights: 'FullControl'
    type: 'allow'
    state: 'present'
    inherit: 'ContainerInherit, ObjectInherit'
    propagation: 'None'

# Remove previously added rule for IIS_IUSRS
- name: Remove FullControl AccessRule for IIS_IUSRS
  win_acl:
    path: 'C:\inetpub\wwwroot\MySite'
    user: 'IIS_IUSRS'
    rights: 'FullControl'
    type: 'allow'
    state: 'absent'
    inherit: 'ContainerInherit, ObjectInherit'
    propagation: 'None'

# Deny Intern
- name: Deny Deny
  win_acl:
    path: 'C:\Administrator\Documents'
    user: 'Intern'
    rights: 'Read,Write,Modify,FullControl,Delete'
    type: 'deny'
    state: 'present'
'''
| gpl-3.0 |
bikeshare/bikeshare-web | javascript/OpenLayers-2.13.1/tests/selenium/remotecontrol/selenium.py | 254 | 69389 |
"""
Copyright 2006 ThoughtWorks, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
# This file has been automatically generated via XSL
import httplib
import urllib
import re
class selenium:
"""
Defines an object that runs Selenium commands.
Element Locators
~~~~~~~~~~~~~~~~
Element Locators tell Selenium which HTML element a command refers to.
The format of a locator is:
\ *locatorType*\ **=**\ \ *argument*
We support the following strategies for locating elements:
* \ **identifier**\ =\ *id*:
Select the element with the specified @id attribute. If no match is
found, select the first element whose @name attribute is \ *id*.
(This is normally the default; see below.)
* \ **id**\ =\ *id*:
Select the element with the specified @id attribute.
* \ **name**\ =\ *name*:
Select the first element with the specified @name attribute.
* username
* name=username
The name may optionally be followed by one or more \ *element-filters*, separated from the name by whitespace. If the \ *filterType* is not specified, \ **value**\ is assumed.
* name=flavour value=chocolate
* \ **dom**\ =\ *javascriptExpression*:
Find an element by evaluating the specified string. This allows you to traverse the HTML Document Object
Model using JavaScript. Note that you must not return a value in this string; simply make it the last expression in the block.
* dom=document.forms['myForm'].myDropdown
* dom=document.images[56]
* dom=function foo() { return document.links[1]; }; foo();
* \ **xpath**\ =\ *xpathExpression*:
Locate an element using an XPath expression.
* xpath=//img[@alt='The image alt text']
* xpath=//table[@id='table1']//tr[4]/td[2]
* xpath=//a[contains(@href,'#id1')]
* xpath=//a[contains(@href,'#id1')]/@class
* xpath=(//table[@class='stylee'])//th[text()='theHeaderText']/../td
* xpath=//input[@name='name2' and @value='yes']
* xpath=//\*[text()="right"]
* \ **link**\ =\ *textPattern*:
Select the link (anchor) element which contains text matching the
specified \ *pattern*.
* link=The link text
* \ **css**\ =\ *cssSelectorSyntax*:
Select the element using css selectors. Please refer to CSS2 selectors, CSS3 selectors for more information. You can also check the TestCssLocators test in the selenium test suite for an example of usage, which is included in the downloaded selenium core package.
* css=a[href="#id3"]
* css=span#firstChild + span
Currently the css selector locator supports all css1, css2 and css3 selectors except namespace in css3, some pseudo classes(:nth-of-type, :nth-last-of-type, :first-of-type, :last-of-type, :only-of-type, :visited, :hover, :active, :focus, :indeterminate) and pseudo elements(::first-line, ::first-letter, ::selection, ::before, ::after).
Without an explicit locator prefix, Selenium uses the following default
strategies:
* \ **dom**\ , for locators starting with "document."
* \ **xpath**\ , for locators starting with "//"
* \ **identifier**\ , otherwise
Element Filters
~~~~~~~~~~~~~~~
Element filters can be used with a locator to refine a list of candidate elements. They are currently used only in the 'name' element-locator.
Filters look much like locators, ie.
\ *filterType*\ **=**\ \ *argument*
Supported element-filters are:
\ **value=**\ \ *valuePattern*
Matches elements based on their values. This is particularly useful for refining a list of similarly-named toggle-buttons.
\ **index=**\ \ *index*
Selects a single element based on its position in the list (offset from zero).
String-match Patterns
~~~~~~~~~~~~~~~~~~~~~
Various Pattern syntaxes are available for matching string values:
* \ **glob:**\ \ *pattern*:
Match a string against a "glob" (aka "wildmat") pattern. "Glob" is a
kind of limited regular-expression syntax typically used in command-line
shells. In a glob pattern, "\*" represents any sequence of characters, and "?"
represents any single character. Glob patterns match against the entire
string.
* \ **regexp:**\ \ *regexp*:
Match a string using a regular-expression. The full power of JavaScript
regular-expressions is available.
* \ **regexpi:**\ \ *regexpi*:
Match a string using a case-insensitive regular-expression.
* \ **exact:**\ \ *string*:
Match a string exactly, verbatim, without any of that fancy wildcard
stuff.
If no pattern prefix is specified, Selenium assumes that it's a "glob"
pattern.
For commands that return multiple values (such as verifySelectOptions),
the string being matched is a comma-separated list of the return values,
where both commas and backslashes in the values are backslash-escaped.
When providing a pattern, the optional matching syntax (i.e. glob,
regexp, etc.) is specified once, as usual, at the beginning of the
pattern.
"""
### This part is hard-coded in the XSL
    def __init__(self, host, port, browserStartCommand, browserURL):
        """Record the RC server coordinates and browser launch settings.

        No session is opened here; call start() to create one.
        """
        self.host = host
        self.port = port
        self.browserStartCommand = browserStartCommand
        self.browserURL = browserURL
        self.sessionId = None  # set by start(), cleared by stop()
def start(self):
result = self.get_string("getNewBrowserSession", [self.browserStartCommand, self.browserURL])
try:
self.sessionId = result
except ValueError:
raise Exception, result
    def stop(self):
        """End the current browser session and forget its session id."""
        self.do_command("testComplete", [])
        self.sessionId = None
def do_command(self, verb, args):
conn = httplib.HTTPConnection(self.host, self.port)
commandString = u'/selenium-server/driver/?cmd=' + urllib.quote_plus(unicode(verb).encode('utf-8'))
for i in range(len(args)):
commandString = commandString + '&' + unicode(i+1) + '=' + urllib.quote_plus(unicode(args[i]).encode('utf-8'))
if (None != self.sessionId):
commandString = commandString + "&sessionId=" + unicode(self.sessionId)
conn.request("GET", commandString)
response = conn.getresponse()
#print response.status, response.reason
data = unicode(response.read(), "UTF-8")
result = response.reason
#print "Selenium Result: " + repr(data) + "\n\n"
if (not data.startswith('OK')):
raise Exception, data
return data
def get_string(self, verb, args):
result = self.do_command(verb, args)
return result[3:]
def get_string_array(self, verb, args):
csv = self.get_string(verb, args)
token = ""
tokens = []
escape = False
for i in range(len(csv)):
letter = csv[i]
if (escape):
token = token + letter
escape = False
continue
if (letter == '\\'):
escape = True
elif (letter == ','):
tokens.append(token)
token = ""
else:
token = token + letter
tokens.append(token)
return tokens
    def get_number(self, verb, args):
        """Run a command whose reply is numeric.

        The wire protocol delivers numbers as strings and no conversion is
        performed here; callers parse the value themselves.
        """
        return self.get_string(verb, args)
    def get_number_array(self, verb, args):
        """Run a command whose reply is an array of numbers.

        As with get_number(), each element stays a string; callers convert
        as needed.
        """
        return self.get_string_array(verb, args)
def get_boolean(self, verb, args):
boolstr = self.get_string(verb, args)
if ("true" == boolstr):
return True
if ("false" == boolstr):
return False
raise ValueError, "result is neither 'true' nor 'false': " + boolstr
def get_boolean_array(self, verb, args):
boolarr = self.get_string_array(verb, args)
for i in range(len(boolarr)):
if ("true" == boolstr):
boolarr[i] = True
continue
if ("false" == boolstr):
boolarr[i] = False
continue
raise ValueError, "result is neither 'true' nor 'false': " + boolarr[i]
return boolarr
### From here on, everything's auto-generated from XML
def click(self,locator):
"""
Clicks on a link, button, checkbox or radio button. If the click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
"""
self.do_command("click", [locator,])
def double_click(self,locator):
"""
Double clicks on a link, button, checkbox or radio button. If the double click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
"""
self.do_command("doubleClick", [locator,])
def context_menu(self,locator):
"""
Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element).
'locator' is an element locator
"""
self.do_command("contextMenu", [locator,])
def click_at(self,locator,coordString):
"""
Clicks on a link, button, checkbox or radio button. If the click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("clickAt", [locator,coordString,])
def double_click_at(self,locator,coordString):
"""
Doubleclicks on a link, button, checkbox or radio button. If the action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("doubleClickAt", [locator,coordString,])
def context_menu_at(self,locator,coordString):
"""
Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element).
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("contextMenuAt", [locator,coordString,])
def fire_event(self,locator,eventName):
"""
Explicitly simulate an event, to trigger the corresponding "on\ *event*"
handler.
'locator' is an element locator
'eventName' is the event name, e.g. "focus" or "blur"
"""
self.do_command("fireEvent", [locator,eventName,])
def focus(self,locator):
"""
Move the focus to the specified element; for example, if the element is an input field, move the cursor to that field.
'locator' is an element locator
"""
self.do_command("focus", [locator,])
def key_press(self,locator,keySequence):
"""
Simulates a user pressing and releasing a key.
'locator' is an element locator
'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119".
"""
self.do_command("keyPress", [locator,keySequence,])
def shift_key_down(self):
"""
Press the shift key and hold it down until doShiftUp() is called or a new page is loaded.
"""
self.do_command("shiftKeyDown", [])
def shift_key_up(self):
"""
Release the shift key.
"""
self.do_command("shiftKeyUp", [])
def meta_key_down(self):
"""
Press the meta key and hold it down until doMetaUp() is called or a new page is loaded.
"""
self.do_command("metaKeyDown", [])
def meta_key_up(self):
"""
Release the meta key.
"""
self.do_command("metaKeyUp", [])
def alt_key_down(self):
"""
Press the alt key and hold it down until doAltUp() is called or a new page is loaded.
"""
self.do_command("altKeyDown", [])
def alt_key_up(self):
"""
Release the alt key.
"""
self.do_command("altKeyUp", [])
def control_key_down(self):
"""
Press the control key and hold it down until doControlUp() is called or a new page is loaded.
"""
self.do_command("controlKeyDown", [])
def control_key_up(self):
"""
Release the control key.
"""
self.do_command("controlKeyUp", [])
def key_down(self,locator,keySequence):
"""
Simulates a user pressing a key (without releasing it yet).
'locator' is an element locator
'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119".
"""
self.do_command("keyDown", [locator,keySequence,])
def key_up(self,locator,keySequence):
"""
Simulates a user releasing a key.
'locator' is an element locator
'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119".
"""
self.do_command("keyUp", [locator,keySequence,])
def mouse_over(self,locator):
"""
Simulates a user hovering a mouse over the specified element.
'locator' is an element locator
"""
self.do_command("mouseOver", [locator,])
def mouse_out(self,locator):
"""
Simulates a user moving the mouse pointer away from the specified element.
'locator' is an element locator
"""
self.do_command("mouseOut", [locator,])
def mouse_down(self,locator):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseDown", [locator,])
def mouse_down_at(self,locator,coordString):
"""
Simulates a user pressing the mouse button (without releasing it yet) at
the specified location.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseDownAt", [locator,coordString,])
def mouse_up(self,locator):
"""
Simulates the event that occurs when the user releases the mouse button (i.e., stops
holding the button down) on the specified element.
'locator' is an element locator
"""
self.do_command("mouseUp", [locator,])
def mouse_up_at(self,locator,coordString):
"""
Simulates the event that occurs when the user releases the mouse button (i.e., stops
holding the button down) at the specified location.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseUpAt", [locator,coordString,])
def mouse_move(self,locator):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseMove", [locator,])
def mouse_move_at(self,locator,coordString):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseMoveAt", [locator,coordString,])
def type(self,locator,value):
"""
Sets the value of an input field, as though you typed it in.
Can also be used to set the value of combo boxes, check boxes, etc. In these cases,
value should be the value of the option selected, not the visible text.
'locator' is an element locator
'value' is the value to type
"""
self.do_command("type", [locator,value,])
def type_keys(self,locator,value):
"""
Simulates keystroke events on the specified element, as though you typed the value key-by-key.
This is a convenience method for calling keyDown, keyUp, keyPress for every character in the specified string;
this is useful for dynamic UI widgets (like auto-completing combo boxes) that require explicit key events.
Unlike the simple "type" command, which forces the specified value into the page directly, this command
may or may not have any visible effect, even in cases where typing keys would normally have a visible effect.
For example, if you use "typeKeys" on a form element, you may or may not see the results of what you typed in
the field.
In some cases, you may need to use the simple "type" command to set the value of the field and then the "typeKeys" command to
send the keystroke events corresponding to what you just typed.
'locator' is an element locator
'value' is the value to type
"""
self.do_command("typeKeys", [locator,value,])
def set_speed(self,value):
"""
Set execution speed (i.e., set the millisecond length of a delay which will follow each selenium operation). By default, there is no such delay, i.e.,
the delay is 0 milliseconds.
'value' is the number of milliseconds to pause after operation
"""
self.do_command("setSpeed", [value,])
def get_speed(self):
"""
Get execution speed (i.e., get the millisecond length of the delay following each selenium operation). By default, there is no such delay, i.e.,
the delay is 0 milliseconds.
See also setSpeed.
"""
return self.get_string("getSpeed", [])
def check(self,locator):
"""
Check a toggle-button (checkbox/radio)
'locator' is an element locator
"""
self.do_command("check", [locator,])
def uncheck(self,locator):
"""
Uncheck a toggle-button (checkbox/radio)
'locator' is an element locator
"""
self.do_command("uncheck", [locator,])
def select(self,selectLocator,optionLocator):
"""
Select an option from a drop-down using an option locator.
Option locators provide different ways of specifying options of an HTML
Select element (e.g. for selecting a specific option, or for asserting
that the selected option satisfies a specification). There are several
forms of Select Option Locator.
* \ **label**\ =\ *labelPattern*:
matches options based on their labels, i.e. the visible text. (This
is the default.)
* label=regexp:^[Oo]ther
* \ **value**\ =\ *valuePattern*:
matches options based on their values.
* value=other
* \ **id**\ =\ *id*:
matches options based on their ids.
* id=option1
* \ **index**\ =\ *index*:
matches an option based on its index (offset from zero).
* index=2
If no option locator prefix is provided, the default behaviour is to match on \ **label**\ .
'selectLocator' is an element locator identifying a drop-down menu
'optionLocator' is an option locator (a label by default)
"""
self.do_command("select", [selectLocator,optionLocator,])
def add_selection(self,locator,optionLocator):
"""
Add a selection to the set of selected options in a multi-select element using an option locator.
@see #doSelect for details of option locators
'locator' is an element locator identifying a multi-select box
'optionLocator' is an option locator (a label by default)
"""
self.do_command("addSelection", [locator,optionLocator,])
def remove_selection(self,locator,optionLocator):
"""
Remove a selection from the set of selected options in a multi-select element using an option locator.
@see #doSelect for details of option locators
'locator' is an element locator identifying a multi-select box
'optionLocator' is an option locator (a label by default)
"""
self.do_command("removeSelection", [locator,optionLocator,])
def remove_all_selections(self,locator):
"""
Unselects all of the selected options in a multi-select element.
'locator' is an element locator identifying a multi-select box
"""
self.do_command("removeAllSelections", [locator,])
def submit(self,formLocator):
"""
Submit the specified form. This is particularly useful for forms without
submit buttons, e.g. single-input "Search" forms.
'formLocator' is an element locator for the form you want to submit
"""
self.do_command("submit", [formLocator,])
def open(self,url):
"""
Opens an URL in the test frame. This accepts both relative and absolute
URLs.
The "open" command waits for the page to load before proceeding,
ie. the "AndWait" suffix is implicit.
\ *Note*: The URL must be on the same domain as the runner HTML
due to security restrictions in the browser (Same Origin Policy). If you
need to open an URL on another domain, use the Selenium Server to start a
new browser session on that domain.
'url' is the URL to open; may be relative or absolute
"""
self.do_command("open", [url,])
def open_window(self,url,windowID):
"""
Opens a popup window (if a window with that ID isn't already open).
After opening the window, you'll need to select it using the selectWindow
command.
This command can also be a useful workaround for bug SEL-339. In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example).
In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using
an empty (blank) url, like this: openWindow("", "myFunnyWindow").
'url' is the URL to open, which can be blank
'windowID' is the JavaScript window ID of the window to select
"""
self.do_command("openWindow", [url,windowID,])
def select_window(self,windowID):
"""
Selects a popup window using a window locator; once a popup window has been selected, all
commands go to that window. To select the main window again, use null
as the target.
Window locators provide different ways of specifying the window object:
by title, by internal JavaScript "name," or by JavaScript variable.
* \ **title**\ =\ *My Special Window*:
Finds the window using the text that appears in the title bar. Be careful;
two windows can share the same title. If that happens, this locator will
just pick one.
* \ **name**\ =\ *myWindow*:
Finds the window using its internal JavaScript "name" property. This is the second
parameter "windowName" passed to the JavaScript method window.open(url, windowName, windowFeatures, replaceFlag)
(which Selenium intercepts).
* \ **var**\ =\ *variableName*:
Some pop-up windows are unnamed (anonymous), but are associated with a JavaScript variable name in the current
application window, e.g. "window.foo = window.open(url);". In those cases, you can open the window using
"var=foo".
If no window locator prefix is provided, we'll try to guess what you mean like this:
1.) if windowID is null, (or the string "null") then it is assumed the user is referring to the original window instantiated by the browser).
2.) if the value of the "windowID" parameter is a JavaScript variable name in the current application window, then it is assumed
that this variable contains the return value from a call to the JavaScript window.open() method.
3.) Otherwise, selenium looks in a hash it maintains that maps string names to window "names".
4.) If \ *that* fails, we'll try looping over all of the known windows to try to find the appropriate "title".
Since "title" is not necessarily unique, this may have unexpected behavior.
If you're having trouble figuring out the name of a window that you want to manipulate, look at the Selenium log messages
which identify the names of windows created via window.open (and therefore intercepted by Selenium). You will see messages
like the following for each window as it is opened:
``debug: window.open call intercepted; window ID (which you can use with selectWindow()) is "myNewWindow"``
In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example).
(This is bug SEL-339.) In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using
an empty (blank) url, like this: openWindow("", "myFunnyWindow").
'windowID' is the JavaScript window ID of the window to select
"""
self.do_command("selectWindow", [windowID,])
def select_frame(self,locator):
"""
Selects a frame within the current window. (You may invoke this command
multiple times to select nested frames.) To select the parent frame, use
"relative=parent" as a locator; to select the top frame, use "relative=top".
You can also select a frame by its 0-based index number; select the first frame with
"index=0", or the third frame with "index=2".
You may also use a DOM expression to identify the frame you want directly,
like this: ``dom=frames["main"].frames["subframe"]``
'locator' is an element locator identifying a frame or iframe
"""
self.do_command("selectFrame", [locator,])
def get_whether_this_frame_match_frame_expression(self,currentFrameString,target):
"""
Determine whether current/locator identify the frame containing this running code.
This is useful in proxy injection mode, where this code runs in every
browser frame and window, and sometimes the selenium server needs to identify
the "current" frame. In this case, when the test calls selectFrame, this
routine is called for each frame to figure out which one has been selected.
The selected frame will return true, while all others will return false.
'currentFrameString' is starting frame
'target' is new frame (which might be relative to the current one)
"""
return self.get_boolean("getWhetherThisFrameMatchFrameExpression", [currentFrameString,target,])
def get_whether_this_window_match_window_expression(self,currentWindowString,target):
"""
Determine whether currentWindowString plus target identify the window containing this running code.
This is useful in proxy injection mode, where this code runs in every
browser frame and window, and sometimes the selenium server needs to identify
the "current" window. In this case, when the test calls selectWindow, this
routine is called for each window to figure out which one has been selected.
The selected window will return true, while all others will return false.
'currentWindowString' is starting window
'target' is new window (which might be relative to the current one, e.g., "_parent")
"""
return self.get_boolean("getWhetherThisWindowMatchWindowExpression", [currentWindowString,target,])
def wait_for_pop_up(self,windowID,timeout):
"""
Waits for a popup window to appear and load up.
'windowID' is the JavaScript window "name" of the window that will appear (not the text of the title bar)
'timeout' is a timeout in milliseconds, after which the action will return with an error
"""
self.do_command("waitForPopUp", [windowID,timeout,])
def choose_cancel_on_next_confirmation(self):
"""
By default, Selenium's overridden window.confirm() function will
return true, as if the user had manually clicked OK; after running
this command, the next call to confirm() will return false, as if
the user had clicked Cancel. Selenium will then resume using the
default behavior for future confirmations, automatically returning
true (OK) unless/until you explicitly call this command for each
confirmation.
"""
self.do_command("chooseCancelOnNextConfirmation", [])
def choose_ok_on_next_confirmation(self):
"""
Undo the effect of calling chooseCancelOnNextConfirmation. Note
that Selenium's overridden window.confirm() function will normally automatically
return true, as if the user had manually clicked OK, so you shouldn't
need to use this command unless for some reason you need to change
your mind prior to the next confirmation. After any confirmation, Selenium will resume using the
default behavior for future confirmations, automatically returning
true (OK) unless/until you explicitly call chooseCancelOnNextConfirmation for each
confirmation.
"""
self.do_command("chooseOkOnNextConfirmation", [])
def answer_on_next_prompt(self,answer):
"""
Instructs Selenium to return the specified answer string in response to
the next JavaScript prompt [window.prompt()].
'answer' is the answer to give in response to the prompt pop-up
"""
self.do_command("answerOnNextPrompt", [answer,])
def go_back(self):
"""
Simulates the user clicking the "back" button on their browser.
"""
self.do_command("goBack", [])
def refresh(self):
"""
Simulates the user clicking the "Refresh" button on their browser.
"""
self.do_command("refresh", [])
def close(self):
"""
Simulates the user clicking the "close" button in the titlebar of a popup
window or tab.
"""
self.do_command("close", [])
def is_alert_present(self):
"""
Has an alert occurred?
This function never throws an exception
"""
return self.get_boolean("isAlertPresent", [])
def is_prompt_present(self):
"""
Has a prompt occurred?
This function never throws an exception
"""
return self.get_boolean("isPromptPresent", [])
def is_confirmation_present(self):
"""
Has confirm() been called?
This function never throws an exception
"""
return self.get_boolean("isConfirmationPresent", [])
def get_alert(self):
"""
Retrieves the message of a JavaScript alert generated during the previous action, or fail if there were no alerts.
Getting an alert has the same effect as manually clicking OK. If an
alert is generated but you do not get/verify it, the next Selenium action
will fail.
NOTE: under Selenium, JavaScript alerts will NOT pop up a visible alert
dialog.
NOTE: Selenium does NOT support JavaScript alerts that are generated in a
page's onload() event handler. In this case a visible dialog WILL be
generated and Selenium will hang until someone manually clicks OK.
"""
return self.get_string("getAlert", [])
def get_confirmation(self):
"""
Retrieves the message of a JavaScript confirmation dialog generated during
the previous action.
By default, the confirm function will return true, having the same effect
as manually clicking OK. This can be changed by prior execution of the
chooseCancelOnNextConfirmation command. If an confirmation is generated
but you do not get/verify it, the next Selenium action will fail.
NOTE: under Selenium, JavaScript confirmations will NOT pop up a visible
dialog.
NOTE: Selenium does NOT support JavaScript confirmations that are
generated in a page's onload() event handler. In this case a visible
dialog WILL be generated and Selenium will hang until you manually click
OK.
"""
return self.get_string("getConfirmation", [])
def get_prompt(self):
"""
Retrieves the message of a JavaScript question prompt dialog generated during
the previous action.
Successful handling of the prompt requires prior execution of the
answerOnNextPrompt command. If a prompt is generated but you
do not get/verify it, the next Selenium action will fail.
NOTE: under Selenium, JavaScript prompts will NOT pop up a visible
dialog.
NOTE: Selenium does NOT support JavaScript prompts that are generated in a
page's onload() event handler. In this case a visible dialog WILL be
generated and Selenium will hang until someone manually clicks OK.
"""
return self.get_string("getPrompt", [])
def get_location(self):
"""
Gets the absolute URL of the current page.
"""
return self.get_string("getLocation", [])
def get_title(self):
"""
Gets the title of the current page.
"""
return self.get_string("getTitle", [])
def get_body_text(self):
"""
Gets the entire text of the page.
"""
return self.get_string("getBodyText", [])
def get_value(self,locator):
"""
Gets the (whitespace-trimmed) value of an input field (or anything else with a value parameter).
For checkbox/radio elements, the value will be "on" or "off" depending on
whether the element is checked or not.
'locator' is an element locator
"""
return self.get_string("getValue", [locator,])
def get_text(self,locator):
"""
Gets the text of an element. This works for any element that contains
text. This command uses either the textContent (Mozilla-like browsers) or
the innerText (IE-like browsers) of the element, which is the rendered
text shown to the user.
'locator' is an element locator
"""
return self.get_string("getText", [locator,])
def highlight(self,locator):
"""
Briefly changes the backgroundColor of the specified element yellow. Useful for debugging.
'locator' is an element locator
"""
self.do_command("highlight", [locator,])
def get_eval(self,script):
"""
Gets the result of evaluating the specified JavaScript snippet. The snippet may
have multiple lines, but only the result of the last line will be returned.
Note that, by default, the snippet will run in the context of the "selenium"
object itself, so ``this`` will refer to the Selenium object. Use ``window`` to
refer to the window of your application, e.g. ``window.document.getElementById('foo')``
If you need to use
a locator to refer to a single element in your application page, you can
use ``this.browserbot.findElement("id=foo")`` where "id=foo" is your locator.
'script' is the JavaScript snippet to run
"""
return self.get_string("getEval", [script,])
def is_checked(self,locator):
"""
Gets whether a toggle-button (checkbox/radio) is checked. Fails if the specified element doesn't exist or isn't a toggle-button.
'locator' is an element locator pointing to a checkbox or radio button
"""
return self.get_boolean("isChecked", [locator,])
def get_table(self,tableCellAddress):
"""
Gets the text from a cell of a table. The cellAddress syntax
tableLocator.row.column, where row and column start at 0.
'tableCellAddress' is a cell address, e.g. "foo.1.4"
"""
return self.get_string("getTable", [tableCellAddress,])
def get_selected_labels(self,selectLocator):
"""
Gets all option labels (visible text) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedLabels", [selectLocator,])
def get_selected_label(self,selectLocator):
"""
Gets option label (visible text) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedLabel", [selectLocator,])
def get_selected_values(self,selectLocator):
"""
Gets all option values (value attributes) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedValues", [selectLocator,])
def get_selected_value(self,selectLocator):
"""
Gets option value (value attribute) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedValue", [selectLocator,])
def get_selected_indexes(self,selectLocator):
"""
Gets all option indexes (option number, starting at 0) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedIndexes", [selectLocator,])
def get_selected_index(self,selectLocator):
"""
Gets option index (option number, starting at 0) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedIndex", [selectLocator,])
def get_selected_ids(self,selectLocator):
"""
Gets all option element IDs for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedIds", [selectLocator,])
def get_selected_id(self,selectLocator):
"""
Gets option element ID for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedId", [selectLocator,])
def is_something_selected(self,selectLocator):
"""
Determines whether some option in a drop-down menu is selected.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_boolean("isSomethingSelected", [selectLocator,])
def get_select_options(self,selectLocator):
"""
Gets all option labels in the specified select drop-down.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectOptions", [selectLocator,])
def get_attribute(self,attributeLocator):
"""
Gets the value of an element attribute. The value of the attribute may
differ across browsers (this is the case for the "style" attribute, for
example).
'attributeLocator' is an element locator followed by an @ sign and then the name of the attribute, e.g. "foo@bar"
"""
return self.get_string("getAttribute", [attributeLocator,])
def is_text_present(self,pattern):
"""
Verifies that the specified text pattern appears somewhere on the rendered page shown to the user.
'pattern' is a pattern to match with the text of the page
"""
return self.get_boolean("isTextPresent", [pattern,])
def is_element_present(self,locator):
"""
Verifies that the specified element is somewhere on the page.
'locator' is an element locator
"""
return self.get_boolean("isElementPresent", [locator,])
def is_visible(self,locator):
"""
Determines if the specified element is visible. An
element can be rendered invisible by setting the CSS "visibility"
property to "hidden", or the "display" property to "none", either for the
element itself or one if its ancestors. This method will fail if
the element is not present.
'locator' is an element locator
"""
return self.get_boolean("isVisible", [locator,])
def is_editable(self,locator):
"""
Determines whether the specified input element is editable, ie hasn't been disabled.
This method will fail if the specified element isn't an input element.
'locator' is an element locator
"""
return self.get_boolean("isEditable", [locator,])
def get_all_buttons(self):
"""
Returns the IDs of all buttons on the page.
If a given button has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllButtons", [])
def get_all_links(self):
"""
Returns the IDs of all links on the page.
If a given link has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllLinks", [])
def get_all_fields(self):
"""
Returns the IDs of all input fields on the page.
If a given field has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllFields", [])
def get_attribute_from_all_windows(self,attributeName):
"""
Returns every instance of some attribute from all known windows.
'attributeName' is name of an attribute on the windows
"""
return self.get_string_array("getAttributeFromAllWindows", [attributeName,])
def dragdrop(self,locator,movementsString):
"""
deprecated - use dragAndDrop instead
'locator' is an element locator
'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300"
"""
self.do_command("dragdrop", [locator,movementsString,])
def set_mouse_speed(self,pixels):
"""
Configure the number of pixels between "mousemove" events during dragAndDrop commands (default=10).
Setting this value to 0 means that we'll send a "mousemove" event to every single pixel
in between the start location and the end location; that can be very slow, and may
cause some browsers to force the JavaScript to timeout.
If the mouse speed is greater than the distance between the two dragged objects, we'll
just send one "mousemove" at the start location and then one final one at the end location.
'pixels' is the number of pixels between "mousemove" events
"""
self.do_command("setMouseSpeed", [pixels,])
def get_mouse_speed(self):
"""
Returns the number of pixels between "mousemove" events during dragAndDrop commands (default=10).
"""
return self.get_number("getMouseSpeed", [])
def drag_and_drop(self,locator,movementsString):
"""
Drags an element a certain distance and then drops it
'locator' is an element locator
'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300"
"""
self.do_command("dragAndDrop", [locator,movementsString,])
def drag_and_drop_to_object(self,locatorOfObjectToBeDragged,locatorOfDragDestinationObject):
"""
Drags an element and drops it on another element
'locatorOfObjectToBeDragged' is an element to be dragged
'locatorOfDragDestinationObject' is an element whose location (i.e., whose center-most pixel) will be the point where locatorOfObjectToBeDragged is dropped
"""
self.do_command("dragAndDropToObject", [locatorOfObjectToBeDragged,locatorOfDragDestinationObject,])
def window_focus(self):
"""
Gives focus to the currently selected window
"""
self.do_command("windowFocus", [])
def window_maximize(self):
"""
Resize currently selected window to take up the entire screen
"""
self.do_command("windowMaximize", [])
def get_all_window_ids(self):
"""
Returns the IDs of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowIds", [])
def get_all_window_names(self):
"""
Returns the names of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowNames", [])
def get_all_window_titles(self):
"""
Returns the titles of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowTitles", [])
def get_html_source(self):
"""
Returns the entire HTML source between the opening and
closing "html" tags.
"""
return self.get_string("getHtmlSource", [])
def set_cursor_position(self,locator,position):
"""
Moves the text cursor to the specified position in the given input element or textarea.
This method will fail if the specified element isn't an input element or textarea.
'locator' is an element locator pointing to an input element or textarea
'position' is the numerical position of the cursor in the field; position should be 0 to move the position to the beginning of the field. You can also set the cursor to -1 to move it to the end of the field.
"""
self.do_command("setCursorPosition", [locator,position,])
def get_element_index(self,locator):
"""
Get the relative index of an element to its parent (starting from 0). The comment node and empty text node
will be ignored.
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementIndex", [locator,])
def is_ordered(self,locator1,locator2):
"""
Check if these two elements have same parent and are ordered siblings in the DOM. Two same elements will
not be considered ordered.
'locator1' is an element locator pointing to the first element
'locator2' is an element locator pointing to the second element
"""
return self.get_boolean("isOrdered", [locator1,locator2,])
def get_element_position_left(self,locator):
"""
Retrieves the horizontal position of an element
'locator' is an element locator pointing to an element OR an element itself
"""
return self.get_number("getElementPositionLeft", [locator,])
def get_element_position_top(self,locator):
"""
Retrieves the vertical position of an element
'locator' is an element locator pointing to an element OR an element itself
"""
return self.get_number("getElementPositionTop", [locator,])
def get_element_width(self,locator):
"""
Retrieves the width of an element
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementWidth", [locator,])
def get_element_height(self,locator):
"""
Retrieves the height of an element
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementHeight", [locator,])
def get_cursor_position(self,locator):
"""
Retrieves the text cursor position in the given input element or textarea; beware, this may not work perfectly on all browsers.
Specifically, if the cursor/selection has been cleared by JavaScript, this command will tend to
return the position of the last location of the cursor, even though the cursor is now gone from the page. This is filed as SEL-243.
This method will fail if the specified element isn't an input element or textarea, or there is no cursor in the element.
'locator' is an element locator pointing to an input element or textarea
"""
return self.get_number("getCursorPosition", [locator,])
def get_expression(self,expression):
"""
Returns the specified expression.
This is useful because of JavaScript preprocessing.
It is used to generate commands like assertExpression and waitForExpression.
'expression' is the value to return
"""
return self.get_string("getExpression", [expression,])
def get_xpath_count(self,xpath):
"""
Returns the number of nodes that match the specified xpath, eg. "//table" would give
the number of tables.
'xpath' is the xpath expression to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you.
"""
return self.get_number("getXpathCount", [xpath,])
def assign_id(self,locator,identifier):
"""
Temporarily sets the "id" attribute of the specified element, so you can locate it in the future
using its ID rather than a slow/complicated XPath. This ID will disappear once the page is
reloaded.
'locator' is an element locator pointing to an element
'identifier' is a string to be used as the ID of the specified element
"""
self.do_command("assignId", [locator,identifier,])
def allow_native_xpath(self,allow):
"""
Specifies whether Selenium should use the native in-browser implementation
of XPath (if any native version is available); if you pass "false" to
this function, we will always use our pure-JavaScript xpath library.
Using the pure-JS xpath library can improve the consistency of xpath
element locators between different browser vendors, but the pure-JS
version is much slower than the native implementations.
'allow' is boolean, true means we'll prefer to use native XPath; false means we'll only use JS XPath
"""
self.do_command("allowNativeXpath", [allow,])
def ignore_attributes_without_value(self,ignore):
"""
Specifies whether Selenium will ignore xpath attributes that have no
value, i.e. are the empty string, when using the non-native xpath
evaluation engine. You'd want to do this for performance reasons in IE.
However, this could break certain xpaths, for example an xpath that looks
for an attribute whose value is NOT the empty string.
The hope is that such xpaths are relatively rare, but the user should
have the option of using them. Note that this only influences xpath
evaluation when using the ajaxslt engine (i.e. not "javascript-xpath").
'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness.
"""
self.do_command("ignoreAttributesWithoutValue", [ignore,])
def wait_for_condition(self,script,timeout):
"""
Runs the specified JavaScript snippet repeatedly until it evaluates to "true".
The snippet may have multiple lines, but only the result of the last line
will be considered.
Note that, by default, the snippet will be run in the runner's test window, not in the window
of your application. To get the window of your application, you can use
the JavaScript snippet ``selenium.browserbot.getCurrentWindow()``, and then
run your JavaScript in there
'script' is the JavaScript snippet to run
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForCondition", [script,timeout,])
def set_timeout(self,timeout):
"""
Specifies the amount of time that Selenium will wait for actions to complete.
Actions that require waiting include "open" and the "waitFor\*" actions.
The default timeout is 30 seconds.
'timeout' is a timeout in milliseconds, after which the action will return with an error
"""
self.do_command("setTimeout", [timeout,])
def wait_for_page_to_load(self,timeout):
"""
Waits for a new page to load.
You can use this command instead of the "AndWait" suffixes, "clickAndWait", "selectAndWait", "typeAndWait" etc.
(which are only available in the JS API).
Selenium constantly keeps track of new pages loading, and sets a "newPageLoaded"
flag when it first notices a page load. Running any other Selenium command after
turns the flag to false. Hence, if you want to wait for a page to load, you must
wait immediately after a Selenium command that caused a page-load.
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForPageToLoad", [timeout,])
def wait_for_frame_to_load(self,frameAddress,timeout):
"""
Waits for a new frame to load.
Selenium constantly keeps track of new pages and frames loading,
and sets a "newPageLoaded" flag when it first notices a page load.
See waitForPageToLoad for more information.
'frameAddress' is FrameAddress from the server side
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForFrameToLoad", [frameAddress,timeout,])
def get_cookie(self):
"""
Return all cookies of the current page under test.
"""
return self.get_string("getCookie", [])
def get_cookie_by_name(self,name):
"""
Returns the value of the cookie with the specified name, or throws an error if the cookie is not present.
'name' is the name of the cookie
"""
return self.get_string("getCookieByName", [name,])
def is_cookie_present(self,name):
"""
Returns true if a cookie with the specified name is present, or false otherwise.
'name' is the name of the cookie
"""
return self.get_boolean("isCookiePresent", [name,])
def create_cookie(self,nameValuePair,optionsString):
"""
Create a new cookie whose path and domain are same with those of current page
under test, unless you specified a path for this cookie explicitly.
'nameValuePair' is name and value of the cookie in a format "name=value"
'optionsString' is options for the cookie. Currently supported options include 'path', 'max_age' and 'domain'. the optionsString's format is "path=/path/, max_age=60, domain=.foo.com". The order of options are irrelevant, the unit of the value of 'max_age' is second. Note that specifying a domain that isn't a subset of the current domain will usually fail.
"""
self.do_command("createCookie", [nameValuePair,optionsString,])
def delete_cookie(self,name,optionsString):
"""
Delete a named cookie with specified path and domain. Be careful; to delete a cookie, you
need to delete it using the exact same path and domain that were used to create the cookie.
If the path is wrong, or the domain is wrong, the cookie simply won't be deleted. Also
note that specifying a domain that isn't a subset of the current domain will usually fail.
Since there's no way to discover at runtime the original path and domain of a given cookie,
we've added an option called 'recurse' to try all sub-domains of the current domain with
all paths that are a subset of the current path. Beware; this option can be slow. In
big-O notation, it operates in O(n\*m) time, where n is the number of dots in the domain
name and m is the number of slashes in the path.
'name' is the name of the cookie to be deleted
'optionsString' is options for the cookie. Currently supported options include 'path', 'domain' and 'recurse.' The optionsString's format is "path=/path/, domain=.foo.com, recurse=true". The order of options are irrelevant. Note that specifying a domain that isn't a subset of the current domain will usually fail.
"""
self.do_command("deleteCookie", [name,optionsString,])
def delete_all_visible_cookies(self):
"""
Calls deleteCookie with recurse=true on all cookies visible to the current page.
As noted on the documentation for deleteCookie, recurse=true can be much slower
than simply deleting the cookies using a known domain/path.
"""
self.do_command("deleteAllVisibleCookies", [])
def set_browser_log_level(self,logLevel):
"""
Sets the threshold for browser-side logging messages; log messages beneath this threshold will be discarded.
Valid logLevel strings are: "debug", "info", "warn", "error" or "off".
To see the browser logs, you need to
either show the log window in GUI mode, or enable browser-side logging in Selenium RC.
'logLevel' is one of the following: "debug", "info", "warn", "error" or "off"
"""
self.do_command("setBrowserLogLevel", [logLevel,])
def run_script(self,script):
"""
Creates a new "script" tag in the body of the current test window, and
adds the specified text into the body of the command. Scripts run in
this way can often be debugged more easily than scripts executed using
Selenium's "getEval" command. Beware that JS exceptions thrown in these script
tags aren't managed by Selenium, so you should probably wrap your script
in try/catch blocks if there is any chance that the script will throw
an exception.
'script' is the JavaScript snippet to run
"""
self.do_command("runScript", [script,])
def add_location_strategy(self,strategyName,functionDefinition):
"""
Defines a new function for Selenium to locate elements on the page.
For example,
if you define the strategy "foo", and someone runs click("foo=blah"), we'll
run your function, passing you the string "blah", and click on the element
that your function
returns, or throw an "Element not found" error if your function returns null.
We'll pass three arguments to your function:
* locator: the string the user passed in
* inWindow: the currently selected window
* inDocument: the currently selected document
The function must return null if the element can't be found.
'strategyName' is the name of the strategy to define; this should use only letters [a-zA-Z] with no spaces or other punctuation.
'functionDefinition' is a string defining the body of a function in JavaScript. For example: ``return inDocument.getElementById(locator);``
"""
self.do_command("addLocationStrategy", [strategyName,functionDefinition,])
def capture_entire_page_screenshot(self,filename):
"""
Saves the entire contents of the current window canvas to a PNG file.
Currently this only works in Mozilla and when running in chrome mode.
Contrast this with the captureScreenshot command, which captures the
contents of the OS viewport (i.e. whatever is currently being displayed
on the monitor), and is implemented in the RC only. Implementation
mostly borrowed from the Screengrab! Firefox extension. Please see
http://www.screengrab.org for details.
'filename' is the path to the file to persist the screenshot as. No filename extension will be appended by default. Directories will not be created if they do not exist, and an exception will be thrown, possibly by native code.
"""
self.do_command("captureEntirePageScreenshot", [filename,])
def set_context(self,context):
"""
Writes a message to the status bar and adds a note to the browser-side
log.
'context' is the message to be sent to the browser
"""
self.do_command("setContext", [context,])
def attach_file(self,fieldLocator,fileLocator):
"""
Sets a file input (upload) field to the file listed in fileLocator
'fieldLocator' is an element locator
'fileLocator' is a URL pointing to the specified file. Before the file can be set in the input field (fieldLocator), Selenium RC may need to transfer the file to the local machine before attaching the file in a web page form. This is common in selenium grid configurations where the RC server driving the browser is not the same machine that started the test. Supported Browsers: Firefox ("\*chrome") only.
"""
self.do_command("attachFile", [fieldLocator,fileLocator,])
def capture_screenshot(self,filename):
"""
Captures a PNG screenshot to the specified file.
'filename' is the absolute path to the file to be written, e.g. "c:\blah\screenshot.png"
"""
self.do_command("captureScreenshot", [filename,])
def shut_down_selenium_server(self):
"""
Kills the running Selenium Server and all browser sessions. After you run this command, you will no longer be able to send
commands to the server; you can't remotely start the server once it has been stopped. Normally
you should prefer to run the "stop" command, which terminates the current browser session, rather than
shutting down the entire server.
"""
self.do_command("shutDownSeleniumServer", [])
def key_down_native(self,keycode):
"""
Simulates a user pressing a key (without releasing it yet) by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyDownNative", [keycode,])
def key_up_native(self, keycode):
    """
    Release a key via a native OS keystroke.

    Uses the java.awt.Robot class server-side, so it simulates real typing:
    it does NOT honor shiftKeyDown/controlKeyDown/altKeyDown/metaKeyDown
    state and targets no particular HTML element -- focus the element first
    before running this command.

    'keycode' is an integer java.awt.event.KeyEvent keycode; note that Java
    keycodes are NOT the same thing as JavaScript keycodes!
    """
    self.do_command("keyUpNative", [keycode])
def key_press_native(self, keycode):
    """
    Press and release a key via a native OS keystroke.

    Uses the java.awt.Robot class server-side, so it simulates real typing:
    it does NOT honor shiftKeyDown/controlKeyDown/altKeyDown/metaKeyDown
    state and targets no particular HTML element -- focus the element first
    before running this command.

    'keycode' is an integer java.awt.event.KeyEvent keycode; note that Java
    keycodes are NOT the same thing as JavaScript keycodes!
    """
    self.do_command("keyPressNative", [keycode])
| gpl-2.0 |
lintzc/gpdb | src/test/tinc/tincrepo/mpp/models/sql_performance_tc.py | 9 | 20308 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import unittest2 as unittest
import tinctest
from tinctest.runner import TINCTextTestResult
from tinctest.lib import Gpdiff
from mpp.models import SQLTestCase
from mpp.lib.PSQL import PSQL
@tinctest.skipLoading("Test model. No tests loaded.")
class SQLPerformanceTestCase(SQLTestCase):
    """
    SQL test case that measures query runtime and compares behaviour/plans
    against a stored baseline result.

    Supported test metadata:

    @metadata: repetitions: number of times the sql should be executed (default: 3)
    @metadata: threshold: if the current performance of the query is worse than baseline by this much
               percentage, the test is marked failed (default: 5)
    @metadata: timeout: number of seconds to wait for the query to complete. When timeout is reached,
               the query is terminated. Value of 0 means no timeout. (default: 0)
    @metadata: drop_caches: whether to drop system cache and restart cluster before running each query (default: True)
    @metadata: explain: whether to gather explain plan details for each query (default: True)
    @metadata: plandiff: whether to diff the gathered explain plan against the baseline's plan and,
               when the plan is unchanged, skip re-execution and reuse the baseline runtime
               (default: True -- see _compare_previous_plan)
    """
def __init__(self, methodName, baseline_result = None, sql_file=None, db_name = None):
    """
    :param methodName: test method to run.
    :param baseline_result: previous run's result to compare plans/runtimes against.
    :param sql_file: path to the sql file under test.
    :param db_name: database the sql is executed against.
    """
    # Metadata-driven knobs; overwritten by _infer_metadata(), which runs
    # during the super().__init__ call below.
    self.repetitions = None
    self.threshold = None
    self.timeout = None
    self.plandiff = True
    self.doexplain = True
    self.drop_caches = True
    # Best (minimum) observed runtime in ms; -1.0 until the test has run.
    self._runtime = -1.0
    self._result_string = None
    # Add field to store plan body
    self._plan_body = ''
    # Switch to control whether to avoid run test or not
    self._avoid_execution = False;
    # Add dict to store optimization times
    self._optimization_time = {};
    # Add string to store explain analyze output
    self._ea_output = ''
    super(SQLPerformanceTestCase, self).__init__(methodName, baseline_result, sql_file, db_name)
    # Enforce the per-query timeout server-side (0 means unbounded).
    self.gucs.add('statement_timeout='+str(self.timeout))
def _infer_metadata(self):
    """Pull the performance knobs out of the test metadata after the base
    class has parsed it (repetitions, threshold, timeout and the three
    boolean switches)."""
    super(SQLPerformanceTestCase, self)._infer_metadata()
    meta = self._metadata
    self.repetitions = int(meta.get('repetitions', '3'))
    self.threshold = int(meta.get('threshold', '5'))
    self.timeout = int(meta.get('timeout', '0'))  # 0 means unbounded by default.
    # The boolean switches default to True (set in __init__) and are only
    # cleared by an explicit 'False' in the metadata.
    for attr, key in (('drop_caches', 'drop_caches'),
                      ('doexplain', 'explain'),
                      ('plandiff', 'plandiff')):
        if meta.get(key, 'True') == 'False':
            setattr(self, attr, False)
def setUp(self):
    """Set up the test database, then (when 'explain' metadata is on)
    gather the explain plan and compare it with the baseline's plan."""
    # Setup the database by calling out to the super class
    tinctest.logger.trace_in()
    super(SQLPerformanceTestCase, self).setUp()
    # Collect explain output and then compare with that of the last run;
    # this may set self._avoid_execution so run_test() skips execution.
    if self.doexplain:
        self._compare_previous_plan()
    tinctest.logger.trace_out()
def _compare_previous_plan(self):
    """
    Get plan first and then compare with that of the previous run. If nothing changed in the
    plan structure, there is no need to re-execute that query: self._avoid_execution is set
    and the runtime/result are copied from the previous run.
    """
    # Build <sql>_explain.sql in the output dir: the test sql with the query
    # prefixed by EXPLAIN, wrapped in the session GUCs / ORCA GUCs plus
    # stats-logging settings inside a start_ignore/end_ignore region.
    explain_sql_file = os.path.join(self.get_out_dir(), os.path.basename(self.sql_file).replace('.sql','_explain.sql'))
    with open(explain_sql_file, 'w') as o:
        with open(self.sql_file, 'r') as f:
            explain_write = False
            for line in f:
                # The first non-comment line is taken to be the query.
                if not line.startswith('--') and not explain_write:
                    #keep all the GUCs
                    o.write('-- start_ignore\n')
                    for guc_string in self.gucs:
                        o.write("SET %s;" %guc_string)
                    # NOTE(review): the bare query line is also emitted inside
                    # the ignore block here -- confirm this is intentional.
                    o.write(line)
                    for orca_guc_string in self.orcagucs:
                        o.write("%s;\n"%orca_guc_string)
                    # Add gucs to print optimization time to log
                    o.write("SET optimizer_print_optimization_stats=on;\n")
                    o.write("SET client_min_messages='log';\n")
                    o.write("SELECT gp_opt_version();\n")
                    o.write("SELECT current_timestamp;\n")
                    o.write('-- end_ignore\n')
                    o.write('explain %s' %line)
                    explain_write = True
                else:
                    o.write(line);
    explain_out_file = os.path.join(self.get_out_dir(), os.path.basename(explain_sql_file).replace('.sql','.out'))
    tinctest.logger.info("Gathering explain from sql : " + explain_sql_file)
    PSQL.run_sql_file(explain_sql_file, dbname = self.db_name, out_file = explain_out_file)
    # rewrite plan to keep plan body (also harvests optimization timings)
    self._rewrite_plan_file(explain_out_file)
    # retrieve previous plan and store it into a local file for diffing
    if self.baseline_result and self.plandiff:
        if self.baseline_result.result_detail:
            if 'plan_body' in self.baseline_result.result_detail.keys():
                previous_explain_output = self.baseline_result.result_detail['plan_body']
                previous_explain_output_file = explain_out_file.replace('.out','_previous.out')
                with open(previous_explain_output_file, 'w') as o:
                    o.write(previous_explain_output)
                # call GPDiff to compare two plans
                if Gpdiff.are_files_equal(previous_explain_output_file, explain_out_file):
                    # two plans are the same, avoid execution
                    self._avoid_execution = True
                    self._runtime = self.baseline_result.value # copy the runtime from previous result
                    self._result_string = self.baseline_result.result_string
                    # comment it out as we are experiencing some problems during parse.
                    if 'explain_analyze' in self.baseline_result.result_detail.keys():
                        tmp_ea = self.baseline_result.result_detail['explain_analyze']
                        self._ea_output = tmp_ea.replace('\\','')
                        if len(self._ea_output) == 0: # if there is no previous explain analyze output, generate it
                            self._generate_explain_analyze_output()
                    else:
                        self._generate_explain_analyze_output()
def _generate_explain_analyze_output(self):
    """
    Execute EXPLAIN ANALYZE for the test's query and store the raw output
    in self._ea_output.  Mirrors the file-building logic of
    _compare_previous_plan(), but with an 'explain analyze' prefix.
    """
    ea_sql_file = os.path.join(self.get_out_dir(), os.path.basename(self.sql_file).replace('.sql','_explain_analyze.sql'))
    with open(ea_sql_file, 'w') as o:
        with open(self.sql_file, 'r') as f:
            explain_write = False
            for line in f:
                # The first non-comment line is taken to be the query.
                if not line.startswith('--') and not explain_write:
                    #keep all the GUCs
                    o.write('-- start_ignore\n')
                    for guc_string in self.gucs:
                        o.write("SET %s;" %guc_string)
                    # NOTE(review): the bare query line is also emitted inside
                    # the ignore block here -- confirm this is intentional.
                    o.write(line)
                    for orca_guc_string in self.orcagucs:
                        o.write("%s;\n"%orca_guc_string)
                    # Add gucs to print optimization time to log
                    o.write("SET optimizer_print_optimization_stats=on;\n")
                    o.write("SET client_min_messages='log';\n")
                    o.write('-- end_ignore\n')
                    o.write('explain analyze %s' %line)
                    explain_write = True
                else:
                    o.write(line);
    ea_out_file = ea_sql_file.replace('.sql','.out')
    PSQL.run_sql_file(ea_sql_file, dbname = self.db_name, out_file = ea_out_file)
    with open(ea_out_file, 'r') as f:
        self._ea_output = f.read()
def _rewrite_plan_file(self, explain_out_file):
    """
    Rewrite an explain output file in place so that it keeps only the GUC
    preamble and the plan body, and harvest ORCA optimization timings.

    Side effects:
      * ``explain_out_file`` is overwritten with the trimmed content.
      * ``self._plan_body`` holds the trimmed content.
      * ``self._optimization_time`` maps timing names to milliseconds
        (0.0 when a timing never appeared, -1.0 when its value could not
        be parsed).
      * One fall-back status line is appended to fall_back_stats.txt in
        the output directory.
    """

    def parse_ms(text):
        # Extract the trailing '<float> ms' after the last ':'; -1.0 on
        # failure (str.rindex and float both raise ValueError).
        try:
            return float(text[text.rindex(':') + 1:text.rindex('ms')].strip())
        except ValueError:
            return -1.0

    def record_fall_back(note):
        # Append one '<sql_file> <note>' line to the shared stats file.
        stats_path = os.path.join(self.get_out_dir(), 'fall_back_stats.txt')
        mode = 'a' if os.path.exists(stats_path) else 'w'
        with open(stats_path, mode) as stats_file:
            stats_file.write('%s %s\n' % (self.sql_file, note))

    # Log-line label -> key the timing is stored under.  Replaces seven
    # copy-pasted try/except blocks in the previous version.
    timing_keys = (
        ("Statistics Derivation Time", 'statistics_time'),
        ("Total Optimization Time", 'total_opt_time'),
        ("DXL Query Serialization Time", 'dxl_query_serialization_time'),
        ("DXL To Expr Translation Time", 'dxl_expr_translation_time'),
        ("Group Merge Time", 'group_merge_time'),
        ("Expr To DXL Translation Time", 'expr_dxl_translation_time'),
        ("DXL Plan Serialization Time", 'dxl_plan_serialization_time'),
    )
    timings = dict((key, 0.0) for _label, key in timing_keys)
    guc_plan_content = ''
    able_to_write = True
    fall_back_checked = False
    # Bugfix: the inner file handles used to be named 'f' as well, shadowing
    # this handle while it was being iterated; they are now distinct names.
    with open(explain_out_file, 'r') as explain_file:
        for line in explain_file:
            # Keep the preamble up to and including '-- end_ignore', skip
            # everything until the 'QUERY PLAN' header, then keep the rest
            # (preceded by a '-- force_explain' marker).
            if line.startswith('-- end_ignore'):
                guc_plan_content += line
                able_to_write = False
            elif line.find('QUERY PLAN') != -1:
                guc_plan_content += '-- force_explain\n'
                able_to_write = True
            if able_to_write:
                guc_plan_content += line
            # Collect optimization timings; a line carries at most one label.
            for label, key in timing_keys:
                if line.find(label) != -1:
                    timings[key] = parse_ms(line)
                    break
            else:
                # Record the first fall-back status line only.
                if not fall_back_checked and line.find('Planner produced plan :0') != -1:
                    record_fall_back('Expected fall back')
                    fall_back_checked = True
                elif not fall_back_checked and line.find('Planner produced plan :1') != -1:
                    record_fall_back('Unexpected fall back')
                    fall_back_checked = True
    self._optimization_time = timings
    self._plan_body = guc_plan_content
    # Overwrite the explain output with the trimmed version.
    with open(explain_out_file, 'w') as trimmed:
        trimmed.write(guc_plan_content)
def run_test(self):
    """
    The method that subclasses should override to execute a sql test case differently.
    This encapsulates the execution mechanism of SQLTestCase. Given a base sql file and
    an ans file, runs all the sql files for the test case.

    Note that this also runs the other part sqls that make up the test case. For eg: if the
    base sql is query1.sql, the part sqls are of the form query1_part*.sql in the same location
    as the base sql.

    Returns False only when a previously recorded FAIL/ERROR result is being
    replayed; otherwise True (a diff failure raises via self.fail()).
    """
    tinctest.logger.trace_in()
    sql_file = self.sql_file
    ans_file = self.ans_file
    # if the plan is the same as previous one, skip this run and replay
    # the baseline's runtime/result (set by _compare_previous_plan).
    if self._avoid_execution:
        tinctest.logger.info("Skipping test execution as there is no plan change w.r.t previous run.")
        str_runtime_list = []
        for i in range(self.repetitions):
            str_runtime_list.append(str(self._runtime))
        # dump statistics to a runtime_stats.csv file (append across tests)
        output_file_path = os.path.join(self.get_out_dir(), 'runtime_stats.csv')
        existing = os.path.exists(output_file_path)
        mode = 'a' if existing else 'w'
        with open(output_file_path, mode) as f:
            f.write("%s,%s\n" % (os.path.basename(sql_file), ",".join(str_runtime_list)))
        if self._result_string == 'FAIL' or self._result_string == 'ERROR':
            tinctest.logger.trace_out("False")
            return False
        else:
            tinctest.logger.trace_out("True")
            return True
    guc_sql_file = self._add_gucs_to_sql_file(sql_file)
    runtime_list = []
    for i in range(self.repetitions):
        # refresh the caches after each iteration
        if self.drop_caches:
            self._restart_cluster(refresh_cache=True)
        runtime_list.append(self._run_and_measure_sql_file(guc_sql_file, i, ans_file))
    # dump statistics to a runtime_stats.csv file (append across tests)
    str_runtime_list = [str(x) for x in runtime_list]
    output_file_path = os.path.join(self.get_out_dir(), 'runtime_stats.csv')
    existing = os.path.exists(output_file_path)
    mode = 'a' if existing else 'w'
    with open(output_file_path, mode) as f:
        f.write("%s,%s\n" % (os.path.basename(sql_file), ",".join(str_runtime_list)))
    # Report the best (minimum) of the repeated runs.
    self._runtime = min(runtime_list)
    tinctest.logger.trace_out("True")
    return True
def _run_and_measure_sql_file(self, sql_file, iteration, ans_file = None):
    """
    Run *sql_file* (already wrapped with GUCs by _add_gucs_to_sql_file)
    against the test database (self.db_name), optionally verify the output
    against *ans_file* with Gpdiff, and return the measured runtime in ms.

    Fails the test (self.fail) when the diff does not match.
    """
    result = True
    self.test_artifacts.append(sql_file)
    # One output file per repetition so iterations don't clobber each other.
    out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace(".sql","_iter_%s.out" %iteration))
    self.test_artifacts.append(out_file)
    PSQL.run_sql_file(sql_file, dbname = self.db_name, out_file = out_file)
    if ans_file is not None:
        self.test_artifacts.append(ans_file)
        result = Gpdiff.are_files_equal(out_file, ans_file)
        if result == False:
            self.test_artifacts.append(out_file.replace('.out', '.diff'))
            self.fail('Diff failed between %s and %s' %(out_file, ans_file))
    return self._get_runtime(out_file)
def _get_runtime(self, out_file):
    """
    Match every psql \\timing line of the form 'Time: 123.25 ms' in
    *out_file* and return the sum of the values (milliseconds).
    """
    # Raw string fixes the invalid '\d' escape sequences of the old
    # pattern (a DeprecationWarning on modern Pythons), and the capture
    # group replaces the fragile line.split()[1] parsing.  Compiled once
    # outside the loop.
    pattern = re.compile(r'^Time: (\d+\.\d+) ms')
    total_time = 0.0
    with open(out_file, 'r') as f:
        for line in f:
            match = pattern.match(line)
            if match:
                total_time += float(match.group(1))
    return total_time
def _add_gucs_to_sql_file(self, sql_file, gucs_sql_file=None, optimizer=None):
    """
    Form test sql file by adding the defined gucs to the sql file
    @param sql_file Path to the test sql file
    @param gucs_sql_file Path where the guc sql file should be generated
                         (defaults to <out_dir>/<basename of sql_file>).
    @param optimizer Boolean that specifies whether optimizer is on or off;
                     None leaves the server default untouched.
    @return Path to the generated file (also recorded in test_artifacts).
    """
    if not gucs_sql_file:
        gucs_sql_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file))
    with open(gucs_sql_file, 'w') as o:
        gucs_write = False
        with open(sql_file, 'r') as f:
            for line in f:
                # Inject the GUC preamble just before the first
                # non-comment line, then copy the rest verbatim.
                if (line.find('--') != 0) and not gucs_write:
                    # Add gucs and then add the line
                    o.write('\n-- start_ignore\n')
                    for guc_string in self.gucs:
                        o.write("SET %s;\n" %guc_string)
                    for orca_guc_string in self.orcagucs:
                        o.write("%s;\n"%orca_guc_string)
                    gucs_write = True
                    # Write optimizer mode
                    optimizer_mode_str = ''
                    if optimizer is not None:
                        optimizer_mode_str = 'on' if optimizer else 'off'
                    if optimizer_mode_str:
                        o.write("SET optimizer=%s;\n" %optimizer_mode_str)
                    # Extra ORCA-only GUCs apply only when optimizer is on.
                    if optimizer is not None and optimizer:
                        for guc_string in self._optimizer_gucs:
                            o.write("SET %s;\n" %guc_string)
                    # \timing produces the 'Time: ... ms' lines parsed by
                    # _get_runtime().
                    o.write('\\timing on\n')
                    o.write('\n-- end_ignore\n')
                    o.write(line)
                else:
                    o.write(line)
    self.test_artifacts.append(gucs_sql_file)
    return gucs_sql_file
def defaultTestResult(self, stream=None, descriptions=None, verbosity=None):
    """Return a perf-aware result when the runner supplies the full
    stream/descriptions/verbosity triple, else a plain TestResult."""
    if not (stream and descriptions and verbosity):
        return unittest.TestResult()
    return SQLPerformanceTestCaseResult(stream, descriptions, verbosity)
class SQLPerformanceTestCaseResult(TINCTextTestResult):
    """Test result that records the measured runtime of each
    SQLPerformanceTestCase as the result's value."""
    def __init__(self, stream, descriptions, verbosity):
        super(SQLPerformanceTestCaseResult, self).__init__(stream, descriptions, verbosity)
    def addSuccess(self, test):
        # Add test._runtime to result.value; must be set before the super
        # call so the base class records it along with the outcome.
        self.value = test._runtime
        super(SQLPerformanceTestCaseResult, self).addSuccess(test)
    def addFailure(self, test, err):
        """
        Collect explain plan and an explain analyze output
        """
        # Even a failed run reports whatever runtime was measured so far.
        self.value = test._runtime
        super(SQLPerformanceTestCaseResult, self).addFailure(test, err)
| apache-2.0 |
lightningwolf/Flask-JqueryUiBootstrap | flask_jqueryuibootstrap/__init__.py | 1 | 1024 | #!/usr/bin/env python
# coding=utf8
from flask import Blueprint
try:
    from wtforms.fields import HiddenField
except ImportError:
    # WTForms is optional: keep the names importable, but make the Jinja
    # filter fail loudly if a template actually uses it.
    def HiddenField():
        pass

    def is_hidden_field_filter(field):
        raise RuntimeError('WTForms is not installed.')
else:
    def is_hidden_field_filter(field):
        """Jinja filter: True when *field* is a WTForms HiddenField."""
        return isinstance(field, HiddenField)
class JqueryUiBootstrap(object):
    """Flask extension serving the jQuery-UI-Bootstrap templates and static
    assets from a blueprint, and seeding the related config defaults."""

    def __init__(self, app=None):
        # Support both immediate and deferred (app-factory) initialization.
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register config defaults, the asset blueprint and the
        'bootstrap_is_hidden_field' Jinja filter on *app*."""
        app.config.setdefault('BOOTSTRAP_HTML5_SHIM', True)
        app.config.setdefault('BOOTSTRAP_GOOGLE_ANALYTICS_ACCOUNT', None)
        bp = Blueprint(
            'jquery_ui_bootstrap',
            __name__,
            template_folder='templates',
            static_folder='static',
            static_url_path=app.static_url_path + '/jquery_ui_bootstrap')
        app.register_blueprint(bp)
        app.jinja_env.filters['bootstrap_is_hidden_field'] = \
            is_hidden_field_filter
| mit |
kushview/libjuce | waflib/extras/c_emscripten.py | 55 | 2528 | #!/usr/bin/env python
# -*- coding: utf-8 vi:ts=4:noexpandtab
import subprocess, shlex, sys
from waflib.Tools import ccroot, gcc, gxx
from waflib.Configure import conf
from waflib.TaskGen import after_method, feature
from waflib.Tools.compiler_c import c_compiler
from waflib.Tools.compiler_cxx import cxx_compiler
# Register emscripten with waf's compiler auto-detection lists for every
# host OS we support (C first, then C++, per OS -- same order as before).
for _host_os in ('linux', 'darwin', 'gnu', 'aix'):
    for _table in (c_compiler, cxx_compiler):
        _table[_host_os].append('c_emscripten')
@conf
def get_emscripten_version(conf, cc):
    """
    Emscripten doesn't support processing '-' like clang/gcc, so the
    predefined macros are probed through an empty temporary file instead
    of stdin.  Sets DEST_OS/DEST_BINFMT/DEST_CPU/CC_VERSION in conf.env
    and returns the dict of macros; calls conf.fatal on any failure.
    """
    dummy = conf.cachedir.parent.make_node("waf-emscripten.c")
    dummy.write("")
    # '-dM -E' dumps the predefined macros instead of compiling.
    cmd = cc + ['-dM', '-E', '-x', 'c', dummy.abspath()]
    env = conf.env.env or None
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        out = p.communicate()[0]
    except Exception as e:
        conf.fatal('Could not determine emscripten version %r: %s' % (cmd, e))
    if not isinstance(out, str):
        out = out.decode(sys.stdout.encoding or 'latin-1')
    k = {}
    out = out.splitlines()
    for line in out:
        # '#define NAME VALUE' -> k[NAME] = VALUE
        lst = shlex.split(line)
        if len(lst)>2:
            key = lst[1]
            val = lst[2]
            k[key] = val
    # emcc is clang-based and defines EMSCRIPTEN; require both markers.
    if not ('__clang__' in k and 'EMSCRIPTEN' in k):
        conf.fatal('Could not determine the emscripten compiler version.')
    conf.env.DEST_OS = 'generic'
    conf.env.DEST_BINFMT = 'elf'
    conf.env.DEST_CPU = 'asm-js'
    conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__'])
    return k
@conf
def find_emscripten(conf):
    """Locate emcc/em++/emar and record them (and the detected compiler
    version) in conf.env."""
    cc = conf.find_program(['emcc'], var='CC')
    # Validates that the binary really is emscripten before committing.
    conf.get_emscripten_version(cc)
    conf.env.CC = cc
    conf.env.CC_NAME = 'emscripten'
    cxx = conf.find_program(['em++'], var='CXX')
    conf.env.CXX = cxx
    conf.env.CXX_NAME = 'emscripten'
    conf.find_program(['emar'], var='AR')
def configure(conf):
    """waf configure entry point: detect the emscripten toolchain and set
    up flags and file-name patterns for asm.js output."""
    conf.find_emscripten()
    conf.find_ar()
    # Reuse the generic gcc/g++ flag setup, then override the pieces that
    # differ for emscripten below.
    conf.gcc_common_flags()
    conf.gxx_common_flags()
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.cxx_load_tools()
    conf.cxx_add_flags()
    conf.link_add_flags()
    conf.env.ARFLAGS = ['rcs']
    # Shared libs and programs are emitted as .js/.html; static libs stay .a.
    conf.env.cshlib_PATTERN = '%s.js'
    conf.env.cxxshlib_PATTERN = '%s.js'
    conf.env.cstlib_PATTERN = '%s.a'
    conf.env.cxxstlib_PATTERN = '%s.a'
    conf.env.cprogram_PATTERN = '%s.html'
    conf.env.cxxprogram_PATTERN = '%s.html'
    # Compile/link target flag templates ('-c -o <tgt>' / '-o <tgt>').
    conf.env.CXX_TGT_F = ['-c', '-o', '']
    conf.env.CC_TGT_F = ['-c', '-o', '']
    conf.env.CXXLNK_TGT_F = ['-o', '']
    conf.env.CCLNK_TGT_F = ['-o', '']
    conf.env.append_value('LINKFLAGS',['-Wl,--enable-auto-import'])
amenonsen/ansible | hacking/build_library/build_ansible/commands.py | 29 | 1541 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod, abstractproperty
# The ABCMeta(...) base applies the ABC metaclass in a way that works on
# both Python 2 and 3 (this file still targets both, see __metaclass__).
# Previously no metaclass was applied, so the abstract* decorators below
# had no effect and incomplete subcommands could be instantiated silently.
class Command(ABCMeta('CommandBase', (object,), {})):
    """
    Subcommands of :program:`build-ansible.py`.

    This defines an interface that all subcommands must conform to. :program:`build-ansible.py` will
    require that these things are present in order to proceed.
    """
    @staticmethod
    @abstractmethod
    def name():
        """Name of the command. The same as the string is invoked with"""

    @staticmethod
    @abstractmethod
    def init_parser(add_parser):
        """
        Initialize and register an argparse ArgumentParser

        :arg add_parser: function which creates an ArgumentParser for the main program.

        Implementations should first create an ArgumentParser using `add_parser` and then populate
        it with the command line arguments that are needed.

        .. seealso:
            `add_parser` information in the :py:meth:`ArgumentParser.add_subparsers` documentation.
        """

    @staticmethod
    @abstractmethod
    def main(arguments):
        """
        Run the command

        :arg arguments: The **parsed** command line args

        This is the Command's entrypoint. The command line args are already parsed but from here
        on, the command can do its work.
        """
douban/code | tests/pulls/test_web_ui.py | 3 | 2157 | # encoding: UTF-8
import os
from vilya.models.project import CodeDoubanProject
from vilya.models.pull import PullRequest
from tests.base import TestApp
from mock import patch
import tests
from tests.base import TestCase
from tests.utils import clone, mock_method
# Absolute path to the fixture directory shipped inside the tests package.
data_path = os.path.join(os.path.dirname(tests.__file__), 'data')
class TestWebUI(TestCase):
    """End-to-end tests for the pull-request web UI: create a project and a
    fork, push a change to the fork, then open a pull request via the web
    forms."""

    def create_project_and_a_fork(self):
        # NOTE(review): the SkipTest below disables these tests wholesale --
        # everything after the raise (and every test depending on this
        # helper) is unreachable.
        from nose import SkipTest
        raise SkipTest(
            "These tests have Segmentation Fault")  # guibog 20121105
        orig = CodeDoubanProject.add('orig', 'origuser')
        with clone(orig.git_real_path) as workdir:
            with open(os.path.join(workdir, 'a'), 'w') as f:
                f.write("a line of code\n")
        fork = orig.fork('fork', 'forkuser')
        with clone(fork.git_real_path) as workdir:
            with open(os.path.join(workdir, 'b'), 'w') as f:
                f.write("another line of code\n")
        return orig, fork

    def create_an_auto_mergable_pull_request(self, from_proj):
        # Drive the UI as the fork's owner and submit the PR form.
        app = TestApp(extra_environ={'REMOTE_USER': str(from_proj.owner_id)})
        res = app.get('/%s' % str(from_proj.name))
        res = res.click("Pull Request")
        form = res.forms[1]
        form['body'] = "test"
        res = form.submit()
        # Follow redirects until we land on the created pull-request page.
        while 300 < res.status_int < 400:
            res = res.follow()
        return res

    def test_new_pull_request_ticket_should_be_created_in_target_project(self):
        orig, fork = self.create_project_and_a_fork()
        res = self.create_an_auto_mergable_pull_request(fork)
        assert res.request.path == '/orig/pull/1'

    @patch.object(PullRequest, 'is_auto_mergable',
                  mock_method(PullRequest.is_auto_mergable))
    def test_check_auto_mergable_should_be_async_to_speed_up_page_response(self):  # noqa
        orig, fork = self.create_project_and_a_fork()
        res = self.create_an_auto_mergable_pull_request(fork)
        pull_url = res.request.path
        app = TestApp(extra_environ={'REMOTE_USER': str(orig.owner_id)})
        res = app.get(pull_url)
        # Rendering the page must not trigger the (mocked) mergability check.
        assert not PullRequest.is_auto_mergable.mock.called
ImageEngine/gaffer | python/GafferSceneUITest/RotateToolTest.py | 6 | 17104 | ##########################################################################
#
# Copyright (c) 2017, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import imath
import IECore
import Gaffer
import GafferUITest
import GafferScene
import GafferSceneUI
class RotateToolTest( GafferUITest.TestCase ) :
def testRotate( self ) :
    """Successive tool rotations accumulate in the cube's rotate.y plug
    (going round 1.5 turns to check the value is not wrapped)."""
    script = Gaffer.ScriptNode()
    script["cube"] = GafferScene.Cube()

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["cube"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/cube" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    for i in range( 0, 6 ) :
        tool.rotate( imath.Eulerf( 0, 90, 0 ) )
        self.assertAlmostEqual( script["cube"]["transform"]["rotate"]["y"].getValue(), (i + 1) * 90, delta = 0.0001 )
def testInteractionWithGroupRotation( self ) :
    """A rotation applied through the tool's handles is expressed in the
    parent (group) space, so a rotated group changes which world axis a
    handle drag maps to."""
    script = Gaffer.ScriptNode()

    script["cube"] = GafferScene.Cube()

    script["group"] = GafferScene.Group()
    script["group"]["in"][0].setInput( script["cube"]["out"] )
    # Rotates the X axis onto the negative Z axis
    script["group"]["transform"]["rotate"]["y"].setValue( 90 )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["group"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/cube" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # Rotates 90 degrees using the Z handle. This will
    # rotate about the X axis in world space, because the
    # handle orientation has been affected by the group
    # transform (because default orientation is Parent).
    tool.rotate( imath.Eulerf( 0, 0, 90 ) )

    # We expect this to have aligned the cube's local X axis onto
    # the Y axis in world space, and the local Y axis onto the world
    # Z axis.
    self.assertTrue(
        imath.V3f( 0, 1, 0 ).equalWithAbsError(
            imath.V3f( 1, 0, 0 ) * script["group"]["out"].fullTransform( "/group/cube" ),
            0.000001
        )
    )
    self.assertTrue(
        imath.V3f( 0, 0, 1 ).equalWithAbsError(
            imath.V3f( 0, 1, 0 ) * script["group"]["out"].fullTransform( "/group/cube" ),
            0.000001
        )
    )
def testOrientation( self ) :
    """The same handle drag maps to different world-space rotations
    depending on the tool's Local/Parent/World orientation setting; each
    case is undone before trying the next."""
    script = Gaffer.ScriptNode()

    script["cube"] = GafferScene.Cube()
    script["cube"]["transform"]["rotate"]["y"].setValue( 90 )

    script["group"] = GafferScene.Group()
    script["group"]["in"][0].setInput( script["cube"]["out"] )
    script["group"]["transform"]["rotate"]["y"].setValue( 90 )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["group"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/cube" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # Local
    tool["orientation"].setValue( tool.Orientation.Local )
    with Gaffer.UndoScope( script ) :
        tool.rotate( imath.Eulerf( 0, 0, 90 ) )
    self.assertTrue(
        imath.V3f( 0, 1, 0 ).equalWithAbsError(
            imath.V3f( 1, 0, 0 ) * script["group"]["out"].fullTransform( "/group/cube" ),
            0.000001
        )
    )
    script.undo()

    # Parent
    tool["orientation"].setValue( tool.Orientation.Parent )
    with Gaffer.UndoScope( script ) :
        tool.rotate( imath.Eulerf( 90, 0, 0 ) )
    self.assertTrue(
        imath.V3f( 0, 1, 0 ).equalWithAbsError(
            imath.V3f( 1, 0, 0 ) * script["group"]["out"].fullTransform( "/group/cube" ),
            0.000001
        )
    )
    script.undo()

    # World
    tool["orientation"].setValue( tool.Orientation.World )
    with Gaffer.UndoScope( script ) :
        tool.rotate( imath.Eulerf( 0, 0, 90 ) )
    self.assertTrue(
        imath.V3f( 0, -1, 0 ).equalWithAbsError(
            imath.V3f( 1, 0, 0 ) * script["group"]["out"].fullTransform( "/group/cube" ),
            0.000001
        )
    )
def testTransformWithRotation( self ) :
    """The tool also works when the edited transform comes from a Transform
    node (rather than the object's own transform plug), including when that
    node already applies a rotation."""
    script = Gaffer.ScriptNode()

    script["plane"] = GafferScene.Plane()

    script["transformFilter"] = GafferScene.PathFilter()
    script["transformFilter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )

    script["transform"] = GafferScene.Transform()
    script["transform"]["in"].setInput( script["plane"]["out"] )
    script["transform"]["filter"].setInput( script["transformFilter"]["out"] )
    script["transform"]["transform"]["rotate"]["y"].setValue( 90 )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["transform"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    tool.rotate( imath.Eulerf( 90, 0, 0 ) )

    # The handle-space X rotation combined with the pre-existing Y rotation
    # should land the local axes as follows in world space.
    self.assertTrue(
        imath.V3f( 0, 1, 0 ).equalWithAbsError(
            imath.V3f( 1, 0, 0 ) * script["transform"]["out"].fullTransform( "/plane" ),
            0.000001
        )
    )
    self.assertTrue(
        imath.V3f( 0, 0, 1 ).equalWithAbsError(
            imath.V3f( 0, 1, 0 ) * script["transform"]["out"].fullTransform( "/plane" ),
            0.000001
        )
    )
def testPivotAffectsHandlesTransform( self ) :
    """The rotate handles are positioned at pivot + translate."""
    script = Gaffer.ScriptNode()
    script["cube"] = GafferScene.Cube()

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["cube"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/cube" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # Default pivot/translate: handles at the origin.
    self.assertEqual( tool.handlesTransform(), imath.M44f() )

    script["cube"]["transform"]["pivot"].setValue( imath.V3f( 1, 0, 0 ) )
    self.assertEqual(
        tool.handlesTransform(),
        imath.M44f().translate(
            script["cube"]["transform"]["pivot"].getValue()
        )
    )

    script["cube"]["transform"]["translate"].setValue( imath.V3f( 1, 2, -1 ) )
    self.assertEqual(
        tool.handlesTransform(),
        imath.M44f().translate(
            script["cube"]["transform"]["pivot"].getValue() +
            script["cube"]["transform"]["translate"].getValue()
        )
    )
def testPivotAndExistingTransform( self ) :
    """The handle position for a Transform node's pivot depends on the
    node's space: an upstream transform moves it in Local space but is
    irrelevant in World space."""
    script = Gaffer.ScriptNode()

    script["cube"] = GafferScene.Cube()

    script["transformFilter"] = GafferScene.PathFilter()
    script["transformFilter"]["paths"].setValue( IECore.StringVectorData( [ "/cube" ] ) )

    script["transform"] = GafferScene.Transform()
    script["transform"]["in"].setInput( script["cube"]["out"] )
    script["transform"]["filter"].setInput( script["transformFilter"]["out"] )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["transform"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/cube" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # Start with default pivot
    self.assertEqual(
        imath.V3f( 0 ) * tool.handlesTransform(),
        imath.V3f( 0, 0, 0 ),
    )

    # Offset it
    script["transform"]["transform"]["pivot"].setValue( imath.V3f( 1, 0, 0 ) )
    self.assertEqual(
        imath.V3f( 0 ) * tool.handlesTransform(),
        imath.V3f( 1, 0, 0 ),
    )

    # Now add an existing transform on the cube, prior
    # to it entering the transform node we're editing.
    # The pivot's world space position should be affected
    # because the Transform node is operating in Local space.
    script["cube"]["transform"]["rotate"]["y"].setValue( 90 )
    self.assertTrue(
        imath.V3f( 0, 0, -1 ).equalWithAbsError(
            imath.V3f( 0 ) * tool.handlesTransform(),
            0.0000001,
        )
    )

    # But if we edit in World space, then the existing transform
    # should have no relevance.
    script["transform"]["space"].setValue( script["transform"].Space.World )
    self.assertEqual(
        imath.V3f( 0 ) * tool.handlesTransform(),
        imath.V3f( 1, 0, 0 ),
    )
def testEditScopes( self ) :
    """RotateTool writes its edits into the view's active EditScope
    rather than onto the source node's transform plugs."""

    script = Gaffer.ScriptNode()

    script["sphere"] = GafferScene.Sphere()
    script["sphere"]["transform"]["translate"].setValue( imath.V3f( 1, 0, 0 ) )

    script["editScope"] = Gaffer.EditScope()
    script["editScope"].setup( script["sphere"]["out"] )
    script["editScope"]["in"].setInput( script["sphere"]["out"] )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["editScope"]["out"] )
    # Direct the tool's edits at the EditScope.
    view["editScope"].setInput( script["editScope"]["out"] )

    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/sphere" ] ) )
    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # Before rotating : selection is editable but no transform edit
    # exists in the scope yet.
    self.assertEqual( tool.handlesTransform(), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
    self.assertEqual( len( tool.selection() ), 1 )
    self.assertTrue( tool.selection()[0].editable() )
    self.assertFalse( GafferScene.EditScopeAlgo.hasTransformEdit( script["editScope"], "/sphere" ) )
    self.assertEqual( script["editScope"]["out"].transform( "/sphere" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )

    tool.rotate( imath.Eulerf( 0, 90, 0 ) )

    # After rotating : the edit lives in the EditScope, and the output
    # transform combines the original translate with the new rotation.
    self.assertEqual( tool.handlesTransform(), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
    self.assertEqual( len( tool.selection() ), 1 )
    self.assertTrue( tool.selection()[0].editable() )
    self.assertTrue( GafferScene.EditScopeAlgo.hasTransformEdit( script["editScope"], "/sphere" ) )
    self.assertEqual(
        script["editScope"]["out"].transform( "/sphere" ),
        imath.M44f().translate( imath.V3f( 1, 0, 0 ) ).rotate( imath.V3f( 0, math.pi / 2, 0 ) ),
    )
def testInteractionWithPointConstraint( self ) :
    """Handles follow the constrained (post-constraint) position of the
    selected object, for every orientation mode."""

    script = Gaffer.ScriptNode()

    script["sphere"] = GafferScene.Sphere()
    script["cube"] = GafferScene.Cube()
    script["cube"]["transform"]["translate"].setValue( imath.V3f( 5, 5, 0 ) )

    script["parent"] = GafferScene.Parent()
    script["parent"]["in"].setInput( script["sphere"]["out"] )
    script["parent"]["children"][0].setInput( script["cube"]["out"] )
    script["parent"]["parent"].setValue( "/" )

    script["sphereFilter"] = GafferScene.PathFilter()
    script["sphereFilter"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )

    # Point-constrain the sphere to the cube's position.
    script["constraint"] = GafferScene.PointConstraint()
    script["constraint"]["in"].setInput( script["parent"]["out"] )
    script["constraint"]["filter"].setInput( script["sphereFilter"]["out"] )
    script["constraint"]["target"].setValue( "/cube" )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["constraint"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/sphere" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # A point constraint only affects translation, so the handles should
    # sit at the cube's position regardless of the orientation mode.
    for orientation in ( tool.Orientation.Local, tool.Orientation.Parent, tool.Orientation.World ) :
        tool["orientation"].setValue( orientation )
        self.assertEqual( tool.handlesTransform(), imath.M44f().translate( script["cube"]["transform"]["translate"].getValue() ) )
def testInteractionWithParentConstraint( self ) :
    """A parent constraint also affects rotation, so Local orientation must
    pick up the constrained transform, while edits still land on the
    source node's plugs."""

    script = Gaffer.ScriptNode()

    script["sphere"] = GafferScene.Sphere()
    script["cube"] = GafferScene.Cube()
    script["cube"]["transform"]["translate"].setValue( imath.V3f( 5, 5, 0 ) )
    script["cube"]["transform"]["rotate"]["x"].setValue( 90 )

    script["parent"] = GafferScene.Parent()
    script["parent"]["in"].setInput( script["sphere"]["out"] )
    script["parent"]["children"][0].setInput( script["cube"]["out"] )
    script["parent"]["parent"].setValue( "/" )

    script["sphereFilter"] = GafferScene.PathFilter()
    script["sphereFilter"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )

    script["constraint"] = GafferScene.ParentConstraint()
    script["constraint"]["in"].setInput( script["parent"]["out"] )
    script["constraint"]["filter"].setInput( script["sphereFilter"]["out"] )
    script["constraint"]["target"].setValue( "/cube" )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["constraint"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/sphere" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )

    # Parent orientation : translation only (the parent is the world here).
    tool["orientation"].setValue( tool.Orientation.Parent )
    self.assertEqual( tool.handlesTransform(), imath.M44f().translate( script["cube"]["transform"]["translate"].getValue() ) )

    # Local orientation : full constrained transform, rotation included.
    tool["orientation"].setValue( tool.Orientation.Local )
    self.assertEqual( tool.handlesTransform(), script["constraint"]["out"].transform( "/sphere" ) )

    # Rotating through the tool writes to the sphere's own rotate plug.
    tool.rotate( imath.Eulerf( 0, 90, 0 ) )
    self.assertEqual( script["sphere"]["transform"]["rotate"].getValue(), imath.V3f( 0, 90, 0 ) )
def testNegativeLocalScale( self ) :
    """Handles on an object with a negative local scale : the handle axes
    should flip direction but ignore the scale's magnitude, and dragging
    must still rotate the object in the expected direction."""

    script = Gaffer.ScriptNode()

    script["plane"] = GafferScene.Plane()
    script["plane"]["transform"]["scale"]["z"].setValue( -10 )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["plane"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )
    tool["orientation"].setValue( tool.Orientation.Local )

    # We want the direction of the handles to reflect the
    # flipped scale, but not its magnitude.
    self.assertTrue(
        tool.handlesTransform().equalWithAbsError(
            imath.M44f().scale( imath.V3f( 1, 1, -1 ) ),
            0.000001
        )
    )

    # And the handles need to move the object in the right
    # direction still. The flipped Z axis means a +45 handle
    # drag becomes a -45 rotation on the plug.
    with Gaffer.UndoScope( script ) :
        tool.rotate( imath.Eulerf( 0, 45, 0 ) )
        self.assertTrue(
            script["plane"]["transform"]["rotate"].getValue().equalWithAbsError(
                imath.V3f( 0, -45, 0 ),
                0.0001
            )
        )
        script.undo()

    # When orientation is Parent or World, the scale should
    # not be reflected in the handles.
    for orientation in ( tool.Orientation.World, tool.Orientation.Parent ) :
        tool["orientation"].setValue( orientation )
        self.assertEqual( tool.handlesTransform(), imath.M44f() )
        with Gaffer.UndoScope( script ) :
            tool.rotate( imath.Eulerf( 0, 45, 0 ) )
            self.assertTrue(
                script["plane"]["transform"]["rotate"].getValue().equalWithAbsError(
                    imath.V3f( 0, 45, 0 ),
                    0.0001
                )
            )
            script.undo()
def testNegativeParentScale( self ) :
    """Handles on an object whose *parent* has a negative scale : Parent
    and Local orientations inherit the flip, World does not."""

    script = Gaffer.ScriptNode()

    script["plane"] = GafferScene.Plane()

    script["group"] = GafferScene.Group()
    script["group"]["in"][0].setInput( script["plane"]["out"] )
    script["group"]["transform"]["scale"]["z"].setValue( -10 )

    view = GafferSceneUI.SceneView()
    view["in"].setInput( script["group"]["out"] )
    GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )

    tool = GafferSceneUI.RotateTool( view )
    tool["active"].setValue( True )
    # NOTE(review) : this initial setting is immediately overridden by the
    # loop below; it appears to be redundant.
    tool["orientation"].setValue( tool.Orientation.Local )

    # When using Parent or Local orientation, we want the direction of
    # the handles to reflect the flipped scale, but not its magnitude.
    for orientation in ( tool.Orientation.Parent, tool.Orientation.Local ) :
        tool["orientation"].setValue( orientation )
        self.assertTrue(
            tool.handlesTransform().equalWithAbsError(
                imath.M44f().scale( imath.V3f( 1, 1, -1 ) ),
                0.000001
            )
        )
        with Gaffer.UndoScope( script ) :
            tool.rotate( imath.Eulerf( 0, 45, 0 ) )
            self.assertTrue(
                script["plane"]["transform"]["rotate"].getValue().equalWithAbsError(
                    imath.V3f( 0, 45, 0 ),
                    0.0001
                )
            )
            script.undo()

    # When orientation is World, the scale should
    # not be reflected in the handles, and a +45 world-space drag maps
    # to a -45 local rotation because of the flipped parent axis.
    tool["orientation"].setValue( tool.Orientation.World )
    self.assertEqual( tool.handlesTransform(), imath.M44f() )
    tool.rotate( imath.Eulerf( 0, 45, 0 ) )
    self.assertTrue(
        script["plane"]["transform"]["rotate"].getValue().equalWithAbsError(
            imath.V3f( 0, -45, 0 ),
            0.0001
        )
    )
# Allow the test module to be run directly.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
akshitsoota/Chicago-Crime-Data | dataprocessor.py | 1 | 6412 | import shutil, json, time, sys, csv, os
# PROGRESS BAR STUFF
# Reference: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress(count, total, status=''):
    """Draw a single-line console progress bar.

    Overwrites the current terminal line (trailing ``\\r``) so repeated
    calls animate in place.

    count: work completed so far
    total: total amount of work (must be non-zero)
    status: optional trailing status message
    """
    width = 60
    filled = int(round(width * count / float(total)))
    percent = round(100.0 * count / float(total), 1)

    # A full bar is solid '='; a partial bar ends in a '>' head followed
    # by '-' padding out to the fixed width.
    if filled == width:
        bar = '=' * width
    else:
        bar = '=' * (filled - 1) + '>' + '-' * (width - filled)

    sys.stdout.write('[%s] %s%s ... %s\r' % (bar, percent, '%', status))
    # Flush so the bar updates even though no newline is emitted.
    sys.stdout.flush()
# CRIME DATA PROCESSOR
# User controlled variables
# Path to the raw Chicago crime CSV export.
file_name = "/home/axe/Downloads/Crimes_-_2001_to_present.csv"
# Processor code
# extract: output-folder name -> list of [column name, slice list] pairs.
# Each slice (start, end) carves a substring out of the column's value;
# the pieces are joined to build the per-record output file name.
# Here: classify by Primary Type plus month (chars 0-2) and year
# (chars 6-10) of the Date column.
extract = {"desc_month_year": [['Primary Type', []],
                               ['Date', [(0, 2), (6, 10)]]]}
# flex: output file-name suffix -> columns to keep in the JSON objects.
# An empty list means "keep every column".
flex = {"_condensed": ["ID", "Date", "Primary Type", "Beat"]}
# conditions: name -> predicate(class_name, row); a row is only emitted
# when every predicate returns True.
conditions = {}
# When set, stop processing after this many CSV rows (debug aid).
debug_break_point_processing = None
# When True, re-read the emitted JSON files and assert their object
# counts sum back to the input record count.
debug_run_count_check_on_folders = True
# Some pre-processing
if len(extract) == 0:
    print "Must have some information to extract"
    sys.exit(0)
if len(flex) == 0:
    # Default: a single output set with no suffix, keeping all columns.
    flex = {"": []}
if len(conditions) == 0:
    # Default: a single always-true condition named "_" (the count check
    # below relies on this exact default to know no rows were filtered).
    conditions = {"_": lambda _, __: True}
# Count the number of records (minus one for the CSV header row).
# NOTE(review): readlines() loads the whole file into memory; fine for a
# one-shot script, but a line-by-line count would be cheaper.
num_records = -1
with open(file_name, "r") as crime_file:
    progress(0, 100, "Counting number of Chicago Crime Records since 2001...")
    num_records = len(crime_file.readlines()) - 1
    progress(100, 100, "Counting number of Chicago Crime Records since 2001...")
print "\r"
print "Number of records found: %d" % num_records
# Total work = rows x extract-classes x flex-variants x conditions.
progress(0, num_records, "Processing Chicago Crime Data since 2001... (%d of %d)" %
         (0, (num_records * (len(extract) * len(flex) * len(conditions)))))
# Clean up the directories and initialize them too: one output folder
# per extract class, recreated from scratch on every run.
for folder_name in extract:
    shutil.rmtree(folder_name, ignore_errors=True)
    os.makedirs(folder_name)
# Now, go over each record.
# progress_count tracks rows handled across every (extract, flex) pass;
# files_created remembers which output files already hold a JSON list
# opener so subsequent objects are appended with a leading comma.
progress_count, total_count = 0, num_records * len(extract) * len(flex) * len(conditions)
files_created = {}
# Open each file based on extract and flex: the CSV is re-read once per
# (classification, field-subset) combination.
for class_name, classification_criteria in extract.iteritems():
    for file_extension, required_fields in flex.iteritems():
        with open(file_name, "r") as crime_file:
            # Open the CSV and start processing each row
            crime_csv_parser = csv.reader(crime_file,
                                          delimiter=',',
                                          quotechar='"',
                                          lineterminator='\n')
            header = []
            for counter, row in enumerate(crime_csv_parser):
                # Catch header if we haven't got it
                if header == []:
                    header.extend(row)
                    continue
                # Check that this row meets all the conditions
                meet_all_conditions = True
                for _, condition_func in conditions.iteritems():
                    meet_all_conditions = meet_all_conditions and condition_func(class_name, row)
                if not meet_all_conditions:
                    # Skip this iteration but update the progress.
                    progress_count = progress_count + 1
                    progress(progress_count, total_count, "Processing Chicago Crime Data since 2001... (%d of %d)" % (progress_count, total_count))
                    # BUG FIX: the original lacked this 'continue', so a
                    # filtered row was still written out and the progress
                    # counter was incremented a second time below.
                    continue
                # For each criteria, form the file name
                dest_file_name = []
                for criteria_item in classification_criteria:
                    column_name, split_points = criteria_item
                    # Start extracting the information
                    field_item = row[header.index(column_name)]
                    new_field = field_item
                    if len(split_points) != 0:
                        # Join the requested substrings, e.g. month+year
                        # pulled out of a date string.
                        new_field = "_".join([field_item[start:end] for start, end in split_points])
                    # Sanitise so the value is usable as a file name.
                    new_field = new_field.replace("/", "_").replace(" ", "_")
                    # Add to the dest_file_name
                    dest_file_name.append(new_field)
                # Make the row into a dictionary
                neat_row = {}
                if required_fields == []:
                    # Get them all
                    neat_row = {header[idx]: row[idx] for idx in range(len(header))}
                else:
                    # Filter to the requested columns only
                    neat_row = {header[idx]: row[idx] for idx in range(len(header)) if header[idx] in required_fields}
                # Form the file name and check in hashmap and add as necessary.
                # First object opens the JSON list ("["); later objects are
                # appended with a comma. The closing "]" is written below.
                dest_file_name = "_".join(dest_file_name)
                dest_file_name = os.path.join(class_name, dest_file_name + file_extension + '.json')
                if dest_file_name in files_created:
                    with open(dest_file_name, "a") as write_to:
                        write_to.write(",%s" % (json.dumps(neat_row)))
                else:
                    with open(dest_file_name, "w") as write_to:
                        write_to.write("[%s" % (json.dumps(neat_row)))
                    files_created[dest_file_name] = True
                # Print progress
                progress_count = progress_count + 1
                progress(progress_count, total_count, "Processing Chicago Crime Data since 2001... (%d of %d)" % (progress_count, total_count))
                # Check for break point (debug aid)
                if debug_break_point_processing is not None and \
                        counter >= debug_break_point_processing:
                    break
# Close the list parenthesis for each of the files
for file_name in files_created:
    with open(file_name, "a") as write_to:
        write_to.write("]")
# Run checks if enabled: re-read every emitted JSON file and verify that,
# per output folder, the object counts sum back to num_records.
if debug_run_count_check_on_folders == True:
    print "\r"
    # Check if assertion can be done because of the conditions: if any
    # user-supplied condition filtered rows, the totals legitimately
    # differ and the check is meaningless.
    if len(conditions) > 1 or "_" not in conditions:
        print "Unable to assert count because of conditions being placed on the output objects"
        sys.exit(-1)
    # Assertion can be done
    progress(0, len(files_created), "Asserting folder and file object sum integrity (0 of %d)" % len(files_created))
    progress_count = 0
    # Classify: map output folder -> list of its file names.
    folder_file_mapping = {}
    for file_name in files_created:
        folder, file_name = file_name.split("/")
        if folder in folder_file_mapping:
            folder_file_mapping[folder].append(file_name)
        else:
            folder_file_mapping[folder] = [file_name]
    # Now, sum and check
    folder_count_mapping = {folder: 0 for folder in folder_file_mapping}
    for folder_name, file_names in folder_file_mapping.iteritems():
        for file_name in file_names:
            with open(os.path.join(folder_name, file_name), "r") as file_to_count_in:
                folder_count_mapping[folder_name] += len(json.load(file_to_count_in))
            # Increment progress
            progress_count = progress_count + 1
            progress(progress_count, len(files_created), "Asserting folder and file object sum integrity (%d of %d)" % (progress_count, len(files_created)))
        # Run assert: every folder must account for all input records.
        try:
            assert folder_count_mapping[folder_name] == num_records
        except AssertionError:
            print "\r\nAssertion failed on folder %s with files" % folder_name
            print file_names
            sys.exit(-1)
# Print and exit
print "\r"
| mit |
eduNEXT/edx-platform | lms/djangoapps/grades/course_grade.py | 3 | 13380 | """
CourseGrade Class
"""
from abc import abstractmethod
from collections import OrderedDict, defaultdict
from ccx_keys.locator import CCXLocator
from django.conf import settings
from lazy import lazy
from openedx.core.lib.grade_utils import round_away_from_zero
from xmodule import block_metadata_utils
from .config import assume_zero_if_absent
from .scores import compute_percent
from .subsection_grade import ZeroSubsectionGrade
from .subsection_grade_factory import SubsectionGradeFactory
class CourseGradeBase:
    """
    Base class for Course Grades.

    Holds the overall grade summary (percent, letter grade, pass/fail)
    for one user in one course, and exposes lazily-computed views over
    the per-chapter / per-subsection grade structure. Subclasses supply
    ``_get_subsection_grade`` to say where subsection grades come from.
    """
    def __init__(self, user, course_data, percent=0.0, letter_grade=None, passed=False, force_update_subsections=False):
        self.user = user
        self.course_data = course_data
        self.percent = percent
        self.passed = passed

        # Convert empty strings to None when reading from the table
        self.letter_grade = letter_grade or None
        self.force_update_subsections = force_update_subsections

    def __str__(self):
        return 'Course Grade: percent: {}, letter_grade: {}, passed: {}'.format(
            str(self.percent),
            self.letter_grade,
            self.passed,
        )

    @property
    def attempted(self):
        """
        Returns whether at least one problem was attempted
        by the user in the course.

        Base implementation always returns False; overridden by
        ``CourseGrade``.
        """
        return False

    def subsection_grade(self, subsection_key):
        """
        Returns the subsection grade for the given subsection usage key.

        Raises `KeyError` if the course structure does not contain the key.

        If the course structure contains the key, this will always succeed
        (and return a grade) regardless of whether the user can access that section;
        it is up to the caller to ensure that the grade isn't
        shown to users that shouldn't be able to access it
        (e.g. a student shouldn't see a grade for an unreleased subsection);
        """
        # look in the user structure first and fallback to the collected;
        # however, we assume the state of course_data is intentional,
        # so we use effective_structure to avoid additional fetching
        subsection = (
            self.course_data.effective_structure[subsection_key]
            if subsection_key in self.course_data.effective_structure
            else self.course_data.collected_structure[subsection_key]
        )
        return self._get_subsection_grade(subsection)

    @lazy
    def graded_subsections_by_format(self):
        """
        Returns grades for the subsections in the course in
        a dict keyed by subsection format types.
        """
        subsections_by_format = defaultdict(OrderedDict)
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                if subsection_grade.graded:
                    graded_total = subsection_grade.graded_total
                    # Only include subsections with points possible.
                    if graded_total.possible > 0:
                        subsections_by_format[subsection_grade.format][subsection_grade.location] = subsection_grade
        return subsections_by_format

    @lazy
    def chapter_grades(self):
        """
        Returns a dictionary of dictionaries.
        The primary dictionary is keyed by the chapter's usage_key.
        The secondary dictionary contains the chapter's
        subsection grades, display name, and url name.
        """
        course_structure = self.course_data.structure
        grades = OrderedDict()
        for chapter_key in course_structure.get_children(self.course_data.location):
            grades[chapter_key] = self._get_chapter_grade_info(course_structure[chapter_key], course_structure)
        return grades

    @lazy
    def subsection_grades(self):
        """
        Returns an ordered dictionary of subsection grades,
        keyed by subsection location.
        """
        subsection_grades = defaultdict(OrderedDict)
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                subsection_grades[subsection_grade.location] = subsection_grade
        return subsection_grades

    @lazy
    def problem_scores(self):
        """
        Returns a dict of problem scores keyed by their locations.
        """
        problem_scores = {}
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                problem_scores.update(subsection_grade.problem_scores)
        return problem_scores

    def chapter_percentage(self, chapter_key):
        """
        Returns the rounded aggregate weighted percentage for the given chapter.
        Raises:
            KeyError if the chapter is not found.
        """
        earned, possible = 0.0, 0.0
        chapter_grade = self.chapter_grades[chapter_key]
        for section in chapter_grade['sections']:
            earned += section.graded_total.earned
            possible += section.graded_total.possible
        return compute_percent(earned, possible)

    def score_for_module(self, location):
        """
        Calculate the aggregate weighted score for any location in the course.
        This method returns a tuple containing (earned_score, possible_score).

        If the location is of 'problem' type, this method will return the
        possible and earned scores for that problem. If the location refers to a
        composite module (a vertical or section ) the scores will be the sums of
        all scored problems that are children of the chosen location.
        """
        if location in self.problem_scores:
            score = self.problem_scores[location]
            return score.earned, score.possible
        # Recurse over children and sum their scores.
        children = self.course_data.structure.get_children(location)
        earned, possible = 0.0, 0.0
        for child in children:
            child_earned, child_possible = self.score_for_module(child)
            earned += child_earned
            possible += child_possible
        return earned, possible

    @lazy
    def grader_result(self):
        """
        Returns the result from the course grader.
        """
        course = self._prep_course_for_grading(self.course_data.course)
        return course.grader.grade(
            self.graded_subsections_by_format,
            generate_random_scores=settings.GENERATE_PROFILE_SCORES,
        )

    @property
    def summary(self):
        """
        Returns the grade summary as calculated by the course's grader.
        DEPRECATED: To be removed as part of TNL-5291.
        """
        # TODO(TNL-5291) Remove usages of this deprecated property.
        grade_summary = self.grader_result
        grade_summary['percent'] = self.percent
        grade_summary['grade'] = self.letter_grade
        return grade_summary

    @classmethod
    def get_subsection_type_graders(cls, course):
        """
        Returns a dictionary mapping subsection types to their
        corresponding configured graders, per grading policy.
        """
        course = cls._prep_course_for_grading(course)
        return {
            subsection_type: subsection_type_grader
            for (subsection_type_grader, subsection_type, _)
            in course.grader.subgraders
        }

    @classmethod
    def _prep_course_for_grading(cls, course):
        """
        Make sure any overrides to the grading policy are used.
        This is most relevant for CCX courses.

        Right now, we still access the grading policy from the course
        object. Once we get the grading policy from the BlockStructure
        this will no longer be needed - since BlockStructure correctly
        retrieves/uses all field overrides.
        """
        if isinstance(course.id, CCXLocator):
            # clean out any field values that may have been set from the
            # parent course of the CCX course.
            course._field_data_cache = {}  # pylint: disable=protected-access

            # this is "magic" code that automatically retrieves any overrides
            # to the grading policy and updates the course object.
            course.set_grading_policy(course.grading_policy)
        return course

    def _get_chapter_grade_info(self, chapter, course_structure):
        """
        Helper that returns a dictionary of chapter grade information.
        """
        chapter_subsection_grades = self._get_subsection_grades(course_structure, chapter.location)
        return {
            'display_name': block_metadata_utils.display_name_with_default(chapter),
            'url_name': block_metadata_utils.url_name_for_block(chapter),
            'sections': chapter_subsection_grades,
        }

    def _get_subsection_grades(self, course_structure, chapter_key):
        """
        Returns a list of subsection grades for the given chapter.
        """
        return [
            self._get_subsection_grade(course_structure[subsection_key], self.force_update_subsections)
            for subsection_key in _uniqueify_and_keep_order(course_structure.get_children(chapter_key))
        ]

    @abstractmethod
    def _get_subsection_grade(self, subsection, force_update_subsections=False):
        """
        Abstract method to be implemented by subclasses for returning
        the grade of the given subsection.
        """
        raise NotImplementedError
class ZeroCourseGrade(CourseGradeBase):
    """
    Course Grade class for Zero-value grades when no problems were
    attempted in the course.
    """
    def _get_subsection_grade(self, subsection, force_update_subsections=False):
        # Always a zero-valued grade; nothing is read from or written
        # to grade storage.
        return ZeroSubsectionGrade(subsection, self.course_data)
class CourseGrade(CourseGradeBase):
    """
    Course Grade class when grades are updated or read from storage.
    """
    def __init__(self, user, course_data, *args, **kwargs):
        super().__init__(user, course_data, *args, **kwargs)
        self._subsection_grade_factory = SubsectionGradeFactory(user, course_data=course_data)

    def update(self):
        """
        Updates the grade for the course. Also updates subsection grades
        if self.force_update_subsections is true, via the lazy call
        to self.grader_result.
        """
        # TODO update this code to be more functional and readable.
        # Currently, it is hard to follow since there are plenty of
        # side-effects. Once functional, force_update_subsections
        # can be passed through and not confusingly stored and used
        # at a later time.
        grade_cutoffs = self.course_data.course.grade_cutoffs
        # Accessing self.grader_result (lazily) triggers the actual
        # grading pass, including subsection updates when forced.
        self.percent = self._compute_percent(self.grader_result)
        self.letter_grade = self._compute_letter_grade(grade_cutoffs, self.percent)
        self.passed = self._compute_passed(grade_cutoffs, self.percent)
        return self

    @lazy
    def attempted(self):  # lint-amnesty, pylint: disable=invalid-overridden-method
        """
        Returns whether any of the subsections in this course
        have been attempted by the student.
        """
        # With the assume-zero policy enabled, every learner is treated
        # as having attempted the course.
        if assume_zero_if_absent(self.course_data.course_key):
            return True
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                if subsection_grade.all_total.first_attempted:
                    return True
        return False

    def _get_subsection_grade(self, subsection, force_update_subsections=False):
        if self.force_update_subsections:
            return self._subsection_grade_factory.update(subsection, force_update_subsections=force_update_subsections)
        else:
            # Pass read_only here so the subsection grades can be persisted in bulk at the end.
            return self._subsection_grade_factory.create(subsection, read_only=True)

    @staticmethod
    def _compute_percent(grader_result):
        """
        Computes and returns the grade percentage from the given
        result from the grader.
        """
        # Confused about the addition of .05 here?  See https://openedx.atlassian.net/browse/TNL-6972
        return round_away_from_zero(grader_result['percent'] * 100 + 0.05) / 100

    @staticmethod
    def _compute_letter_grade(grade_cutoffs, percent):
        """
        Computes and returns the course letter grade given the
        inputs, as defined in the grading_policy (e.g. 'A' 'B' 'C')
        or None if not passed.
        """
        letter_grade = None

        # Possible grades, sorted in descending order of score
        descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
        for possible_grade in descending_grades:
            if percent >= grade_cutoffs[possible_grade]:
                letter_grade = possible_grade
                break

        return letter_grade

    @staticmethod
    def _compute_passed(grade_cutoffs, percent):
        """
        Computes and returns whether the given percent value
        is a passing grade according to the given grade cutoffs.
        """
        # The passing threshold is the smallest non-zero cutoff.
        # NOTE(review): returns None (falsy) rather than False when there
        # are no non-zero cutoffs.
        nonzero_cutoffs = [cutoff for cutoff in grade_cutoffs.values() if cutoff > 0]
        success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
        return success_cutoff and percent >= success_cutoff
def _uniqueify_and_keep_order(iterable):
return list(OrderedDict([(item, None) for item in iterable]).keys())
| agpl-3.0 |
googleinterns/schemaorg-generator | protogenerator/utils/constants.py | 1 | 2080 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rdflib
# Scalar value types defined by the Protocol Buffers language.
proto_primitives = {
    'double',
    'float',
    'int32',
    'int64',
    'uint32',
    'uint64',
    'sint32',
    'sint64',
    'fixed32',
    'fixed64',
    'sfixed32',
    'sfixed64',
    'bool',
    'string',
    'bytes'}

# Mapping from schema.org primitive data types to the protobuf scalar
# type used to represent them.
schema_primitives = {
    'Text': 'string',
    'Number': 'double',
    'Boolean': 'bool',
    'Integer': 'int64',
    'Float': 'double',
    'URL': 'string',
}

# schema.org datatypes that are treated as special cases (not mapped
# directly to a protobuf scalar above).
schema_datatypes = {
    'Date',
    'DateTime',
    'Time',
    'DataType',
    'Duration',
    'Distance',
    'Energy',
    'Mass'}

# Frequently used RDF / schema.org URIRefs, pre-built so callers can
# query the rdflib graph without re-constructing them.
schema_constants = {
    'Type': rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
    'Class': rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#Class'),
    'Property': rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#Property'),
    'rangeIncludes': rdflib.URIRef('http://schema.org/rangeIncludes'),
    'domainIncludes': rdflib.URIRef('http://schema.org/domainIncludes'),
    'subClassOf': rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#subClassOf'),
    'Enumeration': rdflib.URIRef('http://schema.org/Enumeration'),
    'Thing': rdflib.URIRef('http://schema.org/Thing'),
    'Number': rdflib.URIRef('http://schema.org/Number'),
    'Integer': rdflib.URIRef('http://schema.org/Integer'),
    'Float': rdflib.URIRef('http://schema.org/Float'),
    'Text': rdflib.URIRef('http://schema.org/Text'),
    'URL': rdflib.URIRef('http://schema.org/URL'),
    'Comment': rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#comment')
}
| apache-2.0 |
nephomaniac/eutester | testcases/cloud_admin/3-2/Euca5033.py | 6 | 2599 | '''
Created on Oct 9, 2012
@author: mmunn
Unit test : EUCA-5033 Cannot detach root EBS volume from stopped instance
This test assumes you have created and registered an ebs-image
you can use:
create_bfebs_img_test.py
--url http://192.168.7.65/bfebs-image/vmware/bfebs_vmwaretools.img
--config ../../cloud_admin/cloud.conf
-p foobar
setUp : creates tester and credentials
test : runs euca-detach-volume and euca-attach-volume on "stopped" EBS instance
tearDown : terminate instance remove credentials
cloud.conf:( put in same directory as this testcase )
IP_ADDRESS CENTOS 6.3 64 BZR [CC00 CLC SC00 WS]
IP_ADDRESS CENTOS 6.3 64 BZR [NC00]
'''
import unittest
import shutil
from eucaops import Eucaops
class Euca5033(unittest.TestCase):
    """Regression test for EUCA-5033: the root EBS volume of a *stopped*
    boot-from-EBS instance could not be detached and re-attached.

    Requires a pre-registered EBS-backed image and a cloud described by
    cloud.conf (see module docstring).
    """

    def setUp(self):
        # Build the Eucaops tester from the cloud config and create
        # throwaway credentials (keypair + security group).
        self.conf = "cloud.conf"
        # Device name of the root EBS volume.
        self.device = "/dev/sda1"
        self.tester = Eucaops( config_file=self.conf, password="foobar" )
        self.doAuth()

    def tearDown(self):
        # Terminate instances/volumes and remove all local credentials.
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)

    def doAuth(self):
        # Create a keypair and an authorized security group for the run.
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        self.tester.authorize_group(self.group)

    def testEBS(self):
        # Get the existing EBS emi
        self.emi = self.tester.get_emi(root_device_type='ebs')
        # Start instance
        self.reservation = self.tester.run_instance(self.emi, keypair=self.keypair.name, group=self.group, is_reachable=False)
        # Make sure the instance is running, set instance variables
        for instance in self.reservation.instances:
            if instance.state == "running":
                self.instance = instance
                self.zone = instance.placement
        # Run test
        self.rootVolume = self.tester.get_volume(attached_dev=self.device)
        self.tester.stop_instances(self.reservation)
        # EBS Instance now in stopped state, try and detach/attach root volume.
        self.tester.detach_volume(self.rootVolume)
        self.tester.attach_volume(self.instance, self.rootVolume, self.device )
        # Make sure instance will start.
        self.tester.start_instances(self.reservation)
        pass
# Allow the test to be run directly from the command line.
if __name__ == "__main__":
    unittest.main("Euca5033")
| bsd-2-clause |
ashhher3/invenio | modules/bibformat/lib/elements/bfe_appears_in_collections.py | 35 | 1840 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints publisher name
"""
__revision__ = "$Id$"
from invenio.search_engine import get_all_collections_of_a_record, \
create_navtrail_links
def format_element(bfo, separator="<br />"):
    """Prints the list of collections the record belongs to.

    Longer navtrails are kept in preference to shorter ones that are
    contained within them, so each collection branch appears only once.

    @param separator: a separator between each collection link.
    """
    coll_names = get_all_collections_of_a_record(bfo.recID)
    navtrails = [create_navtrail_links(coll_name, ln=bfo.lang) for coll_name in coll_names]
    # Drop empty navtrails.
    navtrails = [navtrail for navtrail in navtrails if navtrail]
    # Sort longest first so that shorter trails contained in an already
    # kept trail are discarded below. Key-based sorting replaces the old
    # cmp() lambda (unsupported on Python 3); the ordering is identical:
    # descending length, stable for equal lengths.
    navtrails.sort(key=len, reverse=True)
    final_navtrails = []
    for navtrail in navtrails:
        for final_navtrail in final_navtrails:
            if navtrail in final_navtrail:
                # Already represented by a longer trail.
                break
        else:
            final_navtrails.append(navtrail)
    return separator.join(final_navtrails)
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # 0 means: do not escape (the output contains intentional HTML links).
    return 0
| gpl-2.0 |
IsaacHaze/tweepy | tweepy/cache.py | 64 | 12909 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
import time
import datetime
import threading
import os
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import hashlib
except ImportError:
# python 2.4
import md5 as hashlib
try:
import fcntl
except ImportError:
# Probably on a windows system
# TODO: use win32file
pass
class Cache(object):
    """Cache interface.

    Abstract base for the concrete caches below; subclasses must
    implement store/get/count/cleanup/flush.
    """

    def __init__(self, timeout=60):
        """Initialize the cache
        timeout: number of seconds to keep a cached entry
        """
        # A timeout <= 0 is treated by implementations as "never expire".
        self.timeout = timeout

    def store(self, key, value):
        """Add new record to cache
        key: entry key
        value: data of entry
        """
        raise NotImplementedError

    def get(self, key, timeout=None):
        """Get cached entry if exists and not expired
        key: which entry to get
        timeout: override timeout with this value [optional]
        """
        raise NotImplementedError

    def count(self):
        """Get count of entries currently stored in cache"""
        raise NotImplementedError

    def cleanup(self):
        """Delete any expired entries in cache."""
        raise NotImplementedError

    def flush(self):
        """Delete all cached entries"""
        raise NotImplementedError
class MemoryCache(Cache):
    """Thread-safe in-memory cache.

    Entries are stored as ``key -> (insert_time, value)`` and expire
    lazily on access once they are older than the timeout.
    """

    def __init__(self, timeout=60):
        Cache.__init__(self, timeout)
        self._entries = {}
        self.lock = threading.Lock()

    def __getstate__(self):
        # pickle support: the lock is not picklable, so persist only the
        # entry table and the timeout.
        return {'entries': self._entries, 'timeout': self.timeout}

    def __setstate__(self, state):
        # unpickle support: restore the data and create a fresh lock.
        self.lock = threading.Lock()
        self._entries = state['entries']
        self.timeout = state['timeout']

    def _is_expired(self, entry, timeout):
        # A non-positive timeout means entries never expire.
        return timeout > 0 and (time.time() - entry[0]) >= timeout

    def store(self, key, value):
        with self.lock:
            self._entries[key] = (time.time(), value)

    def get(self, key, timeout=None):
        with self.lock:
            entry = self._entries.get(key)
            if not entry:
                # Cache miss.
                return None

            # An explicit timeout argument overrides the instance default.
            effective_timeout = self.timeout if timeout is None else timeout

            if self._is_expired(entry, effective_timeout):
                # Expired: evict lazily and report a miss.
                del self._entries[key]
                return None

            return entry[1]

    def count(self):
        return len(self._entries)

    def cleanup(self):
        with self.lock:
            # Iterate over a snapshot so entries can be deleted safely.
            for key, entry in list(self._entries.items()):
                if self._is_expired(entry, self.timeout):
                    del self._entries[key]

    def flush(self):
        with self.lock:
            self._entries.clear()
class FileCache(Cache):
    """File-based cache: one pickled file per key, named by MD5 of the key."""
    # locks used to make cache thread-safe
    cache_locks = {}

    def __init__(self, cache_dir, timeout=60):
        """Create/open the cache directory and select a locking strategy.

        cache_dir: directory holding one file per cached key.
        timeout: number of seconds to keep a cached entry.
        """
        Cache.__init__(self, timeout)
        if os.path.exists(cache_dir) is False:
            os.mkdir(cache_dir)
        self.cache_dir = cache_dir
        # Share a single threading.Lock per directory so several FileCache
        # instances over the same directory serialize with each other.
        if cache_dir in FileCache.cache_locks:
            self.lock = FileCache.cache_locks[cache_dir]
        else:
            self.lock = threading.Lock()
            FileCache.cache_locks[cache_dir] = self.lock
        # Pick the platform file-locking implementation; the win32 variant
        # is an unimplemented stub (see TODOs below).
        if os.name == 'posix':
            self._lock_file = self._lock_file_posix
            self._unlock_file = self._unlock_file_posix
        elif os.name == 'nt':
            self._lock_file = self._lock_file_win32
            self._unlock_file = self._unlock_file_win32
        else:
            print('Warning! FileCache locking not supported on this system!')
            self._lock_file = self._lock_file_dummy
            self._unlock_file = self._unlock_file_dummy

    def _get_path(self, key):
        # Map a key to its on-disk path via the MD5 hex digest of the key.
        md5 = hashlib.md5()
        md5.update(key.encode('utf-8'))
        return os.path.join(self.cache_dir, md5.hexdigest())

    def _lock_file_dummy(self, path, exclusive=True):
        # No-op locking for platforms without a real implementation.
        return None

    def _unlock_file_dummy(self, lock):
        return

    def _lock_file_posix(self, path, exclusive=True):
        """Take an fcntl lock on ``path + '.lock'``.

        Returns the open lock-file object (closing it releases the lock),
        or None if the lock file disappeared while being acquired.
        """
        lock_path = path + '.lock'
        if exclusive is True:
            f_lock = open(lock_path, 'w')
            fcntl.lockf(f_lock, fcntl.LOCK_EX)
        else:
            f_lock = open(lock_path, 'r')
            fcntl.lockf(f_lock, fcntl.LOCK_SH)
        # NOTE(review): the open() above just (re)created lock_path, so this
        # check can only trigger if a concurrent _delete_file removed it in
        # between -- presumably a guard against that race; confirm intent.
        if os.path.exists(lock_path) is False:
            f_lock.close()
            return None
        return f_lock

    def _unlock_file_posix(self, lock):
        # Closing the file object releases the fcntl lock.
        lock.close()

    def _lock_file_win32(self, path, exclusive=True):
        # TODO: implement
        return None

    def _unlock_file_win32(self, lock):
        # TODO: implement
        return

    def _delete_file(self, path):
        # Remove an entry file together with its companion '.lock' file.
        os.remove(path)
        if os.path.exists(path + '.lock'):
            os.remove(path + '.lock')

    def store(self, key, value):
        """Pickle ``(time.time(), value)`` into the file for ``key``."""
        path = self._get_path(key)
        self.lock.acquire()
        try:
            # acquire lock and open file
            f_lock = self._lock_file(path)
            datafile = open(path, 'wb')
            # write data
            pickle.dump((time.time(), value), datafile)
            # close and unlock file
            datafile.close()
            self._unlock_file(f_lock)
        finally:
            self.lock.release()

    def get(self, key, timeout=None):
        """Return the cached value for ``key``, or None if absent/expired."""
        return self._get(self._get_path(key), timeout)

    def _get(self, path, timeout):
        # Shared worker for get()/cleanup(): reads one entry file and
        # deletes it when expired.
        if os.path.exists(path) is False:
            # no record
            return None
        self.lock.acquire()
        try:
            # acquire lock and open
            f_lock = self._lock_file(path, False)
            datafile = open(path, 'rb')
            # read pickled object
            created_time, value = pickle.load(datafile)
            datafile.close()
            # check if value is expired
            if timeout is None:
                timeout = self.timeout
            if timeout > 0:
                if (time.time() - created_time) >= timeout:
                    # expired! delete from cache
                    value = None
                    self._delete_file(path)
            # unlock and return result
            self._unlock_file(f_lock)
            return value
        finally:
            self.lock.release()

    def count(self):
        """Number of entry files currently on disk ('.lock' files excluded)."""
        c = 0
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            c += 1
        return c

    def cleanup(self):
        """Expire stale entries by re-reading each one through _get()."""
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            self._get(os.path.join(self.cache_dir, entry), None)

    def flush(self):
        """Delete every entry file (and its lock file) in the cache dir."""
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            self._delete_file(os.path.join(self.cache_dir, entry))
class MemCacheCache(Cache):
    """Cache backend backed by an external memcache client.

    Entry expiry is delegated entirely to the memcached server, so the
    per-call ``timeout`` override and the maintenance operations below
    are not meaningful for this backend.
    """
    def __init__(self, client, timeout=60):
        """Initialize the cache
        client: The memcache client
        timeout: number of seconds to keep a cached entry
        """
        self.client = client
        self.timeout = timeout

    def store(self, key, value):
        """Add new record to cache
        key: entry key
        value: data of entry
        """
        # memcached evicts the entry itself after ``time`` seconds.
        self.client.set(key, value, time=self.timeout)

    def get(self, key, timeout=None):
        """Get cached entry if exists and not expired
        key: which entry to get
        timeout: override timeout with this value [optional].
            DOES NOT WORK HERE: expiry is enforced server-side only.
        """
        return self.client.get(key)

    def count(self):
        """Get count of entries currently stored in cache. RETURN 0"""
        # Fix: the docstring always promised 0, but the method raised
        # NotImplementedError, breaking the Cache interface for generic
        # callers. memcached has no cheap key count, so report 0.
        return 0

    def cleanup(self):
        """Delete any expired entries in cache. NO-OP"""
        # Fix: no-op as documented (was raising NotImplementedError);
        # memcached expires entries on its own.
        pass

    def flush(self):
        """Delete all cached entries. NO-OP"""
        # Fix: no-op as documented (was raising NotImplementedError).
        # Deliberately does not call flush_all(): a shared memcached
        # instance may hold other applications' data.
        pass
class RedisCache(Cache):
    """Cache running in a redis server.

    Each value is stored as a pickled ``(stored_at, value)`` tuple and the
    key name is tracked in a redis set (``keys_container``).  Expiry is
    enforced twice: server-side via EXPIRE and client-side via the
    timestamp inside the pickle.
    """
    def __init__(self, client,
                 timeout=60,
                 keys_container='tweepy:keys',
                 pre_identifier='tweepy:'):
        """client: a redis client instance.
        timeout: number of seconds to keep a cached entry.
        keys_container: name of the redis set listing all cache keys.
        pre_identifier: prefix namespacing tweepy's keys in the server.
        """
        Cache.__init__(self, timeout)
        self.client = client
        self.keys_container = keys_container
        self.pre_identifier = pre_identifier

    def _is_expired(self, entry, timeout):
        # Returns true if the entry has expired
        # (entry is the unpickled (stored_at, value) tuple; timeout <= 0
        # disables client-side expiry).
        return timeout > 0 and (time.time() - entry[0]) >= timeout

    def store(self, key, value):
        """Store the key, value pair in our redis server"""
        # Prepend tweepy to our key,
        # this makes it easier to identify tweepy keys in our redis server
        key = self.pre_identifier + key
        # Get a pipe (to execute several redis commands in one step)
        pipe = self.client.pipeline()
        # Set our values in a redis hash (similar to python dict)
        pipe.set(key, pickle.dumps((time.time(), value)))
        # Set the expiration
        pipe.expire(key, self.timeout)
        # Add the key to a set containing all the keys
        pipe.sadd(self.keys_container, key)
        # Execute the instructions in the redis server
        pipe.execute()

    def get(self, key, timeout=None):
        """Given a key, returns an element from the redis table"""
        key = self.pre_identifier + key
        # Check to see if we have this key
        unpickled_entry = self.client.get(key)
        if not unpickled_entry:
            # No hit, return nothing
            return None
        entry = pickle.loads(unpickled_entry)
        # Use provided timeout in arguments if provided
        # otherwise use the one provided during init.
        if timeout is None:
            timeout = self.timeout
        # Make sure entry is not expired
        if self._is_expired(entry, timeout):
            # entry expired, delete and return nothing
            self.delete_entry(key)
            return None
        # entry found and not expired, return it
        return entry[1]

    def count(self):
        """Note: This is not very efficient,
        since it retreives all the keys from the redis
        server to know how many keys we have"""
        # NOTE(review): keys expired server-side stay in the set until
        # cleanup()/flush() runs, so this may over-count.
        return len(self.client.smembers(self.keys_container))

    def delete_entry(self, key):
        """Delete an object from the redis table"""
        # Remove both the value and its bookkeeping entry atomically.
        pipe = self.client.pipeline()
        pipe.srem(self.keys_container, key)
        pipe.delete(key)
        pipe.execute()

    def cleanup(self):
        """Cleanup all the expired keys"""
        keys = self.client.smembers(self.keys_container)
        for key in keys:
            entry = self.client.get(key)
            if entry:
                entry = pickle.loads(entry)
                if self._is_expired(entry, self.timeout):
                    self.delete_entry(key)

    def flush(self):
        """Delete all entries from the cache"""
        keys = self.client.smembers(self.keys_container)
        for key in keys:
            self.delete_entry(key)
class MongodbCache(Cache):
    """A simple pickle-based MongoDB cache system."""

    def __init__(self, db, timeout=3600, collection='tweepy_cache'):
        """Should receive a "database" cursor from pymongo."""
        Cache.__init__(self, timeout)
        self.timeout = timeout
        self.col = db[collection]
        # TTL index: MongoDB itself purges documents whose 'created'
        # field is older than ``timeout`` seconds.
        self.col.create_index('created', expireAfterSeconds=timeout)

    def store(self, key, value):
        # Imported lazily so bson is only required when actually storing.
        from bson.binary import Binary
        now = datetime.datetime.utcnow()
        blob = Binary(pickle.dumps(value))
        # NOTE(review): col.insert/remove and cursor.count are legacy
        # pymongo APIs -- confirm driver version before upgrading.
        self.col.insert({'created': now, '_id': key, 'value': blob})

    def get(self, key, timeout=None):
        # Per-call timeouts are incompatible with the TTL-index design.
        if timeout:
            raise NotImplementedError
        obj = self.col.find_one({'_id': key})
        if obj:
            return pickle.loads(obj['value'])

    def count(self):
        return self.col.find({}).count()

    def delete_entry(self, key):
        return self.col.remove({'_id': key})

    def cleanup(self):
        """MongoDB will automatically clear expired keys."""
        pass

    def flush(self):
        # Dropping the collection also drops its indexes, so the TTL
        # index must be recreated afterwards.
        self.col.drop()
        self.col.create_index('created', expireAfterSeconds=self.timeout)
| mit |
kytvi2p/Sigil | 3rdparty/python/Lib/test/test_importlib/import_/test_packages.py | 84 | 4605 | from .. import util
from . import util as import_util
import sys
import unittest
import importlib
from test import support
class ParentModuleTests:

    """Importing a submodule should import the parent modules."""

    def test_import_parent(self):
        # Importing pkg.module must implicitly import and register 'pkg'.
        with util.mock_spec('pkg.__init__', 'pkg.module') as mock:
            with util.import_state(meta_path=[mock]):
                module = self.__import__('pkg.module')
                self.assertIn('pkg', sys.modules)

    def test_bad_parent(self):
        # A submodule whose parent package does not exist must raise
        # ImportError naming the missing parent.
        with util.mock_spec('pkg.module') as mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises(ImportError) as cm:
                    self.__import__('pkg.module')
                self.assertEqual(cm.exception.name, 'pkg')

    def test_raising_parent_after_importing_child(self):
        # pkg/__init__ imports its own child, then fails: the child must
        # survive in sys.modules while the broken parent must not.
        def __init__():
            import pkg.module
            1/0
        mock = util.mock_spec('pkg.__init__', 'pkg.module',
                              module_code={'pkg': __init__})
        with mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises(ZeroDivisionError):
                    self.__import__('pkg')
                self.assertNotIn('pkg', sys.modules)
                self.assertIn('pkg.module', sys.modules)
                with self.assertRaises(ZeroDivisionError):
                    self.__import__('pkg.module')
                self.assertNotIn('pkg', sys.modules)
                self.assertIn('pkg.module', sys.modules)

    def test_raising_parent_after_relative_importing_child(self):
        # Same scenario, but the parent uses a relative import.
        def __init__():
            from . import module
            1/0
        mock = util.mock_spec('pkg.__init__', 'pkg.module',
                              module_code={'pkg': __init__})
        with mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    # This raises ImportError on the "from . import module"
                    # line, not sure why.
                    self.__import__('pkg')
                self.assertNotIn('pkg', sys.modules)
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    self.__import__('pkg.module')
                self.assertNotIn('pkg', sys.modules)
                # XXX False
                #self.assertIn('pkg.module', sys.modules)

    def test_raising_parent_after_double_relative_importing_child(self):
        # As above, one package level deeper ("from ..subpkg import module").
        def __init__():
            from ..subpkg import module
            1/0
        mock = util.mock_spec('pkg.__init__', 'pkg.subpkg.__init__',
                              'pkg.subpkg.module',
                              module_code={'pkg.subpkg': __init__})
        with mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    # This raises ImportError on the "from ..subpkg import module"
                    # line, not sure why.
                    self.__import__('pkg.subpkg')
                self.assertNotIn('pkg.subpkg', sys.modules)
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    self.__import__('pkg.subpkg.module')
                self.assertNotIn('pkg.subpkg', sys.modules)
                # XXX False
                #self.assertIn('pkg.subpkg.module', sys.modules)

    def test_module_not_package(self):
        # Try to import a submodule from a non-package should raise ImportError.
        assert not hasattr(sys, '__path__')
        with self.assertRaises(ImportError) as cm:
            self.__import__('sys.no_submodules_here')
        self.assertEqual(cm.exception.name, 'sys.no_submodules_here')

    def test_module_not_package_but_side_effects(self):
        # If a module injects something into sys.modules as a side-effect, then
        # pick up on that fact.
        name = 'mod'
        subname = name + '.b'
        def module_injection():
            sys.modules[subname] = 'total bunk'
        mock_spec = util.mock_spec('mod',
                                   module_code={'mod': module_injection})
        with mock_spec as mock:
            with util.import_state(meta_path=[mock]):
                try:
                    submodule = self.__import__(subname)
                finally:
                    support.unload(subname)
# Instantiate the suite twice: once against the frozen importlib and once
# against the pure-source variant, each bound to its own __import__.
Frozen_ParentTests, Source_ParentTests = util.test_both(
        ParentModuleTests, __import__=import_util.__import__)

if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
eeshangarg/oh-mainline | vendor/packages/whoosh/src/whoosh/__init__.py | 16 | 2063 | # Copyright 2008 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
__version__ = (2, 3, 0)


def versionstring(build=True, extra=True):
    """Return the Whoosh version number formatted as a string.

    :param build: include the third (build) component of the version.
    :param extra: include any alpha/beta/rc tags; only honoured when
        ``build`` is also true.
    :rtype: str
    """
    component_count = 3 if build else 2
    dotted = [str(part) for part in __version__[:component_count]]
    version = ".".join(dotted)
    if build and extra:
        # Any elements beyond the third are pre-release tags appended
        # directly, without a separating dot.
        version += "".join(str(tag) for tag in __version__[3:])
    return version
| agpl-3.0 |
Codefans-fan/odoo | addons/crm/res_partner.py | 47 | 4790 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
    """ Inherits partner and adds CRM information in the partner form """
    _inherit = 'res.partner'

    def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field helper: per-partner counts of opportunities,
        meetings and phonecalls (multi='opp_meet')."""
        res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
        # the user may not have access rights for opportunities or meetings
        try:
            for partner in self.browse(cr, uid, ids, context):
                res[partner.id] = {
                    'opportunity_count': len(partner.opportunity_ids),
                    'meeting_count': len(partner.meeting_ids),
                }
        except:
            # NOTE(review): this bare except swallows *any* failure, not only
            # access-rights errors -- confirm that is intentional.
            pass
        for partner in self.browse(cr, uid, ids, context):
            res[partner.id]['phonecall_count'] = len(partner.phonecall_ids)
        return res

    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        # Only open leads/opportunities (probability strictly between 0 and 100).
        'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
            'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
        'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
            'Meetings'),
        'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
            'Phonecalls'),
        # The three counters below share one computation via multi='opp_meet'.
        'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
        'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
        'phonecall_count': fields.function(_opportunity_meeting_phonecall_count, string="Phonecalls", type="integer", multi='opp_meet'),
    }

    def redirect_partner_form(self, cr, uid, partner_id, context=None):
        """Return an act_window dict opening the form view of ``partner_id``."""
        search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
        value = {
            'domain': "[]",
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'res.partner',
            'res_id': int(partner_id),
            'view_id': False,
            'context': context,
            'type': 'ir.actions.act_window',
            'search_view_id': search_view and search_view[1] or False
        }
        return value

    def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
        """Create one crm.lead of type 'opportunity' per partner in ``ids``.

        Returns a dict mapping partner id to the created opportunity id.
        """
        categ_obj = self.pool.get('crm.case.categ')
        categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
        lead_obj = self.pool.get('crm.lead')
        opportunity_ids = {}
        for partner in self.browse(cr, uid, ids, context=context):
            # NOTE(review): once partner_id is set (explicitly or from the
            # first partner), it is reused for every remaining iteration --
            # confirm that is the intended behavior for multi-id calls.
            if not partner_id:
                partner_id = partner.id
            opportunity_id = lead_obj.create(cr, uid, {
                'name' : opportunity_summary,
                'planned_revenue' : planned_revenue,
                'probability' : probability,
                'partner_id' : partner_id,
                'categ_ids' : categ_ids and categ_ids[0:1] or [],
                'type': 'opportunity'
            }, context=context)
            opportunity_ids[partner_id] = opportunity_id
        return opportunity_ids

    def schedule_meeting(self, cr, uid, ids, context=None):
        """Open the calendar action pre-filtered on these partners, with the
        current user's own partner added to the attendee defaults."""
        partner_ids = list(ids)
        partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'search_default_partner_ids': list(ids),
            'default_partner_ids': partner_ids,
        }
        return res
| agpl-3.0 |
MarkusHackspacher/unknown-horizons | horizons/world/building/path.py | 1 | 2878 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.component.componentholder import ComponentHolder
from horizons.constants import LAYERS
from horizons.scheduler import Scheduler
from horizons.util.tile_orientation import get_tile_alignment_action
from horizons.world.building.buildable import BuildableLine
from horizons.world.building.building import BasicBuilding
class Path(ComponentHolder):
    """Object with path functionality"""
    walkable = True

    # no __init__

    def load(self, db, worldid):
        super().load(db, worldid)

    def init(self):
        # this does not belong in __init__, it's just here that all the data should be consistent
        self.__init()

    def __init(self):
        # Register with the island's path-node bookkeeping; orientation is
        # recalculated immediately for a running world, or deferred once via
        # the Scheduler while the world is still loading.
        self.island.path_nodes.register_road(self)
        if self.session.world.inited:
            self.recalculate_surrounding_tile_orientation()
            self.recalculate_orientation()
        else:
            # don't always recalculate while loading, we'd recalculate too often.
            # do it once when everything is finished.
            Scheduler().add_new_object(self.recalculate_orientation, self, run_in=0)

    def remove(self):
        # Unregister and let the neighbours adapt their sprites.
        super().remove()
        self.island.path_nodes.unregister_road(self)
        self.recalculate_surrounding_tile_orientation()

    def is_road(self, tile):
        """Return True if ``tile`` holds a road owned by the same player."""
        return (tile is not None and
                tile.object is not None and
                self.island.path_nodes.is_road(tile.x, tile.y) and
                tile.object.owner == self.owner)

    def recalculate_surrounding_tile_orientation(self):
        # Propagate an orientation update to every adjacent road tile.
        for tile in self.island.get_surrounding_tiles(self.position):
            if self.is_road(tile):
                tile.object.recalculate_orientation()

    def recalculate_orientation(self):
        """Choose the action (sprite) matching the neighbouring road layout."""
        def is_similar_tile(position):
            # Neighbour counts as "similar" when it is our own road.
            tile = self.island.get_tile(position)
            return self.is_road(tile)

        origin = self.position.origin
        action = get_tile_alignment_action(origin, is_similar_tile)
        location = self._instance.getLocation()
        self.act(action, location, True)
class Road(Path, BasicBuilding, BuildableLine):
    """Actual buildable road: path behavior + building + line placement."""
    # Roads are drawn on the fields layer.
    layer = LAYERS.FIELDS
| gpl-2.0 |
BartSaelen/kubb_match | kubb_match/service/tournament_service.py | 1 | 2821 | # -*- coding: utf-8 -*-
from kubb_match.data.models import Round, GridPosition
from kubb_match.service.battle_service import BattleService
from kubb_match.service.knock_out_service import KnockOutService
class TournamentService(object):
    """Coordinates a kubb tournament: battle (grid) phase, then knock-out."""

    def __init__(self, data_manager):
        self.data_manager = data_manager
        self.battle_service = BattleService()
        self.knock_out_service = KnockOutService()

    def init_battle_phase(self, phase):
        """Create round 1 by seeding all teams onto grid slots A1..E8."""
        round1 = Round()
        positions = []
        teams = self.data_manager.get_teams()
        t = 0
        # 5 rows x 8 columns = 40 slots; teams are placed in list order.
        for row in ('A', 'B', 'C', 'D', 'E'):
            for x in range(1, 9):
                key = row + str(x)
                team = teams[t]
                grid_pos = GridPosition(position=key, team_id=team.id)
                grid_pos.team = team
                t += 1
                positions.append(grid_pos)
        round1.positions = positions
        round1.games = self.battle_service.create_games(positions)
        phase.rounds.append(round1)
        self.data_manager.save(phase)
        return round1

    def next_battle_round(self, phase):
        """Close the current unplayed round and derive the next one."""
        prev_round = next((r for r in phase.rounds if not r.played))
        round = self.battle_service.calculate_next_round(prev_round)
        prev_round.played = True
        self.data_manager.save(prev_round)
        phase.rounds.append(round)
        self.data_manager.save(phase)
        return round

    def final_battle_round(self, phase):
        """Like next_battle_round, but flags the produced round as final."""
        prev_round = next((r for r in phase.rounds if not r.played))
        final_round = self.battle_service.calculate_next_round(prev_round, final=True)
        final_round.final = True
        prev_round.played = True
        self.data_manager.save(prev_round)
        phase.rounds.append(final_round)
        self.data_manager.save(phase)
        return final_round

    def init_ko_phase(self, phase, final_positions):
        """Build the initial knock-out rounds from the battle-phase ranking."""
        ko_rounds = self.knock_out_service.create_initial_rounds(final_positions)
        for round in ko_rounds:
            phase.rounds.append(ko_rounds[round])
        self.data_manager.save(phase)
        return ko_rounds

    def next_ko_round(self, phase):
        """Advance every unplayed knock-out bracket by one round."""
        prev_ko_rounds = [r for r in phase.rounds if not r.played]
        # Starting field number for each bracket label.
        field ={'A': 1, 'B': 9, 'C': 13, 'D': 17}
        for prev_round in prev_ko_rounds:
            prev_round.played = True
            self.data_manager.save(prev_round)
            # A bracket that already reached its final is carried over as-is.
            round = self.knock_out_service.calculate_next_round(prev_round,
                                                                field[prev_round.label]) if not prev_round.final else prev_round
            round.label = prev_round.label
            phase.rounds.append(round)
        self.data_manager.save(phase)
        return phase.rounds

    def final_ko_round(self, phase):
        # TODO: not implemented.
        pass
| apache-2.0 |
zhmz90/git | contrib/svn-fe/svnrdump_sim.py | 328 | 2044 | #!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys
import os
if sys.hexversion < 0x02040000:
# The limiter is the ValueError() calls. This may be too conservative
sys.stderr.write("svnrdump-sim.py: requires Python 2.4 or later.\n")
sys.exit(1)
def getrevlimit():
    """Return the highest revision that should be available, or None.

    The limit is taken from the SVNRMAX environment variable, which lets
    callers simulate incremental imports.
    """
    return os.environ.get('SVNRMAX')
def writedump(url, lower, upper):
    """Replay part of an svn dump file to stdout.

    url must have the form ``sim://<path-to-dumpfile>`` (an optional
    trailing slash is stripped); anything else raises ValueError.  The
    dump header and all lines from revision ``lower`` up to, but not
    including, revision ``upper`` are copied to sys.stdout.  ``upper``
    may be the string 'HEAD' to mean "until end of file".

    Returns True if at least one selected revision line was written.
    """
    if url.startswith('sim://'):
        filename = url[6:]
        if filename[-1] == '/':
            filename = filename[:-1]  # remove terminating slash
    else:
        raise ValueError('sim:// url required')
    wroterev = False
    # State machine: 'header' = before the first revision record,
    # 'prefix' = revisions below ``lower``, 'selection' = requested range.
    state = 'header'
    # Fix: open the dump inside a with-block -- the original never closed
    # the file handle.
    with open(filename, 'r') as f:
        for l in f:
            if state == 'header' and l.startswith('Revision-number: '):
                state = 'prefix'
            if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
                state = 'selection'
            if not upper == 'HEAD' and state == 'selection' and \
                    l == 'Revision-number: %s\n' % upper:
                break
            if state == 'header' or state == 'selection':
                if state == 'selection':
                    wroterev = True
                sys.stdout.write(l)
    return wroterev
if __name__ == "__main__":
if not (len(sys.argv) in (3, 4, 5)):
print("usage: %s dump URL -rLOWER:UPPER")
sys.exit(1)
if not sys.argv[1] == 'dump':
raise NotImplementedError('only "dump" is suppported.')
url = sys.argv[2]
r = ('0', 'HEAD')
if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
r = sys.argv[3][2:].lstrip().split(':')
if not getrevlimit() is None:
r[1] = getrevlimit()
if writedump(url, r[0], r[1]):
ret = 0
else:
ret = 1
sys.exit(ret)
| gpl-2.0 |
bguillot/OpenUpgrade | addons/purchase/__init__.py | 439 | 1185 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase
import partner
import stock
import wizard
import report
import stock
import company
import edi
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ArcherSys/ArcherSys | Lib/test/test_pulldom.py | 1 | 37541 | <<<<<<< HEAD
<<<<<<< HEAD
import io
import unittest
import sys
import xml.sax
from xml.sax.xmlreader import AttributesImpl
from xml.dom import pulldom
from test.support import run_unittest, findfile
# Path to the sample XML document shipped with the CPython test data.
tstfile = findfile("test.xml", subdir="xmltestdata")

# A handy XML snippet, containing attributes, a namespace prefix, and a
# self-closing tag:
SMALL_SAMPLE = """<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:xdc="http://www.xml.com/books">
<!-- A comment -->
<title>Introduction to XSL</title>
<hr/>
<p><xdc:author xdc:attrib="prefixed attribute" attrib="other attrib">A. Namespace</xdc:author></p>
</html>"""
class PullDOMTestCase(unittest.TestCase):
    """Checks the event stream produced by xml.dom.pulldom."""

    def test_parse(self):
        """Minimal test of DOMEventStream.parse()"""
        # This just tests that parsing from a stream works. Actual parser
        # semantics are tested using parseString with a more focused XML
        # fragment.
        # Test with a filename:
        handler = pulldom.parse(tstfile)
        self.addCleanup(handler.stream.close)
        list(handler)
        # Test with a file object:
        with open(tstfile, "rb") as fin:
            list(pulldom.parse(fin))

    def test_parse_semantics(self):
        """Test DOMEventStream parsing semantics."""
        items = pulldom.parseString(SMALL_SAMPLE)
        evt, node = next(items)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        self.assertEqual(2, len(node.attributes))
        self.assertEqual(node.attributes.getNamedItem("xmlns:xdc").value,
                         "http://www.xml.com/books")
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)  # Line break
        evt, node = next(items)
        # XXX - A comment should be reported here!
        # self.assertEqual(pulldom.COMMENT, evt)
        # Line break after swallowed comment:
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual("title", node.tagName)
        title_node = node
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("Introduction to XSL", node.data)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("title", node.tagName)
        # END_ELEMENT must hand back the same node object as START_ELEMENT:
        self.assertTrue(title_node is node)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        # XXX No END_DOCUMENT item is ever obtained:
        #evt, node = next(items)
        #self.assertEqual(pulldom.END_DOCUMENT, evt)

    def test_expandItem(self):
        """Ensure expandItem works as expected."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Loop through the nodes until we get to a "title" start tag:
        for evt, item in items:
            if evt == pulldom.START_ELEMENT and item.tagName == "title":
                items.expandNode(item)
                self.assertEqual(1, len(item.childNodes))
                break
        else:
            self.fail("No \"title\" element detected in SMALL_SAMPLE!")
        # Loop until we get to the next start-element:
        for evt, node in items:
            if evt == pulldom.START_ELEMENT:
                break
        self.assertEqual("hr", node.tagName,
                "expandNode did not leave DOMEventStream in the correct state.")
        # Attempt to expand a standalone element:
        items.expandNode(node)
        self.assertEqual(next(items)[0], pulldom.CHARACTERS)
        evt, node = next(items)
        self.assertEqual(node.tagName, "p")
        items.expandNode(node)
        next(items)  # Skip character data
        evt, node = next(items)
        self.assertEqual(node.tagName, "html")
        with self.assertRaises(StopIteration):
            next(items)
        items.clear()
        # clear() must drop the references to the parser and stream.
        self.assertIsNone(items.parser)
        self.assertIsNone(items.stream)

    @unittest.expectedFailure
    def test_comment(self):
        """PullDOM does not receive "comment" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        for evt, _ in items:
            if evt == pulldom.COMMENT:
                break
        else:
            self.fail("No comment was encountered")

    @unittest.expectedFailure
    def test_end_document(self):
        """PullDOM does not receive "end-document" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Read all of the nodes up to and including </html>:
        for evt, node in items:
            if evt == pulldom.END_ELEMENT and node.tagName == "html":
                break
        try:
            # Assert that the next node is END_DOCUMENT:
            evt, node = next(items)
            self.assertEqual(pulldom.END_DOCUMENT, evt)
        except StopIteration:
            self.fail(
                "Ran out of events, but should have received END_DOCUMENT")
class ThoroughTestCase(unittest.TestCase):
    """Test the hard-to-reach parts of pulldom."""

    def test_thorough_parse(self):
        """Test some of the hard-to-reach parts of PullDOM."""
        self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))

    @unittest.expectedFailure
    def test_sax2dom_fail(self):
        """SAX2DOM can't handle a PI before the root element."""
        pd = SAX2DOMTestHelper(None, SAXExerciser(), 12)
        self._test_thorough(pd)

    def test_thorough_sax2dom(self):
        """Test some of the hard-to-reach parts of SAX2DOM."""
        # before_root=False: SAX2DOM misses comment/PI before the root.
        pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)
        self._test_thorough(pd, False)

    def _test_thorough(self, pd, before_root=True):
        """Test some of the hard-to-reach parts of the parser, using a mock
        parser."""

        evt, node = next(pd)
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))

        if before_root:
            # Comment and PI events emitted before the root element.
            evt, node = next(pd)
            self.assertEqual(pulldom.COMMENT, evt)
            self.assertEqual("a comment", node.data)
            evt, node = next(pd)
            self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
            self.assertEqual("target", node.target)
            self.assertEqual("data", node.data)

        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)

        evt, node = next(pd)
        self.assertEqual(pulldom.COMMENT, evt)
        self.assertEqual("a comment", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
        self.assertEqual("target", node.target)
        self.assertEqual("data", node.data)

        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)

        evt, node = next(pd)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("text", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_DOCUMENT, evt)
class SAXExerciser(object):
    """A fake SAX parser whose parse() replays a scripted sequence of
    handler events, including the hard-to-reach comment/PI callbacks
    that occur before the root element."""

    def setContentHandler(self, handler):
        self._handler = handler

    def parse(self, _):
        # The pre-root comment and PI ensure that events preceding the
        # first start-element are properly stored and emitted.
        events = [
            ("startDocument", ()),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("html", AttributesImpl({}))),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("p", AttributesImpl({"class": "paraclass"}))),
            ("characters", ("text",)),
            ("endElement", ("p",)),
            ("endElement", ("html",)),
            ("endDocument", ()),
        ]
        for method, args in events:
            getattr(self._handler, method)(*args)

    def stub(self, *args, **kwargs):
        """Stub method. Does nothing."""
        pass

    # Parser-configuration entry points the driver may call; no-ops here.
    setProperty = stub
    setFeature = stub
class SAX2DOMExerciser(SAXExerciser):
    """Like SAXExerciser, but without the processing instruction and
    comment before the root element, because SAX2DOM can't handle
    content preceding the document element."""

    def parse(self, _):
        events = [
            ("startDocument", ()),
            ("startElement", ("html", AttributesImpl({}))),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("p", AttributesImpl({"class": "paraclass"}))),
            ("characters", ("text",)),
            ("endElement", ("p",)),
            ("endElement", ("html",)),
            ("endDocument", ()),
        ]
        for method, args in events:
            getattr(self._handler, method)(*args)
class SAX2DOMTestHelper(pulldom.DOMEventStream):
    """Allows us to drive SAX2DOM from a DOMEventStream."""

    def reset(self):
        # Substitute SAX2DOM for the PullDOM content handler that
        # DOMEventStream.reset() would normally install.
        self.pulldom = pulldom.SAX2DOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
class SAX2DOMTestCase(unittest.TestCase):
    """End-to-end checks of pulldom.SAX2DOM tree building."""

    def confirm(self, test, testname="Test"):
        # Legacy helper kept for parity with other xml.dom test suites.
        self.assertTrue(test, testname)

    def test_basic(self):
        """Ensure SAX2DOM can parse from a stream."""
        with io.StringIO(SMALL_SAMPLE) as fin:
            sd = SAX2DOMTestHelper(fin, xml.sax.make_parser(),
                                   len(SMALL_SAMPLE))
            for evt, node in sd:
                if evt == pulldom.START_ELEMENT and node.tagName == "html":
                    break
            # Because the buffer is the same length as the XML, all the
            # nodes should have been parsed and added:
            self.assertGreater(len(node.childNodes), 0)

    def testSAX2DOM(self):
        """Ensure SAX2DOM expands nodes as expected."""
        sax2dom = pulldom.SAX2DOM()
        sax2dom.startDocument()
        sax2dom.startElement("doc", {})
        sax2dom.characters("text")
        sax2dom.startElement("subelm", {})
        sax2dom.characters("text")
        sax2dom.endElement("subelm")
        sax2dom.characters("text")
        sax2dom.endElement("doc")
        sax2dom.endDocument()
        doc = sax2dom.document
        root = doc.documentElement
        (text1, elm1, text2) = root.childNodes
        text3 = elm1.childNodes[0]
        # Sibling links among the root's three children:
        self.assertIsNone(text1.previousSibling)
        self.assertIs(text1.nextSibling, elm1)
        self.assertIs(elm1.previousSibling, text1)
        self.assertIs(elm1.nextSibling, text2)
        self.assertIs(text2.previousSibling, elm1)
        self.assertIsNone(text2.nextSibling)
        # The lone child of <subelm> has no siblings:
        self.assertIsNone(text3.previousSibling)
        self.assertIsNone(text3.nextSibling)
        # Parent links:
        self.assertIs(root.parentNode, doc)
        self.assertIs(text1.parentNode, root)
        self.assertIs(elm1.parentNode, root)
        self.assertIs(text2.parentNode, root)
        self.assertIs(text3.parentNode, elm1)
        doc.unlink()
def test_main():
    # Entry point used by the regression-test driver.
    run_unittest(PullDOMTestCase, ThoroughTestCase, SAX2DOMTestCase)


if __name__ == "__main__":
    test_main()
=======
import io
import unittest
import sys
import xml.sax
from xml.sax.xmlreader import AttributesImpl
from xml.dom import pulldom
from test.support import run_unittest, findfile
# Sample document shipped in the shared xmltestdata directory.
tstfile = findfile("test.xml", subdir="xmltestdata")

# A handy XML snippet, containing attributes, a namespace prefix, and a
# self-closing tag:
SMALL_SAMPLE = """<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:xdc="http://www.xml.com/books">
<!-- A comment -->
<title>Introduction to XSL</title>
<hr/>
<p><xdc:author xdc:attrib="prefixed attribute" attrib="other attrib">A. Namespace</xdc:author></p>
</html>"""
class PullDOMTestCase(unittest.TestCase):
    """Event-by-event checks of pulldom.parseString / DOMEventStream."""

    def test_parse(self):
        """Minimal test of DOMEventStream.parse()"""
        # This just tests that parsing from a stream works. Actual parser
        # semantics are tested using parseString with a more focused XML
        # fragment.
        # Test with a filename:
        handler = pulldom.parse(tstfile)
        self.addCleanup(handler.stream.close)
        list(handler)
        # Test with a file object:
        with open(tstfile, "rb") as fin:
            list(pulldom.parse(fin))

    def test_parse_semantics(self):
        """Test DOMEventStream parsing semantics."""
        items = pulldom.parseString(SMALL_SAMPLE)
        evt, node = next(items)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        self.assertEqual(2, len(node.attributes))
        self.assertEqual(node.attributes.getNamedItem("xmlns:xdc").value,
                         "http://www.xml.com/books")
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)  # Line break
        evt, node = next(items)
        # XXX - A comment should be reported here!
        # self.assertEqual(pulldom.COMMENT, evt)
        # Line break after swallowed comment:
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual("title", node.tagName)
        title_node = node
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("Introduction to XSL", node.data)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("title", node.tagName)
        # END_ELEMENT must hand back the very same node object:
        self.assertTrue(title_node is node)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        # XXX No END_DOCUMENT item is ever obtained:
        #evt, node = next(items)
        #self.assertEqual(pulldom.END_DOCUMENT, evt)

    def test_expandItem(self):
        """Ensure expandItem works as expected."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Loop through the nodes until we get to a "title" start tag:
        for evt, item in items:
            if evt == pulldom.START_ELEMENT and item.tagName == "title":
                items.expandNode(item)
                self.assertEqual(1, len(item.childNodes))
                break
        else:
            self.fail("No \"title\" element detected in SMALL_SAMPLE!")
        # Loop until we get to the next start-element:
        for evt, node in items:
            if evt == pulldom.START_ELEMENT:
                break
        self.assertEqual("hr", node.tagName,
                         "expandNode did not leave DOMEventStream in the correct state.")
        # Attempt to expand a standalone element:
        items.expandNode(node)
        self.assertEqual(next(items)[0], pulldom.CHARACTERS)
        evt, node = next(items)
        self.assertEqual(node.tagName, "p")
        items.expandNode(node)
        next(items)  # Skip character data
        evt, node = next(items)
        self.assertEqual(node.tagName, "html")
        with self.assertRaises(StopIteration):
            next(items)
        # clear() must drop the parser/stream references:
        items.clear()
        self.assertIsNone(items.parser)
        self.assertIsNone(items.stream)

    @unittest.expectedFailure
    def test_comment(self):
        """PullDOM does not receive "comment" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        for evt, _ in items:
            if evt == pulldom.COMMENT:
                break
        else:
            self.fail("No comment was encountered")

    @unittest.expectedFailure
    def test_end_document(self):
        """PullDOM does not receive "end-document" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Read all of the nodes up to and including </html>:
        for evt, node in items:
            if evt == pulldom.END_ELEMENT and node.tagName == "html":
                break
        try:
            # Assert that the next node is END_DOCUMENT:
            evt, node = next(items)
            self.assertEqual(pulldom.END_DOCUMENT, evt)
        except StopIteration:
            self.fail(
                "Ran out of events, but should have received END_DOCUMENT")
class ThoroughTestCase(unittest.TestCase):
    """Test the hard-to-reach parts of pulldom."""

    def test_thorough_parse(self):
        """Test some of the hard-to-reach parts of PullDOM."""
        # Drive DOMEventStream with a scripted fake parser instead of expat.
        self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))

    @unittest.expectedFailure
    def test_sax2dom_fail(self):
        """SAX2DOM can't handle a PI before the root element."""
        pd = SAX2DOMTestHelper(None, SAXExerciser(), 12)
        self._test_thorough(pd)

    def test_thorough_sax2dom(self):
        """Test some of the hard-to-reach parts of SAX2DOM."""
        # SAX2DOMExerciser omits the pre-root comment/PI, so this passes.
        pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)
        self._test_thorough(pd, False)

    def _test_thorough(self, pd, before_root=True):
        """Test some of the hard-to-reach parts of the parser, using a mock
        parser."""
        evt, node = next(pd)
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        if before_root:
            # Comment and PI buffered before the root element must be
            # replayed once the document node exists.
            evt, node = next(pd)
            self.assertEqual(pulldom.COMMENT, evt)
            self.assertEqual("a comment", node.data)
            evt, node = next(pd)
            self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
            self.assertEqual("target", node.target)
            self.assertEqual("data", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.COMMENT, evt)
        self.assertEqual("a comment", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
        self.assertEqual("target", node.target)
        self.assertEqual("data", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("text", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_DOCUMENT, evt)
class SAXExerciser(object):
    """A fake SAX parser whose parse() replays a scripted sequence of
    handler events, including the hard-to-reach comment/PI callbacks
    that occur before the root element."""

    def setContentHandler(self, handler):
        self._handler = handler

    def parse(self, _):
        # The pre-root comment and PI ensure that events preceding the
        # first start-element are properly stored and emitted.
        events = [
            ("startDocument", ()),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("html", AttributesImpl({}))),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("p", AttributesImpl({"class": "paraclass"}))),
            ("characters", ("text",)),
            ("endElement", ("p",)),
            ("endElement", ("html",)),
            ("endDocument", ()),
        ]
        for method, args in events:
            getattr(self._handler, method)(*args)

    def stub(self, *args, **kwargs):
        """Stub method. Does nothing."""
        pass

    # Parser-configuration entry points the driver may call; no-ops here.
    setProperty = stub
    setFeature = stub
class SAX2DOMExerciser(SAXExerciser):
    """Like SAXExerciser, but without the processing instruction and
    comment before the root element, because SAX2DOM can't handle
    content preceding the document element."""

    def parse(self, _):
        events = [
            ("startDocument", ()),
            ("startElement", ("html", AttributesImpl({}))),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("p", AttributesImpl({"class": "paraclass"}))),
            ("characters", ("text",)),
            ("endElement", ("p",)),
            ("endElement", ("html",)),
            ("endDocument", ()),
        ]
        for method, args in events:
            getattr(self._handler, method)(*args)
class SAX2DOMTestHelper(pulldom.DOMEventStream):
    """Allows us to drive SAX2DOM from a DOMEventStream."""

    def reset(self):
        # Substitute SAX2DOM for the PullDOM content handler that
        # DOMEventStream.reset() would normally install.
        self.pulldom = pulldom.SAX2DOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
class SAX2DOMTestCase(unittest.TestCase):
    """End-to-end checks of pulldom.SAX2DOM tree building."""

    def confirm(self, test, testname="Test"):
        # Legacy helper kept for parity with other xml.dom test suites.
        self.assertTrue(test, testname)

    def test_basic(self):
        """Ensure SAX2DOM can parse from a stream."""
        with io.StringIO(SMALL_SAMPLE) as fin:
            sd = SAX2DOMTestHelper(fin, xml.sax.make_parser(),
                                   len(SMALL_SAMPLE))
            for evt, node in sd:
                if evt == pulldom.START_ELEMENT and node.tagName == "html":
                    break
            # Because the buffer is the same length as the XML, all the
            # nodes should have been parsed and added:
            self.assertGreater(len(node.childNodes), 0)

    def testSAX2DOM(self):
        """Ensure SAX2DOM expands nodes as expected."""
        sax2dom = pulldom.SAX2DOM()
        sax2dom.startDocument()
        sax2dom.startElement("doc", {})
        sax2dom.characters("text")
        sax2dom.startElement("subelm", {})
        sax2dom.characters("text")
        sax2dom.endElement("subelm")
        sax2dom.characters("text")
        sax2dom.endElement("doc")
        sax2dom.endDocument()
        doc = sax2dom.document
        root = doc.documentElement
        (text1, elm1, text2) = root.childNodes
        text3 = elm1.childNodes[0]
        # Sibling links among the root's three children:
        self.assertIsNone(text1.previousSibling)
        self.assertIs(text1.nextSibling, elm1)
        self.assertIs(elm1.previousSibling, text1)
        self.assertIs(elm1.nextSibling, text2)
        self.assertIs(text2.previousSibling, elm1)
        self.assertIsNone(text2.nextSibling)
        # The lone child of <subelm> has no siblings:
        self.assertIsNone(text3.previousSibling)
        self.assertIsNone(text3.nextSibling)
        # Parent links:
        self.assertIs(root.parentNode, doc)
        self.assertIs(text1.parentNode, root)
        self.assertIs(elm1.parentNode, root)
        self.assertIs(text2.parentNode, root)
        self.assertIs(text3.parentNode, elm1)
        doc.unlink()
def test_main():
    # Entry point used by the regression-test driver.
    run_unittest(PullDOMTestCase, ThoroughTestCase, SAX2DOMTestCase)


if __name__ == "__main__":
    test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
import io
import unittest
import sys
import xml.sax
from xml.sax.xmlreader import AttributesImpl
from xml.dom import pulldom
from test.support import run_unittest, findfile
# Sample document shipped in the shared xmltestdata directory.
tstfile = findfile("test.xml", subdir="xmltestdata")

# A handy XML snippet, containing attributes, a namespace prefix, and a
# self-closing tag:
SMALL_SAMPLE = """<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:xdc="http://www.xml.com/books">
<!-- A comment -->
<title>Introduction to XSL</title>
<hr/>
<p><xdc:author xdc:attrib="prefixed attribute" attrib="other attrib">A. Namespace</xdc:author></p>
</html>"""
class PullDOMTestCase(unittest.TestCase):
    """Event-by-event checks of pulldom.parseString / DOMEventStream."""

    def test_parse(self):
        """Minimal test of DOMEventStream.parse()"""
        # This just tests that parsing from a stream works. Actual parser
        # semantics are tested using parseString with a more focused XML
        # fragment.
        # Test with a filename:
        handler = pulldom.parse(tstfile)
        self.addCleanup(handler.stream.close)
        list(handler)
        # Test with a file object:
        with open(tstfile, "rb") as fin:
            list(pulldom.parse(fin))

    def test_parse_semantics(self):
        """Test DOMEventStream parsing semantics."""
        items = pulldom.parseString(SMALL_SAMPLE)
        evt, node = next(items)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        self.assertEqual(2, len(node.attributes))
        self.assertEqual(node.attributes.getNamedItem("xmlns:xdc").value,
                         "http://www.xml.com/books")
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)  # Line break
        evt, node = next(items)
        # XXX - A comment should be reported here!
        # self.assertEqual(pulldom.COMMENT, evt)
        # Line break after swallowed comment:
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual("title", node.tagName)
        title_node = node
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("Introduction to XSL", node.data)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("title", node.tagName)
        # END_ELEMENT must hand back the very same node object:
        self.assertTrue(title_node is node)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        # XXX No END_DOCUMENT item is ever obtained:
        #evt, node = next(items)
        #self.assertEqual(pulldom.END_DOCUMENT, evt)

    def test_expandItem(self):
        """Ensure expandItem works as expected."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Loop through the nodes until we get to a "title" start tag:
        for evt, item in items:
            if evt == pulldom.START_ELEMENT and item.tagName == "title":
                items.expandNode(item)
                self.assertEqual(1, len(item.childNodes))
                break
        else:
            self.fail("No \"title\" element detected in SMALL_SAMPLE!")
        # Loop until we get to the next start-element:
        for evt, node in items:
            if evt == pulldom.START_ELEMENT:
                break
        self.assertEqual("hr", node.tagName,
                         "expandNode did not leave DOMEventStream in the correct state.")
        # Attempt to expand a standalone element:
        items.expandNode(node)
        self.assertEqual(next(items)[0], pulldom.CHARACTERS)
        evt, node = next(items)
        self.assertEqual(node.tagName, "p")
        items.expandNode(node)
        next(items)  # Skip character data
        evt, node = next(items)
        self.assertEqual(node.tagName, "html")
        with self.assertRaises(StopIteration):
            next(items)
        # clear() must drop the parser/stream references:
        items.clear()
        self.assertIsNone(items.parser)
        self.assertIsNone(items.stream)

    @unittest.expectedFailure
    def test_comment(self):
        """PullDOM does not receive "comment" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        for evt, _ in items:
            if evt == pulldom.COMMENT:
                break
        else:
            self.fail("No comment was encountered")

    @unittest.expectedFailure
    def test_end_document(self):
        """PullDOM does not receive "end-document" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Read all of the nodes up to and including </html>:
        for evt, node in items:
            if evt == pulldom.END_ELEMENT and node.tagName == "html":
                break
        try:
            # Assert that the next node is END_DOCUMENT:
            evt, node = next(items)
            self.assertEqual(pulldom.END_DOCUMENT, evt)
        except StopIteration:
            self.fail(
                "Ran out of events, but should have received END_DOCUMENT")
class ThoroughTestCase(unittest.TestCase):
    """Test the hard-to-reach parts of pulldom."""

    def test_thorough_parse(self):
        """Test some of the hard-to-reach parts of PullDOM."""
        # Drive DOMEventStream with a scripted fake parser instead of expat.
        self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))

    @unittest.expectedFailure
    def test_sax2dom_fail(self):
        """SAX2DOM can't handle a PI before the root element."""
        pd = SAX2DOMTestHelper(None, SAXExerciser(), 12)
        self._test_thorough(pd)

    def test_thorough_sax2dom(self):
        """Test some of the hard-to-reach parts of SAX2DOM."""
        # SAX2DOMExerciser omits the pre-root comment/PI, so this passes.
        pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)
        self._test_thorough(pd, False)

    def _test_thorough(self, pd, before_root=True):
        """Test some of the hard-to-reach parts of the parser, using a mock
        parser."""
        evt, node = next(pd)
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        if before_root:
            # Comment and PI buffered before the root element must be
            # replayed once the document node exists.
            evt, node = next(pd)
            self.assertEqual(pulldom.COMMENT, evt)
            self.assertEqual("a comment", node.data)
            evt, node = next(pd)
            self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
            self.assertEqual("target", node.target)
            self.assertEqual("data", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.COMMENT, evt)
        self.assertEqual("a comment", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
        self.assertEqual("target", node.target)
        self.assertEqual("data", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("text", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_DOCUMENT, evt)
class SAXExerciser(object):
    """A fake SAX parser whose parse() replays a scripted sequence of
    handler events, including the hard-to-reach comment/PI callbacks
    that occur before the root element."""

    def setContentHandler(self, handler):
        self._handler = handler

    def parse(self, _):
        # The pre-root comment and PI ensure that events preceding the
        # first start-element are properly stored and emitted.
        events = [
            ("startDocument", ()),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("html", AttributesImpl({}))),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("p", AttributesImpl({"class": "paraclass"}))),
            ("characters", ("text",)),
            ("endElement", ("p",)),
            ("endElement", ("html",)),
            ("endDocument", ()),
        ]
        for method, args in events:
            getattr(self._handler, method)(*args)

    def stub(self, *args, **kwargs):
        """Stub method. Does nothing."""
        pass

    # Parser-configuration entry points the driver may call; no-ops here.
    setProperty = stub
    setFeature = stub
class SAX2DOMExerciser(SAXExerciser):
    """Like SAXExerciser, but without the processing instruction and
    comment before the root element, because SAX2DOM can't handle
    content preceding the document element."""

    def parse(self, _):
        events = [
            ("startDocument", ()),
            ("startElement", ("html", AttributesImpl({}))),
            ("comment", ("a comment",)),
            ("processingInstruction", ("target", "data")),
            ("startElement", ("p", AttributesImpl({"class": "paraclass"}))),
            ("characters", ("text",)),
            ("endElement", ("p",)),
            ("endElement", ("html",)),
            ("endDocument", ()),
        ]
        for method, args in events:
            getattr(self._handler, method)(*args)
class SAX2DOMTestHelper(pulldom.DOMEventStream):
    """Allows us to drive SAX2DOM from a DOMEventStream."""

    def reset(self):
        # Substitute SAX2DOM for the PullDOM content handler that
        # DOMEventStream.reset() would normally install.
        self.pulldom = pulldom.SAX2DOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
class SAX2DOMTestCase(unittest.TestCase):
    """End-to-end checks of pulldom.SAX2DOM tree building."""

    def confirm(self, test, testname="Test"):
        # Legacy helper kept for parity with other xml.dom test suites.
        self.assertTrue(test, testname)

    def test_basic(self):
        """Ensure SAX2DOM can parse from a stream."""
        with io.StringIO(SMALL_SAMPLE) as fin:
            sd = SAX2DOMTestHelper(fin, xml.sax.make_parser(),
                                   len(SMALL_SAMPLE))
            for evt, node in sd:
                if evt == pulldom.START_ELEMENT and node.tagName == "html":
                    break
            # Because the buffer is the same length as the XML, all the
            # nodes should have been parsed and added:
            self.assertGreater(len(node.childNodes), 0)

    def testSAX2DOM(self):
        """Ensure SAX2DOM expands nodes as expected."""
        sax2dom = pulldom.SAX2DOM()
        sax2dom.startDocument()
        sax2dom.startElement("doc", {})
        sax2dom.characters("text")
        sax2dom.startElement("subelm", {})
        sax2dom.characters("text")
        sax2dom.endElement("subelm")
        sax2dom.characters("text")
        sax2dom.endElement("doc")
        sax2dom.endDocument()
        doc = sax2dom.document
        root = doc.documentElement
        (text1, elm1, text2) = root.childNodes
        text3 = elm1.childNodes[0]
        # Sibling links among the root's three children:
        self.assertIsNone(text1.previousSibling)
        self.assertIs(text1.nextSibling, elm1)
        self.assertIs(elm1.previousSibling, text1)
        self.assertIs(elm1.nextSibling, text2)
        self.assertIs(text2.previousSibling, elm1)
        self.assertIsNone(text2.nextSibling)
        # The lone child of <subelm> has no siblings:
        self.assertIsNone(text3.previousSibling)
        self.assertIsNone(text3.nextSibling)
        # Parent links:
        self.assertIs(root.parentNode, doc)
        self.assertIs(text1.parentNode, root)
        self.assertIs(elm1.parentNode, root)
        self.assertIs(text2.parentNode, root)
        self.assertIs(text3.parentNode, elm1)
        doc.unlink()
def test_main():
    # Entry point used by the regression-test driver.
    run_unittest(PullDOMTestCase, ThoroughTestCase, SAX2DOMTestCase)


if __name__ == "__main__":
    test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| mit |
hkhpub/show_and_tell_korean | webdemo/webdemo/settings.py | 1 | 3274 | """
Django settings for webdemo project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'fe@7+3w2fgy$rv77mwx*dzcz5jtitr-ey8vwx0lk1p9o3fzzg#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Only this host may serve the site.
ALLOWED_HOSTS = ["163.239.199.196"]

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap3',
    'webdemo',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'webdemo.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'webdemo.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

# NOTE(review): PROJECT_ROOT is defined but unused in this module -
# presumably consumed by static/media config elsewhere; confirm before
# removing.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | mit |
kaynfiretvguru/Eldritch | plugin.video.youtube/resources/lib/kodion/items/utils.py | 27 | 1871 | import json
__author__ = 'bromix'
from .video_item import VideoItem
from .directory_item import DirectoryItem
from .audio_item import AudioItem
from .image_item import ImageItem
def from_json(json_data):
    """Re-create an item instance from a JSON dump or an
    already-decoded dict.

    :param json_data: JSON string or dict produced by to_json()/to_jsons()
    :return: the reconstructed item, or the raw data when the payload
             does not describe a known item type
    """
    factories = {'VideoItem': lambda: VideoItem(u'', u''),
                 'DirectoryItem': lambda: DirectoryItem(u'', u''),
                 'AudioItem': lambda: AudioItem(u'', u''),
                 'ImageItem': lambda: ImageItem(u'', u'')}

    if isinstance(json_data, basestring):
        json_data = json.loads(json_data)

    factory = factories.get(json_data.get('type', None))
    if factory is None:
        # Not a serialized item - hand the raw data back unchanged.
        return json_data

    item = factory()
    # Restore only attributes the freshly built item actually has.
    for key, value in json_data.get('data', {}).items():
        if hasattr(item, key):
            setattr(item, key, value)
    return item
def to_jsons(base_item):
    """Serialize *base_item* all the way to a JSON string."""
    payload = to_json(base_item)
    return json.dumps(payload)
def to_json(base_item):
    """
    Convert the given @base_item to a JSON-serializable dict.
    :param base_item: item instance (or plain dict) to serialize
    :return: dict ready for json.dumps
    """
    def _to_json(obj):
        if isinstance(obj, dict):
            # BUGFIX: plain dicts are already JSON-serializable. The old
            # code returned obj.__dict__, which raises AttributeError
            # because builtin dict instances carry no __dict__.
            return obj
        mapping = {VideoItem: 'VideoItem',
                   DirectoryItem: 'DirectoryItem',
                   AudioItem: 'AudioItem',
                   ImageItem: 'ImageItem'}
        for key in mapping:
            if isinstance(obj, key):
                # Tag the payload so from_json() can pick the right factory.
                return {'type': mapping[key], 'data': obj.__dict__}
            pass
        # Unknown object: fall back to its instance attributes.
        return obj.__dict__
    return _to_json(base_item)
| gpl-2.0 |
andmos/ansible | test/units/modules/network/dellos6/dellos6_module.py | 52 | 2516 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the contents of fixture *name*, memoized in fixture_data.

    The file's text is decoded as JSON when possible; otherwise the raw
    string is cached and returned unchanged.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as handle:
        raw = handle.read()

    try:
        parsed = json.loads(raw)
    except Exception:
        # Fixture is not JSON - keep the raw text (intentional best-effort).
        parsed = raw

    fixture_data[path] = parsed
    return parsed
class TestDellos6Module(ModuleTestCase):
    # Shared harness for dellos6 module tests: concrete subclasses set
    # self.module and override load_fixtures() to stub device responses.

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        # `defaults` is accepted for signature parity with sibling test
        # bases; it is unused here.
        self.load_fixtures(commands)
        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)
        if commands is not None:
            # `sort` lets callers ignore command ordering differences.
            if sort:
                self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
            else:
                self.assertEqual(commands, result['updates'], result['updates'])
        return result

    def failed(self):
        # Run the module and require it to exit via fail_json.
        with self.assertRaises(AnsibleFailJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertTrue(result['failed'], result)
        return result

    def changed(self, changed=False):
        # Run the module and require it to exit via exit_json with the
        # expected `changed` flag.
        with self.assertRaises(AnsibleExitJson) as exc:
            self.module.main()
        result = exc.exception.args[0]
        self.assertEqual(result['changed'], changed, result)
        return result

    def load_fixtures(self, commands=None):
        # Overridden by concrete test classes.
        pass
| gpl-3.0 |
smithsps/LawsonAPI | old/src/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    # Multi-byte charset prober specialised for Korean EUC-KR: it pairs the
    # EUC-KR coding state machine (byte-sequence validator) with the EUC-KR
    # character distribution analyser (frequency scorer).
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()  # start from a clean probing state
    def get_charset_name(self):
        # Canonical charset name reported when this prober wins.
        return "EUC-KR"
| mit |
hzlf/openbroadcast | website/apps/django_date_extensions/tests.py | 1 | 4994 | import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
from fields import ApproximateDate
import unittest
class PastAndFuture(unittest.TestCase):
    """Checks for the mutually exclusive past=/future= ApproximateDate flags."""

    def test_setting_both(self):
        # A date cannot be simultaneously in the past and in the future.
        with self.assertRaises(ValueError):
            ApproximateDate(past=True, future=True)

    def test_setting_with_dates(self):
        # Neither symbolic flag may be combined with a concrete year.
        for flag in ('future', 'past'):
            with self.assertRaises(ValueError):
                ApproximateDate(year=2000, **{flag: True})

    def test_stringification(self):
        # Both str() and repr() collapse to the bare flag name.
        for flag in ('future', 'past'):
            approx = ApproximateDate(**{flag: True})
            self.assertEqual(str(approx), flag)
            self.assertEqual(repr(approx), flag)
class CompareDates(unittest.TestCase):
    """Exhaustive ordering checks for ApproximateDate.

    Invariants exercised: concrete years order naturally, past=True sorts
    below everything, future=True sorts above everything, and the two
    sentinels compare equal to themselves so sorting is stable.
    """
    def test_compare(self):
        past = ApproximateDate( past=True )
        past_too = ApproximateDate( past=True )
        y_past = ApproximateDate( year=2000 )
        y_future = ApproximateDate( year=2100 )
        future = ApproximateDate( future=True )
        future_too = ApproximateDate( future=True )
        # check that we can be compared to None, '' and u''
        for bad_val in ( '', u'', None ):
            self.assertFalse( y_past in ( bad_val, ) )
            self.assertFalse( y_past == bad_val )
            self.assertTrue( y_past != bad_val )
        # sanity check
        self.assertTrue( y_past == y_past )
        self.assertTrue( y_future == y_future )
        self.assertFalse( y_past != y_past )
        self.assertFalse( y_future != y_future )
        self.assertTrue( y_past != y_future )
        self.assertTrue( y_future != y_past )
        self.assertTrue( y_future > y_past )
        self.assertTrue( y_future >= y_past )
        self.assertFalse( y_past > y_future )
        self.assertFalse( y_past >= y_future )
        self.assertTrue( y_past < y_future )
        self.assertTrue( y_past <= y_future )
        self.assertFalse( y_future < y_past )
        self.assertFalse( y_future <= y_past )
        # Future dates are always greater
        self.assertTrue( y_past < future )
        self.assertTrue( y_past <= future )
        self.assertTrue( y_past != future )
        self.assertTrue( y_future < future )
        self.assertTrue( y_future <= future )
        self.assertTrue( y_future != future )
        self.assertTrue( future > y_past )
        self.assertTrue( future >= y_past )
        self.assertTrue( future != y_past )
        self.assertTrue( future > y_future )
        self.assertTrue( future >= y_future )
        self.assertTrue( future != y_future )
        # Past dates are always lesser
        self.assertTrue( y_past > past )
        self.assertTrue( y_past >= past )
        self.assertTrue( y_past != past )
        self.assertTrue( y_future > past )
        self.assertTrue( y_future >= past )
        self.assertTrue( y_future != past )
        self.assertTrue( past < y_past )
        self.assertTrue( past <= y_past )
        self.assertTrue( past != y_past )
        self.assertTrue( past < y_future )
        self.assertTrue( past <= y_future )
        self.assertTrue( past != y_future )
        # Past and future comparisons
        self.assertTrue( past < future )
        self.assertTrue( past <= future )
        self.assertTrue( past != future )
        self.assertTrue( future > past )
        self.assertTrue( future >= past )
        self.assertTrue( future != past )
        # Future and past dates are equal to themselves (so that sorting is sane)
        self.assertFalse( future < future )
        self.assertTrue( future <= future )
        self.assertTrue( future == future )
        self.assertTrue( future >= future )
        self.assertFalse( future > future )
        self.assertTrue( future == future_too )
        self.assertFalse( future != future_too )
        self.assertFalse( past < past )
        self.assertTrue( past <= past )
        self.assertTrue( past == past )
        self.assertTrue( past >= past )
        self.assertFalse( past > past )
        self.assertTrue( past == past_too )
        self.assertFalse( past != past_too )
class Lengths(unittest.TestCase):
    """Verify len() of an ApproximateDate at each level of precision."""

    # (constructor kwargs, expected len()) pairs.  The original used legacy
    # octal-style literals (01) for month/day -- identical in value to 1 on
    # Python 2, but a SyntaxError on Python 3, so plain decimals are used.
    known_lengths = (
        ({'year': 1999}, 10),
        ({'year': 1999, 'month': 1}, 10),
        ({'year': 1999, 'month': 1, 'day': 1}, 10),
        ({'future': True}, 6),
        ({'past': True}, 4),
    )

    def test_length(self):
        for kwargs, expected in self.known_lengths:
            approx = ApproximateDate(**kwargs)
            self.assertEqual(len(approx), expected)
if __name__ == "__main__":
    # Allow running this suite directly: python tests.py
    unittest.main()
| gpl-3.0 |
Anonymouslemming/ansible | lib/ansible/utils/module_docs_fragments/netapp.py | 63 | 3287 | #
# (c) 2016, Sumit Kumar <sumit4@netapp.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable DOCUMENTATION fragments for the NetApp storage modules.

    Each class attribute is a YAML string that Ansible splices into a
    module's docs via ``extends_documentation_fragment``.  The strings are
    data consumed at runtime -- do not reformat them.
    """
    # Standard NetApp documentation fragment, shared by all platforms.
    # NOTE(review): "Platforms: E-Series..." below puts ': ' inside an
    # unquoted plain YAML scalar, which strict parsers reject -- verify the
    # fragment still loads.
    DOCUMENTATION = """
options:
  - See respective platform section for more details
requirements:
  - See respective platform section for more details
notes:
  - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
"""
    # Documentation fragment for ONTAP
    ONTAP = """
options:
  hostname:
      required: true
      description:
      - The hostname or IP address of the ONTAP instance.
  username:
      required: true
      description:
      - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
        For more information, please read the documentation U(https://goo.gl/BRu78Z).
  password:
      required: true
      description:
      - Password for the specified user.
requirements:
  - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
  - Ansible 2.2
  - netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
notes:
  - The modules prefixed with C(netapp\_cdot) are built to support the ONTAP storage platform.
"""
    # Documentation fragment for SolidFire
    SOLIDFIRE = """
options:
  hostname:
      required: true
      description:
      - The hostname or IP address of the SolidFire cluster.
  username:
      required: true
      description:
      - Please ensure that the user has the adequate permissions. For more information, please read the official documentation U(https://goo.gl/ddJa4Q).
  password:
      required: true
      description:
      - Password for the specified user.
requirements:
  - solidfire-sdk-python (1.1.0.92)
notes:
  - The modules prefixed with C(sf\_) are built to support the SolidFire storage platform.
"""
    # Documentation fragment for E-Series
    ESERIES = """
options:
  api_username:
    required: true
    description:
    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_password:
    required: true
    description:
    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_url:
    required: true
    description:
    - The url to the SANtricity WebServices Proxy or embedded REST API.
    example:
    - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
  ssid:
    required: true
    description:
    - The ID of the array to manage. This value must be unique for each array.
"""
| gpl-3.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/numpy/distutils/cpuinfo.py | 40 | 22989 | #!/usr/bin/env python
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__all__ = ['cpu']
import sys, re, types
import os
if sys.version_info[0] >= 3:
from subprocess import getstatusoutput
else:
from commands import getstatusoutput
import warnings
import platform
from numpy.distutils.compat import get_exception
def getoutput(cmd, successful_status=(0,), stacklevel=1):
    """Run *cmd* in a shell and return ``(ok, output)``.

    ``ok`` is True only when the command exited normally with a status in
    *successful_status*.  If spawning the command raises EnvironmentError,
    a warning is issued and ``(False, '')`` is returned.
    """
    try:
        status, output = getstatusoutput(cmd)
    except EnvironmentError:
        e = get_exception()
        warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
        # BUG FIX: 'output' was previously referenced here before
        # assignment, raising NameError instead of reporting failure.
        return False, ''
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
        return True, output
    return False, output
def command_info(successful_status=(0,), stacklevel=1, **kw):
    """Run each keyword argument's command and collect successful outputs.

    Returns a dict mapping keyword name -> stripped command output; keys
    whose command failed are simply omitted.
    """
    collected = {}
    for label in kw:
        ok, text = getoutput(kw[label], successful_status=successful_status,
                             stacklevel=stacklevel+1)
        if ok:
            collected[label] = text.strip()
    return collected
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
    """Yield the stripped lines of *cmd*'s output; yield nothing on failure."""
    ok, text = getoutput(cmd, successful_status=successful_status,
                         stacklevel=stacklevel+1)
    if not ok:
        return
    for raw_line in text.splitlines():
        yield raw_line.strip()
def key_value_from_command(cmd, sep, successful_status=(0,),
                           stacklevel=1):
    """Parse ``key<sep>value`` lines of *cmd*'s output into a dict.

    Lines that do not split into exactly two fields on the first *sep*
    occurrence are ignored.
    """
    table = {}
    for line in command_by_line(cmd, successful_status=successful_status,
                                stacklevel=stacklevel+1):
        fields = [piece.strip() for piece in line.split(sep, 1)]
        if len(fields) == 2:
            table[fields[0]] = fields[1]
    return table
class CPUInfoBase(object):
    """Holds CPU information and provides methods for requiring
    the availability of various CPU features.
    """
    def _try_call(self, func):
        # Call func(), absorbing *any* exception; returns None on failure.
        # NOTE(review): the bare except also hides KeyboardInterrupt/SystemExit.
        try:
            return func()
        except:
            pass
    def __getattr__(self, name):
        # Expose every private predicate _foo as a safe public foo() that
        # never raises (errors are absorbed by _try_call).  Non-method
        # private attributes map to a lambda returning None.
        if not name.startswith('_'):
            if hasattr(self, '_'+name):
                attr = getattr(self, '_'+name)
                if isinstance(attr, types.MethodType):
                    return lambda func=self._try_call,attr=attr : func(attr)
                else:
                    return lambda : None
            raise AttributeError(name)
    def _getNCPUs(self):
        # Conservative default; platform subclasses override this.
        return 1
    def __get_nbits(self):
        # Pointer width ('32' or '64') parsed from platform.architecture().
        abits = platform.architecture()[0]
        nbits = re.compile(r'(\d+)bit').search(abits).group(1)
        return nbits
    def _is_32bit(self):
        return self.__get_nbits() == '32'
    def _is_64bit(self):
        return self.__get_nbits() == '64'
class LinuxCPUInfo(CPUInfoBase):
    """CPU information for Linux, parsed from /proc/cpuinfo and `uname -m`.

    ``info`` is a class-level cache: a list with one dict of key/value
    pairs per logical processor (plus 'uname_m' on the first entry).
    """
    info = None
    def __init__(self):
        if self.info is not None:
            return  # already cached for the process
        info = [ {} ]
        ok, output = getoutput('uname -m')
        if ok:
            info[0]['uname_m'] = output.strip()
        try:
            fo = open('/proc/cpuinfo')
        except EnvironmentError:
            e = get_exception()
            warnings.warn(str(e), UserWarning, stacklevel=2)
        else:
            # Each 'key : value' line belongs to the current processor; a
            # repeated key starts the next processor's dict.
            for line in fo:
                name_value = [s.strip() for s in line.split(':', 1)]
                if len(name_value) != 2:
                    continue
                name, value = name_value
                if not info or name in info[-1]: # next processor
                    info.append({})
                info[-1][name] = value
            fo.close()
        self.__class__.info = info
    def _not_impl(self): pass
    # Athlon
    def _is_AMD(self):
        return self.info[0]['vendor_id']=='AuthenticAMD'
    def _is_AthlonK6_2(self):
        return self._is_AMD() and self.info[0]['model'] == '2'
    def _is_AthlonK6_3(self):
        return self._is_AMD() and self.info[0]['model'] == '3'
    def _is_AthlonK6(self):
        return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
    def _is_AthlonK7(self):
        return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
    def _is_AthlonMP(self):
        return re.match(r'.*?Athlon\(tm\) MP\b',
                        self.info[0]['model name']) is not None
    def _is_AMD64(self):
        # NOTE(review): /proc/cpuinfo uses 'cpu family', not 'family'; a
        # KeyError here is silently turned into None by CPUInfoBase -- confirm.
        return self.is_AMD() and self.info[0]['family'] == '15'
    def _is_Athlon64(self):
        return re.match(r'.*?Athlon\(tm\) 64\b',
                        self.info[0]['model name']) is not None
    def _is_AthlonHX(self):
        return re.match(r'.*?Athlon HX\b',
                        self.info[0]['model name']) is not None
    def _is_Opteron(self):
        return re.match(r'.*?Opteron\b',
                        self.info[0]['model name']) is not None
    def _is_Hammer(self):
        return re.match(r'.*?Hammer\b',
                        self.info[0]['model name']) is not None
    # Alpha
    def _is_Alpha(self):
        return self.info[0]['cpu']=='Alpha'
    def _is_EV4(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
    def _is_EV5(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
    def _is_EV56(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
    def _is_PCA56(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
    # Intel
    #XXX
    _is_i386 = _not_impl
    def _is_Intel(self):
        return self.info[0]['vendor_id']=='GenuineIntel'
    def _is_i486(self):
        return self.info[0]['cpu']=='i486'
    def _is_i586(self):
        return self.is_Intel() and self.info[0]['cpu family'] == '5'
    def _is_i686(self):
        return self.is_Intel() and self.info[0]['cpu family'] == '6'
    def _is_Celeron(self):
        return re.match(r'.*?Celeron',
                        self.info[0]['model name']) is not None
    def _is_Pentium(self):
        return re.match(r'.*?Pentium',
                        self.info[0]['model name']) is not None
    def _is_PentiumII(self):
        return re.match(r'.*?Pentium.*?II\b',
                        self.info[0]['model name']) is not None
    def _is_PentiumPro(self):
        return re.match(r'.*?PentiumPro\b',
                        self.info[0]['model name']) is not None
    def _is_PentiumMMX(self):
        return re.match(r'.*?Pentium.*?MMX\b',
                        self.info[0]['model name']) is not None
    def _is_PentiumIII(self):
        return re.match(r'.*?Pentium.*?III\b',
                        self.info[0]['model name']) is not None
    def _is_PentiumIV(self):
        return re.match(r'.*?Pentium.*?(IV|4)\b',
                        self.info[0]['model name']) is not None
    def _is_PentiumM(self):
        return re.match(r'.*?Pentium.*?M\b',
                        self.info[0]['model name']) is not None
    def _is_Prescott(self):
        return self.is_PentiumIV() and self.has_sse3()
    def _is_Nocona(self):
        # Nocona: P4-era 64-bit Xeon -- sse3 but not ssse3, with the 'lm'
        # (long mode) flag present.
        return self.is_Intel() \
               and (self.info[0]['cpu family'] == '6' \
                    or self.info[0]['cpu family'] == '15' ) \
               and (self.has_sse3() and not self.has_ssse3())\
               and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None
    def _is_Core2(self):
        return self.is_64bit() and self.is_Intel() and \
               re.match(r'.*?Core\(TM\)2\b', \
                        self.info[0]['model name']) is not None
    def _is_Itanium(self):
        # NOTE(review): also keyed on 'family' rather than 'cpu family' -- confirm.
        return re.match(r'.*?Itanium\b',
                        self.info[0]['family']) is not None
    def _is_XEON(self):
        return re.match(r'.*?XEON\b',
                        self.info[0]['model name'], re.IGNORECASE) is not None
    _is_Xeon = _is_XEON
    # Varia
    def _is_singleCPU(self):
        return len(self.info) == 1
    def _getNCPUs(self):
        return len(self.info)
    def _has_fdiv_bug(self):
        return self.info[0]['fdiv_bug']=='yes'
    def _has_f00f_bug(self):
        return self.info[0]['f00f_bug']=='yes'
    def _has_mmx(self):
        return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
    def _has_sse(self):
        return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
    def _has_sse2(self):
        return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
    def _has_sse3(self):
        # /proc/cpuinfo reports SSE3 as the 'pni' flag.
        return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
    def _has_ssse3(self):
        return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
    def _has_3dnow(self):
        return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
    def _has_3dnowext(self):
        return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
class IRIXCPUInfo(CPUInfoBase):
    """CPU information for IRIX, gathered from `sysconf` output.

    ``info`` is a class-level cache mapping sysconf keys to values.
    """
    info = None
    def __init__(self):
        if self.info is not None:
            return  # already cached for the process
        # sysconf may exit 1 and still produce usable output.
        info = key_value_from_command('sysconf', sep=' ',
                                      successful_status=(0, 1))
        self.__class__.info = info
    def _not_impl(self): pass
    def _is_singleCPU(self):
        return self.info.get('NUM_PROCESSORS') == '1'
    def _getNCPUs(self):
        return int(self.info.get('NUM_PROCESSORS', 1))
    def __cputype(self, n):
        # First PROCESSORS token names the MIPS core, e.g. 'R10000'.
        return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
    def _is_r2000(self): return self.__cputype(2000)
    def _is_r3000(self): return self.__cputype(3000)
    def _is_r3900(self): return self.__cputype(3900)
    def _is_r4000(self): return self.__cputype(4000)
    def _is_r4100(self): return self.__cputype(4100)
    def _is_r4300(self): return self.__cputype(4300)
    def _is_r4400(self): return self.__cputype(4400)
    def _is_r4600(self): return self.__cputype(4600)
    def _is_r4650(self): return self.__cputype(4650)
    def _is_r5000(self): return self.__cputype(5000)
    def _is_r6000(self): return self.__cputype(6000)
    def _is_r8000(self): return self.__cputype(8000)
    def _is_r10000(self): return self.__cputype(10000)
    def _is_r12000(self): return self.__cputype(12000)
    def _is_rorion(self): return self.__cputype('orion')
    def get_ip(self):
        try: return self.info.get('MACHINE')
        except: pass
    def __machine(self, n):
        # SGI board designation, e.g. 'IP27' (Origin 2000).
        return self.info.get('MACHINE').lower() == 'ip%s' % (n)
    def _is_IP19(self): return self.__machine(19)
    def _is_IP20(self): return self.__machine(20)
    def _is_IP21(self): return self.__machine(21)
    def _is_IP22(self): return self.__machine(22)
    def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
    def _is_IP22_5k(self): return self.__machine(22)  and self._is_r5000()
    def _is_IP24(self): return self.__machine(24)
    def _is_IP25(self): return self.__machine(25)
    def _is_IP26(self): return self.__machine(26)
    def _is_IP27(self): return self.__machine(27)
    def _is_IP28(self): return self.__machine(28)
    def _is_IP30(self): return self.__machine(30)
    def _is_IP32(self): return self.__machine(32)
    def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
    def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
class DarwinCPUInfo(CPUInfoBase):
    """CPU information for macOS, gathered from `arch`, `machine` and
    `sysctl hw` output (cached at class level in ``info``)."""
    info = None
    def __init__(self):
        if self.info is not None:
            return  # already cached for the process
        info = command_info(arch='arch',
                            machine='machine')
        info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
        self.__class__.info = info
    def _not_impl(self): pass
    def _getNCPUs(self):
        return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
    def _is_Power_Macintosh(self):
        return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
    def _is_i386(self):
        return self.info['arch']=='i386'
    def _is_ppc(self):
        return self.info['arch']=='ppc'
    def __machine(self, n):
        # PowerPC core designation as reported by `machine`, e.g. 'ppc7450'.
        return self.info['machine'] == 'ppc%s'%n
    def _is_ppc601(self): return self.__machine(601)
    def _is_ppc602(self): return self.__machine(602)
    def _is_ppc603(self): return self.__machine(603)
    def _is_ppc603e(self): return self.__machine('603e')
    def _is_ppc604(self): return self.__machine(604)
    def _is_ppc604e(self): return self.__machine('604e')
    def _is_ppc620(self): return self.__machine(620)
    def _is_ppc630(self): return self.__machine(630)
    def _is_ppc740(self): return self.__machine(740)
    def _is_ppc7400(self): return self.__machine(7400)
    def _is_ppc7450(self): return self.__machine(7450)
    def _is_ppc750(self): return self.__machine(750)
    def _is_ppc403(self): return self.__machine(403)
    def _is_ppc505(self): return self.__machine(505)
    def _is_ppc801(self): return self.__machine(801)
    def _is_ppc821(self): return self.__machine(821)
    def _is_ppc823(self): return self.__machine(823)
    def _is_ppc860(self): return self.__machine(860)
class SunOSCPUInfo(CPUInfoBase):
    """CPU information for Solaris/SunOS, gathered from arch/mach/uname/
    isainfo/psrinfo output (cached at class level in ``info``)."""
    info = None
    def __init__(self):
        if self.info is not None:
            return  # already cached for the process
        info = command_info(arch='arch',
                            mach='mach',
                            # BUG FIX: was 'uname_i', which runs a
                            # nonexistent command instead of `uname -i`.
                            uname_i='uname -i',
                            isainfo_b='isainfo -b',
                            isainfo_n='isainfo -n',
                            )
        info['uname_X'] = key_value_from_command('uname -X', sep='=')
        # Extract the processor name from the first psrinfo -v stanza.
        for line in command_by_line('psrinfo -v 0'):
            m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
            if m:
                info['processor'] = m.group('p')
                break
        self.__class__.info = info
    def _not_impl(self): pass
    def _is_i386(self):
        return self.info['isainfo_n']=='i386'
    def _is_sparc(self):
        return self.info['isainfo_n']=='sparc'
    def _is_sparcv9(self):
        return self.info['isainfo_n']=='sparcv9'
    def _getNCPUs(self):
        return int(self.info['uname_X'].get('NumCPU', 1))
    def _is_sun4(self):
        return self.info['arch']=='sun4'
    def _is_SUNW(self):
        return re.match(r'SUNW', self.info['uname_i']) is not None
    def _is_sparcstation5(self):
        return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
    def _is_ultra1(self):
        return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
    def _is_ultra250(self):
        return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
    def _is_ultra2(self):
        return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
    def _is_ultra30(self):
        return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
    def _is_ultra4(self):
        return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
    def _is_ultra5_10(self):
        return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
    def _is_ultra5(self):
        return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
    def _is_ultra60(self):
        return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
    def _is_ultra80(self):
        return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
    # NOTE: 'ultraenterprice' misspells 'enterprise' but the names are part
    # of the dynamically generated public API (cpu.is_ultraenterprice), so
    # they are kept for backward compatibility.
    def _is_ultraenterprice(self):
        return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
    def _is_ultraenterprice10k(self):
        return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
    def _is_sunfire(self):
        return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
    def _is_ultra(self):
        return re.match(r'.*Ultra', self.info['uname_i']) is not None
    def _is_cpusparcv7(self):
        return self.info['processor']=='sparcv7'
    def _is_cpusparcv8(self):
        return self.info['processor']=='sparcv8'
    def _is_cpusparcv9(self):
        return self.info['processor']=='sparcv9'
class Win32CPUInfo(CPUInfoBase):
    """CPU information for Windows, read from the registry under
    HKLM\\HARDWARE\\DESCRIPTION\\System\\CentralProcessor (one subkey per
    logical processor, cached at class level in ``info``)."""
    info = None
    pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
    # XXX: what does the value of
    #   HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
    # mean?
    def __init__(self):
        if self.info is not None:
            return  # already cached for the process
        info = []
        try:
            #XXX: Bad style to use so long `try:...except:...`. Fix it!
            if sys.version_info[0] >= 3:
                import winreg
            else:
                import _winreg as winreg
            # Family/model/stepping are parsed out of the textual
            # "Identifier" registry value.
            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
            chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
            pnum=0
            while True:
                try:
                    proc=winreg.EnumKey(chnd, pnum)
                except winreg.error:
                    break
                else:
                    pnum+=1
                    info.append({"Processor":proc})
                    phnd=winreg.OpenKey(chnd, proc)
                    pidx=0
                    while True:
                        try:
                            name, value, vtpe=winreg.EnumValue(phnd, pidx)
                        except winreg.error:
                            break
                        else:
                            pidx=pidx+1
                            info[-1][name]=value
                            if name=="Identifier":
                                srch=prgx.search(value)
                                if srch:
                                    info[-1]["Family"]=int(srch.group("FML"))
                                    info[-1]["Model"]=int(srch.group("MDL"))
                                    info[-1]["Stepping"]=int(srch.group("STP"))
        except:
            print(sys.exc_info()[1], '(ignoring)')
        self.__class__.info = info
    def _not_impl(self): pass
    # Athlon
    def _is_AMD(self):
        return self.info[0]['VendorIdentifier']=='AuthenticAMD'
    def _is_Am486(self):
        return self.is_AMD() and self.info[0]['Family']==4
    def _is_Am5x86(self):
        return self.is_AMD() and self.info[0]['Family']==4
    def _is_AMDK5(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model'] in [0, 1, 2, 3]
    def _is_AMDK6(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model'] in [6, 7]
    def _is_AMDK6_2(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model']==8
    def _is_AMDK6_3(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model']==9
    def _is_AMDK7(self):
        return self.is_AMD() and self.info[0]['Family'] == 6
    # To reliably distinguish between the different types of AMD64 chips
    # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
    # require looking at the 'brand' from cpuid
    def _is_AMD64(self):
        return self.is_AMD() and self.info[0]['Family'] == 15
    # Intel
    def _is_Intel(self):
        return self.info[0]['VendorIdentifier']=='GenuineIntel'
    def _is_i386(self):
        return self.info[0]['Family']==3
    def _is_i486(self):
        return self.info[0]['Family']==4
    def _is_i586(self):
        return self.is_Intel() and self.info[0]['Family']==5
    def _is_i686(self):
        return self.is_Intel() and self.info[0]['Family']==6
    def _is_Pentium(self):
        return self.is_Intel() and self.info[0]['Family']==5
    def _is_PentiumMMX(self):
        return self.is_Intel() and self.info[0]['Family']==5 \
               and self.info[0]['Model']==4
    def _is_PentiumPro(self):
        return self.is_Intel() and self.info[0]['Family']==6 \
               and self.info[0]['Model']==1
    def _is_PentiumII(self):
        return self.is_Intel() and self.info[0]['Family']==6 \
               and self.info[0]['Model'] in [3, 5, 6]
    def _is_PentiumIII(self):
        return self.is_Intel() and self.info[0]['Family']==6 \
               and self.info[0]['Model'] in [7, 8, 9, 10, 11]
    def _is_PentiumIV(self):
        return self.is_Intel() and self.info[0]['Family']==15
    def _is_PentiumM(self):
        return self.is_Intel() and self.info[0]['Family'] == 6 \
               and self.info[0]['Model'] in [9, 13, 14]
    def _is_Core2(self):
        return self.is_Intel() and self.info[0]['Family'] == 6 \
               and self.info[0]['Model'] in [15, 16, 17]
    # Varia
    def _is_singleCPU(self):
        return len(self.info) == 1
    def _getNCPUs(self):
        return len(self.info)
    def _has_mmx(self):
        if self.is_Intel():
            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
                   or (self.info[0]['Family'] in [6, 15])
        elif self.is_AMD():
            return self.info[0]['Family'] in [5, 6, 15]
        else:
            return False
    def _has_sse(self):
        if self.is_Intel():
            return (self.info[0]['Family']==6 and \
                    self.info[0]['Model'] in [7, 8, 9, 10, 11]) \
                   or self.info[0]['Family']==15
        elif self.is_AMD():
            return (self.info[0]['Family']==6 and \
                    self.info[0]['Model'] in [6, 7, 8, 10]) \
                   or self.info[0]['Family']==15
        else:
            return False
    def _has_sse2(self):
        if self.is_Intel():
            # BUG FIX: this used to call self.is_Pentium4(), which does not
            # exist (the predicate is _is_PentiumIV); the dynamic-attribute
            # machinery silently turned the AttributeError into None, so
            # has_sse2 never returned True on Intel.
            return self.is_PentiumIV() or self.is_PentiumM() \
                   or self.is_Core2()
        elif self.is_AMD():
            return self.is_AMD64()
        else:
            return False
    def _has_3dnow(self):
        return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
    def _has_3dnowext(self):
        return self.is_AMD() and self.info[0]['Family'] in [6, 15]
# Select the CPUInfo implementation matching the running platform; fall
# back to the feature-less base class on unknown systems.
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
    cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
    cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
    cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
    cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
    cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
    # Cygwin exposes /proc/cpuinfo, so the Linux reader works there too.
    cpuinfo = LinuxCPUInfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
    cpuinfo = CPUInfoBase
# Module-level singleton queried by the rest of numpy.distutils.
cpu = cpuinfo()
#if __name__ == "__main__":
#
# cpu.is_blaa()
# cpu.is_Intel()
# cpu.is_Alpha()
#
# print('CPU information:'),
# for name in dir(cpuinfo):
# if name[0]=='_' and name[1]!='_':
# r = getattr(cpu,name[1:])()
# if r:
# if r!=1:
# print('%s=%s' %(name[1:],r))
# else:
# print(name[1:]),
# print()
| apache-2.0 |
fairbird/OpenPLI-TSimage | lib/python/Components/Converter/RdsInfo.py | 163 | 1649 | from enigma import iRdsDecoder, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
class RdsInfo(Converter, object):
	"""Converter exposing RDS decoder state (RadioText, RtpText or Rass
	interactive availability) of the current service to the skin."""
	RASS_INTERACTIVE_AVAILABLE = 0
	RTP_TEXT_CHANGED = 1
	RADIO_TEXT_CHANGED = 2
	def __init__(self, type):
		Converter.__init__(self, type)
		# Map the skin argument to (mode, service events we react to).
		self.type, self.interesting_events = {
			"RadioText": (self.RADIO_TEXT_CHANGED, (iPlayableService.evUpdatedRadioText,)),
			"RtpText": (self.RTP_TEXT_CHANGED, (iPlayableService.evUpdatedRtpText,)),
			"RasInteractiveAvailable": (self.RASS_INTERACTIVE_AVAILABLE, (iPlayableService.evUpdatedRassInteractivePicMask,))
		}[type]
	@cached
	def getText(self):
		# Current RDS text for the configured mode; empty when no decoder.
		decoder = self.source.decoder
		text = ""
		if decoder:
			if self.type == self.RADIO_TEXT_CHANGED:
				text = decoder.getText(iRdsDecoder.RadioText)
			elif self.type == self.RTP_TEXT_CHANGED:
				text = decoder.getText(iRdsDecoder.RtpText)
			else:
				print "unknown RdsInfo Converter type", self.type
		return text
	text = property(getText)
	@cached
	def getBoolean(self):
		# True when the configured kind of RDS data is currently available.
		decoder = self.source.decoder
		if self.type == self.RASS_INTERACTIVE_AVAILABLE:
			# Bit 0 of the first mask byte flags Rass interactive pages.
			mask = decoder and decoder.getRassInteractiveMask()
			return (mask and mask[0] & 1 and True) or False
		elif self.type == self.RADIO_TEXT_CHANGED:
			return (len(decoder.getText(iRdsDecoder.RadioText)) and True) or False
		elif self.type == self.RTP_TEXT_CHANGED:
			return (len(decoder.getText(iRdsDecoder.RtpText)) and True) or False
	boolean = property(getBoolean)
	def changed(self, what):
		# Only re-evaluate for the service events registered in __init__.
		if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
			Converter.changed(self, what)
| gpl-2.0 |
KMTsvetanov/Zend2all | node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape a path for a Ninja file: '$ ', ' ' and ':' are metacharacters."""
    for plain, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(plain, escaped)
    return word
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
self._line('pool %s' % name)
self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
    """Write 'text' word-wrapped at self.width characters.

    Over-long lines are split with ninja's ' $' continuation marker.
    A wrap point must be a space preceded by an even number of '$'
    characters — an odd count would mean the space itself is escaped
    and breaking there would change the meaning of the line.
    """
    leading_space = ' ' * indent
    while len(leading_space) + len(text) > self.width:
        # The text is too wide; wrap if possible.

        # Find the rightmost space that would obey our width constraint and
        # that's not an escaped space.
        available_space = self.width - len(leading_space) - len(' $')
        space = available_space
        while True:
            space = text.rfind(' ', 0, space)
            if space < 0 or \
               self._count_dollars_before_index(text, space) % 2 == 0:
                break

        if space < 0:
            # No such space; just use the first unescaped space we can find.
            space = available_space - 1
            while True:
                space = text.find(' ', space + 1)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
        if space < 0:
            # Give up on breaking.
            break

        self.output.write(leading_space + text[0:space] + ' $\n')
        text = text[space+1:]

        # Subsequent lines are continuations, so indent them.
        leading_space = ' ' * (indent+2)

    self.output.write(leading_space + text + '\n')
def _as_list(self, value):
    """Normalize *value* to a list: None -> [], list -> as-is, else [value].

    (Parameter renamed from `input`, which shadowed the builtin; it is
    only ever called positionally.)
    """
    if value is None:
        return []
    return value if isinstance(value, list) else [value]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file without
    further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is ninja's sole escape metacharacter; doubling it neutralizes it.
    return '$$'.join(string.split('$'))
| bsd-3-clause |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/ctypes/test/test_anon.py | 264 | 2051 | import unittest
from ctypes import *
class AnonTest(unittest.TestCase):
    """Tests for ctypes' anonymous-field support (the _anonymous_ hook)."""

    def test_anon(self):
        # Fields of an anonymous union member are promoted into the
        # enclosing Structure at the member's own offset.
        class ANON(Union):
            _fields_ = [("a", c_int),
                        ("b", c_int)]

        class Y(Structure):
            _fields_ = [("x", c_int),
                        ("_", ANON),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        self.assertEqual(Y.a.offset, sizeof(c_int))
        self.assertEqual(Y.b.offset, sizeof(c_int))

        self.assertEqual(ANON.a.offset, 0)
        self.assertEqual(ANON.b.offset, 0)

    def test_anon_nonseq(self):
        # TypeError: _anonymous_ must be a sequence
        self.assertRaises(TypeError,
                          lambda: type(Structure)("Name",
                                                  (Structure,),
                                                  {"_fields_": [], "_anonymous_": 42}))

    def test_anon_nonmember(self):
        # AttributeError: type object 'Name' has no attribute 'x'
        self.assertRaises(AttributeError,
                          lambda: type(Structure)("Name",
                                                  (Structure,),
                                                  {"_fields_": [],
                                                   "_anonymous_": ["x"]}))

    def test_nested(self):
        # Anonymous members promote fields recursively through nesting:
        # ANON_S.a surfaces on Y via the anonymous ANON_U member.
        class ANON_S(Structure):
            _fields_ = [("a", c_int)]

        class ANON_U(Union):
            _fields_ = [("_", ANON_S),
                        ("b", c_int)]
            _anonymous_ = ["_"]

        class Y(Structure):
            _fields_ = [("x", c_int),
                        ("_", ANON_U),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        self.assertEqual(Y.x.offset, 0)
        self.assertEqual(Y.a.offset, sizeof(c_int))
        self.assertEqual(Y.b.offset, sizeof(c_int))
        self.assertEqual(Y._.offset, sizeof(c_int))
        self.assertEqual(Y.y.offset, sizeof(c_int) * 2)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| mit |
oudalab/phyllo | phyllo/extractors/passio_perpetuaDB.py | 1 | 2535 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
def main():
    """Scrape the Passio Perpetuae from The Latin Library into SQLite.

    Downloads the single-page text, splits each paragraph into numbered
    verses, and inserts one row per verse into the `texts` table of the
    local `texts.db` database.  Re-running first deletes rows from any
    previous run of this scraper.
    """
    # The collection URL below.
    collURL = 'http://www.thelatinlibrary.com/perp.html'
    collOpen = urllib.request.urlopen(collURL)
    collSOUP = BeautifulSoup(collOpen, 'html5lib')
    author = "unknown"
    colltitle = "PASSIO SANCTARUM PERPETUAE ET FELICITATI"
    date = "no date found"
    textsURL = [collURL]

    # Verse numbers in the source look like "1.", "2.", ...  Raw string
    # (the original "[0-9]+\." relied on '\.' not being a recognized
    # escape) and compiled once instead of per paragraph.
    verse_re = re.compile(r'[0-9]+\.')

    with sqlite3.connect('texts.db') as db:
        c = db.cursor()
        c.execute(
            'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
            ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
            ' link TEXT, documentType TEXT)')
        c.execute("DELETE FROM texts WHERE title = 'Perpetua et Felicitatis'")

        for url in textsURL:
            chapter = "Preface"
            verse = 0
            title = "Perpetua et Felicitatis"
            openurl = urllib.request.urlopen(url)
            textsoup = BeautifulSoup(openurl, 'html5lib')
            getp = textsoup.find_all('p')
            for p in getp:
                # make sure it's not a paragraph without the main text
                try:
                    if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                 'internal_navigation']:  # these are not part of the main t
                        continue
                except (KeyError, IndexError):
                    # Paragraph has no class attribute (or an empty class
                    # list); treat it as main text.  (Was a bare except.)
                    pass
                text = p.get_text()
                text = text.strip()
                if text.startswith('This text'):
                    continue
                if p.find('b') is not None:
                    # A bold heading marks the start of a new chapter;
                    # strip it from the body and restart verse numbering.
                    chapter = p.find('b').string.strip()
                    text = text.replace(chapter, '')
                    verse = 0
                verses = verse_re.split(text)
                for v in verses:
                    if v.startswith('Christian'):
                        continue
                    if v is None or v == '' or v.isspace():
                        continue
                    # verse number assignment.
                    verse += 1
                    c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
                              (None, colltitle, title, 'Latin', author, date, chapter,
                               verse, v, url, 'prose'))
# Run the scraper when executed as a script.
if __name__ == '__main__':
    main()
| apache-2.0 |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/django/contrib/auth/tests/test_views.py | 22 | 33423 | import itertools
import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import int_to_base36, urlsafe_base64_decode, urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils.importlib import import_module
from django.utils._os import upath
from django.test import TestCase
from django.test.utils import override_settings, patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
@override_settings(
    LANGUAGES=(
        ('en', 'English'),
    ),
    LANGUAGE_CODE='en',
    TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the following test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def login(self, password='password'):
        # Log the fixture user in through the login view; a session key
        # must be present afterwards.  Returns the (redirect) response.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertTrue(SESSION_KEY in self.client.session)
        return response

    def logout(self):
        # Log out via the admin logout view and verify the session is gone.
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(SESSION_KEY not in self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.
        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
    """Checks that every auth view URL pattern has a reversible name."""
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
    """Tests for the password-reset flow: request email, confirm link,
    set new password, and the various redirect targets."""

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
                                    {'email': 'staffmember@example.com'},
                                    HTTP_HOST='adminsite.com'
                                    )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post('/password_reset/',
                                        {'email': 'staffmember@example.com'},
                                        HTTP_HOST='www.example:dr.frankenstein@evil.tld'
                                        )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post('/admin_password_reset/',
                                        {'email': 'staffmember@example.com'},
                                        HTTP_HOST='www.example:dr.frankenstein@evil.tld'
                                        )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract (full URL, path) of the reset link from the email body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_valid_base36(self):
        # Remove in Django 1.7
        url, path = self._test_confirm_start()
        path_parts = path.strip("/").split("/")
        # construct an old style (base36) URL by converting the base64 ID
        path_parts[1] = int_to_base36(int(urlsafe_base64_decode(path_parts[1])))
        response = self.client.get("/%s/%s-%s/" % tuple(path_parts))
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]

        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user_base36(self):
        # Remove in Django 1.7
        response = self.client.get('/reset/123456-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user_base36(self):
        # Remove in Django 1.7
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]

        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))

        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
                                    {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
                                    {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        response = self.client.post('/password_reset/custom_redirect/named/',
                                    {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """Password-reset confirm flow with a swapped (custom) user model."""
    fixtures = ['custom_user.json']

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract (full URL, path) of the reset link from the email body.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
    """Tests for the password-change view, its validation and redirects."""

    def fail_login(self, password='password'):
        # Attempt a login expected to fail and assert the standard
        # invalid-login form error is shown.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        response = self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # Old password no longer works; new one does.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        # Anonymous access to the done page redirects to login.
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: context, open-redirect protection and
    CSRF token rotation on login."""

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if Site._meta.installed:
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                        'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        login_url = reverse('login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)

    def test_login_form_contains_request(self):
        # 15198
        response = self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.

    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["CSRF_COOKIE_USED"] = True
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value

        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        req.REQUEST = req.POST

        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value

        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""

    def assertLoginURLEquals(self, url, parse_qs=False):
        # Hitting a login_required view anonymously must redirect to *url*.
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'https:///login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""

    def assertLoginRedirectURLEqual(self, url):
        # A successful login must redirect to *url*.
        response = self.login()
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url)

    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')

    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')

    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        self.assertLoginRedirectURLEqual('/password_reset/')

    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view: templates, redirect targets,
    open-redirect protection and session-language preservation."""

    def confirm_logged_out(self):
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')

        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')

        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        logout_url = reverse('logout')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()

    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session['django_language'] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key

        self.client.get('/logout/')
        self.assertEqual(self.client.session['django_language'], 'pl')
@skipIfCustomUser
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls_admin'
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=1)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
self.get_user_data(self.admin)
)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
    def test_user_change_password(self):
        """The dedicated password form changes the password, logs
        'Changed password.', and the new password actually works."""
        response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
            'password1': 'password1',
            'password2': 'password1',
        })
        self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed password.')
        # Re-authenticate with the new password to prove it took effect.
        self.logout()
        self.login(password='password1')
| mit |
n0trax/ansible | lib/ansible/constants.py | 3 | 4412 | # Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os # used to set lang and for backwards compat get_config
from ast import literal_eval
from jinja2 import Template
from string import ascii_letters, digits
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_TRUE
from ansible.module_utils.six import string_types
from ansible.config.manager import ConfigManager, ensure_type
def _deprecated(msg):
''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write '''
try:
from __main__ import display
display.deprecated(msg, version='2.8')
except:
import sys
sys.stderr.write('[DEPRECATED] %s, to be removed in 2.8' % msg)
def mk_boolean(value):
    """Backwards-compatibility shim; the real implementation now lives in
    ansible.module_utils.parsing.convert_bool.boolean()."""
    _deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
    result = boolean(value, strict=False)
    return result
def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
    """Kept for backwards compatibility, but deprecated.

    Reconstructs the old lookup precedence: environment variable, then ini
    file, then *default_value*; the result is coerced via ensure_type().
    ``expand_relative_paths`` is accepted for API compatibility but is no
    longer honoured by this shim.
    """
    _deprecated('ansible.constants.get_config() is deprecated. There is new config API, see porting docs.')

    # (the original assigned ``value = None`` twice; once is enough)
    value = os.environ.get(env_var, None)
    if value is None:
        try:
            value = config.get_ini_config(parser, [{'key': key, 'section': section}])
        except Exception:
            # Any failure reading the ini source falls through to the default.
            pass
    if value is None:
        value = default_value

    value = ensure_type(value, value_type)

    return value
def set_constant(name, value, export=vars()):
    ''' Publish *value* as a module-level constant named *name*.

    The mutable default ``export=vars()`` is deliberate: it is evaluated once
    at definition time and bound to this module's namespace, so assignments
    here become attributes of ansible.constants.  (Returns nothing.)
    '''
    export[name] = value
### CONSTANTS ### yes, actual ones
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun']
# Per-method strings emitted by the privilege-escalation tools on failure.
BECOME_ERROR_STRINGS = {
    'sudo': 'Sorry, try again.',
    'su': 'Authentication failure',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Permission denied',
    'dzdo': '',
    'ksu': 'Password incorrect',
    'pmrun': 'You are not permitted to run this command'
}  # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
    'sudo': 'sorry, a password is required to run sudo',
    'su': '',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Authorization required',
    'dzdo': '',
    'ksu': 'No password given',
    'pmrun': ''
}  # FIXME: deal with i18n
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
BOOL_TRUE = BOOLEANS_TRUE
CONTROLER_LANG = os.getenv('LANG', 'en_US.UTF-8')
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict')  # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES")  # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
MODULE_REQUIRE_ARGS = ('command', 'win_command', 'shell', 'win_shell', 'raw', 'script')
MODULE_NO_JSON = ('command', 'win_command', 'shell', 'win_shell', 'raw')
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0

### POPULATE SETTINGS FROM CONFIG ###
config = ConfigManager()

# Generate constants from config
for setting in config.data.get_settings():

    value = setting.value
    # Default values written as '{{ ... }}' templates get rendered against
    # the constants defined so far, then literal_eval'd back into Python.
    if setting.origin == 'default' and \
       isinstance(setting.value, string_types) and \
       (setting.value.startswith('{{') and setting.value.endswith('}}')):
        try:
            t = Template(setting.value)
            value = t.render(vars())
            try:
                value = literal_eval(value)
            except ValueError:
                pass  # not a python data structure
        except Exception:
            # Was a bare ``except:``; Exception keeps the "not templatable"
            # best-effort intent without swallowing KeyboardInterrupt.
            pass  # not templatable

    # NOTE(review): ensure_type()'s second argument is a value *type*;
    # passing setting.name looks suspicious -- confirm against
    # ansible.config.manager.ensure_type.
    value = ensure_type(value, setting.name)

    set_constant(setting.name, value)
| gpl-3.0 |
sigmavirus24/pip | tests/lib/__init__.py | 4 | 21080 | from __future__ import absolute_import
from contextlib import contextmanager
import os
import sys
import re
import textwrap
import site
import scripttest
import virtualenv
from tests.lib.path import Path, curdir, u
DATA_DIR = Path(__file__).folder.folder.join("data").abspath
SRC_DIR = Path(__file__).abspath.folder.folder.folder
pyversion = sys.version[:3]
pyversion_tuple = sys.version_info
def path_to_url(path):
    """
    Convert a filesystem path to a file:// URI.

    The path is made absolute and normalised first; path parts are not
    quoted.  (adapted from pip.util)
    """
    absolute = os.path.normpath(os.path.abspath(path))
    drive, remainder = os.path.splitdrive(absolute)
    url_path = '/'.join(remainder.split(os.path.sep))
    prefix = 'file:///' + drive if drive else 'file://'
    return prefix + url_path
class TestData(object):
    """
    Represents a bundle of pre-created test data.
    This copies a pristine set of test data into a root location that is
    designed to be test specific. The reason for this is when running the tests
    concurrently errors can be generated because the related tooling uses
    the directory as a work space. This leads to two concurrent processes
    trampling over each other. This class gets around that by copying all
    data into a directory and operating on the copied data.
    """

    def __init__(self, root, source=None):
        # *source* defaults to the pristine checked-in data directory.
        self.source = source or DATA_DIR
        self.root = Path(root).abspath

    @classmethod
    def copy(cls, root):
        """Create a TestData rooted at *root* and populate it from source."""
        obj = cls(root)
        obj.reset()
        return obj

    def reset(self):
        """Wipe *root* and re-copy the pristine data tree into it."""
        self.root.rmtree()
        self.source.copytree(self.root)

    # The properties below expose well-known sub-directories of the copied
    # data tree, plus file:// URLs pointing at the package directories.

    @property
    def packages(self):
        return self.root.join("packages")

    @property
    def packages2(self):
        return self.root.join("packages2")

    @property
    def packages3(self):
        return self.root.join("packages3")

    @property
    def src(self):
        return self.root.join("src")

    @property
    def indexes(self):
        return self.root.join("indexes")

    @property
    def reqfiles(self):
        return self.root.join("reqfiles")

    @property
    def find_links(self):
        return path_to_url(self.packages)

    @property
    def find_links2(self):
        return path_to_url(self.packages2)

    @property
    def find_links3(self):
        return path_to_url(self.packages3)

    def index_url(self, index="simple"):
        """Return a file:// URL for the named package index."""
        return path_to_url(self.root.join("indexes", index))
class TestFailure(AssertionError):
    """Raised when a pip test "assertion" does not hold."""
class TestPipResult(object):
    """Wrapper around a scripttest ProcResult; unknown attribute access is
    delegated to the wrapped result object."""

    def __init__(self, impl, verbose=False):
        self._impl = impl

        if verbose:
            print(self.stdout)
            if self.stderr:
                print('======= stderr ========')
                print(self.stderr)
                print('=======================')

    def __getattr__(self, attr):
        return getattr(self._impl, attr)

    if sys.platform == 'win32':
        # Normalise Windows line endings so tests can compare against '\n'.

        @property
        def stdout(self):
            return self._impl.stdout.replace('\r\n', '\n')

        @property
        def stderr(self):
            return self._impl.stderr.replace('\r\n', '\n')

        def __str__(self):
            return str(self._impl).replace('\r\n', '\n')
    else:
        # Python doesn't automatically forward __str__ through __getattr__
        def __str__(self):
            return str(self._impl)

    # NOTE(review): the mutable default arguments ([]) below are never
    # mutated by this method, so they are harmless -- but fragile if the
    # implementation ever changes.
    def assert_installed(self, pkg_name, editable=True, with_files=[],
                         without_files=[], without_egg_link=False,
                         use_user_site=False, sub_dir=False):
        """Raise TestFailure unless this result shows *pkg_name* installed as
        described (editable vs. regular, egg-link presence, expected and
        forbidden file contents)."""
        e = self.test_env

        if editable:
            pkg_dir = e.venv / 'src' / pkg_name.lower()
            # If package was installed in a sub directory
            if sub_dir:
                pkg_dir = pkg_dir / sub_dir
        else:
            # Non-editable installs never produce an egg-link.
            without_egg_link = True
            pkg_dir = e.site_packages / pkg_name

        if use_user_site:
            egg_link_path = e.user_site / pkg_name + '.egg-link'
        else:
            egg_link_path = e.site_packages / pkg_name + '.egg-link'

        if without_egg_link:
            if egg_link_path in self.files_created:
                raise TestFailure(
                    'unexpected egg link file created: %r\n%s' %
                    (egg_link_path, self)
                )
        else:
            if egg_link_path not in self.files_created:
                raise TestFailure(
                    'expected egg link file missing: %r\n%s' %
                    (egg_link_path, self)
                )

            egg_link_file = self.files_created[egg_link_path]

            # FIXME: I don't understand why there's a trailing . here
            if not (egg_link_file.bytes.endswith('\n.') and
                    egg_link_file.bytes[:-2].endswith(pkg_dir)):
                raise TestFailure(textwrap.dedent(u('''\
                Incorrect egg_link file %r
                Expected ending: %r
                ------- Actual contents -------
                %s
                -------------------------------''' % (
                    egg_link_file,
                    pkg_dir + '\n.',
                    repr(egg_link_file.bytes))
                )))

        if use_user_site:
            pth_file = e.user_site / 'easy-install.pth'
        else:
            pth_file = e.site_packages / 'easy-install.pth'

        # The pth file must be updated exactly when an egg-link is expected.
        if (pth_file in self.files_updated) == without_egg_link:
            raise TestFailure('%r unexpectedly %supdated by install' % (
                pth_file, (not without_egg_link and 'not ' or '')))

        if (pkg_dir in self.files_created) == (curdir in without_files):
            raise TestFailure(textwrap.dedent('''\
            expected package directory %r %sto be created
            actually created:
            %s
            ''') % (
                pkg_dir,
                (curdir in without_files and 'not ' or ''),
                sorted(self.files_created.keys())))

        for f in with_files:
            if not (pkg_dir / f).normpath in self.files_created:
                raise TestFailure(
                    'Package directory %r missing expected content %r' %
                    (pkg_dir, f)
                )

        for f in without_files:
            if (pkg_dir / f).normpath in self.files_created:
                # NOTE(review): '%f' below looks like a typo for '%r'; as
                # written this raises TypeError if ever triggered -- confirm.
                raise TestFailure(
                    'Package directory %r has unexpected content %f' %
                    (pkg_dir, f)
                )
class PipTestEnvironment(scripttest.TestFileEnvironment):
    """
    A specialized TestFileEnvironment for testing pip
    """

    #
    # Attribute naming convention
    # ---------------------------
    #
    # Instances of this class have many attributes representing paths
    # in the filesystem. To keep things straight, absolute paths have
    # a name of the form xxxx_path and relative paths have a name that
    # does not end in '_path'.

    exe = sys.platform == 'win32' and '.exe' or ''
    verbose = False

    def __init__(self, base_path, *args, **kwargs):
        # Make our base_path a test.lib.path.Path object
        base_path = Path(base_path)

        # Store paths related to the virtual environment
        _virtualenv = kwargs.pop("virtualenv")
        path_locations = virtualenv.path_locations(_virtualenv)

        # Make sure we have test.lib.path.Path objects
        venv, lib, include, bin = map(Path, path_locations)
        # workaround for https://github.com/pypa/virtualenv/issues/306
        if hasattr(sys, "pypy_version_info"):
            lib = os.path.join(venv, 'lib-python', pyversion)
        self.venv_path = venv
        self.lib_path = lib
        self.include_path = include
        self.bin_path = bin

        if hasattr(sys, "pypy_version_info"):
            self.site_packages_path = self.venv_path.join("site-packages")
        else:
            self.site_packages_path = self.lib_path.join("site-packages")

        self.user_base_path = self.venv_path.join("user")
        self.user_bin_path = self.user_base_path.join(
            self.bin_path - self.venv_path
        )
        self.user_site_path = self.venv_path.join(
            "user",
            site.USER_SITE[len(site.USER_BASE) + 1:],
        )

        # Create a Directory to use as a scratch pad
        self.scratch_path = base_path.join("scratch").mkdir()

        # Set our default working directory
        kwargs.setdefault("cwd", self.scratch_path)

        # Setup our environment
        environ = kwargs.get("environ")
        if environ is None:
            environ = os.environ.copy()

        environ["PATH"] = Path.pathsep.join(
            [self.bin_path] + [environ.get("PATH", [])],
        )
        environ["PYTHONUSERBASE"] = self.user_base_path
        # Writing bytecode can mess up updated file detection
        environ["PYTHONDONTWRITEBYTECODE"] = "1"
        kwargs["environ"] = environ

        # Call the TestFileEnvironment __init__
        super(PipTestEnvironment, self).__init__(base_path, *args, **kwargs)

        # Expand our absolute path directories into relative
        for name in ["base", "venv", "lib", "include", "bin", "site_packages",
                     "user_base", "user_site", "user_bin", "scratch"]:
            real_name = "%s_path" % name
            setattr(self, name, getattr(self, real_name) - self.base_path)

        # Make sure temp_path is a Path object
        self.temp_path = Path(self.temp_path)
        # Ensure the tmp dir exists, things break horribly if it doesn't
        self.temp_path.mkdir()

        # create easy-install.pth in user_site, so we always have it updated
        # instead of created
        self.user_site_path.makedirs()
        self.user_site_path.join("easy-install.pth").touch()

    def _ignore_file(self, fn):
        # Exclude compiled bytecode from filesystem-state diffs.
        if fn.endswith('__pycache__') or fn.endswith(".pyc"):
            result = True
        else:
            result = super(PipTestEnvironment, self)._ignore_file(fn)
        return result

    def run(self, *args, **kw):
        """Run a command in the environment and wrap it in a TestPipResult."""
        if self.verbose:
            print('>> running %s %s' % (args, kw))
        cwd = kw.pop('cwd', None)
        run_from = kw.pop('run_from', None)
        assert not cwd or not run_from, "Don't use run_from; it's going away"
        cwd = cwd or run_from or self.cwd
        return TestPipResult(
            super(PipTestEnvironment, self).run(cwd=cwd, *args, **kw),
            verbose=self.verbose,
        )

    def pip(self, *args, **kwargs):
        """Run the in-environment pip executable with *args*."""
        # On old versions of Python, urllib3/requests will raise a warning
        # about the lack of an SSLContext. Expect it when running commands
        # that will touch the outside world.
        if (pyversion_tuple < (2, 7, 9) and
                args and args[0] in ('search', 'install', 'download')):
            kwargs['expect_stderr'] = True
        # Python 2.6 is deprecated and we emit a warning on it.
        if pyversion_tuple[:2] == (2, 6):
            kwargs['expect_stderr'] = True

        return self.run("pip", *args, **kwargs)

    def pip_install_local(self, *args, **kwargs):
        """pip install using only the local test package directory
        (no network, no index)."""
        return self.pip(
            "install", "--no-index",
            "--find-links", path_to_url(os.path.join(DATA_DIR, "packages")),
            *args, **kwargs
        )
# FIXME ScriptTest does something similar, but only within a single
# ProcResult; this generalizes it so states can be compared across
# multiple commands. Maybe should be rolled into ScriptTest?
def diff_states(start, end, ignore=None):
    """
    Differences two "filesystem states" as represented by dictionaries
    of FoundFile and FoundDir objects.

    Returns a dictionary with following keys:

    ``deleted``
        Dictionary of files/directories found only in the start state.
    ``created``
        Dictionary of files/directories found only in the end state.
    ``updated``
        Dictionary of files whose size has changed (FIXME not entirely
        reliable, but comparing contents is not possible because
        FoundFile.bytes is lazy, and comparing mtime doesn't help if
        we want to know if a file has been returned to its earlier
        state).

    Ignores mtime and other file attributes; only presence/absence and
    size are considered.
    """
    ignore = ignore or []

    def prefix_match(path, prefix):
        # True when *path* is *prefix* itself or lives underneath it.
        if path == prefix:
            return True
        prefix = prefix.rstrip(os.path.sep) + os.path.sep
        return path.startswith(prefix)

    # Set/dict comprehensions instead of set([listcomp]) / dict([listcomp]).
    start_keys = {k for k in start
                  if not any(prefix_match(k, i) for i in ignore)}
    end_keys = {k for k in end
                if not any(prefix_match(k, i) for i in ignore)}

    deleted = {k: start[k] for k in start_keys.difference(end_keys)}
    created = {k: end[k] for k in end_keys.difference(start_keys)}

    updated = {}
    for k in start_keys.intersection(end_keys):
        if start[k].size != end[k].size:
            updated[k] = end[k]

    return dict(deleted=deleted, created=created, updated=updated)
def assert_all_changes(start_state, end_state, expected_changes):
    """
    Fail (raise TestFailure) if anything changed between the two states that
    is not listed in *expected_changes*.

    Each state is either a dict mapping paths to scripttest
    [FoundFile|FoundDir] objects, or a TestPipResult whose files_before /
    files_after will be used. Listing a directory in *expected_changes*
    covers everything below that directory.

    Returns the (empty) diff for further inspection.
    """
    __tracebackhide__ = True

    if isinstance(start_state, TestPipResult):
        start_files = start_state.files_before
    else:
        start_files = start_state
    if isinstance(end_state, TestPipResult):
        end_files = end_state.files_after
    else:
        end_files = end_state

    diff = diff_states(start_files, end_files, ignore=expected_changes)
    if list(diff.values()) != [{}, {}, {}]:
        details = [k + ': ' + ', '.join(v.keys()) for k, v in diff.items()]
        raise TestFailure('Unexpected changes:\n' + '\n'.join(details))

    # Don't throw away this potentially useful information
    return diff
def _create_test_package_with_subdirectory(script, subdirectory):
    """Create a git-committed 'version_pkg' project in the scratch dir whose
    tree also contains a second installable package under *subdirectory*.

    Returns the path to the top-level project directory.
    """
    script.scratch_path.join("version_pkg").mkdir()
    version_pkg_path = script.scratch_path / 'version_pkg'
    version_pkg_path.join("version_pkg.py").write(textwrap.dedent("""
        def main():
            print('0.1')
        """))
    version_pkg_path.join("setup.py").write(
        textwrap.dedent("""
            from setuptools import setup, find_packages
            setup(name='version_pkg',
                  version='0.1',
                  packages=find_packages(),
                  py_modules=['version_pkg'],
                  entry_points=dict(console_scripts=['version_pkg=version_pkg:main']))
        """))
    # Nested package: same console-script name, different module.
    subdirectory_path = version_pkg_path.join(subdirectory)
    subdirectory_path.mkdir()
    subdirectory_path.join('version_subpkg.py').write(textwrap.dedent("""
        def main():
            print('0.1')
        """))
    subdirectory_path.join('setup.py').write(
        textwrap.dedent("""
            from setuptools import setup, find_packages
            setup(name='version_subpkg',
                  version='0.1',
                  packages=find_packages(),
                  py_modules=['version_subpkg'],
                  entry_points=dict(console_scripts=['version_pkg=version_subpkg:main']))
        """))

    script.run('git', 'init', cwd=version_pkg_path)
    script.run('git', 'add', '.', cwd=version_pkg_path)
    script.run(
        'git', 'commit', '-q',
        '--author', 'pip <pypa-dev@googlegroups.com>',
        '-am', 'initial version', cwd=version_pkg_path
    )

    return version_pkg_path
def _create_test_package_with_srcdir(script, name='version_pkg', vcs='git'):
    """Create a VCS-committed project using a src/ layout
    (setup.py in a subdir, package code under subdir/src/pkg).

    Returns the path produced by _vcs_add for the chosen *vcs*.
    """
    script.scratch_path.join(name).mkdir()
    version_pkg_path = script.scratch_path / name
    subdir_path = version_pkg_path.join('subdir')
    subdir_path.mkdir()
    src_path = subdir_path.join('src')
    src_path.mkdir()
    pkg_path = src_path.join('pkg')
    pkg_path.mkdir()
    pkg_path.join('__init__.py').write('')
    subdir_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup, find_packages
        setup(
            name='{name}',
            version='0.1',
            packages=find_packages(),
            package_dir={{'': 'src'}},
        )
    """.format(name=name)))
    return _vcs_add(script, version_pkg_path, vcs)
def _create_test_package(script, name='version_pkg', vcs='git'):
    """Create a minimal single-module project named *name* with a console
    script entry point, committed to the chosen *vcs*.

    Returns the path produced by _vcs_add.
    """
    script.scratch_path.join(name).mkdir()
    version_pkg_path = script.scratch_path / name
    version_pkg_path.join("%s.py" % name).write(textwrap.dedent("""
        def main():
            print('0.1')
    """))
    version_pkg_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup, find_packages
        setup(
            name='{name}',
            version='0.1',
            packages=find_packages(),
            py_modules=['{name}'],
            entry_points=dict(console_scripts=['{name}={name}:main'])
        )
    """.format(name=name)))
    return _vcs_add(script, version_pkg_path, vcs)
def _vcs_add(script, version_pkg_path, vcs='git'):
    """Commit *version_pkg_path* to the given *vcs* ('git', 'hg', 'svn' or
    'bazaar') and return the resulting working-copy path.

    For svn the tree is imported into a fresh repository and re-checked-out,
    so the returned path differs from the input path.
    """
    if vcs == 'git':
        script.run('git', 'init', cwd=version_pkg_path)
        script.run('git', 'add', '.', cwd=version_pkg_path)
        script.run(
            'git', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-am', 'initial version', cwd=version_pkg_path,
        )
    elif vcs == 'hg':
        script.run('hg', 'init', cwd=version_pkg_path)
        script.run('hg', 'add', '.', cwd=version_pkg_path)
        script.run(
            'hg', 'commit', '-q',
            '--user', 'pip <pypa-dev@googlegroups.com>',
            '-m', 'initial version', cwd=version_pkg_path,
        )
    elif vcs == 'svn':
        repo_url = _create_svn_repo(script, version_pkg_path)
        script.run(
            'svn', 'checkout', repo_url, 'pip-test-package',
            cwd=script.scratch_path
        )
        checkout_path = script.scratch_path / 'pip-test-package'

        # svn internally stores windows drives as uppercase; we'll match that.
        checkout_path = checkout_path.replace('c:', 'C:')

        version_pkg_path = checkout_path
    elif vcs == 'bazaar':
        script.run('bzr', 'init', cwd=version_pkg_path)
        script.run('bzr', 'add', '.', cwd=version_pkg_path)
        script.run(
            'bzr', 'whoami', 'pip <pypa-dev@googlegroups.com>',
            cwd=version_pkg_path)
        script.run(
            'bzr', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-m', 'initial version', cwd=version_pkg_path,
        )
    else:
        raise ValueError('Unknown vcs: %r' % vcs)
    return version_pkg_path
def _create_svn_repo(script, version_pkg_path):
    """Create an svn repository in the scratch dir, import
    *version_pkg_path* into its trunk, and return the trunk URL."""
    repo_url = path_to_url(
        script.scratch_path / 'pip-test-package-repo' / 'trunk')
    script.run(
        'svnadmin', 'create', 'pip-test-package-repo',
        cwd=script.scratch_path
    )
    script.run(
        'svn', 'import', version_pkg_path, repo_url,
        '-m', 'Initial import of pip-test-package',
        cwd=script.scratch_path
    )
    return repo_url
def _change_test_package_version(script, version_pkg_path):
    """Overwrite version_pkg.py with a new version and commit the change,
    so tests can distinguish old and new revisions of the test package."""
    version_pkg_path.join("version_pkg.py").write(textwrap.dedent('''\
        def main():
            print("some different version")'''))
    # Clean ignored/untracked files so the commit only carries the edit.
    script.run(
        'git', 'clean', '-qfdx',
        cwd=version_pkg_path,
        expect_stderr=True,
    )
    script.run(
        'git', 'commit', '-q',
        '--author', 'pip <pypa-dev@googlegroups.com>',
        '-am', 'messed version',
        cwd=version_pkg_path,
        expect_stderr=True,
    )
def assert_raises_regexp(exception, reg, run, *args, **kwargs):
    """Assert that run(*args, **kwargs) raises *exception* with a message
    matching the regex *reg* (like unittest's assertRaisesRegexp)."""
    __tracebackhide__ = True

    try:
        run(*args, **kwargs)
    except exception as e:
        # ``except ... as e`` replaces the dated ``sys.exc_info()[1]`` idiom.
        assert re.search(reg, str(e)), str(e)
    else:
        # Moved out of the try block: when *exception* is AssertionError the
        # original's ``assert False`` inside ``try`` was caught by its own
        # except clause and could pass spuriously.
        assert False, "%s should have been thrown" % exception
@contextmanager
def requirements_file(contents, tmpdir):
    """Yield a Path to a requirements file holding *contents*.

    The file exists only while the context manager is open.

    :param tmpdir: A Path to the folder in which to create the file
    """
    req_path = tmpdir / 'reqs.txt'
    req_path.write(contents)
    yield req_path
    req_path.remove()
def create_test_package_with_setup(script, **setup_kwargs):
    """Create a package in the scratch dir whose setup.py calls
    setup(**setup_kwargs).  *setup_kwargs* must include 'name'.

    Returns the package directory path.
    """
    assert 'name' in setup_kwargs, setup_kwargs
    pkg_path = script.scratch_path / setup_kwargs['name']
    pkg_path.mkdir()
    # The kwargs are embedded via %r so setup.py reconstructs them literally.
    pkg_path.join("setup.py").write(textwrap.dedent("""
        from setuptools import setup
        kwargs = %r
        setup(**kwargs)
    """) % setup_kwargs)
    return pkg_path
| mit |
gogozs/shadowsocks | tests/test_udp_src.py | 1009 | 2482 | #!/usr/bin/python
import socket
import socks
SERVER_IP = '127.0.0.1'
SERVER_PORT = 1081
if __name__ == '__main__':
    # Exercises a SOCKS5 proxy's UDP ASSOCIATE relay behaviour: datagrams
    # from one client socket must all leave the proxy from a single relay
    # source port, and distinct client sockets must get distinct relay ports.

    # Test 1: same source port IPv4
    sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.SOL_UDP)
    sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    sock_out.bind(('127.0.0.1', 9000))

    sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.SOL_UDP)
    sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.SOL_UDP)

    sock_in1.bind(('127.0.0.1', 9001))
    sock_in2.bind(('127.0.0.1', 9002))

    sock_out.sendto(b'data', ('127.0.0.1', 9001))
    result1 = sock_in1.recvfrom(8)

    sock_out.sendto(b'data', ('127.0.0.1', 9002))
    result2 = sock_in2.recvfrom(8)

    sock_out.close()
    sock_in1.close()
    sock_in2.close()

    # make sure they're from the same source port
    assert result1 == result2

    # Test 2: same source port IPv6
    # try again from the same port but IPv6
    sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.SOL_UDP)
    sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    sock_out.bind(('127.0.0.1', 9000))

    sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                             socket.SOL_UDP)
    sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                             socket.SOL_UDP)

    sock_in1.bind(('::1', 9001))
    sock_in2.bind(('::1', 9002))

    sock_out.sendto(b'data', ('::1', 9001))
    result1 = sock_in1.recvfrom(8)

    sock_out.sendto(b'data', ('::1', 9002))
    result2 = sock_in2.recvfrom(8)

    sock_out.close()
    sock_in1.close()
    sock_in2.close()

    # make sure they're from the same source port
    assert result1 == result2

    # Test 3: different source ports IPv6
    # A new client socket (port 9003) must be relayed from a new source port.
    sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.SOL_UDP)
    sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    sock_out.bind(('127.0.0.1', 9003))

    sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                             socket.SOL_UDP)
    sock_in1.bind(('::1', 9001))
    sock_out.sendto(b'data', ('::1', 9001))
    result3 = sock_in1.recvfrom(8)

    # make sure they're from different source ports
    assert result1 != result3

    sock_out.close()
    sock_in1.close()
| apache-2.0 |
ppwwyyxx/tensorflow | tensorflow/python/debug/lib/debug_graph_reconstruction_test.py | 4 | 7605 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the reconstruction of non-debugger-decorated GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase):
  """Checks that debugger-decorated partition graphs can be reconstructed
  back into the original (non-debug) partition graphs."""

  # Node types inserted by partitioning/debugging that are excluded from the
  # graph comparison.
  _OP_TYPE_BLACKLIST = (
      "_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")

  def _no_rewrite_session_config(self):
    """Return a ConfigProto with grappler rewrites disabled, so partition
    graphs stay structurally comparable."""
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
        pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF,
        min_graph_nodes=-1)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    return config_pb2.ConfigProto(graph_options=graph_options)

  def setUp(self):
    super(ReconstructNonDebugGraphTest, self).setUp()
    self._dump_dir = tempfile.mkdtemp()
    self._debug_url = "file://" + self._dump_dir
    ops.reset_default_graph()

  def tearDown(self):
    file_io.delete_recursively(self._dump_dir)
    super(ReconstructNonDebugGraphTest, self).tearDown()

  def _graphDefWithoutBlacklistedNodes(self, graph_def):
    """Return a copy of *graph_def* with blacklisted node types removed and
    debugger-specific attribute tweaks normalised."""
    output_graph_def = graph_pb2.GraphDef()
    for node in graph_def.node:
      if node.op not in self._OP_TYPE_BLACKLIST:
        new_node = output_graph_def.node.add()
        new_node.CopyFrom(node)

        if new_node.op == "Enter":
          # The debugger sets parallel_iterations attribute of while-loop Enter
          # nodes to 1 for debugging.
          for attr_key in new_node.attr:
            if attr_key == "parallel_iterations":
              new_node.attr[attr_key].i = 1
        elif new_node.op == "Switch":
          # We don't check the inputs to Switch ops as their inputs may be
          # Send/Recv nodes.
          del new_node.input[:]

    return output_graph_def

  def _compareOriginalAndReconstructedGraphDefs(self,
                                                sess,
                                                fetches,
                                                feed_dict=None,
                                                expected_output=None):
    """Run *fetches* without and with debug watches, then assert the
    reconstructed non-debug partition graphs match the originals."""
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    run_metadata = config_pb2.RunMetadata()
    output = sess.run(fetches, feed_dict=feed_dict, options=run_options,
                      run_metadata=run_metadata)
    if expected_output is not None:
      self.assertAllClose(expected_output, output)
    non_debug_graph_defs = run_metadata.partition_graphs

    debug_utils.watch_graph(
        run_options, sess.graph, debug_urls=self._debug_url)
    run_metadata = config_pb2.RunMetadata()
    output = sess.run(fetches, feed_dict=feed_dict, options=run_options,
                      run_metadata=run_metadata)
    if expected_output is not None:
      self.assertAllClose(expected_output, output)

    dump = debug_data.DebugDumpDir(
        self._dump_dir, partition_graphs=run_metadata.partition_graphs,
        validate=True)
    reconstructed = dump.reconstructed_non_debug_partition_graphs()

    self.assertEqual(len(non_debug_graph_defs), len(reconstructed))
    for i, non_debug_graph_def in enumerate(non_debug_graph_defs):
      device_name = debug_graphs._infer_device_name(non_debug_graph_def)
      test_util.assert_equal_graph_def(
          self._graphDefWithoutBlacklistedNodes(reconstructed[device_name]),
          self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))

      # Test debug_graphs.reconstruct_non_debug_graph_def.
      reconstructed_again = (
          debug_graphs.reconstruct_non_debug_graph_def(
              run_metadata.partition_graphs[i]))
      test_util.assert_equal_graph_def(
          self._graphDefWithoutBlacklistedNodes(reconstructed_again),
          self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))

  def testReconstructSimpleGraph(self):
    with session.Session() as sess:
      u = variables.Variable([12.0], name="u")
      v = variables.Variable([30.0], name="v")
      w = math_ops.add(u, v, name="w")

      self.evaluate(u.initializer)
      self.evaluate(v.initializer)

      self._compareOriginalAndReconstructedGraphDefs(
          sess, w, expected_output=[42.0])

  def testReconstructGraphWithControlEdge(self):
    with session.Session() as sess:
      a = variables.Variable(10.0, name="a")
      with ops.control_dependencies([a]):
        b = math_ops.add(a, a, name="b")
      with ops.control_dependencies([a, b]):
        c = math_ops.multiply(b, b, name="c")

      self.evaluate(a.initializer)

      self._compareOriginalAndReconstructedGraphDefs(
          sess, c, expected_output=400.0)

  def testReonstructGraphWithCond(self):
    with session.Session(config=self._no_rewrite_session_config()) as sess:
      x = variables.Variable(10.0, name="x")
      y = variables.Variable(20.0, name="y")
      cond = control_flow_ops.cond(
          x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))

      self.evaluate(x.initializer)
      self.evaluate(y.initializer)

      self._compareOriginalAndReconstructedGraphDefs(
          sess, cond, expected_output=21.0)

  def testReconstructGraphWithWhileLoop(self):
    with session.Session(config=self._no_rewrite_session_config()) as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      loop_cond = lambda i: math_ops.less(i, 16)
      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

      self._compareOriginalAndReconstructedGraphDefs(sess, loop)

  def testReconstructGraphWithGradients(self):
    with session.Session(config=self._no_rewrite_session_config()) as sess:
      u = variables.Variable(12.0, name="u")
      v = variables.Variable(30.0, name="v")
      x = constant_op.constant(1.1, name="x")
      toy_loss = x * (u - v)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")

      self.evaluate(u.initializer)
      self.evaluate(v.initializer)

      self._compareOriginalAndReconstructedGraphDefs(sess, train_op)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
kevclarx/ansible | lib/ansible/plugins/action/eos_config.py | 126 | 4193 | #
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.eos import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for eos_config: renders an optional ``src`` template,
    delegates to the eos_config module, writes a requested backup, and strips
    private (``__*__``) keys from the result."""

    def run(self, tmp=None, task_vars=None):
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # Bugfix: BaseException.message does not exist on Python 3;
                # use to_text(exc) instead of exc.message.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])

            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters.  Bugfix: iterate over a snapshot of the
        # keys -- deleting from a dict while iterating its live view raises
        # RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Return the role path when running inside a role, otherwise the
        playbook base directory."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write *contents* to backup/<host>_config.<timestamp>, replacing
        any previous backups for *host*; return the new file's path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Bugfix: use a context manager so the file handle is always closed
        # (the original leaked the handle from open(...).write(...)).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve self._task.args['src'] to a template file, render it, and
        store the rendered text back into the task args.

        Raises ValueError if the source file cannot be found.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # Bugfix: the original called urlsplit('src') on the literal string,
        # so the scheme check never fired; split the actual value instead.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # NOTE(review): this return value is discarded by run(), which
            # only handles ValueError from this method -- confirm intent.
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # Bugfix: the attribute is named '_block'; the original tested
            # hasattr(..., "_block:") (stray colon), which is always False,
            # so dependent role paths were never added.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
haoqili/MozSecWorld | vendor-local/packages/django-csp/csp/views.py | 1 | 1229 | import json
from django.core.mail import mail_admins
from django.http import HttpResponse, HttpResponseBadRequest
from django.template import loader, Context
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from csp import build_policy
@csrf_exempt
@require_POST
def report(request):
    """
    Accept a Content Security Policy violation report and forward
    the report via email to ADMINS.

    Returns 200 on success and 400 for malformed payloads (invalid JSON
    or a body without a 'csp-report' member).
    """
    try:
        violation = json.loads(request.raw_post_data)['csp-report']
    except Exception:
        # Deliberately broad: any malformed payload gets a 400 so the
        # browser does not retry.
        return HttpResponseBadRequest()

    # The CSP report uses dashed keys; the template context wants
    # underscores.
    data = {}
    for key in violation:
        data[key.replace('-', '_')] = violation[key]

    c = Context(data)
    t = loader.get_template('csp/email/report.ltxt')
    body = t.render(c)
    # Robustness fix: browsers may omit fields from the report, and a
    # missing key previously raised KeyError (HTTP 500) here.
    subject = 'CSP Violation: %s: %s' % (data.get('blocked_uri', 'unknown'),
                                         data.get('violated_directive', 'unknown'))
    mail_admins(subject, body)
    return HttpResponse()
def policy(request):
    """
    Serve the serialized policy at a policy-uri, as an alternative to
    putting the whole policy in the response header.
    """
    return HttpResponse(build_policy(),
                        mimetype='text/x-content-security-policy')
| bsd-3-clause |
deepmind/launchpad | launchpad/flags.py | 1 | 1758 | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all Launchpad flags."""
from absl import flags
from launchpad import context
# Convenience re-export of absl's global flag registry.
FLAGS = flags.FLAGS

# For inferring flag types only.
# NOTE(review): the lp_dummy_* flags appear to exist solely as type
# templates (one per supported flag type) — their values are never
# meaningful; confirm against the flag-forwarding code that consumes them.
flags.DEFINE_string('lp_dummy_str', '', 'Internal implementation details.')
flags.DEFINE_float('lp_dummy_float', 0., 'Internal implementation details.')
flags.DEFINE_integer('lp_dummy_int', 0, 'Internal implementation details.')
flags.DEFINE_boolean('lp_dummy_bool', False, 'Internal implementation details.')
flags.DEFINE_list('lp_dummy_list', [], 'Internal implementation details.')
flags.DEFINE_enum('lp_dummy_enum', '', [''], 'Internal implementation details.')

# Default launch mode: run all nodes as threads in the current process.
_DEFAULT_LAUNCH_TYPE = context.LaunchType.LOCAL_MULTI_THREADING.value
flags.DEFINE_enum(
    'lp_launch_type',
    _DEFAULT_LAUNCH_TYPE, [t.value for t in context.LaunchType],
    'How to launch a Launchpad program when launch() is called',
    allow_override=True)

# Tmux-specific options, used by the local multi-processing launch types.
flags.DEFINE_string('tmux_open_window', None,
                    'Window in new Tmux session to switch to.')
flags.DEFINE_string('tmux_session_name', 'launchpad',
                    'Prefix session name to use for the Tmux session.')
| apache-2.0 |
filias/django | tests/annotations/tests.py | 22 | 19583 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, Sum, Value,
)
from django.db.models.functions import Lower
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
    """
    There's a bug in Django/cx_Oracle with respect to string handling under
    Python 3 (essentially, they treat Python 3 strings as Python 2 strings
    rather than unicode). This makes some tests here fail under Python 3, so
    we mark them as expected failures until someone fixes them in #23843.
    """
    from unittest import expectedFailure
    from django.db import connection

    if connection.vendor == 'oracle' and six.PY3:
        return expectedFailure(func)
    return func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual(set(p.last_name for p in people), {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Test that annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE'
)
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
| bsd-3-clause |
hungle90/flexx | flexx/pyscript/tests/test_parser1.py | 20 | 10770 | from pytest import raises
from flexx.util.testing import run_tests_if_main
from flexx.pyscript import JSError, py2js, evaljs, evalpy, Parser
from flexx import pyscript
def nowhitespace(s):
    """Return *s* with every newline, tab and space character removed."""
    for ch in ('\n', '\t', ' '):
        s = s.replace(ch, '')
    return s
class TestParser(Parser):
    """Parser subclass exercising the special translation hooks.

    As demonstrated by TestTheParser below, a ``function_<name>`` hook
    rewrites plain calls to ``<name>()`` and a ``method_<name>`` hook
    rewrites attribute calls ``obj.<name>()``.
    """

    def function_foo_foo(self, node):
        # Plain foo_foo() calls compile to the literal 'xxx'.
        return 'xxx'

    def method_bar_bar(self, node, base):
        # obj.bar_bar() compiles to just the base expression.
        return base
class TestTheParser:
    """Checks for Parser subclass hooks and basic error reporting."""

    def test_special_functions(self):
        # function_* hooks apply to plain calls only; method_* hooks
        # apply to attribute calls only — the other form is untouched.
        assert TestParser("foo_foo()").dump() == 'xxx;'
        assert TestParser("bar_bar()").dump() == 'bar_bar();'
        assert TestParser("xxx.bar_bar()").dump() == 'xxx;'
        assert TestParser("xxx.foo_foo()").dump() == 'xxx.foo_foo();'

    def test_exceptions(self):
        # Double-star argument unpacking is not supported by the compiler.
        raises(JSError, py2js, "foo(**kwargs)")
class TestExpressions:
""" Tests for single-line statements/expressions
"""
def test_special(self):
assert py2js('') == ''
assert py2js(' \n') == ''
def test_ops(self):
# Test code
assert py2js('2+3') == '2 + 3;' # Binary
assert py2js('2/3') == '2 / 3;'
assert py2js('not 2') == '!2;' # Unary
assert py2js('-(2+3)') == '-(2 + 3);'
assert py2js('True and False') == 'true && false;' # Boolean
# No parentices around names, numbers and strings
assert py2js('foo + bar') == "foo + bar;"
assert py2js('_foo3 + _bar4') == "_foo3 + _bar4;"
assert py2js('3 + 4') == "3 + 4;"
assert py2js('"abc" + "def"') == "'abc' + 'def';"
assert py2js("'abc' + 'def'") == "'abc' + 'def';"
assert py2js("'abc' + \"'def\"") == "'abc' + \"'def\";"
# But they should be if it gets more complex
assert py2js('foo + bar == 3') == "(foo + bar) == 3;"
# Test outcome
assert evalpy('2+3') == '5' # Binary
assert evalpy('6/3') == '2'
assert evalpy('4//3') == '1'
assert evalpy('2**8') == '256'
assert evalpy('not True') == 'false' # Unary
assert evalpy('- 3') == '-3'
assert evalpy('True and False') == 'false' # Boolean
assert evalpy('True or False') == 'true'
# Bug
assert evalpy('(9-3-3)/3') == '1'
# string formatting
assert evalpy('"%s" % "bar"') == 'bar'
assert evalpy('"-%s-" % "bar"') == '-bar-'
assert evalpy('"foo %s foo" % "bar"') == 'foo bar foo'
assert evalpy('"x %i" % 6') == 'x 6'
assert evalpy('"x %f" % 6') == 'x 6'
assert evalpy('"%s: %f" % ("value", 6)') == 'value: 6'
assert evalpy('"%r: %r" % ("value", 6)') == '"value": 6'
def test_comparisons(self):
assert py2js('4 > 3') == '4 > 3;'
assert py2js('4 is 3') == '4 === 3;'
assert evalpy('4 > 4') == 'false'
assert evalpy('4 >= 4') == 'true'
assert evalpy('4 < 3') == 'false'
assert evalpy('4 <= 4') == 'true'
assert evalpy('4 == 3') == 'false'
assert evalpy('4 != 3') == 'true'
assert evalpy('4 == "4"') == 'true' # yuck!
assert evalpy('4 is "4"') == 'false'
assert evalpy('4 is not "4"') == 'true'
assert evalpy('"c" in "abcd"') == 'true'
assert evalpy('"x" in "abcd"') == 'false'
assert evalpy('"x" not in "abcd"') == 'true'
assert evalpy('3 in [1,2,3,4]') == 'true'
assert evalpy('9 in [1,2,3,4]') == 'false'
assert evalpy('9 not in [1,2,3,4]') == 'true'
assert evalpy('"bar" in {"foo": 3}') == 'false'
assert evalpy('"foo" in {"foo": 3}') == 'true'
def test_truthfulness_of_basic_types(self):
# Numbers
assert evalpy('"T" if (1) else "F"') == 'T'
assert evalpy('"T" if (0) else "F"') == 'F'
# Strings
assert evalpy('"T" if ("a") else "F"') == 'T'
assert evalpy('"T" if ("") else "F"') == 'F'
# None - undefined
assert evalpy('None is null') == 'true'
assert evalpy('None is undefined') == 'false'
assert evalpy('undefined is undefined') == 'true'
def test_truthfulness_of_array_and_dict(self):
# Arrays
assert evalpy('bool([1])') == 'true'
assert evalpy('bool([])') == 'false'
#
assert evalpy('"T" if ([1, 2, 3]) else "F"') == 'T'
assert evalpy('"T" if ([]) else "F"') == 'F'
#
assert evalpy('if [1]: "T"\nelse: "F"') == 'T'
assert evalpy('if []: "T"\nelse: "F"') == 'F'
#
assert evalpy('if [1] and 1: "T"\nelse: "F"') == 'T'
assert evalpy('if [] and 1: "T"\nelse: "F"') == 'F'
assert evalpy('if [] or 1: "T"\nelse: "F"') == 'T'
#
assert evalpy('[2] or 42') == '[ 2 ]'
assert evalpy('[] or 42') == '42'
# Dicts
assert evalpy('bool({1:2})') == 'true'
assert evalpy('bool({})') == 'false'
#
assert evalpy('"T" if ({"foo": 3}) else "F"') == 'T'
assert evalpy('"T" if ({}) else "F"') == 'F'
#
assert evalpy('if {1:2}: "T"\nelse: "F"') == 'T'
assert evalpy('if {}: "T"\nelse: "F"') == 'F'
#
assert evalpy('if {1:2} and 1: "T"\nelse: "F"') == 'T'
assert evalpy('if {} and 1: "T"\nelse: "F"') == 'F'
assert evalpy('if {} or 1: "T"\nelse: "F"') == 'T'
#
assert evalpy('{1:2} or 42') == "{ '1': 2 }"
assert evalpy('{} or 42') == '42'
# Eval extra types
assert evalpy('null or 42') == '42'
assert evalpy('ArrayBuffer(4) or 42') != '42'
# No bools
assert py2js('if foo: pass').count('_truthy')
assert py2js('if foo.length: pass').count('_truthy') == 0
assert py2js('if 3: pass').count('_truthy') == 0
assert py2js('if True: pass').count('_truthy') == 0
assert py2js('if a == 3: pass').count('_truthy') == 0
assert py2js('if a is 3: pass').count('_truthy') == 0
def test_indexing_and_slicing(self):
c = 'a = [1, 2, 3, 4, 5]\n'
# Indexing
assert evalpy(c + 'a[2]') == '3'
assert evalpy(c + 'a[-2]') == '4'
# Slicing
assert evalpy(c + 'a[:]') == '[ 1, 2, 3, 4, 5 ]'
assert evalpy(c + 'a[1:-1]') == '[ 2, 3, 4 ]'
def test_assignments(self):
assert py2js('foo = 3') == 'var foo;\nfoo = 3;' # with var
assert py2js('foo.bar = 3') == 'foo.bar = 3;' # without var
code = py2js('foo = 3; bar = 4') # define both
assert code.count('var') == 1
code = py2js('foo = 3; foo = 4') # only define first time
assert code.count('var') == 1
code = py2js('foo = bar = 3') # multiple assignment
assert 'foo = bar = 3' in code
assert 'var bar, foo' in code # alphabetic order
# self -> this
assert py2js('self') == 'this;'
assert py2js('self.foo') == 'this.foo;'
# Indexing
assert evalpy('a=[0,0]\na[0]=2\na[1]=3\na', False) == '[2,3]'
# Tuple unpacking
evalpy('x=[1,2,3]\na, b, c = x\nb', False) == '2'
evalpy('a,b,c = [1,2,3]\nc,b,a = a,b,c\n[a,b,c]', False) == '[3,2,1]'
# Class variables don't get a var
code = py2js('class Foo:\n bar=3\n bar = bar + 1')
assert code.count('bar') == 3
assert code.count('Foo.prototype.bar') == 3
def test_aug_assignments(self):
# assign + bin op
assert evalpy('x=5; x+=1; x') == '6'
assert evalpy('x=5; x/=2; x') == '2.5'
assert evalpy('x=5; x**=2; x') == '25'
assert evalpy('x=5; x//=2; x') == '2'
def test_basic_types(self):
assert py2js('True') == 'true;'
assert py2js('False') == 'false;'
assert py2js('None') == 'null;'
assert py2js('"bla\\"bla"') == "'bla\"bla';"
assert py2js('3') == '3;'
assert py2js('3.1415') == '3.1415;'
assert py2js('[1,2,3]') == '[1, 2, 3];'
assert py2js('(1,2,3)') == '[1, 2, 3];'
assert py2js('{foo: 3, bar: 4}') == '{foo: 3, bar: 4};'
def test_ignore_import_of_compiler(self):
modname = pyscript.__name__
assert py2js('from %s import x, y, z\n42' % modname) == '42;'
def test_funcion_call(self):
jscode = 'var foo = function (x, y) {return x+y;};'
assert evaljs(jscode + py2js('foo(2,2)')) == '4'
assert evaljs(jscode + py2js('foo("so ", True)')) == 'so true'
assert evaljs(jscode + py2js('a=[1,2]; foo(*a)')) == '3'
assert evaljs(jscode + py2js('a=[1,2]; foo(7, *a)')) == '8'
# Test super (is tested for real in test_parser3.py
assert evalpy('d={"_base_class": console};d._base_class.log(4)') == '4'
assert evalpy('d={"_base_class": console};d._base_class.log()') == ''
jscode = 'var foo = function () {return this.val};'
jscode += 'var d = {"foo": foo, "val": 7};\n'
assert evaljs(jscode + py2js('d["foo"]()')) == '7'
assert evaljs(jscode + py2js('d["foo"](*[3, 4])')) == '7'
def test_instantiation(self):
# Test creating instances
assert 'new' in py2js('a = Foo()')
assert 'new' in py2js('a = x.Foo()')
assert 'new' not in py2js('a = foo()')
assert 'new' not in py2js('a = _foo()')
assert 'new' not in py2js('a = _Foo()')
assert 'new' not in py2js('a = this.Foo()')
assert 'new' not in py2js('a = JSON.stringify(x)')
jscode = 'function Foo() {this.x = 3}\nx=1;\n'
assert evaljs(jscode + py2js('a=Foo()\nx')) == '1'
def test_pass(self):
assert py2js('pass') == ''
def test_delete(self):
assert evalpy('d={}\nd.foo=3\n\nd') == "{ foo: 3 }"
assert evalpy('d={}\nd.foo=3\ndel d.foo\nd') == '{}'
assert evalpy('d={}\nd.foo=3\nd.bar=3\ndel d.foo\nd') == '{ bar: 3 }'
assert evalpy('d={}\nd.foo=3\nd.bar=3\ndel d.foo, d["bar"]\nd') == '{}'
class TestModules:
    """Checks for compiling code as a named module."""

    def test_module(self):
        """Compiling with a module name exposes public globals on a
        module object and hides underscore-prefixed ones."""
        code = Parser('"docstring"\nfoo=3;bar=4;_priv=0;', 'mymodule').dump()

        # Has docstring
        assert code.count('// docstring') == 1

        # Test that global variables exist
        assert evaljs(code+'mymodule.foo+mymodule.bar') == '7'

        # And privates do not
        assert evaljs(code+'mymodule._priv===undefined') == 'true'
run_tests_if_main()
# if __name__ == '__main__':
# t = TestClasses()
# t.test_class()
# t.test_inheritance()
| bsd-2-clause |
scorphus/thefuck | thefuck/utils.py | 2 | 9186 | import atexit
import os
import pickle
import re
import shelve
import sys
import six
from decorator import decorator
from difflib import get_close_matches as difflib_get_close_matches
from functools import wraps
from .logs import warn, exception
from .conf import settings
from .system import Path
DEVNULL = open(os.devnull, 'w')
if six.PY2:
import anydbm
shelve_open_error = anydbm.error
else:
import dbm
shelve_open_error = dbm.error
def memoize(fn):
    """Caches previous calls to the function."""
    cache = {}

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if memoize.disabled:
            # Caching globally switched off: always call through.
            return fn(*args, **kwargs)
        # pickle gives a hashable key for arbitrary (args, kwargs) pairs.
        key = pickle.dumps((args, kwargs))
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]

    return wrapper


# Global switch that disables caching for every memoized function.
memoize.disabled = False
@memoize
def which(program):
    """Returns `program` path or `None`."""
    try:
        # Python 3 ships a ready-made implementation.
        from shutil import which
        return which(program)
    except ImportError:
        # Python 2 fallback: scan PATH by hand.
        def is_executable(path):
            return os.path.isfile(path) and os.access(path, os.X_OK)

        directory, _ = os.path.split(program)
        if directory:
            # An explicit path was given; accept it only if executable.
            if is_executable(program):
                return program
            return None

        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if is_executable(candidate):
                return candidate
        return None
def default_settings(params):
    """Adds default values to settings if it not presented.

    Usage:

        @default_settings({'apt': '/usr/bin/apt'})
        def match(command):
            print(settings.apt)

    """
    def _apply_defaults(fn, command):
        # Fill in any setting the user has not configured explicitly.
        for name, value in params.items():
            settings.setdefault(name, value)
        return fn(command)

    return decorator(_apply_defaults)
def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True):
    """Returns closest match or just first from possibilities."""
    possibilities = list(possibilities)
    matches = difflib_get_close_matches(word, possibilities, 1, cutoff)
    if matches:
        return matches[0]
    if fallback_to_first:
        # No candidate was similar enough; fall back to the first one.
        return possibilities[0]
def get_close_matches(word, possibilities, n=None, cutoff=0.6):
    """Overrides `difflib.get_close_match` to control argument `n`."""
    # Defer to the configured default only when the caller did not ask
    # for a specific number of matches.
    limit = settings.num_close_matches if n is None else n
    return difflib_get_close_matches(word, possibilities, limit, cutoff)
@memoize
def get_all_executables():
    """Return all executables on PATH plus the shell's aliases.

    The thefuck entry points ('thefuck', 'fuck') and its own alias are
    excluded so the tool never suggests invoking itself.  The result is
    memoized for the lifetime of the process.
    """
    from thefuck.shells import shell

    def _safe(fn, fallback):
        # Swallow OSError (unreadable or vanished PATH entries) and
        # return the fallback instead.
        try:
            return fn()
        except OSError:
            return fallback

    tf_alias = get_alias()
    tf_entry_points = ['thefuck', 'fuck']

    # Directory entries that are files (not dirs) and not thefuck itself;
    # names are decoded to text on Python 2.
    bins = [exe.name.decode('utf8') if six.PY2 else exe.name
            for path in os.environ.get('PATH', '').split(os.pathsep)
            for exe in _safe(lambda: list(Path(path).iterdir()), [])
            if not _safe(exe.is_dir, True)
            and exe.name not in tf_entry_points]
    aliases = [alias.decode('utf8') if six.PY2 else alias
               for alias in shell.get_aliases() if alias != tf_alias]

    return bins + aliases
def replace_argument(script, from_, to):
    """Replace the first occurrence of argument `from_` in `script` with `to`.

    A trailing argument is tried first; otherwise the first occurrence
    surrounded by spaces is substituted.
    """
    tail_pattern = u' {}$'.format(re.escape(from_))
    with_new_tail = re.sub(tail_pattern, u' {}'.format(to), script, count=1)
    if with_new_tail != script:
        return with_new_tail
    return script.replace(u' {} '.format(from_), u' {} '.format(to), 1)
@decorator
def eager(fn, *args, **kwargs):
    """Decorator that eagerly consumes a generator function into a `list`."""
    return list(fn(*args, **kwargs))
@eager
def get_all_matched_commands(stderr, separator='Did you mean'):
    """Yield the suggestion lines that follow any `separator` marker in
    `stderr`; the marker lines themselves are not yielded."""
    separators = separator if isinstance(separator, list) else [separator]
    collecting = False
    for line in stderr.split('\n'):
        if any(sep in line for sep in separators):
            # A marker line: start (or keep) collecting, but don't yield it.
            collecting = True
        elif collecting and line:
            yield line.strip()
def replace_command(command, broken, matched):
    """Helper for *_no_command rules: build one fixed script per close match
    of `broken` among `matched`."""
    fixed = []
    for candidate in get_close_matches(broken, matched, cutoff=0.1):
        fixed.append(replace_argument(command.script, broken, candidate.strip()))
    return fixed
@memoize
def is_app(command, *app_names, **kwargs):
    """Return `True` when `command` invokes one of `app_names`.

    `at_least` (keyword-only) demands that many extra arguments after the
    app name before the command counts as a match.
    """
    at_least = kwargs.pop('at_least', 0)
    if kwargs:
        raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys()))
    return (len(command.script_parts) > at_least
            and command.script_parts[0] in app_names)
def for_app(*app_names, **kwargs):
    """Decorator: only run the wrapped `match` when the broken command
    invokes one of `app_names`; otherwise report no match."""
    def _for_app(fn, command):
        return fn(command) if is_app(command, *app_names, **kwargs) else False
    return decorator(_for_app)
class Cache(object):
    """Lazy read cache and save changes at exit.

    Values live in a `shelve` database under the XDG cache directory; the
    database is opened on first use and closed through `atexit`.
    """
    def __init__(self):
        self._db = None
    def _init_db(self):
        # Degrade to a plain in-memory dict if the shelve can't be opened.
        try:
            self._setup_db()
        except Exception:
            exception("Unable to init cache", sys.exc_info())
            self._db = {}
    def _setup_db(self):
        cache_dir = self._get_cache_dir()
        cache_path = Path(cache_dir).joinpath('thefuck').as_posix()
        try:
            self._db = shelve.open(cache_path)
        except shelve_open_error + (ImportError,):
            # Caused when switching between Python versions
            warn("Removing possibly out-dated cache")
            os.remove(cache_path)
            self._db = shelve.open(cache_path)
        atexit.register(self._db.close)
    def _get_cache_dir(self):
        default_xdg_cache_dir = os.path.expanduser("~/.cache")
        cache_dir = os.getenv("XDG_CACHE_HOME", default_xdg_cache_dir)
        # Ensure the cache_path exists, Python 2 does not have the exist_ok
        # parameter
        try:
            os.makedirs(cache_dir)
        except OSError:
            if not os.path.isdir(cache_dir):
                raise
        return cache_dir
    def _get_mtime(self, path):
        # A missing dependency file contributes '0' instead of raising.
        try:
            return str(os.path.getmtime(path))
        except OSError:
            return '0'
    def _get_key(self, fn, depends_on, args, kwargs):
        # Key on the function identity (module + repr minus the id at the
        # end) together with its dependencies and call arguments.
        parts = (fn.__module__, repr(fn).split('at')[0],
                 depends_on, args, kwargs)
        return str(pickle.dumps(parts))
    def get_value(self, fn, depends_on, args, kwargs):
        if self._db is None:
            self._init_db()
        depends_on = [Path(name).expanduser().absolute().as_posix()
                      for name in depends_on]
        key = self._get_key(fn, depends_on, args, kwargs)
        # The etag joins the mtimes of every dependency file; any file
        # change invalidates the cached value.
        etag = '.'.join(self._get_mtime(path) for path in depends_on)
        if self._db.get(key, {}).get('etag') == etag:
            return self._db[key]['value']
        else:
            value = fn(*args, **kwargs)
            self._db[key] = {'etag': etag, 'value': value}
            return value
# Module-level singleton backing the `cache` decorator below.
_cache = Cache()
def cache(*depends_on):
    """Caches function result in temporary file.
    Cache will be expired when modification date of files from `depends_on`
    will be changed.
    Only functions should be wrapped in `cache`, not methods.
    """
    def cache_decorator(fn):
        @memoize
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # Global kill-switch (e.g. for tests) bypasses the disk cache.
            if cache.disabled:
                return fn(*args, **kwargs)
            else:
                return _cache.get_value(fn, depends_on, args, kwargs)
        return wrapper
    return cache_decorator
# Runtime opt-out: set `cache.disabled = True` to bypass all caching.
cache.disabled = False
def get_installation_info():
    """Return the installed `thefuck` distribution via `pkg_resources`."""
    import pkg_resources
    return pkg_resources.require('thefuck')[0]
def get_alias():
    """Return the shell alias for thefuck, defaulting to ``'fuck'``."""
    alias = os.environ.get('TF_ALIAS')
    return alias if alias is not None else 'fuck'
@memoize
def get_valid_history_without_current(command):
    """Return history lines that are runnable commands, excluding corrected
    lines, thefuck invocations and the currently-broken `command` itself."""
    def _not_corrected(history, tf_alias):
        """Returns all lines from history except that comes before `fuck`."""
        previous = None
        for line in history:
            # Drop `previous` when the next line is the alias: it was the
            # broken command that `fuck` then corrected.
            if previous is not None and line != tf_alias:
                yield previous
            previous = line
        if history:
            yield history[-1]
    from thefuck.shells import shell
    history = shell.get_history()
    tf_alias = get_alias()
    # A line only counts when its first word is a known executable/builtin.
    executables = set(get_all_executables())\
        .union(shell.get_builtin_commands())
    return [line for line in _not_corrected(history, tf_alias)
            if not line.startswith(tf_alias) and not line == command.script
            and line.split(' ')[0] in executables]
def format_raw_script(raw_script):
    """Join a list of raw script parts into one stripped command string.

    :type raw_script: [basestring]
    :rtype: basestring

    """
    if six.PY2:
        # Python 2 hands us byte strings; decode before joining.
        parts = [arg.decode('utf-8') for arg in raw_script]
    else:
        parts = list(raw_script)
    return u' '.join(parts).strip()
| mit |
hustodemon/spacewalk | backend/server/test/unit-test/rhnSQL/test_server_registration.py | 2 | 9344 | #!/usr/bin/python
#
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Tests various codepaths for server registration
#
import sys
import time
import unittest
from spacewalk.common import rhnFlags
from spacewalk.common.rhnConfig import initCFG, CFG
from spacewalk.server import rhnSQL, rhnServer, rhnChannel, rhnUser
from spacewalk.server.handlers.xmlrpc import registration
import misc_functions
DB_SETTINGS = misc_functions.db_settings("oracle")
class Tests(unittest.TestCase):
    """Integration tests for server registration against a live Oracle DB.

    Each test creates a fresh org/user, registers a system through the
    XML-RPC handler, and verifies the resulting server/channel/group rows.
    """
    # Base channel / channel family the freshly created org is entitled to.
    _channel = 'redhat-advanced-server-i386'
    _channel_family = 'rhel-as'
    _entitlements = {
        'enterprise_entitled': None
    }
    def setUp(self):
        initCFG("server.xmlrpc")
        rhnSQL.initDB(
            backend="oracle",
            username=DB_SETTINGS["user"],
            password=DB_SETTINGS["password"],
            database=DB_SETTINGS["database"]
        )
        rhnSQL.clear_log_id()
    def tearDown(self):
        # Roll back any unsaved data
        rhnSQL.rollback()
    def test_new_server_1(self):
        "Test normal server registration, with username/password"
        u, password = self._create_new_user()
        username = u.contact['login']
        org_id = u.contact['org_id']
        entitlements = self._entitlements
        os_release = "2.1as"
        t = misc_functions.create_activation_key(
            org_id=u.contact['org_id'],
            entitlement_level=entitlements,
            user_id=u.getid(),
            release=os_release
        )
        params = build_new_system_params_with_username(username=username,
            password=password, os_release=os_release)
        system_id = register_new_system(params)
        rhnSQL.commit()
        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)
        server_id = s.getid()
        # With no token, the server should land on the default base channel.
        channels = rhnChannel.channels_for_server(server_id)
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['label'], self._channel)
    def test_new_server_token_1(self):
        "test registration with token"
        u, _ = self._create_new_user()
        org_id = u.contact['org_id']
        entitlements = self._entitlements
        os_release = "2.1as"
        t = misc_functions.create_activation_key(
            org_id=u.contact['org_id'],
            entitlement_level=entitlements,
            user_id=u.getid(),
            release=os_release
        )
        token = t.get_token()
        params = build_new_system_params_with_token(
            token=token,
            os_release=os_release
        )
        system_id = register_new_system(params)
        rhnSQL.commit()
        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)
    def test_new_server_token_2(self):
        "Test registration with token that specifies a base channel"
        # FIXME: the test fails because there's no channel associated with the
        # freshly created Server: rhnServerChannel is not populated by the
        # registration code.
        u, _ = self._create_new_user()
        org_id = u.contact['org_id']
        base_channel = 'rhel-i386-as-3'
        entitlements = self._entitlements
        os_release = "2.1as"
        t = misc_functions.create_activation_key(org_id=u.contact['org_id'],
            entitlement_level=entitlements, user_id=u.getid(),
            channels=[base_channel], release=os_release)
        token = t.get_token()
        params = build_new_system_params_with_token(
            token=token,
            os_release=os_release
        )
        system_id = register_new_system(params)
        rhnSQL.commit()
        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)
        server_id = s.getid()
        channels = rhnChannel.channels_for_server(server_id)
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['label'], base_channel)
    def test_new_server_reactivation_token_1(self):
        "Test server re-registration"
        u, password = self._create_new_user()
        username = u.contact['login']
        os_release = "2.1AS"
        params = build_new_system_params_with_username(username=username,
            password=password, os_release="2.1AS")
        system_id = register_new_system(params)
        rhnSQL.commit()
        s1 = rhnServer.get(system_id)
        self.assertNotEqual(s1, None)
        server_id_1 = s1.getid()
        groups1 = misc_functions.fetch_server_groups(server_id_1)
        # Build a re-registration token
        base_channel = 'rhel-i386-as-3'
        entitlements = self._entitlements
        t = misc_functions.create_activation_key(org_id=u.contact['org_id'],
            entitlement_level=entitlements, user_id=u.getid(),
            channels=[base_channel], server_id=server_id_1, release=os_release)
        token = t.get_token()
        params = build_new_system_params_with_token(
            token=token,
            os_release=os_release
        )
        system_id = register_new_system(params)
        rhnSQL.commit()
        s2 = rhnServer.get(system_id)
        server_id_2 = s2.getid()
        groups2 = misc_functions.fetch_server_groups(server_id_2)
        self.assertNotEqual(s2, None)
        # Re-registration must map back onto the same server row.
        self.assertEqual(server_id_1, server_id_2)
        # Should be subscribed to the same groups
        self.assertEqual(groups1, groups2)
    def test_new_server_multiple_tokens_1(self):
        """Test registration with multiple activation tokens
        Resulting server group is the union of all server groups from all
        tokens
        """
        u, _ = self._create_new_user()
        org_id = u.contact['org_id']
        entitlements = self._entitlements
        os_release = "2.1AS"
        t = misc_functions.create_activation_key(org_id=u.contact['org_id'],
            entitlement_level=entitlements, user_id=u.getid(), release=os_release)
        token1 = t.get_token()
        sg1 = t.get_server_groups()
        t = misc_functions.create_activation_key(org_id=u.contact['org_id'],
            entitlement_level=entitlements, user_id=u.getid(), release=os_release)
        token2 = t.get_token()
        sg2 = t.get_server_groups()
        # Multiple tokens are passed as a single comma-separated string.
        token = token1 + ',' + token2
        params = build_new_system_params_with_token(token=token,
            os_release=os_release)
        system_id = register_new_system(params)
        rhnSQL.commit()
        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)
        server_id = s.getid()
        sgs = misc_functions.fetch_server_groups(server_id)
        sgstgt = sg1 + sg2
        sgstgt.sort()
        self.assertEqual(sgs, sgstgt)
    def _create_new_user(self):
        # Create new org
        org_id = misc_functions.create_new_org()
        users_unencrypted_password = "unittest-password-%.3f" % time.time()
        # Grant entitlements to the org
        misc_functions.grant_entitlements(org_id, 'enterprise_entitled', 1)
        misc_functions.grant_channel_family_entitlements(
            org_id,
            "%s-%.3f" % (self._channel_family, time.time()),
            1
        )
        # Create new user
        u = misc_functions.create_new_user(
            org_id=org_id,
            roles=['org_admin'],
            password=users_unencrypted_password,
            encrypt_password=CFG.encrypted_passwords
        )
        return u, users_unencrypted_password
class Counter:
    """Process-wide monotonically increasing counter.

    `value()` returns the current count and advances it.  The count is
    stored on the class so it is shared by every instance; the original
    `self._counter = val + 1` created an *instance* attribute shadowing
    the class attribute, so each fresh `Counter()` restarted at 0 and the
    counter never actually advanced.
    """
    _counter = 0
    def value(self):
        """Return the next counter value (starting at 0)."""
        val = Counter._counter
        Counter._counter = val + 1
        return val
def build_new_system_params_with_username(**kwargs):
    """Build a new-system registration request authenticated with
    username/password; any keyword argument overrides the defaults.

    The profile name is made unique via the timestamp plus a counter.
    """
    val = Counter().value()
    rnd_string = "%d-%d" % (int(time.time()), val)
    params = {
        'os_release': '9',
        'architecture': 'i686-redhat-linux',
        'profile_name': "unittest server " + rnd_string,
        'username': 'no such user',
        'password': 'no such password',
    }
    params.update(kwargs)
    # A token must never leak into the username/password flow.
    # (`in` replaces the removed-in-Python-3 `dict.has_key`.)
    if 'token' in params:
        del params['token']
    return params
def build_new_system_params_with_token(**kwargs):
    """Build a registration request authenticated by an activation token
    instead of username/password."""
    params = {
        'token': kwargs.get('token', "no such token"),
    }
    # Reuse the username-based builder for the common fields, then drop
    # the credentials it supplies.
    params.update(build_new_system_params_with_username(**kwargs))
    for credential in ('username', 'password'):
        del params[credential]
    return params
def register_new_system(params):
    """Invoke the XML-RPC registration handler directly with `params`."""
    # Reset rhnFlags' global state before exercising the handler.
    rhnFlags.reset()
    return registration.Registration().new_system(params)
if __name__ == '__main__':
    # Propagate the unittest outcome as the process exit status.
    sys.exit(unittest.main() or 0)
| gpl-2.0 |
dhxkgozj/DirEngine | lib/pyelftools/elftools/construct/lib/container.py | 23 | 3949 | """
Various containers.
"""
from collections import MutableMapping
from pprint import pformat
def recursion_lock(retval, lock_name = "__recursion_lock__"):
    """Return a decorator that short-circuits re-entrant calls.

    While a decorated method is already running on an instance, any
    nested call on the same instance immediately returns `retval`
    instead of recursing.  The in-progress flag is kept on the instance
    under attribute `lock_name`.
    """
    def _decorate(method):
        def _locked(self, *args, **kwargs):
            if getattr(self, lock_name, False):
                # Already inside this method on `self` -- bail out.
                return retval
            setattr(self, lock_name, True)
            try:
                return method(self, *args, **kwargs)
            finally:
                setattr(self, lock_name, False)
        _locked.__name__ = method.__name__
        return _locked
    return _decorate
class Container(MutableMapping):
    """
    A generic bag of named attributes.

    Parsed data is conventionally returned in containers.  Keys are
    exposed both as mapping items and as attributes, since items are
    stored directly in the instance ``__dict__``.
    """
    def __init__(self, **kw):
        self.__dict__ = kw
    # The core dictionary interface.
    def __getitem__(self, name):
        return self.__dict__[name]
    def __setitem__(self, name, value):
        self.__dict__[name] = value
    def __delitem__(self, name):
        del self.__dict__[name]
    def __iter__(self):
        return iter(self.__dict__)
    def __len__(self):
        return len(self.__dict__)
    def keys(self):
        return self.__dict__.keys()
    # Extended dictionary interface.
    def update(self, other):
        self.__dict__.update(other)
    __update__ = update
    def __contains__(self, value):
        return value in self.__dict__
    # Rich comparisons: equal iff the attribute dicts are equal; anything
    # without a __dict__ compares unequal.
    def __eq__(self, other):
        try:
            return self.__dict__ == other.__dict__
        except AttributeError:
            return False
    def __ne__(self, other):
        return not self == other
    # Copy interface.
    def copy(self):
        return self.__class__(**self.__dict__)
    __copy__ = copy
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.__dict__))
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, str(self.__dict__))
class FlagsContainer(Container):
    """
    A container with pretty-printing tailored to flags.

    Only set (truthy) flags with public names are displayed.
    """
    @recursion_lock("<...>")
    def __str__(self):
        shown = dict((name, self[name]) for name in self
                     if self[name] and not name.startswith("_"))
        return "%s(%s)" % (self.__class__.__name__, pformat(shown))
class ListContainer(list):
    """
    A container for lists.

    Pretty-prints its elements; `recursion_lock` keeps a
    self-referencing list from recursing forever in `__str__`.
    """
    __slots__ = ["__recursion_lock__"]
    @recursion_lock("[...]")
    def __str__(self):
        return pformat(self)
class LazyContainer(object):
    """Defers parsing of a subconstruct until its value is first needed.

    Remembers the stream and offset at construction time; `read` seeks
    back and parses on demand, and the result is memoized in `_value`.
    """
    __slots__ = ["subcon", "stream", "pos", "context", "_value"]
    def __init__(self, subcon, stream, pos, context):
        self.subcon = subcon
        self.stream = stream
        self.pos = pos
        self.context = context
        # NotImplemented doubles as the "not parsed yet" sentinel.
        self._value = NotImplemented
    def __eq__(self, other):
        try:
            return self._value == other._value
        except AttributeError:
            return False
    def __ne__(self, other):
        return not (self == other)
    def __str__(self):
        return self.__pretty_str__()
    def __pretty_str__(self, nesting = 1, indentation = "    "):
        if self._value is NotImplemented:
            text = "<unread>"
        elif hasattr(self._value, "__pretty_str__"):
            text = self._value.__pretty_str__(nesting, indentation)
        else:
            text = str(self._value)
        return "%s: %s" % (self.__class__.__name__, text)
    def read(self):
        # Re-parse from the remembered stream position (does not memoize;
        # use the `value` property for cached access).
        self.stream.seek(self.pos)
        return self.subcon._parse(self.stream, self.context)
    def dispose(self):
        # Drop all references so the stream/context can be collected.
        self.subcon = None
        self.stream = None
        self.context = None
        self.pos = None
    def _get_value(self):
        if self._value is NotImplemented:
            self._value = self.read()
        return self._value
    value = property(_get_value)
    has_value = property(lambda self: self._value is not NotImplemented)
| bsd-3-clause |
georgewhewell/CouchPotatoServer | libs/html5lib/trie/py.py | 817 | 1763 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Pure-Python trie over a dict of text keys.

    Keys are kept in a sorted list and prefix queries use binary search;
    the (string, range) of the last prefix query is cached so that a
    follow-up query with a longer prefix only searches within that range.
    """
    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")
        self._data = data
        self._keys = sorted(data.keys())
        self._cachestr = ""
        self._cachepoints = (0, len(data))
    def __contains__(self, key):
        return key in self._data
    def __len__(self):
        return len(self._data)
    def __iter__(self):
        return iter(self._data)
    def __getitem__(self, key):
        return self._data[key]
    def keys(self, prefix=None):
        """Return the set of keys starting with `prefix` (all keys if empty)."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)
        if prefix.startswith(self._cachestr):
            # `prefix` extends the cached prefix, so its keys lie within
            # the cached range; narrow the binary search accordingly.
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)
        keys = set()
        if start == len(self._keys):
            return keys
        # Bug fix: bound the walk so it cannot run past the end of the
        # key list when every remaining key matches the prefix (the
        # unguarded loop raised IndexError in that case).
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1
        self._cachestr = prefix
        self._cachepoints = (start, i)
        return keys
    def has_keys_with_prefix(self, prefix):
        """Return True if any key starts with `prefix`."""
        if prefix in self._data:
            return True
        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)
        if i == len(self._keys):
            return False
        return self._keys[i].startswith(prefix)
| gpl-3.0 |
altsen/diandiyun-platform | common/djangoapps/django_comment_common/tests.py | 22 | 2121 | from django.test import TestCase
from django_comment_common.models import Role
from student.models import CourseEnrollment, User
class RoleAssignmentTest(TestCase):
    """
    Basic checks to make sure our Roles get assigned and unassigned as students
    are enrolled and unenrolled from a course.
    """
    def setUp(self):
        # Check a staff account because those used to get the Moderator role
        self.staff_user = User.objects.create_user(
            "patty",
            "patty@fake.edx.org",
        )
        # NOTE(review): is_staff is set but never .save()d -- presumably the
        # flag is only needed in memory here; confirm if persistence matters.
        self.staff_user.is_staff = True
        self.student_user = User.objects.create_user(
            "hacky",
            "hacky@fake.edx.org"
        )
        self.course_id = "edX/Fake101/2012"
        CourseEnrollment.enroll(self.staff_user, self.course_id)
        CourseEnrollment.enroll(self.student_user, self.course_id)
    def test_enrollment_auto_role_creation(self):
        # Enrollment alone should grant the per-course "Student" forum role
        # to staff and students alike.
        student_role = Role.objects.get(
            course_id=self.course_id,
            name="Student"
        )
        self.assertEqual([student_role], list(self.staff_user.roles.all()))
        self.assertEqual([student_role], list(self.student_user.roles.all()))
    # The following was written on the assumption that unenrolling from a course
    # should remove all forum Roles for that student for that course. This is
    # not necessarily the case -- please see comments at the top of
    # django_comment_client.models.assign_default_role(). Leaving it for the
    # forums team to sort out.
    #
    # def test_unenrollment_auto_role_removal(self):
    #     another_student = User.objects.create_user("sol", "sol@fake.edx.org")
    #     CourseEnrollment.enroll(another_student, self.course_id)
    #
    #     CourseEnrollment.unenroll(self.student_user, self.course_id)
    #     # Make sure we didn't delete the actual Role
    #     student_role = Role.objects.get(
    #         course_id=self.course_id,
    #         name="Student"
    #     )
    #     self.assertNotIn(student_role, self.student_user.roles.all())
    #     self.assertIn(student_role, another_student.roles.all())
| agpl-3.0 |
cmvelo/ansible-modules-extras | windows/win_iis_webapplication.py | 153 | 1896 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation consumed by ansible-doc; typos fixed
# ("applicatio" -> "application", "applicatiojn" -> "application",
# "a IIS" -> "an IIS").
DOCUMENTATION = '''
---
module: win_iis_webapplication
version_added: "2.0"
short_description: Configures an IIS Web application.
description:
     - Creates, removes and configures an IIS Web application
options:
  name:
    description:
      - Name of the Web application
    required: true
    default: null
    aliases: []
  site:
    description:
      - Name of the site on which the application is created.
    required: true
    default: null
    aliases: []
  state:
    description:
      - State of the web application
    choices:
      - present
      - absent
    required: false
    default: null
    aliases: []
  physical_path:
    description:
      - The physical path on the remote host to use for the new application. The specified folder must already exist.
    required: false
    default: null
    aliases: []
  application_pool:
    description:
      - The application pool in which the new site executes.
    required: false
    default: null
    aliases: []
author: Henrik Wallström
'''
EXAMPLES = '''
$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host
'''
| gpl-3.0 |
KohlsTechnology/ansible | lib/ansible/plugins/callback/osx_say.py | 60 | 2867 | # (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Plugin documentation; fixes the "whitelising" typo and replaces the
# short_description (copied from another plugin) with one matching the
# description below.
DOCUMENTATION = '''
    callback: osx_say
    type: notification
    requirements:
      - whitelisting in configuration
      - the '/usr/bin/say' command line program (standard on OS X)
    short_description: speaks about Ansible play events using OS X say
    version_added: historical
    description:
        - This plugin will use the 'say' program to "speak" about play events.
'''
import subprocess
import os
from ansible.plugins.callback import CallbackBase
# Voices passed to `say --voice=...` for the different event categories.
FAILED_VOICE = "Zarvox"      # failures / unreachable hosts
REGULAR_VOICE = "Trinoids"   # ordinary narration (tasks, playbook start)
HAPPY_VOICE = "Cellos"       # play start / play complete
LASER_VOICE = "Princess"     # short "pew" acknowledgements
SAY_CMD = "/usr/bin/say"     # macOS text-to-speech binary
class CallbackModule(CallbackBase):
    """
    Makes Ansible much more exciting on OS X by speaking play events
    through the `say` command, with a distinct voice per event kind.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'osx_say'
    CALLBACK_NEEDS_WHITELIST = True
    def __init__(self):
        super(CallbackModule, self).__init__()
        # plugin disable itself if say is not present
        # ansible will not call any callback if disabled is set to True
        if not os.path.exists(SAY_CMD):
            self.disabled = True
            self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)))
    def say(self, msg, voice):
        # Blocking call: playback finishes before the run continues.
        subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)])
    def runner_on_failed(self, host, res, ignore_errors=False):
        self.say("Failure on host %s" % host, FAILED_VOICE)
    def runner_on_ok(self, host, res):
        self.say("pew", LASER_VOICE)
    def runner_on_skipped(self, host, item=None):
        self.say("pew", LASER_VOICE)
    def runner_on_unreachable(self, host, res):
        self.say("Failure on host %s" % host, FAILED_VOICE)
    def runner_on_async_ok(self, host, res, jid):
        self.say("pew", LASER_VOICE)
    def runner_on_async_failed(self, host, res, jid):
        self.say("Failure on host %s" % host, FAILED_VOICE)
    def playbook_on_start(self):
        self.say("Running Playbook", REGULAR_VOICE)
    def playbook_on_notify(self, host, handler):
        self.say("pew", LASER_VOICE)
    def playbook_on_task_start(self, name, is_conditional):
        if not is_conditional:
            self.say("Starting task: %s" % name, REGULAR_VOICE)
        else:
            self.say("Notifying task: %s" % name, REGULAR_VOICE)
    def playbook_on_setup(self):
        self.say("Gathering facts", REGULAR_VOICE)
    def playbook_on_play_start(self, name):
        self.say("Starting play: %s" % name, HAPPY_VOICE)
    def playbook_on_stats(self, stats):
        self.say("Play complete", HAPPY_VOICE)
| gpl-3.0 |
putcn/Paddle | tools/timeline.py | 4 | 7120 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
import unittest
import google.protobuf.text_format as text_format
import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2
# Command-line interface: which profiler dump(s) to read and where to
# write the resulting Chrome trace.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    '--profile_path',
    type=str,
    default='',
    help='Input profile file name. If there are multiple file, the format '
    'should be trainer1=file1,trainer2=file2,ps=file3')
parser.add_argument(
    '--timeline_path', type=str, default='', help='Output timeline file name.')
args = parser.parse_args()
class _ChromeTraceFormatter(object):
def __init__(self):
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
class Timeline(object):
    """Converts profiler protobufs (one per trainer/ps) into a Chrome trace.

    Each (profile key, device) pair becomes its own trace "process" lane.
    """
    def __init__(self, profile_dict):
        self._profile_dict = profile_dict
        self._pid = 0
        self._devices = dict()
        self._chrome_trace = _ChromeTraceFormatter()

    def _allocate_pid(self):
        """Return a fresh trace-process id."""
        cur_pid = self._pid
        self._pid += 1
        return cur_pid

    def _allocate_pids(self):
        # First pass: give every (key, device, kind) triple a pid lane.
        # (`.items()` replaces py2-only `.iteritems()`; same behavior.)
        for k, profile_pb in self._profile_dict.items():
            for event in profile_pb.events:
                if event.type == profiler_pb2.Event.CPU:
                    if (k, event.device_id, "CPU") not in self._devices:
                        pid = self._allocate_pid()
                        self._devices[(k, event.device_id, "CPU")] = pid
                        self._chrome_trace.emit_pid("%s:cpu:block:%d" %
                                                    (k, event.device_id), pid)
                elif event.type == profiler_pb2.Event.GPUKernel:
                    if (k, event.device_id, "GPUKernel") not in self._devices:
                        pid = self._allocate_pid()
                        self._devices[(k, event.device_id, "GPUKernel")] = pid
                        self._chrome_trace.emit_pid("%s:gpu:%d" %
                                                    (k, event.device_id), pid)

    def _allocate_events(self):
        # Second pass: emit one duration event per profiler event.
        for k, profile_pb in self._profile_dict.items():
            for event in profile_pb.events:
                if event.type == profiler_pb2.Event.CPU:
                    device_type = "CPU"
                elif event.type == profiler_pb2.Event.GPUKernel:
                    device_type = "GPUKernel"
                else:
                    # Unknown event kinds have no pid lane; skip them
                    # instead of reusing a stale (or unbound) local.
                    continue
                pid = self._devices[(k, event.device_id, device_type)]
                args = {'name': event.name}
                if event.memcopy.bytes > 0:
                    args = {'mem_bytes': event.memcopy.bytes}
                # TODO(panyx0718): Chrome tracing only handles ms. However, some
                # ops takes micro-seconds. Hence, we keep the ns here.
                self._chrome_trace.emit_region(
                    event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid,
                    event.sub_device_id, 'Op', event.name, args)

    def generate_chrome_trace(self):
        """Run both passes and return the serialized JSON trace."""
        self._allocate_pids()
        self._allocate_events()
        return self._chrome_trace.format_to_string()
# Resolve input/output paths, defaulting to /tmp when flags are omitted.
profile_path = '/tmp/profile'
if args.profile_path:
    profile_path = args.profile_path
timeline_path = '/tmp/timeline'
if args.timeline_path:
    timeline_path = args.timeline_path
profile_paths = profile_path.split(',')
profile_dict = dict()
if len(profile_paths) == 1:
    # Single file: treat it as the sole trainer's profile.
    with open(profile_path, 'r') as f:
        profile_s = f.read()
        profile_pb = profiler_pb2.Profile()
        profile_pb.ParseFromString(profile_s)
        profile_dict['trainer'] = profile_pb
else:
    # Multiple files: each entry is "name=file" (e.g. trainer1=file1).
    for profile_path in profile_paths:
        k, v = profile_path.split('=')
        with open(v, 'r') as f:
            profile_s = f.read()
            profile_pb = profiler_pb2.Profile()
            profile_pb.ParseFromString(profile_s)
            profile_dict[k] = profile_pb
tl = Timeline(profile_dict)
with open(timeline_path, 'w') as f:
    f.write(tl.generate_chrome_trace())
| apache-2.0 |
dmilith/SublimeText3-dmilith | Packages/sublime_lib/st3/sublime_lib/_util/enum.py | 1 | 1739 | from functools import partial
from .._compat.enum import EnumMeta, Enum, Flag
from .._compat.typing import Any, Callable, Optional
__all__ = ['ExtensibleConstructorMeta', 'construct_with_alternatives', 'construct_union']
class ExtensibleConstructorMeta(EnumMeta):
    """Metaclass routing ``Cls(...)`` through ``Cls.__new__``.

    This lets the decorators below install a replacement ``__new__`` that
    extends the enum's constructor behavior.
    """
    def __call__(cls, *args: Any, **kwargs: Any) -> Any:
        return cls.__new__(cls, *args, **kwargs)  # type: ignore
def extend_constructor(
    constructor: Callable[..., Enum]
) -> Callable[[EnumMeta], EnumMeta]:
    """Class-decorator factory: wrap an enum's ``__new__`` with `constructor`.

    `constructor` receives the previous ``__new__`` (already bound to the
    class) as its first argument, so successive applications chain.
    """
    def decorator(cls: EnumMeta) -> EnumMeta:
        # Capture the current constructor so the replacement can delegate.
        next_constructor = partial(cls.__new__, cls)
        def __new__(cls: EnumMeta, *args: Any, **kwargs: Any) -> Enum:
            return constructor(next_constructor, cls, *args, **kwargs)
        cls.__new__ = __new__  # type: ignore
        return cls
    return decorator
def construct_with_alternatives(
    provider: Callable[..., Optional[Enum]]
) -> Callable[[EnumMeta], EnumMeta]:
    """Fall back to `provider` when normal enum construction raises ValueError.

    If `provider` returns ``None`` the original ValueError is re-raised.
    """
    def constructor(next_constructor: Callable[..., Enum], cls: EnumMeta,
                    *args: Any, **kwargs: Any) -> Enum:
        try:
            return next_constructor(*args, **kwargs)
        except ValueError:
            result = provider(cls, *args, **kwargs)
            if result is None:
                raise
            else:
                return result
    return extend_constructor(constructor)
def _construct_union(
    next_constructor: Callable[[Any], Flag],
    cls: ExtensibleConstructorMeta,
    *args: Any
) -> Any:
    """Construct a Flag as the bitwise-OR union of every given value.

    With no arguments, returns the empty flag (value 0).
    """
    if args:
        ret, *rest = iter(next_constructor(arg) for arg in args)
        for value in rest:
            ret |= value
        return ret
    else:
        return next_constructor(0)
| mit |
brijeshkesariya/odoo | addons/crm/validate_email.py | 462 | 5978 | # RFC 2822 - style email validation for Python
# (c) 2012 Syrus Akbary <me@syrusakbary.com>
# Extended from (c) 2011 Noel Bush <noel@aitools.org>
# for support of mx and user check
# This code is made available to you under the GNU LGPL v3.
#
# This module provides a single method, valid_email_address(),
# which returns True or False to indicate whether a given address
# is valid according to the 'addr-spec' part of the specification
# given in RFC 2822. Ideally, we would like to find this
# in some other library, already thoroughly tested and well-
# maintained. The standard Python library email.utils
# contains a parse_addr() function, but it is not sufficient
# to detect many malformed addresses.
#
# This implementation aims to be faithful to the RFC, with the
# exception of a circular definition (see comments below), and
# with the omission of the pattern components marked as "obsolete".
import re
import smtplib
import socket
# pydns is optional: without it (or if it fails to load) MX/user-level
# checks are skipped and only syntax validation is available.
try:
    import DNS
    ServerError = DNS.ServerError
except Exception:  # narrowed from bare `except:` (kept broad for broken installs)
    DNS = None

    class ServerError(Exception):
        pass
# All we are really doing is comparing the input string to one
# gigantic regular expression. But building that regexp, and
# ensuring its correctness, is made much easier by assembling it
# from the "tokens" defined by the RFC. Each of these tokens is
# tested in the accompanying unit test file.
#
# The section of RFC 2822 from which each pattern component is
# derived is given in an accompanying comment.
#
# (To make things simple, every string below is given as 'raw',
# even when it's not strictly necessary. This way we don't forget
# when it is necessary.)
#
# --- RFC 2822 grammar tokens -------------------------------------------------
# Each constant below is the regex translation of a single production from
# the RFC; the deriving section is noted in the trailing comment.  They are
# combined bottom-up into VALID_ADDRESS_REGEXP, used by validate_email().
WSP = r'[ \t]'                                       # see 2.2.2. Structured Header Field Bodies
CRLF = r'(?:\r\n)'                                   # see 2.2.3. Long Header Fields
NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f'        # see 3.2.1. Primitive Tokens
QUOTED_PAIR = r'(?:\\.)'                             # see 3.2.2. Quoted characters
FWS = r'(?:(?:' + WSP + r'*' + CRLF + r')?' + \
      WSP + r'+)'                                    # see 3.2.3. Folding white space and comments
CTEXT = r'[' + NO_WS_CTL + \
        r'\x21-\x27\x2a-\x5b\x5d-\x7e]'              # see 3.2.3
CCONTENT = r'(?:' + CTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.2.3 (NB: The RFC includes COMMENT here
                                                     # as well, but that would be circular.)
COMMENT = r'\((?:' + FWS + r'?' + CCONTENT + \
          r')*' + FWS + r'?\)'                       # see 3.2.3
CFWS = r'(?:' + FWS + r'?' + COMMENT + ')*(?:' + \
       FWS + '?' + COMMENT + '|' + FWS + ')'         # see 3.2.3
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]'           # see 3.2.4. Atom
ATOM = CFWS + r'?' + ATEXT + r'+' + CFWS + r'?'      # see 3.2.4
DOT_ATOM_TEXT = ATEXT + r'+(?:\.' + ATEXT + r'+)*'   # see 3.2.4
DOT_ATOM = CFWS + r'?' + DOT_ATOM_TEXT + CFWS + r'?' # see 3.2.4
QTEXT = r'[' + NO_WS_CTL + \
        r'\x21\x23-\x5b\x5d-\x7e]'                   # see 3.2.5. Quoted strings
QCONTENT = r'(?:' + QTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.2.5
QUOTED_STRING = CFWS + r'?' + r'"(?:' + FWS + \
                r'?' + QCONTENT + r')*' + FWS + \
                r'?' + r'"' + CFWS + r'?'
LOCAL_PART = r'(?:' + DOT_ATOM + r'|' + \
             QUOTED_STRING + r')'                    # see 3.4.1. Addr-spec specification
DTEXT = r'[' + NO_WS_CTL + r'\x21-\x5a\x5e-\x7e]'    # see 3.4.1
DCONTENT = r'(?:' + DTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.4.1
DOMAIN_LITERAL = CFWS + r'?' + r'\[' + \
                 r'(?:' + FWS + r'?' + DCONTENT + \
                 r')*' + FWS + r'?\]' + CFWS + r'?'  # see 3.4.1
DOMAIN = r'(?:' + DOT_ATOM + r'|' + \
         DOMAIN_LITERAL + r')'                       # see 3.4.1
ADDR_SPEC = LOCAL_PART + r'@' + DOMAIN               # see 3.4.1
# A valid address will match exactly the 3.4.1 addr-spec.
VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$'
def validate_email(email, check_mx=False, verify=False):
    """Indicate whether the given string is a valid email address
    according to the 'addr-spec' portion of RFC 2822 (see section
    3.4.1). Parts of the spec that are marked obsolete are *not*
    included in this test, and certain arcane constructions that
    depend on circular definitions in the spec may not pass, but in
    general this should correctly identify any email address likely
    to be in use as of 2011.

    email    -- the address to validate
    check_mx -- also check that the domain has an MX record (needs pyDNS)
    verify   -- also ask the MX host whether the mailbox exists
                (implies check_mx)
    """
    # Plain "if" instead of "assert": assertions are stripped when Python
    # runs with -O, which would have silently disabled all validation.
    if re.match(VALID_ADDRESS_REGEXP, email) is None:
        return False
    check_mx |= verify
    if not check_mx:
        return True
    if not DNS:
        raise Exception('To check the MX records or verify that the email '
                        'exists you must install the pyDNS python package')
    try:
        DNS.DiscoverNameServers()
        hostname = email[email.find('@')+1:]
        mx_hosts = DNS.mxlookup(hostname)
        for mx in mx_hosts:
            smtp = smtplib.SMTP()
            try:
                smtp.connect(mx[1])
                if not verify:
                    return True
                status, _ = smtp.helo()
                if status != 250:
                    continue
                smtp.mail('')
                status, _ = smtp.rcpt(email)
                if status != 250:
                    return False
                break
            except smtplib.SMTPServerDisconnected: #Server not permits verify user
                break
            except smtplib.SMTPConnectError:
                continue
            finally:
                # The original code leaked the SMTP socket on every path.
                try:
                    smtp.close()
                except Exception:
                    pass
    except ServerError:
        # DNS failure for the domain: treat the address as invalid.
        return False
    return True
# import sys
# sys.modules[__name__],sys.modules['validate_email_module'] = validate_email,sys.modules[__name__]
# from validate_email_module import *
| agpl-3.0 |
chen0510566/MissionPlanner | Lib/site-packages/numpy/distutils/npy_pkg_config.py | 53 | 13468 | import sys
if sys.version_info[0] < 3:
from ConfigParser import SafeConfigParser, NoOptionError
else:
from configparser import SafeConfigParser, NoOptionError
import re
import os
import shlex
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
# Matches a "${variable_name}" interpolation marker inside a config value.
_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
    """Raised when a configuration file cannot be parsed."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class PkgNotFound(IOError):
    """Raised when a package configuration file cannot be located."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
def parse_flags(line):
    """
    Parse a line from a config file containing compile flags.

    Parameters
    ----------
    line : str
        A single line containing one or more compile flags.

    Returns
    -------
    d : dict
        Dictionary of parsed flags, split into relevant categories.
        These categories are the keys of `d`:

        * 'include_dirs'
        * 'library_dirs'
        * 'libraries'
        * 'macros'
        * 'ignored'
    """
    lexer = shlex.shlex(line)
    lexer.whitespace_split = True

    flags = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
             'macros': [], 'ignored': []}

    token = lexer.get_token()
    while token:
        if token.startswith('-I'):
            # "-I/path" (attached) or "-I /path" (path in the next token)
            if len(token) > 2:
                flags['include_dirs'].append(token[2:])
            else:
                flags['include_dirs'].append(lexer.get_token())
        elif token.startswith('-L'):
            if len(token) > 2:
                flags['library_dirs'].append(token[2:])
            else:
                flags['library_dirs'].append(lexer.get_token())
        elif token.startswith('-l'):
            flags['libraries'].append(token[2:])
        elif token.startswith('-D'):
            flags['macros'].append(token[2:])
        else:
            flags['ignored'].append(token)
        token = lexer.get_token()

    return flags
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
    """
    Object containing build information about a library.

    Parameters
    ----------
    name : str
        The library name.
    description : str
        Description of the library.
    version : str
        Version string.
    sections : dict
        The sections of the configuration file for the library. The keys are
        the section headers, the values the text under each header.
    vars : class instance
        A `VariableSet` instance, which contains ``(name, value)`` pairs for
        variables defined in the configuration file for the library.
    requires : sequence, optional
        The required libraries for the library to be installed.

    Notes
    -----
    All input parameters (except "sections" which is a method) are available as
    attributes of the same name.
    """
    def __init__(self, name, description, version, sections, vars, requires=None):
        self.name = name
        self.description = description
        if requires:
            self.requires = requires
        else:
            self.requires = []
        self.version = version
        self._sections = sections
        self.vars = vars

    def sections(self):
        """
        Return the section headers of the config file.

        Returns
        -------
        keys : list of str
            The list of section headers.
        """
        return self._sections.keys()

    def cflags(self, section="default"):
        # Expand ${var} references, then escape backslashes so the result
        # survives being embedded in another config or command line.
        val = self.vars.interpolate(self._sections[section]['cflags'])
        return _escape_backslash(val)

    def libs(self, section="default"):
        val = self.vars.interpolate(self._sections[section]['libs'])
        return _escape_backslash(val)

    def __str__(self):
        m = ['Name: %s' % self.name]
        m.append('Description: %s' % self.description)
        # BUG FIX: these two branches were swapped -- the names of the
        # required libraries were printed only when the list was empty.
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
        m.append('Version: %s' % self.version)

        return "\n".join(m)
class VariableSet(object):
    """
    Container object for the variables defined in a config file.

    `VariableSet` can be used as a plain dictionary, with the variable names
    as keys.

    Parameters
    ----------
    d : dict
        Dict of items in the "variables" section of the configuration file.
    """
    def __init__(self, d):
        self._raw_data = dict(d.items())
        self._re = {}
        self._re_sub = {}
        self._init_parse()

    def _init_parse(self):
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse_var(self, name, value):
        # One pre-compiled pattern per variable, matching "${name}".
        self._re[name] = re.compile(r'\$\{%s\}' % name)
        self._re_sub[name] = value

    def interpolate(self, value):
        # Keep substituting until no "${var}" remains, or until a full pass
        # changes nothing (which means an unknown variable is present).
        def _substitute_all(value):
            for name in self._re.keys():
                value = self._re[name].sub(self._re_sub[name], value)
            return value

        while _VAR.search(value):
            expanded = _substitute_all(value)
            if expanded == value:
                break
            value = expanded
        return value

    def variables(self):
        """
        Return the list of variable names.

        Returns
        -------
        names : list of str
            The names of all variables in the `VariableSet` instance.
        """
        return self._raw_data.keys()

    # Emulate a dict to set/get variables values
    def __getitem__(self, name):
        return self._raw_data[name]

    def __setitem__(self, name, value):
        self._raw_data[name] = value
        self._init_parse_var(name, value)
def parse_meta(config):
    """Parse the [meta] section of *config* into a plain dict.

    Raises FormatError when the section is missing or one of the mandatory
    options (name, description, version) is absent; 'requires' defaults to
    an empty list.
    """
    if not config.has_section('meta'):
        raise FormatError("No meta section found !")

    d = dict(config.items('meta'))

    for k in ['name', 'description', 'version']:
        # BUG FIX: dict.has_key() does not exist on Python 3, which this
        # module explicitly supports; use the "in" operator instead.
        if k not in d:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                "but not found" % k)

    if 'requires' not in d:
        d['requires'] = []

    return d
def parse_variables(config):
    """Parse the [variables] section of *config* into a VariableSet."""
    if not config.has_section('variables'):
        raise FormatError("No variables section found !")
    return VariableSet(dict(config.items("variables")))
def parse_sections(config):
    # NOTE(review): `meta_d` and `r` are undefined in this scope, so calling
    # this function raises NameError.  It appears to be dead/unfinished
    # code; parse_config() performs the actual section parsing inline.
    return meta_d, r
def pkg_to_filename(pkg_name):
    """Map a package name to the name of its .ini configuration file."""
    return pkg_name + ".ini"
def parse_config(filename, dirs=None):
    """Read one .ini config file, looking in *dirs* when given.

    Returns the tuple (meta, vars, sections, requires).
    """
    if dirs:
        filenames = [os.path.join(d, filename) for d in dirs]
    else:
        filenames = [filename]

    config = SafeConfigParser()
    found = config.read(filenames)
    if len(found) < 1:
        raise PkgNotFound("Could not find file(s) %s" % str(filenames))

    # Parse meta and variables sections
    meta = parse_meta(config)

    vars = {}
    if config.has_section('variables'):
        for name, value in config.items("variables"):
            vars[name] = _escape_backslash(value)

    # Parse "normal" sections
    secs = [s for s in config.sections() if s not in ('meta', 'variables')]
    sections = {}
    requires = {}
    for s in secs:
        if config.has_option(s, "requires"):
            requires[s] = config.get(s, 'requires')
        sections[s] = dict(config.items(s))

    return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
    """Read a config file and, recursively, the configs it requires.

    Returns a fully populated LibraryInfo instance.
    """
    def _read_config(f):
        meta, vars, sections, reqs = parse_config(f, dirs)
        # recursively add sections and variables of required libraries
        for rname, rvalue in reqs.items():
            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))

            # Update var dict for variables not in 'top' config file
            for k, v in nvars.items():
                # BUG FIX: dict.has_key() does not exist on Python 3, which
                # this module explicitly supports; use "in" instead.
                if k not in vars:
                    vars[k] = v

            # Update sec dict
            for oname, ovalue in nsections[rname].items():
                if ovalue:
                    sections[rname][oname] += ' %s' % ovalue

        return meta, vars, sections, reqs

    meta, vars, sections, reqs = _read_config(filenames)

    # FIXME: document this. If pkgname is defined in the variables section, and
    # there is no pkgdir variable defined, pkgdir is automatically defined to
    # the path of pkgname. This requires the package to be imported to work
    if 'pkgdir' not in vars and 'pkgname' in vars:
        pkgname = vars["pkgname"]
        if pkgname not in sys.modules:
            raise ValueError("You should import %s to get information on %s" %
                             (pkgname, meta["name"]))

        mod = sys.modules[pkgname]
        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))

    return LibraryInfo(name=meta["name"], description=meta["description"],
            version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache to cache LibraryInfo instances creation. To be really
# efficient, the cache should be handled in read_config, since a same file can
# be parsed many time outside LibraryInfo creation, but I doubt this will be a
# problem in practice
# Maps package name -> LibraryInfo, filled lazily by read_config().
_CACHE = {}
def read_config(pkgname, dirs=None):
    """
    Return library info for a package from its configuration file.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of directories - usually including
        the NumPy base directory - where to look for npy-pkg-config files.

    Returns
    -------
    pkginfo : class instance
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    misc_util.get_info, misc_util.get_pkg_info

    Examples
    --------
    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
    >>> type(npymath_info)
    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
    >>> print npymath_info
    Name: npymath
    Description: Portable, core math library implementing C99 standard
    Requires:
    Version: 0.1  #random
    """
    # Parse the file only on the first request for this package; later
    # calls return the cached LibraryInfo instance.
    if pkgname not in _CACHE:
        _CACHE[pkgname] = _read_config_imp(pkg_to_filename(pkgname), dirs)
    return _CACHE[pkgname]
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
    import sys
    import os
    import glob
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--cflags", dest="cflags", action="store_true",
                      help="output all preprocessor and compiler flags")
    parser.add_option("--libs", dest="libs", action="store_true",
                      help="output all linker flags")
    parser.add_option("--use-section", dest="section",
                      help="use this section instead of default for options")
    parser.add_option("--version", dest="version", action="store_true",
                      help="output version")
    parser.add_option("--atleast-version", dest="min_version",
                      help="Minimal version")
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      # BUG FIX: the help text was a copy-paste of the
                      # --atleast-version option's ("Minimal version").
                      help="list all packages with a .ini file in the current directory")
    parser.add_option("--define-variable", dest="define_variable",
                      help="Replace variable with the given value")

    (options, args) = parser.parse_args(sys.argv)
    if len(args) < 2:
        raise ValueError("Expect package name on the command line:")

    if options.list_all:
        files = glob.glob("*.ini")
        for f in files:
            # BUG FIX: read_config() expects the bare package name and
            # appends ".ini" itself; passing the file name made it look
            # for "<name>.ini.ini".
            info = read_config(os.path.splitext(f)[0])
            print("%s\t%s - %s" % (info.name, info.name, info.description))

    pkg_name = args[1]
    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
    else:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])

    if options.section:
        section = options.section
    else:
        section = "default"

    if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
        if not m:
            raise ValueError("--define-variable option should be of " \
                             "the form --define-variable=foo=bar")
        else:
            name = m.group(1)
            value = m.group(2)
            info.vars[name] = value

    if options.cflags:
        print(info.cflags(section))
    if options.libs:
        print(info.libs(section))
    if options.version:
        print(info.version)
    if options.min_version:
        print(info.version >= options.min_version)
| gpl-3.0 |
securestate/king-phisher | king_phisher/client/dialogs/__init__.py | 5 | 1853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/__init__.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .about import *
from .campaign_selection import *
from .clone_page import *
from .company_editor import *
from .configuration import *
from .entry import *
from .exception import *
from .login import *
from .ssh_host_key import *
from .tag_editor import *
| bsd-3-clause |
dboonz/polymode | Polymode/Solver.py | 5 | 24557 | # _*_ coding=utf-8 _*_
#
#---------------------------------------------------------------------------------
#Copyright © 2009 Andrew Docherty
#
#This program is part of Polymode.
#Polymode is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------------
"""
Solver.py
===========
Main solve class for Polymode
Solvers
-------
AdaptiveWavelengthTrack
- Solver for calculating modes over a range of wavelengths
with an adaptive wavelength step
WavelengthScan
- Solver for calculating modes over a range of wavelengths
specifying each wavelength to solve at
WavelengthConditionScan
- Solver that returns a map of the condition number over
effective index versus the wavelength. No mode solving
is performed.
Utility functions
-----------------
batch_file_save(solvers, filename=None)
- Save solvers in batch file for later solution
batch_file_load(filename=None)
- Load solvers in from a batch file
batch_file_load_modes(filename=None)
- Load modes directly from batch file
batch_solve(solvers, filename=None)
- Solve problems in list solvers saving periodically to the specified file if given.
batch_continue(filename)
- Continue an aborted batch_solve with a batch file
export_to_nader(solver, prefix="")
- Export waveguide.in and input_paramters.in to be read by Nader's solver
"""
from __future__ import division
import logging
import numpy as np
#To be depricated, should use above imports only
from numpy import *
from . import Material, Waveguide, Equation, Modes, Plotter
# Cached random functions
class CachedRandom(object):
    """
    Random-array source that caches and replays its output.

    Successive calls return fresh random arrays of the requested shape;
    after reset() the same arrays are replayed in order, so repeated runs
    see identical "random" starting vectors.
    """
    def __init__(self):
        self.cache = []
        self.index = 0
    def reset(self):
        "Rewind so that subsequent calls replay the cached arrays."
        self.index = 0
    def __call__(self, shape):
        # BUG FIX: np.random replaces "from scipy import random", an alias
        # that has been removed from modern SciPy (it was the same
        # numpy.random generator anyway).
        if self.index >= len(self.cache):
            self.cache += [np.random.random(shape)]
        x = self.cache[self.index]
        self.index += 1
        return x
#**************************************************************************************
class Solve(object):
    '''
    Main solve class for VWE/SWE. Construct a solver object with
    wg: The waveguide
    Nres: The resolution of the calculation grid
    store: Store mode field information if true
    label: Dict of information to associate with the solver and mode
    compress_to_size: Size to compress to or None to not store modes
    mode_calculations: Calculate mode information before discarding fields
    '''
    def __init__(self, wg, Nres=None, store=True, compress_to_size=None,
                    mode_calculations=False, label=None):
        self.wg = wg
        self.base_shape = Nres
        #General solver options - All solvers should support these paramters
        self.store_mode_properties = mode_calculations
        self.store_fields = store
        self.compress_to_size = compress_to_size
        self.force_electric_calculation = False
        self.dependancies = [] #Don't run this solver until these are true
        #BUG FIX: label previously defaulted to a shared mutable dict ({}),
        #so labelling one solver labelled every solver using the default.
        self.label = {} if label is None else label #Custom label to identify the solver
        self.dtype = complex128
        self.modes = []
        #Setup equation with default parameters, can call it to customize
        #Solver specific paramters
        self.setup()
    def setup(self):
        #Solver specific paramteres; overloaded by subclasses
        pass
    def add_dependancy(self, depends):
        "Add solver to dependancy list"
        depends = atleast_1d(depends)
        for d in depends:
            if hasattr(d,'id'):
                self.dependancies.append(d.id)
            elif 0<int(d)<len(Solve.ids):
                #NOTE(review): Solve.ids is not defined anywhere in this
                #module -- this branch raises AttributeError if taken.
                self.dependancies.append(Solve.ids[d])
            else:
                #Parenthesized raise works on both Python 2 and 3; the old
                #"raise LookupError, msg" form is Python 2 only.
                raise LookupError("Dependancy not recognised, should be a solver")
    # +-----------------------------------------------------------------------+
    # | General solver functions .. may be overloaded
    # +-----------------------------------------------------------------------+
    def plot(self):
        "Plot the effective indices of the found modes"
        import pylab as p_
        col_red = array([0.8,0,0.2])
        col_blue = array([0.2,0,0.8])
        neffs = [md.neff for md in self.modes]
        spurious = array([md.guess_spurious() for md in self.modes])
        nconverged = array([md.residue>self.tolerance for md in self.modes])
        colors = col_red*spurious[:,newaxis] + col_blue*nconverged[:,newaxis]
        p_.scatter(real(neffs), imag(neffs), s=5, c=colors, marker='o')
    ##
    ## Mode information functions
    ##
    def guess_spurious_mode(self, mode, cutoff=5.0):
        '''
        Guess if this is a real or spurious mode based on the mode
        intensity distribution
        '''
        #The outermost object in the waveguide, guess if not given
        router = 0.95*self.wg.get_rmax(0)
        c = mode.coord
        #If the coord object doesn't have a rv member or
        #mode doesn't have field information this will fail
        try:
            #RMS of magnetic intensity over azimuthal direction
            hr,ha,hz = mode.magnetic_field()
            #BUG FIX: the azimuthal term |ha|^2 was summed twice and the
            #longitudinal term |hz|^2 was missing from the intensity.
            pprofile = mean(abs(hr)**2+abs(ha)**2+abs(hz)**2,axis=1)
            pfrac = mean(pprofile[c.rv>router])/mean(pprofile[c.rv<router])
        except Exception:
            #Missing fields/coord information: treat as not spurious
            pfrac=0
        mode.is_spurious = pfrac>cutoff
        return mode.is_spurious
    def residue(self, x, l=None):
        #Overloaded by subclasses
        pass
    ## Interface to generic solver commands
    def get_data(self):
        "Return the modes found so far"
        return self.modes
    def clear_data(self):
        "Discard all found modes"
        self.modes = []
    def _clean_up_temporary_data(self):
        """
        Remove and clean up any temporary matrices
        or other data used
        """
        pass
    def calculate(self, number=inf):
        #Overloaded by subclasses; performs the actual mode search
        pass
    def __call__(self, *args, **kwargs):
        """
        Solve the constructed problem with
        m0: the waveguide symmetry index
        wl: wavelength
        neffrange: the upper and lower real effective indices of the search range
        nefflist: Find modes near these effective indices
        modelist: Find modes near these modes
        totalnumber: total number of modes to find
        """
        self.initialize(*args, **kwargs)
        self.calculate()
        self.finalize()
        return self.modes
    def initialize(self, wl, m0=0, neffrange=None, nefflist=None, modelist=None, number=1):
        '''
        Setup the solver with pre-calculation parameters with:
        wl: wavelength
        m0: the waveguide symmetry index
        neffrange: the upper and lower real effective indices of the search range
        nefflist: Find modes near these effective indices
        modelist: Find modes near these modes
        totalnumber: total number of modes to find
        '''
        self.m0 = m0
        self.wl = wl
        self.k0 = 2*pi/wl
        #Set number and neffrange depending on the case
        self.numbercalculated = 0
        if nefflist is not None:
            self.bracket = 0,inf
            self.totalnumber = len(nefflist)
        elif modelist is not None:
            self.bracket = 0,inf
            self.totalnumber = len(modelist)
        else:
            #Calculate range from core index if not given
            self.bracket = self.wg.index_range(wl)
            self.totalnumber = number
        #Or manual setting
        if neffrange is not None:
            if iterable(neffrange):
                self.bracket = neffrange
            else:
                self.bracket = (self.wg.index_range(wl)[0], neffrange)
        #Clear modes
        self.clear_data()
        #Mode/neff lists if any
        self.nefflist = nefflist
        self.modelist = modelist
        self.is_finalized = False
    def _estimate_complete_fraction(self):
        "Return a number between 0 (started) and 1 (finished)"
        return float(len(self.modes))/self.totalnumber
    def finalize(self):
        """
        Finalize the modes after the solver has finished.

        Including
         - Clean up temporary objects
         - Delete or compress mode vectors if required
         - Remove debug information if not in debugging mode
        """
        #Clean up temprorary data
        self._clean_up_temporary_data()
        logging.info("Finalizing calculated modes")
        for ii,mode in enumerate(self.modes):
            #Label the mode
            mode.label = self.label
            #Update spurious indicator
            self.guess_spurious_mode(mode)
            #Remove calculated EF if forced
            if self.store_fields:
                mode.store_calculated_electric_field(wg=self.wg, force=self.force_electric_calculation)
                if self.compress_to_size is not None:
                    mode.compress(self.compress_to_size, self.wg)
                #Add extension for behaviour outside the computational domain
                mode.normalize(wg=self.wg)
            else:
                mode.discard_fields()
        #Sort modes
        self.modes.sort(reverse=True)
        self.is_finalized = True
class AdaptiveWavelengthTrack(Solve):
    '''
    Track modes over a wavelength range with adaptive step size
    '''
    def __init__(self, solver, track_range=None, dont_lose_modes=False):
        self.solver = solver
        #BUG FIX: the track_range argument was previously discarded
        #(self.track_range was unconditionally set to None).
        self.track_range = track_range
        self.ga_target = 1e-3
        self.dont_lose_modes = dont_lose_modes
        Solve.__init__(self, solver.wg, compress_to_size=solver.compress_to_size)
    def initialize(self, wl_range, *args, **kwargs):
        #Record the wavelength range and the arguments forwarded to the
        #subordinate solver at every wavelength.
        self.wl_range = wl_range
        self.solver_args = args
        self.solver_kwargs = kwargs
        #We need the m0 SC to restart the solver at different wavelengths
        #This shouldn't be needed!
        self.m0 = args[0] if len(args)>0 else kwargs.get('m0', 0)
    def calculate(self, number=inf):
        import pylab as pl
        solver = self.solver
        #Setup wavelength range
        wl_start, wl_stop = self.wl_range
        #Starting step size
        dwl = (wl_stop-wl_start)/100.0
        #Tolerances for adaptive step sizes
        dwl_minimum = dwl/10
        dwl_maximum = 5*dwl
        ga_target = self.ga_target
        ga_minimum = ga_target/10
        ga_maximum = ga_target*10
        #Find start modes to track
        modes = self.solver(wl_start, *self.solver_args, **self.solver_kwargs)
        #Tracking modes
        Nm = len(modes)
        #Bail if we can't find any modes to start with
        if Nm<1:
            logging.error("No modes found with intial solver parameters, wavelength track aborted")
            return []
        else:
            logging.info("Now tracking %d modes" % Nm)
        dneffdwl = zeros(Nm, complex_)
        modes_track = [m.copy() for m in modes]
        num_eval_backtrack = num_eval = 0
        do_update=True
        wl = wl_start
        self.modes = list(modes)
        while wl<wl_stop:
            #Update wavelength
            wl += dwl
            logging.info("WL %.6g, step size: %.4g" % (wl,dwl))
            #Find new modes near the tracked (extrapolated) ones
            self.solver.initialize(wl, self.m0, modelist=modes_track)
            modes_current = self.solver.calculate()
            num_eval +=1
            if 0:
                #Disabled debug code: perturbative eigenvalue prediction
                m1 = modes[0]
                solver.equation.set_lambda(m1.evalue)
                M1x = solver.equation.matvec(m1.right) - m1.evalue*m1.right
                solver.jacobian.setup(solver.base_shape,solver.wg,self.m0,wl+dwl)
                solver.jacobian.set_lambda(m1.evalue)
                M0px = solver.jacobian.matvec(m1.right) - m1.right
                dmu = -dot(conj(m1.left), M1x)/dot(conj(m1.left), M0px)
                neff_guess = sqrt(m1.evalue+dmu)/(2*pi/m1.wl)
            Nm_current = len(modes_current)
            if Nm_current==0: #Jump to next point and try and find modes there
                continue
            elif Nm_current<Nm: #Find a replacement mode?
                if self.dont_lose_modes:
                    wl -= dwl/2
                    logging.warning("Lost %d modes: Retracking" % (Nm - Nm_current))
                    continue
                else:
                    logging.warning("Lost %d modes" % (Nm - Nm_current))
            elif Nm_current>Nm:
                logging.warning("Found more modes than requested!")
            #Calculate mode differences
            remove_modes = []
            dneffdwl_last = dneffdwl
            dneffdwl = zeros(Nm_current, complex_)
            ga_max = 0; ga_min = inf
            for ii in range(Nm_current):
                neff = modes_current[ii].neff
                #Find closest neff
                neff_differences = [neff - x.neff for x in modes_track]
                track_closest = np.argmin(np.absolute(neff_differences))
                #Calculate dispersion from previous mode
                dneffdwl[ii] = (modes[track_closest].neff - neff)/dwl
                #Guess accuracy
                ga = abs(neff_differences[track_closest])/abs(neff)
                ga_max=max(ga_max,ga); ga_min=min(ga_min,ga)
                #Have the modes left the tracked range?
                #BUG FIX: the range was previously read from a bare
                #"track_range" name, raising NameError when a range was set.
                if self.track_range is not None and (neff<min(self.track_range) or neff>max(self.track_range)):
                    logging.warning("Mode has left tracked neff range")
                    remove_modes.append(ii)
            #Adaptive guess for next dwl
            accept = True
            if wl>wl_start+dwl:
                if ga_max>0:
                    dwl_target = dwl*(ga_target/ga_max)**(0.5)
                if (ga_max>ga_maximum) and (dwl>dwl_minimum):
                    logging.info("Eigenvalue change to large. Backtracking")
                    accept = False
                    dwl_target = min(dwl_target,dwl*0.5)
                dwl = dwl_target
            #Guess next neff
            if accept:
                self.modes += modes_current
                dneffdwl_last = dneffdwl
                modes = modes_current
            #Backtrack!!
            else:
                wl -= dwl
                dneffdwl = dneffdwl_last
                num_eval_backtrack +=1
            #Use length of current modes, which must be the same as length of dneffdwl
            Nm = len(modes)
            #Truncate modes_track otherwise modes can be larger than modes_last
            modes_track = [m.copy() for m in modes]
            #Update neff for modes_track
            for ii in range(Nm):
                modes_track[ii].neff = (modes[ii].neff + dneffdwl[ii]*dwl)
            logging.debug("Dispersion: %s " % dneffdwl)
            logging.debug("Guess accuracy: %0.4g -> %0.4g" % (ga_max, ga_min))
        logging.info("Total points: %d, number of backtracks: %d" % (num_eval, num_eval_backtrack))
        return self.modes
    def update_eigenvector(self,m1,m2):
        #NOTE(review): this method appears to be dead debugging code; it
        #references undefined names (solver, m0, delta, neff_guess,
        #blockarray, Solver, linalg) and raises NameError if called.
        #Calculate perturbation
        # eps = 1e-3
        # solver.equation.setup(solver.base_shape,solver.wg,m0,m1.wavelength+eps)
        # solver.equation.set_lambda(m1.evalue)
        # M1xp = solver.equation.matvec(m1.right)
        # solver.equation.setup(solver.base_shape,solver.wg,m0,m1.wavelength-eps)
        # solver.equation.set_lambda(m1.evalue)
        # M1xm = solver.equation.matvec(m1.right)
        # M1x = (M1xp-M1xm)/(2*eps)
        solver.equation.setup(solver.base_shape,solver.wg,m0,m2.wavelength)
        solver.equation.set_lambda(m1.evalue)
        M1x = solver.equation.matvec(m1.right) - m1.evalue*m1.right
        solver.jacobian.setup(solver.base_shape,solver.wg,m0,m1.wavelength)
        solver.jacobian.set_lambda(m1.evalue)
        M0px = solver.jacobian.matvec(m1.right) - m1.right
        dmu = -dot(conj(m1.left), M1x)/dot(conj(m1.left), M0px)
        dneffc1 = (m1.neff**2/m1.wavelength+0.5*dmu/m1.k0)/m1.neff
        dneffc = sqrt(m1.evalue+dmu)/m2.k0 - m1.neff
        #print() form works on both Python 2 and 3 (the old statement
        #syntax was Python 2 only and made the whole file unparsable on 3)
        print("dneff(1)", dneffc1)
        print("dneff(2)", dneffc)
        print()
        neff_guess += [sqrt(m1.evalue+dmu)/m2.k0]
        #Find correction to eigenvector
        mu2 = m2.evalue+0*dmu
        Mx1 = -(M0px*dmu/delta + M1x)
        #Approx:
        if not hasattr(solver, 'matrix'):
            Nr, Naz = solver.base_shape
            bw = solver.equation.diff.bandwidth
            blockshape = (solver.equation.pmax*Naz,)*2
            solver.matrix = blockarray.BlockArray((Nr,bw), blockshape=blockshape, dtype=complex_)
        si = Solver.ShiftInvertBlock(overwrite=False)
        solver.generate()
        si.set_shift(solver.matrix, complex(m1.evalue))
        x1 = si.matvec(Mx1)
        y = m1.right + delta*x1
        solver.equation.set_lambda(m2.evalue)
        print("Diff1", linalg.norm(solver.equation(y)-m2.evalue*y))
        print("Diff2", linalg.norm(solver.equation(m1.right)-m2.evalue*m1.right))
    def plot(self, style=''):
        """Plot the found effective index versus the wavelength for all modes
        found in the wavelength scan.

        Arguments:
        style: the matplotlib line style for the plotted points
        """
        Plotter.plot_mode_properties(self.modes, 'neff', 'wl', style=style)
    def finalize(self):
        #Modes should be already finalized by the subordinate solver,
        #here we just sort them by wavelength.  sort(key=...) replaces the
        #Python2-only sort(cmp=...) form and gives the same ordering.
        self.modes.sort(key=lambda m: m.wl)
class WavelengthScan(Solve):
    '''
    Find all modes within a range at constant wavelength step size
    '''
    def __init__(self, solver, Nscan=100):
        self.solver = solver
        self.Nscan = Nscan
        Solve.__init__(self, solver.wg, compress_to_size=solver.compress_to_size)
    def initialize(self, wl_range, *args, **kwargs):
        #Record the wavelength range and the arguments forwarded to the
        #subordinate solver at every wavelength.
        self.wl_range = wl_range
        self.solver_args = args
        self.solver_kwargs = kwargs
    def calculate(self, number=inf):
        import pylab as pl
        solver = self.solver
        #Setup wavelength range
        wl_start, wl_stop = self.wl_range
        #Step size
        dwl = (wl_stop-wl_start)/self.Nscan
        wl = wl_start
        self.modes = []
        while wl<wl_stop:
            logging.info("WL %.6g, step size: %.4g" % (wl,dwl))
            #Find new modes at this wavelength
            modes_current = self.solver(wl, *self.solver_args, **self.solver_kwargs)
            self.modes.extend(modes_current)
            #Update wavelength
            wl += dwl
        return self.modes
    def plot(self, style=''):
        """Plot the found effective index versus the wavelength for all modes
        found in the wavelength scan.

        Arguments:
        style: the matplotlib line style for the plotted points
        """
        Plotter.plot_mode_properties(self.modes, 'neff', 'wl', style=style)
    def finalize(self):
        #Sort modes by wavelength.  sort(key=...) replaces the Python2-only
        #sort(cmp=...) form and gives the same ordering.
        self.modes.sort(key=lambda m: m.wl)
class WavelengthConditionScan(Solve):
    '''
    Scan over a wavelength range and plot a condition number for the modal
    eigenvalue problem. The exact nature of this condition number depends
    upon the nature of the algorithm in the supplied solver
    '''
    def __init__(self, solver, Nscan=(20, 100)):
        # solver: subordinate solver providing condition() and k0
        # Nscan: (number of wavelength samples, number of neff samples)
        self.solver = solver
        self.Nscan = Nscan
        # The condition number scan is stored here
        self.Cscan = np.zeros(self.Nscan, dtype=float)
        self.neffscan = np.zeros(self.Nscan, dtype=float)
        self.wlscan = np.zeros(self.Nscan[0], dtype=float)
        Solve.__init__(self, solver.wg, compress_to_size=solver.compress_to_size)

    def initialize(self, wl_range, *args, **kwargs):
        self.wl_range = wl_range
        self.solver_args = args
        self.solver_kwargs = kwargs
        self.solver.initialize(wl_range[0], *args, **kwargs)
        # Optional explicit effective-index range; when absent the
        # waveguide's own index range at each wavelength is used.
        self.neffrange = kwargs.get('neffrange')

    def calculate(self, number=inf):
        # Setup wavelengths
        dwl = (self.wl_range[1] - self.wl_range[0]) / self.Nscan[0]
        for ii in range(self.Nscan[0]):
            wl = self.wl_range[0] + ii * dwl
            logging.info("Calculating scan at %d of %d points" % (ii + 1, self.Nscan[0]))
            # Update wavelength.  NOTE(review): solver_kwargs are not
            # forwarded here (only solver_args) -- confirm this is intended.
            self.solver.initialize(wl, *self.solver_args)
            # Effective-index range to scan at this wavelength
            if self.neffrange is None:
                neffrange = self.wg.index_range(wl)
            else:
                neffrange = self.neffrange
            dneff = (neffrange[1] - neffrange[0]) / self.Nscan[1]
            neffs = np.arange(neffrange[0], neffrange[1], dneff)
            # Scan over the beta range and record |condition number|
            self.Cscan[ii] = np.abs(self.solver.condition(neffs * self.solver.k0))
            self.neffscan[ii] = neffs
            self.wlscan[ii] = wl
        return self.Cscan

    def plot(self, style=None):
        """Contour-plot log10 of the condition number over (wl, neff).

        Arguments:
        style: optional dict of extra keyword arguments passed to contourf
        """
        import pylab as pl
        # A mutable default argument ({}) would be shared between calls;
        # use None as the default instead.
        if style is None:
            style = {}
        wlscan = self.wlscan[:, newaxis] + 0 * self.neffscan
        # We need to plot it twice otherwise it introduces odd lines
        pl.contourf(wlscan, self.neffscan, np.log10(self.Cscan), 100, **style)
        pl.contourf(wlscan, self.neffscan, np.log10(self.Cscan), 100, **style)

    def finalize(self):
        pass
def batch_file_save(solvers, filename=None):
    "Save solvers in batch file for later solution"
    # cPickle exists only on Python 2; fall back to pickle on Python 3.
    try:
        from cPickle import dump
    except ImportError:
        from pickle import dump
    try:
        # with-statement closes the file even on error (the original left
        # the inline open() handle unclosed).
        with open(filename, 'wb') as fileobj:
            dump(solvers, fileobj)
    except IOError:
        logging.error("Failed to save solvers to file %s" % filename)
def batch_file_load(filename=None):
    "Load solvers in from a batch file"
    # cPickle exists only on Python 2; fall back to pickle on Python 3.
    try:
        from cPickle import load
    except ImportError:
        from pickle import load
    try:
        # with-statement closes the file even on error (the original left
        # the inline open() handle unclosed).
        with open(filename, 'rb') as fileobj:
            solvers = load(fileobj)
    except IOError:
        # A missing/unreadable file degrades to an empty solver list
        solvers = []
        logging.error("Failed to load batch solver file %s" % filename)
    return solvers
def batch_file_load_modes(filename=None, return_wg=False):
    "Load modes from batch file"
    loaded_solvers = batch_file_load(filename)
    # Collect every solver's modes (get_data must return a list!) together
    # with the corresponding waveguides.
    modes = []
    wgs = []
    for solver in loaded_solvers:
        modes.extend(solver.get_data())
        wgs.append(solver.wg)
    # Return waveguides as well only when requested
    if return_wg:
        return modes, wgs
    return modes
def batch_solve(solvers, filename=None):
    """
    Solve problems in list solvers saving periodically
    to the specified file if given.
    The batch solve can be continued if interrupted
    with the function `batch_continue(filename)`.
    """
    # cPickle exists only on Python 2; fall back to pickle on Python 3.
    try:
        from cPickle import dump
    except ImportError:
        from pickle import dump
    for solver in solvers:
        # Resume calculation if not finished
        if not solver.isfinished():
            solver.calculate()
        # Save the whole solver queue after each solver so an interrupted
        # run can be resumed from the file.
        if filename is not None:
            with open(filename, 'wb') as fileobj:
                dump(solvers, fileobj)
    # Collect the data of all (now finished) solvers
    modes = []
    for solver in solvers:
        modes += solver.get_data()
    return modes
def batch_continue(filename):
    """
    Continue an aborted batch_solve with a batch file
    """
    pending = batch_file_load(filename)
    return batch_solve(pending, filename)
| gpl-3.0 |
hopeall/odoo | addons/website_mail/__openerp__.py | 379 | 1623 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: module metadata plus the files loaded on
# installation.
{
    'name': 'Website Mail',
    'category': 'Hidden',
    'summary': 'Website Module for Mail',
    'version': '0.1',
    'description': """Glue module holding mail improvements for website.""",
    'author': 'OpenERP SA',
    # Modules that must be installed before this one
    'depends': ['website', 'mail', 'email_template'],
    # XML data files loaded (in this order) at module install/update time
    'data': [
        'views/snippets.xml',
        'views/website_mail.xml',
        'views/website_email_designer.xml',
        'views/email_template_view.xml',
        'data/mail_groups.xml',
        'security/website_mail.xml',
    ],
    # Client-side QWeb templates
    'qweb': [
        'static/src/xml/website_mail.xml'
    ],
    'installable': True,
    # Installed automatically once all dependencies are present
    'auto_install': True,
}
| agpl-3.0 |
SpatialMetabolomics/SM_distributed | sm/engine/msm_basic/msm_basic_search.py | 2 | 2848 | from collections import OrderedDict
import pandas as pd
from sm.engine.util import SMConfig
from sm.engine.msm_basic.formula_imager_segm import compute_sf_images
from sm.engine.msm_basic.formula_img_validator import sf_image_metrics
from sm.engine.search_algorithm import SearchAlgorithm
import logging
logger = logging.getLogger('engine')
class MSMBasicSearch(SearchAlgorithm):
    """Basic MSM molecule search: compute per-ion images and metrics,
    estimate FDR, and filter both metrics and images by the FDR threshold.
    """

    def __init__(self, sc, ds, ds_reader, mol_db, centr_gen, fdr, ds_config):
        super(MSMBasicSearch, self).__init__(sc, ds, ds_reader, mol_db, fdr, ds_config)
        # Per-ion metric names with their zero/default values, in a fixed
        # order so downstream consumers see stable columns.
        self.metrics = OrderedDict([('chaos', 0), ('spatial', 0), ('spectral', 0),
                                    ('total_iso_ints', [0, 0, 0, 0]),
                                    ('min_iso_ints', [0, 0, 0, 0]),
                                    ('max_iso_ints', [0, 0, 0, 0])])
        # Ions with FDR above this threshold are dropped by filter_sf_metrics
        self.max_fdr = 0.5
        self._centr_gen = centr_gen

    def search(self):
        """ Search for molecules in the dataset
        Returns
        -------
        : tuple
            (ion metrics DataFrame, ion image pyspark.RDD)
        """
        logger.info('Running molecule search')
        # Centroids restricted to the ion tuples the FDR machinery targets
        ion_centroids_df = self._centr_gen.centroids_subset(self._fdr.ion_tuples())
        ion_images = compute_sf_images(self._sc, self._ds_reader, ion_centroids_df,
                                       self.ds_config['image_generation']['ppm'])
        ion_metrics_df = self.calc_metrics(ion_images, ion_centroids_df)
        ion_metrics_fdr_df = self.estimate_fdr(ion_metrics_df)
        ion_metrics_fdr_df = self.filter_sf_metrics(ion_metrics_fdr_df)
        # Keep only images for ions that survived the FDR filter
        ion_images = self.filter_sf_images(ion_images, ion_metrics_fdr_df)
        return ion_metrics_fdr_df, ion_images

    def calc_metrics(self, sf_images, ion_centroids_df):
        """Compute image metrics for every ion.

        Builds a mapping ion_i -> list of centroid intensities, then
        delegates to sf_image_metrics.
        """
        ion_centr_ints = (ion_centroids_df.reset_index().groupby(['ion_i'])
                          .apply(lambda df: df.int.tolist()).to_dict())
        all_sf_metrics_df = sf_image_metrics(sf_images=sf_images, metrics=self.metrics, ds=self._ds,
                                             ds_reader=self._ds_reader, ion_centr_ints=ion_centr_ints, sc=self._sc)
        return all_sf_metrics_df

    def estimate_fdr(self, ion_metrics_df):
        """Attach (sf, adduct) info to the metrics, estimate per-(sf, adduct)
        FDR from the msm score, and merge the FDR values back, re-indexed
        by ion_i."""
        ion_metrics_sf_adduct_df = ion_metrics_df.join(self._centr_gen.ion_df)
        sf_adduct_fdr_df = self._fdr.estimate_fdr(
            ion_metrics_sf_adduct_df.set_index(['sf', 'adduct']).msm)
        # Inner merge drops ions for which no FDR estimate was produced
        ion_metrics_sf_adduct_fdr_df = pd.merge(ion_metrics_sf_adduct_df.reset_index(),
                                                sf_adduct_fdr_df.reset_index(),
                                                how='inner', on=['sf', 'adduct']).set_index('ion_i')
        return ion_metrics_sf_adduct_fdr_df

    def filter_sf_metrics(self, sf_metrics_df):
        """Keep only rows whose FDR does not exceed self.max_fdr."""
        return sf_metrics_df[sf_metrics_df.fdr <= self.max_fdr]
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Cipher/_mode_cbc.py | 2 | 10971 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Ciphertext Block Chaining (CBC) mode.
"""
__all__ = ['CbcMode']
from Cryptodome.Util.py3compat import _copy_bytes
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
create_string_buffer, get_raw_buffer,
SmartPointer, c_size_t, c_uint8_ptr,
is_writeable_buffer)
from Cryptodome.Random import get_random_bytes
raw_cbc_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._raw_cbc", """
int CBC_start_operation(void *cipher,
const uint8_t iv[],
size_t iv_len,
void **pResult);
int CBC_encrypt(void *cbcState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CBC_decrypt(void *cbcState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CBC_stop_operation(void *state);
"""
)
class CbcMode(object):
    """*Cipher-Block Chaining (CBC)*.

    Each of the ciphertext blocks depends on the current
    and all previous plaintext blocks.

    An Initialization Vector (*IV*) is required.

    See `NIST SP800-38A`_ , Section 6.2 .

    .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf

    :undocumented: __init__
    """

    def __init__(self, block_cipher, iv):
        """Create a new block cipher, configured in CBC mode.

        :Parameters:
          block_cipher : C pointer
            A smart pointer to the low-level block cipher instance.

          iv : bytes/bytearray/memoryview
            The initialization vector to use for encryption or decryption.
            It is as long as the cipher block.

            **The IV must be unpredictable**. Ideally it is picked randomly.

            Reusing the *IV* for encryptions performed with the same key
            compromises confidentiality.
        """

        self._state = VoidPointer()
        result = raw_cbc_lib.CBC_start_operation(block_cipher.get(),
                                                 c_uint8_ptr(iv),
                                                 c_size_t(len(iv)),
                                                 self._state.address_of())
        if result:
            raise ValueError("Error %d while instantiating the CBC mode"
                             % result)

        # Ensure that object disposal of this Python object will (eventually)
        # free the memory allocated by the raw library for the cipher mode
        self._state = SmartPointer(self._state.get(),
                                   raw_cbc_lib.CBC_stop_operation)

        # Memory allocated for the underlying block cipher is now owned
        # by the cipher mode
        block_cipher.release()

        self.block_size = len(iv)
        """The block size of the underlying cipher, in bytes."""

        self.iv = _copy_bytes(None, None, iv)
        """The Initialization Vector originally used to create the object.
        The value does not change."""

        self.IV = self.iv
        """Alias for `iv`"""

        # Tracks which of encrypt/decrypt may still be called; a cipher
        # object must not be used for both directions.
        self._next = [ self.encrypt, self.decrypt ]

    def encrypt(self, plaintext, output=None):
        """Encrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have encrypted a message
        you cannot encrypt (or decrypt) another message using the same
        object.

        The data to encrypt can be broken up in two or
        more pieces and `encrypt` can be called multiple times.

        That is, the statement:

            >>> c.encrypt(a) + c.encrypt(b)

        is equivalent to:

            >>> c.encrypt(a+b)

        That also means that you cannot reuse an object for encrypting
        or decrypting other data with the same key.

        This function does not add any padding to the plaintext.

        :Parameters:
          plaintext : bytes/bytearray/memoryview
            The piece of data to encrypt.
            Its length must be multiple of the cipher block size.
        :Keywords:
          output : bytearray/memoryview
            The location where the ciphertext must be written to.
            If ``None``, the ciphertext is returned.
        :Return:
          If ``output`` is ``None``, the ciphertext is returned as ``bytes``.
          Otherwise, ``None``.
        """

        if self.encrypt not in self._next:
            raise TypeError("encrypt() cannot be called after decrypt()")
        self._next = [ self.encrypt ]

        if output is None:
            ciphertext = create_string_buffer(len(plaintext))
        else:
            ciphertext = output

            if not is_writeable_buffer(output):
                raise TypeError("output must be a bytearray or a writeable memoryview")

            if len(plaintext) != len(output):
                raise ValueError("output must have the same length as the input"
                                 " (%d bytes)" % len(plaintext))

        result = raw_cbc_lib.CBC_encrypt(self._state.get(),
                                         c_uint8_ptr(plaintext),
                                         c_uint8_ptr(ciphertext),
                                         c_size_t(len(plaintext)))
        if result:
            if result == 3:
                raise ValueError("Data must be padded to %d byte boundary in CBC mode" % self.block_size)
            raise ValueError("Error %d while encrypting in CBC mode" % result)

        if output is None:
            return get_raw_buffer(ciphertext)
        else:
            return None

    def decrypt(self, ciphertext, output=None):
        """Decrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have decrypted a message
        you cannot decrypt (or encrypt) another message with the same
        object.

        The data to decrypt can be broken up in two or
        more pieces and `decrypt` can be called multiple times.

        That is, the statement:

            >>> c.decrypt(a) + c.decrypt(b)

        is equivalent to:

            >>> c.decrypt(a+b)

        This function does not remove any padding from the plaintext.

        :Parameters:
          ciphertext : bytes/bytearray/memoryview
            The piece of data to decrypt.
            Its length must be multiple of the cipher block size.
        :Keywords:
          output : bytearray/memoryview
            The location where the plaintext must be written to.
            If ``None``, the plaintext is returned.
        :Return:
          If ``output`` is ``None``, the plaintext is returned as ``bytes``.
          Otherwise, ``None``.
        """

        if self.decrypt not in self._next:
            raise TypeError("decrypt() cannot be called after encrypt()")
        self._next = [ self.decrypt ]

        if output is None:
            plaintext = create_string_buffer(len(ciphertext))
        else:
            plaintext = output

            if not is_writeable_buffer(output):
                raise TypeError("output must be a bytearray or a writeable memoryview")

            if len(ciphertext) != len(output):
                # BUG FIX: the message reports the *input* length, so it must
                # interpolate len(ciphertext); the original used
                # len(plaintext), which here equals the output length.
                raise ValueError("output must have the same length as the input"
                                 " (%d bytes)" % len(ciphertext))

        result = raw_cbc_lib.CBC_decrypt(self._state.get(),
                                         c_uint8_ptr(ciphertext),
                                         c_uint8_ptr(plaintext),
                                         c_size_t(len(ciphertext)))
        if result:
            if result == 3:
                raise ValueError("Data must be padded to %d byte boundary in CBC mode" % self.block_size)
            raise ValueError("Error %d while decrypting in CBC mode" % result)

        if output is None:
            return get_raw_buffer(plaintext)
        else:
            return None
def _create_cbc_cipher(factory, **kwargs):
"""Instantiate a cipher object that performs CBC encryption/decryption.
:Parameters:
factory : module
The underlying block cipher, a module from ``Cryptodome.Cipher``.
:Keywords:
iv : bytes/bytearray/memoryview
The IV to use for CBC.
IV : bytes/bytearray/memoryview
Alias for ``iv``.
Any other keyword will be passed to the underlying block cipher.
See the relevant documentation for details (at least ``key`` will need
to be present).
"""
cipher_state = factory._create_base_cipher(kwargs)
iv = kwargs.pop("IV", None)
IV = kwargs.pop("iv", None)
if (None, None) == (iv, IV):
iv = get_random_bytes(factory.block_size)
if iv is not None:
if IV is not None:
raise TypeError("You must either use 'iv' or 'IV', not both")
else:
iv = IV
if len(iv) != factory.block_size:
raise ValueError("Incorrect IV length (it must be %d bytes long)" %
factory.block_size)
if kwargs:
raise TypeError("Unknown parameters for CBC: %s" % str(kwargs))
return CbcMode(cipher_state, iv)
| apache-2.0 |
joxeankoret/diaphora | pygments/lexers/c_cpp.py | 21 | 9415 | # -*- coding: utf-8 -*-
"""
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
    """
    For C family source code. This is used as a base class to avoid repetitious
    definitions.
    """

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'

    # Lexer state machine: maps state name -> list of
    # (regex, token type[, next state]) rules.
    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do',
                    'else', 'enum', 'extern', 'for', 'goto', 'if', 'register',
                    'restricted', 'return', 'sizeof', 'static', 'struct',
                    'switch', 'typedef', 'union', 'volatile', 'while'),
                   suffix=r'\b'), Keyword),
            (r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (words(('inline', '_inline', '__inline', 'naked', 'restrict',
                    'thread', 'typename'), suffix=r'\b'), Keyword.Reserved),
            # Vector intrinsics
            (r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
            # Microsoft-isms
            (words((
                'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl',
                'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try',
                'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop',
                'identifier', 'forceinline', 'assume'),
                prefix=r'__', suffix=r'\b'), Keyword.Reserved),
            (r'(true|false|NULL)\b', Name.Builtin),
            (r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
            ('[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|[*]))'  # return arguments
             r'([a-zA-Z_]\w*)'             # method name
             r'(\s*\([^;]*?\))'            # signature
             r'(' + _ws + r')?(\{)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|[*]))'  # return arguments
             r'([a-zA-Z_]\w*)'             # method name
             r'(\s*\([^;]*?\))'            # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    # Typedef names recognised when the corresponding option is enabled.
    stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
                    'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
                    'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
                    'wctrans_t', 'wint_t', 'wctype_t']
    c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
                 'int_least16_t', 'int_least32_t', 'int_least64_t',
                 'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
                 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
                 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
                 'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
                 'uintmax_t']

    def __init__(self, **options):
        """Read the `stdlibhighlighting` and `c99highlighting` options
        (both default to True) before delegating to RegexLexer."""
        self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the token stream: re-tag plain Name tokens that
        match known stdlib/C99 typedef names as Keyword.Type."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
            yield index, token, value
class CLexer(CFamilyLexer):
    """
    For C source code with preprocessor directives.
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc']
    mimetypes = ['text/x-chdr', 'text/x-csrc']
    priority = 0.1

    def analyse_text(text):
        # A line-initial #include or #ifdef is weak evidence of C source.
        for marker in ('^\s*#include [<"]', '^\s*#ifdef '):
            if re.search(marker, text, re.MULTILINE):
                return 0.1
class CppLexer(CFamilyLexer):
    """
    For C++ source code with preprocessor directives.
    """
    name = 'C++'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx',
                 '*.C', '*.H', '*.cp', '*.CPP']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']
    priority = 0.1

    # C++-specific rules merged (via `inherit`) with the C rules defined
    # in CFamilyLexer.
    tokens = {
        'statements': [
            (words((
                'asm', 'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
                'export', 'friend', 'mutable', 'namespace', 'new', 'operator',
                'private', 'protected', 'public', 'reinterpret_cast',
                'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
                'typeid', 'typename', 'using', 'virtual',
                'constexpr', 'nullptr', 'decltype', 'thread_local',
                'alignas', 'alignof', 'static_assert', 'noexcept', 'override',
                'final'), suffix=r'\b'), Keyword),
            (r'char(16_t|32_t)\b', Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            inherit,
        ],
        'root': [
            inherit,
            # C++ Microsoft-isms
            (words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
                    'multiple_inheritance', 'interface', 'event'),
                   prefix=r'__', suffix=r'\b'), Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }

    def analyse_text(text):
        """Score text as C++: 0.2 for an angle-bracket #include,
        0.4 for a `using namespace ` directive."""
        if re.search('#include <[a-z]+>', text):
            return 0.2
        if re.search('using namespace ', text):
            return 0.4
| agpl-3.0 |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1B/instances/10_2_workflow_full_10files_secondary_wmj_3sh_3rs_with_annot_with_proj_3s_range_old/calculateratio_3/CalculateRatioCpuMemory_3.py | 33 | 4757 | #!/usr/bin/env python
"""
This activity will calculate the ratio between CPU request and Memory request by (job ID, task index, event type).
These fields are optional and could be null.
"""
# It will connect to DataStoreClient
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import math
import sys

##################################################################
# Stage 1: read the dataset-wide CPU/memory statistics.  The last
# document read wins; these globals are used for the "all" z-scores.
import ConfigDB_Calc_StatsCPUMemory_3
client_statscpumemory = DataStoreClient("mongodb", ConfigDB_Calc_StatsCPUMemory_3)
data_statscpumemory = client_statscpumemory.getData()

if data_statscpumemory:
    while True:
        doc = data_statscpumemory.next()
        if doc is None:
            break;
        sd_cpu = doc['standard deviation cpu']
        avg_cpu = doc['average cpu']
        sd_memory = doc['standard deviation memory']
        avg_memory = doc['average memory']
        sd_ratio = doc['standard deviation ratio']
        avg_ratio = doc['average ratio']

##################################################################
# Stage 2: read the per-event-type statistics into eventInfo,
# keyed by event type.
import ConfigDB_Calc_TEInfo_3
client_taskinfo = DataStoreClient("mongodb", ConfigDB_Calc_TEInfo_3)
# according to config
eventList = client_taskinfo.getData() # return an array of docs (like a csv reader)
eventInfo = {}

if(eventList):
    for index in eventList:
        event_type = index[ConfigDB_Calc_TEInfo_3.COLUMN]
        while True:
            doc = index['data'].next()
            if doc is None:
                break;
            info = {}
            info["standard deviation cpu"] = doc["standard deviation cpu"]
            info["average cpu"] = doc["average cpu"]
            info["standard deviation memory"] = doc["standard deviation memory"]
            info["average memory"] = doc["average memory"]
            info["standard deviation ratio"] = doc["standard deviation ratio"]
            info["average ratio"] = doc["average ratio"]
            eventInfo[event_type] = info

##################################################################
# Stage 3: stream the task events, compute the CPU/memory ratio and the
# z-scores against both the global and per-event statistics, and save the
# results in batches of 3000 lines.
import ConfigDB_Calc_TaskEvent_3
client_task = DataStoreClient("mongodb", ConfigDB_Calc_TaskEvent_3)
data_task = client_task.getData() # return an array of docs (like a csv reader)
output = []
count = 1

if(data_task):
    # processing
    while True:
        doc = data_task.next()
        if doc is None:
            # End of stream: flush any remaining buffered lines.
            print "================="
            print "finish"
            print output
            print len(output)
            print count
            if len(output) > 0:
                print "done"
                client_task.saveData(output, numline=count)
            break;
        # Only event types 2,3,4,6 with non-null CPU and memory requests
        # are considered.
        if doc['event type'] in ["2","3","4","6"]:
            if doc['CPU request'] and doc['memory request']:
                #print doc
                cpu = 0 if (not doc['CPU request']) else float(doc['CPU request'])
                memory = 0 if not doc['memory request'] else float(doc['memory request'])
                ratio = cpu/memory if (memory != 0) else 0
                event_type = doc['event type']
                event_avg_cpu = eventInfo[event_type]["average cpu"]
                event_sd_cpu = eventInfo[event_type]["standard deviation cpu"]
                event_avg_memory = eventInfo[event_type]["average memory"]
                event_sd_memory = eventInfo[event_type]["standard deviation memory"]
                event_avg_ratio = eventInfo[event_type]["average ratio"]
                event_sd_ratio = eventInfo[event_type]["standard deviation ratio"]
                # One output row per accepted event; "sds from ..." columns
                # are z-scores (None when the standard deviation is 0/null).
                newline = {}
                newline['job ID'] = doc['job ID']
                newline['task index'] = doc['task index']
                newline['event type'] = doc['event type']
                newline['time'] = doc['time']
                newline['ratio cpu memory'] = ratio
                newline['sds from all avg cpu'] = (cpu - avg_cpu)/sd_cpu if sd_cpu else None
                newline['sds from all avg memory'] = (memory - avg_memory)/sd_memory if sd_memory else None
                newline['sds from all avg ratio'] = (ratio - avg_ratio)/sd_ratio if sd_ratio else None
                newline['sds from event avg cpu'] = (cpu - event_avg_cpu)/event_sd_cpu if event_sd_cpu else None
                newline['sds from event avg memory'] = (memory - event_avg_memory)/event_sd_memory if event_sd_memory else None
                newline['sds from event avg ratio'] = (ratio - event_avg_ratio)/event_sd_ratio if event_sd_ratio else None
                output.append(newline)
                # Flush a full batch of 3000 rows.
                if len(output) >= 3000:
                    print "================="
                    print "inside"
                    print count
                    client_task.saveData(output, numline=count)
                    count += 3000
                    output = []
frreiss/tensorflow-fred | tensorflow/python/keras/layers/serialization.py | 5 | 8496 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer serialization/deserialization functions.
"""
# pylint: disable=wildcard-import
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python import tf2
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import convolutional
from tensorflow.python.keras.layers import convolutional_recurrent
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import cudnn_recurrent
from tensorflow.python.keras.layers import dense_attention
from tensorflow.python.keras.layers import einsum_dense
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.layers import local
from tensorflow.python.keras.layers import merge
from tensorflow.python.keras.layers import multi_head_attention
from tensorflow.python.keras.layers import noise
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.layers import pooling
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2
from tensorflow.python.keras.layers import wrappers
from tensorflow.python.keras.layers.preprocessing import category_crossing
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import category_encoding_v1
from tensorflow.python.keras.layers.preprocessing import discretization
from tensorflow.python.keras.layers.preprocessing import hashing
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.layers.preprocessing import integer_lookup as preprocessing_integer_lookup
from tensorflow.python.keras.layers.preprocessing import integer_lookup_v1 as preprocessing_integer_lookup_v1
from tensorflow.python.keras.layers.preprocessing import normalization as preprocessing_normalization
from tensorflow.python.keras.layers.preprocessing import normalization_v1 as preprocessing_normalization_v1
from tensorflow.python.keras.layers.preprocessing import string_lookup as preprocessing_string_lookup
from tensorflow.python.keras.layers.preprocessing import string_lookup_v1 as preprocessing_string_lookup_v1
from tensorflow.python.keras.layers.preprocessing import text_vectorization as preprocessing_text_vectorization
from tensorflow.python.keras.layers.preprocessing import text_vectorization_v1 as preprocessing_text_vectorization_v1
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_inspect as inspect
from tensorflow.python.util.tf_export import keras_export
# Every Keras layer module whose public `Layer` subclasses are registered for
# deserialization by class name. V1 variants go here; when TF2 behavior is
# enabled, entries are selectively overridden by ALL_V2_MODULES below.
ALL_MODULES = (base_layer, input_layer, advanced_activations, convolutional,
               convolutional_recurrent, core, cudnn_recurrent, dense_attention,
               embeddings, einsum_dense, local, merge, noise, normalization,
               pooling, image_preprocessing, preprocessing_integer_lookup_v1,
               preprocessing_normalization_v1, preprocessing_string_lookup_v1,
               preprocessing_text_vectorization_v1, recurrent, wrappers,
               hashing, category_crossing, category_encoding_v1, discretization,
               multi_head_attention)
# Modules whose classes take precedence over the ALL_MODULES entries when
# `tf2.enabled()` is true (V2 implementations shadow the V1 ones by name).
ALL_V2_MODULES = (rnn_cell_wrapper_v2, normalization_v2, recurrent_v2,
                  preprocessing_integer_lookup, preprocessing_normalization,
                  preprocessing_string_lookup, preprocessing_text_vectorization,
                  category_encoding)
# ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
  """Populates dict ALL_OBJECTS with every built-in layer.

  Idempotent per TF-version mode: the registry is rebuilt only when it is
  empty or when `tf2.enabled()` has flipped since the last call on this
  thread.
  """
  global LOCAL
  if not hasattr(LOCAL, 'ALL_OBJECTS'):
    # First call on this thread: create the thread-local registry.
    LOCAL.ALL_OBJECTS = {}
    LOCAL.GENERATED_WITH_V2 = None

  if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf2.enabled():
    # Objects dict is already generated for the proper TF version:
    # do nothing.
    return

  LOCAL.ALL_OBJECTS = {}
  LOCAL.GENERATED_WITH_V2 = tf2.enabled()
  base_cls = base_layer.Layer
  # Register every Layer subclass found in the V1 module list.
  generic_utils.populate_dict_with_module_objects(
      LOCAL.ALL_OBJECTS,
      ALL_MODULES,
      obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))

  # Overwrite certain V1 objects with V2 versions
  if tf2.enabled():
    generic_utils.populate_dict_with_module_objects(
        LOCAL.ALL_OBJECTS,
        ALL_V2_MODULES,
        obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))

  # These deserialization aliases are added for backward compatibility,
  # as in TF 1.13, "BatchNormalizationV1" and "BatchNormalizationV2"
  # were used as class name for v1 and v2 version of BatchNormalization,
  # respectively. Here we explicitly convert them to their canonical names.
  LOCAL.ALL_OBJECTS['BatchNormalizationV1'] = normalization.BatchNormalization
  LOCAL.ALL_OBJECTS[
      'BatchNormalizationV2'] = normalization_v2.BatchNormalization

  # Prevent circular dependencies.
  from tensorflow.python.keras import models  # pylint: disable=g-import-not-at-top
  from tensorflow.python.keras.premade.linear import LinearModel  # pylint: disable=g-import-not-at-top
  from tensorflow.python.keras.premade.wide_deep import WideDeepModel  # pylint: disable=g-import-not-at-top
  from tensorflow.python.keras.feature_column.sequence_feature_column import SequenceFeatures  # pylint: disable=g-import-not-at-top

  # Non-Layer objects that still need to be deserializable by name.
  LOCAL.ALL_OBJECTS['Input'] = input_layer.Input
  LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec
  LOCAL.ALL_OBJECTS['Functional'] = models.Functional
  LOCAL.ALL_OBJECTS['Model'] = models.Model
  LOCAL.ALL_OBJECTS['SequenceFeatures'] = SequenceFeatures
  LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential
  LOCAL.ALL_OBJECTS['LinearModel'] = LinearModel
  LOCAL.ALL_OBJECTS['WideDeepModel'] = WideDeepModel

  # DenseFeatures has distinct V1/V2 implementations; pick per mode.
  if tf2.enabled():
    from tensorflow.python.keras.feature_column.dense_features_v2 import DenseFeatures  # pylint: disable=g-import-not-at-top
    LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures
  else:
    from tensorflow.python.keras.feature_column.dense_features import DenseFeatures  # pylint: disable=g-import-not-at-top
    LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures

  # Merge layers, function versions.
  LOCAL.ALL_OBJECTS['add'] = merge.add
  LOCAL.ALL_OBJECTS['subtract'] = merge.subtract
  LOCAL.ALL_OBJECTS['multiply'] = merge.multiply
  LOCAL.ALL_OBJECTS['average'] = merge.average
  LOCAL.ALL_OBJECTS['maximum'] = merge.maximum
  LOCAL.ALL_OBJECTS['minimum'] = merge.minimum
  LOCAL.ALL_OBJECTS['concatenate'] = merge.concatenate
  LOCAL.ALL_OBJECTS['dot'] = merge.dot
@keras_export('keras.layers.serialize')
def serialize(layer):
  """Serializes `layer` via `generic_utils.serialize_keras_object`."""
  config = generic_utils.serialize_keras_object(layer)
  return config
@keras_export('keras.layers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiates a layer from a config dictionary.

  Arguments:
      config: dict of the form {'class_name': str, 'config': dict}
      custom_objects: dict mapping class names (or function names)
          of custom (non-Keras) objects to class/functions

  Returns:
      Layer instance (may be Model, Sequential, Network, Layer...)
  """
  # Make sure the thread-local registry matches the active TF version.
  populate_deserializable_objects()
  layer = generic_utils.deserialize_keras_object(
      config,
      module_objects=LOCAL.ALL_OBJECTS,
      custom_objects=custom_objects,
      printable_module_name='layer')
  return layer
| apache-2.0 |
selfcommit/gaedav | pyxml/dom/html/HTMLDocument.py | 4 | 11651 | ########################################################################
#
# File Name: HTMLDocument.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from pyxml.dom import Node
from pyxml.dom import NotSupportedErr
from pyxml.dom.Document import Document
from pyxml.dom import implementation
from pyxml.dom import ext
import string, sys
from pyxml.dom.html import HTML_DTD
class HTMLDocument(Document):
    """DOM Level 2 HTML `HTMLDocument` implementation.

    Extends the core `Document` with HTML-specific attributes (title, body,
    cookie, URL and the forms/images/links/anchors/applets collections) and
    HTML-aware element creation: `createElement` maps tag names onto the
    specialized `HTML*Element` classes in `pyxml.dom.html`.

    Fixes vs. previous revision:
      * `write()` called the misspelled name `FromHtml`, raising NameError;
        the imported parser entry point is `FromHTML`.
      * `open()` reset the key `'__url'` while every other accessor uses
        `'__URL'`, so the URL was never actually cleared.
    """

    def __init__(self):
        Document.__init__(self, None)
        # These only make sense in a browser environment, therefore
        # they never change
        self.__dict__['__referrer'] = ''
        self.__dict__['__domain'] = None
        self.__dict__['__URL'] = ''
        self.__dict__['__cookie'] = ''
        # Guards write()/writeln(): honoured only between open() and close().
        self.__dict__['__writable'] = 0
        # Namespace of pyxml.dom.html, used to resolve HTML*Element classes
        # by module name in _4dom_createHTMLElement().
        self.__dict__['_html'] = vars(sys.modules['pyxml.dom.html'])

    ### Attribute Methods ###

    def _get_URL(self):
        return self.__dict__['__URL']

    def _get_anchors(self):
        # Per DOM HTML, only A elements with a NAME are anchors.
        anchors = self.getElementsByTagName('A')
        anchors = filter(lambda x: x._get_name(), anchors)
        return implementation._4dom_createHTMLCollection(anchors)

    def _get_applets(self):
        # APPLET elements plus OBJECT elements whose CODE attribute is set.
        al = self.getElementsByTagName('APPLET')
        ol = self.getElementsByTagName('OBJECT')
        ol = filter(lambda x: x._get_code(), ol)
        return implementation._4dom_createHTMLCollection(al + ol)

    def _get_body(self):
        body = ''
        # Try to find the body or FRAMESET
        elements = self.getElementsByTagName('FRAMESET')
        if not elements:
            elements = self.getElementsByTagName('BODY')
        if elements:
            body = elements[0]
        else:
            # Create a body
            body = self.createElement('BODY')
            self.documentElement.appendChild(body)
        return body

    def _set_body(self, newBody):
        elements = self.getElementsByTagName('FRAMESET')
        if not elements:
            elements = self.getElementsByTagName('BODY')
        if elements:
            # Replace the existing one
            elements[0].parentNode.replaceChild(newBody, elements[0])
        else:
            # Add it
            self.documentElement.appendChild(newBody)

    def _get_cookie(self):
        return self.__dict__['__cookie']

    def _set_cookie(self, cookie):
        self.__dict__['__cookie'] = cookie

    def _get_domain(self):
        return self.__dict__['__domain']

    def _get_forms(self):
        forms = self.getElementsByTagName('FORM')
        return implementation._4dom_createHTMLCollection(forms)

    def _get_images(self):
        images = self.getElementsByTagName('IMG')
        return implementation._4dom_createHTMLCollection(images)

    def _get_links(self):
        # All AREA elements plus A elements that have an HREF.
        areas = self.getElementsByTagName('AREA')
        anchors = self.getElementsByTagName('A')
        links = filter(lambda x: x._get_href(), areas + anchors)
        return implementation._4dom_createHTMLCollection(links)

    def _get_referrer(self):
        return self.__dict__['__referrer']

    def _get_title(self):
        elements = self.getElementsByTagName('TITLE')
        if elements:
            # Take the first
            title = elements[0]
            title.normalize()
            if title.firstChild:
                return title.firstChild.data
        return ''

    def _set_title(self, title):
        # See if we can find the title
        title_nodes = self.getElementsByTagName('TITLE')
        if title_nodes:
            title_node = title_nodes[0]
            title_node.normalize()
            if title_node.firstChild:
                # Existing text node: just update it in place.
                title_node.firstChild.data = title
                return
        else:
            title_node = self.createElement('TITLE')
            self._4dom_getHead().appendChild(title_node)
        text = self.createTextNode(title)
        title_node.appendChild(text)

    ### Methods ###

    def close(self):
        """Ends the document stream opened by open()."""
        self.__dict__['__writable'] = 0

    def getElementsByName(self, elementName):
        return self._4dom_getElementsByAttribute('*', 'NAME', elementName)

    def open(self):
        # Clear out the doc and make it writable.
        self.__dict__['__referrer'] = ''
        self.__dict__['__domain'] = None
        # Fixed: key must match __init__/_get_URL ('__URL', not '__url').
        self.__dict__['__URL'] = ''
        self.__dict__['__cookie'] = ''
        self.__dict__['__writable'] = 1

    def write(self, st):
        """Parses `st` as HTML and appends the result to this document.

        A no-op unless open() has been called first.
        """
        if not self.__dict__['__writable']:
            return
        # We need to parse the string here
        from pyxml.dom.ext.reader.HtmlLib import FromHTML
        # Fixed: the parser entry point is FromHTML (was misspelled FromHtml,
        # which raised NameError at runtime).
        d = FromHTML(st, self)
        if d != self:
            self.appendChild(d)

    def writeln(self, st):
        self.write(st + '\n')

    def getElementByID(self, ID):
        hc = self._4dom_getElementsByAttribute('*', 'ID', ID)
        if hc.length != 0:
            return hc[0]
        return None

    ### Overridden Methods ###

    def createElement(self, tagName):
        return self._4dom_createHTMLElement(tagName)

    def createElementNS(self, namespace, qname):
        # Namespaces are meaningless in an HTML document; use only the qname.
        return self._4dom_createHTMLElement(qname)

    def createAttribute(self, name):
        # HTML attribute names are case-insensitive; normalize to upper case.
        return Document.createAttribute(self, string.upper(name))

    def createCDATASection(*args, **kw):
        raise NotSupportedErr()

    def createEntityReference(*args, **kw):
        raise NotSupportedErr()

    def createProcessingInstruction(*args, **kw):
        raise NotSupportedErr()

    def _4dom_createEntity(*args, **kw):
        raise NotSupportedErr()

    def _4dom_createNotation(*args, **kw):
        raise NotSupportedErr()

    ### Internal Methods ###

    def _4dom_getElementsByAttribute(self, tagName, attribute, attrValue=None):
        """Returns an HTMLCollection of `tagName` elements whose `attribute`
        is non-empty (when attrValue is None) or equals `attrValue`."""
        nl = self.getElementsByTagName(tagName)
        hc = implementation._4dom_createHTMLCollection()
        for elem in nl:
            attr = elem.getAttribute(attribute)
            if attrValue is None and attr != '':
                hc.append(elem)
            elif attr == attrValue:
                hc.append(elem)
        return hc

    def _4dom_getHead(self):
        """Returns the HEAD element, creating one in front of BODY if absent."""
        nl = self.getElementsByTagName('HEAD')
        if not nl:
            head = self.createElement('HEAD')
            # The head goes in front of the body
            body = self._get_body()
            self.documentElement.insertBefore(head, body)
        else:
            head = nl[0]
        return head

    def _4dom_createHTMLElement(self, tagName):
        """Instantiates the HTML*Element class matching `tagName`."""
        lowered = string.lower(tagName)
        if not HTML_DTD.has_key(lowered):
            raise TypeError('Unknown HTML Element: %s' % tagName)
        if lowered in NoClassTags:
            # No specialized class for this tag; use the generic base.
            from HTMLElement import HTMLElement
            return HTMLElement(self, tagName)
        #FIXME: capitalize() broken with unicode in Python 2.0
        #normTagName = string.capitalize(tagName)
        capitalized = string.upper(tagName[0]) + lowered[1:]
        # Resolve irregular tag -> class-name mappings (e.g. 'Ul' -> 'UList').
        element = HTMLTagMap.get(capitalized, capitalized)
        module = 'HTML%sElement' % element
        if not self._html.has_key(module):
            # Try to import it (should never fail)
            __import__('pyxml.dom.html.%s' % module)
        # Class and module have the same name
        klass = getattr(self._html[module], module)
        return klass(self, tagName)

    def cloneNode(self, deep):
        clone = HTMLDocument()
        clone.__dict__['__referrer'] = self._get_referrer()
        clone.__dict__['__domain'] = self._get_domain()
        clone.__dict__['__URL'] = self._get_URL()
        clone.__dict__['__cookie'] = self._get_cookie()
        if deep:
            if self.doctype is not None:
                # Cannot have any children, no deep needed
                dt = self.doctype.cloneNode(0)
                clone._4dom_setDocumentType(dt)
            if self.documentElement is not None:
                # The root element can have children, duh
                root = self.documentElement.cloneNode(1, newOwner=clone)
                clone.appendChild(root)
        return clone

    def isXml(self):
        return 0

    def isHtml(self):
        return 1

    ### Attribute Access Mappings ###

    _readComputedAttrs = Document._readComputedAttrs.copy()
    _readComputedAttrs.update({
        'title': _get_title,
        'referrer': _get_referrer,
        'domain': _get_domain,
        'URL': _get_URL,
        'body': _get_body,
        'images': _get_images,
        'applets': _get_applets,
        'links': _get_links,
        'forms': _get_forms,
        'anchors': _get_anchors,
        'cookie': _get_cookie,
        })

    _writeComputedAttrs = Document._writeComputedAttrs.copy()
    _writeComputedAttrs.update({
        'title': _set_title,
        'body': _set_body,
        'cookie': _set_cookie,
        })

    # Create the read-only list of attributes
    _readOnlyAttrs = filter(lambda k, m=_writeComputedAttrs: not m.has_key(k),
                            Document._readOnlyAttrs + _readComputedAttrs.keys())
# HTML tags that don't map directly to a class name.
# Maps the capitalized tag name to the suffix used in the corresponding
# HTML<suffix>Element class/module name (e.g. 'Ul' -> HTMLUListElement);
# tags not listed here use the capitalized tag name as-is.
HTMLTagMap = {'Isindex': 'IsIndex',
              'Optgroup': 'OptGroup',
              'Textarea': 'TextArea',
              'Fieldset': 'FieldSet',
              'Ul': 'UList',
              'Ol': 'OList',
              'Dl': 'DList',
              'Dir': 'Directory',
              'Li': 'LI',
              'P': 'Paragraph',
              'H1': 'Heading',
              'H2': 'Heading',
              'H3': 'Heading',
              'H4': 'Heading',
              'H5': 'Heading',
              'H6': 'Heading',
              'Q': 'Quote',
              'Blockquote': 'Quote',
              'Br': 'BR',
              'Basefont': 'BaseFont',
              'Hr': 'HR',
              'A': 'Anchor',
              'Img': 'Image',
              'Caption': 'TableCaption',
              'Col': 'TableCol',
              'Colgroup': 'TableCol',
              'Td': 'TableCell',
              'Th': 'TableCell',
              'Tr': 'TableRow',
              'Thead': 'TableSection',
              'Tbody': 'TableSection',
              'Tfoot': 'TableSection',
              'Frameset': 'FrameSet',
              'Iframe': 'IFrame',
              'Form': 'Form',
              'Ins': 'Mod',
              'Del': 'Mod',
              }
# HTML Elements with no specific DOM Interface of their own.
# These tags (lower-case) are instantiated as the generic HTMLElement base
# class by HTMLDocument._4dom_createHTMLElement().
NoClassTags = ['sub',
               'sup',
               'span',
               'bdo',
               'tt',
               'i',
               'b',
               'u',
               's',
               'strike',
               'big',
               'small',
               'em',
               'strong',
               'dfn',
               'code',
               'samp',
               'kbd',
               'var',
               'cite',
               'acronym',
               'abbr',
               'dd',
               'dt',
               'noframes',
               'noscript',
               'address',
               'center',
               ]
| lgpl-2.1 |
trik/djangae | djangae/blobstore_service.py | 6 | 3015 | import os
import threading
import logging
import re
# Module-level handles to the running upload service: the background worker
# thread and the WSGI server it drives. Both are None while the service is
# stopped.
blobstore_service = None
server = None
from wsgiref.simple_server import WSGIRequestHandler
class NoLogRequestHandler(WSGIRequestHandler):
    """WSGIRequestHandler that suppresses per-request access logging.

    The default handler writes each accepted request to stderr rather than
    through the logging framework, which is needlessly noisy when running
    tests, so the access-log hook is made a no-op.
    """

    def log_request(self, code='-', size='-'):
        # Intentionally swallow the access-log line.
        pass
def start_blobstore_service():
    """
    When the blobstore files API was deprecated, the blobstore storage was switched
    to use a POST request to the upload handler when storing files uploaded via Django.
    Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
    because there is no server to handle the blobstore upload. So, this service is kicked
    off by the local sandbox and only handles blobstore uploads. When runserver kicks in
    this service is stopped.
    """
    global blobstore_service
    global server

    if blobstore_service:
        # Already running; starting twice would try to rebind the port.
        return

    # Imported lazily: google.appengine/django are only available inside the
    # sandbox environment this function runs in.
    from wsgiref.simple_server import make_server
    from google.appengine.tools.devappserver2 import blob_upload
    from google.appengine.tools.devappserver2 import blob_image
    from django.core.handlers.wsgi import WSGIRequest
    from django.utils.encoding import force_str
    from socket import error as socket_error

    def call_internal_upload(environ, start_response):
        # Non-image requests: hand the upload off to djangae's internal view.
        request = WSGIRequest(environ)
        from djangae.views import internalupload
        response = internalupload(request)
        status = '%s %s' % (response.status_code, response.reason_phrase)
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        start_response(force_str(status), response_headers)
        return response

    def handler(environ, start_response):
        path = environ["PATH_INFO"]

        # If this is an image serving URL, then use the blob_image WSGI app
        if re.match(blob_image.BLOBIMAGE_URL_PATTERN, path.lstrip("/")):
            return blob_image.Application()(environ, start_response)

        # Everything else is treated as a blobstore upload.
        return blob_upload.Application(call_internal_upload)(environ, start_response)

    port = int(os.environ['SERVER_PORT'])
    host = os.environ['SERVER_NAME']

    logging.info("Starting blobstore service on %s:%s", host, port)
    try:
        server = make_server(host, port, handler, handler_class=NoLogRequestHandler)
    except socket_error:
        # Port already bound — presumably another instance (or dev_appserver)
        # is serving uploads; treat as best-effort and bail out.
        logging.warning("Not starting blobstore service, it may already be running")
        return

    # Serve in a daemon thread so the process can exit without a join.
    blobstore_service = threading.Thread(target=server.serve_forever)
    blobstore_service.daemon = True
    blobstore_service.start()
def stop_blobstore_service():
    """Shuts down the background blobstore upload service, if it is running."""
    global blobstore_service
    global server

    if blobstore_service is None:
        # Service was never started (or already stopped) — nothing to do.
        return

    server.shutdown()
    # Give the worker thread a few seconds to wind down before dropping it.
    blobstore_service.join(5)
    blobstore_service = None
| bsd-3-clause |
seanli9jan/tensorflow | tensorflow/contrib/tpu/python/tpu/tpu_embedding.py | 4 | 44967 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.ops import gen_tpu_ops
from tensorflow.contrib.tpu.proto import tpu_embedding_configuration_pb2 as elc
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# Convenience aliases for the two supported modes of the embedding config.
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE

# TODO(shizhiw): A better interface is to make `num_hosts` and
# `num_cores_per_host` optional parameters for `TPUEmbedding`
# constructor. Usually they can be automatically detected, but
# user can also specify them for debugging (b/112112496).
# Auto-detection can be done with `tpu_system_metadata.py`.
# Production topology defaults.
_MASTER_JOB = 'tpu_worker'
_HOST_PATTERN = '/job:tpu_worker/task:{}/device:CPU:0'
_NUM_CORES_PER_HOST = 8

# Reduced single-host topology used when `tpu_embedding_test=True`.
_TEST_MASTER_JOB = None
_TEST_HOST = '/replica:0/task:0/device:CPU:0'
_TEST_NUM_CORES_PER_HOST = 2
class TableConfig(
    collections.namedtuple(
        'TableConfig',
        ['vocabulary_size', 'dimension', 'initializer', 'combiner'])):
  """Embedding table configuration."""

  @experimental
  def __new__(cls,
              vocabulary_size,
              dimension,
              initializer=None,
              combiner='mean'):
    """Creates a validated embedding table configuration.

    Args:
      vocabulary_size: positive int; number of rows in the embedding table.
      dimension: positive int; width of each embedding vector.
      initializer: optional callable used to initialize the embedding
        variable. Defaults to `tf.truncated_normal_initializer` with mean
        `0.0` and standard deviation `1/sqrt(dimension)`.
      combiner: one of 'mean' (default), 'sum' or 'sqrtn'; how multiple
        entries in a single row are reduced. 'sqrtn' often achieves good
        accuracy, in particular with bag-of-words columns. See
        `tf.nn.embedding_lookup_sparse`.

    Returns:
      A `TableConfig` namedtuple.

    Raises:
      ValueError: if `vocabulary_size` or `dimension` is not a positive
        integer, if `initializer` is given but not callable, or if
        `combiner` is unsupported.
    """
    if not (isinstance(vocabulary_size, int) and vocabulary_size >= 1):
      raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))

    if not (isinstance(dimension, int) and dimension >= 1):
      raise ValueError('Invalid dimension {}.'.format(dimension))

    if initializer is not None and not callable(initializer):
      raise ValueError('initializer must be callable if specified.')

    if combiner not in ('mean', 'sum', 'sqrtn'):
      raise ValueError('Invalid combiner {}'.format(combiner))

    if initializer is None:
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=1 / math.sqrt(dimension))

    return super(TableConfig, cls).__new__(
        cls, vocabulary_size, dimension, initializer, combiner)
# TODO(shizhiw): Factor `use_gradient_accumulation` and
# `pipeline_execution_with_tensor_core` out of `_OptimizationParameters`.
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
def __init__(self, learning_rate, use_gradient_accumulation,
pipeline_execution_with_tensor_core):
self.learning_rate = learning_rate
self.use_gradient_accumulation = use_gradient_accumulation
self.pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
class AdagradParameters(_OptimizationParameters):
  """Optimization parameters for Adagrad."""

  def __init__(self, learning_rate, initial_accumulator,
               use_gradient_accumulation=False,
               pipeline_execution_with_tensor_core=True):
    """Creates Adagrad parameters for TPU embeddings.

    Args:
      learning_rate: learning rate used for updating the embedding table.
      initial_accumulator: starting value for the Adagrad accumulator.
      use_gradient_accumulation: setting this to `True` makes embedding
        gradients calculation more accurate but slower. Please see
        `optimization_parameters.proto` for details.
      pipeline_execution_with_tensor_core: setting this to `True` makes
        training faster, but trained model will be different if step N and
        step N+1 involve the same set of embedding ID. Please see
        `tpu_embedding_configuration.proto` for details.
    """
    super(AdagradParameters, self).__init__(
        learning_rate, use_gradient_accumulation,
        pipeline_execution_with_tensor_core)
    self.initial_accumulator = initial_accumulator
class AdamParameters(_OptimizationParameters):
  """Optimization parameters for Adam."""

  def __init__(self,
               learning_rate,
               beta1=0.9,
               beta2=0.999,
               epsilon=1e-08,
               lazy_adam=True,
               sum_inside_sqrt=True,
               use_gradient_accumulation=False,
               pipeline_execution_with_tensor_core=True):
    """Creates Adam parameters for TPU embeddings.

    Args:
      learning_rate: a floating point value. The learning rate.
      beta1: exponential decay rate for the first-moment estimates.
      beta2: exponential decay rate for the second-moment estimates.
      epsilon: small constant for numerical stability.
      lazy_adam: use lazy Adam instead of Adam; trains faster. Please see
        `optimization_parameters.proto` for details.
      sum_inside_sqrt: improves training speed. Please see
        `optimization_parameters.proto` for details.
      use_gradient_accumulation: setting this to `True` makes embedding
        gradients calculation more accurate but slower. Please see
        `optimization_parameters.proto` for details.
      pipeline_execution_with_tensor_core: setting this to `True` makes
        training faster, but trained model will be different if step N and
        step N+1 involve the same set of embedding ID. Please see
        `tpu_embedding_configuration.proto` for details.
    """
    super(AdamParameters, self).__init__(
        learning_rate, use_gradient_accumulation,
        pipeline_execution_with_tensor_core)
    # Moment-estimate decay rates and numerical-stability constant.
    self.beta1 = beta1
    self.beta2 = beta2
    self.epsilon = epsilon
    # Speed/exactness trade-off variants; see `optimization_parameters.proto`.
    self.lazy_adam = lazy_adam
    self.sum_inside_sqrt = sum_inside_sqrt
class StochasticGradientDescentParameters(_OptimizationParameters):
  """Optimization parameters for stochastic gradient descent."""

  def __init__(self, learning_rate, use_gradient_accumulation=False,
               pipeline_execution_with_tensor_core=True):
    """Creates SGD parameters for TPU embeddings.

    Args:
      learning_rate: a floating point value. The learning rate.
      use_gradient_accumulation: setting this to `True` makes embedding
        gradients calculation more accurate but slower. Please see
        `optimization_parameters.proto` for details.
      pipeline_execution_with_tensor_core: setting this to `True` makes
        training faster, but trained model will be different if step N and
        step N+1 involve the same set of embedding ID. Please see
        `tpu_embedding_configuration.proto` for details.
    """
    super(StochasticGradientDescentParameters, self).__init__(
        learning_rate, use_gradient_accumulation,
        pipeline_execution_with_tensor_core)
class TPUEmbedding(object):
"""API for using TPU for embedding.
Example:
```
table_config_user = tpu_embedding.TableConfig(
vocabulary_size=4, dimension=2,
initializer=initializer, combiner='mean')
table_to_config_dict = {'video': table_config_video,
'user': table_config_user}
feature_to_table_dict = {'watched': 'video',
'favorited': 'video',
'friends': 'user'}
batch_size = 4
num_hosts = 1
optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
mode = tpu_embedding.TRAINING
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict, feature_to_table_dict,
batch_size, num_hosts, mode, optimization_parameters)
batch_size_per_core = embedding.batch_size_per_core
sparse_features_list = []
for host in hosts:
with ops.device(host):
for _ in range(embedding.num_cores_per_host):
sparse_features = {}
sparse_features['watched'] = sparse_tensor.SparseTensor(...)
sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
sparse_features['friends'] = sparse_tensor.SparseTensor(...)
sparse_features_list.append(sparse_features)
enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
def computation():
activations = embedding.get_activations()
loss = compute_loss(activations)
base_optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=1)
cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
base_optimizer)
train_op = cross_shard_optimizer.minimize(loss)
# `train_op` and `send_gradients_op` must happen in order.
with ops.control_dependencies([train_op]):
send_gradients_op = embedding.generate_send_gradients_op()
with ops.control_dependencies([send_gradients_op]):
loss = array_ops.identity(loss)
loss = tpu.shard(computation,
num_shards=embedding.num_cores)
with self.test_session() as sess:
sess.run(tpu.initialize_system(embedding_config=
embedding.config_proto))
sess.run(variables.global_variables_initializer())
sess.run(embedding.init_ops)
sess.run(enqueue_ops)
loss_val = sess.run(loss)
```
"""
# TODO(shizhiw): Instead of `feature_to_table_dict` which maps to table
# name, consider `feature_to_config_dict` which maps to `FeatureConfig`.
# `FeatureConfig` could have fields other than table name. For example, it
# could have a field to indicate that the feature should not be used to
# update embedding table (cr/204852758, cr/204940540). Also, this can support
# different combiners for different features within the same table.
# TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
# to `FeatureConfig`?
# TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
# `feature_to_table_dict` lists of `TableSpec` and `FeatureSpec` respectively?
# TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
# for-loops around construction of inputs.
# `optimization_parameter` applies to all tables. If the need arises,
# we can add `optimization_parameters` to `TableConfig` to override this
# global setting.
  @experimental
  def __init__(self,
               table_to_config_dict,
               feature_to_table_dict,
               batch_size,
               num_hosts,
               mode,
               optimization_parameters=None,
               tpu_embedding_test=False):
    """API for using TPU for embedding lookups.

    Args:
      table_to_config_dict: A dictionary mapping from string of table name to
        `TableConfig`. Table refers to an embedding table, e.g. `params`
        argument to `tf.nn.embedding_lookup_sparse()`.
      feature_to_table_dict: A dictionary mapping from string of feature name
        to string of table name. Feature refers to ids to lookup in embedding
        table, e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
      batch_size: An `int` representing the global batch size.
      num_hosts: An `int` representing the number of TPU hosts.
      mode: `TRAINING` or `INFERENCE`.
      optimization_parameters: `AdagradParameters`, `AdamParameters`,
        `StochasticGradientDescentParameters`. Must be set in training and
        must be `None` in inference.
      tpu_embedding_test: A `bool`. Only used for testing.

    Raises:
      ValueError: if any input is invalid.
    """
    _validate_table_to_config_dict(table_to_config_dict)
    # Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
    self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
    self._combiners = _create_combiners(self._table_to_config_dict)

    _validate_feature_to_table_dict(table_to_config_dict, feature_to_table_dict)
    self._feature_to_table_dict = _create_ordered_dict(feature_to_table_dict)
    # Inverse mapping: table name -> list of feature names backed by it.
    self._table_to_features_dict = _create_table_to_features_dict(
        self._feature_to_table_dict)

    self._batch_size = batch_size

    if tpu_embedding_test:
      # Small fixed topology for unit tests.
      self._num_hosts = 1
      self._hosts = [_TEST_HOST]
      self._num_cores_per_host = _TEST_NUM_CORES_PER_HOST
    else:
      self._num_hosts = num_hosts
      self._hosts = [_HOST_PATTERN.format(i) for i in range(self._num_hosts)]
      self._num_cores_per_host = _NUM_CORES_PER_HOST
    self._num_cores = self._num_cores_per_host * self._num_hosts

    _validate_batch_size(self._batch_size, self._num_cores)
    # The global batch is split evenly across all TPU cores.
    self._batch_size_per_core = self._batch_size // self._num_cores

    self._init_ops = []

    # TODO(shizhiw): remove `mode`?
    if mode == TRAINING:
      _validate_optimization_parameters(optimization_parameters)
      self._optimization_parameters = optimization_parameters
    elif mode == INFERENCE:
      if optimization_parameters is not None:
        raise ValueError('`optimization_parameters` should be `None` '
                         'for inference mode.')
      # Inference still needs a parameter object for config generation;
      # the learning rate is never applied in that mode.
      self._optimization_parameters = (
          StochasticGradientDescentParameters(1.))
    else:
      raise ValueError('`mode` only supports {} and {}; got {}.'
                       .format(TRAINING, INFERENCE, mode))
    self._mode = mode

    # TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
    # and create special handler for inference that inherits from
    # StochasticGradientDescentHandler with more user-friendly error message
    # on get_slot().
    self._optimizer_handler = _get_optimization_handler(
        self._optimization_parameters)

    dummy_table_variables_init_op = self._create_dummy_table_variables()
    self._init_ops.append(dummy_table_variables_init_op)

    self._config_proto = self._create_config_proto()
    self._create_variables_and_ops()
    # Loading embedding tables onto the TPU must run after variable init.
    self._init_ops.extend(self._load_parameters_ops)
  @property
  def hosts(self):
    """A list of device names for CPU hosts.

    Returns:
      A list of device names for CPU hosts (the internal list, not a copy).
    """
    return self._hosts

  # TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
  # to be consistent with `tpu_embedding_configuration.proto`.
  @property
  def num_cores_per_host(self):
    """Number of TPU cores on a CPU host.

    Returns:
      Number of TPU cores on a CPU host.
    """
    return self._num_cores_per_host

  @property
  def num_cores(self):
    """Total number of TPU cores on all hosts.

    Returns:
      Total number of TPU cores on all hosts.
    """
    return self._num_cores

  @property
  def batch_size_per_core(self):
    """Batch size for each TPU core.

    The sparse tensors in `sparse_features_list` to `generate_enqueue_ops`
    must have batch dimension equal to this.

    Returns:
      Batch size for each TPU core.
    """
    return self._batch_size_per_core

  @property
  def config_proto(self):
    """Create embedding config proto for `tpu.initialize_system()`.

    Returns:
      an `TPUEmbeddingConfiguration` proto describing the desired
      configuration of the hardware embedding lookup tables, which
      is passed to `tpu.initialize_system()`.
    """
    return self._config_proto

  @property
  def init_ops(self):
    """Initialization ops for TPU embedding.

    It must be called after all global variables have been initialized,
    i.e. after `global_variables_initializer()`, as it loads embedding
    tables into TPU.

    Returns:
      A list of ops.
    """
    return self._init_ops

  # TODO(shizhiw): get table variables the same way as getting slot variables.
  @property
  def table_to_table_variables_dict(self):
    # Shallow copy so callers cannot mutate the internal mapping; the
    # variable objects themselves are still shared.
    return copy.copy(self._table_to_table_variables_dict)

  def get_slot_names(self):
    """Return a list of the names of slots created by `TPUEmbedding`."""
    return self._optimizer_handler.get_slot_names()

  def get_slot(self, table, name):
    """Return a slot named `name` created for `table` by `TPUEmbedding`."""
    return self._optimizer_handler.get_slot(table, name)

  # TODO(shizhiw): expose load to user too?
  @property
  def retrieve_parameters_ops(self):
    # Ops that read the (possibly updated) embedding parameters back out
    # of the TPU.
    return self._retrieve_parameters_ops
def _create_config_proto(self):
  """Create the `TPUEmbeddingConfiguration` proto describing all tables.

  Builds one `table_descriptor` per configured table (name, vocabulary size,
  dimension, feature count and optimization parameters) plus the global
  mode / batch-size / topology fields.

  Returns:
    A populated `elc.TPUEmbeddingConfiguration` proto.
  """
  config_proto = elc.TPUEmbeddingConfiguration()
  for table in self._table_to_config_dict:
    table_descriptor = config_proto.table_descriptor.add()
    # `table` is the table's name string; it doubles as the descriptor name.
    table_descriptor.name = table
    table_config = self._table_to_config_dict[table]
    table_descriptor.vocabulary_size = table_config.vocabulary_size
    table_descriptor.dimension = table_config.dimension
    features_for_table = self._table_to_features_dict[table]
    table_descriptor.num_features = len(features_for_table)
    # Learning rate and gradient accumulation are shared by all tables.
    table_descriptor.optimization_parameters.learning_rate.constant = (
        self._optimization_parameters.learning_rate)
    table_descriptor.optimization_parameters.use_gradient_accumulation = (
        self._optimization_parameters.use_gradient_accumulation)
    # Optimizer-specific fields (Adagrad/Adam/SGD) are filled in by the
    # handler.
    self._optimizer_handler.set_optimization_parameters(table_descriptor)
  config_proto.mode = self._mode
  config_proto.batch_size_per_tensor_core = self._batch_size_per_core
  config_proto.num_hosts = self._num_hosts
  config_proto.num_tensor_cores = self._num_cores
  config_proto.sharding_strategy = elc.TPUEmbeddingConfiguration.DIV_DEFAULT
  config_proto.pipeline_execution_with_tensor_core = (
      self._optimization_parameters.pipeline_execution_with_tensor_core)
  return config_proto
def _create_variables_and_ops(self):
  """Create embedding variables and the ops that load them into the TPU.

  Note: despite the original summary, nothing is returned — the load and
  retrieve ops are accumulated into `self._load_parameters_ops` and
  `self._retrieve_parameters_ops`, and the shard variables into
  `self._table_to_table_variables_dict`.
  """
  self._load_parameters_ops = []
  self._retrieve_parameters_ops = []
  self._table_to_table_variables_dict = {}
  for table in self._table_to_config_dict:
    # device_fn places each partition ("part_N") on the matching CPU host.
    device_fn = _create_device_fn(self._hosts)
    with ops.device(device_fn):
      # TODO(shizhiw): allow user to specify variable name so that
      # they could make the name consistent with CPU etc.
      variable_name = table
      table_variables = _create_partitioned_variables(
          name=variable_name,
          num_hosts=self._num_hosts,
          vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
          embedding_dimension=self._table_to_config_dict[table].dimension,
          initializer=self._table_to_config_dict[table].initializer,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES])
      self._table_to_table_variables_dict[table] = table_variables
      # The optimizer handler also creates its slot variables here and
      # appends the per-shard load/retrieve ops to the two lists in place.
      self._optimizer_handler.create_variables_and_ops(
          table, variable_name, self._num_hosts,
          self._table_to_config_dict[table], table_variables,
          self._load_parameters_ops, self._retrieve_parameters_ops)
def _create_dummy_table_variables(self):
  """Create dummy embedding table variables.

  The sole purpose of these dummy variables is to trigger gradient
  calculation wrt them so that the gradients wrt activation can be captured
  and later sent to TPU embedding (see `_embedding_activations_grad`).

  Returns:
    Initializer for these variables.

  Raises:
    RuntimeError: if collection to store gradients already exists and is not
      empty.
  """
  self._dummy_table_variables = []
  # TODO(shizhiw): remove table id.
  for table_id, table in enumerate(self._table_to_features_dict):
    self._dummy_table_variables.append(
        variable_scope.get_variable(
            'tpu_embedding_dummy_table_variable_%s' % table,
            dtype=dtypes.float32,
            shape=[1],
            use_resource=True,
            trainable=True,
            # TODO(shizhiw): Remove these dummy variables as
            # tensorflow optimizer creates slot variable for them which
            # is undesirable.
            # e.g. tpu_embedding_dummy_table_variable_mlp_user/Adam{_1}.
            # Explicitly specifying collections prevents this variable from
            # being added to the GLOBAL_VARIABLES collection, so that Saver()
            # ignores it.
            collections=['tpu_embedding_dummy_table_variables']))
    # Pre-size the per-table gradient collection with one `None` slot per
    # feature; the gradient function fills them in by `lookup_id`.
    g = ops.get_default_graph()
    table_gradients = g.get_collection_ref(
        'tpu_embedding_gradients_table_%d' % table_id)
    if table_gradients:
      raise RuntimeError(
          'tpu_embedding_gradients_table_%d is not empty.' % table_id)
    table_gradients.extend([None] * len(self._table_to_features_dict[table]))
  return variables.variables_initializer(
      self._dummy_table_variables,
      name='tpu_embedding_dummy_table_variables_init')
def generate_enqueue_ops(self, sparse_features_list):
  """Generate enqueue ops.

  Args:
    sparse_features_list: a list of dictionaries mapping feature name
      strings to `SparseTensor`s, one dictionary per TPU core.
      Dictionaries for cores on the same host must be contiguous in the
      list (validated below).

  Returns:
    A list of ops (one per core) that enqueue the features to TPU for
    embedding lookup.
  """
  self._validate_generate_enqueue_ops_sparse_features_list(
      sparse_features_list)
  return [
      # `device_ordinal` is the core's index within its host, hence the
      # modulo by cores-per-host.
      self._generate_enqueue_op(
          sparse_features, device_ordinal=i % self._num_cores_per_host)
      for i, sparse_features in enumerate(sparse_features_list)
  ]
def _validate_generate_enqueue_ops_sparse_features_list(
    self, sparse_features_list):
  """Validate `sparse_features_list`.

  Checks, in order: one entry per core; every entry uses exactly the
  configured feature set; every value is a `SparseTensor`; all tensors in
  one entry live on the same device; and entries for the same host are
  contiguous in the list.

  Raises:
    ValueError: on any violation above.
  """
  if len(sparse_features_list) != self._num_cores:
    raise ValueError('Length of `sparse_features_list` should match the '
                     'number of cores; '
                     '`len(sparse_features_list)` is {}, '
                     'number of cores is {}.'.format(
                         len(sparse_features_list), self._num_cores))
  feature_set = set(self._feature_to_table_dict.keys())
  contiguous_device = None
  for i, sparse_features in enumerate(sparse_features_list):
    used_feature_set = set(sparse_features.keys())

    # Check features are valid.
    missing_feature_set = feature_set - used_feature_set
    if missing_feature_set:
      raise ValueError('`sparse_features_list[{}]` misses a feature that is '
                       'in `feature_to_config_dict`: {}.'.format(
                           i, missing_feature_set))

    extra_feature_set = used_feature_set - feature_set
    if extra_feature_set:
      raise ValueError('`sparse_features_list[{}]` has a feature that is not '
                       'in `feature_to_config_dict`: {}.'.format(
                           i, extra_feature_set))

    device = None
    device_feature = None
    for feature, tensor in six.iteritems(sparse_features):
      if not isinstance(tensor, sparse_tensor.SparseTensor):
        raise ValueError('`sparse_features_list[{}]` has a feature that is '
                         'not mapped to `SparseTensor`. '
                         '`feature`: {}, type: {}'.format(
                             i, feature, type(tensor)))

      # Check all features are on the same device.
      if device is None:
        device = tensor.op.device
        device_feature = feature
      else:
        if device != tensor.op.device:
          raise ValueError('Devices are different between features in '
                           '`sparse_features_list[{}]`; '
                           'devices: {}, {}; features: {}, {}.'.format(
                               i, device, tensor.op.device, feature,
                               device_feature))

    # The first core of each host (i % cores_per_host == 0) establishes the
    # host's device; the remaining cores must match it.
    if i % self._num_cores_per_host:
      if device != contiguous_device:
        raise ValueError('We expect the `sparse_features` which are on the '
                         'same host to be contiguous in '
                         '`sparse_features_list`, '
                         '`sparse_features_list[{}]` is on device {}, '
                         'but is expected to be on device {}.'.format(
                             i, device, contiguous_device))
    else:
      contiguous_device = device
def _generate_enqueue_op(self, sparse_features, device_ordinal):
  """Create one enqueue op for a single core's `sparse_features` dict.

  Args:
    sparse_features: dict mapping feature name to `SparseTensor` (already
      validated; all on the same device).
    device_ordinal: index of the target core within its host.

  Returns:
    An `enqueue_tpu_embedding_sparse_batch` op.
  """
  # Colocate with the input tensors so the enqueue runs on the host that
  # produced the data (all tensors share a device, per validation).
  with ops.colocate_with(list(sparse_features.values())[0]):
    sample_idcs, embedding_idcs, aggregation_weights = (
        self._format_for_tpu_embedding_sparse_batch(sparse_features))
    return tpu_ops.enqueue_tpu_embedding_sparse_batch(
        sample_idcs,
        embedding_idcs,
        aggregation_weights,
        combiners=self._combiners,
        device_ordinal=device_ordinal)
def _format_for_tpu_embedding_sparse_batch(self, sparse_features):
  """Format sparse features for `enqueue_tpu_embedding_sparse_batch()`.

  Features that share a table are stacked along the sample axis: feature
  `i`'s sample indices are offset by `i * batch_size_per_core` so each
  feature occupies its own batch-sized slice (mirrored by the slicing in
  `get_activations`).

  Args:
    sparse_features: a `Dict` of `SparseTensor`s for embedding.

  Returns:
    Three parallel lists (one entry per table): int32 sample indices,
    int32 embedding indices, and float32 aggregation weights — the
    arguments for `enqueue_tpu_embedding_sparse_batch()`.
  """
  sample_idcs, embedding_idcs, aggregation_weights = list(), list(), list()
  for table in self._table_to_features_dict:
    sample_t, indices_t, weights_t = list(), list(), list()
    features = self._table_to_features_dict[table]
    for i, feature in enumerate(features):
      tensor = sparse_features[feature]
      # Column 0 of a SparseTensor's indices is the sample (row) index.
      sample_indices = tensor.indices[:, 0]
      embedding_indices = tensor.values
      # All entries weigh equally; combiners do the aggregation.
      weights = array_ops.ones_like(embedding_indices)
      sample_t.append(i * self._batch_size_per_core + sample_indices)
      indices_t.append(embedding_indices)
      weights_t.append(weights)
    sample_idcs.append(
        math_ops.cast(array_ops.concat(sample_t, axis=0), dtype=dtypes.int32))
    embedding_idcs.append(
        math_ops.cast(
            array_ops.concat(indices_t, axis=0), dtype=dtypes.int32))
    aggregation_weights.append(
        math_ops.cast(
            array_ops.concat(weights_t, axis=0), dtype=dtypes.float32))
  return sample_idcs, embedding_idcs, aggregation_weights
def get_activations(self):
  """Get activations for features.

  This should be called within `computation` that is passed to
  `tpu.replicate` and friends.

  Returns:
    A dictionary mapping from `String` of feature name to `Tensor`
    of activation.
  """
  recv_activations = tpu_ops.recv_tpu_embedding_activations(
      num_outputs=len(self._table_to_config_dict),
      config=self._config_proto.SerializeToString())

  activations = collections.OrderedDict()
  for table_id, table in enumerate(self._table_to_features_dict):
    features = self._table_to_features_dict[table]
    for lookup_id, feature in enumerate(features):
      # Each feature owns a batch-sized row slice of its table's
      # activations (mirrors the sample-index offsetting done at enqueue in
      # `_format_for_tpu_embedding_sparse_batch`).
      start_row = lookup_id * self._batch_size_per_core
      end_row = start_row + self._batch_size_per_core
      # Routing through `tpu_embedding_activations` ties the activation to
      # the dummy variable so its gradient can be captured (see
      # `_embedding_activations_grad`).
      activations[feature] = gen_tpu_ops.tpu_embedding_activations(
          self._dummy_table_variables[table_id],
          recv_activations[table_id][start_row:end_row, :],
          table_id=table_id,
          lookup_id=lookup_id)
  return activations
# TODO(shizhiw): Make `gradient_multiplier` per feature. Setting it to 0 would
# have the effect of `tf.stop_gradients()`.
# TODO(shizhiw): Consider alternative ways to capture gradients wrt embedding
# layer outputs to remove `_dummy_table_variables`,
# `_embedding_activation_grad` and `tpu_embedding_gradients_table_%d'.
def generate_send_gradients_op(self, gradient_multipliers=None):
  """Retrieve gradients from collections and send them to TPU embedding.

  Args:
    gradient_multipliers: None, or dict mapping table names to gradient
      multiplier Tensors.

  Returns:
    SendTPUEmbeddingGradients Op.

  Raises:
    ValueError: If required gradients have not been defined.
    RuntimeError: If `mode` is not `TRAINING`.
  """
  if self._mode != TRAINING:
    raise RuntimeError('Only in training mode gradients need to '
                       'be sent to TPU embedding; got mode {}.'
                       .format(self._mode))
  g = ops.get_default_graph()
  gradients = list()
  for table_id, table in enumerate(self._table_to_config_dict):
    # Gradients were deposited per-lookup by `_embedding_activations_grad`.
    table_gradients = g.get_collection(
        'tpu_embedding_gradients_table_%d' % table_id)
    if any(gradient is None for gradient in table_gradients):
      raise ValueError(
          'Table {}/{} has undefined gradients: this is probably because the '
          'model asked TPUEmbedding to compute activations that were not '
          'used.'.format(table_id, table))
    concat_table_grads = array_ops.concat(table_gradients, axis=0)
    if gradient_multipliers is not None:
      # Bug fix: `table` is the table-name string (it is assigned to the
      # proto's string `name` field in `_create_config_proto`), so it is
      # itself the key into `gradient_multipliers`; the previous
      # `gradient_multipliers[table.name]` raised AttributeError on str.
      concat_table_grads *= gradient_multipliers[table]
    gradients.append(concat_table_grads)

  return tpu_ops.send_tpu_embedding_gradients(
      inputs=gradients, config=self.config_proto.SerializeToString())
def _validate_table_to_config_dict(table_to_config_dict):
  """Check that every value of `table_to_config_dict` is a `TableConfig`."""
  for table_name, config in six.iteritems(table_to_config_dict):
    if isinstance(config, TableConfig):
      continue
    raise ValueError('Value of `table_to_config_dict` must be of type '
                     '`TableConfig`, got {} for {}.'.format(type(config),
                                                            table_name))
def _validate_feature_to_table_dict(table_to_config_dict,
feature_to_table_dict):
"""Validate `feature_to_table_dict`."""
used_table_set = set(feature_to_table_dict.values())
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
raise ValueError('`table_to_config_dict` specifies table that is not '
'used in `feature_to_table_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_table_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_batch_size(batch_size, num_cores):
if batch_size % num_cores:
raise ValueError('`batch_size` is not a multiple of number of '
'cores. `batch_size`={}, `_num_cores`={}.'.format(
batch_size, num_cores))
def _validate_optimization_parameters(optimization_parameters):
  """Check that `optimization_parameters` is an `_OptimizationParameters`.

  Raises:
    ValueError: if the argument does not inherit from
      `_OptimizationParameters`.
  """
  if not isinstance(optimization_parameters, _OptimizationParameters):
    # Bug fix: the class name in the error message was misspelled
    # ('_OptimizationPramaters').
    raise ValueError('`optimization_parameters` must inherit from '
                     '`_OptimizationParameters`. '
                     '`type(optimization_parameters)`={}'.format(
                         type(optimization_parameters)))
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
def __init__(self, optimization_parameters):
self._optimization_parameters = optimization_parameters
def set_optimization_parameters(self, table_descriptor):
raise NotImplementedError()
def create_variables_and_ops(self, table, variable_name):
raise NotImplementedError()
def get_slot_names(self):
raise NotImplementedError()
def get_slot(self, table, name):
raise NotImplementedError()
class _AdagradHandler(_OptimizerHandler):
  """Handles Adagrad specific logic.

  Maintains one 'accumulator' slot variable per table, partitioned across
  hosts exactly like the table itself.
  """

  def __init__(self, optimization_parameters):
    super(_AdagradHandler, self).__init__(optimization_parameters)
    # Maps table name -> list of per-host accumulator shard variables.
    self._table_to_accumulator_variables_dict = {}

  def set_optimization_parameters(self, table_descriptor):
    # Marks the descriptor as using Adagrad (the message has no fields to
    # set beyond its presence).
    table_descriptor.optimization_parameters.adagrad.SetInParent()

  def create_variables_and_ops(self, table, variable_name, num_hosts,
                               table_config, table_variables,
                               load_parameters_ops, retrieve_parameters_ops):
    """Create accumulator slots and per-shard load/retrieve ops for `table`."""
    optimizer_name = 'Adagrad'
    accumulator_initializer = init_ops.constant_initializer(
        self._optimization_parameters.initial_accumulator)
    accumulator_variables = _create_partitioned_variables(
        name='%s/%s' % (variable_name, optimizer_name),
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=accumulator_initializer)
    self._table_to_accumulator_variables_dict[table] = accumulator_variables
    # One load/retrieve op pair per host shard; host_id doubles as shard_id.
    for host_id, table_variable, accumulator_variable in (zip(
        range(num_hosts), table_variables, accumulator_variables)):
      # Colocate so the transfer op runs on the host owning the shard.
      with ops.colocate_with(table_variable):
        load_parameters_op = (
            tpu_ops.load_tpu_embedding_adagrad_parameters(
                parameters=table_variable,
                accumulators=accumulator_variable,
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        retrieved_table, retrieved_accumulator = (
            tpu_ops.retrieve_tpu_embedding_adagrad_parameters(
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        # Retrieval writes the TPU values back into the host variables.
        retrieve_parameters_op = control_flow_ops.group(
            state_ops.assign(table_variable, retrieved_table),
            state_ops.assign(accumulator_variable, retrieved_accumulator))
      load_parameters_ops.append(load_parameters_op)
      retrieve_parameters_ops.append(retrieve_parameters_op)

  def get_slot_names(self):
    return ['accumulator']

  def get_slot(self, table, name):
    if name not in self.get_slot_names():
      raise ValueError('Adagrad has {} as slot names; got {}.'
                       .format(self.get_slot_names(), name))
    return self._table_to_accumulator_variables_dict[table]
class _AdamHandler(_OptimizerHandler):
  """Handles Adam specific logic.

  Maintains two slot variables per table ('m' first moment, 'v' second
  moment), each partitioned across hosts like the table itself.
  """

  def __init__(self, optimization_parameters):
    super(_AdamHandler, self).__init__(optimization_parameters)
    # Maps table name -> list of per-host shard variables for each slot.
    self._table_to_m_variables_dict = {}
    self._table_to_v_variables_dict = {}

  def set_optimization_parameters(self, table_descriptor):
    table_descriptor.optimization_parameters.adam.beta1 = (
        self._optimization_parameters.beta1)
    table_descriptor.optimization_parameters.adam.beta2 = (
        self._optimization_parameters.beta2)
    table_descriptor.optimization_parameters.adam.epsilon = (
        self._optimization_parameters.epsilon)
    # The proto expresses the inverse of the user-facing flags.
    table_descriptor.optimization_parameters.adam.use_non_lazy_adam = (
        not self._optimization_parameters.lazy_adam)
    table_descriptor.optimization_parameters.adam.use_sum_inside_sqrt = (
        self._optimization_parameters.sum_inside_sqrt)

  def create_variables_and_ops(self, table, variable_name, num_hosts,
                               table_config, table_variables,
                               load_parameters_ops, retrieve_parameters_ops):
    """Create m/v slots and per-shard load/retrieve ops for `table`."""
    optimizer_name = 'Adam'
    m_initializer = init_ops.zeros_initializer()
    m_variables = _create_partitioned_variables(
        name='%s/%s/m' % (variable_name, optimizer_name),
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=m_initializer)
    v_initializer = init_ops.zeros_initializer()
    v_variables = _create_partitioned_variables(
        name='%s/%s/v' % (variable_name, optimizer_name),
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=v_initializer)
    self._table_to_m_variables_dict[table] = m_variables
    self._table_to_v_variables_dict[table] = v_variables
    # One load/retrieve op pair per host shard; host_id doubles as shard_id.
    for host_id, table_variable, m_variable, v_variable in (zip(
        range(num_hosts), table_variables,
        m_variables, v_variables)):
      # Colocate so the transfer op runs on the host owning the shard.
      with ops.colocate_with(table_variable):
        load_parameters_op = (
            tpu_ops.load_tpu_embedding_adam_parameters(
                parameters=table_variable,
                momenta=m_variable,
                velocities=v_variable,
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        retrieved_table, retrieved_m, retrieved_v = (
            tpu_ops.retrieve_tpu_embedding_adam_parameters(
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        # Retrieval writes the TPU values back into the host variables.
        retrieve_parameters_op = control_flow_ops.group(
            state_ops.assign(table_variable, retrieved_table),
            state_ops.assign(m_variable, retrieved_m),
            state_ops.assign(v_variable, retrieved_v))
      load_parameters_ops.append(load_parameters_op)
      retrieve_parameters_ops.append(retrieve_parameters_op)

  def get_slot_names(self):
    return ['m', 'v']

  def get_slot(self, table, name):
    if name == 'm':
      return self._table_to_m_variables_dict[table]
    elif name == 'v':
      return self._table_to_v_variables_dict[table]
    else:
      raise ValueError('Adam has {} as slot names; got {}.'
                       .format(self.get_slot_names(), name))
class _StochasticGradientDescentHandler(_OptimizerHandler):
  """Handles stochastic gradient descent specific logic.

  SGD keeps no optimizer state, so no slot variables are created; only the
  table itself is loaded/retrieved.
  """

  def set_optimization_parameters(self, table_descriptor):
    # Marks the descriptor as using SGD (presence-only message).
    (table_descriptor.optimization_parameters.stochastic_gradient_descent
     .SetInParent())

  def create_variables_and_ops(self, table, variable_name, num_hosts,
                               table_config, table_variables,
                               load_parameters_ops, retrieve_parameters_ops):
    """Append per-shard load/retrieve ops for `table` (no slots for SGD)."""
    del table_config  # Unused: no slot variables to size.
    # One load/retrieve op pair per host shard; host_id doubles as shard_id.
    for host_id, table_variable in (zip(
        range(num_hosts), table_variables)):
      # Colocate so the transfer op runs on the host owning the shard.
      with ops.colocate_with(table_variable):
        load_parameters_op = (
            tpu_ops
            .load_tpu_embedding_stochastic_gradient_descent_parameters(
                parameters=table_variable,
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        retrieved_table = (
            tpu_ops
            .retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        # Retrieval writes the TPU values back into the host variable.
        retrieve_parameters_op = control_flow_ops.group(
            state_ops.assign(table_variable, retrieved_table))
      load_parameters_ops.append(load_parameters_op)
      retrieve_parameters_ops.append(retrieve_parameters_op)

  def get_slot_names(self):
    return []

  def get_slot(self, table, name):
    raise ValueError('Stochastic gradient descent does not have slot variable.')
def _get_optimization_handler(optimization_parameters):
  """Return the `_OptimizerHandler` matching `optimization_parameters`.

  Args:
    optimization_parameters: an instance of `AdagradParameters`,
      `AdamParameters` or `StochasticGradientDescentParameters`.

  Returns:
    The corresponding `_OptimizerHandler` subclass instance.

  Raises:
    NotImplementedError: if the optimizer type is not supported.
  """
  if isinstance(optimization_parameters, AdagradParameters):
    return _AdagradHandler(optimization_parameters)
  elif isinstance(optimization_parameters, AdamParameters):
    return _AdamHandler(optimization_parameters)
  elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
    return _StochasticGradientDescentHandler(optimization_parameters)
  else:
    # Bug fix: the original `return`ed the exception instance instead of
    # raising it, silently handing callers a NotImplementedError object.
    raise NotImplementedError()
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
def _create_combiners(table_to_config_dict):
return [table_to_config_dict[t].combiner for t in table_to_config_dict]
def _create_table_to_features_dict(feature_to_table_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
for feature, table in six.iteritems(feature_to_table_dict):
if table in table_to_features_dict_tmp:
table_to_features_dict_tmp[table].append(feature)
else:
table_to_features_dict_tmp[table] = [feature]
table_to_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
return table_to_features_dict
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
if part_match:
idx = int(part_match.group(1))
else:
raise RuntimeError('Internal Error: '
'Expected %s to contain /part_*.' % op.name)
device = hosts[idx]
return device
return device_fn
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates ParitionedVariables based on `num_hosts` for `table`."""
# TODO(shizhiw): automatically place embedding lookup elsewhere?
if vocabulary_size < num_hosts:
raise ValueError('`vocabulary_size`({}) is smaller than `num_hosts`({}). '
'As TPU embedding is not optimized for small tables, '
'please consider other ways for this embedding lookup.')
slicing = [num_hosts, 1]
# TODO(shizhiw): deprecated, use tf.get_variable()?
return partitioned_variables.create_partitioned_variables(
name=name,
slicing=slicing,
shape=(vocabulary_size, embedding_dimension),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False)
@ops.RegisterGradient('TPUEmbeddingActivations')
def _embedding_activations_grad(activations_op, grad_wrt_activations):
  """Saves the gradient of embedding activations ops in a graph collection.

  The stored gradients are later concatenated and shipped to the TPU by
  `generate_send_gradients_op`; the collection slots were pre-sized with
  `None`s by `_create_dummy_table_variables`.
  """
  g = ops.get_default_graph()
  table_id = activations_op.get_attr('table_id')
  lookup_id = activations_op.get_attr('lookup_id')
  table_gradients = g.get_collection_ref(
      'tpu_embedding_gradients_table_%d' % table_id)

  # An empty collection means the gradient plumbing was never set up, i.e.
  # gradients are being requested outside of training mode.
  if not table_gradients:
    raise RuntimeError(
        'Gradients for TPUEmbedding have been generated in non-training mode. '
        'This is not expected. Consider putting your Optimizer.minimize code '
        'behind the training mode condition check. For Estimator, you can '
        'do \n\n'
        ' if mode == tf.estimator.ModeKeys.TRAIN:\n'
        ' train_op = opt.minimize(loss)\n'
        '\n')

  table_gradients[lookup_id] = array_ops.identity(grad_wrt_activations)
  return [
      # RegisterGradient requires that value be returned for all inputs. Since
      # the first argument (tpu_gradient_variable_{table_name}) has shape [1],
      # we will return zeros(shape=[1]). The actual gradient w.r.t. the
      # embedding activations (grad_wrt_activations) has the same shape as the
      # activations returned by embedding_activations.
      array_ops.zeros(arg.shape, dtype=dtypes.float32)
      for arg in activations_op.inputs
  ]
| apache-2.0 |
emnik/Pocket-Science-Lab | Measurements-using-expeyes.py | 2 | 3053 | '''
These are procedures for measuring various parameters using ExpEYES Jr
'''
'''
From croplus.py
'''
def measurecap():
    """Measure the capacitance on IN1 and report it on the GUI.

    NOTE(review): relies on module globals — `p` (presumably the connected
    ExpEYES device), `g` (a display widget with `.disp`), `msg` (status-line
    helper) and `_` (gettext translation) — TODO confirm against croplus.py.
    """
    msg(_('Starting Capacitance Measurement..'))
    cap = p.measure_cap()
    # measure_cap() returns None when the measurement failed (see eyesj.py
    # code below: capacitance out of range).
    if cap == None:
        msg(_('Error: Capacitance too high or short to ground'),'red')
        return
    g.disp(_('IN1: %6.1f pF')%cap)
    # These exact defaults (socket_cap == 30.0 pF, cap_calib == 1.0) are
    # treated as "never calibrated".
    if p.socket_cap == 30.0 and p.cap_calib == 1.0:
        msg(_('IN1 Not Calibrated.'))
    else:
        msg(_('IN1: %6.1f pF')%cap)
'''
From eyesj.py
'''
def measure_cv(self, ch, ctime, i = 5.5):
    '''
    Using the CTMU of PIC, charges a capacitor connected to IN1, IN2 or SEN,
    for 'ctime' microseconds and then mesures the voltage across it.
    The value of current can be set to .55uA, 5.5 uA, 55uA or 550 uA
    @param ch channel number (3 = IN1, 4 = IN2)
    @param ctime duration in microseconds
    @param i value of the current (defaults to 5.5 uA)
    @return the measured voltage, or None on a protocol error
    '''
    # Map the requested current to the hardware range code.
    if i > 500: # 550 uA
        irange = 0
    elif i > 50: #55 uA
        irange = 3
    elif i > 5: #5.5 uA, default value
        irange = 2
    else: # 0.55 uA
        irange = 1
    if ch not in [3,4]:
        self.msg = _('Current to be set only on IN1(3) or IN2(4)')
        print (_('Current to be set only on IN1 or IN2'))
        return
    # Serial protocol: command byte, channel, current range, charge time.
    self.sendByte(MEASURECV)
    self.sendByte(chr(ch))
    self.sendByte(chr(irange))
    self.sendInt(ctime)
    # The device acknowledges with b'D' before sending the reading.
    res = self.fd.read(1)
    if res != b'D':
        self.msg = warningWithResult(_('MEASURECV ERROR '), res)
        print (warningWithResult(_('MEASURECV ERROR '), res))
        return
    # Raw ADC value arrives as two little-endian bytes; indexing a bytes
    # object yields ints only on Python 3, hence the version check.
    res = self.fd.read(2)
    if sys.version_info.major == 3:
        iv = res[0] | (res[1] << 8)
    else:
        iv = ord(res[0]) | (ord(res[1]) << 8)
    # Convert the raw count to volts using the per-channel calibration:
    # presumably slope `m12[ch]` and offset `c[ch]` — TODO confirm.
    v = self.m12[ch] * iv + self.c[ch]
    return v
def measure_cap_raw(self, ctmin = 10):
    '''
    Measures the capacitance connected between IN1 and GND. Stray
    capacitance should be subtracted from the measured
    value. Measurement is done by charging the capacitor with 5.5 uA
    for a given time interval. Any error in the value of current
    is corrected by calibrating.
    @param ctmin minimum charge time in microseconds to start from
    @return capacitance in pF, or None on error
    '''
    # Increase the charge time until the capacitor reaches a measurable
    # voltage (> 2 V). NOTE(review): if ctmin >= 1000 the loop body never
    # runs and `v`/`ctime` are unbound — presumably never the case in
    # practice; verify callers.
    for ctime in range(ctmin, 1000, 10):
        v = self.measure_cv(3, ctime, 5.5) # 5.5 uA range is chosen
        if v > 2.0: break
    # v > 4 means near-saturation (capacitance too small / open);
    # v == 0 means no charge accumulated (too large / short).
    if (v > 4) or (v == 0):
        self.msg = _('Error measuring capacitance %5.3f') %v
        print (_('Error measuring capacitance'), v)
        return None
    # C = I*t/V with I = 5.5 uA and t in microseconds gives picofarads.
    return 5.5 * ctime / v # returns value in pF
def measure_cap(self, ctmin = 10):
    '''
    Measures the capacitance connected between IN1 and GND.
    Returns the value after applying corrections (socket stray capacitance
    subtracted, then scaled by the calibration factor), or None on error.
    @param ctmin minimum charge time in microseconds, forwarded to
        measure_cap_raw()
    '''
    # Bug fix: `ctmin` was accepted but never forwarded, so callers could
    # not actually change the starting charge time.
    cap = self.measure_cap_raw(ctmin)
    if cap is not None:
        return (cap - self.socket_cap) * self.cap_calib
    return None
| gpl-3.0 |
Bysmyyr/chromium-crosswalk | third_party/tlslite/tlslite/mathtls.py | 40 | 11303 | # Authors:
# Trevor Perrin
# Dave Baggett (Arcode Corporation) - MD5 support for MAC_SSL
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Miscellaneous helper functions."""
from .utils.compat import *
from .utils.cryptomath import *
import hmac
#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups]
goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\
(2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\
(2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\
(2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765
694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)]
def P_hash(macFunc, secret, seed, length):
    """TLS P_hash data expansion (RFC 5246 section 5).

    Iterates A(i) = HMAC(secret, A(i-1)) with A(0) = seed, emitting
    HMAC(secret, A(i) + seed) blocks until `length` bytes are produced.

    @param macFunc: callable (secret, data) -> MAC bytes (e.g. HMAC-MD5).
    @param secret: the HMAC key.
    @param seed: the expansion seed.
    @param length: number of output bytes wanted.
    @return a bytearray of exactly `length` bytes.
    """
    # Renamed from `bytes`, which shadowed the builtin.
    out = bytearray(length)
    A = seed
    index = 0
    while True:
        A = macFunc(secret, A)
        output = macFunc(secret, A + seed)
        for c in output:
            if index >= length:
                return out
            out[index] = c
            index += 1
    # The loop only exits via the return above (the original trailing
    # `return bytes` was unreachable).
def PRF(secret, label, seed, length):
    """TLS 1.0/1.1 PRF: XOR of P_MD5 and P_SHA1 streams (RFC 2246 sec 5)."""
    # The secret is split into two halves which overlap by one byte when
    # its length is odd: the left half feeds P_MD5, the right half P_SHA1.
    half = int(math.ceil(len(secret)/2.0))
    left = secret[:half]
    right = secret[len(secret) - half:]

    md5_stream = P_hash(HMAC_MD5, left, label + seed, length)
    sha1_stream = P_hash(HMAC_SHA1, right, label + seed, length)

    # XOR the two streams in place and return the result.
    for pos in range(length):
        md5_stream[pos] ^= sha1_stream[pos]
    return md5_stream
def PRF_1_2(secret, label, seed, length):
    """TLS 1.2 PRF (RFC 5246): a single P_hash expansion over HMAC-SHA256."""
    full_seed = label + seed
    return P_hash(HMAC_SHA256, secret, full_seed, length)
def PRF_SSL(secret, seed, length):
    """SSLv3 key-expansion function.

    Block i is MD5(secret + SHA1(salt_i + secret + seed)) where salt_i is
    'A', 'BB', 'CCC', ... — at most 26 blocks, as specified by SSLv3.

    Fix in this revision: renamed locals `bytes` and `input`, which
    shadowed builtins; behavior is unchanged.

    :param secret: master/premaster secret bytes
    :param seed: concatenated random values
    :param length: number of output bytes
    :returns: bytearray of exactly `length` bytes
    """
    out = bytearray(length)
    index = 0
    for x in range(26):
        # Salt is 'A', 'BB', 'CCC', etc..
        A = bytearray([ord('A') + x] * (x + 1))
        block = MD5(secret + SHA1(A + secret + seed))
        for c in block:
            if index >= length:
                return out
            out[index] = c
            index += 1
    return out
def calcMasterSecret(version, premasterSecret, clientRandom, serverRandom,
                     handshakeHash, useExtendedMasterSecret):
    """Derive the 48-byte master secret for the negotiated protocol version.

    With extended master secret (RFC 7627) the handshake session-hash
    replaces client+server randoms as the derivation seed.

    :raises AssertionError: for an unrecognized protocol version tuple
    """
    if useExtendedMasterSecret:
        label = b"extended master secret"
        seed = handshakeHash
    else:
        label = b"master secret"
        seed = clientRandom + serverRandom

    if version == (3, 0):
        return PRF_SSL(premasterSecret, seed, 48)
    if version in ((3, 1), (3, 2)):
        return PRF(premasterSecret, label, seed, 48)
    if version == (3, 3):
        return PRF_1_2(premasterSecret, label, seed, 48)
    raise AssertionError()
def makeX(salt, username, password):
    """Compute the SRP private key x = H(salt | H(username ":" password)).

    :raises ValueError: if username or salt is 256 bytes or longer
    """
    if len(username) >= 256:
        raise ValueError("username too long")
    if len(salt) >= 256:
        raise ValueError("salt too long")
    inner = SHA1(username + bytearray(b":") + password)
    outer = SHA1(salt + inner)
    return bytesToNumber(outer)
#This function is used by VerifierDB.makeVerifier
def makeVerifier(username, password, bits):
    """Create an SRP verifier entry (N, g, salt, verifier) for the group size.

    :param bits: group size; must be one of the supported SRP group sizes
    """
    # Map the requested bit size onto an index into goodGroupParameters.
    size_index = {1024: 0, 1536: 1, 2048: 2, 3072: 3, 4096: 4, 6144: 5, 8192: 6}
    g, N = goodGroupParameters[size_index[bits]]
    salt = getRandomBytes(16)
    x = makeX(salt, username, password)
    return N, g, salt, powMod(g, x, N)
def PAD(n, x):
    """Left-pad the byte encoding of x with zero bytes to the width of n."""
    width = len(numberToByteArray(n))
    encoded = numberToByteArray(x)
    shortfall = width - len(encoded)
    if shortfall > 0:
        encoded = (b"\0" * shortfall) + encoded
    return encoded
def makeU(N, A, B):
    """SRP scrambling parameter u = SHA1(PAD(A) | PAD(B)) as an integer."""
    padded = PAD(N, A) + PAD(N, B)
    return bytesToNumber(SHA1(padded))
def makeK(N, g):
    """SRP-6a multiplier k = SHA1(N | PAD(g)) as an integer."""
    material = numberToByteArray(N) + PAD(N, g)
    return bytesToNumber(SHA1(material))
def createHMAC(k, digestmod=hashlib.sha1):
    """Create a keyed HMAC object (HMAC-SHA1 by default)."""
    return hmac.new(k, digestmod=digestmod)
def createMAC_SSL(k, digestmod=None):
    """Create and key an SSLv3-style MAC object."""
    mac_obj = MAC_SSL()
    mac_obj.create(k, digestmod=digestmod)
    return mac_obj
class MAC_SSL(object):
    """SSLv3 MAC: hash(key + opad + hash(key + ipad + message)).

    Mimics the relevant part of the hashlib object interface
    (update/copy/digest).
    """

    def create(self, k, digestmod=None):
        """Key the MAC; digestmod defaults to SHA-1."""
        self.digestmod = digestmod or hashlib.sha1
        # MD5 uses 16-byte digests with 48 pad bytes; the other hashes
        # used here produce 20-byte digests and use 40 pad bytes.
        self.digest_size = 16 if (self.digestmod is hashlib.md5) else 20
        pad_len = 48 if self.digest_size == 16 else 40
        self.ohash = self.digestmod(k + b"\x5C" * pad_len)
        self.ihash = self.digestmod(k + b"\x36" * pad_len)

    def update(self, m):
        """Feed message bytes into the inner hash."""
        self.ihash.update(m)

    def copy(self):
        """Return an independent clone with the current state."""
        clone = MAC_SSL()
        clone.ihash = self.ihash.copy()
        clone.ohash = self.ohash.copy()
        clone.digestmod = self.digestmod
        clone.digest_size = self.digest_size
        return clone

    def digest(self):
        """Finalize non-destructively and return the MAC as a bytearray."""
        outer = self.ohash.copy()
        outer.update(self.ihash.digest())
        return bytearray(outer.digest())
| bsd-3-clause |
tody411/ImageViewerFramework | ivf/ui/tool/normal_constraint_tool.py | 1 | 7515 | # -*- coding: utf-8 -*-
## @package ivf.ui.tool.normal_constraint_tool
#
# ivf.ui.tool.normal_constraint_tool utility package.
# @author tody
# @date 2016/02/08
import numpy as np
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from ivf.ui.tool.base_tool import BaseTool
from ivf.scene.normal_constraint import NormalConstraintSet, NormalConstraint
from ivf.np.norm import normVectors, normalizeVector
from ivf.core.sfs.amg_constraints import normalConstraints, laplacianMatrix, silhouetteConstraints
from ivf.core.solver import amg_solver, image_solver
from ivf.cv.normal import normalizeImage, normalToColor
from ivf.cv.image import alpha, to8U, rgb, to32F, luminance
from ivf.core.sfs import image_constraints
from ivf.core.sfs.image_constraints import postNormalize
from ivf.core.sfs.pr_sfs import Wu08SFS
class NormalConstraintTool(BaseTool):
    """Interactive tool for placing/editing surface-normal constraints on an
    image and interpolating a dense normal map from them.

    Clicking adds or selects a constraint; dragging with the left button
    tilts the selected normal (drag distance maps to Nx/Ny via
    _normal_radius). Keys 0/1/2 switch the rendered view; Delete clears
    all constraints.
    """

    ## Constructor
    def __init__(self):
        super(NormalConstraintTool, self).__init__()
        self._normal_constraints = NormalConstraintSet()
        self._p_old = None                 # last mouse position (overlay cursor)
        self._n_old = None                 # normal of the selection at grab time
        self._selected_constraint = None   # constraint currently being dragged
        self._normal_radius = 40.0         # screen-px length of a unit normal
        self._image = None                 # working image (RGB or RGBA)
        self._N_32F = None                 # interpolated normal map (float32)

    ## Replace the constraint set and refresh the view.
    def setNormalConstraints(self, normal_constraints):
        self._normal_constraints = normal_constraints
        self._view.update()

    ## Accessor for the current constraint set.
    def normalConstraints(self):
        return self._normal_constraints

    ## Set the working image, re-run interpolation, and render it.
    def setImage(self, image):
        self._image = image
        self._interpolateNormal()
        self._view.render(image)

    ## Select a nearby constraint if one exists, otherwise add a new one.
    def mousePressEvent(self, e):
        p = self._mousePosition(e)
        if self._selectConstraint(p):
            return
        else:
            self.addConstraint(p)

    ## End the drag; the constraint keeps its last edited normal.
    def mouseReleaseEvent(self, e):
        print "p: ", self._selected_constraint.position()
        print "n: ", self._selected_constraint.normal()
        self._selected_constraint = None
        #self._interpolateNormal()

    ## Left-drag tilts the selected normal: the drag vector updates
    ## (Nx, Ny) and Nz is recomputed to keep the normal unit length.
    def mouseMoveEvent(self, e):
        p = self._mousePosition(e)
        self._p_old = p
        if e.buttons() & Qt.LeftButton:
            if self._selected_constraint is None:
                return
            p_c = self._selected_constraint.position()
            N_c = self._n_old
            dP = p - p_c
            Nx, Ny = N_c[0], N_c[1]
            # Screen y grows downward, hence the sign flip on Ny.
            Nx += dP[0] / self._normal_radius
            Ny += -dP[1] / self._normal_radius
            r = np.linalg.norm(np.array([Nx, Ny]))
            # Clamp so Nz stays real and strictly positive (front-facing).
            Nz = np.sqrt(max(0.001, 1.0 - r * r))
            N = np.array([Nx, Ny, Nz])
            N /= np.linalg.norm(N)
            self._selected_constraint.setNormal(N)
            self._view.update()

    ## Key 0: raw image; 1: (lazily computed) normal map; 2: recompute and
    ## show normal map; Delete: clear all constraints.
    def keyPressEvent(self, e):
        if e.key() == Qt.Key_0:
            self._view.render(self._image)
        if e.key() == Qt.Key_1:
            if self._N_32F is None:
                self._interpolateNormal()
            A_8U = None
            if self._image.shape[2] == 4:
                A_8U = to8U(alpha(self._image))
            self._view.render(normalToColor(self._N_32F, A_8U))
        if e.key() == Qt.Key_2:
            # NOTE(review): interpolation may run twice here (once above,
            # once more if _N_32F is still None) — presumably intentional
            # as a forced refresh; confirm.
            self._interpolateNormal()
            if self._N_32F is None:
                self._interpolateNormal()
            A_8U = None
            if self._image.shape[2] == 4:
                A_8U = to8U(alpha(self._image))
            self._view.render(normalToColor(self._N_32F, A_8U))
        if e.key() == Qt.Key_Delete:
            self._normal_constraints.clear()
            self._view.update()

    ## No key-release behavior.
    def keyReleaseEvent(self, e):
        pass

    ## Add a constraint at p, seeded from the interpolated normal map when
    ## available (otherwise straight-up [0, 0, 1]), and select it.
    def addConstraint(self, p):
        n = np.array([0.0, 0.0, 1.0])
        if self._N_32F is not None:
            n = self._N_32F[p[1], p[0], :]
        self._selected_constraint = NormalConstraint(point=p, normal=n)
        self._normal_constraints.addConstraint(self._selected_constraint)
        self._n_old = self._selected_constraint.normal()
        self._view.update()

    ## Rasterize the sparse constraints into seed/weight images, solve for
    ## a dense normal map, then snap constraints onto the solution.
    def _interpolateNormal(self):
        if self._normal_constraints.empty():
            return
        if self._image is None:
            return
        ps = np.int32(self._normal_constraints.positions())
        ns = self._normal_constraints.normals()
        h, w = self._image.shape[:2]
        # Seed normals and a binary weight mask at constraint pixels.
        N0_32F = np.zeros((h, w, 3))
        N0_32F[ps[:, 1], ps[:, 0]] = ns
        W_32F = np.zeros((h, w))
        W_32F[ps[:, 1], ps[:, 0]] = 1.0
        A_8U = None
        if self._image.shape[2] == 4:
            A_8U = to8U(alpha(self._image))
        self._N_32F = self._interpolateNormalImage(N0_32F, W_32F, A_8U)
        self._projectConstraints()

    ## Multigrid image-solver path: Laplacian smoothness + normal +
    ## silhouette constraints (brightness term currently disabled).
    def _interpolateNormalImage(self, N0_32F, W_32F, A_8U):
        constraints = []
        constraints.append(image_constraints.laplacianConstraints(w_c=0.1))
        constraints.append(image_constraints.normalConstraints(W_32F, N0_32F, w_c=3.0))
        L = normalizeVector(np.array([-0.2, 0.3, 0.7]))
        # Normalize luminance to [0, 1] (used by the disabled brightness term).
        I_32F = luminance(to32F(rgb(self._image)))
        I_min, I_max = np.min(I_32F), np.max(I_32F)
        I_32F = (I_32F - I_min) / (I_max - I_min)
        # constraints.append(image_constraints.brightnessConstraints(L, I_32F, w_c=0.5))
        constraints.append(image_constraints.silhouetteConstraints(A_8U, w_c=0.8))
        solver_iter = image_solver.solveIterator(constraints,
                                                 [postNormalize(th=0.0)])
        N_32F = np.array(N0_32F)
        N_32F = image_solver.solveMG(N_32F, solver_iter, iterations=10)
        # Recompute Nz from the solved (Nx, Ny) components.
        N_32F = image_constraints.NxyToNz(N_32F)
        return N_32F

    ## Alternative AMG solver path (currently unused by this tool).
    def _interpolateNormalAMG(self, N0_32F, W_32F, A_8U):
        h, w = N0_32F.shape[:2]
        A_c, b_c = normalConstraints(W_32F, N0_32F)
        A_8U = None
        if self._image.shape[2] == 4:
            A_8U = to8U(alpha(self._image))
        A_sil, b_sil = silhouetteConstraints(A_8U)
        A_L = laplacianMatrix((h, w))
        A = 10.0 * A_c + A_L + A_sil
        b = 10.0 * b_c + b_sil
        N_32F = amg_solver.solve(A, b).reshape(h, w, 3)
        N_32F = normalizeImage(N_32F)
        return N_32F

    ## Snap every constraint's normal onto the interpolated map so the
    ## overlay reflects the solved field.
    def _projectConstraints(self):
        for constraint in self._normal_constraints.constraints():
            p = constraint.position()
            N = self._N_32F[p[1], p[0]]
            constraint.setNormal(N)
        self._view.update()

    ## Paint the cursor (green) and each constraint (red dot + line showing
    ## the projected normal direction).
    def _overlayFunc(self, painter):
        pen = QPen(QColor.fromRgbF(0.0, 1.0, 0.0, 0.5))
        pen.setWidth(5)
        painter.setPen(pen)
        p = self._p_old
        if p is not None:
            painter.drawPoint(QPoint(p[0], p[1]))
        for constraint in self._normal_constraints.constraints():
            pen = QPen(QColor.fromRgbF(1.0, 0.0, 0.0, 0.7))
            pen.setWidth(5)
            painter.setPen(pen)
            p = constraint.position()
            painter.drawPoint(QPoint(p[0], p[1]))
            n = np.array(constraint.normal())
            # Flip y back into screen coordinates for drawing.
            n[1] = -n[1]
            p_n = p + self._normal_radius * n[:2]
            pen.setWidth(2)
            painter.setPen(pen)
            painter.drawLine(QPoint(p[0], p[1]), QPoint(p_n[0], p_n[1]))

    ## Select the constraint nearest to p when within 5 px; returns whether
    ## a selection was made.
    def _selectConstraint(self, p):
        if self._normal_constraints.empty():
            return False
        ps = self._normal_constraints.positions()
        dP = normVectors(ps - p)
        p_min = np.argmin(dP)
        if dP[p_min] < 5:
            self._selected_constraint = self._normal_constraints.constraint(p_min)
            self._n_old = self._selected_constraint.normal()
            return True
        return False
| mit |
gzzhanghao/mitmproxy | test/pathod/tutils.py | 3 | 3641 | import tempfile
import re
import shutil
import requests
from six.moves import cStringIO as StringIO
from six.moves import urllib
from six import BytesIO
from netlib import tcp
from netlib import utils
from netlib import tutils
from pathod import language
from pathod import pathoc
from pathod import pathod
from pathod import test
def treader(bytes):
    """
    Construct a tcp.Reader object from bytes.
    """
    return tcp.Reader(BytesIO(bytes))
class DaemonTests(object):
    """Mixin base for tests that need a running pathod daemon.

    Subclasses tweak behavior through the class attributes below; the
    daemon is started once per test class and torn down afterwards.
    """

    # Per-subclass daemon configuration knobs.
    nohang = False
    ssl = False
    timeout = None
    hexdump = False
    ssloptions = None
    nocraft = False
    explain = True

    @classmethod
    def setup_class(cls):
        """Start a pathod daemon configured from the class attributes."""
        opts = cls.ssloptions or {}
        cls.confdir = tempfile.mkdtemp()
        opts["confdir"] = cls.confdir
        so = pathod.SSLOptions(**opts)
        cls.d = test.Daemon(
            staticdir=test_data.path("data"),
            anchors=[
                (re.compile("/anchor/.*"), "202:da")
            ],
            ssl=cls.ssl,
            ssloptions=so,
            sizelimit=1 * 1024 * 1024,
            nohang=cls.nohang,
            timeout=cls.timeout,
            hexdump=cls.hexdump,
            nocraft=cls.nocraft,
            logreq=True,
            logresp=True,
            explain=cls.explain
        )

    @classmethod
    def teardown_class(cls):
        """Shut the daemon down and remove its temporary confdir."""
        cls.d.shutdown()
        shutil.rmtree(cls.confdir)

    def teardown(self):
        """Wait for in-flight requests to finish, then clear the log."""
        self.d.wait_for_silence()
        self.d.clear_log()

    def _getpath(self, path, params=None):
        """Fetch a path from the daemon with `requests`; returns the response."""
        scheme = "https" if self.ssl else "http"
        resp = requests.get(
            "%s://localhost:%s/%s" % (
                scheme,
                self.d.port,
                path
            ),
            verify=False,
            params=params
        )
        return resp

    def getpath(self, path, params=None):
        """Fetch a path via a pathoc client; returns the pathoc response."""
        logfp = StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=self.ssl,
            fp=logfp,
        )
        with c.connect():
            if params:
                path = path + "?" + urllib.parse.urlencode(params)
            resp = c.request("get:%s" % path)
            return resp

    def get(self, spec):
        """Request a crafted response for a pathod spec via /p/<spec>."""
        logfp = StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=self.ssl,
            fp=logfp,
        )
        with c.connect():
            resp = c.request(
                "get:/p/%s" % urllib.parse.quote(spec)
            )
            return resp

    def pathoc(
            self,
            specs,
            timeout=None,
            connect_to=None,
            ssl=None,
            ws_read_limit=None,
            use_http2=False,
    ):
        """
        Returns a (messages, text log) tuple.
        """
        # ssl=None means "inherit the class default".
        if ssl is None:
            ssl = self.ssl
        logfp = StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=ssl,
            ws_read_limit=ws_read_limit,
            timeout=timeout,
            fp=logfp,
            use_http2=use_http2,
        )
        with c.connect(connect_to):
            ret = []
            for i in specs:
                resp = c.request(i)
                if resp:
                    ret.append(resp)
            # Collect any pending frames (e.g. websocket) before stopping.
            for frm in c.wait():
                ret.append(frm)
            c.stop()
        return ret, logfp.getvalue()
# Re-exported netlib test helpers, so pathod tests can import them from here.
tmpdir = tutils.tmpdir
raises = tutils.raises

# Accessor for fixture files shipped next to this package.
test_data = utils.Data(__name__)
def render(r, settings=language.Settings()):
    """Resolve a pathod language spec and serve it into a byte buffer.

    Returns the raw bytes that would be sent on the wire.
    """
    resolved = r.resolve(settings)
    buf = BytesIO()
    assert language.serve(resolved, buf, settings)
    return buf.getvalue()
| mit |
rodrigods/keystone | setup.py | 608 | 1045 | #!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All packaging metadata is delegated to pbr, which reads setup.cfg.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
| apache-2.0 |
viru/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 88 | 9882 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicate tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
import time
import sys
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
    '''generic error message handler'''
    # Prefer boto's server-supplied message, then a plain .message
    # attribute, falling back to a generic "<class>: <err>" rendering.
    if hasattr(err, 'error_message'):
        return err.error_message
    if hasattr(err, 'message'):
        return err.message
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns True or False in regards to the existence of a VPC. When supplied
with a CIDR, it will check for matching tags to determine if it is a match
otherwise it will assume the VPC does not exist and thus return false.
"""
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if len(matching_vpcs) == 1:
matched_vpc = matching_vpcs[0]
elif len(matching_vpcs) > 1:
if multi:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    # Ensure the desired tag set and apply it if it differs from the VPC's
    # current tags. The Name tag always mirrors the module's `name` param.
    # Returns True when tags were changed, False otherwise; fails the
    # module on any AWS error.
    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # Python 2 cmp(): non-zero (truthy) when the dicts differ.
        if cmp(tags, current_tags):
            vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception, e:
        e_msg=boto_exception(e)
        module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate dhcp_id with the VPC when it differs from the current one.

    Returns True if an association call was made, False if already current.
    """
    if vpc_obj.dhcp_options_id == dhcp_id:
        return False
    connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return the VPC's attribute dict with non-serializable fields removed.

    Note: the keys are popped from the object's own __dict__ in place.
    Returns None when vpc_obj is None.
    """
    if vpc_obj is None:
        return None
    values = vpc_obj.__dict__
    for key in ("region", "item", "connection"):
        values.pop(key, None)
    return values
def main():
argument_spec=ec2_argument_spec()
argument_spec.update(dict(
name = dict(type='str', default=None, required=True),
cidr_block = dict(type='str', default=None, required=True),
tenancy = dict(choices=['default', 'dedicated'], default='default'),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
dhcp_opts_id = dict(type='str', default=None, required=False),
tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state = dict(choices=['present', 'absent'], default='present'),
multi_ok = dict(type='bool', default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
changed=False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if dns_hostnames and not dns_support:
module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is None:
try:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
# Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
# which is needed in order to detect the current status of DNS options. For now we just update
# the attribute each time and is not used as a changed-factor.
try:
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
elif state == 'absent':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is not None:
try:
connection.delete_vpc(vpc_obj.id)
vpc_obj = None
changed = True
except BotoServerError, e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
# NOTE: the wildcard imports are required by Ansible's module framework,
# which splices these snippet modules into the final file at build time.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
| gpl-3.0 |
Shopify/dd-agent | tests/checks/integration/test_cassandra.py | 46 | 2080 | # stdlib
import threading
import time
from types import ListType
import unittest
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
from dogstatsd import Server
from jmxfetch import JMXFetch
from tests.checks.common import Fixtures
STATSD_PORT = 8121
class DummyReporter(threading.Thread):
    """Background thread that periodically flushes a metrics aggregator.

    The latest non-empty flush result is kept in ``self.metrics`` for
    tests to poll.
    """

    def __init__(self, metrics_aggregator):
        threading.Thread.__init__(self)
        self.finished = threading.Event()
        self.metrics_aggregator = metrics_aggregator
        self.interval = 10
        self.metrics = None
        # NOTE: rebinds `finished` from the Event above to a plain bool;
        # tests stop the loop by setting it to True.
        self.finished = False
        self.start()

    def run(self):
        """Flush the aggregator every `interval` seconds until stopped."""
        while not self.finished:
            time.sleep(self.interval)
            self.flush()

    def flush(self):
        """Capture the aggregator's output when it produced any metrics."""
        flushed = self.metrics_aggregator.flush()
        if flushed:
            self.metrics = flushed
@attr(requires='cassandra')
class JMXTestCase(unittest.TestCase):
    """Integration test: JMXFetch collects Cassandra JMX metrics and ships
    them through a local dogstatsd server into the aggregator."""

    def setUp(self):
        # Start a dogstatsd server plus a JMXFetch daemon, each on its own
        # thread, reporting into a shared aggregator.
        aggregator = MetricsAggregator("test_host")
        self.server = Server(aggregator, "localhost", STATSD_PORT)
        self.reporter = DummyReporter(aggregator)

        self.t1 = threading.Thread(target=self.server.start)
        self.t1.start()

        confd_path = Fixtures.directory()
        self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
        self.t2 = threading.Thread(target=self.jmx_daemon.run)
        self.t2.start()

    def tearDown(self):
        # Stop the server, the reporter loop, and the JMX daemon.
        self.server.stop()
        self.reporter.finished = True
        self.jmx_daemon.terminate()

    def testCustomJMXMetric(self):
        # Poll up to 25 seconds for the first flushed batch of metrics.
        count = 0
        while self.reporter.metrics is None:
            time.sleep(1)
            count += 1
            if count > 25:
                raise Exception("No metrics were received in 25 seconds")

        metrics = self.reporter.metrics
        self.assertTrue(isinstance(metrics, ListType))
        self.assertTrue(len(metrics) > 0)
        # Expect a substantial number of cassandra.db.* metrics tagged with
        # the configured instance name.
        self.assertTrue(len([t for t in metrics if "cassandra.db." in t['metric'] and "instance:cassandra_instance" in t['tags']]) > 40, metrics)
| bsd-3-clause |
biodrone/plex-desk | desk/flask/lib/python3.4/site-packages/babel/messages/pofile.py | 136 | 16320 | # -*- coding: utf-8 -*-
"""
babel.messages.pofile
~~~~~~~~~~~~~~~~~~~~~
Reading and writing of files in the ``gettext`` PO (portable object)
format.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
from babel.messages.catalog import Catalog, Message
from babel.util import wraptext
from babel._compat import text_type
def unescape(string):
    r"""Reverse `escape` the given string.

    Strips the surrounding double quotes and resolves the backslash
    escapes (``\\``, ``\t``, ``\r``, ``\n``, ``\"``) used in PO files.

    :param string: the string to unescape
    """
    control_chars = {'n': '\n', 't': '\t', 'r': '\r'}

    def _replace(match):
        char = match.group(1)
        # \ and " unescape to themselves; n/t/r map to control characters.
        return control_chars.get(char, char)

    return re.compile(r'\\([\\trn"])').sub(_replace, string[1:-1])
def denormalize(string):
    r"""Reverse the normalization done by the `normalize` function.

    Multi-line PO values (one quoted chunk per line, optionally led by an
    empty ``""`` marker line) are unescaped chunk by chunk and
    concatenated; single-line values are unescaped directly.

    :param string: the string to denormalize
    """
    if '\n' not in string:
        return unescape(string)
    chunks = string.splitlines()
    # A leading empty string literal only marks the start of a multi-line
    # value; it contributes nothing to the result.
    if string.startswith('""'):
        chunks = chunks[1:]
    return ''.join(unescape(chunk) for chunk in chunks)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None):
    """Read messages from a ``gettext`` PO (portable object) file from the given
    file-like object and return a `Catalog`.

    >>> from datetime import datetime
    >>> from StringIO import StringIO
    >>> buf = StringIO('''
    ... #: main.py:1
    ... #, fuzzy, python-format
    ... msgid "foo %(name)s"
    ... msgstr "quux %(name)s"
    ...
    ... # A user comment
    ... #. An auto comment
    ... #: main.py:3
    ... msgid "bar"
    ... msgid_plural "baz"
    ... msgstr[0] "bar"
    ... msgstr[1] "baaz"
    ... ''')
    >>> catalog = read_po(buf)
    >>> catalog.revision_date = datetime(2007, 04, 01)

    >>> for message in catalog:
    ...     if message.id:
    ...         print (message.id, message.string)
    ...         print ' ', (message.locations, message.flags)
    ...         print ' ', (message.user_comments, message.auto_comments)
    (u'foo %(name)s', u'quux %(name)s')
     ([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
     ([], [])
    ((u'bar', u'baz'), (u'bar', u'baaz'))
     ([(u'main.py', 3)], set([]))
     ([u'A user comment'], [u'An auto comment'])

    .. versionadded:: 1.0
       Added support for explicit charset argument.

    :param fileobj: the file-like object to read the PO file from
    :param locale: the locale identifier or `Locale` object, or `None`
                   if the catalog is not bound to a locale (which basically
                   means it's a template)
    :param domain: the message domain
    :param ignore_obsolete: whether to ignore obsolete messages in the input
    :param charset: the character set of the catalog.
    """
    catalog = Catalog(locale=locale, domain=domain, charset=charset)

    # Parser state. One-element lists are used (rather than plain locals)
    # so the nested closures below can rebind the values in place.
    counter = [0]
    offset = [0]
    messages = []
    translations = []
    locations = []
    flags = []
    user_comments = []
    auto_comments = []
    obsolete = [False]
    context = []
    in_msgid = [False]
    in_msgstr = [False]
    in_msgctxt = [False]

    def _add_message():
        # Flush all accumulated state into a Message and reset the buffers.
        translations.sort()
        if len(messages) > 1:
            # Singular + plural forms present -> tuple msgid.
            msgid = tuple([denormalize(m) for m in messages])
        else:
            msgid = denormalize(messages[0])
        if isinstance(msgid, (list, tuple)):
            # Pad missing plural translations with empty strings.
            string = []
            for idx in range(catalog.num_plurals):
                try:
                    string.append(translations[idx])
                except IndexError:
                    string.append((idx, ''))
            string = tuple([denormalize(t[1]) for t in string])
        else:
            string = denormalize(translations[0][1])
        if context:
            msgctxt = denormalize('\n'.join(context))
        else:
            msgctxt = None
        message = Message(msgid, string, list(locations), set(flags),
                          auto_comments, user_comments, lineno=offset[0] + 1,
                          context=msgctxt)
        if obsolete[0]:
            if not ignore_obsolete:
                catalog.obsolete[msgid] = message
        else:
            catalog[msgid] = message
        del messages[:]; del translations[:]; del context[:]; del locations[:];
        del flags[:]; del auto_comments[:]; del user_comments[:];
        obsolete[0] = False
        counter[0] += 1

    def _process_message_line(lineno, line):
        # Dispatch a non-comment line onto the msgid/msgstr/msgctxt buffers.
        if line.startswith('msgid_plural'):
            in_msgid[0] = True
            msg = line[12:].lstrip()
            messages.append(msg)
        elif line.startswith('msgid'):
            in_msgid[0] = True
            offset[0] = lineno
            txt = line[5:].lstrip()
            # A new msgid terminates the previous entry.
            if messages:
                _add_message()
            messages.append(txt)
        elif line.startswith('msgstr'):
            in_msgid[0] = False
            in_msgstr[0] = True
            msg = line[6:].lstrip()
            if msg.startswith('['):
                # Plural form: msgstr[N] "..."
                idx, msg = msg[1:].split(']', 1)
                translations.append([int(idx), msg.lstrip()])
            else:
                translations.append([0, msg])
        elif line.startswith('msgctxt'):
            if messages:
                _add_message()
            in_msgid[0] = in_msgstr[0] = False
            context.append(line[7:].lstrip())
        elif line.startswith('"'):
            # Continuation string for whichever section is currently open.
            if in_msgid[0]:
                messages[-1] += u'\n' + line.rstrip()
            elif in_msgstr[0]:
                translations[-1][1] += u'\n' + line.rstrip()
            elif in_msgctxt[0]:
                context.append(line.rstrip())

    for lineno, line in enumerate(fileobj.readlines()):
        line = line.strip()
        if not isinstance(line, text_type):
            line = line.decode(catalog.charset)
        if line.startswith('#'):
            in_msgid[0] = in_msgstr[0] = False
            if messages and translations:
                _add_message()
            if line[1:].startswith(':'):
                # Location comment: "#: file:line file:line ..."
                for location in line[2:].lstrip().split():
                    pos = location.rfind(':')
                    if pos >= 0:
                        try:
                            lineno = int(location[pos + 1:])
                        except ValueError:
                            continue
                        locations.append((location[:pos], lineno))
            elif line[1:].startswith(','):
                # Flags comment: "#, fuzzy, python-format"
                for flag in line[2:].lstrip().split(','):
                    flags.append(flag.strip())
            elif line[1:].startswith('~'):
                # Obsolete entry: "#~ msgid ..." — parse the payload.
                obsolete[0] = True
                _process_message_line(lineno, line[2:].lstrip())
            elif line[1:].startswith('.'):
                # These are called auto-comments
                comment = line[2:].strip()
                if comment: # Just check that we're not adding empty comments
                    auto_comments.append(comment)
            else:
                # These are called user comments
                user_comments.append(line[1:].strip())
        else:
            _process_message_line(lineno, line)

    if messages:
        _add_message()

    # No actual messages found, but there was some info in comments, from which
    # we'll construct an empty header message
    elif not counter[0] and (flags or user_comments or auto_comments):
        messages.append(u'')
        translations.append([0, u''])
        _add_message()

    return catalog
# Tokenizer used by `normalize` to split text into wrappable chunks:
# whitespace runs, hyphenated words (keeping the hyphen attached to the
# left part), and em-dashes.
WORD_SEP = re.compile('('
                      r'\s+|'                                 # any whitespace
                      r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
                      r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)'   # em-dash
                      ')')
def escape(string):
    r"""Escape the given string so that it can be included in double-quoted
    strings in ``PO`` files.

    Backslashes, tabs, carriage returns, newlines, and double quotes are
    backslash-escaped, and the result is wrapped in double quotes.

    :param string: the string to escape
    """
    escaped = (string.replace('\\', '\\\\')
                     .replace('\t', '\\t')
                     .replace('\r', '\\r')
                     .replace('\n', '\\n')
                     .replace('\"', '\\"'))
    return '"%s"' % escaped
def normalize(string, prefix='', width=76):
    r"""Convert a string into a format that is appropriate for .po files.

    >>> print normalize('''Say:
    ...   "hello, world!"
    ... ''', width=None)
    ""
    "Say:\n"
    "  \"hello, world!\"\n"

    >>> print normalize('''Say:
    ...   "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
    ... ''', width=32)
    ""
    "Say:\n"
    "  \"Lorem ipsum dolor sit "
    "amet, consectetur adipisicing"
    " elit, \"\n"

    :param string: the string to normalize
    :param prefix: a string that should be prepended to every line
    :param width: the maximum line width; use `None`, 0, or a negative number
                  to completely disable line wrapping
    """
    if width and width > 0:
        prefixlen = len(prefix)
        lines = []
        for line in string.splitlines(True):
            if len(escape(line)) + prefixlen > width:
                # Line is too wide: split into WORD_SEP chunks and re-pack
                # them greedily. The chunk list is reversed so chunks can
                # be consumed from the end with pop().
                chunks = WORD_SEP.split(line)
                chunks.reverse()
                while chunks:
                    buf = []
                    size = 2  # account for the surrounding quotes
                    while chunks:
                        # Escaped chunk length minus its own quotes.
                        l = len(escape(chunks[-1])) - 2 + prefixlen
                        if size + l < width:
                            buf.append(chunks.pop())
                            size += l
                        else:
                            if not buf:
                                # handle long chunks by putting them on a
                                # separate line
                                buf.append(chunks.pop())
                            break
                    lines.append(u''.join(buf))
            else:
                lines.append(line)
    else:
        lines = string.splitlines(True)

    if len(lines) <= 1:
        # Single-line values are emitted as one quoted string.
        return escape(string)

    # Remove empty trailing line
    if lines and not lines[-1]:
        del lines[-1]
        lines[-1] += '\n'
    return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
             sort_output=False, sort_by_file=False, ignore_obsolete=False,
             include_previous=False):
    r"""Write a ``gettext`` PO (portable object) template file for a given
    message catalog to the provided file-like object.

    >>> catalog = Catalog()
    >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
    ...             flags=('fuzzy',))
    <Message...>
    >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
    <Message...>
    >>> from io import BytesIO
    >>> buf = BytesIO()
    >>> write_po(buf, catalog, omit_header=True)
    >>> print buf.getvalue()
    #: main.py:1
    #, fuzzy, python-format
    msgid "foo %(name)s"
    msgstr ""
    <BLANKLINE>
    #: main.py:3
    msgid "bar"
    msgid_plural "baz"
    msgstr[0] ""
    msgstr[1] ""
    <BLANKLINE>
    <BLANKLINE>

    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param width: the maximum line width for the generated output; use `None`,
                  0, or a negative number to completely disable line wrapping
    :param no_location: do not emit a location comment for every message
    :param omit_header: do not include the ``msgid ""`` entry at the top of the
                        output
    :param sort_output: whether to sort the messages in the output by msgid
    :param sort_by_file: whether to sort the messages in the output by their
                         locations
    :param ignore_obsolete: whether to ignore obsolete messages and not include
                            them in the output; by default they are included as
                            comments
    :param include_previous: include the old msgid as a comment when
                             updating the catalog
    """
    def _normalize(key, prefix=''):
        # Quote/wrap a msgid/msgstr value at the configured width.
        return normalize(key, prefix=prefix, width=width)

    def _write(text):
        # The output stream takes bytes; encode unicode text with the
        # catalog charset first.
        if isinstance(text, text_type):
            text = text.encode(catalog.charset, 'backslashreplace')
        fileobj.write(text)

    def _write_comment(comment, prefix=''):
        # xgettext always wraps comments even if --no-wrap is passed;
        # provide the same behaviour
        if width and width > 0:
            _width = width
        else:
            _width = 76
        for line in wraptext(comment, _width):
            _write('#%s %s\n' % (prefix, line.strip()))

    def _write_message(message, prefix=''):
        # Emit one catalog entry; plural entries get msgid_plural plus one
        # msgstr[n] per plural form of the catalog.
        if isinstance(message.id, (list, tuple)):
            if message.context:
                _write('%smsgctxt %s\n' % (prefix,
                                           _normalize(message.context, prefix)))
            _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
            _write('%smsgid_plural %s\n' % (
                prefix, _normalize(message.id[1], prefix)
            ))

            for idx in range(catalog.num_plurals):
                try:
                    string = message.string[idx]
                except IndexError:
                    # Fewer translations than plural forms: pad with empty.
                    string = ''

                _write('%smsgstr[%d] %s\n' % (
                    prefix, idx, _normalize(string, prefix)
                ))
        else:
            if message.context:
                _write('%smsgctxt %s\n' % (prefix,
                                           _normalize(message.context, prefix)))
            _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
            _write('%smsgstr %s\n' % (
                prefix, _normalize(message.string or '', prefix)
            ))

    messages = list(catalog)
    if sort_output:
        messages.sort()
    elif sort_by_file:
        # BUG FIX: this previously passed a comparison function built on the
        # cmp() builtin, which only works on Python 2 (cmp() and the
        # positional cmp argument of list.sort() were removed in Python 3).
        # Sorting on the locations key is equivalent on both versions.
        messages.sort(key=lambda msg: msg.locations)

    for message in messages:
        if not message.id:  # This is the header "message"
            if omit_header:
                continue
            comment_header = catalog.header_comment
            if width and width > 0:
                lines = []
                for line in comment_header.splitlines():
                    lines += wraptext(line, width=width,
                                      subsequent_indent='# ')
                comment_header = u'\n'.join(lines)
            _write(comment_header + u'\n')

        for comment in message.user_comments:
            _write_comment(comment)
        for comment in message.auto_comments:
            _write_comment(comment, prefix='.')

        if not no_location:
            # Location comments always use forward slashes, regardless of OS.
            locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
                              for filename, lineno in message.locations])
            _write_comment(locs, prefix=':')
        if message.flags:
            # Leading '' yields the "#, flag1, flag2" comma-separated form.
            _write('#%s\n' % ', '.join([''] + sorted(message.flags)))

        if message.previous_id and include_previous:
            _write_comment('msgid %s' % _normalize(message.previous_id[0]),
                           prefix='|')
            if len(message.previous_id) > 1:
                _write_comment('msgid_plural %s' % _normalize(
                    message.previous_id[1]
                ), prefix='|')

        _write_message(message)
        _write('\n')

    if not ignore_obsolete:
        # Obsolete messages are kept as "#~ " commented-out entries.
        for message in catalog.obsolete.values():
            for comment in message.user_comments:
                _write_comment(comment)
            _write_message(message, prefix='#~ ')
            _write('\n')
| mit |
# Transliteration table for the Unicode block U+2200..U+22FF
# (Mathematical Operators).  Slot ``i`` holds the ASCII replacement for
# code point U+22xx with xx == i; operators with no reasonable ASCII
# spelling map to the '[?]' placeholder.
#
# Only a handful of entries differ from the placeholder; they are listed
# here and every other slot is filled with the default.
_ASCII_OVERRIDES = {
    0x12: '-',    # U+2212 MINUS SIGN
    0x15: '/',    # U+2215 DIVISION SLASH
    0x16: '\\',   # U+2216 SET MINUS
    0x17: '*',    # U+2217 ASTERISK OPERATOR
    0x23: '|',    # U+2223 DIVIDES
    0x36: ':',    # U+2236 RATIO
    0x3c: '~',    # U+223C TILDE OPERATOR
    0x64: '<=',   # U+2264 LESS-THAN OR EQUAL TO
    0x65: '>=',   # U+2265 GREATER-THAN OR EQUAL TO
    0x66: '<=',   # U+2266 LESS-THAN OVER EQUAL TO
    0x67: '>=',   # U+2267 GREATER-THAN OVER EQUAL TO
}

# BUG FIX: the literal tuple stopped at entry 0xfe (255 entries).  These
# tables are indexed with the low byte of the code point, so a short table
# raises IndexError for U+22FF; pad the table to the full 256 entries.
data = tuple(_ASCII_OVERRIDES.get(i, '[?]') for i in range(0x100))
| gpl-3.0 |
mindriot101/bokeh | bokeh/sampledata/unemployment1948.py | 5 | 1954 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide US Unemployment rate data by year, from 1948 to 2013
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals

import logging
# Module-level logger named after this module (standard library convention).
log = logging.getLogger(__name__)

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Standard library imports

# External imports

# Bokeh imports
from ..util.sampledata import package_csv

#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------

# The public API of this sample-data module is the single ``data`` table.
__all__ = (
    'data',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

# US unemployment-rate table, loaded eagerly at import time from the CSV
# shipped with the package.  NOTE(review): ``package_csv`` presumably parses
# the file and returns a DataFrame -- confirm against bokeh.util.sampledata.
data = package_csv('unemployment1948', 'unemployment1948.csv')
| bsd-3-clause |
Obus/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    """Check FactorAnalysis recovers the data covariance structure.

    Fits both SVD backends ('randomized' and 'lapack') on data drawn from
    the FA generative model, checks transform shape, score consistency,
    log-likelihood monotonicity, covariance recovery, input validation,
    agreement between backends, the convergence warning, and the
    covariance/precision inverse pair for several values of n_components.
    """
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations; wlog, mean is 0
    X = np.dot(h, W) + noise

    # An invalid svd_method must be rejected, both at fit time when set
    # after construction and through the constructor argument.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)

    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # BUG FIX: the previous check was np.all(np.diff(fa.loglike_)),
        # which only verified that successive log-likelihoods were
        # *different* (non-zero diffs) -- a strictly decreasing sequence
        # would have passed.  EM must not decrease the log-likelihood, so
        # require every difference to be positive.
        increasing = np.all(np.diff(fa.loglike_) > 0)
        assert_greater(increasing, 0., 'Log likelihood did not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        # Mismatched noise_variance_init length must be rejected.
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    def _abs_attr(est, name):
        # Component signs are arbitrary across solvers, so compare magnitudes.
        return np.abs(getattr(est, name))

    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(_abs_attr(fa1, attr), _abs_attr(fa2, attr))

    # A single EM iteration cannot converge on this data.
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
jounex/hue | desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/layer1.py | 148 | 44082 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.elastictranscoder import exceptions
class ElasticTranscoderConnection(AWSAuthConnection):
"""
AWS Elastic Transcoder Service
The AWS Elastic Transcoder Service.
"""
APIVersion = "2012-09-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"IncompatibleVersionException": exceptions.IncompatibleVersionException,
"LimitExceededException": exceptions.LimitExceededException,
"ResourceInUseException": exceptions.ResourceInUseException,
"AccessDeniedException": exceptions.AccessDeniedException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServiceException": exceptions.InternalServiceException,
"ValidationException": exceptions.ValidationException,
}
    def __init__(self, **kwargs):
        # Resolve the target region before forwarding the remaining keyword
        # arguments to AWSAuthConnection.
        region = kwargs.get('region')
        if not region:
            # No (or falsy) region supplied: fall back to the class defaults.
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        else:
            # A RegionInfo object was supplied; remove it so the superclass
            # does not receive an unexpected keyword argument.
            del kwargs['region']
        # Route all requests to the chosen region's endpoint host.
        kwargs['host'] = region.endpoint
        super(ElasticTranscoderConnection, self).__init__(**kwargs)
        self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def cancel_job(self, id=None):
"""
The CancelJob operation cancels an unfinished job.
You can only cancel a job that has a status of `Submitted`. To
prevent a pipeline from starting to process a job while you're
getting the job identifier, use UpdatePipelineStatus to
temporarily pause the pipeline.
:type id: string
:param id: The identifier of the job that you want to cancel.
To get a list of the jobs (including their `jobId`) that have a status
of `Submitted`, use the ListJobsByStatus API action.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def create_job(self, pipeline_id=None, input_name=None, output=None,
outputs=None, output_key_prefix=None, playlists=None):
"""
When you create a job, Elastic Transcoder returns JSON data
that includes the values that you specified plus information
about the job that is created.
If you have specified more than one output for your jobs (for
example, one output for the Kindle Fire and another output for
the Apple iPhone 4s), you currently must use the Elastic
Transcoder API to list the jobs (as opposed to the AWS
Console).
:type pipeline_id: string
:param pipeline_id: The `Id` of the pipeline that you want Elastic
Transcoder to use for transcoding. The pipeline determines several
settings, including the Amazon S3 bucket from which Elastic
Transcoder gets the files to transcode and the bucket into which
Elastic Transcoder puts the transcoded files.
:type input_name: dict
:param input_name: A section of the request body that provides
information about the file that is being transcoded.
:type output: dict
:param output: The `CreateJobOutput` structure.
:type outputs: list
:param outputs: A section of the request body that provides information
about the transcoded (target) files. We recommend that you use the
`Outputs` syntax instead of the `Output` syntax.
:type output_key_prefix: string
:param output_key_prefix: The value, if any, that you want Elastic
Transcoder to prepend to the names of all files that this job
creates, including output files, thumbnails, and playlists.
:type playlists: list
:param playlists: If you specify a preset in `PresetId` for which the
value of `Container` is ts (MPEG-TS), Playlists contains
information about the master playlists that you want Elastic
Transcoder to create.
We recommend that you create only one master playlist. The maximum
number of master playlists in a job is 30.
"""
uri = '/2012-09-25/jobs'
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if input_name is not None:
params['Input'] = input_name
if output is not None:
params['Output'] = output
if outputs is not None:
params['Outputs'] = outputs
if output_key_prefix is not None:
params['OutputKeyPrefix'] = output_key_prefix
if playlists is not None:
params['Playlists'] = playlists
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def create_pipeline(self, name=None, input_bucket=None,
output_bucket=None, role=None, notifications=None,
content_config=None, thumbnail_config=None):
"""
The CreatePipeline operation creates a pipeline with settings
that you specify.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket in which you want Elastic
Transcoder to save the transcoded files. (Use this, or use
ContentConfig:Bucket plus ThumbnailConfig:Bucket.)
Specify this value when all of the following are true:
+ You want to save transcoded files, thumbnails (if any), and playlists
(if any) together in one bucket.
+ You do not want to specify the users or groups who have access to the
transcoded files, thumbnails, and playlists.
+ You do not want to specify the permissions that Elastic Transcoder
grants to the files. When Elastic Transcoder saves files in
`OutputBucket`, it grants full control over the files only to the
AWS account that owns the role that is specified by `Role`.
+ You want to associate the transcoded files and thumbnails with the
Amazon S3 Standard storage class.
If you want to save transcoded files and playlists in one bucket and
thumbnails in another bucket, specify which users can access the
transcoded files or the permissions the users have, or change the
Amazon S3 storage class, omit `OutputBucket` and specify values for
`ContentConfig` and `ThumbnailConfig` instead.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to create the pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic that you want
to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process a job in this pipeline. This is
the ARN that Amazon SNS returned when you created the topic. For
more information, see Create a Topic in the Amazon Simple
Notification Service Developer Guide.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job in
this pipeline. This is the ARN that Amazon SNS returned when you
created the topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines'
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def create_preset(self, name=None, description=None, container=None,
video=None, audio=None, thumbnails=None):
"""
The CreatePreset operation creates a preset with settings that
you specify.
Elastic Transcoder checks the CreatePreset settings to ensure
that they meet Elastic Transcoder requirements and to
determine whether they comply with H.264 standards. If your
settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (
`ValidationException`) and does not create the preset. If the
settings are valid for Elastic Transcoder but aren't strictly
compliant with the H.264 standard, Elastic Transcoder creates
the preset and returns a warning message in the response. This
helps you determine whether your settings comply with the
H.264 standard while giving you greater flexibility with
respect to the video that Elastic Transcoder produces.
Elastic Transcoder uses the H.264 video-compression format.
For more information, see the International Telecommunication
Union publication Recommendation ITU-T H.264: Advanced video
coding for generic audiovisual services .
:type name: string
:param name: The name of the preset. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
:type description: string
:param description: A description of the preset.
:type container: string
:param container: The container type for the output file. Valid values
include `mp3`, `mp4`, `ogg`, `ts`, and `webm`.
:type video: dict
:param video: A section of the request body that specifies the video
parameters.
:type audio: dict
:param audio: A section of the request body that specifies the audio
parameters.
:type thumbnails: dict
:param thumbnails: A section of the request body that specifies the
thumbnail parameters, if any.
"""
uri = '/2012-09-25/presets'
params = {}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if container is not None:
params['Container'] = container
if video is not None:
params['Video'] = video
if audio is not None:
params['Audio'] = audio
if thumbnails is not None:
params['Thumbnails'] = thumbnails
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def delete_pipeline(self, id=None):
"""
The DeletePipeline operation removes a pipeline.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
jobs). If the pipeline is currently in use, `DeletePipeline`
returns an error.
:type id: string
:param id: The identifier of the pipeline that you want to delete.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def delete_preset(self, id=None):
"""
The DeletePreset operation removes a preset that you've added
in an AWS region.
You can't delete the default presets that are included with
Elastic Transcoder.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None,
page_token=None):
"""
The ListJobsByPipeline operation gets a list of the jobs
currently in a pipeline.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
each job that satisfies the search criteria.
:type pipeline_id: string
:param pipeline_id: The ID of the pipeline for which you want to get
job information.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id)
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_jobs_by_status(self, status=None, ascending=None,
page_token=None):
"""
The ListJobsByStatus operation gets a list of jobs that have a
specified status. The response body contains one element for
each job that satisfies the search criteria.
:type status: string
:param status: To get information about all of the jobs associated with
the current AWS account that have a given status, specify the
following status: `Submitted`, `Progressing`, `Complete`,
`Canceled`, or `Error`.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByStatus/{0}'.format(status)
params = {}
if status is not None:
params['Status'] = status
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_pipelines(self, ascending=None, page_token=None):
"""
The ListPipelines operation gets a list of the pipelines
associated with the current AWS account.
:type ascending: string
:param ascending: To list pipelines in chronological order by the date
and time that they were created, enter `True`. To list pipelines in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/pipelines'.format()
params = {}
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_presets(self, ascending=None, page_token=None):
"""
The ListPresets operation gets a list of the default presets
included with Elastic Transcoder and the presets that you've
added in an AWS region.
:type ascending: string
:param ascending: To list presets in chronological order by the date
and time that they were created, enter `True`. To list presets in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/presets'.format()
params = {}
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def read_job(self, id=None):
"""
The ReadJob operation returns detailed information about a
job.
:type id: string
:param id: The identifier of the job for which you want to get detailed
information.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_pipeline(self, id=None):
"""
The ReadPipeline operation gets detailed information about a
pipeline.
:type id: string
:param id: The identifier of the pipeline to read.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_preset(self, id=None):
"""
The ReadPreset operation gets detailed information about a
preset.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def test_role(self, role=None, input_bucket=None, output_bucket=None,
topics=None):
"""
The TestRole operation tests the IAM role used to create the
pipeline.
The `TestRole` action lets you determine whether the IAM role
you are using has sufficient permissions to let Elastic
Transcoder perform tasks associated with the transcoding
process. The action attempts to assume the specified IAM role,
checks read access to the input and output buckets, and tries
to send a test notification to Amazon SNS topics that you
specify.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to test.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket that contains media files to
be transcoded. The action attempts to read from this bucket.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket that Elastic Transcoder will
write transcoded media files to. The action attempts to read from
this bucket.
:type topics: list
:param topics: The ARNs of one or more Amazon Simple Notification
Service (Amazon SNS) topics that you want the action to send a test
notification to.
"""
uri = '/2012-09-25/roleTests'
params = {}
if role is not None:
params['Role'] = role
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if topics is not None:
params['Topics'] = topics
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline(self, id, name=None, input_bucket=None, role=None,
notifications=None, content_config=None,
thumbnail_config=None):
"""
Use the `UpdatePipeline` operation to update settings for a
pipeline. When you change pipeline settings, your changes take
effect immediately. Jobs that you have already submitted and
that Elastic Transcoder has not started to process are
affected in addition to jobs that you submit after you change
settings.
:type id: string
:param id: The ID of the pipeline that you want to update.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode and the graphics that you want to
use as watermarks.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to transcode jobs for this pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic or topics to
notify in order to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('PUT', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_notifications(self, id=None, notifications=None):
"""
With the UpdatePipelineNotifications operation, you can update
Amazon Simple Notification Service (Amazon SNS) notifications
for a pipeline.
When you update notifications for a pipeline, Elastic
Transcoder returns the values that you specified in the
request.
:type id: string
:param id: The identifier of the pipeline for which you want to change
notification settings.
:type notifications: dict
:param notifications:
The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the
topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
"""
uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
params = {}
if id is not None:
params['Id'] = id
if notifications is not None:
params['Notifications'] = notifications
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_status(self, id=None, status=None):
"""
The UpdatePipelineStatus operation pauses or reactivates a
pipeline, so that the pipeline stops or restarts the
processing of jobs.
Changing the pipeline status is useful if you want to cancel
one or more jobs. You can't cancel jobs after Elastic
Transcoder has started processing them; if you pause the
pipeline to which you submitted the jobs, you have more time
to get the job IDs for the jobs that you want to cancel, and
to send a CancelJob request.
:type id: string
:param id: The identifier of the pipeline to update.
:type status: string
:param status:
The desired status of the pipeline:
+ `Active`: The pipeline is processing jobs.
+ `Paused`: The pipeline is not currently processing jobs.
"""
uri = '/2012-09-25/pipelines/{0}/status'.format(id)
params = {}
if id is not None:
params['Id'] = id
if status is not None:
params['Status'] = status
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = super(ElasticTranscoderConnection, self).make_request(
verb, resource, headers=headers, data=data, params=params)
body = json.loads(response.read().decode('utf-8'))
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
| apache-2.0 |
yize/grunt-tps | tasks/lib/python/Lib/python2.7/lib-tk/test/runtktests.py | 61 | 4100 | """
Use this module to get and run all tk tests.
Tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.test_support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
_tk_unavailable = None
def check_tk_availability():
    """Check that Tk is installed and available.

    The result of the probe is computed once and cached in the
    module-level ``_tk_unavailable`` flag; later calls reuse it.
    Raises ``unittest.SkipTest`` when Tk cannot be used, otherwise
    returns None.
    """
    global _tk_unavailable

    if _tk_unavailable is None:
        _tk_unavailable = False
        if sys.platform == 'darwin':
            # The Aqua Tk implementations on OS X can abort the process if
            # being called in an environment where a window server connection
            # cannot be made, for instance when invoked by a buildbot or ssh
            # process not running under the same user id as the current console
            # user. To avoid that, raise an exception if the window manager
            # connection is not available.
            from ctypes import cdll, c_int, pointer, Structure
            from ctypes.util import find_library
            app_services = cdll.LoadLibrary(find_library("ApplicationServices"))

            if app_services.CGMainDisplayID() == 0:
                _tk_unavailable = "cannot run without OS X window manager"
            else:
                class ProcessSerialNumber(Structure):
                    _fields_ = [("highLongOfPSN", c_int),
                                ("lowLongOfPSN", c_int)]
                psn = ProcessSerialNumber()
                psn_p = pointer(psn)
                # Ask Carbon whether this process can become a GUI (front)
                # process; negative return values signal failure.
                if ( (app_services.GetCurrentProcess(psn_p) < 0) or
                     (app_services.SetFrontProcess(psn_p) < 0) ):
                    _tk_unavailable = "cannot run without OS X gui process"
        else:   # not OS X
            import Tkinter
            try:
                # Instantiating any widget forces Tk initialization, which
                # fails if no display/Tk runtime is available.
                Tkinter.Button()
            except Tkinter.TclError as msg:
                # assuming tk is not available
                _tk_unavailable = "tk not available: %s" % msg

    if _tk_unavailable:
        raise unittest.SkipTest(_tk_unavailable)
    return
def is_package(path):
    """Return True if *path* contains a package __init__ marker file.

    Bug fix: the third marker name was misspelled '__init.pyo' (missing
    the trailing underscores), so byte-compiled-only packages with just
    an __init__.pyo were not recognized.
    """
    markers = ('__init__.py', '__init__.pyc', '__init__.pyo')
    for name in os.listdir(path):
        if name in markers:
            return True
    return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
    """Import and yield modules whose names start with test_ and live in
    packages found under *basepath*.

    If *packages* is given, it should contain the names of the packages
    whose tests are to be collected; other packages are skipped.  When
    *gui* is false, modules whose import is denied for lack of a GUI
    resource are silently skipped instead of raising.
    """
    py_ext = '.py'

    for dirpath, dirnames, filenames in os.walk(basepath):
        # Prune hidden directories in place so os.walk does not descend
        # into them.
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]

        if not (is_package(dirpath) and filenames):
            continue

        # Derive the dotted package name from the path below basepath.
        pkg_name = dirpath[len(basepath) + len(os.sep):].replace('/', '.')
        if packages and pkg_name not in packages:
            continue

        for name in filenames:
            if not (name.startswith('test_') and name.endswith(py_ext)):
                continue
            try:
                yield importlib.import_module(
                    ".%s" % name[:-len(py_ext)], pkg_name)
            except test.test_support.ResourceDenied:
                # GUI-only module and no GUI available: skip unless GUI
                # tests were explicitly requested.
                if gui:
                    raise
def get_tests(text=True, gui=True, packages=None):
    """Yield all the tests in the modules found by get_tests_modules.

    If *gui* is false, only tests that do not require a GUI will be
    returned."""
    # Map the flags onto the module attributes that hold the test lists.
    attrs = [attr for enabled, attr in ((text, 'tests_nogui'),
                                        (gui, 'tests_gui')) if enabled]
    for module in get_tests_modules(gui=gui, packages=packages):
        for attr in attrs:
            # Modules that lack the attribute simply contribute nothing.
            for test in getattr(module, attr, ()):
                yield test
if __name__ == "__main__":
    # Enable the 'gui' resource so GUI tests are not skipped by the
    # regrtest resource checks, then run every discovered test.
    test.test_support.use_resources = ['gui']
    test.test_support.run_unittest(*get_tests())
| mit |
GoogleCloudPlatform/datacatalog-connectors-bi | google-datacatalog-looker-connector/src/google/datacatalog_connectors/looker/sync/metadata_synchronizer.py | 1 | 16983 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
from urllib.parse import urlparse
from google.datacatalog_connectors.commons import cleanup, ingest
from looker_sdk import error
from google.datacatalog_connectors.looker import entities, prepare, scrape
from google.datacatalog_connectors.looker.prepare import constants
class MetadataSynchronizer:
    """Orchestrates a full scrape > prepare > ingest run that mirrors the
    metadata of a Looker instance into Google Data Catalog.
    """
    # Data Catalog Entry Group all synchronized entries belong to, and the
    # "user specified system" value stamped on them; both are also used to
    # scope the obsolete-entry cleanup search.
    __ENTRY_GROUP_ID = 'looker'
    __SPECIFIED_SYSTEM = 'looker'

    def __init__(self, datacatalog_project_id, datacatalog_location_id,
                 looker_credentials_file):
        """
        :param datacatalog_project_id: GCP project that hosts the catalog
            entries.
        :param datacatalog_location_id: GCP location for those entries.
        :param looker_credentials_file: INI file with the Looker API
            credentials; its [Looker] base_url is also used to derive the
            instance URL tagged on every entry.
        """
        self.__project_id = datacatalog_project_id
        self.__location_id = datacatalog_location_id

        self.__metadata_scraper = scrape.MetadataScraper(
            looker_credentials_file)

        self.__tag_template_factory = prepare.DataCatalogTagTemplateFactory(
            project_id=datacatalog_project_id,
            location_id=datacatalog_location_id)

        self.__instance_url = self.__extract_instance_url(
            looker_credentials_file)

        self.__assembled_entry_factory = prepare.AssembledEntryFactory(
            project_id=datacatalog_project_id,
            location_id=datacatalog_location_id,
            entry_group_id=self.__ENTRY_GROUP_ID,
            user_specified_system=self.__SPECIFIED_SYSTEM,
            instance_url=self.__instance_url)

    @classmethod
    def __extract_instance_url(cls, credentials_file):
        """Derive the Looker instance base URL (scheme://host) from the
        'base_url' entry of the [Looker] section in *credentials_file*."""
        config_parser = configparser.ConfigParser()
        config_parser.read(credentials_file)
        api_url = config_parser['Looker']['base_url']

        # Drop path and port, keeping only scheme and hostname.
        parsed_uri = urlparse(api_url)
        return f'{parsed_uri.scheme}://{parsed_uri.hostname}'

    def run(self):
        """Coordinates a full scrape > prepare > ingest process."""

        # Scrape metadata from Looker server.
        logging.info('')
        logging.info('===> Scraping Looker metadata...')
        logging.info('Folders...')
        folders_dict = self.__scrape_folders()
        logging.info('')
        logging.info('Queries...')
        queries_dict = self.__scrape_queries(folders_dict)
        logging.info('==== DONE ========================================')

        # Prepare: convert Looker metadata into Data Catalog entities model.
        logging.info('')
        logging.info('===> Converting Looker metadata'
                     ' into Data Catalog entities model...')
        tag_templates_dict = self.__make_tag_templates_dict()
        assembled_entries_dict = self.__make_assembled_entries_dict(
            folders_dict, queries_dict, tag_templates_dict)
        logging.info('==== DONE ========================================')

        # Data Catalog entries relationship mapping.
        logging.info('')
        logging.info('===> Mapping Data Catalog entries relationships...')
        self.__map_datacatalog_relationships(assembled_entries_dict)
        logging.info('==== DONE ========================================')

        # Data Catalog clean up: delete obsolete data.
        logging.info('')
        logging.info('===> Deleting Data Catalog obsolete metadata...')
        self.__delete_obsolete_entries(assembled_entries_dict)
        logging.info('==== DONE ========================================')

        # Ingest metadata into Data Catalog.
        logging.info('')
        logging.info('===> Synchronizing Looker :: Data Catalog metadata...')
        self.__ingest_metadata(tag_templates_dict, assembled_entries_dict)
        logging.info('==== DONE ========================================')

    def __scrape_folders(self):
        """
        Scrape metadata from all folders belonging to a given Looker instance.

        Folders metadata include nested objects such as sub-folders,
        dashboards, dashboard elements (aka tiles), and looks.

        :return: A ``dict`` in which keys are top-level folders IDs
            and values are lists containing all metadata gathered from that
            folders in a deep-first hierarchy.
        """
        all_folders = self.__metadata_scraper.scrape_all_folders()
        all_dashboards = self.__metadata_scraper.scrape_all_dashboards()
        all_looks = self.__metadata_scraper.scrape_all_looks()

        # A folder with no parent id (None, '', or the literal string
        # 'None') is treated as a hierarchy root.
        top_level_folders = [
            folder for folder in all_folders
            if (folder.parent_id is None or folder.parent_id == '' or
                folder.parent_id == 'None')
        ]

        folders_dict = {}
        for folder in top_level_folders:
            folders_dict[folder.id] = self.__scrape_folder_from_flat_lists(
                folder, all_folders, all_dashboards, all_looks)

        # Explict "lookml" folder handling.
        # This special folder is not included in search_folders response
        # (see ``MetadataScraper.scrape_all_folders()`` for details).
        # Although all_folders response may include it, all_dashboards response
        # returns nothing when space_id=lookml, so it's necessary to iterate
        # through folder object's properties to get all metadata the connector
        # cares about.
        lookml_folder_id = 'lookml'
        lookml_folder = self.__metadata_scraper.scrape_folder(lookml_folder_id)
        if lookml_folder:
            folders_dict[lookml_folder_id] = \
                self.__scrape_folder_by_recursive_requests(lookml_folder)

        self.__log_folders_related_scraping_results(folders_dict)
        return folders_dict

    def __scrape_folder_from_flat_lists(self, folder, all_folders,
                                        all_dashboards, all_looks):
        """
        Retrieve folders metadata from the given flat asset lists.
        """
        # Attach to the folder the dashboards and looks that reference it
        # through their space id.
        if folder.dashboards is None:
            folder.dashboards = []
        folder.dashboards.extend([
            dashboard for dashboard in all_dashboards
            if dashboard.space.id == folder.id
        ])

        if folder.looks is None:
            folder.looks = []
        folder.looks.extend(
            [look for look in all_looks if look.space.id == folder.id])

        folders = [folder]

        child_folders = [
            child_folder for child_folder in all_folders
            if child_folder.parent_id == folder.id
        ]

        # TODO Check whether the already used folders, dashboards, and looks
        # can be removed from the given lists to improve next iterations
        # performance and memory usage.
        # Depth-first recursion: each child contributes itself plus its own
        # descendants to the flat result list.
        for folder in child_folders:
            folders.extend(
                self.__scrape_folder_from_flat_lists(folder, all_folders,
                                                     all_dashboards,
                                                     all_looks))

        return folders

    def __scrape_folder_by_recursive_requests(self, folder):
        """
        Scrape the given folder metadata and do the same for all of its
        children by recursively requesting their information.
        """
        # Re-fetch each dashboard individually since the bulk endpoint does
        # not cover this folder (see the "lookml" note in __scrape_folders).
        if folder.dashboards is None:
            folder.dashboards = []
        dashboards_ids = [dashboard.id for dashboard in folder.dashboards]
        folder.dashboards.clear()
        for dashboard_id in dashboards_ids:
            try:
                folder.dashboards.append(
                    self.__metadata_scraper.scrape_dashboard(dashboard_id))
            except error.SDKError:
                # Best-effort: dashboards that fail to scrape are skipped.
                pass

        if folder.looks is None:
            folder.looks = []
        looks_ids = [look.id for look in folder.looks]
        folder.looks.clear()
        for look_id in looks_ids:
            folder.looks.append(self.__metadata_scraper.scrape_look(look_id))

        folders = [folder]

        child_folders = self.__metadata_scraper.scrape_child_folders(folder)
        for folder in child_folders:
            folders.extend(self.__scrape_folder_by_recursive_requests(folder))

        return folders

    @classmethod
    def __log_folders_related_scraping_results(cls, folders_dict):
        """Log per-asset-type counts for the scraped folder hierarchies,
        right-aligning the numbers under the total for readability."""
        folders_count = 0
        dashboards_count = 0
        elements_count = 0
        looks_count = 0

        for folders in folders_dict.values():
            folders_count += len(folders)
            for folder in folders:
                if folder.dashboards:
                    dashboards_count += len(folder.dashboards)
                    for dashboard in folder.dashboards:
                        if dashboard.dashboard_elements:
                            elements_count += len(dashboard.dashboard_elements)
                looks_count += len(folder.looks) if folder.looks else 0

        assets_count = sum(
            [folders_count, dashboards_count, elements_count, looks_count])
        assets_count_str_len = len(str(assets_count))

        logging.info('')
        logging.info('==== %s folders-related assets scraped!', assets_count)
        spaces_count = assets_count_str_len - len(str(folders_count))
        logging.info('  > %s%s folders', " " * spaces_count, folders_count)
        spaces_count = assets_count_str_len - len(str(dashboards_count))
        logging.info('  > %s%s dashboards', " " * spaces_count,
                     dashboards_count)
        spaces_count = assets_count_str_len - len(str(elements_count))
        logging.info('  > %s%s dashboard elements', " " * spaces_count,
                     elements_count)
        spaces_count = assets_count_str_len - len(str(looks_count))
        logging.info('  > %s%s looks', " " * spaces_count, looks_count)

    def __scrape_queries(self, folders_dict):
        """
        Scrape metadata from all queries related to the given folders nested
        assets. A query metadata set includes its generated SQL statement,
        related LookML explore, and connection.

        :return: A ``dict`` in which keys are equals to the folders_dict keys
            and values are lists of ``entities.AssembledQueryMetadata``
            containing queries metadata gathered from assets nested to each
            of the "key" folders.
        """
        queries_dict = {}
        for folder_id, folders in folders_dict.items():
            query_ids = self.__get_folders_related_query_ids(folders)
            queries_dict[folder_id] = \
                [self.__scrape_query(query_id) for query_id in query_ids]

        self.__log_queries_related_scraping_results(queries_dict)
        return queries_dict

    @classmethod
    def __get_folders_related_query_ids(cls, folders):
        """
        :return: A ``set`` with all query IDs related to folders nested assets.
        """
        query_ids = set()
        for folder in folders:
            query_ids.update(cls.__get_folder_related_query_ids(folder))
        return query_ids

    @classmethod
    def __get_folder_related_query_ids(cls, folder):
        """
        Query IDs are found over folder dashboard elements and looks.

        :return: A ``set`` with all query IDs related to folder nested assets.
        """
        query_ids = set()
        # Dashboard elements may reference a query directly or through
        # their result_maker; both sources are collected.
        for dashboard in folder.dashboards:
            query_ids.update([
                element.query_id
                for element in dashboard.dashboard_elements
                if element.query_id
            ])
            query_ids.update([
                element.result_maker.query_id
                for element in dashboard.dashboard_elements
                if element.result_maker and element.result_maker.query_id
            ])
        query_ids.update(
            [look.query_id for look in folder.looks if look.query_id])
        return query_ids

    def __scrape_query(self, query_id):
        """Scrape a query plus its LookML explore, connection, and generated
        SQL, assembling them into one metadata object. The extra lookups are
        best-effort: on SDK errors they stay None."""
        query = self.__metadata_scraper.scrape_query(query_id)
        model_explore = None
        connection = None
        generated_sql = None
        try:
            model_explore = self.__metadata_scraper\
                .scrape_lookml_model_explore(query.model, query.view)
            connection = self.__metadata_scraper.scrape_connection(
                model_explore.connection_name)
            generated_sql = \
                self.__metadata_scraper.scrape_query_generated_sql(query_id)
        except error.SDKError:
            pass

        return entities.AssembledQueryMetadata(query, generated_sql,
                                               model_explore, connection)

    @classmethod
    def __log_queries_related_scraping_results(cls, queries_dict):
        """Log total and unique query counts (the same query may be shared
        by assets spread over several folders)."""
        assets_count = sum([len(queries) for queries in queries_dict.values()])
        assets_count_str_len = len(str(assets_count))

        unique_ids_count = len(
            set([
                assembled.query.id
                for queries in queries_dict.values()
                for assembled in queries
            ]))

        logging.info('')
        logging.info('==== %s queries scraped!', assets_count)
        spaces_count = assets_count_str_len - len(str(unique_ids_count))
        logging.info('  > %s%s are unique', " " * spaces_count,
                     unique_ids_count)

    def __make_tag_templates_dict(self):
        """Build the Tag Templates for every supported Looker asset type,
        keyed by template id."""
        return {
            constants.TAG_TEMPLATE_ID_DASHBOARD:
                self.__tag_template_factory.make_tag_template_for_dashboard(),
            constants.TAG_TEMPLATE_ID_DASHBOARD_ELEMENT:
                self.__tag_template_factory.
                make_tag_template_for_dashboard_element(),
            constants.TAG_TEMPLATE_ID_FOLDER:
                self.__tag_template_factory.make_tag_template_for_folder(),
            constants.TAG_TEMPLATE_ID_LOOK:
                self.__tag_template_factory.make_tag_template_for_look(),
            constants.TAG_TEMPLATE_ID_QUERY:
                self.__tag_template_factory.make_tag_template_for_query(),
        }

    def __make_assembled_entries_dict(self, folders_dict, queries_dict,
                                      tag_templates_dict):
        """
        Make Data Catalog entries and tags for assets belonging to a given
        Looker instance.

        :return: A ``dict`` in which keys are equals to the folders_dict keys
            and values are flat lists containing assembled objects with all
            their related entries and tags.
        """
        assembled_entries = {}

        for folder_id, folders in folders_dict.items():
            assembled_entries[folder_id] = self.__assembled_entry_factory\
                .make_assembled_entries_list(
                    folders, queries_dict[folder_id], tag_templates_dict)

        return assembled_entries

    @classmethod
    def __map_datacatalog_relationships(cls, assembled_entries_dict):
        """Fulfill cross-entry relationship tag fields over the flattened
        list of all assembled entries."""
        all_assembled_entries = []
        for assembled_entries_data in assembled_entries_dict.values():
            all_assembled_entries.extend(assembled_entries_data)

        prepare.EntryRelationshipMapper().fulfill_tag_fields(
            all_assembled_entries)

    def __delete_obsolete_entries(self, new_assembled_entries_dict):
        """Delete catalog entries of this instance that are no longer present
        in the freshly scraped metadata, scoping the search by system and
        instance URL tag."""
        all_assembled_entries = []
        for assembled_entry_data in new_assembled_entries_dict.values():
            all_assembled_entries.extend(assembled_entry_data)

        cleanup.DataCatalogMetadataCleaner(
            self.__project_id, self.__location_id, self.__ENTRY_GROUP_ID).\
            delete_obsolete_metadata(
                all_assembled_entries,
                f'system={self.__SPECIFIED_SYSTEM}'
                f' tag:instance_url:{self.__instance_url}')

    def __ingest_metadata(self, tag_templates_dict, assembled_entries_dict):
        """Ingest the assembled entries into Data Catalog, one folder at a
        time, logging running totals."""
        metadata_ingestor = ingest.DataCatalogMetadataIngestor(
            self.__project_id, self.__location_id, self.__ENTRY_GROUP_ID)

        entries_count = sum(
            len(entries) for entries in assembled_entries_dict.values())
        logging.info('==== %d entries to be synchronized!', entries_count)

        synced_entries_count = 0
        for folder_id, assembled_entries in assembled_entries_dict.items():
            folder_entries_count = len(assembled_entries)

            logging.info('')
            logging.info('==== The Folder identified by %s has %d entries.',
                         folder_id, folder_entries_count)
            metadata_ingestor.ingest_metadata(assembled_entries,
                                              tag_templates_dict)
            synced_entries_count = synced_entries_count + folder_entries_count

        logging.info('')
        logging.info('==== %d of %d entries successfully synchronized!',
                     synced_entries_count, entries_count)
| apache-2.0 |
pdellaert/ansible | lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py | 27 | 10824 | #!/usr/bin/python
#
# Copyright (c) 2017 Julien Stroheker, <juliens@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset
version_added: "2.4"
short_description: Manage Azure Availability Set
description:
- Create, update and delete Azure Availability Set.
- An availability set cannot be updated, you will have to recreate one instead.
- The only update operation will be for the tags.
options:
resource_group:
description:
- Name of a resource group where the availability set exists or will be created.
required: true
name:
description:
- Name of the availability set.
required: true
state:
description:
- Assert the state of the availability set.
- Use C(present) to create or update a availability set and C(absent) to delete a availability set.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
platform_update_domain_count:
description:
- Update domains indicate groups of virtual machines and underlying physical hardware that can be rebooted at the same time.
type: int
default: 5
platform_fault_domain_count:
description:
- Fault domains define the group of virtual machines that share a common power source and network switch.
- Should be between C(1) and C(3).
type: int
default: 3
sku:
description:
- Define if the availability set supports managed disks.
default: Classic
choices:
- Classic
- Aligned
extends_documentation_fragment:
- azure
- azure_tags
author:
- Julien Stroheker (@julienstroheker)
'''
EXAMPLES = '''
- name: Create an availability set with default options
azure_rm_availabilityset:
name: myAvailabilitySet
location: eastus
resource_group: myResourceGroup
- name: Create an availability set with advanced options
azure_rm_availabilityset:
name: myAvailabilitySet
location: eastus
resource_group: myResourceGroup
platform_update_domain_count: 5
platform_fault_domain_count: 3
sku: Aligned
- name: Delete an availability set
azure_rm_availabilityset:
name: myAvailabilitySet
location: eastus
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
state:
description: Current state of the availability set.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
type: str
sample: "/subscriptions/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/availabilitySets/myavailabilityset2"
location:
description:
- Location where the resource lives.
type: str
sample: eastus
name:
description:
- Resource name.
type: str
sample: myavailabilityset2
platform_fault_domain_count:
description:
- Fault domains values.
type: int
sample: 2
platform_update_domain_count:
description:
- Update domains values.
type: int
sample: 5
sku:
description:
- The availability set supports managed disks.
type: str
sample: Aligned
tags:
description:
- Resource tags.
type: dict
sample: {env: sandbox}
changed:
description: Whether or not the resource has changed
returned: always
type: bool
sample: true
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
def availability_set_to_dict(avaset):
    '''
    Serialize an availability set object returned by the Azure SDK into a
    plain dict containing only the fields this module reports.
    :return: dict
    '''
    return {
        'id': avaset.id,
        'name': avaset.name,
        'location': avaset.location,
        'platform_update_domain_count': avaset.platform_update_domain_count,
        'platform_fault_domain_count': avaset.platform_fault_domain_count,
        'tags': avaset.tags,
        'sku': avaset.sku.name,
    }
class AzureRMAvailabilitySet(AzureRMModuleBase):
    """Configuration class for an Azure RM availability set resource.

    Creates, updates (tags only) and deletes availability sets. All other
    properties of an availability set are immutable in Azure; attempting to
    change one fails the module with an explanatory message.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            ),
            location=dict(
                type='str'
            ),
            platform_update_domain_count=dict(
                type='int',
                default=5
            ),
            platform_fault_domain_count=dict(
                type='int',
                default=3
            ),
            sku=dict(
                type='str',
                default='Classic',
                choices=['Classic', 'Aligned']
            )
        )

        # Parameter values are copied onto these attributes in exec_module().
        self.resource_group = None
        self.name = None
        self.location = None
        self.tags = None
        self.platform_update_domain_count = None
        self.platform_fault_domain_count = None
        self.sku = None
        self.state = None
        self.warning = False

        self.results = dict(changed=False, state=dict())

        super(AzureRMAvailabilitySet, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method.

        :return: result dict with 'changed' flag and the current 'state'.
        """
        # Copy the validated module parameters (plus 'tags') onto self.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        response = None
        to_be_updated = False

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Default to the resource group's location when none was given.
            self.location = resource_group.location

        # Check if the AS already present in the RG
        if self.state == 'present':
            response = self.get_availabilityset()
            self.results['state'] = response

            if not response:
                to_be_updated = True
            else:
                # Tags are the only mutable property of an availability set.
                update_tags, response['tags'] = self.update_tags(response['tags'])

                if update_tags:
                    self.log("Tags has to be updated")
                    to_be_updated = True

                # Any other difference would require recreating the resource,
                # which this module refuses to do implicitly.
                if response['platform_update_domain_count'] != self.platform_update_domain_count:
                    self.faildeploy('platform_update_domain_count')

                if response['platform_fault_domain_count'] != self.platform_fault_domain_count:
                    self.faildeploy('platform_fault_domain_count')

                if response['sku'] != self.sku:
                    self.faildeploy('sku')

            if self.check_mode:
                return self.results

            if to_be_updated:
                self.results['state'] = self.create_or_update_availabilityset()
                self.results['changed'] = True
        elif self.state == 'absent':
            self.delete_availabilityset()
            self.results['changed'] = True

        return self.results

    def faildeploy(self, param):
        '''
        Helper method to push fail message in the console.
        Useful to notify that the users cannot change some values in a Availability Set
        :param param: name of the immutable parameter the user tried to change
        :return: void
        '''
        # Fixed grammar in the user-facing message ("is was" -> "it was").
        self.fail("You tried to change {0} but it was unsuccessful. An Availability Set is immutable, except tags".format(str(param)))

    def create_or_update_availabilityset(self):
        '''
        Method calling the Azure SDK to create or update the AS.
        :return: dict representation of the resulting availability set
        '''
        self.log("Creating availabilityset {0}".format(self.name))
        try:
            params_sku = self.compute_models.Sku(
                name=self.sku
            )
            params = self.compute_models.AvailabilitySet(
                location=self.location,
                tags=self.tags,
                platform_update_domain_count=self.platform_update_domain_count,
                platform_fault_domain_count=self.platform_fault_domain_count,
                sku=params_sku
            )
            response = self.compute_client.availability_sets.create_or_update(self.resource_group, self.name, params)
        except CloudError as e:
            self.log('Error attempting to create the availability set.')
            self.fail("Error creating the availability set: {0}".format(str(e)))

        return availability_set_to_dict(response)

    def delete_availabilityset(self):
        '''
        Method calling the Azure SDK to delete the AS.
        :return: True on success
        '''
        self.log("Deleting availabilityset {0}".format(self.name))
        try:
            self.compute_client.availability_sets.delete(self.resource_group, self.name)
        except CloudError as e:
            self.log('Error attempting to delete the availability set.')
            self.fail("Error deleting the availability set: {0}".format(str(e)))

        return True

    def get_availabilityset(self):
        '''
        Method calling the Azure SDK to get an AS.
        :return: dict of the availability set's properties, or False if absent
        '''
        self.log("Checking if the availabilityset {0} is present".format(self.name))
        try:
            response = self.compute_client.availability_sets.get(self.resource_group, self.name)
        except CloudError:
            self.log('Did not find the Availability set.')
            return False
        return availability_set_to_dict(response)
def main():
    """Main execution"""
    # Instantiating the module class triggers argument parsing and runs
    # exec_module() via the AzureRMModuleBase constructor.
    AzureRMAvailabilitySet()
if __name__ == '__main__':
    main()
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/bs4/builder/_lxml.py | 4 | 9594 | # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
try:
from collections.abc import Callable # Python 3.6
except ImportError , e:
from collections import Callable
from io import BytesIO
from StringIO import StringIO
from lxml import etree
from bs4.element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
XMLProcessingInstruction,
)
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """Tree builder that drives lxml's XML parser and relays its events
    (start/end/data/comment/pi/doctype) into a BeautifulSoup object.

    NOTE(review): this file uses Python 2 syntax (``except ..., e`` and the
    ``unicode`` builtin); it will not run under Python 3 as-is.
    """
    DEFAULT_PARSER_CLASS = etree.XMLParser
    is_xml = True
    processing_instruction_class = XMLProcessingInstruction
    NAME = "lxml-xml"
    ALTERNATE_NAMES = ["xml"]
    # Well, it's permissive by XML parser standards.
    features = [NAME, LXML, XML, FAST, PERMISSIVE]
    # Number of bytes/characters fed to the parser per feed() iteration.
    CHUNK_SIZE = 512
    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
    def default_parser(self, encoding):
        """Return the parser to use when none was supplied: either the one
        given at construction time, or a fresh recovering XMLParser that
        sends its events to this builder (target=self)."""
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)
    def parser_for(self, encoding):
        """Return a parser instance for `encoding`, instantiating the default
        parser class if default_parser() returned a class rather than an
        object."""
        # Use the default parser.
        parser = self.default_parser(encoding)
        if isinstance(parser, Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser
    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        # The BeautifulSoup object is attached later, by the soup machinery,
        # before feed() is called.
        self.soup = None
        # Stack of inverted namespace mappings, one entry per nested scope
        # that introduced namespaces; entries may be None (see start()).
        self.nsmaps = [self.DEFAULT_NSMAPS]
    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)
    def prepare_markup(self, markup, user_specified_encoding=None,
                       exclude_encodings=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)
        Each 4-tuple represents a strategy for parsing the document.
        """
        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        if is_html:
            self.processing_instruction_class = ProcessingInstruction
        else:
            self.processing_instruction_class = XMLProcessingInstruction
        if isinstance(markup, unicode):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False
        if isinstance(markup, unicode):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(
            markup, try_encodings, is_html, exclude_encodings)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)
    def feed(self, markup):
        """Feed markup to the lxml parser in CHUNK_SIZE pieces, translating
        any parser failure into ParserRejectedMarkup."""
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, unicode):
            markup = StringIO(markup)
        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))
    def close(self):
        # Reset the namespace stack once parsing finishes.
        self.nsmaps = [self.DEFAULT_NSMAPS]
    def start(self, name, attrs, nsmap={}):
        """lxml target callback: an element has opened. Tracks namespace
        scopes and hands the tag to the soup."""
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace
        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn then into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs
        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)
    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        # Innermost scope wins, hence the reversed traversal.
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None
    def end(self, name):
        """lxml target callback: an element has closed. Pops any namespace
        scope that was pushed by the matching start()."""
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()
    def pi(self, target, data):
        # Processing instructions are buffered as plain data and then typed
        # via endData(processing_instruction_class).
        self.soup.endData()
        self.soup.handle_data(target + ' ' + data)
        self.soup.endData(self.processing_instruction_class)
    def data(self, content):
        # Character data between tags.
        self.soup.handle_data(content)
    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)
    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML variant of the lxml tree builder: same event plumbing as the XML
    builder, but uses etree.HTMLParser and feeds the markup in one shot."""
    NAME = LXML
    ALTERNATE_NAMES = ["lxml-html"]
    features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
    is_xml = False
    processing_instruction_class = ProcessingInstruction
    def default_parser(self, encoding):
        # Returns the class; parser_for() instantiates it with target=self.
        return etree.HTMLParser
    def feed(self, markup):
        """Parse the whole markup at once (no chunking, unlike the XML
        builder), translating parser failures into ParserRejectedMarkup."""
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| gpl-3.0 |
taknevski/tensorflow-xsmm | tensorflow/contrib/bayesflow/python/ops/stochastic_variables.py | 103 | 6065 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom `get_variable` for stochastic variables.
@@get_stochastic_variable
@@make_stochastic_variable_getter
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor as st
from tensorflow.contrib.bayesflow.python.ops import variational_inference as vi
def get_stochastic_variable(getter,
                            name,
                            shape=None,
                            dist_cls=None,
                            dist_kwargs=None,
                            param_initializers=None,
                            prior=None,
                            **kwargs):
  """Custom variable getter for stochastic variables.
  `get_stochastic_variable` will create variables backing the parameters of a
  distribution, defined by `dist_cls`, and return a `StochasticTensor` which
  represents a sample from the backing distribution.
  Meant to be passed as the `custom_getter` to a `variable_scope`. Use
  `make_stochastic_variable_getter` to partially apply distribution-related
  args.
  Usage:
  ```python
  sv = tf.contrib.bayesflow.stochastic_variables
  dist = tf.contrib.distributions
  with tf.variable_scope('my_scope',
                         custom_getter=sv.make_stochastic_variable_getter(
                             dist_cls=dist.NormalWithSoftplusSigma
                             param_initializers={
                                 "sigma": lambda shape, dtype, pi: (
                                     tf.constant(0.5, dtype=dtype, shape=shape))
                             })):
    v = tf.get_variable('my_var', (10, 20))
  ```
  `v` is a `StochasticTensor`, which is a sample from a backing
  `NormalWithSoftplusSigma` distribution. Underneath, 2 variables have been
  created: `my_var_mu` and `my_var_sigma`. `my_var_sigma` has been appropriately
  constrained to be positive by the `NormalWithSoftplusSigma` constructor, and
  initialized to a value of 0.5, which results in a sigma of ~1 after the
  softplus. The sample will have shape `(10, 20)`.
  Args:
    getter: original variable getter.
    name: prefix for variable(s) backing distribution parameters.
    shape: shape of the sample from the distribution (i.e. shape of the
      returned `StochasticTensor`).
    dist_cls: subclass of `Distribution` that implements `param_shapes`. Should
      accept unconstrained parameters (e.g. `NormalWithSoftplusSigma` accepts
      real-valued `sigma` and constrains it to be positive with `softplus`).
    dist_kwargs: `dict` of kwargs to be forwarded to `dist_cls`.
    param_initializers: `dict` from parameter name to initializer (see
      `get_variable` for initializer docs). Will override `initializer` in
      `kwargs`. `param_initializers` may contain initializers for only some of
      the parameters. Those parameters that do not contain entries will be
      initialized by `kwargs['initializer']`, if provided; otherwise, the
      default initialization of `getter` will be used.
    prior: instance of `Distribution` or a callable
      `(TensorShape, dtype) => Distribution`. If provided, will be registered
      as the prior for the `StochasticTensor` using
      `variational_inference.register_prior`.
    **kwargs: kwargs forwarded to `getter`.
  Returns:
    `StochasticTensor`, which represents a sample from the backing distribution.
  """
  param_initializers = param_initializers or {}
  param_shapes = {}
  if shape is not None:
    param_shapes = dist_cls.param_static_shapes(shape)
  param_names = set(list(param_shapes.keys()) + list(param_initializers.keys()))
  # BUGFIX: pop the generic initializer from kwargs exactly once, before the
  # loop. Popping inside the loop (as before) removed it on the first
  # iteration, so only one arbitrarily-ordered parameter could fall back to
  # kwargs['initializer'], contradicting the documented contract above.
  # It must still be popped (not just read) so it isn't forwarded to `getter`
  # a second time via **kwargs.
  original_initializer = kwargs.pop('initializer', None)
  params = {}
  for param_name in param_names:
    # For each parameter, its param_initializer is used, if provided.
    # Otherwise, the original kwargs['initializer'] is used. If neither was
    # provided, the default variable initialization in getter will be used
    # (i.e. getter will be passed initializer=None).
    param_initializer = param_initializers.get(param_name, None)
    if param_initializer is None:
      param_initializer = original_initializer
    # A constant (non-callable) initializer fixes the shape itself, so no
    # explicit shape may be passed alongside it.
    if callable(param_initializer) or param_initializer is None:
      param_shape = param_shapes.get(param_name, None)
    else:
      param_shape = None
    params[param_name] = getter(
        name + '_' + param_name,
        shape=param_shape,
        initializer=param_initializer,
        **kwargs)
  dist_kwargs = dist_kwargs or {}
  dist_kwargs.update(params)
  sample = st.StochasticTensor(dist_cls(**dist_kwargs))
  if prior is not None:
    if callable(prior):
      # Build the prior from the (fully-defined) sample shape and dtype.
      sample_value = sample.value()
      sample_value.get_shape().assert_is_fully_defined()
      prior = prior(sample_value.get_shape(), sample_value.dtype)
    vi.register_prior(sample, prior)
  return sample
def make_stochastic_variable_getter(dist_cls,
                                    dist_kwargs=None,
                                    param_initializers=None,
                                    prior=None):
  """`get_stochastic_variable` with args partially applied."""
  bound_kwargs = {
      'dist_cls': dist_cls,
      'dist_kwargs': dist_kwargs,
      'param_initializers': param_initializers,
      'prior': prior,
  }
  return functools.partial(get_stochastic_variable, **bound_kwargs)
| apache-2.0 |
joerocklin/gem5 | ext/ply/test/yacc_inf.py | 174 | 1278 | # -----------------------------------------------------------------------------
# yacc_inf.py
#
# Infinite recursion
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
# Operator precedence, lowest to highest. UMINUS is a fictitious token used
# only via %prec to give unary minus higher precedence than binary MINUS.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
)
# dictionary of names
# The interpreter's symbol table: maps identifiers to assigned values.
names = { }
def p_statement_assign(p):
    'statement : NAME EQUALS expression'
    # Bind the identifier to the value of the right-hand expression.
    names[p[1]] = p[3]
def p_statement_expr(p):
    'statement : expression'
    # A bare expression statement prints its value.
    print(p[1])
def p_expression_binop(p):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # Dispatch on the operator token instead of an if/elif chain. An
    # unrecognized operator leaves p[0] untouched, as before.
    handlers = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    handler = handlers.get(p[2])
    if handler is not None:
        p[0] = handler(p[1], p[3])
def p_expression_uminus(p):
    'expression : MINUS expression %prec UMINUS'
    # Negate the operand; %prec UMINUS gives this rule high precedence.
    p[0] = -p[2]
def p_expression_group(p):
    'expression : LPAREN expression RPAREN'
    # Parentheses contribute only grouping; pass the inner value through.
    p[0] = p[2]
def p_error(tok):
    # Report the offending token's value; error recovery is left to yacc.
    print("Syntax error at '%s'" % tok.value)
yacc.yacc()
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/powerbidedicated/azure-mgmt-powerbidedicated/azure/mgmt/powerbidedicated/_configuration.py | 1 | 3345 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class PowerBIDedicatedConfiguration(Configuration):
    """Configuration for PowerBIDedicated.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: A unique identifier for a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    """
    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Guard clauses: both arguments are mandatory.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(PowerBIDedicatedConfiguration, self).__init__(**kwargs)
        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2021-01-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-powerbidedicated/{}'.format(VERSION))
        self._configure(**kwargs)
    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        def pick(name, factory):
            # Honor a caller-supplied policy; otherwise build the default.
            supplied = kwargs.get(name)
            return supplied or factory()
        self.user_agent_policy = pick('user_agent_policy', lambda: policies.UserAgentPolicy(**kwargs))
        self.headers_policy = pick('headers_policy', lambda: policies.HeadersPolicy(**kwargs))
        self.proxy_policy = pick('proxy_policy', lambda: policies.ProxyPolicy(**kwargs))
        self.logging_policy = pick('logging_policy', lambda: policies.NetworkTraceLoggingPolicy(**kwargs))
        self.http_logging_policy = pick('http_logging_policy', lambda: ARMHttpLoggingPolicy(**kwargs))
        self.retry_policy = pick('retry_policy', lambda: policies.RetryPolicy(**kwargs))
        self.custom_hook_policy = pick('custom_hook_policy', lambda: policies.CustomHookPolicy(**kwargs))
        self.redirect_policy = pick('redirect_policy', lambda: policies.RedirectPolicy(**kwargs))
        self.authentication_policy = kwargs.get('authentication_policy')
        # Default to bearer-token auth when a credential exists but no
        # explicit authentication policy was provided.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit |
mojones/Axelrod | axelrod/mock_player.py | 2 | 1425 | import copy
import axelrod
from axelrod import Player, update_histories, Actions
C, D = Actions.C, Actions.D
class MockPlayer(Player):
    """Creates a mock player that enforces a particular next move for a given
    player."""
    def __init__(self, player, move):
        """Clone `player`'s public state and pin the next move to `move`.

        :param player: the player whose history/counts to mirror.
        :param move: the action this mock will always play.
        """
        # Need to retain history for opponents that examine opponents history
        # Do a deep copy just to be safe
        Player.__init__(self)
        self.history = copy.deepcopy(player.history)
        self.cooperations = player.cooperations
        self.defections = player.defections
        self.move = move
    def strategy(self, opponent):
        """Ignore the opponent entirely and return the pinned move."""
        # Just return the saved move
        return self.move
def simulate_play(P1, P2, h1=None, h2=None):
    """
    Simulates play with or without forced history. If h1 and h2 are given, these
    moves are enforced in the players strategy. This generally should not be
    necessary, but various tests may force impossible or unlikely histories.
    """
    if h1 and h2:
        # Forced history: each strategy still runs (for its side effects)
        # against a mock opponent whose next move is pinned, but the forced
        # moves are what get recorded.
        P1.strategy(MockPlayer(P2, h2))
        P2.strategy(MockPlayer(P1, h1))
        update_histories(P1, P2, h1, h2)
        return (h1, h2)
    # Normal play: each player's strategy decides, then both moves are
    # recorded against the real opponents.
    move1 = P1.strategy(P2)
    move2 = P2.strategy(P1)
    update_histories(P1, P2, move1, move2)
    return (move1, move2)
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/ipv6_express_route_circuit_peering_config.py | 1 | 2352 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Ipv6ExpressRouteCircuitPeeringConfig(Model):
    """Contains IPv6 peering config.
    :param primary_peer_address_prefix: The primary address prefix.
    :type primary_peer_address_prefix: str
    :param secondary_peer_address_prefix: The secondary address prefix.
    :type secondary_peer_address_prefix: str
    :param microsoft_peering_config: The Microsoft peering configuration.
    :type microsoft_peering_config:
     ~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeeringConfig
    :param route_filter: The reference of the RouteFilter resource.
    :type route_filter: ~azure.mgmt.network.v2017_09_01.models.RouteFilter
    :param state: The state of peering. Possible values are: 'Disabled' and
     'Enabled'. Possible values include: 'Disabled', 'Enabled'
    :type state: str or
     ~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeeringState
    """
    _attribute_map = {
        'primary_peer_address_prefix': {'key': 'primaryPeerAddressPrefix', 'type': 'str'},
        'secondary_peer_address_prefix': {'key': 'secondaryPeerAddressPrefix', 'type': 'str'},
        'microsoft_peering_config': {'key': 'microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'},
        'route_filter': {'key': 'routeFilter', 'type': 'RouteFilter'},
        'state': {'key': 'state', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(Ipv6ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs)
        # Every property is optional; default each to None when not supplied.
        for prop in ('primary_peer_address_prefix',
                     'secondary_peer_address_prefix',
                     'microsoft_peering_config',
                     'route_filter',
                     'state'):
            setattr(self, prop, kwargs.get(prop, None))
| mit |
TangXT/edx-platform | cms/djangoapps/contentstore/management/commands/cleanup_assets.py | 204 | 1226 | """
Script for removing all redundant Mac OS metadata files (with filename ".DS_Store"
or with filename which starts with "._") for all courses
"""
import logging
from django.core.management.base import BaseCommand
from xmodule.contentstore.django import contentstore
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Remove all Mac OS related redundant files for all courses in contentstore
    """
    help = 'Remove all Mac OS related redundant file/files for all courses in contentstore'
    def handle(self, *args, **options):
        """
        Execute the command
        """
        content_store = contentstore()
        log.info(u"-" * 80)
        log.info(u"Cleaning up assets for all courses")
        try:
            # Remove all redundant Mac OS metadata files
            assets_deleted = content_store.remove_redundant_content_for_courses()
        except Exception as err:
            # Best-effort cleanup: log the failure rather than aborting.
            log.info(u"=" * 30 + u"> failed to cleanup")
            log.info(u"Error:")
            log.info(err)
        else:
            log.info(u"=" * 80)
            log.info(u"Total number of assets deleted: {0}".format(assets_deleted))
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.