repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
ddelemeny/calligra | 3rdparty/google-breakpad/src/tools/gyp/test/msvs/missing_sources/gyptest-missing.py | 29 | 1319 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that missing 'sources' files are treated as fatal errors when the
the generator flag 'msvs_error_on_missing_sources' is set.
"""
import TestGyp
import os
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
# With the flag not set
test.run_gyp('hello_missing.gyp')
# With the flag explicitly set to 0
try:
os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=0'
test.run_gyp('hello_missing.gyp')
finally:
del os.environ['GYP_GENERATOR_FLAGS']
# With the flag explicitly set to 1
try:
os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=1'
# Test to make sure GYP raises an exception (exit status 1). Since this will
# also print a backtrace, ensure that TestGyp is not checking that stderr is
# empty by specifying None, which means do not perform any checking.
# Instead, stderr is checked below to ensure it contains the expected
# output.
test.run_gyp('hello_missing.gyp', status=1, stderr=None)
finally:
del os.environ['GYP_GENERATOR_FLAGS']
test.must_contain_any_line(test.stderr(),
["Missing input files:"])
test.pass_test() | gpl-2.0 |
MTgeophysics/mtpy | mtpy/uofa/mseed2ts.py | 1 | 4187 | #!/usr/bin/env python
"""
This is a convenience script for the conversion of MTpy-style time series data files to miniSeed.
It needs the location of a folder with TS data. The folder structure is mimicked and the TS converted into miniSeed. The folder structure is then put into the destination directory.
The latter can be given as an argument, if not, a local directory 'miniSeed' will be generated in the current directory
1 mandatory argument:
- path to TS data files
3 optional arguments (in this order):
- name of the output directory
- 'location' code (2 characters)
- 'network' code (2 characters)
"""
import numpy as np
import re
import sys
import os
import glob
import os.path as op
import glob
import fnmatch
import mtpy.utils.exceptions as MTex
import mtpy.utils.mseed as MTms
import mtpy.utils.filehandling as MTfh
#reload(MTfh)
#reload(MTex)
#reload(MTms)
def main():
    """Convert a tree of miniSeed files into MTpy time series (TS) files.

    Command line:
        <path to miniSeed files> [<output dir>]

    The input directory structure is mirrored beneath the output
    directory; if no (writable) output directory is given, a local
    'ascii' directory is created and used instead.

    Raises:
        MTex.MTpyError_inputarguments: if the input path does not exist
            or an output directory cannot be created.
    """
    if len(sys.argv) < 2:
        sys.exit('\n\tNeed at least 1 argument:\n\n <path to files>\n[optional:'
                 '<output dir>] \n')

    outdir = None
    if len(sys.argv) > 2:
        outdir = sys.argv[2]
    # NOTE(review): the module docstring mentions 'location'/'network'
    # codes (argv[3]/argv[4]); they were never parsed in the original
    # script either, so they remain unused here.

    pathname_raw = sys.argv[1]
    # Keep the path as given (relative paths are needed): it is re-used
    # below to mirror the input folder structure under the output dir.
    indir = pathname_raw
    if not op.isdir(indir):
        raise MTex.MTpyError_inputarguments(
            'Data file(s) path not existing: {0}'.format(indir))

    # Define the base output directory, falling back to './ascii' when the
    # requested directory cannot be created or is not writable.
    outpath = None
    if outdir is not None:
        outpath = op.abspath(op.join(os.curdir, outdir))
        if not _ensure_writable_dir(outpath):
            print('Cannot generate writable output directory {0} - using generic'
                  ' location "ascii" instead'.format(outpath))
            outdir = None
    if outdir is None:
        outpath = op.join(os.curdir, 'ascii')
        if not _ensure_writable_dir(outpath):
            sys.exit('Error ! - Cannot generate writable output directory '
                     '"ascii" - abort...')
    outdir = op.abspath(outpath)

    # Collect the input directory itself plus all of its sub-directories.
    lo_indirs = [indir]
    for root, subdirs, _files in os.walk(indir):
        lo_indirs.extend(op.join(root, d) for d in subdirs)
    lo_indirs = list(set(lo_indirs))

    # Re-create the (relative) input folder structure under 'outdir'.
    lo_outdirs = []
    try:
        for rel_dir in lo_indirs:
            outpath = op.abspath(op.join(outdir, rel_dir))
            if not op.isdir(outpath):
                os.makedirs(outpath)
            lo_outdirs.append(outpath)
    except OSError:
        raise MTex.MTpyError_inputarguments(
            'ERROR - Cannot set up output directory {0}'.format(outpath))

    # Convert each regular file; files that are not valid miniSeed are
    # reported and skipped instead of aborting the whole run.
    for idx_ipath, inpath in enumerate(lo_indirs):
        lo_infiles = [i for i in os.listdir(inpath)
                      if op.isfile(op.abspath(op.join(inpath, i)))]
        lo_outfiles = [op.abspath(op.join(lo_outdirs[idx_ipath], i))
                       for i in lo_infiles]
        lo_infiles = [op.abspath(op.join(inpath, i)) for i in lo_infiles]
        for idx_fn, fn in enumerate(lo_infiles):
            print('reading file {0}'.format(fn))
            try:
                outfn = MTms.convertfile_miniseed2ts(fn, lo_outfiles[idx_fn])
                print('wrote file(s) {0}'.format(outfn))
            except Exception:
                print('Warning - file {0} is not in valid miniseed format!!!'.format(fn))
                continue


def _ensure_writable_dir(path):
    """Create *path* if missing; return True when it exists and is writable."""
    try:
        if not op.exists(path):
            os.makedirs(path)
    except OSError:
        return False
    return os.access(path, os.W_OK)


if __name__ == '__main__':
    main()
| gpl-3.0 |
box/ClusterRunner | test/unit/master/test_atomizer.py | 4 | 2122 | from unittest.mock import Mock
from app.master.atomizer import Atomizer, AtomizerError
from app.project_type.project_type import ProjectType
from app.util.process_utils import get_environment_variable_setter_command
from test.framework.base_unit_test_case import BaseUnitTestCase
_FAKE_ATOMIZER_COMMAND = 'find . -name test_*.py'
_FAKE_ATOMIZER_COMMAND_OUTPUT = '/tmp/test/directory/test_a.py\n/tmp/test/directory/test_b.py\n/tmp/test/directory/test_c.py\n'
_SUCCESSFUL_EXIT_CODE = 0
_FAILING_EXIT_CODE = 1
class TestAtomizer(BaseUnitTestCase):
    """Unit tests for Atomizer.atomize_in_project()."""

    def test_atomizer_returns_expected_atom_list(self):
        # Fake a project whose atomizer command succeeds and lists three files.
        project = Mock(spec=ProjectType)
        project.execute_command_in_project.return_value = (
            _FAKE_ATOMIZER_COMMAND_OUTPUT, _SUCCESSFUL_EXIT_CODE)
        project.project_directory = '/tmp/test/directory'

        atomizer = Atomizer([{'TEST_FILE': _FAKE_ATOMIZER_COMMAND}])
        atoms = atomizer.atomize_in_project(project)
        atom_commands = [atom.command_string for atom in atoms]

        # Each output line should become an env-var setter with the project
        # directory replaced by the $PROJECT_DIR placeholder.
        expected_commands = [
            get_environment_variable_setter_command('TEST_FILE', '$PROJECT_DIR/test_a.py'),
            get_environment_variable_setter_command('TEST_FILE', '$PROJECT_DIR/test_b.py'),
            get_environment_variable_setter_command('TEST_FILE', '$PROJECT_DIR/test_c.py'),
        ]
        self.assertListEqual(expected_commands, atom_commands,
                             'List of actual atoms should match list of expected atoms.')
        project.execute_command_in_project.assert_called_once_with(_FAKE_ATOMIZER_COMMAND)

    def test_atomizer_raises_exception_when_atomize_command_fails(self):
        # A non-zero exit code from the atomizer command must raise.
        project = Mock(spec=ProjectType)
        project.execute_command_in_project.return_value = ('ERROR ERROR ERROR', _FAILING_EXIT_CODE)

        atomizer = Atomizer([{'TEST_FILE': _FAKE_ATOMIZER_COMMAND}])
        with self.assertRaises(AtomizerError):
            atomizer.atomize_in_project(project)
        project.execute_command_in_project.assert_called_once_with(_FAKE_ATOMIZER_COMMAND)
| apache-2.0 |
Split-Screen/android_kernel_motorola_msm8960-common | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0

# sysfs interface exposed by the rt-mutex tester kernel module.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes: symbolic command name -> numeric opcode written to sysfs.
cmd_opcodes = {
    "schedother" : "1",
    "schedfifo" : "2",
    "lock" : "3",
    "locknowait" : "4",
    "lockint" : "5",
    "lockintnowait" : "6",
    "lockcont" : "7",
    "unlock" : "8",
    "signal" : "11",
    "resetevent" : "98",
    "reset" : "99",
}

# Test opcodes: name -> [status field letter, comparator, fixed arg or None].
# None means the argument comes from the test line itself.
test_opcodes = {
    "prioeq" : ["P" , "eq" , None],
    "priolt" : ["P" , "lt" , None],
    "priogt" : ["P" , "gt" , None],
    "nprioeq" : ["N" , "eq" , None],
    "npriolt" : ["N" , "lt" , None],
    "npriogt" : ["N" , "gt" , None],
    "unlocked" : ["M" , "eq" , 0],
    "trylock" : ["M" , "eq" , 1],
    "blocked" : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked" : ["M" , "eq" , 4],
    "opcodeeq" : ["O" , "eq" , None],
    "opcodelt" : ["O" , "lt" , None],
    "opcodegt" : ["O" , "gt" , None],
    "eventeq" : ["E" , "eq" , None],
    "eventlt" : ["E" , "lt" , None],
    "eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
    # NOTE: Python 2 print statements -- this script targets Python 2.
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # 'str' shadows the builtin; kept as-is for fidelity with the original.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one sysfs status value against a test opcode.

    val: raw status value string read from the status file
    top: test opcode triple [field letter, comparator, fixed arg or None]
    arg: argument column from the current test line
    Returns 1 when the comparison holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: 'arg' selects which decimal digit of the packed
        # status value to inspect (Python 2 integer division intended).
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: translate a symbolic command name to its numeric
        # opcode, falling back to the literal value.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: a testfile argument if given, otherwise stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0

# Read the test patterns, one colon-separated line at a time:
#   cmd : opcode : thread-id : data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    # Skip empty lines.
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    # Comment lines are only echoed once comment printing is active.
    if parts[0].startswith("#"):
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                # Syntax-check mode: just show the target file, do not poll.
                print fname
                continue
            # 't' checks the status once; 'w' keeps polling until it matches.
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        # Any lookup/IO failure is reported as a syntax error for this line.
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
mjfarmer/scada_py | env/lib/python2.7/site-packages/traitlets/tests/test_traitlets.py | 4 | 61707 | # encoding: utf-8
"""Tests for traitlets.traitlets."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
# also under the terms of the Modified BSD License.
import pickle
import re
import sys
from unittest import TestCase
from ._warnings import expected_warnings
import nose.tools as nt
from nose import SkipTest
from traitlets import (
HasTraits, MetaHasTraits, TraitType, Any, Bool, CBytes, Dict, Enum,
Int, Long, Integer, Float, Complex, Bytes, Unicode, TraitError,
Union, All, Undefined, Type, This, Instance, TCPAddress, List, Tuple,
ObjectName, DottedObjectName, CRegExp, link, directional_link,
ForwardDeclaredType, ForwardDeclaredInstance, validate, observe, default,
observe_compat, BaseDescriptor, HasDescriptors,
)
from ipython_genutils import py3compat
from ipython_genutils.testing.decorators import skipif
def change_dict(*ordered_values):
    """Build a change-notification dict from positional values.

    The values are paired, in order, with the keys
    ('name', 'old', 'new', 'owner', 'type'); when fewer values are
    supplied, trailing keys are simply left out.
    """
    keys = ('name', 'old', 'new', 'owner', 'type')
    return {key: value for key, value in zip(keys, ordered_values)}
#-----------------------------------------------------------------------------
# Helper classes for testing
#-----------------------------------------------------------------------------
class HasTraitsStub(HasTraits):
    """HasTraits stub that records the last change notification it receives."""

    def notify_change(self, change):
        # Stash the pieces of the change dict so tests can assert on them.
        self._notify_name = change['name']
        self._notify_old = change['old']
        self._notify_new = change['new']
        self._notify_type = change['type']
#-----------------------------------------------------------------------------
# Test classes
#-----------------------------------------------------------------------------
class TestTraitType(TestCase):
    """Tests for the core behavior of the TraitType base class."""

    def test_get_undefined(self):
        class A(HasTraits):
            a = TraitType
        a = A()
        with self.assertRaises(TraitError):
            a.a

    def test_set(self):
        class A(HasTraitsStub):
            a = TraitType
        a = A()
        a.a = 10
        self.assertEqual(a.a, 10)
        self.assertEqual(a._notify_name, 'a')
        self.assertEqual(a._notify_old, Undefined)
        self.assertEqual(a._notify_new, 10)

    def test_validate(self):
        class MyTT(TraitType):
            def validate(self, inst, value):
                return -1
        class A(HasTraitsStub):
            tt = MyTT
        a = A()
        a.tt = 10
        self.assertEqual(a.tt, -1)

    def test_default_validate(self):
        class MyIntTT(TraitType):
            def validate(self, obj, value):
                if isinstance(value, int):
                    return value
                self.error(obj, value)
        class A(HasTraits):
            tt = MyIntTT(10)
        a = A()
        self.assertEqual(a.tt, 10)
        # Defaults are validated when the HasTraits is instantiated
        class B(HasTraits):
            tt = MyIntTT('bad default')
        self.assertRaises(TraitError, B)

    def test_info(self):
        class A(HasTraits):
            tt = TraitType
        a = A()
        self.assertEqual(A.tt.info(), 'any value')

    def test_error(self):
        class A(HasTraits):
            tt = TraitType
        a = A()
        self.assertRaises(TraitError, A.tt.error, a, 10)

    def test_deprecated_dynamic_initializer(self):
        # Old-style _<name>_default methods should still provide defaults.
        class A(HasTraits):
            x = Int(10)
            def _x_default(self):
                return 11
        class B(A):
            x = Int(20)
        class C(A):
            def _x_default(self):
                return 21

        a = A()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})
        b = B()
        self.assertEqual(b.x, 20)
        self.assertEqual(b._trait_values, {'x': 20})
        c = C()
        self.assertEqual(c._trait_values, {})
        self.assertEqual(c.x, 21)
        self.assertEqual(c._trait_values, {'x': 21})
        # Ensure that the base class remains unmolested when the _default
        # initializer gets overridden in a subclass.
        a = A()
        c = C()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})

    def test_dynamic_initializer(self):
        # New-style @default('<name>') decorated defaults.
        class A(HasTraits):
            x = Int(10)
            @default('x')
            def _default_x(self):
                return 11
        class B(A):
            x = Int(20)
        class C(A):
            @default('x')
            def _default_x(self):
                return 21

        a = A()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})
        b = B()
        self.assertEqual(b.x, 20)
        self.assertEqual(b._trait_values, {'x': 20})
        c = C()
        self.assertEqual(c._trait_values, {})
        self.assertEqual(c.x, 21)
        self.assertEqual(c._trait_values, {'x': 21})
        # Ensure that the base class remains unmolested when the _default
        # initializer gets overridden in a subclass.
        a = A()
        c = C()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})

    def test_tag_metadata(self):
        class MyIntTT(TraitType):
            metadata = {'a': 1, 'b': 2}
        a = MyIntTT(10).tag(b=3, c=4)
        self.assertEqual(a.metadata, {'a': 1, 'b': 3, 'c': 4})

    def test_metadata_localized_instance(self):
        class MyIntTT(TraitType):
            metadata = {'a': 1, 'b': 2}
        a = MyIntTT(10)
        b = MyIntTT(10)
        a.metadata['c'] = 3
        # make sure that changing a's metadata didn't change b's metadata
        self.assertNotIn('c', b.metadata)

    def test_deprecated_metadata_access(self):
        class MyIntTT(TraitType):
            metadata = {'a': 1, 'b': 2}
        a = MyIntTT(10)
        # get_metadata/set_metadata are deprecated and must warn.
        with expected_warnings(["use the instance .metadata dictionary directly"]*2):
            a.set_metadata('key', 'value')
            v = a.get_metadata('key')
        self.assertEqual(v, 'value')
        with expected_warnings(["use the instance .help string directly"]*2):
            a.set_metadata('help', 'some help')
            v = a.get_metadata('help')
        self.assertEqual(v, 'some help')

    def test_trait_types_deprecated(self):
        # Using a TraitType class (not an instance) as a trait must warn.
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = Int

    def test_trait_types_list_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = List(Int)

    def test_trait_types_tuple_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = Tuple(Int)

    def test_trait_types_dict_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = Dict(Int)
class TestHasDescriptorsMeta(TestCase):
    """Checks on the MetaHasTraits metaclass wiring."""

    def test_metaclass(self):
        self.assertEqual(type(HasTraits), MetaHasTraits)

        class A(HasTraits):
            a = Int()

        a = A()
        self.assertEqual(type(a.__class__), MetaHasTraits)
        self.assertEqual(a.a,0)
        a.a = 10
        self.assertEqual(a.a,10)

        class B(HasTraits):
            b = Int()

        b = B()
        self.assertEqual(b.b,0)
        b.b = 10
        self.assertEqual(b.b,10)

        class C(HasTraits):
            c = Int(30)

        c = C()
        self.assertEqual(c.c,30)
        c.c = 10
        self.assertEqual(c.c,10)

    def test_this_class(self):
        # this_class must point at the class that *defined* the This trait.
        class A(HasTraits):
            t = This()
            tt = This()
        class B(A):
            tt = This()
            ttt = This()
        self.assertEqual(A.t.this_class, A)
        self.assertEqual(B.t.this_class, A)
        self.assertEqual(B.tt.this_class, B)
        self.assertEqual(B.ttt.this_class, B)
class TestHasDescriptors(TestCase):
    """Verify the HasDescriptors per-instance setup hook ordering."""

    def test_setup_instance(self):
        class FooDescriptor(BaseDescriptor):
            def instance_init(self, inst):
                # setup_instance must run before descriptor instance_init,
                # so the attribute set there is already visible here.
                foo = inst.foo # instance should have the attr

        class HasFooDescriptors(HasDescriptors):
            fd = FooDescriptor()

            def setup_instance(self, *args, **kwargs):
                self.foo = kwargs.get('foo', None)
                super(HasFooDescriptors, self).setup_instance(*args, **kwargs)

        hfd = HasFooDescriptors(foo='bar')
class TestHasTraitsNotify(TestCase):
    """Tests for the deprecated on_trait_change() notification API."""

    def setUp(self):
        self._notify1 = []
        self._notify2 = []

    def notify1(self, name, old, new):
        self._notify1.append((name, old, new))

    def notify2(self, name, old, new):
        self._notify2.append((name, old, new))

    def test_notify_all(self):
        class A(HasTraits):
            a = Int()
            b = Float()

        a = A()
        a.on_trait_change(self.notify1)
        # Setting a trait to its current (default) value fires no event.
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.b = 0.0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        self.assertTrue(('a',0,10) in self._notify1)
        a.b = 10.0
        self.assertTrue(('b',0.0,10.0) in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')
        self.assertRaises(TraitError,setattr,a,'b','bad string')
        self._notify1 = []
        a.on_trait_change(self.notify1,remove=True)
        a.a = 20
        a.b = 20.0
        self.assertEqual(len(self._notify1),0)

    def test_notify_one(self):
        class A(HasTraits):
            a = Int()
            b = Float()

        a = A()
        a.on_trait_change(self.notify1, 'a')
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        self.assertTrue(('a',0,10) in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')

    def test_subclass(self):
        class A(HasTraits):
            a = Int()

        class B(A):
            b = Float()

        b = B()
        self.assertEqual(b.a,0)
        self.assertEqual(b.b,0.0)
        b.a = 100
        b.b = 100.0
        self.assertEqual(b.a,100)
        self.assertEqual(b.b,100.0)

    def test_notify_subclass(self):
        class A(HasTraits):
            a = Int()

        class B(A):
            b = Float()

        b = B()
        b.on_trait_change(self.notify1, 'a')
        b.on_trait_change(self.notify2, 'b')
        b.a = 0
        b.b = 0.0
        self.assertEqual(len(self._notify1),0)
        self.assertEqual(len(self._notify2),0)
        b.a = 10
        b.b = 10.0
        self.assertTrue(('a',0,10) in self._notify1)
        self.assertTrue(('b',0.0,10.0) in self._notify2)

    def test_static_notify(self):
        # Statically declared _<name>_changed methods.
        class A(HasTraits):
            a = Int()
            _notify1 = []
            def _a_changed(self, name, old, new):
                self._notify1.append((name, old, new))

        a = A()
        a.a = 0
        # This is broken!!!
        self.assertEqual(len(a._notify1),0)
        a.a = 10
        self.assertTrue(('a',0,10) in a._notify1)

        class B(A):
            b = Float()
            _notify2 = []
            def _b_changed(self, name, old, new):
                self._notify2.append((name, old, new))

        b = B()
        b.a = 10
        b.b = 10.0
        self.assertTrue(('a',0,10) in b._notify1)
        self.assertTrue(('b',0.0,10.0) in b._notify2)

    def test_notify_args(self):
        # Callbacks with 0..4 positional parameters all get dispatched
        # with the appropriate subset of (name, old, new, obj).
        def callback0():
            self.cb = ()
        def callback1(name):
            self.cb = (name,)
        def callback2(name, new):
            self.cb = (name, new)
        def callback3(name, old, new):
            self.cb = (name, old, new)
        def callback4(name, old, new, obj):
            self.cb = (name, old, new, obj)

        class A(HasTraits):
            a = Int()

        a = A()
        a.on_trait_change(callback0, 'a')
        a.a = 10
        self.assertEqual(self.cb,())
        a.on_trait_change(callback0, 'a', remove=True)

        a.on_trait_change(callback1, 'a')
        a.a = 100
        self.assertEqual(self.cb,('a',))
        a.on_trait_change(callback1, 'a', remove=True)

        a.on_trait_change(callback2, 'a')
        a.a = 1000
        self.assertEqual(self.cb,('a',1000))
        a.on_trait_change(callback2, 'a', remove=True)

        a.on_trait_change(callback3, 'a')
        a.a = 10000
        self.assertEqual(self.cb,('a',1000,10000))
        a.on_trait_change(callback3, 'a', remove=True)

        a.on_trait_change(callback4, 'a')
        a.a = 100000
        self.assertEqual(self.cb,('a',10000,100000,a))
        self.assertEqual(len(a._trait_notifiers['a']['change']), 1)
        a.on_trait_change(callback4, 'a', remove=True)
        self.assertEqual(len(a._trait_notifiers['a']['change']), 0)

    def test_notify_only_once(self):
        # Re-registering the same handler must not produce duplicate calls.
        class A(HasTraits):
            listen_to = ['a']
            a = Int(0)
            b = 0

            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.on_trait_change(self.listener1, ['a'])

            def listener1(self, name, old, new):
                self.b += 1

        class B(A):
            c = 0
            d = 0

            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.on_trait_change(self.listener2)

            def listener2(self, name, old, new):
                self.c += 1

            def _a_changed(self, name, old, new):
                self.d += 1

        b = B()
        b.a += 1
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
        b.a += 1
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
class TestObserveDecorator(TestCase):
    """Tests for the @observe decorator and the observe()/unobserve() API."""

    def setUp(self):
        self._notify1 = []
        self._notify2 = []

    def notify1(self, change):
        self._notify1.append(change)

    def notify2(self, change):
        self._notify2.append(change)

    def test_notify_all(self):
        class A(HasTraits):
            a = Int()
            b = Float()

        a = A()
        a.observe(self.notify1)
        # Assigning the current default fires no notification.
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.b = 0.0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        change = change_dict('a', 0, 10, a, 'change')
        self.assertTrue(change in self._notify1)
        a.b = 10.0
        change = change_dict('b', 0.0, 10.0, a, 'change')
        self.assertTrue(change in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')
        self.assertRaises(TraitError,setattr,a,'b','bad string')
        self._notify1 = []
        a.unobserve(self.notify1)
        a.a = 20
        a.b = 20.0
        self.assertEqual(len(self._notify1),0)

    def test_notify_one(self):
        class A(HasTraits):
            a = Int()
            b = Float()

        a = A()
        a.observe(self.notify1, 'a')
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        change = change_dict('a', 0, 10, a, 'change')
        self.assertTrue(change in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')

    def test_subclass(self):
        class A(HasTraits):
            a = Int()

        class B(A):
            b = Float()

        b = B()
        self.assertEqual(b.a,0)
        self.assertEqual(b.b,0.0)
        b.a = 100
        b.b = 100.0
        self.assertEqual(b.a,100)
        self.assertEqual(b.b,100.0)

    def test_notify_subclass(self):
        class A(HasTraits):
            a = Int()

        class B(A):
            b = Float()

        b = B()
        b.observe(self.notify1, 'a')
        b.observe(self.notify2, 'b')
        b.a = 0
        b.b = 0.0
        self.assertEqual(len(self._notify1),0)
        self.assertEqual(len(self._notify2),0)
        b.a = 10
        b.b = 10.0
        change = change_dict('a', 0, 10, b, 'change')
        self.assertTrue(change in self._notify1)
        change = change_dict('b', 0.0, 10.0, b, 'change')
        self.assertTrue(change in self._notify2)

    def test_static_notify(self):
        # @observe-decorated methods declared on the class itself.
        class A(HasTraits):
            a = Int()
            b = Int()
            _notify1 = []
            _notify_any = []

            @observe('a')
            def _a_changed(self, change):
                self._notify1.append(change)

            @observe(All)
            def _any_changed(self, change):
                self._notify_any.append(change)

        a = A()
        a.a = 0
        self.assertEqual(len(a._notify1),0)
        a.a = 10
        change = change_dict('a', 0, 10, a, 'change')
        self.assertTrue(change in a._notify1)
        a.b = 1
        # The All observer sees both the 'a' and the 'b' change.
        self.assertEqual(len(a._notify_any), 2)
        change = change_dict('b', 0, 1, a, 'change')
        self.assertTrue(change in a._notify_any)

        class B(A):
            b = Float()
            _notify2 = []

            @observe('b')
            def _b_changed(self, change):
                self._notify2.append(change)

        b = B()
        b.a = 10
        b.b = 10.0
        change = change_dict('a', 0, 10, b, 'change')
        self.assertTrue(change in b._notify1)
        change = change_dict('b', 0.0, 10.0, b, 'change')
        self.assertTrue(change in b._notify2)

    def test_notify_args(self):
        def callback0():
            self.cb = ()
        def callback1(change):
            self.cb = change

        class A(HasTraits):
            a = Int()

        a = A()
        a.on_trait_change(callback0, 'a')
        a.a = 10
        self.assertEqual(self.cb,())
        a.unobserve(callback0, 'a')

        a.observe(callback1, 'a')
        a.a = 100
        change = change_dict('a', 10, 100, a, 'change')
        self.assertEqual(self.cb, change)
        self.assertEqual(len(a._trait_notifiers['a']['change']), 1)
        a.unobserve(callback1, 'a')
        self.assertEqual(len(a._trait_notifiers['a']['change']), 0)

    def test_notify_only_once(self):
        # Re-registering the same handler must not produce duplicate calls.
        class A(HasTraits):
            listen_to = ['a']
            a = Int(0)
            b = 0

            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.observe(self.listener1, ['a'])

            def listener1(self, change):
                self.b += 1

        class B(A):
            c = 0
            d = 0

            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.observe(self.listener2)

            def listener2(self, change):
                self.c += 1

            @observe('a')
            def _a_changed(self, change):
                self.d += 1

        b = B()
        b.a += 1
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
        b.a += 1
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
class TestHasTraits(TestCase):
    """Tests for trait introspection helpers on HasTraits."""

    def test_trait_names(self):
        class A(HasTraits):
            i = Int()
            f = Float()
        a = A()
        self.assertEqual(sorted(a.trait_names()),['f','i'])
        self.assertEqual(sorted(A.class_trait_names()),['f','i'])
        self.assertTrue(a.has_trait('f'))
        self.assertFalse(a.has_trait('g'))

    def test_trait_metadata_deprecated(self):
        # Passing metadata as trait kwargs (instead of .tag()) must warn.
        with expected_warnings(['Metadata should be set using the \.tag\(\) method']):
            class A(HasTraits):
                i = Int(config_key='MY_VALUE')
        a = A()
        self.assertEqual(a.trait_metadata('i','config_key'), 'MY_VALUE')

    def test_trait_metadata(self):
        class A(HasTraits):
            i = Int().tag(config_key='MY_VALUE')
        a = A()
        self.assertEqual(a.trait_metadata('i','config_key'), 'MY_VALUE')

    def test_trait_metadata_default(self):
        class A(HasTraits):
            i = Int()
        a = A()
        self.assertEqual(a.trait_metadata('i', 'config_key'), None)
        self.assertEqual(a.trait_metadata('i', 'config_key', 'default'), 'default')

    def test_traits(self):
        class A(HasTraits):
            i = Int()
            f = Float()
        a = A()
        self.assertEqual(a.traits(), dict(i=A.i, f=A.f))
        self.assertEqual(A.class_traits(), dict(i=A.i, f=A.f))

    def test_traits_metadata(self):
        class A(HasTraits):
            i = Int().tag(config_key='VALUE1', other_thing='VALUE2')
            f = Float().tag(config_key='VALUE3', other_thing='VALUE2')
            j = Int(0)
        a = A()
        self.assertEqual(a.traits(), dict(i=A.i, f=A.f, j=A.j))
        traits = a.traits(config_key='VALUE1', other_thing='VALUE2')
        self.assertEqual(traits, dict(i=A.i))

        # This passes, but it shouldn't because I am replicating a bug in
        # traits.
        traits = a.traits(config_key=lambda v: True)
        self.assertEqual(traits, dict(i=A.i, f=A.f, j=A.j))

    def test_traits_metadata_deprecated(self):
        with expected_warnings(['Metadata should be set using the \.tag\(\) method']*2):
            class A(HasTraits):
                i = Int(config_key='VALUE1', other_thing='VALUE2')
                f = Float(config_key='VALUE3', other_thing='VALUE2')
                j = Int(0)
        a = A()
        self.assertEqual(a.traits(), dict(i=A.i, f=A.f, j=A.j))
        traits = a.traits(config_key='VALUE1', other_thing='VALUE2')
        self.assertEqual(traits, dict(i=A.i))

        # This passes, but it shouldn't because I am replicating a bug in
        # traits.
        traits = a.traits(config_key=lambda v: True)
        self.assertEqual(traits, dict(i=A.i, f=A.f, j=A.j))

    def test_init(self):
        class A(HasTraits):
            i = Int()
            x = Float()
        a = A(i=1, x=10.0)
        self.assertEqual(a.i, 1)
        self.assertEqual(a.x, 10.0)

    def test_positional_args(self):
        class A(HasTraits):
            i = Int(0)
            def __init__(self, i):
                super(A, self).__init__()
                self.i = i

        a = A(5)
        self.assertEqual(a.i, 5)
        # should raise TypeError if no positional arg given
        self.assertRaises(TypeError, A)
#-----------------------------------------------------------------------------
# Tests for specific trait types
#-----------------------------------------------------------------------------
class TestType(TestCase):
    """Tests for the Type trait (values are classes, not instances)."""

    def test_default(self):
        class B(object): pass
        class A(HasTraits):
            klass = Type(allow_none=True)

        a = A()
        self.assertEqual(a.klass, object)

        a.klass = B
        self.assertEqual(a.klass, B)
        self.assertRaises(TraitError, setattr, a, 'klass', 10)

    def test_default_options(self):
        class B(object): pass
        class C(B): pass

        class A(HasTraits):
            # Different possible combinations of options for default_value
            # and klass. default_value=None is only valid with allow_none=True.
            k1 = Type()
            k2 = Type(None, allow_none=True)
            k3 = Type(B)
            k4 = Type(klass=B)
            k5 = Type(default_value=None, klass=B, allow_none=True)
            k6 = Type(default_value=C, klass=B)

        self.assertIs(A.k1.default_value, object)
        self.assertIs(A.k1.klass, object)
        self.assertIs(A.k2.default_value, None)
        self.assertIs(A.k2.klass, object)
        self.assertIs(A.k3.default_value, B)
        self.assertIs(A.k3.klass, B)
        self.assertIs(A.k4.default_value, B)
        self.assertIs(A.k4.klass, B)
        self.assertIs(A.k5.default_value, None)
        self.assertIs(A.k5.klass, B)
        self.assertIs(A.k6.default_value, C)
        self.assertIs(A.k6.klass, B)

        a = A()
        self.assertIs(a.k1, object)
        self.assertIs(a.k2, None)
        self.assertIs(a.k3, B)
        self.assertIs(a.k4, B)
        self.assertIs(a.k5, None)
        self.assertIs(a.k6, C)

    def test_value(self):
        class B(object): pass
        class C(object): pass
        class A(HasTraits):
            klass = Type(B)

        a = A()
        self.assertEqual(a.klass, B)
        # Only B and its subclasses are acceptable values.
        self.assertRaises(TraitError, setattr, a, 'klass', C)
        self.assertRaises(TraitError, setattr, a, 'klass', object)
        a.klass = B

    def test_allow_none(self):
        class B(object): pass
        class C(B): pass
        class A(HasTraits):
            klass = Type(B)

        a = A()
        self.assertEqual(a.klass, B)
        self.assertRaises(TraitError, setattr, a, 'klass', None)
        a.klass = C
        self.assertEqual(a.klass, C)

    def test_validate_klass(self):
        # Dotted-name strings that cannot be imported fail at instantiation.
        class A(HasTraits):
            klass = Type('no strings allowed')

        self.assertRaises(ImportError, A)

        class A(HasTraits):
            klass = Type('rub.adub.Duck')

        self.assertRaises(ImportError, A)

    def test_validate_default(self):
        class B(object): pass
        class A(HasTraits):
            klass = Type('bad default', B)

        self.assertRaises(ImportError, A)

        class C(HasTraits):
            klass = Type(None, B)

        self.assertRaises(TraitError, C)

    def test_str_klass(self):
        class A(HasTraits):
            klass = Type('ipython_genutils.ipstruct.Struct')

        from ipython_genutils.ipstruct import Struct
        a = A()
        a.klass = Struct
        self.assertEqual(a.klass, Struct)

        self.assertRaises(TraitError, setattr, a, 'klass', 10)

    def test_set_str_klass(self):
        class A(HasTraits):
            klass = Type()

        a = A(klass='ipython_genutils.ipstruct.Struct')
        from ipython_genutils.ipstruct import Struct
        self.assertEqual(a.klass, Struct)
class TestInstance(TestCase):
    """Tests for the Instance trait (values are instances of a class)."""

    def test_basic(self):
        class Foo(object): pass
        class Bar(Foo): pass
        class Bah(object): pass

        class A(HasTraits):
            inst = Instance(Foo, allow_none=True)

        a = A()
        self.assertTrue(a.inst is None)
        a.inst = Foo()
        self.assertTrue(isinstance(a.inst, Foo))
        a.inst = Bar()
        self.assertTrue(isinstance(a.inst, Foo))
        # Classes themselves and unrelated instances are rejected.
        self.assertRaises(TraitError, setattr, a, 'inst', Foo)
        self.assertRaises(TraitError, setattr, a, 'inst', Bar)
        self.assertRaises(TraitError, setattr, a, 'inst', Bah())

    def test_default_klass(self):
        # A subclass may fix 'klass' as a class attribute instead of an arg.
        class Foo(object): pass
        class Bar(Foo): pass
        class Bah(object): pass

        class FooInstance(Instance):
            klass = Foo

        class A(HasTraits):
            inst = FooInstance(allow_none=True)

        a = A()
        self.assertTrue(a.inst is None)
        a.inst = Foo()
        self.assertTrue(isinstance(a.inst, Foo))
        a.inst = Bar()
        self.assertTrue(isinstance(a.inst, Foo))
        self.assertRaises(TraitError, setattr, a, 'inst', Foo)
        self.assertRaises(TraitError, setattr, a, 'inst', Bar)
        self.assertRaises(TraitError, setattr, a, 'inst', Bah())

    def test_unique_default_value(self):
        # Default instances must be created per HasTraits instance,
        # not shared between them.
        class Foo(object): pass
        class A(HasTraits):
            inst = Instance(Foo,(),{})

        a = A()
        b = A()
        self.assertTrue(a.inst is not b.inst)

    def test_args_kw(self):
        class Foo(object):
            def __init__(self, c): self.c = c
        class Bar(object): pass
        class Bah(object):
            def __init__(self, c, d):
                self.c = c; self.d = d

        class A(HasTraits):
            inst = Instance(Foo, (10,))
        a = A()
        self.assertEqual(a.inst.c, 10)

        class B(HasTraits):
            inst = Instance(Bah, args=(10,), kw=dict(d=20))
        b = B()
        self.assertEqual(b.inst.c, 10)
        self.assertEqual(b.inst.d, 20)

        class C(HasTraits):
            inst = Instance(Foo, allow_none=True)
        c = C()
        self.assertTrue(c.inst is None)

    def test_bad_default(self):
        # No default args and allow_none=False: accessing the default raises.
        class Foo(object): pass

        class A(HasTraits):
            inst = Instance(Foo)

        a = A()
        with self.assertRaises(TraitError):
            a.inst

    def test_instance(self):
        # Instance() must be given a class, not an instance.
        class Foo(object): pass

        def inner():
            class A(HasTraits):
                inst = Instance(Foo())

        self.assertRaises(TraitError, inner)
class TestThis(TestCase):
    """Tests for the This trait, which only accepts instances of its own class."""
    def test_this_class(self):
        """This defaults to None and accepts instances of the declaring class."""
        class Foo(HasTraits):
            this = This()
        f = Foo()
        self.assertEqual(f.this, None)
        g = Foo()
        f.this = g
        self.assertEqual(f.this, g)
        # Arbitrary values are rejected.
        self.assertRaises(TraitError, setattr, f, 'this', 10)
    def test_this_inst(self):
        """Assignment of a fresh instance of the same class is accepted."""
        class Foo(HasTraits):
            this = This()
        f = Foo()
        f.this = Foo()
        self.assertTrue(isinstance(f.this, Foo))
    def test_subclass(self):
        """Subclass instances are valid for a This declared on the parent."""
        class Foo(HasTraits):
            t = This()
        class Bar(Foo):
            pass
        f = Foo()
        b = Bar()
        f.t = b
        b.t = f
        self.assertEqual(f.t, b)
        self.assertEqual(b.t, f)
    def test_subclass_override(self):
        """Re-declaring This in a subclass narrows the accepted type."""
        class Foo(HasTraits):
            t = This()
        class Bar(Foo):
            t = This()
        f = Foo()
        b = Bar()
        f.t = b
        self.assertEqual(f.t, b)
        # Bar.t only accepts Bar instances, so a plain Foo is rejected.
        self.assertRaises(TraitError, setattr, b, 't', f)
    def test_this_in_container(self):
        """This works as the element trait of a container like List."""
        class Tree(HasTraits):
            value = Unicode()
            leaves = List(This())
        tree = Tree(
            value='foo',
            leaves=[Tree(value='bar'), Tree(value='buzz')]
        )
        with self.assertRaises(TraitError):
            tree.leaves = [1, 2]
class TraitTestBase(TestCase):
    """A base testing class for basic trait types.

    Subclasses provide ``obj`` (an instance with a ``value`` trait) and
    optionally ``_default_value``, ``_good_values`` and ``_bad_values``;
    the inherited test_* methods then exercise assignment, validation
    and the allow_none toggle generically.
    """
    def assign(self, value):
        # Single assignment hook so subclasses could intercept it.
        self.obj.value = value
    def coerce(self, value):
        # Identity by default; subclasses override to model casting traits.
        return value
    def test_good_values(self):
        # Every good value must round-trip through assignment (after coercion).
        if hasattr(self, '_good_values'):
            for value in self._good_values:
                self.assign(value)
                self.assertEqual(self.obj.value, self.coerce(value))
    def test_bad_values(self):
        # Every bad value must raise TraitError; re-raise with the offending
        # value in the message so failures are identifiable.
        if hasattr(self, '_bad_values'):
            for value in self._bad_values:
                try:
                    self.assertRaises(TraitError, self.assign, value)
                except AssertionError:
                    assert False, value
    def test_default_value(self):
        if hasattr(self, '_default_value'):
            self.assertEqual(self._default_value, self.obj.value)
    def test_allow_none(self):
        # Only meaningful when None is normally rejected: temporarily flip
        # allow_none on the shared trait, check None passes, then restore.
        if (hasattr(self, '_bad_values') and hasattr(self, '_good_values') and
            None in self._bad_values):
            trait=self.obj.traits()['value']
            try:
                trait.allow_none = True
                self._bad_values.remove(None)
                # skip coerce: allow_none passes None through unchanged.
                self.assign(None)
                self.assertEqual(self.obj.value,None)
                self.test_good_values()
                self.test_bad_values()
            finally:
                # tear down: the trait object is shared at class level, so the
                # flag and the _bad_values list must be restored for other tests.
                trait.allow_none = False
                self._bad_values.append(None)
    def tearDown(self):
        # restore default value after tests, if set
        if hasattr(self, '_default_value'):
            self.obj.value = self._default_value
class AnyTrait(HasTraits):
value = Any()
class AnyTraitTest(TraitTestBase):
obj = AnyTrait()
_default_value = None
_good_values = [10.0, 'ten', u'ten', [10], {'ten': 10},(10,), None, 1j]
_bad_values = []
class UnionTrait(HasTraits):
value = Union([Type(), Bool()])
class UnionTraitTest(TraitTestBase):
obj = UnionTrait(value='ipython_genutils.ipstruct.Struct')
_good_values = [int, float, True]
_bad_values = [[], (0,), 1j]
class OrTrait(HasTraits):
value = Bool() | Unicode()
class OrTraitTest(TraitTestBase):
obj = OrTrait()
_good_values = [True, False, 'ten']
_bad_values = [[], (0,), 1j]
class IntTrait(HasTraits):
value = Int(99, min=-100)
class TestInt(TraitTestBase):
obj = IntTrait()
_default_value = 99
_good_values = [10, -10]
_bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None, 1j,
10.1, -10.1, '10L', '-10L', '10.1', '-10.1', u'10L',
u'-10L', u'10.1', u'-10.1', '10', '-10', u'10', -200]
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
class LongTrait(HasTraits):
value = Long(99 if py3compat.PY3 else long(99))
class TestLong(TraitTestBase):
obj = LongTrait()
_default_value = 99 if py3compat.PY3 else long(99)
_good_values = [10, -10]
_bad_values = ['ten', u'ten', [10], {'ten': 10},(10,),
None, 1j, 10.1, -10.1, '10', '-10', '10L', '-10L', '10.1',
'-10.1', u'10', u'-10', u'10L', u'-10L', u'10.1',
u'-10.1']
if not py3compat.PY3:
# maxint undefined on py3, because int == long
_good_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
_bad_values.extend([[long(10)], (long(10),)])
@skipif(py3compat.PY3, "not relevant on py3")
def test_cast_small(self):
"""Long casts ints to long"""
self.obj.value = 10
self.assertEqual(type(self.obj.value), long)
class IntegerTrait(HasTraits):
value = Integer(1)
class TestInteger(TestLong):
    """Integer behaves like Long but normalizes small values down to int."""
    obj = IntegerTrait()
    _default_value = 1
    def coerce(self, n):
        # Integer stores values in the int range as int, so expected
        # values from TestLong must be cast before comparison.
        return int(n)
    @skipif(py3compat.PY3, "not relevant on py3")
    def test_cast_small(self):
        """Integer casts small longs to int."""
        # The @skipif decorator already excludes Python 3 (where int == long);
        # the former inline ``raise SkipTest`` duplicated it and is removed.
        self.obj.value = long(100)
        self.assertEqual(type(self.obj.value), int)
class FloatTrait(HasTraits):
value = Float(99.0, max=200.0)
class TestFloat(TraitTestBase):
obj = FloatTrait()
_default_value = 99.0
_good_values = [10, -10, 10.1, -10.1]
_bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None,
1j, '10', '-10', '10L', '-10L', '10.1', '-10.1', u'10',
u'-10', u'10L', u'-10L', u'10.1', u'-10.1', 201.0]
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class ComplexTrait(HasTraits):
value = Complex(99.0-99.0j)
class TestComplex(TraitTestBase):
obj = ComplexTrait()
_default_value = 99.0-99.0j
_good_values = [10, -10, 10.1, -10.1, 10j, 10+10j, 10-10j,
10.1j, 10.1+10.1j, 10.1-10.1j]
_bad_values = [u'10L', u'-10L', 'ten', [10], {'ten': 10},(10,), None]
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class BytesTrait(HasTraits):
value = Bytes(b'string')
class TestBytes(TraitTestBase):
obj = BytesTrait()
_default_value = b'string'
_good_values = [b'10', b'-10', b'10L',
b'-10L', b'10.1', b'-10.1', b'string']
_bad_values = [10, -10, 10.1, -10.1, 1j, [10],
['ten'],{'ten': 10},(10,), None, u'string']
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class UnicodeTrait(HasTraits):
value = Unicode(u'unicode')
class TestUnicode(TraitTestBase):
obj = UnicodeTrait()
_default_value = u'unicode'
_good_values = ['10', '-10', '10L', '-10L', '10.1',
'-10.1', '', u'', 'string', u'string', u"€"]
_bad_values = [10, -10, 10.1, -10.1, 1j,
[10], ['ten'], [u'ten'], {'ten': 10},(10,), None]
if not py3compat.PY3:
_bad_values.extend([long(10), long(-10)])
class ObjectNameTrait(HasTraits):
value = ObjectName("abc")
class TestObjectName(TraitTestBase):
obj = ObjectNameTrait()
_default_value = "abc"
_good_values = ["a", "gh", "g9", "g_", "_G", u"a345_"]
_bad_values = [1, "", u"€", "9g", "!", "#abc", "aj@", "a.b", "a()", "a[0]",
None, object(), object]
if sys.version_info[0] < 3:
_bad_values.append(u"þ")
else:
_good_values.append(u"þ") # þ=1 is valid in Python 3 (PEP 3131).
class DottedObjectNameTrait(HasTraits):
value = DottedObjectName("a.b")
class TestDottedObjectName(TraitTestBase):
obj = DottedObjectNameTrait()
_default_value = "a.b"
_good_values = ["A", "y.t", "y765.__repr__", "os.path.join", u"os.path.join"]
_bad_values = [1, u"abc.€", "_.@", ".", ".abc", "abc.", ".abc.", None]
if sys.version_info[0] < 3:
_bad_values.append(u"t.þ")
else:
_good_values.append(u"t.þ")
class TCPAddressTrait(HasTraits):
value = TCPAddress()
class TestTCPAddress(TraitTestBase):
obj = TCPAddressTrait()
_default_value = ('127.0.0.1',0)
_good_values = [('localhost',0),('192.168.0.1',1000),('www.google.com',80)]
_bad_values = [(0,0),('localhost',10.0),('localhost',-1), None]
class ListTrait(HasTraits):
value = List(Int())
class TestList(TraitTestBase):
obj = ListTrait()
_default_value = []
_good_values = [[], [1], list(range(10)), (1,2)]
_bad_values = [10, [1,'a'], 'a']
def coerce(self, value):
if value is not None:
value = list(value)
return value
class Foo(object):
pass
class NoneInstanceListTrait(HasTraits):
value = List(Instance(Foo))
class TestNoneInstanceList(TraitTestBase):
obj = NoneInstanceListTrait()
_default_value = []
_good_values = [[Foo(), Foo()], []]
_bad_values = [[None], [Foo(), None]]
class InstanceListTrait(HasTraits):
value = List(Instance(__name__+'.Foo'))
class TestInstanceList(TraitTestBase):
obj = InstanceListTrait()
def test_klass(self):
"""Test that the instance klass is properly assigned."""
self.assertIs(self.obj.traits()['value']._trait.klass, Foo)
_default_value = []
_good_values = [[Foo(), Foo()], []]
_bad_values = [['1', 2,], '1', [Foo], None]
class UnionListTrait(HasTraits):
value = List(Int() | Bool())
class TestUnionListTrait(TraitTestBase):
    """Exercise List(Int() | Bool()) through the TraitTestBase harness.

    BUG FIX: this class previously subclassed HasTraits, so unittest never
    collected it and none of these checks actually ran. TraitTestBase is
    the base every sibling *Trait test class in this module uses.
    """
    obj = UnionListTrait()
    _default_value = []
    _good_values = [[True, 1], [False, True]]
    _bad_values = [[1, 'True'], False]
class LenListTrait(HasTraits):
value = List(Int(), [0], minlen=1, maxlen=2)
class TestLenList(TraitTestBase):
obj = LenListTrait()
_default_value = [0]
_good_values = [[1], [1,2], (1,2)]
_bad_values = [10, [1,'a'], 'a', [], list(range(3))]
def coerce(self, value):
if value is not None:
value = list(value)
return value
class TupleTrait(HasTraits):
value = Tuple(Int(allow_none=True), default_value=(1,))
class TestTupleTrait(TraitTestBase):
obj = TupleTrait()
_default_value = (1,)
_good_values = [(1,), (0,), [1]]
_bad_values = [10, (1, 2), ('a'), (), None]
def coerce(self, value):
if value is not None:
value = tuple(value)
return value
def test_invalid_args(self):
self.assertRaises(TypeError, Tuple, 5)
self.assertRaises(TypeError, Tuple, default_value='hello')
t = Tuple(Int(), CBytes(), default_value=(1,5))
class LooseTupleTrait(HasTraits):
value = Tuple((1,2,3))
class TestLooseTupleTrait(TraitTestBase):
obj = LooseTupleTrait()
_default_value = (1,2,3)
_good_values = [(1,), [1], (0,), tuple(range(5)), tuple('hello'), ('a',5), ()]
_bad_values = [10, 'hello', {}, None]
def coerce(self, value):
if value is not None:
value = tuple(value)
return value
def test_invalid_args(self):
self.assertRaises(TypeError, Tuple, 5)
self.assertRaises(TypeError, Tuple, default_value='hello')
t = Tuple(Int(), CBytes(), default_value=(1,5))
class MultiTupleTrait(HasTraits):
value = Tuple(Int(), Bytes(), default_value=[99,b'bottles'])
class TestMultiTuple(TraitTestBase):
obj = MultiTupleTrait()
_default_value = (99,b'bottles')
_good_values = [(1,b'a'), (2,b'b')]
_bad_values = ((),10, b'a', (1,b'a',3), (b'a',1), (1, u'a'))
class CRegExpTrait(HasTraits):
value = CRegExp(r'')
class TestCRegExp(TraitTestBase):
def coerce(self, value):
return re.compile(value)
obj = CRegExpTrait()
_default_value = re.compile(r'')
_good_values = [r'\d+', re.compile(r'\d+')]
_bad_values = ['(', None, ()]
class DictTrait(HasTraits):
value = Dict()
def test_dict_assignment():
    """Assigning a dict to a Dict trait stores the same object, not a copy."""
    payload = {}
    holder = DictTrait()
    holder.value = payload
    # Mutations through the original reference are visible via the trait,
    # proving no defensive copy was made.
    payload['a'] = 5
    nt.assert_equal(payload, holder.value)
    nt.assert_true(holder.value is payload)
class ValidatedDictTrait(HasTraits):
value = Dict(trait=Unicode(),
traits={'foo': Int()},
default_value={'foo': 1})
class TestInstanceDict(TraitTestBase):
obj = ValidatedDictTrait()
_default_value = {'foo': 1}
_good_values = [{'0': 'foo', 'foo': 1}, {'1': 'bar', 'foo': 2}]
_bad_values = [{'0': 0, 'foo': 1}, {'1': 'bar', 'foo': 'bar'}]
def test_dict_default_value():
    """Each Dict trait must receive its own copy of the ``{}`` default."""
    class Holder(HasTraits):
        d1 = Dict()
        d2 = Dict()
    holder = Holder()
    # Both defaults start empty...
    nt.assert_equal(holder.d1, {})
    nt.assert_equal(holder.d2, {})
    # ...but must be distinct objects, not one shared mutable dict.
    nt.assert_is_not(holder.d1, holder.d2)
class TestValidationHook(TestCase):
def test_parity_trait(self):
"""Verify that the early validation hook is effective"""
class Parity(HasTraits):
value = Int(0)
parity = Enum(['odd', 'even'], default_value='even')
@validate('value')
def _value_validate(self, proposal):
value = proposal['value']
if self.parity == 'even' and value % 2:
raise TraitError('Expected an even number')
if self.parity == 'odd' and (value % 2 == 0):
raise TraitError('Expected an odd number')
return value
u = Parity()
u.parity = 'odd'
u.value = 1 # OK
with self.assertRaises(TraitError):
u.value = 2 # Trait Error
u.parity = 'even'
u.value = 2 # OK
def test_multiple_validate(self):
"""Verify that we can register the same validator to multiple names"""
class OddEven(HasTraits):
odd = Int(1)
even = Int(0)
@validate('odd', 'even')
def check_valid(self, proposal):
if proposal['trait'].name == 'odd' and not proposal['value'] % 2:
raise TraitError('odd should be odd')
if proposal['trait'].name == 'even' and proposal['value'] % 2:
raise TraitError('even should be even')
u = OddEven()
u.odd = 3 # OK
with self.assertRaises(TraitError):
u.odd = 2 # Trait Error
u.even = 2 # OK
with self.assertRaises(TraitError):
u.even = 3 # Trait Error
class TestLink(TestCase):
    """Tests for bidirectional trait synchronization via ``link``."""
    def test_connect_same(self):
        """Verify two traitlets of the same type can be linked together using link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes.
        c = link((a, 'value'), (b, 'value'))
        # Make sure the values are the same at the point of linking.
        self.assertEqual(a.value, b.value)
        # Change one of the values to make sure they stay in sync.
        a.value = 5
        self.assertEqual(a.value, b.value)
        b.value = 6
        self.assertEqual(a.value, b.value)
    def test_link_different(self):
        """Verify two traitlets of different types can be linked together using link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        class B(HasTraits):
            count = Int()
        a = A(value=9)
        b = B(count=8)
        # Connect the two classes.
        c = link((a, 'value'), (b, 'count'))
        # Make sure the values are the same at the point of linking.
        self.assertEqual(a.value, b.count)
        # Change one of the values to make sure they stay in sync.
        a.value = 5
        self.assertEqual(a.value, b.count)
        b.count = 4
        self.assertEqual(a.value, b.count)
    def test_unlink(self):
        """Verify two linked traitlets can be unlinked."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes.
        c = link((a, 'value'), (b, 'value'))
        a.value = 4
        c.unlink()
        # Change one of the values to make sure they don't stay in sync.
        a.value = 5
        self.assertNotEqual(a.value, b.value)
    def test_callbacks(self):
        """Verify two linked traitlets have their callbacks called once."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        class B(HasTraits):
            count = Int()
        a = A(value=9)
        b = B(count=8)
        # Register callbacks that count.
        callback_count = []
        def a_callback(name, old, new):
            callback_count.append('a')
        a.on_trait_change(a_callback, 'value')
        def b_callback(name, old, new):
            callback_count.append('b')
        b.on_trait_change(b_callback, 'count')
        # Connect the two classes.
        c = link((a, 'value'), (b, 'count'))
        # Make sure b's count was set to a's value once.
        self.assertEqual(''.join(callback_count), 'b')
        del callback_count[:]
        # Make sure a's value was set to b's count once.
        b.count = 5
        self.assertEqual(''.join(callback_count), 'ba')
        del callback_count[:]
        # Make sure b's count was set to a's value once.
        a.value = 4
        self.assertEqual(''.join(callback_count), 'ab')
        del callback_count[:]
class TestDirectionalLink(TestCase):
    """Tests for one-way trait synchronization via ``directional_link``."""
    def test_connect_same(self):
        """Verify two traitlets of the same type can be linked together using directional_link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes.
        c = directional_link((a, 'value'), (b, 'value'))
        # Make sure the values are the same at the point of linking.
        self.assertEqual(a.value, b.value)
        # Change the value of the source and check that it synchronizes the target.
        a.value = 5
        self.assertEqual(b.value, 5)
        # Change the value of the target and check that it has no impact on the source.
        b.value = 6
        self.assertEqual(a.value, 5)
    def test_tranform(self):
        """Test transform link."""
        # NOTE: the method keeps its historical 'tranform' spelling so
        # existing by-name test selections continue to work.
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes through a doubling transform.
        c = directional_link((a, 'value'), (b, 'value'), lambda x: 2 * x)
        # Make sure the values are correct at the point of linking.
        self.assertEqual(b.value, 2 * a.value)
        # Change the value of the source and check that it modifies the target.
        a.value = 5
        self.assertEqual(b.value, 10)
        # Change the value of the target and check that it has no impact on the source.
        b.value = 6
        self.assertEqual(a.value, 5)
    def test_link_different(self):
        """Verify two traitlets of different types can be linked together using link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        class B(HasTraits):
            count = Int()
        a = A(value=9)
        b = B(count=8)
        # Connect the two classes.
        c = directional_link((a, 'value'), (b, 'count'))
        # Make sure the values are the same at the point of linking.
        self.assertEqual(a.value, b.count)
        # Change the value of the source and check that it synchronizes the target.
        a.value = 5
        self.assertEqual(b.count, 5)
        # BUG FIX: the target trait is ``count``, not ``value``. The original
        # assigned ``b.value = 6``, which only created an unrelated attribute
        # on ``b`` and left the no-back-propagation check untested.
        b.count = 6
        self.assertEqual(b.count, 6)
        # The change to the target must not propagate back to the source.
        self.assertEqual(a.value, 5)
    def test_unlink(self):
        """Verify two linked traitlets can be unlinked."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes.
        c = directional_link((a, 'value'), (b, 'value'))
        a.value = 4
        c.unlink()
        # Change one of the values to make sure they don't stay in sync.
        a.value = 5
        self.assertNotEqual(a.value, b.value)
class Pickleable(HasTraits):
i = Int()
@observe('i')
def _i_changed(self, change): pass
@validate('i')
def _i_validate(self, commit):
return commit['value']
j = Int()
def __init__(self):
with self.hold_trait_notifications():
self.i = 1
self.on_trait_change(self._i_changed, 'i')
def test_pickle_hastraits():
c = Pickleable()
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(c, protocol)
c2 = pickle.loads(p)
nt.assert_equal(c2.i, c.i)
nt.assert_equal(c2.j, c.j)
c.i = 5
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(c, protocol)
c2 = pickle.loads(p)
nt.assert_equal(c2.i, c.i)
nt.assert_equal(c2.j, c.j)
def test_hold_trait_notifications():
    """hold_trait_notifications must coalesce change events and roll back on error."""
    changes = []
    class Test(HasTraits):
        a = Integer(0)
        b = Integer(0)
        def _a_changed(self, name, old, new):
            changes.append((old, new))
        def _b_validate(self, value, trait):
            if value != 0:
                raise TraitError('Only 0 is a valid value')
            return value
    # Test context manager and nesting: no notification fires while any
    # hold (including nested ones) is active.
    t = Test()
    with t.hold_trait_notifications():
        with t.hold_trait_notifications():
            t.a = 1
            nt.assert_equal(t.a, 1)
            nt.assert_equal(changes, [])
        t.a = 2
        nt.assert_equal(t.a, 2)
        with t.hold_trait_notifications():
            t.a = 3
            nt.assert_equal(t.a, 3)
            nt.assert_equal(changes, [])
            t.a = 4
            nt.assert_equal(t.a, 4)
            nt.assert_equal(changes, [])
        t.a = 4
        nt.assert_equal(t.a, 4)
        nt.assert_equal(changes, [])
    # On exit of the outermost hold, exactly one coalesced notification
    # fires: first old value (0) paired with the final new value (4).
    nt.assert_equal(changes, [(0, 4)])
    # Test roll-back: a validation failure inside the hold must leave the
    # trait at its previous value.
    try:
        with t.hold_trait_notifications():
            t.b = 1  # raises a TraitError via _b_validate
    except TraitError:
        # BUG FIX: narrowed from a bare ``except:`` which would also have
        # silently swallowed unrelated failures (typos, KeyboardInterrupt).
        pass
    nt.assert_equal(t.b, 0)
class RollBack(HasTraits):
bar = Int()
def _bar_validate(self, value, trait):
if value:
raise TraitError('foobar')
return value
class TestRollback(TestCase):
def test_roll_back(self):
def assign_rollback():
RollBack(bar=1)
self.assertRaises(TraitError, assign_rollback)
class CacheModification(HasTraits):
foo = Int()
bar = Int()
def _bar_validate(self, value, trait):
self.foo = value
return value
def _foo_validate(self, value, trait):
self.bar = value
return value
def test_cache_modification():
CacheModification(foo=1)
CacheModification(bar=1)
class OrderTraits(HasTraits):
notified = Dict()
a = Unicode()
b = Unicode()
c = Unicode()
d = Unicode()
e = Unicode()
f = Unicode()
g = Unicode()
h = Unicode()
i = Unicode()
j = Unicode()
k = Unicode()
l = Unicode()
def _notify(self, name, old, new):
"""check the value of all traits when each trait change is triggered
This verifies that the values are not sensitive
to dict ordering when loaded from kwargs
"""
# check the value of the other traits
# when a given trait change notification fires
self.notified[name] = {
c: getattr(self, c) for c in 'abcdefghijkl'
}
def __init__(self, **kwargs):
self.on_trait_change(self._notify)
super(OrderTraits, self).__init__(**kwargs)
def test_notification_order():
    """Values seen during notifications must not depend on kwarg dict order."""
    names = 'abcdefghijkl'
    kwargs = dict(zip(names, names))
    # Without kwargs, no notifications fire at all.
    nt.assert_equal(OrderTraits().notified, {})
    obj = OrderTraits(**kwargs)
    # Every notification should have observed the complete, final state.
    expected = {name: kwargs for name in names}
    nt.assert_equal(obj.notified, expected)
###
# Traits for Forward Declaration Tests
###
class ForwardDeclaredInstanceTrait(HasTraits):
value = ForwardDeclaredInstance('ForwardDeclaredBar', allow_none=True)
class ForwardDeclaredTypeTrait(HasTraits):
value = ForwardDeclaredType('ForwardDeclaredBar', allow_none=True)
class ForwardDeclaredInstanceListTrait(HasTraits):
value = List(ForwardDeclaredInstance('ForwardDeclaredBar'))
class ForwardDeclaredTypeListTrait(HasTraits):
value = List(ForwardDeclaredType('ForwardDeclaredBar'))
###
# End Traits for Forward Declaration Tests
###
###
# Classes for Forward Declaration Tests
###
class ForwardDeclaredBar(object):
pass
class ForwardDeclaredBarSub(ForwardDeclaredBar):
pass
###
# End Classes for Forward Declaration Tests
###
###
# Forward Declaration Tests
###
class TestForwardDeclaredInstanceTrait(TraitTestBase):
obj = ForwardDeclaredInstanceTrait()
_default_value = None
_good_values = [None, ForwardDeclaredBar(), ForwardDeclaredBarSub()]
_bad_values = ['foo', 3, ForwardDeclaredBar, ForwardDeclaredBarSub]
class TestForwardDeclaredTypeTrait(TraitTestBase):
obj = ForwardDeclaredTypeTrait()
_default_value = None
_good_values = [None, ForwardDeclaredBar, ForwardDeclaredBarSub]
_bad_values = ['foo', 3, ForwardDeclaredBar(), ForwardDeclaredBarSub()]
class TestForwardDeclaredInstanceList(TraitTestBase):
obj = ForwardDeclaredInstanceListTrait()
def test_klass(self):
"""Test that the instance klass is properly assigned."""
self.assertIs(self.obj.traits()['value']._trait.klass, ForwardDeclaredBar)
_default_value = []
_good_values = [
[ForwardDeclaredBar(), ForwardDeclaredBarSub()],
[],
]
_bad_values = [
ForwardDeclaredBar(),
[ForwardDeclaredBar(), 3, None],
'1',
# Note that this is the type, not an instance.
[ForwardDeclaredBar],
[None],
None,
]
class TestForwardDeclaredTypeList(TraitTestBase):
obj = ForwardDeclaredTypeListTrait()
def test_klass(self):
"""Test that the instance klass is properly assigned."""
self.assertIs(self.obj.traits()['value']._trait.klass, ForwardDeclaredBar)
_default_value = []
_good_values = [
[ForwardDeclaredBar, ForwardDeclaredBarSub],
[],
]
_bad_values = [
ForwardDeclaredBar,
[ForwardDeclaredBar, 3],
'1',
# Note that this is an instance, not the type.
[ForwardDeclaredBar()],
[None],
None,
]
###
# End Forward Declaration Tests
###
class TestDynamicTraits(TestCase):
def setUp(self):
self._notify1 = []
def notify1(self, name, old, new):
self._notify1.append((name, old, new))
def test_notify_all(self):
class A(HasTraits):
pass
a = A()
self.assertTrue(not hasattr(a, 'x'))
self.assertTrue(not hasattr(a, 'y'))
# Dynamically add trait x.
a.add_traits(x=Int())
self.assertTrue(hasattr(a, 'x'))
self.assertTrue(isinstance(a, (A, )))
# Dynamically add trait y.
a.add_traits(y=Float())
self.assertTrue(hasattr(a, 'y'))
self.assertTrue(isinstance(a, (A, )))
self.assertEqual(a.__class__.__name__, A.__name__)
# Create a new instance and verify that x and y
# aren't defined.
b = A()
self.assertTrue(not hasattr(b, 'x'))
self.assertTrue(not hasattr(b, 'y'))
# Verify that notification works like normal.
a.on_trait_change(self.notify1)
a.x = 0
self.assertEqual(len(self._notify1), 0)
a.y = 0.0
self.assertEqual(len(self._notify1), 0)
a.x = 10
self.assertTrue(('x', 0, 10) in self._notify1)
a.y = 10.0
self.assertTrue(('y', 0.0, 10.0) in self._notify1)
self.assertRaises(TraitError, setattr, a, 'x', 'bad string')
self.assertRaises(TraitError, setattr, a, 'y', 'bad string')
self._notify1 = []
a.on_trait_change(self.notify1, remove=True)
a.x = 20
a.y = 20.0
self.assertEqual(len(self._notify1), 0)
def test_enum_no_default():
class C(HasTraits):
t = Enum(['a', 'b'])
c = C()
c.t = 'a'
assert c.t == 'a'
c = C()
with nt.assert_raises(TraitError):
t = c.t
c = C(t='b')
assert c.t == 'b'
def test_default_value_repr():
class C(HasTraits):
t = Type('traitlets.HasTraits')
t2 = Type(HasTraits)
n = Integer(0)
lis = List()
d = Dict()
nt.assert_equal(C.t.default_value_repr(), "'traitlets.HasTraits'")
nt.assert_equal(C.t2.default_value_repr(), "'traitlets.traitlets.HasTraits'")
nt.assert_equal(C.n.default_value_repr(), '0')
nt.assert_equal(C.lis.default_value_repr(), '[]')
nt.assert_equal(C.d.default_value_repr(), '{}')
class TransitionalClass(HasTraits):
d = Any()
@default('d')
def _d_default(self):
return TransitionalClass
parent_super = False
calls_super = Integer(0)
@default('calls_super')
def _calls_super_default(self):
return -1
@observe('calls_super')
@observe_compat
def _calls_super_changed(self, change):
self.parent_super = change
parent_override = False
overrides = Integer(0)
@observe('overrides')
@observe_compat
def _overrides_changed(self, change):
self.parent_override = change
class SubClass(TransitionalClass):
def _d_default(self):
return SubClass
subclass_super = False
def _calls_super_changed(self, name, old, new):
self.subclass_super = True
super(SubClass, self)._calls_super_changed(name, old, new)
subclass_override = False
def _overrides_changed(self, name, old, new):
self.subclass_override = True
def test_subclass_compat():
obj = SubClass()
obj.calls_super = 5
nt.assert_true(obj.parent_super)
nt.assert_true(obj.subclass_super)
obj.overrides = 5
nt.assert_true(obj.subclass_override)
nt.assert_false(obj.parent_override)
nt.assert_is(obj.d, SubClass)
class DefinesHandler(HasTraits):
parent_called = False
trait = Integer()
@observe('trait')
def handler(self, change):
self.parent_called = True
class OverridesHandler(DefinesHandler):
child_called = False
@observe('trait')
def handler(self, change):
self.child_called = True
def test_subclass_override_observer():
obj = OverridesHandler()
obj.trait = 5
nt.assert_true(obj.child_called)
nt.assert_false(obj.parent_called)
class DoesntRegisterHandler(DefinesHandler):
child_called = False
def handler(self, change):
self.child_called = True
def test_subclass_override_not_registered():
"""Subclass that overrides observer and doesn't re-register unregisters both"""
obj = DoesntRegisterHandler()
obj.trait = 5
nt.assert_false(obj.child_called)
nt.assert_false(obj.parent_called)
class AddsHandler(DefinesHandler):
child_called = False
@observe('trait')
def child_handler(self, change):
self.child_called = True
def test_subclass_add_observer():
obj = AddsHandler()
obj.trait = 5
nt.assert_true(obj.child_called)
nt.assert_true(obj.parent_called)
def test_super_args():
    """Extra positional/keyword args must be forwarded to cooperating bases."""
    class SuperRecorder(object):
        def __init__(self, *args, **kwargs):
            self.super_args = args
            self.super_kwargs = kwargs

    class SuperHasTraits(HasTraits, SuperRecorder):
        i = Integer()

    recorded = SuperHasTraits('a1', 'a2', b=10, i=5, c='x')
    # The declared trait is consumed by HasTraits...
    nt.assert_equal(recorded.i, 5)
    # ...unknown keyword names are not turned into attributes...
    assert not hasattr(recorded, 'b')
    assert not hasattr(recorded, 'c')
    # ...and everything else reaches the next base class intact.
    nt.assert_equal(recorded.super_args, ('a1', 'a2'))
    nt.assert_equal(recorded.super_kwargs, {'b': 10, 'c': 'x'})
def test_super_bad_args():
class SuperHasTraits(HasTraits):
a = Integer()
if sys.version_info < (3,):
# Legacy Python, object.__init__ warns itself, instead of raising
w = ['object.__init__']
else:
w = ["Passing unrecoginized arguments"]
with expected_warnings(w):
obj = SuperHasTraits(a=1, b=2)
nt.assert_equal(obj.a, 1)
assert not hasattr(obj, 'b')
| gpl-3.0 |
qPCR4vir/orange3 | Orange/tests/test_remove.py | 3 | 6364 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
from Orange.data import Table
from Orange.preprocess import Remove
from Orange.tests import test_filename
class TestRemover(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test8 = Table(test_filename('test8.tab'))
def test_remove(self):
data = Table("iris")[:5]
attr_flags = sum([Remove.SortValues,
Remove.RemoveConstant,
Remove.RemoveUnusedValues])
class_flags = sum([Remove.SortValues,
Remove.RemoveConstant,
Remove.RemoveUnusedValues])
remover = Remove(attr_flags, class_flags)
new_data = remover(data)
attr_res, class_res = remover.attr_results, remover.class_results
self.assertEqual([a.name for a in new_data.domain.attributes],
["sepal length", "sepal width", "petal length"])
self.assertEqual([c.name for c in new_data.domain.class_vars], [])
self.assertDictEqual(attr_res,
{'removed': 1, 'reduced': 0, 'sorted': 0})
self.assertDictEqual(class_res,
{'removed': 1, 'reduced': 0, 'sorted': 0})
def test_remove_constant_attr(self):
data = self.test8
remover = Remove(Remove.RemoveConstant)
new_data = remover(data)
attr_res, class_res = remover.attr_results, remover.class_results
np.testing.assert_equal(new_data.X, np.hstack((data[:, 1],
data[:, 3])))
np.testing.assert_equal(new_data.Y, data.Y)
self.assertEqual([a.name for a in new_data.domain.attributes],
["c0", "d0"])
self.assertEqual([c.name for c in new_data.domain.class_vars],
["cl1", "cl0", "cl3", "cl4"])
self.assertEqual([a.values for a in new_data.domain.attributes
if a.is_discrete], [['4', '6']])
self.assertEqual([c.values for c in new_data.domain.class_vars
if c.is_discrete], [['1', '2', '3'], ['2']])
self.assertDictEqual(attr_res,
{'removed': 2, 'reduced': 0, 'sorted': 0})
self.assertDictEqual(class_res,
{'removed': 0, 'reduced': 0, 'sorted': 0})
def test_remove_constant_class(self):
data = self.test8
remover = Remove(class_flags=Remove.RemoveConstant)
new_data = remover(data)
attr_res, class_res = remover.attr_results, remover.class_results
np.testing.assert_equal(new_data.X, data.X)
np.testing.assert_equal(new_data.Y, np.hstack((data[:, 4],
data[:, 5])))
self.assertEqual([a.name for a in new_data.domain.attributes],
["c1", "c0", "d1", "d0"])
self.assertEqual([c.name for c in new_data.domain.class_vars],
["cl1", "cl0"])
self.assertEqual([a.values for a in new_data.domain.attributes
if a.is_discrete], [['1'], ['4', '6']])
self.assertEqual([c.values for c in new_data.domain.class_vars
if c.is_discrete], [['1', '2', '3']])
self.assertDictEqual(attr_res,
{'removed': 0, 'reduced': 0, 'sorted': 0})
self.assertDictEqual(class_res,
{'removed': 2, 'reduced': 0, 'sorted': 0})
def test_remove_unused_values_attr(self):
data = self.test8
data = data[1:]
remover = Remove(Remove.RemoveUnusedValues)
new_data = remover(data)
attr_res, class_res = remover.attr_results, remover.class_results
np.testing.assert_equal(new_data.X, data.X)
np.testing.assert_equal(new_data.Y, data.Y)
self.assertEqual([a.name for a in new_data.domain.attributes],
["c1", "c0", "d1", "d0"])
self.assertEqual([c.name for c in new_data.domain.class_vars],
["cl1", "cl0", "cl3", "cl4"])
self.assertEqual([a.values for a in new_data.domain.attributes
if a.is_discrete], [['1'], ['4']])
self.assertEqual([c.values for c in new_data.domain.class_vars
if c.is_discrete], [['1', '2', '3'], ['2']])
self.assertDictEqual(attr_res,
{'removed': 0, 'reduced': 1, 'sorted': 0})
self.assertDictEqual(class_res,
{'removed': 0, 'reduced': 0, 'sorted': 0})
def test_remove_unused_values_class(self):
data = self.test8
data = data[:2]
remover = Remove(class_flags=Remove.RemoveUnusedValues)
new_data = remover(data)
attr_res, class_res = remover.attr_results, remover.class_results
for i in range(len(data)):
for j in range(len(data[i])):
self.assertEqual(new_data[i, j], data[i, j])
self.assertEqual([a.name for a in new_data.domain.attributes],
["c1", "c0", "d1", "d0"])
self.assertEqual([c.name for c in new_data.domain.class_vars],
["cl1", "cl0", "cl3", "cl4"])
self.assertEqual([a.values for a in new_data.domain.attributes
if a.is_discrete], [['1'], ['4', '6']])
self.assertEqual([c.values for c in new_data.domain.class_vars
if c.is_discrete], [['2', '3'], ['2']])
self.assertDictEqual(attr_res,
{'removed': 0, 'reduced': 0, 'sorted': 0})
self.assertDictEqual(class_res,
{'removed': 0, 'reduced': 1, 'sorted': 0})
    def test_remove_unused_values_metas(self):
        """RemoveUnusedValues must also apply to meta attributes."""
        data = Table(test_filename("test9.tab"))
        # A four-row subset leaves some attribute and meta values unused.
        subset = data[:4]
        res = Remove(subset,
                     attr_flags=Remove.RemoveUnusedValues,
                     meta_flags=Remove.RemoveUnusedValues)
        # 'b' and 'c' end up with identical (reduced) value lists.
        self.assertEqual(res.domain["b"].values, res.domain["c"].values)
        self.assertEqual(res.domain["d"].values, ["1", "2"])
        self.assertEqual(res.domain["f"].values, ['1', 'hey'])
| bsd-2-clause |
lvwang2002/python-data-mining-platform | pymining/classifier/smo_csvc.py | 8 | 33742 | import math
import matplotlib
import matplotlib.pyplot as plt
import numpy
import os
import pickle
import psyco
psyco.full()
import sys
import time
from ..common.global_info import GlobalInfo
from ..common.configuration import Configuration
from ..math.matrix import Matrix
from ..math.text2matrix import Text2Matrix
from numpy import *
from operator import itemgetter
class Svm_Param:
    '''Parameter configuration of the C-SVC, read from an XML config node.

    All values are fetched from the child node `nodeName` of `config`
    (a pymining Configuration object).  On any read/parse error the
    exception is printed and the object is left partially initialised.
    '''
    def __init__(self,config, nodeName):
        try:
            self.curNode = config.GetChild(nodeName)
            #-------------------begin model info-------------------------------
            #paths used to persist the trained model and the training log.
            self.modelpath = self.curNode.GetChild("model_path").GetValue()
            self.logpath = self.curNode.GetChild("log_path").GetValue()
            #penalty coefficient C of the slack variables.
            self.C = float(self.curNode.GetChild("c").GetValue())
            #a number near zero used as a convergence threshold.
            self.eps = float(self.curNode.GetChild("eps").GetValue())
            #tolerance on the KKT conditions / numeric clamping.
            self.tolerance = float(self.curNode.GetChild("tolerance").GetValue())
            #-------------------end model info-------------------------------
            #-------------------begin times info-------------------------------
            #log every `times` SMO iterations.
            self.times = int(self.curNode.GetChild("times").GetValue())
            #-------------------end times info-------------------------------
            #-------------------begin kernel info-------------------------------
            self.kernelnode = self.curNode.GetChild("kernel")
            #kernel type: one of Linear / RBF / Polynomial / Sigmoid.
            self.kernel_type = self.kernelnode.GetChild("name").GetValue();
            #comma-separated kernel parameters, kept as strings and
            #converted to float/int at the point of use.
            self.parameters = self.kernelnode.GetChild("parameters").GetValue().split(',')
            #-------------------end kernel info-------------------------------
            #kernel cache budget, in megabytes.
            self.cachesize = float(self.curNode.GetChild("cachesize").GetValue())
            #False -> sparse pymining Matrix, True -> dense numpy matrix.
            self.isdense = False
        except Exception as detail:
            print 'to read configuration file error,detail is:', detail
class Svm_Model:
    '''The trained support vector machine model.

    Instances are pickled to disk by Smo_Csvc.Train and reloaded by
    Smo_Csvc.__init__, so these attribute names are part of the on-disk
    model format.
    '''
    def __init__(self,config, nodeName):
        #the configuration of svc.
        self.config = Svm_Param(config, nodeName)
        #the number of support vectors.
        self.svn = 0
        #lagrange multiplier of each support vector (parallel to `label`).
        self.alpha = []
        #the support vectors (Matrix for sparse, numpy matrix for dense).
        self.sv = None
        #the class label (+1/-1) of each support vector.
        self.label = []
        #the weight vector (only meaningful for the Linear kernel).
        self.w = []
        #the bias (threshold) of the decision function.
        self.b = 0.0
class Svm_Util:
'''utilities of support vector machine.'''
    @staticmethod
    def dot( sparam, trainx, trainy, i, j):
        '''Dot product of row i of trainx with row j of trainy.

        Works for both representations selected by sparam.isdense:
        sparse pymining Matrix (CSR-style rows/cols/vals arrays) or dense
        numpy matrix.  Returns -1 and prints a message on invalid input;
        note -1 is also a legal dot product value, so callers cannot
        reliably distinguish errors.
        '''
        if trainx == None or trainy == None:
            print 'train matrix should not be empty.'
            return -1
        if i < 0 or j < 0:
            print 'index must bigger then zero.'
            return -1
        try:
            #isdense = True -> dense matrix,isdense = False -> sparse matrix,
            isdense = sparam.isdense
            #the training set or sv is sparse matrix.
            if isdense == False:
                if trainx.nCol <> trainy.nCol:
                    print "the dimension of trainx and trainy must be equal. "
                    return -1
                if i >= trainx.nRow or j >= trainy.nRow:
                    print "index i and j out. "
                    return -1
                sum = 0.0
                # Walk the shorter row and binary-search each of its column
                # indices in the longer row; `curlow` advances the search
                # window because both cols arrays are sorted -> O(n log n).
                i1 = trainx.rows[i]
                i2 = trainy.rows[j]
                p1 = 0 #the elements number of row i
                p2 = 0 #the elements number of row j
                # rows is a CSR row-pointer array with nRow+1 entries, so
                # these guards hold for every valid row index.
                if i < len(trainx.rows)-1 :
                    p1 = trainx.rows[i+1] - trainx.rows[i]
                if j < len(trainy.rows)-1 :
                    p2 = trainy.rows[j+1] - trainy.rows[j]
                if p2 <= p1:
                    curlow = i1
                    for k in range(i2, i2+p2):
                        pos = Svm_Util.binary_search(trainx.cols,curlow,i1+p1-1,trainy.cols[k])
                        if pos != -1:
                            sum += trainy.vals[k] * trainx.vals[pos]
                            curlow = pos + 1
                else:
                    curlow = i2
                    for k in range(i1, i1+p1):
                        pos = Svm_Util.binary_search(trainy.cols,curlow,i2+p2-1,trainx.cols[k])
                        if pos != -1:
                            sum += trainx.vals[k] * trainy.vals[pos]
                            curlow = pos + 1
                return sum
            else:
                if i >= trainx.shape[0] or j >= trainy.shape[0]:
                    print "index i or j out. "
                    return -1
                if trainx.ndim <> trainy.ndim or trainx.shape[1] <> trainy.shape[1]:
                    print 'the dimension of two object is not equal.'
                    return -1
                # numpy matrix rows are 1 x n; tolist()[0] flattens them.
                return float(numpy.dot(trainx[i].tolist()[0], trainy[j].tolist()[0]))
        except Exception as detail:
            print 'dot product error,detail:', detail
@staticmethod
def binary_search(collist,low,high,value):
'''sorted list's binary search'''
try:
if low < 0 or high < 0 or low > high or len(collist) <= high or len(collist) <= low:
return -1
if value < collist[low] or value > collist[high]:
return -1
if value == collist[low]:
return low
if value == collist[high]:
return high
l = low
h = high
while(l<=h):
mid = (l+h)/2
if collist[mid] > value:
h = mid - 1
elif collist[mid] < value:
l = mid + 1
else:
return mid
except Exception as detail:
print 'binary_search error detail is:', detail
return -1
    @staticmethod
    def convert(sparam, vec):
        '''Wrap a plain Python list `vec` as a 1-row matrix.

        Sparse mode builds a pymining Matrix in CSR form (rows holds the
        start offset of each row plus a final end offset); dense mode
        returns a numpy matrix (`matrix` comes from `from numpy import *`).
        '''
        if sparam.isdense == False:
            rows = [0]
            cols = []
            vals = []
            for i in range(len(vec)):
                # Only non-zero entries are stored in the sparse form.
                if vec[i] <> 0:
                    cols.append(i)
                    vals.append(vec[i])
            rows.append(len(cols))
            return Matrix(rows, cols, vals, 1, len(vec))
        else:
            return matrix(vec)
    @staticmethod
    def RBF(sparam,trainx, trainy,xi,yi):
        '''RBF kernel exp(-gamma * ||x_i - y_j||^2) between row xi of trainx
        and row yi of trainy; gamma is sparam.parameters[0].'''
        paramlist = sparam.parameters
        # ||x - y||^2 expanded as <x,x> + <y,y> - 2<x,y>.
        eta = Svm_Util.dot(sparam, trainx, trainx,xi,xi)+Svm_Util.dot(sparam, trainy, trainy,yi,yi) - 2*Svm_Util.dot(sparam, trainx, trainy,xi,yi)
        res = 0.0
        if eta <0:
            # eta can only go negative through floating-point rounding (or a
            # -1 error return from dot).  NOTE(review): this branch uses
            # exp(+tolerance*gamma) while the normal branch uses a negative
            # exponent -- confirm the sign is intended.
            res = math.exp(sparam.tolerance*float(paramlist[0]))
        else:
            res = math.exp(-eta*float(paramlist[0]))
        return res
    @staticmethod
    def kernel_function(sparam, trainx, trainy):
        '''Return a closure k(xi, yi) computing the configured kernel between
        row xi of trainx and row yi of trainy.

        Kernel parameters are read lazily from sparam.parameters:
        Linear  : <x,y> + c               (c = parameters[0])
        RBF     : exp(-gamma*||x-y||^2)   (gamma = parameters[0])
        Polynomial: (a*<x,y> + b)**d      (a, b, d = parameters[0..2])
        Sigmoid : tanh(a*<x,y> + b)       (a, b = parameters[0..1])
        Returns None for an unknown type; check_config() is expected to
        reject such configurations beforehand.
        '''
        paramlist = sparam.parameters
        kernel_type = sparam.kernel_type
        if kernel_type == 'RBF':
            return lambda xi,yi: Svm_Util.RBF(sparam,trainx, trainy,xi,yi)
        elif kernel_type == 'Linear':
            return lambda xi,yi:Svm_Util.dot(sparam, trainx, trainy,xi,yi) + float(paramlist[0])
        elif kernel_type == 'Polynomial':
            return lambda xi,yi: (float(paramlist[0]) * Svm_Util.dot(sparam, trainx, trainy, xi,yi) + float(paramlist[1])) ** int(paramlist[2])
        elif kernel_type == 'Sigmoid':
            return lambda xi,yi: math.tanh(float(paramlist[0]) * Svm_Util.dot(sparam, trainx, trainy,xi,yi) + float(paramlist[1]))
@staticmethod
def check_float_int(p,t):
'''to Check the value of p can be transformed into a float (t = 0) or integer (t = 1).'''
try:
if t == 0:
tmp = float(p)
elif t == 1:
tmp = int(p)
except:
tmp = ''
if (isinstance(tmp,float) and t == 0) or (isinstance(tmp,int) and t == 1):
return True
else:
return False
@staticmethod
def draw_scatter(xOffsets, yOffsets, xlabel = 'X', ylabel = 'Y', colors = None):
'''to draw draw_scatter picture.'''
if (not isinstance(xOffsets,list)) or (not isinstance(yOffsets,list)):
print 'xOffsets and yOffsets should be list type.'
return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(0,1), ylim=(0,1))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if colors == None:
ax.scatter(xOffsets, yOffsets)
else:
ax.scatter(xOffsets, yOffsets, c=colors, alpha=0.75)
plt.show()
file_name = 'mining/scatter_' + time.ctime() + '.png'
plt.savefig(file_name)
@staticmethod
def draw_plot(xOffsets, yOffsets, xl = 'X', yl = 'Y', title = 'figure'):
'''to draw plot picture.'''
if (not isinstance(xOffsets,list)) or (not isinstance(yOffsets,list)):
print 'xOffsets and yOffsets should be list type.'
return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(0,1), ylim=(0,1))
plt.xlabel(xl)
plt.ylabel(yl)
ax.plot(xOffsets, yOffsets, lw=3, color='purple')
plt.title(title)
plt.show()
file_name = 'mining/plot_' + time.ctime() + '.png'
plt.savefig(file_name)
class Smo_Csvc:
'''a support vector machine classifier using 'C' to balance empirical risk and structural risk.'''
    def __init__(self,config, nodeName, loadFromFile = False, recoverFromLog = False, npRatio = 1):
        '''to initialize csvc.
        config: configuration file.
        nodeName: xml file's node.
        loadFromFile: Whether to read the csvc model from disk.
        recoverFromLog: Whether to recover the training procedure from disk.
        npRatio: negative samples / positive samples.
        '''
        self.istrained = False
        #lagrange multipliers, one per training sample.
        self.alpha = []
        #gradient array of the dual objective.
        self.G = []
        #weight vector (Linear kernel only).
        self.w = []
        #bias of the decision function.
        self.b = 0.0
        #kernel cache: maps "i-j" string keys to K(i, j).
        self.kcache = {}
        #initialize svm model (reads the configuration node).
        self.model = Svm_Model(config, nodeName)
        #class-imbalance ratio: negative sample count / positive count.
        self.npRatio = npRatio
        #Scale C per class so the minority class is penalised with the
        #full C and the majority class with C divided by the ratio.
        #C1 applies to negative (-1) samples, C2 to positive (+1) samples.
        if self.npRatio > 1:
            self.C1 = self.model.config.C / self.npRatio
            self.C2 = self.model.config.C
        else:
            self.C1 = self.model.config.C * self.npRatio
            self.C2 = self.model.config.C
        #Load a previously trained model; the file holds a pickled string
        #which itself pickles the Svm_Model (double pickling).
        if (loadFromFile):
            try:
                f = open(self.model.config.modelpath, "r")
                modelStr = pickle.load(f)
                self.model = pickle.loads(modelStr)
                f.close()
                self.istrained = True
            except IOError:
                pass
        #Resume an interrupted training run from the periodic log dump
        #written by Train() (same double-pickled layout).
        if recoverFromLog:
            try:
                f = open(self.model.config.logpath, "r")
                modelStr = pickle.load(f)
                [self.alpha,self.G,self.w,self.b,self.model] = pickle.loads(modelStr)
                f.close()
            except IOError:
                pass
    def check_config(self):
        '''Validate the kernel configuration.

        Checks that the kernel type is known and that the number and types
        of its parameters match what kernel_function() will later convert:
        Linear/RBF need one float, Polynomial two floats and an int,
        Sigmoid two floats.  Returns True when valid, False otherwise
        (printing a diagnostic for each failure).
        '''
        kernel = ['Linear', 'RBF', 'Polynomial', 'Sigmoid']
        if self.model.config.kernel_type not in kernel:
            print '~kernel type error.'
            return False
        if self.model.config.kernel_type == 'Linear' or self.model.config.kernel_type == 'RBF':
            if len(self.model.config.parameters) != 1:
                print '~Wrong number of parameters.'
                return False
            if not Svm_Util.check_float_int(self.model.config.parameters[0],0):
                print '~Parameter type error. detail:',self.model.config.parameters[0],'should be float type.'
                return False
            else:
                return True
        if self.model.config.kernel_type == 'Polynomial':
            if len(self.model.config.parameters) != 3:
                print '~Wrong number of parameters.'
                return False
            if not (Svm_Util.check_float_int(self.model.config.parameters[0],0) and Svm_Util.check_float_int(self.model.config.parameters[1],0)):
                print '~Parameter type error. detail:',self.model.config.parameters[0], ' and ',self.model.config.parameters[1],'should be float type.'
                return False
            elif not Svm_Util.check_float_int(self.model.config.parameters[2],1):
                print '~Parameter type error. detail:',self.model.config.parameters[2], 'should be integer type.'
                return False
            else:
                return True
        if self.model.config.kernel_type == 'Sigmoid':
            if len(self.model.config.parameters) != 2:
                print '~Wrong number of parameters.'
                return False
            if not (Svm_Util.check_float_int(self.model.config.parameters[0],0) and Svm_Util.check_float_int(self.model.config.parameters[1],0)):
                print '~Parameter type error. detail:',self.model.config.parameters[0], ' and ',self.model.config.parameters[1],'should be float type.'
                return False
            else:
                return True
    def GetValueFromKernelCache(self, i, i1, K, trainy):
        '''Return K(i1, i), memoised in self.kcache.

        The kernel is symmetric, so both "i1-i" and "i-i1" keys are
        probed before computing; new values are stored under "i1-i".
        Values below config.tolerance are clamped to 0 (and the clamped
        value is what gets cached).  `trainy` is accepted for signature
        uniformity but not used here.
        '''
        key1 = '%s%s%s'%(str(i1), '-', str(i))
        key2 = '%s%s%s'%(str(i), '-', str(i1))
        if self.kcache.has_key(key1):
            k = self.kcache[key1]
        elif self.kcache.has_key(key2):
            k = self.kcache[key2]
        else:
            k = K(i1,i)
            if k < self.model.config.tolerance:
                k = 0
            self.kcache[key1] = k
        return k
    def ReduceCache(self):
        'Shrink the kernel cache when it exceeds the configured budget, keeping only diagonal entries K(i, i) (keys of the form "i-i").'
        try:
            newcache = {}
            # NOTE(review): sys.getsizeof on a dict measures only the hash
            # table itself, not the cached keys/values -- the real memory
            # use is larger than what is compared against cachesize (MB).
            if sys.getsizeof(self.kcache) > self.model.config.cachesize * (1024 **2):
                for key in self.kcache.iterkeys():
                    kl = key.split('-')
                    if kl[0] == kl[1]:
                        newcache[key] = self.kcache[key]
                # Drop the old dict before installing the reduced one.
                self.kcache = 0
                self.kcache = newcache
                print 'To free memory success.'
        except Exception as detail:
            print 'To free memory error,detail:', detail
    def SelectMaximumViolatingPair(self, trainy, K):
        '''Select the working pair (i, j) for the next SMO step.

        i maximises -y_t*G_t over samples that can move up (alpha below
        their class bound C1/C2, or above 0 on the other side); j is then
        chosen among samples that can move down by minimising the
        second-order decrease -b^2/(2a) -- this appears to follow LIBSVM's
        second-order working-set selection (confirm against that paper).
        Returns [i, j, obj]; [-1, -1, inf] signals convergence, i.e. the
        KKT gap G_max - G_min fell below config.eps.
        '''
        i = -1
        G_max = float("-Infinity")
        obj_min = float("Infinity")
        #Pass 1: pick i with the largest violation among the "up" set.
        for t in range(0, len(trainy)):
            if (trainy[t] == 1 and (self.C2 - self.alpha[t]) > self.model.config.tolerance ) or (trainy[t] == -1 and self.alpha[t] > 0):
                if -trainy[t] * self.G[t] >= G_max:
                    i = t
                    G_max = -trainy[t] * self.G[t]
        j = -1
        G_min = float("Infinity")
        #Pass 2: pick j in the "down" set minimising the estimated
        #objective decrease for the pair (i, t).
        for t in range(0, len(trainy)):
            if (trainy[t] == -1 and (self.C1 - self.alpha[t]) > self.model.config.tolerance ) or (trainy[t] == 1 and self.alpha[t] > 0) :
                b = G_max + trainy[t] * self.G[t]
                if -trainy[t] * self.G[t] <= G_min:
                    G_min = -trainy[t] * self.G[t]
                if b > 0:
                    a = 0.0
                    try:
                        #a = K(i,i) + K(t,t) - 2K(i,t), clamped positive.
                        a = self.GetValueFromKernelCache(i, i, K, trainy) +self.GetValueFromKernelCache(t, t, K, trainy) - 2 * self.GetValueFromKernelCache(i, t, K, trainy)
                        if a <= 0:
                            a = self.model.config.tolerance
                        if -(b*b)/(2*a) <= obj_min:
                            j = t
                            obj_min = -(b*b)/(2*a)
                    except Exception as detail:
                        print 'error detail is:', detail
        # NOTE(review): when i or j is still -1 this print indexes with -1,
        # i.e. the *last* sample (Python negative indexing) -- misleading
        # output, though the convergence return below is still correct.
        print 'Gap = ',G_max - G_min,'Fi=',trainy[i] * self.G[i],'Fj=',trainy[j] * self.G[j]
        if G_max - G_min < self.model.config.eps:
            return [-1, -1, float("Infinity")]
        return [i, j, obj_min]
    def W(self,trainy, alpha1new,alpha2newclipped,i,j,K):
        '''Evaluate the dual objective restricted to the pair (i, j) at the
        candidate values (alpha1new, alpha2newclipped).

        Used by Train() when the curvature eta is non-positive to decide
        which interval endpoint (L or H) yields the better objective.
        '''
        alpha1 = self.alpha[i]
        alpha2 = self.alpha[j]
        y1 = trainy[i]
        y2 = trainy[j]
        s = y1 * y2
        k11 = self.GetValueFromKernelCache(i, i, K, trainy)
        k22 = self.GetValueFromKernelCache(j, j, K, trainy)
        k12 = self.GetValueFromKernelCache(i, j, K, trainy)
        #Linear terms reconstructed from the current gradient G.
        w1 = alpha1new * (y1 * (-y1*self.G[i]) + alpha1 * k11 + s * alpha2 * k12)
        w1 += alpha2newclipped * (y2 * (-y2*self.G[j]) + alpha2 * k22 + s * alpha1 * k12)
        #Quadratic terms of the pair.
        w1 = w1 - k11 * alpha1new * alpha1new/2 - k22 * alpha2newclipped * alpha2newclipped/2 - s * k12 * alpha1new * alpha2newclipped
        return w1
    def calculate_auc(self,output,label):
        '''Compute the ROC AUC from (score, label) pairs and draw the curve.

        output: list of [decision_value, label] pairs; label: the true
        labels (+1/-1), used only to count positives/negatives.  The pairs
        are sorted by decreasing score, the ROC is traced point by point,
        and the area is accumulated with a rectangle rule (which slightly
        underestimates the trapezoidal AUC).  Side effect: saves/shows the
        ROC plot via Svm_Util.draw_plot.  Returns 0.0 on missing input,
        otherwise the AUC.
        '''
        if output == None or label == None:
            return 0.0
        pos, neg = 0, 0
        for i in range(len(label)):
            if label[i]>0:
                pos+=1
            else:
                neg+=1
        #Rank by decreasing decision value.
        output = sorted(output, key=itemgetter(0), reverse=True)
        tprlist = []
        fprlist = []
        tp, fp = 0., 0.
        for i in range(len(output)):
            if output[i][1]>0:
                tp+=1
            else:
                fp+=1
            tprlist.append(tp/pos)
            fprlist.append(fp/neg)
        auc = 0.
        prev_rectangular_right_vertex = 0
        tpr_max = 0
        fpr_max = 0
        for i in range(0,len(fprlist)):
            if tpr_max < tprlist[i]:
                tpr_max = tprlist[i]
            if fpr_max < fprlist[i]:
                fpr_max = fprlist[i]
            #Add a rectangle each time the FPR advances.
            if fprlist[i] != prev_rectangular_right_vertex:
                auc += (fprlist[i] - prev_rectangular_right_vertex) * tprlist[i]
                prev_rectangular_right_vertex = fprlist[i]
        Svm_Util.draw_plot(fprlist, tprlist, 'FPR', 'TPR', 'ROC Curve(AUC = %.4f)' % auc)
        return auc
    def Train(self,trainx,trainy):
        '''To train classifier.
        trainx is training matrix and trainy is classifying label

        Runs SMO until no maximally violating pair remains.  Returns
        [elapsed_seconds, iterations] on success, 0 when matrix/label
        sizes disagree and [0, 0] when the configuration is invalid.
        Side effects: fills self.model with the support vectors, pickles
        it to config.modelpath, periodically logs to config.logpath and
        deletes that log on success.
        '''
        #Sanity check: one label per training row.
        if self.model.config.isdense == False:
            if len(trainy) != trainx.nRow:
                print "ERROR!, trainx.nRow should == len(y)"
                return 0
        else:
            if trainx.shape[0] != len(trainy):
                print "ERROR!, trainx.shape[0] should == trainy.shape[0]"
                return 0
        #to check configuration.
        if not self.check_config():
            return [0,0]
        #to initialize all lagrange multipliers with zero.
        nrow = 0
        if self.model.config.isdense == True:
            nrow = trainx.shape[0]
        else:
            nrow = trainx.nRow
        ncol = 0
        if self.model.config.isdense == True:
            ncol = trainx.shape[1]
        else:
            ncol = trainx.nCol
        for i in range(0,nrow):
            self.alpha.append(0.0)
        #With all alpha at zero the dual gradient starts at -1 everywhere.
        for i in range(0,nrow):
            self.G.append(-1.0)
        #to initialize w with zero.
        for j in range(0,ncol):
            self.w.append(float(0))
        #to get kernel function.
        K = Svm_Util.kernel_function(self.model.config, trainx, trainx)
        #the value of objective function.
        obj = 0.0
        #the iterations.
        iterations = 0
        starttime = time.time()
        while True:
            begin = time.time()
            #to select maximum violating pair.
            [i, j, obj] = self.SelectMaximumViolatingPair(trainy, K)
            if j == -1:
                break
            #-------------------------------------------------------------------begin to optimize lagrange multipiers i and j-------------------------------------------------------
            L = 0.0 #the lower bound.
            H = 0.0 #the upper bound
            y1 = trainy[i] #sample i's label.
            y2 = trainy[j] #sample j's label.
            s = y1 * y2
            alpha1 = self.alpha[i] #sample i's alpha value.
            alpha2 = self.alpha[j] #sample j's alpha value.
            #to store old alpha value of sample i and j.
            oldalphai = self.alpha[i]
            oldalphaj = self.alpha[j]
            #eta = K(i,i) + K(j,j) - 2K(i,j): curvature along the pair.
            eta = self.GetValueFromKernelCache(i, i, K, trainy) +self.GetValueFromKernelCache(j, j, K, trainy) - 2 * self.GetValueFromKernelCache(i, j, K, trainy)
            #Box constraints: clip alpha2 to [L, H] so that both multipliers
            #stay in their class box and the equality constraint holds.
            if y1*y2 == -1:
                gamma = alpha2 - alpha1
                if y1 == -1:
                    if gamma > 0:
                        L = gamma
                        H = self.C2
                    else:
                        L = 0
                        H = self.C1 + gamma
                else:
                    if gamma > 0:
                        L = gamma
                        H = self.C1
                    else:
                        L = 0
                        H = self.C2 + gamma
            if y1*y2 == 1:
                gamma = alpha2 + alpha1
                if y1 == 1:
                    if gamma - self.C2 > 0:
                        L = gamma - self.C2
                        H = self.C2
                    else:
                        L = 0
                        H = gamma
                else:
                    if gamma - self.C1 > 0:
                        L = gamma - self.C1
                        H = self.C1
                    else:
                        L = 0
                        H = gamma
            if -eta < 0:
                #Positive curvature: analytic Newton step, then clip.
                alpha2new = alpha2 + y2 * (y1*self.G[i] - y2*self.G[j])/eta
                if alpha2new < L:
                    alpha2newclipped = L
                elif alpha2new > H:
                    alpha2newclipped = H
                else:
                    alpha2newclipped = alpha2new
            else:
                #Non-positive curvature: evaluate the objective at both
                #interval endpoints and keep the better one.
                w1 = self.W(trainy, alpha1 + s * (alpha2 - L),L,i,j,K)
                w2 = self.W(trainy, alpha1 + s * (alpha2 - H),H,i,j,K)
                if w1 - w2 > self.model.config.eps:
                    alpha2newclipped = L
                elif w2 - w1 > self.model.config.eps:
                    alpha2newclipped = H
                else:
                    alpha2newclipped = alpha2
            #Derive alpha1 from the equality constraint, then snap it back
            #into its box and compensate alpha2 accordingly.
            alpha1new = alpha1 + s * (alpha2 - alpha2newclipped)
            if alpha1new < self.model.config.tolerance:
                alpha2newclipped += s * alpha1new
                alpha1new = 0
            elif y1 == -1 and alpha1new > self.C1:
                alpha2newclipped += s * (alpha1new - self.C1)
                alpha1new = self.C1
            elif y1 == 1 and alpha1new > self.C2:
                alpha2newclipped += s * (alpha1new - self.C2)
                alpha1new = self.C2
            self.alpha[i] = alpha1new
            self.alpha[j] = alpha2newclipped
            #Linear kernel: maintain the explicit weight vector w.
            if self.model.config.kernel_type == 'Linear':
                ncol = 0
                if self.model.config.isdense == True:
                    ncol = trainx.shape[1]
                else:
                    ncol = trainx.nCol
                if self.model.config.isdense == True:
                    self.w += (alpha1new - alpha1) * y1 * trainx[i] + (alpha2newclipped - alpha2) * y2 *trainx[j]
                else:
                    i1 = trainx.rows[i]
                    i2 = trainx.rows[j]
                    p1 = 0 #the elements number of row i
                    p2 = 0 #the elements number of row j
                    if i < len(trainx.rows)-1 :
                        p1 = trainx.rows[i+1] - trainx.rows[i]
                    if j < len(trainx.rows)-1 :
                        p2 = trainx.rows[j+1] - trainx.rows[j]
                    #NOTE(review): these bounds stop one element short of the
                    #row end (p1-1 / p2-1), unlike Svm_Util.dot which walks
                    #range(i1, i1+p1) -- confirm the last stored element of
                    #each row is meant to be skipped here.
                    for k in range(i1, i1+p1-1):
                        self.w[trainx.cols[k]] += (alpha1new - alpha1) * y1 * trainx.vals[k]
                    for k in range(i2, i2+p2-1):
                        self.w[trainx.cols[k]] += (alpha2newclipped - alpha2) * y2 * trainx.vals[k]
            #-------------------------------------------------------------------end to optimize lagrange multipiers i and j-------------------------------------------------------
            deltaalphai = self.alpha[i] - oldalphai
            deltaalphaj = self.alpha[j] - oldalphaj
            #Incremental gradient update for the two changed multipliers.
            for t in range(0, nrow):
                try:
                    part1 = trainy[t] * trainy[i] * self.GetValueFromKernelCache(t, i, K, trainy) * deltaalphai
                    part2 = trainy[t] * trainy[j] * self.GetValueFromKernelCache(t, j, K, trainy) * deltaalphaj
                    self.G[t] += part1 + part2
                except Exception as detail:
                    print 'error detail is:', detail
            print 'alpha', i, '=',self.alpha[i],'alpha', j,'=', self.alpha[j], 'the objective function value =', obj
            print time.time() - begin
            iterations += 1
            if iterations%self.model.config.times == 0:
                #Periodic checkpoint so training can be resumed via
                #recoverFromLog, then trim the kernel cache.
                f = open(self.model.config.logpath, "w")
                log = [self.alpha,self.G,self.w,self.b,self.model]
                modelStr = pickle.dumps(log,1)
                pickle.dump(modelStr, f)
                f.close()
                self.ReduceCache()
        #Samples with alpha > 0 are the support vectors.
        index = []
        for i in range(0, len(self.alpha)):
            if self.alpha[i] > 0:
                index.append(i)
                self.model.alpha.append(self.alpha[i])
                self.model.label.append(trainy[i])
        self.model.svn = len(index)
        self.model.w = self.w
        #--------------------------------------------------------
        #Bias: average of the estimates from negative and positive SVs.
        b1 = 0.0
        b2 = 0.0
        c1 = 0
        for i in range(0,len(index) ):
            if trainy[index[i]] == -1:
                b1 += -trainy[index[i]] * self.G[index[i]]
                c1 += 1
            else:
                b2 += -trainy[index[i]] * self.G[index[i]]
        #NOTE(review): divides by zero when every SV has the same label.
        self.b = ((b1/c1)+(b2/(self.model.svn - c1)))/2
        self.model.b = self.b
        print 'the threshold value =', self.b
        #--------------------------------------------------------
        #Copy the support vector rows into the model (dense numpy matrix
        #or a freshly assembled sparse CSR Matrix).
        if self.model.config.isdense == True:
            sv = []
            for i in range(0, len(index)):
                sv.append(trainx[index[i]].tolist()[0])
            self.model.sv = matrix(sv)
        else:
            rows = []
            cols = []
            vals = []
            pos = 0
            for i in range(0, len(index)):
                i1 = trainx.rows[index[i]]
                p1 = 0
                if index[i] < len(trainx.rows)-1 :
                    p1 = trainx.rows[index[i]+1] - trainx.rows[index[i]]
                k = 0
                while(k < p1):
                    cols.append(trainx.cols[i1 + k])
                    vals.append(trainx.vals[i1 + k])
                    k += 1
                rows.append(pos)
                pos += p1
            rows.append(len(vals))
            self.model.sv = Matrix(rows, cols, vals ,self.model.svn, trainx.nCol )
        #Persist the trained model (double pickling, see __init__).
        f = open(self.model.config.modelpath, "w")
        modelStr = pickle.dumps(self.model, 1)
        pickle.dump(modelStr, f)
        f.close()
        self.istrained = True
        try:
            os.remove(self.model.config.logpath)
        except:
            pass
        return [time.time()-starttime,iterations]
def Test(self,testx,testy):
'''To test samples.
self.testx is training matrix and self.testy is classifying label'''
TP = 0.0
TN = 0.0
FP = 0.0
FN = 0.0
Recall = 0.0
Precision = 0.0
Accuracy = 0.0
Fbeta1 = 0.0
Fbeta2 = 0.0
AUCb = 0.0
TPR= 0.0
FPR = 0.0
pn = 0.0
nn = 0.0
tprlist = []
fprlist = []
outputlist = []
for i in range(len(testy)):
if testy[i] == 1:
pn = pn + 1
else:
nn = nn + 1
#check parameter
if (not self.istrained):
print "Error!, not trained!"
return False
K = Svm_Util.kernel_function(self.model.config, self.model.sv, testx)
nrow = 0
if self.model.config.isdense == True:
nrow = testx.shape[0]
else:
nrow = testx.nRow
for i in range(0, nrow):
fxi = 0.0
if self.model.config.kernel_type == 'Linear':
fxi = Svm_Util.dot(self.model.config, Svm_Util.convert(self.model.config, self.model.w), testx, 0, i) + self.model.b
else:
for j in range(0, self.model.svn):
fxi += self.model.alpha[j] * self.model.label[j] * K(j, i)
fxi += self.model.b
if testy[i] == 1 and fxi >=0:
TP += 1
if testy[i] == -1 and fxi <=0:
TN += 1
if testy[i] == -1 and fxi >=0:
FP += 1
if testy[i] == 1 and fxi <=0:
FN += 1
#to calculate ROC value.
TPR = TP/pn
FPR = FP/nn
tprlist.append(TPR)
fprlist.append(FPR)
outputlist.append([fxi,testy[i]])
print i,': Actual output is', fxi, 'It\'s label is', testy[i]
#to calculate auc
auc = 0.
prev_rectangular_right_vertex = 0
tpr_max = 0
fpr_max = 0
for i in range(0,len(fprlist)):
if tpr_max < tprlist[i]:
tpr_max = tprlist[i]
if fpr_max < fprlist[i]:
fpr_max = fprlist[i]
if fprlist[i] != prev_rectangular_right_vertex:
auc += (fprlist[i] - prev_rectangular_right_vertex) * tprlist[i]
prev_rectangular_right_vertex = fprlist[i]
try:
Recall = TP/(TP + FN)
Precision = TP/(TP + FP)
Accuracy = (TP + TN)/(TP + TN + FP + FN)
Fbeta1 = 2 * (Recall * Precision)/(1 + Precision + Recall)
Fbeta2 = 5 * (Recall * Precision)/(4 + Precision + Recall)
AUCb = (Recall + TN/(FP + TN))/2
print 'Recall = ', Recall, 'Precision = ', Precision,'Accuracy = ', Accuracy,'\n', 'F(beta=1) = ', Fbeta1, 'F(beta=2) = ', Fbeta2, 'AUCb = ',AUCb
except Exception as detail:
print 'to test error,detail is:', detail
self.calculate_auc(outputlist,testy)
return [Recall,Precision,Accuracy,Fbeta1,Fbeta2,AUCb,auc]
| bsd-3-clause |
sloanyang/android_kernel_zte_u950 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
	# Called once by perf after the last event; report unhandled events.
	print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
		# Handler for irq:softirq_entry: print the common fields then the
		# softirq vector, resolved to its symbolic name via symbol_str.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
		# Handler for kmem:kmalloc: print the common fields then the
		# allocation details, with gfp_flags decoded via flag_str.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "call_site=%u, ptr=%u, bytes_req=%u, " \
		"bytes_alloc=%u, gfp_flags=%s\n" % \
		(call_site, ptr, bytes_req, bytes_alloc,
		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  `unhandled` is a
    # Core.autodict: indexing a missing key presumably yields a nested
    # autodict, so the first += raises TypeError and seeds the count at 1
    # (confirm against Core.autodict).
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Print the fields common to every event; the trailing comma keeps
	# the handler's own output on the same line.
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Print common trace fields that perf does not pass as handler args;
    # each is fetched back from the event context via the perf API.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a table of every event type that lacked a handler, with counts.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
        "-----------"),
    for event_name in keys:
	print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
eestay/edx-platform | lms/djangoapps/shoppingcart/migrations/0005_auto__add_paidcourseregistrationannotation__add_field_orderitem_report.py | 114 | 9808 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        '''Apply: create the shoppingcart_paidcourseregistrationannotation
        table and add the report_comments column to shoppingcart_orderitem.'''
        # Adding model 'PaidCourseRegistrationAnnotation'
        db.create_table('shoppingcart_paidcourseregistrationannotation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128, db_index=True)),
            ('annotation', self.gf('django.db.models.fields.TextField')(null=True)),
        ))
        db.send_create_signal('shoppingcart', ['PaidCourseRegistrationAnnotation'])
        # Adding field 'OrderItem.report_comments'
        db.add_column('shoppingcart_orderitem', 'report_comments',
                      self.gf('django.db.models.fields.TextField')(default=''),
                      keep_default=False)
    def backwards(self, orm):
        '''Revert: drop the annotation table and the report_comments column
        (the column's data is lost on rollback).'''
        # Deleting model 'PaidCourseRegistrationAnnotation'
        db.delete_table('shoppingcart_paidcourseregistrationannotation')
        # Deleting field 'OrderItem.report_comments'
        db.delete_column('shoppingcart_orderitem', 'report_comments')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
sealcode/gpandoc | ui/howToUse_ui.py | 1 | 3691 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'howToUse.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Generated UI definition for the "How to use" instruction dialog.

    NOTE(review): generated by pyuic5 from howToUse.ui -- edit the .ui file
    instead; manual changes to this file are lost on regeneration.
    """
    def setupUi(self, Dialog):
        """Build the widget tree and layouts for *Dialog* (a QDialog)."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(185, 153)
        Dialog.setMinimumSize(QtCore.QSize(0, 0))
        Dialog.setMaximumSize(QtCore.QSize(16777215, 16777215))
        # Outer layout -> vertical stack holding the label row and button row.
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Instruction label, horizontally centred between two spacers.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.label_2 = QtWidgets.QLabel(Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
        self.label_2.setSizePolicy(sizePolicy)
        self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout.addWidget(self.label_2)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.verticalLayout.addLayout(self.verticalLayout_2)
        # OK button row, horizontally centred between two spacers.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem2)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
        self.buttonBox.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.buttonBox.setFont(font)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.horizontalLayout_2.addWidget(self.buttonBox)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem3)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.verticalLayout_3.addLayout(self.verticalLayout)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Apply the (Polish) user-visible strings via Qt's translate hook."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Instrukcja"))
        self.label_2.setText(_translate("Dialog", "Instrukcja użycia:\n"
"\n"
"1) Załaduj pliki\n"
"2) Wybierz przepis\n"
"3) Podaj nazwę pliku \n"
"4) Wypełnij zmienne\n"
"5) Wygeneruj dokument"))
| lgpl-3.0 |
ermo/privateer_wcu | bases/agricultural.py | 1 | 2835 | import Base
# Base layout script for the "agricultural" base type: builds the landing
# pad, main concourse, bar, guild rooms and commodity exchange, then links
# them together.  Executed top-to-bottom by the game engine on base entry.
import dynamic_mission
import VS
import pleasure_land
# Empty suffix selects the default (daytime) sprite set.
time_of_day=''
# Room handles default to -1 (invalid) until the rooms are created below.
bar=-1
weap=-1
room0=-1
# Start the base's background music playlist.
plist=VS.musicAddList('agricultural.m3u')
VS.musicPlayList(plist)
dynamic_mission.CreateMissions()
# Landing pad room comes from the shared pleasure/agricultural helper.
room0 = pleasure_land.MakePleasureAgriculturalLanding(time_of_day)
room = Base.Room ('Main Concourse')
room1 = room
# Layered concourse sprites: background, water animation, and walkers.
Base.Texture (room, 'background', 'bases/agricultural/Helen_Concourse'+time_of_day+'.spr', 0, 0)
Base.Texture (room, 'wtr', 'bases/agricultural/Helen_Concourse_wtr'+time_of_day+'.spr', 0, 0.75)
Base.Texture (room, 'wk0', 'bases/agricultural/Helen_Concourse_wk0'+time_of_day+'.spr', 0.6875, -0.83)
Base.Texture (room, 'wk1', 'bases/agricultural/Helen_Concourse_wk1'+time_of_day+'.spr', -0.43125, -0.86)
Base.Texture (room, 'wk2', 'bases/agricultural/Helen_Concourse_wk2'+time_of_day+'.spr', 0.275, -0.37)
import commodity_lib
room2 = commodity_lib.MakeCommodity(room1,time_of_day);
Base.Link (room0, 'my_link_id', 0.6025, -0.463333, 0.29, 0.633333, 'Main Concourse', room1)
import bar_lib
# Bar room with four ambient characters ('ag0'..'ag3') at fixed positions.
bar = bar_lib.MakeBar (room1,time_of_day,'agricultural', "bases/bar/Helen_Bar", True, True, 'agricultural', False, [('ag0', -0.9, -0.123046875),('ag1', -0.58125, -0.15234375),('ag2', -0.11875, -0.103515625),('ag3', 0.41875, -0.03515625)])
#ar = bar_lib.MakeBar (room1,time_of_day,'agricultural', "bases/bar/Helen_Bar", True, True, 'agricultural', False, [('ag0', -0.873, -0.1455),('ag1', -0.5638125, -0.1746),('ag2', -0.1151875, -0.1261),('ag3', 0.4061875, -0.0582)])
Base.Link (room1, 'bar', -0.61, -0.113333, 0.2075, 0.25, 'Bar', bar)
# Each guild/dealer is only created when available at this base; otherwise
# a "closed" overlay sprite is drawn instead of a clickable link.
import merchant_guild
if (merchant_guild.Can()):
    merchant = merchant_guild.MakeMerchantGuild (room1,time_of_day)
    Base.Link (room1, 'merchant', 0.03, 0.0933333, 0.22, 0.176667, "Merchant's Guild", merchant)
else:
    Base.Texture (room, 'nomerchant', 'bases/agricultural/nomerchant'+time_of_day+'.spr', 0.15, 0.1796875)
import mercenary_guild
if (mercenary_guild.Can()):
    merchant = mercenary_guild.MakeMercenaryGuild (room1,time_of_day)
    Base.Link (room1, 'mercenary', 0.77, 0.0233333, 0.22, 0.226667, "Mercenary's Guild", merchant)
else:
    Base.Texture (room, 'nomercenary', 'bases/agricultural/nomercenary'+time_of_day+'.spr', 0.8875, 0.130859375)
import weapons_lib
if (weapons_lib.CanRepair()):
    weap = weapons_lib.MakeWeapon (room1,time_of_day)
    Base.Link (room1, 'weapon_room', -0.5725, -0.583333, 0.315, 0.386667, 'Ship_Dealer/Upgrades', weap)
else:
    Base.Texture (room, 'noshipdealer', 'bases/agricultural/noshipdealer'+time_of_day+'.spr', -0.26875, -0.4453125)
Base.Link (room1, 'my_link_id', 0.035, -0.346667, 0.2825, 0.27, 'Landing_Pad', room0)
Base.Link (room1, 'my_link_id', 0.6275, -0.37, 0.3425, 0.17, 'Commodity_Exchange', room2)
Base.Comp (room1, 'my_comp_id', 0.3725, -0.843333, 0.2825, 0.423333, 'Mission_Computer', 'Missions News Info ')
martinjrobins/hobo | pints/_mcmc/_relativistic.py | 1 | 15546 | #
# Relativistic MCMC method
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pints
import numpy as np
class RelativisticMCMC(pints.SingleChainMCMC):
r"""
Implements Relativistic Monte Carlo as described in [1]_.
Uses a physical analogy of a particle moving across a landscape under
Hamiltonian dynamics to aid efficient exploration of parameter space.
Introduces an auxilary variable -- the momentum (``p_i``) of a particle
moving in dimension ``i`` of negative log posterior space -- which
supplements the position (``q_i``) of the particle in parameter space. The
particle's motion is dictated by solutions to Hamilton's equations,
.. math::
dq_i/dt &= \partial H/\partial p_i\\
dp_i/dt &= - \partial H/\partial q_i.
The Hamiltonian is given by,
.. math::
H(q,p) &= U(q) + KE(p)\\
&= -\text{log}(p(q|X)p(q)) +
mc^2 (\Sigma_{i=1}^{d} p_i^2 / (mc^2) + 1)^{0.5}
where ``d`` is the dimensionality of model, ``m`` is the scalar 'mass'
given to each particle (chosen to be 1 as default) and ``c`` is the
speed of light (chosen to be 10 by default).
To numerically integrate Hamilton's equations, it is essential to use a
sympletic discretisation routine, of which the most typical approach is
the leapfrog method,
.. math::
p_i(t + \epsilon/2) &= p_i(t) - (\epsilon/2) d U(q_i(t))/dq_i\\
q_i(t + \epsilon) &= q_i(t) +
\epsilon M^{-1}(p_i(t + \epsilon/2)) p_i(t + \epsilon/2)\\
p_i(t + \epsilon) &= p_i(t + \epsilon/2) -
(\epsilon/2) d U(q_i(t + \epsilon))/dq_i
where relativistic mass (a scalar) is,
.. math::
M(p) = m (\Sigma_{i=1}^{d} p_i^2 / (mc^2) + 1)^{0.5}
In particular, the algorithm we implement follows eqs. in section 2.1 of
[1]_.
Extends :class:`SingleChainMCMC`.
References
----------
.. [1] "Relativistic Monte Carlo". Xiaoyu Lu, Valerio Perrone,
Leonard Hasenclever, Yee Whye Teh, Sebastian J. Vollmer,
2017, Proceedings of Machine Learning Research.
"""
def __init__(self, x0, sigma0=None):
super(RelativisticMCMC, self).__init__(x0, sigma0)
# Set initial state
self._running = False
self._ready_for_tell = False
# Current point in the Markov chain
self._current = None # Aka current_q in the chapter
self._current_energy = None # Aka U(current_q) = -log_pdf
self._current_gradient = None
self._current_momentum = None # Aka current_p
# Current point in the leapfrog iterations
self._momentum = None # Aka p in the chapter
self._position = None # Aka q in the chapter
self._gradient = None # Aka grad_U(q) in the chapter
# Iterations, acceptance monitoring, and leapfrog iterations
self._mcmc_iteration = 0
self._mcmc_acceptance = 0
self._frog_iteration = 0
# Default number of leapfrog iterations
self._n_frog_iterations = 20
# Default integration step size for leapfrog algorithm
self._epsilon = 0.1
self._step_size = None
self._mass = 1
self._c = 10
self.set_leapfrog_step_size(np.diag(self._sigma0))
# Divergence checking
# Create a vector of divergent iterations
self._divergent = np.asarray([], dtype='int')
# Default threshold for Hamiltonian divergences
# (currently set to match Stan)
self._hamiltonian_threshold = 10**3
def ask(self):
""" See :meth:`SingleChainMCMC.ask()`. """
# Check ask/tell pattern
if self._ready_for_tell:
raise RuntimeError('Ask() called when expecting call to tell().')
# Initialise on first call
if not self._running:
self._running = True
self._mc2 = self._mass * self._c**2
# Notes:
# Ask is responsible for updating the position, which is the point
# returned to the user
# Tell is then responsible for updating the momentum, which uses the
# gradient at this new point
# The MCMC step happens in tell, and does not require any new
# information (it uses the log_pdf and gradient of the final point
# in the leapfrog run).
# Very first iteration
if self._current is None:
# Ask for the pdf and gradient of x0
self._ready_for_tell = True
return np.array(self._x0, copy=True)
# First iteration of a run of leapfrog iterations
if self._frog_iteration == 0:
# Sample random momentum for current point using identity cov
self._current_momentum = np.random.multivariate_normal(
np.zeros(self._n_parameters), np.eye(self._n_parameters))
# First leapfrog position is the current sample in the chain
self._position = np.array(self._current, copy=True)
self._gradient = np.array(self._current_gradient, copy=True)
self._momentum = np.array(self._current_momentum, copy=True)
# Perform a half-step before starting iteration 0 below
self._momentum -= self._scaled_epsilon * self._gradient * 0.5
# Perform a leapfrog step for the position
squared = np.sum(np.array(self._momentum)**2)
relativistic_mass = self._mass * np.sqrt(squared / self._mc2 + 1)
self._position += (
self._scaled_epsilon * self._momentum / relativistic_mass)
# Ask for the pdf and gradient of the current leapfrog position
# Using this, the leapfrog step for the momentum is performed in tell()
self._ready_for_tell = True
return np.array(self._position, copy=True)
def current_log_pdf(self):
""" See :meth:`SingleChainMCMC.current_log_pdf()`. """
return -self._current_energy
def divergent_iterations(self):
"""
Returns the iteration number of any divergent iterations
"""
return self._divergent
def epsilon(self):
"""
Returns epsilon used in leapfrog algorithm
"""
return self._epsilon
def hamiltonian_threshold(self):
"""
Returns threshold difference in Hamiltonian value from one iteration to
next which determines whether an iteration is divergent.
"""
return self._hamiltonian_threshold
def leapfrog_steps(self):
"""
Returns the number of leapfrog steps to carry out for each iteration.
"""
return self._n_frog_iterations
def leapfrog_step_size(self):
"""
Returns the step size for the leapfrog algorithm.
"""
return self._step_size
def _log_init(self, logger):
""" See :meth:`Loggable._log_init()`. """
logger.add_float('Accept.')
def _log_write(self, logger):
""" See :meth:`Loggable._log_write()`. """
logger.log(self._mcmc_acceptance)
def _kinetic_energy(self, momentum):
"""
Kinetic energy of relativistic particle, which is defined in [1]_.
"""
squared = np.sum(np.array(momentum)**2)
return self._mc2 * (squared / self._mc2 + 1)**0.5
def mass(self):
""" Returns ``mass`` which is the rest mass of particle. """
return self._mass
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
return 4
def name(self):
""" See :meth:`pints.MCMCSampler.name()`. """
return 'Relativistic MCMC'
def needs_sensitivities(self):
""" See :meth:`pints.MCMCSampler.needs_sensitivities()`. """
return True
def scaled_epsilon(self):
"""
Returns scaled epsilon used in leapfrog algorithm
"""
return self._scaled_epsilon
def _set_scaled_epsilon(self):
"""
Rescales epsilon along the dimensions of step_size
"""
self._scaled_epsilon = np.zeros(self._n_parameters)
for i in range(self._n_parameters):
self._scaled_epsilon[i] = self._epsilon * self._step_size[i]
def set_epsilon(self, epsilon):
"""
Sets epsilon for the leapfrog algorithm
"""
epsilon = float(epsilon)
if epsilon <= 0:
raise ValueError('epsilon must be positive for leapfrog algorithm')
self._epsilon = epsilon
self._set_scaled_epsilon()
def set_hamiltonian_threshold(self, hamiltonian_threshold):
"""
Sets threshold difference in Hamiltonian value from one iteration to
next which determines whether an iteration is divergent.
"""
if hamiltonian_threshold < 0:
raise ValueError('Threshold for divergent iterations must be ' +
'non-negative.')
self._hamiltonian_threshold = hamiltonian_threshold
def set_hyper_parameters(self, x):
"""
The hyper-parameter vector is ``[leapfrog_steps, leapfrog_step_size,
mass, c]``.
See :meth:`TunableMethod.set_hyper_parameters()`.
"""
self.set_leapfrog_steps(x[0])
self.set_leapfrog_step_size(x[1])
self.set_mass(x[2])
self.set_speed_of_light(x[3])
def set_leapfrog_steps(self, steps):
"""
Sets the number of leapfrog steps to carry out for each iteration.
"""
steps = int(steps)
if steps < 1:
raise ValueError('Number of steps must exceed 0.')
self._n_frog_iterations = steps
def set_leapfrog_step_size(self, step_size):
"""
Sets the step size for the leapfrog algorithm.
"""
a = np.atleast_1d(step_size)
if len(a[a < 0]) > 0:
raise ValueError(
'Step size for leapfrog algorithm must' +
'be greater than zero.'
)
if len(a) == 1:
step_size = np.repeat(step_size, self._n_parameters)
elif not len(step_size) == self._n_parameters:
raise ValueError(
'Step size should either be of length 1 or equal to the' +
'number of parameters'
)
self._step_size = step_size
self._set_scaled_epsilon()
def set_mass(self, mass):
""" Sets scalar mass. """
if isinstance(mass, list):
raise ValueError('Mass must be scalar.')
if mass <= 0:
raise ValueError('Mass must be positive.')
self._mass = mass
def set_speed_of_light(self, c):
""" Sets `speed of light`. """
if c <= 0:
raise ValueError('Speed of light must be positive.')
self._c = c
def speed_of_light(self):
""" Returns `speed of light`. """
return self._c
def tell(self, reply):
""" See :meth:`pints.SingleChainMCMC.tell()`. """
if not self._ready_for_tell:
raise RuntimeError('Tell called before proposal was set.')
self._ready_for_tell = False
# Unpack reply
energy, gradient = reply
# Check reply, copy gradient
energy = float(energy)
gradient = pints.vector(gradient)
assert(gradient.shape == (self._n_parameters, ))
# Energy = -log_pdf, so flip both signs!
energy = -energy
gradient = -gradient
# Very first call
if self._current is None:
# Check first point is somewhere sensible
if not np.isfinite(energy):
raise ValueError(
'Initial point for MCMC must have finite logpdf.')
# Set current sample, energy, and gradient
self._current = self._x0
self._current_energy = energy
self._current_gradient = gradient
# Increase iteration count
self._mcmc_iteration += 1
# Mark current as read-only, so it can be safely returned
self._current.setflags(write=False)
# Return first point in chain
return self._current
# Set gradient of current leapfrog position
self._gradient = gradient
# Update the leapfrog iteration count
self._frog_iteration += 1
# Not the last iteration? Then perform a leapfrog step and return
if self._frog_iteration < self._n_frog_iterations:
self._momentum -= self._scaled_epsilon * self._gradient
# Return None to indicate there is no new sample for the chain
return None
# Final leapfrog iteration: only do half a step
self._momentum -= self._scaled_epsilon * self._gradient * 0.5
# Before starting accept/reject procedure, check if the leapfrog
# procedure has led to a finite momentum and logpdf. If not, reject.
accept = 0
if np.isfinite(energy) and np.all(np.isfinite(self._momentum)):
# Evaluate potential and kinetic energies at start and end of
# leapfrog trajectory
current_U = self._current_energy
current_K = self._kinetic_energy(self._current_momentum)
proposed_U = energy
proposed_K = self._kinetic_energy(self._momentum)
# Check for divergent iterations by testing whether the
# Hamiltonian difference is above a threshold
div = proposed_U + proposed_K - (self._current_energy + current_K)
if np.abs(div) > self._hamiltonian_threshold: # pragma: no cover
self._divergent = np.append(
self._divergent, self._mcmc_iteration)
self._momentum = self._position = self._gradient = None
self._frog_iteration = 0
# Update MCMC iteration count
self._mcmc_iteration += 1
# Update acceptance rate (only used for output!)
self._mcmc_acceptance = (
(self._mcmc_iteration * self._mcmc_acceptance + accept) /
(self._mcmc_iteration + 1))
self._current.setflags(write=False)
return self._current
# Accept/reject
else:
r = np.exp(current_U - proposed_U + current_K - proposed_K)
if np.random.uniform(0, 1) < r:
accept = 1
self._current = self._position
self._current_energy = energy
self._current_gradient = gradient
# Mark current as read-only, so it can be safely returned
self._current.setflags(write=False)
# Reset leapfrog mechanism
self._momentum = self._position = self._gradient = None
self._frog_iteration = 0
# Update MCMC iteration count
self._mcmc_iteration += 1
# Update acceptance rate (only used for output!)
self._mcmc_acceptance = (
(self._mcmc_iteration * self._mcmc_acceptance + accept) /
(self._mcmc_iteration + 1))
# Return current position as next sample in the chain
return self._current
| bsd-3-clause |
miptliot/edx-platform | common/djangoapps/util/json_request.py | 24 | 3842 | import decimal
import json
from functools import wraps
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.http import HttpResponse, HttpResponseBadRequest
class EDXJSONEncoder(DjangoJSONEncoder):
    """
    JSON encoder adding Decimal support on top of DjangoJSONEncoder.

    NOTE:
        Please see https://docs.djangoproject.com/en/1.8/releases/1.5/#system-version-of-simplejson-no-longer-used
        Since Django 1.5 the stdlib json module is used, which serializes
        Decimal to `str`.  To keep the Django 1.4 behaviour, Decimals whose
        value is whole are emitted as `int`, all others as `float`.
    """
    def default(self, o):  # pylint: disable=method-hidden
        """
        Encode Decimal objects as int (whole values) or float (otherwise);
        delegate everything else to DjangoJSONEncoder.
        """
        if not isinstance(o, decimal.Decimal):
            return super(EDXJSONEncoder, self).default(o)
        return int(o) if o == o.to_integral() else float(o)
def expect_json(view_function):
    """
    View decorator for requests that may carry a JSON payload.

    If the request's CONTENT_TYPE is application/json and a body is present,
    the parsed payload is stored on ``request.json`` (a JSON 400 response is
    returned for unparseable bodies).  Otherwise ``request.json`` is set to
    an empty dict.
    """
    @wraps(view_function)
    def parse_json_into_request(request, *args, **kwargs):
        # The POST 'content-type' header can carry extra information (e.g.
        # 'charset'), so test for the JSON mime type as a substring rather
        # than comparing the whole header value.
        content_type = request.META.get('CONTENT_TYPE', '')
        if "application/json" not in content_type or not request.body:
            request.json = {}
            return view_function(request, *args, **kwargs)
        try:
            request.json = json.loads(request.body)
        except ValueError:
            return JsonResponseBadRequest({"error": "Invalid JSON"})
        return view_function(request, *args, **kwargs)
    return parse_json_into_request
class JsonResponse(HttpResponse):
    """
    Django HttpResponse subclass that has sensible defaults for outputting JSON.
    """
    def __init__(self, resp_obj=None, status=None, encoder=EDXJSONEncoder,
                 *args, **kwargs):
        # An empty payload is sent with an empty body and 204 No Content,
        # unless the caller supplied an explicit status.
        if resp_obj in (None, ""):
            content = ""
            status = status or 204
        elif isinstance(resp_obj, QuerySet):
            # QuerySets go through Django's own serializer.
            content = serialize('json', resp_obj)
        else:
            # NOTE(review): ensure_ascii=True here but False in
            # JsonResponseBadRequest -- confirm whether the asymmetry
            # is intentional.
            content = json.dumps(resp_obj, cls=encoder, indent=2, ensure_ascii=True)
        kwargs.setdefault("content_type", "application/json")
        if status:
            kwargs["status"] = status
        super(JsonResponse, self).__init__(content, *args, **kwargs)
class JsonResponseBadRequest(HttpResponseBadRequest):
    """
    Subclass of HttpResponseBadRequest that defaults to outputting JSON.
    Use this to send BadRequestResponse & some Json object along with it.
    Defaults:
        dictionary: empty dictionary
        status: 400
        encoder: DjangoJSONEncoder
    """
    def __init__(self, obj=None, status=400, encoder=DjangoJSONEncoder, *args, **kwargs):
        # An empty payload produces an empty body; the status stays 400.
        if obj in (None, ""):
            content = ""
        else:
            # NOTE(review): ensure_ascii=False here but True in JsonResponse
            # -- confirm whether the asymmetry is intentional.
            content = json.dumps(obj, cls=encoder, indent=2, ensure_ascii=False)
        kwargs.setdefault("content_type", "application/json")
        kwargs["status"] = status
        super(JsonResponseBadRequest, self).__init__(content, *args, **kwargs)
hargup/sympy | sympy/polys/tests/test_injections.py | 126 | 1795 | """Tests for functions that inject symbols into the global namespace. """
from sympy.polys.rings import vring
from sympy.polys.fields import vfield
from sympy.polys.domains import QQ
from sympy.utilities.pytest import raises
# make r1 with call-depth = 1
def _make_r1():
    """Create a ring with generator ``r1`` at call depth 1 from ``vring``."""
    return vring("r1", QQ)
# make r2 with call-depth = 2
def __make_r2():
    """Inner helper so that ``vring`` is reached at call depth 2."""
    return vring("r2", QQ)
def _make_r2():
    """Create a ring with generator ``r2`` at call depth 2 from ``vring``."""
    return __make_r2()
def test_vring():
    """``vring`` should inject its generator symbols into module globals."""
    R = vring("r", QQ)
    assert r == R.gens[0]
    # Space-separated names produce one generator per name.
    R = vring("rb rbb rcc rzz _rx", QQ)
    assert rb == R.gens[0]
    assert rbb == R.gens[1]
    assert rcc == R.gens[2]
    assert rzz == R.gens[3]
    assert _rx == R.gens[4]
    # A list of names works the same way.
    R = vring(['rd', 're', 'rfg'], QQ)
    assert rd == R.gens[0]
    assert re == R.gens[1]
    assert rfg == R.gens[2]
    # see if vring() really injects into global namespace
    # (the names must not exist before the helper runs, and must exist
    # afterwards even though vring was called from a nested frame)
    raises(NameError, lambda: r1)
    R = _make_r1()
    assert r1 == R.gens[0]
    raises(NameError, lambda: r2)
    R = _make_r2()
    assert r2 == R.gens[0]
# make f1 with call-depth = 1
def _make_f1():
    """Create a field with generator ``f1`` at call depth 1 from ``vfield``."""
    return vfield("f1", QQ)
# make f2 with call-depth = 2
def __make_f2():
    """Inner helper so that ``vfield`` is reached at call depth 2."""
    return vfield("f2", QQ)
def _make_f2():
    """Create a field with generator ``f2`` at call depth 2 from ``vfield``."""
    return __make_f2()
def test_vfield():
    """``vfield`` should inject its generator symbols into module globals."""
    F = vfield("f", QQ)
    assert f == F.gens[0]
    # Space-separated names produce one generator per name.
    F = vfield("fb fbb fcc fzz _fx", QQ)
    assert fb == F.gens[0]
    assert fbb == F.gens[1]
    assert fcc == F.gens[2]
    assert fzz == F.gens[3]
    assert _fx == F.gens[4]
    # A list of names works the same way.
    F = vfield(['fd', 'fe', 'ffg'], QQ)
    assert fd == F.gens[0]
    assert fe == F.gens[1]
    assert ffg == F.gens[2]
    # see if vfield() really injects into global namespace
    # (the names must not exist before the helper runs, and must exist
    # afterwards even though vfield was called from a nested frame)
    raises(NameError, lambda: f1)
    F = _make_f1()
    assert f1 == F.gens[0]
    raises(NameError, lambda: f2)
    F = _make_f2()
    assert f2 == F.gens[0]
| bsd-3-clause |
QijunPan/ansible | lib/ansible/inventory/script.py | 26 | 7096 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import subprocess
import sys
from collections import Mapping
from ansible.compat.six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible.module_utils._text import to_native, to_text
class InventoryScript:
    ''' Host inventory parser for ansible using external inventory scripts. '''
    def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
        """Run ``<script> --list`` and parse its JSON output into groups/hosts."""
        if groups is None:
            groups = dict()
        self._loader = loader
        self.groups = groups
        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
            raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        if sp.returncode != 0:
            raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
        # make sure script output is unicode so that json loader will output
        # unicode strings itself
        try:
            self.data = to_text(stdout, errors="strict")
        except Exception as e:
            raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_native(self.filename), to_native(e)))
        # see comment about _meta below
        self.host_vars_from_top = None
        self._parse(stderr)
    def _parse(self, err):
        """Build Group/Host objects from the script's JSON dict (``self.data``).

        ``err`` is the script's captured stderr, echoed on parse failure to
        aid debugging.
        """
        all_hosts = {}
        # not passing from_remote because data from CMDB is trusted
        try:
            self.raw = self._loader.load(self.data)
        except Exception as e:
            sys.stderr.write(to_native(err) + "\n")
            raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_native(self.filename), to_native(e)))
        if not isinstance(self.raw, Mapping):
            sys.stderr.write(to_native(err) + "\n")
            raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_native(self.filename)))
        group = None
        for (group_name, data) in self.raw.items():
            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host. This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.
            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue
            # NOTE(review): a '_meta' entry *without* 'hostvars' falls
            # through here and is treated as a regular group -- confirm
            # this is intentional.
            if group_name not in self.groups:
                group = self.groups[group_name] = Group(group_name)
            group = self.groups[group_name]
            host = None
            if not isinstance(data, dict):
                # Bare list shorthand: the value is the host list itself.
                data = {'hosts': data}
            # is not those subkeys, then simplified syntax, host with vars
            elif not any(k in data for k in ('hosts','vars','children')):
                data = {'hosts': [group_name], 'vars': data}
            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise AnsibleError("You defined a group \"%s\" with bad "
                                       "data for the host list:\n %s" % (group_name, data))
                for hostname in data['hosts']:
                    # Reuse one Host object per hostname across groups.
                    if hostname not in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)
            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise AnsibleError("You defined a group \"%s\" with bad "
                                       "data for variables:\n %s" % (group_name, data))
                for k, v in iteritems(data['vars']):
                    group.set_variable(k, v)
        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in self.groups:
                        self.groups[group_name].add_child_group(self.groups[child_name])
        # Finally, add all top-level groups as children of 'all'.
        # We exclude ungrouped here because it was already added as a child of
        # 'all' at the time it was created.
        for group in self.groups.values():
            if group.depth == 0 and group.name not in ('all', 'ungrouped'):
                self.groups['all'].add_child_group(group)
    def get_host_variables(self, host):
        """ Runs <script> --host <hostname> to determine additional host variables """
        # Prefer hostvars delivered in the --list output's '_meta' section;
        # this avoids one subprocess call per host.
        if self.host_vars_from_top is not None:
            try:
                got = self.host_vars_from_top.get(host.name, {})
            except AttributeError as e:
                raise AnsibleError("Improperly formatted host information for %s: %s" % (host.name,to_native(e)))
            return got
        cmd = [self.filename, "--host", host.name]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
            raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (out, err) = sp.communicate()
        if out.strip() == '':
            return dict()
        try:
            return json_dict_bytes_to_unicode(self._loader.load(out))
        except ValueError:
            raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
| gpl-3.0 |
iAmMrinal0/CouchPotatoServer | libs/suds/wsse.py | 195 | 5981 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{wsse} module provides WS-Security.
"""
from logging import getLogger
from suds import *
from suds.sudsobject import Object
from suds.sax.element import Element
from suds.sax.date import UTC
from datetime import datetime, timedelta
try:
from hashlib import md5
except ImportError:
# Python 2.4 compatibility
from md5 import md5
dsns = \
('ds',
'http://www.w3.org/2000/09/xmldsig#')
wssens = \
('wsse',
'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd')
wsuns = \
('wsu',
'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd')
wsencns = \
('wsenc',
'http://www.w3.org/2001/04/xmlenc#')
class Security(Object):
    """
    The WS-Security SOAP header content.
    @ivar tokens: A list of security tokens
    @type tokens: [L{Token},...]
    @ivar signatures: A list of signatures.
    @type signatures: TBD
    @ivar references: A list of references.
    @type references: TBD
    @ivar keys: A list of encryption keys.
    @type keys: TBD
    """
    def __init__(self):
        """ Initialise with mustUnderstand set and all item lists empty. """
        Object.__init__(self)
        self.mustUnderstand = True
        for name in ('tokens', 'signatures', 'references', 'keys'):
            setattr(self, name, [])
    def xml(self):
        """
        Get xml representation of the object.
        @return: The root node.
        @rtype: L{Element}
        """
        root = Element('Security', ns=wssens)
        root.set('mustUnderstand', str(self.mustUnderstand).lower())
        for token in self.tokens:
            root.append(token.xml())
        return root
class Token(Object):
    """ I{Abstract} base class for WS-Security tokens. """
    @classmethod
    def now(cls):
        """ @return: The current local time as a L{datetime}. """
        return datetime.now()
    @classmethod
    def utc(cls):
        """ @return: The current UTC time as a L{datetime}. """
        return datetime.utcnow()
    @classmethod
    def sysdate(cls):
        """ @return: The current UTC time formatted as a string. """
        return str(UTC())
    def __init__(self):
        Object.__init__(self)
class UsernameToken(Token):
    """
    Represents a basic I{UsernameToken} WS-Security token.
    @ivar username: A username.
    @type username: str
    @ivar password: A password.
    @type password: str
    @ivar nonce: A set of bytes to prevent replay attacks.
    @type nonce: str
    @ivar created: The token created.
    @type created: L{datetime}
    """
    def __init__(self, username=None, password=None):
        """
        @param username: A username.
        @type username: str
        @param password: A password.
        @type password: str
        """
        Token.__init__(self)
        self.username = username
        self.password = password
        # Optional elements; omitted from the XML until explicitly set.
        self.nonce = None
        self.created = None
    def setnonce(self, text=None):
        """
        Set I{nonce} which is an arbitrary set of bytes to prevent
        replay attacks.
        @param text: The nonce text value.
            Generated when I{None}.
        @type text: str
        """
        if text is None:
            # Derive a nonce from username, password and the current time.
            # NOTE(review): assumes username and password are both set
            # (str); ':'.join fails on None -- confirm callers set them
            # before generating a nonce.
            # NOTE(review): md5.update requires bytes on Python 3; this
            # module targets Python 2 (see the md5 fallback import).
            s = []
            s.append(self.username)
            s.append(self.password)
            s.append(Token.sysdate())
            m = md5()
            m.update(':'.join(s))
            self.nonce = m.hexdigest()
        else:
            self.nonce = text
    def setcreated(self, dt=None):
        """
        Set I{created}.
        @param dt: The created date & time.
            Set as datetime.utc() when I{None}.
        @type dt: L{datetime}
        """
        if dt is None:
            self.created = Token.utc()
        else:
            self.created = dt
    def xml(self):
        """
        Get xml representation of the object.
        @return: The root node.
        @rtype: L{Element}
        """
        root = Element('UsernameToken', ns=wssens)
        u = Element('Username', ns=wssens)
        u.setText(self.username)
        root.append(u)
        p = Element('Password', ns=wssens)
        p.setText(self.password)
        root.append(p)
        # Nonce and Created are only emitted when they have been set.
        if self.nonce is not None:
            n = Element('Nonce', ns=wssens)
            n.setText(self.nonce)
            root.append(n)
        if self.created is not None:
            n = Element('Created', ns=wsuns)
            n.setText(str(UTC(self.created)))
            root.append(n)
        return root
class Timestamp(Token):
    """
    Represents the I{Timestamp} WS-Security token.
    @ivar created: The token created.
    @type created: L{datetime}
    @ivar expires: The token expires.
    @type expires: L{datetime}
    """

    def __init__(self, validity=90):
        """
        @param validity: The token lifetime in seconds.
        @type validity: int
        """
        Token.__init__(self)
        self.created = Token.utc()
        self.expires = self.created + timedelta(seconds=validity)

    def xml(self):
        """Render the I{wsu:Timestamp} element with Created/Expires children."""
        root = Element("Timestamp", ns=wsuns)
        # Emit Created first, then Expires, matching the WS-Security schema.
        for name, moment in (('Created', self.created), ('Expires', self.expires)):
            child = Element(name, ns=wsuns)
            child.setText(str(UTC(moment)))
            root.append(child)
        return root
ukanga/SickRage | lib/bs4/builder/_html5lib.py | 39 | 12788 | __all__ = [
'HTML5TreeBuilder',
]
from pdb import set_trace
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
NamespacedAttribute,
whitespace_re,
)
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""
    NAME = "html5lib"
    features = [NAME, PERMISSIVE, HTML_5, HTML]
    def prepare_markup(self, markup, user_specified_encoding,
                       document_declared_encoding=None, exclude_encodings=None):
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        # document_declared_encoding and exclude_encodings aren't used
        # ATM because the html5lib TreeBuilder doesn't use
        # UnicodeDammit.
        if exclude_encodings:
            warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
        # Single candidate: html5lib does its own encoding detection.
        yield (markup, None, None, False)
    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        # html5lib always parses the whole document; parse_only cannot apply.
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)
        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, unicode):  # Python 2 text type
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
    def create_treebuilder(self, namespaceHTMLElements):
        # Called back by html5lib; keeps a handle so feed() results land
        # in self.soup.
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """Adapter that lets html5lib build directly into a BeautifulSoup tree."""
    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
    def documentClass(self):
        # The soup object itself acts as the document root.
        self.soup.reset()
        return Element(self.soup, self.soup, None)
    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)
    def elementClass(self, name, namespace):
        # Wrap a newly created Tag in the html5lib node interface.
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)
    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # Fragment parsing gets its own, empty soup.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)
    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)
    def getDocument(self):
        return self.soup
    def getFragment(self):
        # Unwrap the Element adapter back to the underlying soup object.
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like, html5lib-facing view over a tag's attributes.

    Reads come from a snapshot taken at construction time; writes go
    through to the underlying element (splitting multi-valued
    attributes such as ``class`` into lists).
    """

    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return iter(list(self.attrs.items()))

    def __setitem__(self, name, value):
        # If this attribute is multi-valued for this tag, store it as a list.
        list_attr = HTML5TreeBuilder.cdata_list_attributes
        is_multi = name in list_attr['*']
        if not is_multi:
            per_tag = list_attr.get(self.element.name)
            is_multi = per_tag is not None and name in per_tag
        if is_multi:
            # A node that is being cloned may already hold a list.
            if not isinstance(value, list):
                value = whitespace_re.split(value)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """html5lib node adapter wrapping a Beautiful Soup Tag.

    html5lib drives tree construction through this interface; each call
    mutates the underlying soup tree and keeps Beautiful Soup's
    next_element/previous_element/sibling threading consistent.
    """
    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element    # the wrapped Tag (or the soup itself)
        self.soup = soup
        self.namespace = namespace
    def appendChild(self, node):
        # Normalize the four possible inputs (plain string, Tag, wrapped
        # NavigableString, wrapped Tag) into `child`, remembering whether
        # it is textual in `string_child`.
        string_child = child = None
        if isinstance(node, basestring):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element
        # Detach the child from any previous parent before re-homing it.
        if not isinstance(child, basestring) and child.parent is not None:
            node.element.extract()
        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, basestring):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)
            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            elif self.element.next_element is not None:
                # Something from further ahead in the parse tree is
                # being inserted into this earlier element. This is
                # very annoying because it means an expensive search
                # for the last element in the tree.
                most_recent_element = self.soup._last_descendant()
            else:
                most_recent_element = self.element
            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)
    def getAttributes(self):
        return AttrList(self.element)
    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            converted_attributes = []
            # html5lib hands namespaced attribute names as tuples; convert
            # them to NamespacedAttribute keys in place.
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in attributes.items():
                self.element[name] = value
            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)
    def insertText(self, data, insertBefore=None):
        if insertBefore:
            text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(data, insertBefore)
        else:
            self.appendChild(data)
    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self
    def removeChild(self, node):
        node.element.extract()
    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        # print "MOVE", self.element.contents
        # print "FROM", self.element
        # print "TO", new_parent.element
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling
        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element
        to_append = element.contents
        append_after = new_parent_element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            if new_parents_last_descendant:
                first_child.previous_element = new_parents_last_descendant
            else:
                first_child.previous_element = new_parent_element
            first_child.previous_sibling = new_parents_last_child
            if new_parents_last_descendant:
                new_parents_last_descendant.next_element = first_child
            else:
                new_parent_element.next_element = first_child
            if new_parents_last_child:
                new_parents_last_child.next_sibling = first_child
            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            if new_parents_last_descendant_next_element:
                new_parents_last_descendant_next_element.previous_element = last_child
            last_child.next_sibling = None
        # Re-home every child under the new parent.
        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)
        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element
        # print "DONE WITH MOVE"
        # print "FROM", self.element
        # print "TO", new_parent_element
    def cloneNode(self):
        # Shallow clone: same name/namespace and attributes, no children.
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key,value in self.attributes:
            node.attributes[key] = value
        return node
    def hasContent(self):
        return self.element.contents
    def getNameTuple(self):
        # html5lib identifies nodes by (namespace, name); default to the
        # HTML namespace when none was set.
        if self.namespace == None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Leaf node wrapping a NavigableString (or Comment)."""
    def __init__(self, element, soup):
        # Text nodes have no tag name, hence None.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.element = element
        self.soup = soup
    def cloneNode(self):
        # Cloning a bare text node is never requested by this builder.
        raise NotImplementedError
| gpl-3.0 |
luisgg/iteexe | nevow/compyCompat.py | 14 | 2955 | # Copyright (c) 2004 Divmod.
# See LICENSE for details.
"""Compatibility wrapper over new twisted.python.components,
so that nevow works with it. This module shadows nevow.compy
when using the new Twisted's Zope components system.
"""
import warnings
from twisted.python.components import *
warnings.filterwarnings('ignore', category=ComponentsDeprecationWarning)
from twisted.python.reflect import namedAny as _namedAny
CannotAdapt = TypeError
class _NamedAnyError(Exception):
    """Internal error raised when resolving a dotted name fails."""
_registerAdapter = registerAdapter
def registerAdapter(adapterFactory, origInterface, *interfaceClasses):
    """Compatibility wrapper around Twisted's registerAdapter.

    Accepts either all-object or all-string (dotted-name) arguments;
    strings are resolved before delegating to the shadowed
    _registerAdapter.  NOTE(review): Python 2 only -- uses
    `except E, name` and print statements.
    """
    class RandomClazz(object):
        pass
    def namedAny(name):
        # Map old __builtin__ type names onto their `types` module
        # equivalents before resolving.
        if name == '__builtin__.function':
            name='types.FunctionType'
        elif name == '__builtin__.method':
            return RandomClazz # Hack
        elif name == '__builtin__.instancemethod':
            name='types.MethodType'
        elif name == '__builtin__.NoneType':
            name='types.NoneType'
        elif name == '__builtin__.generator':
            name='types.GeneratorType'
        try:
            return _namedAny(name)
        except (AttributeError, ImportError):
            raise _NamedAnyError("Name %s not found." % name)
    # Arguments must be homogeneous: all strings or all objects.
    isStr = type(adapterFactory) is str
    if (type(origInterface) is str) != isStr:
        raise ValueError("Either all arguments must be strings or all must be objects.")
    for interfaceClass in interfaceClasses:
        if (type(interfaceClass) is str) != isStr:
            raise ValueError("Either all arguments must be strings or all must be objects.")
    if isStr:
        try:
            # print "registerAdapter:",adapterFactory, origInterface, interfaceClasses
            adapterFactory = namedAny(adapterFactory)
            origInterface = namedAny(origInterface)
            interfaceClasses = [namedAny(x) for x in interfaceClasses]
        except _NamedAnyError, nae:
            # Unresolvable names are reported and the registration skipped.
            print 'NamedAnyError:', nae
            return
    # print "_registerAdapter:",adapterFactory, origInterface, interfaceClasses
    # ISerializable registrations are routed to the flattener registry.
    if 'nevow.inevow.ISerializable' in interfaceClasses or filter(
        lambda o: getattr(o, '__name__', None) == 'ISerializable', interfaceClasses):
        warnings.warn("ISerializable is deprecated. Please use nevow.flat.registerFlattener instead.", stacklevel=2)
        from nevow import flat
        flat.registerFlattener(adapterFactory, origInterface)
    _registerAdapter(adapterFactory, origInterface, *interfaceClasses)
class IComponentized(Interface):
    """Marker interface implemented by the compatibility Componentized."""
    pass
_Componentized = Componentized
class Componentized(_Componentized):
    """Componentized that can pre-populate its adapter cache."""
    __implements__ = (IComponentized,)

    def __init__(self, adapterCache=None):
        _Componentized.__init__(self)
        # Seed the component registry from the optional cache mapping.
        if adapterCache:
            for interface, component in adapterCache.items():
                self.setComponent(interface, component)
from zope.interface import implements as newImplements
| gpl-2.0 |
cgar/servo | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-ruby-tests.py | 829 | 3042 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-ruby-001 ~ 004 which tests
emphasis marks with ruby in four directions. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-ruby-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="emphasis marks are drawn outside the ruby">
<link rel="match" href="text-emphasis-ruby-{index:03}-ref.html">
<p>Pass if the emphasis marks are outside the ruby:</p>
<div style="line-height: 5; writing-mode: {wm}; ruby-position: {ruby_pos}; text-emphasis-position: {posval}">ルビ<span style="text-emphasis: circle">と<ruby>圏<rt>けん</rt>点<rt>てん</rt></ruby>を</span>同時</div>
'''
REF_FILE = 'text-emphasis-ruby-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rtc {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are outside the ruby:</p>
<div style="line-height: 5; writing-mode: {wm}; ruby-position: {posval}">ルビ<ruby>と<rtc>●</rtc>圏<rt>けん</rt><rtc>●</rtc>点<rt>てん</rt><rtc>●</rtc>を<rtc>●</rtc></ruby>同時</div>
'''
TEST_CASES = [
('top', 'horizontal-tb', 'over', [
('horizontal-tb', 'over right')]),
('bottom', 'horizontal-tb', 'under', [
('horizontal-tb', 'under right')]),
('right', 'vertical-rl', 'over', [
('vertical-rl', 'over right'),
('vertical-lr', 'over right')]),
('left', 'vertical-rl', 'under', [
('vertical-rl', 'over left'),
('vertical-lr', 'over left')]),
]
SUFFIXES = ['', 'a']
def write_file(filename, content):
    """Write *content* to *filename* as UTF-8 encoded bytes."""
    data = content.encode('UTF-8')
    with open(filename, 'wb') as out:
        out.write(data)
# Emit a Mozilla reftest manifest on stdout while generating the
# test/reference HTML files on disk.
print("# START tests from {}".format(__file__))
idx = 0
for pos, ref_wm, ruby_pos, subtests in TEST_CASES:
    idx += 1
    # One shared reference file per test case.
    ref_file = REF_FILE.format(idx)
    ref_content = REF_TEMPLATE.format(pos=pos, wm=ref_wm, posval=ruby_pos)
    write_file(ref_file, ref_content)
    # Suffixes ('', 'a', ...) distinguish the subtests of one case.
    suffix = iter(SUFFIXES)
    for wm, posval in subtests:
        test_file = TEST_FILE.format(idx, next(suffix))
        test_content = TEST_TEMPLATE.format(
            wm=wm, pos=pos, index=idx, ruby_pos=ruby_pos, posval=posval)
        write_file(test_file, test_content)
        # Each manifest line pairs a test with its reference.
        print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| mpl-2.0 |
Pantynopants/pyGraph | algorithms_edgearr.py | 1 | 13630 | # -*- coding=utf-8 -*-
from models import *
import utils
import copy
from heapq import heappop, heappush
"""
some algorithms below using models.EdgesetArray
"""
def TopoSort(DGlist):
    """Peel in-degree-zero vertices (Kahn's algorithm) to expose a cycle.

    The vertices that survive the peeling are (part of) a cycle and are
    returned.  The result may differ between runs because the starting
    vertex is not deterministic.  NOTE(review): this cannot double as a
    visitor that walks the cycle -- detecting it by traversal would
    amount to deciding non-termination (halting problem).

    Parameters
    ----------
    DGlist : list
        Sequence of vertices describing the directed route.

    Returns
    -------
    list
        The vertices remaining after peeling, i.e. the cycle.
    """
    # DGlist = list(set(DGlist))
    # route_martix = graph.loc[DGlist, DGlist]
    # route_martix = route_martix.copy()
    edarray = EdgesetArray(v = list(set(DGlist)) )
    edarray = edarray.route_to_edgeSetArray(DGlist)
    # temp = edarray.route_to_edgeSetArray(DGlist).get_indegrees()
    # edge_matrix = edarray.get_all_edges().copy()
    indegree_list = edarray.get_indegrees()
    print(indegree_list)
    stack = []
    # Seed the stack with every vertex of in-degree 0.
    for k,v in zip(list(indegree_list.columns.values), indegree_list.values[0]):
        if v == 0:
            stack.append(k)
    if len(stack) == 0:
        stack.append(DGlist[0]) # init start point
    del_list = []
    while stack: #O(v*e)
        del_poi = stack.pop()
        # print("del" + del_poi) # can ont delete twice
        if del_poi in del_list:
            continue
        del_list.append(del_poi)
        edarray = edarray.del_vertex(del_poi)
        indegree_list = edarray.get_indegrees()
        # map(list,zip(*indegree_list.values))
        # Deleting a vertex may create new in-degree-0 vertices.
        for k,v in zip(list(indegree_list.columns.values), indegree_list.values[0]):
            if v == 0:
                stack.append(k)
        # TODO using hash instead of for loop
    print("circle road is")
    print(indegree_list)
    # Whatever columns remain belong to the cycle.
    circle_path = []
    for i in indegree_list.columns.values:
        circle_path.append(i)
    return circle_path
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('ALGraph')
def kruskal(graph):
    """Kruskal's algorithm for a minimum spanning tree.

    Time complexity O(E * log2 E), dominated by the weight sort.

    Parameters
    ----------
    graph : EdgesetArray

    Returns
    -------
    set of (vertex1, vertex2, weight) tuples, e.g.::

        set([('A', 'B', 1), ('B', 'D', 2), ('C', 'D', 1)])
    """
    # Disjoint-set forest shared by the nested helpers via closure.
    parent = dict()
    rank = dict()
    def make_set(vertice):
        # Each vertex starts as its own singleton set.
        parent[vertice] = vertice
        rank[vertice] = 0
    def find(vertice):
        # Representative lookup with path compression.
        if parent[vertice] != vertice:
            parent[vertice] = find(parent[vertice])
        return parent[vertice]
    def union(vertice1, vertice2):
        # Union by rank: attach the shallower tree under the deeper one.
        root1 = find(vertice1)
        root2 = find(vertice2)
        if root1 != root2:
            if rank[root1] > rank[root2]:
                parent[root2] = root1
            else:
                parent[root1] = root2
                if rank[root1] == rank[root2]: rank[root2] += 1
    for vertice in graph.get_all_vertexes():
        make_set(vertice)
    minimum_spanning_tree = set()
    # Work on a copy so the caller's graph is left untouched by the sort.
    newgraph = copy.deepcopy(graph)
    newgraph = newgraph.sort_weight([u"weight"])
    edges = newgraph.as_matrix()
    # Scan edges in ascending weight order; keep those joining two
    # different components.
    for edge in edges:
        vertice1, vertice2, weight = edge
        if find(vertice1) != find(vertice2):
            union(vertice1, vertice2)
            minimum_spanning_tree.add((vertice1, vertice2, weight))
    return minimum_spanning_tree
"""
some algorithms below using models.ALGraph
"""
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('EdgesetArray')
def prim(graph):
    """Prim's minimum-spanning-tree algorithm (heap-based).

    Parameters
    ----------
    graph : ALGraph (dict: node -> {neighbour: weight}); must be
        undirected, i.e. every edge appears in both adjacency maps.

    Returns
    -------
    dict
        Maps every spanned vertex to its predecessor in the MST; the
        start vertex maps to None.

    >>> graph = {
    ...     0: {1:1, 2:3, 3:4},
    ...     1: {0:1, 2:5},
    ...     2: {0:3, 1:5, 3:2},
    ...     3: {2:2, 0:4}
    ... }
    >>> prim(graph)
    {0: None, 1: 0, 2: 0, 3: 2}
    """
    # next(iter(...)) works on Python 2 and 3; graph.keys()[0] raises
    # TypeError on Python 3 because keys() returns a non-indexable view.
    start = next(iter(graph))
    result, heap = {}, [(0, None, start)]
    while heap:
        _, predecessor, node = heappop(heap)  # (weight, predecessor, node)
        if node in result:
            continue  # already attached to the tree
        result[node] = predecessor
        for neighbor, weight in graph[node].items():
            heappush(heap, (weight, node, neighbor))
    return result
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('EdgesetArray')
def kruskal_ALGraph(graph):
    """Kruskal's minimum-spanning-tree algorithm on an adjacency-list graph.

    Time complexity O(E * log2 E), dominated by sorting the edge list.

    Parameters
    ----------
    graph : ALGraph (dict: node -> {neighbour: weight})

    Returns
    -------
    set of (u, v) edge tuples forming the MST.

    >>> graph = {
    ...     0: {1:1, 2:3, 3:4},
    ...     1: {2:5},
    ...     2: {3:2},
    ...     3: set()
    ... }
    >>> print list(kruskal_ALGraph(graph))
    ... #[(0, 1), (2, 3), (0, 2)]
    """
    def root_of(parent, node):
        # Representative lookup with path compression.
        if parent[node] != node:
            parent[node] = root_of(parent, parent[node])
        return parent[node]

    def merge(parent, rank, a, b):
        # Union by rank: hang the shallower tree under the deeper one.
        a, b = root_of(parent, a), root_of(parent, b)
        if rank[a] > rank[b]:
            parent[b] = a
        else:
            parent[a] = b
            if rank[a] == rank[b]:
                rank[b] += 1  # b's tree grew by one level

    edges = [(graph[u][v], u, v) for u in graph for v in graph[u]]  # (weight, start, end)
    mst = set()
    parent = {node: node for node in graph}
    rank = {node: 0 for node in graph}
    for _, u, v in sorted(edges):
        if root_of(parent, u) != root_of(parent, v):
            mst.add((u, v))
            merge(parent, rank, u, v)
    return mst
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('EdgesetArray')
def bellman_ford(graph, start = None):
    """Single-source shortest paths; unlike dijkstra it tolerates
    negative edge weights.

    Time complexity O(|V| * |E|).

    Parameters
    ----------
    graph : ALGraph (dict: node -> {neighbour: weight})
    start : hashable, optional
        Source vertex; defaults to the first key of ``graph``.

    Returns
    -------
    (dict, dict)
        ``distance`` maps each reached vertex to its shortest distance
        from ``start``; ``path`` maps each vertex to its predecessor
        (``start`` itself has no entry in ``path``).

    Raises
    ------
    ValueError
        If a negative-weight cycle is reachable from ``start``.
    """
    if start is None:
        # next(iter(...)) works on Python 2 and 3; graph.keys()[0] raises
        # TypeError on Python 3 because keys() returns a view.
        start = next(iter(graph))
    distance, path = {start: 0}, {}
    for _ in graph:  # at most |V| relaxation rounds
        changed = False
        for from_node in graph:
            for to_node in graph[from_node]:
                if relax(graph, from_node, to_node, distance, path):
                    changed = True
        if not changed:
            break
    else:
        # The |V|-th full round still improved a distance: a reachable
        # negative-weight cycle must exist.
        raise ValueError('negative cycle')
    return distance, path
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('EdgesetArray')
def dijkstra(graph, start = None):
    """Single-source shortest paths for non-negative edge weights.

    Parameters
    ----------
    graph : ALGraph (dict: node -> {neighbour: weight})
    start : hashable, optional
        Source vertex; defaults to the first key of ``graph``.

    Returns
    -------
    (dict, dict)
        ``distance`` maps each reached vertex to its shortest distance
        from ``start``; ``path`` maps each vertex to its predecessor.
    """
    if start is None:
        # next(iter(...)) works on Python 2 and 3; graph.keys()[0] raises
        # TypeError on Python 3 because keys() returns a view.
        start = next(iter(graph))
    distance, path, queue, done = {start: 0}, {}, [(0, start)], set()
    while queue:
        _, u = heappop(queue)  # vertex with the smallest tentative distance
        if u in done:
            continue  # stale heap entry; u already finalized
        done.add(u)
        if u not in graph:
            continue  # vertex has no adjacency entry (no out-edges)
        for v in graph[u]:
            # Push only when the estimate actually improved; pushing
            # unconditionally is harmless but bloats the heap.
            if relax(graph, u, v, distance, path):
                heappush(queue, (distance[v], v))
    return distance, path
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('EdgesetArray')
def johnson(graph):
    """All-pairs shortest paths: Bellman-Ford reweighting + Dijkstra.

    Performs well on sparse graphs; time complexity O(V * E * lg(V)).

    Parameters
    ----------
    graph : ALGraph (dict: node -> {neighbour: weight}); negative edge
        weights are allowed, negative cycles are not.

    Returns
    -------
    (dict, dict)
        ``distance[u][v]`` is the shortest distance from u to v;
        ``path[u]`` is the predecessor map of the run started at u.
        NOTE(review): vertices unreachable from u are missing from
        ``distance[u]``, so the readjustment loop assumes a strongly
        connected graph -- unchanged from the original behavior.
    """
    graph = copy.deepcopy(graph)
    # Add a brand-new auxiliary source with zero-weight edges to every
    # vertex.  (The previous code reused graph.keys()[0] -- an EXISTING
    # vertex -- which both broke on Python 3 and overwrote that vertex's
    # real out-edges, corrupting the reweighting function h.)
    s = object()  # sentinel; cannot collide with any real vertex
    graph[s] = {v: 0 for v in graph}
    h, _ = bellman_ford(graph, s)  # h[v]: shortest distance from s
    del graph[s]
    for u in graph:
        for v in graph[u]:
            # Reweight so every edge becomes non-negative.
            graph[u][v] += h[u] - h[v]
    distance, path = {}, {}
    for u in graph:
        distance[u], path[u] = dijkstra(graph, u)  # shortest paths per source
        for v in graph:
            distance[u][v] += h[v] - h[u]  # undo the reweighting
    return distance, path
@utils.not_implemented_for('DataFrame')
@utils.not_implemented_for('EdgesetArray')
def floyd_warshall1(graph):
    """All-pairs shortest paths (Floyd-Warshall), O(|V|^3).

    Parameters
    ----------
    graph : ALGraph (dict: node -> {neighbour: weight}); callers are
        expected to pre-fill missing edges with utils.INF and the
        diagonal with 0 (see the example below).

    Returns
    -------
    dict of dict
        distance[u][v] is the shortest distance from u to v.

    example:
    >>> a, b, c, d, e = range(1,6) # One-based
    >>> W = {
    >>>     a: {c:1, d:7},
    >>>     b: {a:4},
    >>>     c: {b:-5, e:2},
    >>>     d: {c:6},
    >>>     e: {a:3, b:8, d:-4}
    >>>     }
    >>> for u in W:
    >>>     for v in W:
    >>>         if u == v: W[u][v] = 0
    >>>         if v not in W[u]: W[u][v] = utils.INF
    >>> D = floyd_warshall1(W)
    >>> print [D[a][v] for v in [a, b, c, d, e]] # [0, -4, 1, -1, 3]
    """
    # Work on a copy so the caller's graph keeps its original weights.
    distance = copy.deepcopy(graph)
    for k in distance:
        for u in distance:
            for v in distance:
                a, b = 0,0
                try:
                    a = distance[u][v]
                except :
                    a = utils.INF  # no direct edge recorded
                try:
                    b = distance[u][k] + distance[k][v]
                except :
                    b = utils.INF
                else:
                    # NOTE(review): the update runs only when the u->k->v
                    # lookup succeeded; when it raises, distance[u][v] is
                    # left untouched even if it is still missing.
                    utils.add_dict(distance, u, v, min(a,b))
    return distance
def DFSTraverse(graph, start = None):
    """Iterative depth-first traversal.

    Parameters
    ----------
    graph : dict
        Adjacency structure: vertex -> iterable of neighbours.  A dict
        of edge weights, a set, or a list of neighbours all work.
    start : hashable, optional
        Vertex to start from; defaults to the first key of ``graph``.

    Returns
    -------
    set
        The vertices reachable from ``start``.

    >>> graph = {'A': set(['B', 'C']),
    >>>          'B': set(['A', 'D', 'E']),
    >>>          'C': set(['A', 'F']),
    >>>          'D': set(['B']),
    >>>          'E': set(['B', 'F']),
    >>>          'F': set(['C', 'E'])}

    ref
    -----
    .. [1] http://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/
    """
    if start is None:
        # next(iter(...)) is Python 2/3 safe; keys()[0] fails on Python 3.
        start = next(iter(graph))
    visited, stack = set(), [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            # set(...) accepts any neighbour container (dict, set, list);
            # the old graph[vertex].keys() crashed on the set-valued
            # adjacency shown in the docstring example.
            stack.extend(set(graph[vertex]) - visited)
    return visited
def BFSTraverse(graph, start = None):
    """Iterative breadth-first traversal.

    Parameters
    ----------
    graph : dict
        Adjacency structure: vertex -> iterable of neighbours (dict,
        set or list all work).
    start : hashable, optional
        Vertex to start from; defaults to the first key of ``graph``.

    Returns
    -------
    set
        The vertices reachable from ``start``.
    """
    from collections import deque  # local import: O(1) popleft vs list.pop(0)
    if start is None:
        # next(iter(...)) is Python 2/3 safe; keys()[0] fails on Python 3.
        start = next(iter(graph))
    visited, queue = set(), deque([start])
    while queue:
        vertex = queue.popleft()
        if vertex not in visited:
            visited.add(vertex)
            # set(...) generalizes over dict/set/list adjacency, unlike
            # the old graph[vertex].keys() which required a dict.
            queue.extend(set(graph[vertex]) - visited)
    return visited
################# helper func #################
def relax(W, u, v, D, P):
    """Edge relaxation shared by the shortest-path algorithms.

    If reaching *v* through *u* beats the best distance known for *v*,
    update the distance map ``D`` and predecessor map ``P`` in place and
    report the improvement (True); otherwise leave them alone.

    reference
    ----------
    .. [1] http://python.jobbole.com/81467/
    """
    candidate = D.get(u, utils.INF) + int(W[u][v])
    if candidate >= D.get(v, utils.INF):
        return None
    D[v] = candidate
    P[v] = u
    return True
flotre/sickbeard-vfvo | bs4/tests/test_tree.py | 77 | 65254 | # -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
CData,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
    """Shared assertion helpers for the tree-traversal tests."""

    def assertSelects(self, tags, should_match):
        """Assert that *tags* contain exactly the strings in *should_match*.

        Used by tests that define a bunch of tags, each containing a
        single string, and then select certain strings by some mechanism.
        """
        found = [tag.string for tag in tags]
        self.assertEqual(found, should_match)

    def assertSelectsIDs(self, tags, should_match):
        """Assert that *tags* carry exactly the 'id' values in *should_match*.

        Used by tests that define a bunch of tags, each with a single
        ID, and then select certain IDs by some mechanism.
        """
        found = [tag['id'] for tag in tags]
        self.assertEqual(found, should_match)
class TestFind(TreeTest):
    """Basic tests of the find() method.

    find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
    """
    def test_find_tag(self):
        soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
        # find() returns the first match only.
        self.assertEqual(soup.find("b").string, "2")
    def test_unicode_text_find(self):
        # Non-ASCII text= values must match correctly.
        soup = self.soup(u'<h1>Räksmörgås</h1>')
        self.assertEqual(soup.find(text=u'Räksmörgås'), u'Räksmörgås')
class TestFindAll(TreeTest):
    """Basic tests of the find_all() method."""
    def test_find_all_text_nodes(self):
        """You can search the tree for text nodes."""
        soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
        # Exact match.
        self.assertEqual(soup.find_all(text="bar"), [u"bar"])
        # Match any of a number of strings.
        self.assertEqual(
            soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"])
        # Match a regular expression.
        self.assertEqual(soup.find_all(text=re.compile('.*')),
                         [u"Foo", u"bar", u'\xbb'])
        # Match anything.
        self.assertEqual(soup.find_all(text=True),
                         [u"Foo", u"bar", u'\xbb'])
    def test_find_all_limit(self):
        """You can limit the number of items returned by find_all."""
        soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
        self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
        self.assertSelects(soup.find_all('a', limit=1), ["1"])
        self.assertSelects(
            soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
        # A limit of 0 means no limit.
        self.assertSelects(
            soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
    def test_calling_a_tag_is_calling_findall(self):
        # tag(...) is shorthand for tag.find_all(...).
        soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
        self.assertSelects(soup('a', limit=1), ["1"])
        self.assertSelects(soup.b(id="foo"), ["3"])
    def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self):
        soup = self.soup("<a></a>")
        # Create a self-referential list.
        l = []
        l.append(l)
        # Without special code in _normalize_search_value, this would cause infinite
        # recursion.
        self.assertEqual([], soup.find_all(l))
class TestFindAllBasicNamespaces(TreeTest):
    """Namespaced tag and attribute names are matchable as plain strings."""
    def test_find_by_namespaced_name(self):
        soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">')
        self.assertEqual("4", soup.find("mathml:msqrt").string)
        self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name)
class TestFindAllByName(TreeTest):
    """Test ways of finding tags by tag name."""
    def setUp(self):
        super(TreeTest, self).setUp()
        self.tree = self.soup("""<a>First tag.</a>
<b>Second tag.</b>
<c>Third <a>Nested tag.</a> tag.</c>""")
    def test_find_all_by_tag_name(self):
        # Find all the <a> tags.
        self.assertSelects(
            self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_name_and_text(self):
        # Name and text= filters combine conjunctively.
        self.assertSelects(
            self.tree.find_all('a', text='First tag.'), ['First tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=re.compile("tag")),
            ['First tag.', 'Nested tag.'])
    def test_find_all_on_non_root_element(self):
        # You can call find_all on any node, not just the root.
        self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
    def test_calling_element_invokes_find_all(self):
        self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_strainer(self):
        # A SoupStrainer works the same as a plain name.
        self.assertSelects(
            self.tree.find_all(SoupStrainer('a')),
            ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_names(self):
        # A list of names matches any of them.
        self.assertSelects(
            self.tree.find_all(['a', 'b']),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_dict(self):
        # A dict of name -> True matches any listed name.
        self.assertSelects(
            self.tree.find_all({'a' : True, 'b' : True}),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_re(self):
        # A compiled regular expression matches against tag names.
        self.assertSelects(
            self.tree.find_all(re.compile('^[ab]$')),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_with_tags_matching_method(self):
        # You can define an oracle method that determines whether
        # a tag matches the search.
        def id_matches_name(tag):
            return tag.name == tag.get('id')
        tree = self.soup("""<a id="a">Match 1.</a>
<a id="1">Does not match.</a>
<b id="b">Match 2.</a>""")
        self.assertSelects(
            tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
class TestFindAllByAttribute(TreeTest):
    """Test ways of finding tags by attribute value (keyword arguments,
    the attrs dict, CSS class shortcuts, regexes, lists, and None/True)."""

    def test_find_all_by_attribute_name(self):
        # You can pass in keyword arguments to find_all to search by
        # attribute.
        tree = self.soup("""
                         <a id="first">Matching a.</a>
                         <a id="second">
                          Non-matching <b id="first">Matching b.</b>a.
                         </a>""")
        self.assertSelects(tree.find_all(id='first'),
                           ["Matching a.", "Matching b."])

    def test_find_all_by_utf8_attribute_value(self):
        # Attribute values can be matched as bytes or as Unicode.
        peace = u"םולש".encode("utf8")
        data = u'<a title="םולש"></a>'.encode("utf8")
        soup = self.soup(data)
        self.assertEqual([soup.a], soup.find_all(title=peace))
        self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8")))
        self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"]))

    def test_find_all_by_attribute_dict(self):
        # You can pass in a dictionary as the argument 'attrs'. This
        # lets you search for attributes like 'name' (a fixed argument
        # to find_all) and 'class' (a reserved word in Python.)
        tree = self.soup("""
                         <a name="name1" class="class1">Name match.</a>
                         <a name="name2" class="class2">Class match.</a>
                         <a name="name3" class="class3">Non-match.</a>
                         <name1>A tag called 'name1'.</name1>
                         """)

        # This doesn't do what you want.
        self.assertSelects(tree.find_all(name='name1'),
                           ["A tag called 'name1'."])

        # This does what you want.
        self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
                           ["Name match."])

        self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
                           ["Class match."])

    def test_find_all_by_class(self):
        tree = self.soup("""
                         <a class="1">Class 1.</a>
                         <a class="2">Class 2.</a>
                         <b class="1">Class 1.</b>
                         <c class="3 4">Class 3 and 4.</c>
                         """)

        # Passing in the class_ keyword argument will search against
        # the 'class' attribute.
        self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.'])
        self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.'])

        # Passing in a string to 'attrs' will also search the CSS class.
        self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
        self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
        self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])

    def test_find_by_class_when_multiple_classes_present(self):
        tree = self.soup("<gar class='foo bar'>Found it</gar>")

        f = tree.find_all("gar", class_=re.compile("o"))
        self.assertSelects(f, ["Found it"])

        f = tree.find_all("gar", class_=re.compile("a"))
        self.assertSelects(f, ["Found it"])

        # Since the class is not the string "foo bar", but the two
        # strings "foo" and "bar", this will not find anything.
        f = tree.find_all("gar", class_=re.compile("o b"))
        self.assertSelects(f, [])

    def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
        soup = self.soup("<a class='bar'>Found it</a>")

        self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])

        # A callable passed as attrs is applied to the class value.
        def big_attribute_value(value):
            return len(value) > 3

        self.assertSelects(soup.find_all("a", big_attribute_value), [])

        def small_attribute_value(value):
            return len(value) <= 3

        self.assertSelects(
            soup.find_all("a", small_attribute_value), ["Found it"])

    def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
        soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
        a, a2 = soup.find_all("a")
        self.assertEqual([a, a2], soup.find_all("a", "foo"))
        self.assertEqual([a], soup.find_all("a", "bar"))

        # If you specify the class as a string that contains a
        # space, only that specific value will be found.
        self.assertEqual([a], soup.find_all("a", class_="foo bar"))
        self.assertEqual([a], soup.find_all("a", "foo bar"))
        self.assertEqual([], soup.find_all("a", "bar foo"))

    def test_find_all_by_attribute_soupstrainer(self):
        tree = self.soup("""
                         <a id="first">Match.</a>
                         <a id="second">Non-match.</a>""")

        strainer = SoupStrainer(attrs={'id' : 'first'})
        self.assertSelects(tree.find_all(strainer), ['Match.'])

    def test_find_all_with_missing_atribute(self):
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that do not have that attribute set.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(tree.find_all('a', id=None), ["No ID present."])

    def test_find_all_with_defined_attribute(self):
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that have that attribute set to any value.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(
            tree.find_all(id=True), ["ID present.", "ID is empty."])

    def test_find_all_with_numeric_attribute(self):
        # If you search for a number, it's treated as a string.
        tree = self.soup("""<a id=1>Unquoted attribute.</a>
                            <a id="1">Quoted attribute.</a>""")

        expected = ["Unquoted attribute.", "Quoted attribute."]
        self.assertSelects(tree.find_all(id=1), expected)
        self.assertSelects(tree.find_all(id="1"), expected)

    def test_find_all_with_list_attribute_values(self):
        # You can pass a list of attribute values instead of just one,
        # and you'll get tags that match any of the values.
        tree = self.soup("""<a id="1">1</a>
                            <a id="2">2</a>
                            <a id="3">3</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=["1", "3", "4"]),
                           ["1", "3"])

    def test_find_all_with_regular_expression_attribute_value(self):
        # You can pass a regular expression as an attribute value, and
        # you'll get tags whose values for that attribute match the
        # regular expression.
        tree = self.soup("""<a id="a">One a.</a>
                            <a id="aa">Two as.</a>
                            <a id="ab">Mixed as and bs.</a>
                            <a id="b">One b.</a>
                            <a>No ID.</a>""")

        self.assertSelects(tree.find_all(id=re.compile("^a+$")),
                           ["One a.", "Two as."])

    def test_find_by_name_and_containing_string(self):
        soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
        a = soup.a

        self.assertEqual([a], soup.find_all("a", text="foo"))
        # Bug fix: this assertion used to appear twice in a row; the
        # duplicate has been removed.
        self.assertEqual([], soup.find_all("a", text="bar"))

    def test_find_by_name_and_containing_string_when_string_is_buried(self):
        soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
        self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))

    def test_find_by_attribute_and_containing_string(self):
        soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
        a = soup.a

        self.assertEqual([a], soup.find_all(id=2, text="foo"))
        self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
    """Test Tag.index"""
    def test_index(self):
        # index() must identify each child by identity, not equality,
        # even when several children are equal to each other.
        tree = self.soup("""<div>
                            <a>Identical</a>
                            <b>Not identical</b>
                            <a>Identical</a>

                            <c><d>Identical with child</d></c>
                            <b>Also not identical</b>
                            <c><d>Identical with child</d></c>
                            </div>""")
        div = tree.div
        for i, element in enumerate(div.contents):
            self.assertEqual(i, div.index(element))
        # An element that is not a child raises ValueError, like list.index.
        self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
    """Test navigation and searching through an element's parents."""

    def setUp(self):
        super(TestParentOperations, self).setUp()
        # Four nested <ul>s; navigation starts from the innermost <b>.
        self.tree = self.soup('''<ul id="empty"></ul>
                                 <ul id="top">
                                  <ul id="middle">
                                   <ul id="bottom">
                                    <b>Start here</b>
                                   </ul>
                                  </ul>''')
        self.start = self.tree.b


    def test_parent(self):
        # .parent walks one level up the tree at a time.
        self.assertEqual(self.start.parent['id'], 'bottom')
        self.assertEqual(self.start.parent.parent['id'], 'middle')
        self.assertEqual(self.start.parent.parent.parent['id'], 'top')

    def test_parent_of_top_tag_is_soup_object(self):
        top_tag = self.tree.contents[0]
        self.assertEqual(top_tag.parent, self.tree)

    def test_soup_object_has_no_parent(self):
        self.assertEqual(None, self.tree.parent)

    def test_find_parents(self):
        # find_parents returns ancestors innermost-first.
        self.assertSelectsIDs(
            self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
        self.assertSelectsIDs(
            self.start.find_parents('ul', id="middle"), ['middle'])

    def test_find_parent(self):
        self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')

    def test_parent_of_text_element(self):
        # NavigableStrings have parents too.
        text = self.tree.find(text="Start here")
        self.assertEqual(text.parent.name, 'b')

    def test_text_element_find_parent(self):
        text = self.tree.find(text="Start here")
        self.assertEqual(text.find_parent('ul')['id'], 'bottom')

    def test_parent_generator(self):
        parents = [parent['id'] for parent in self.start.parents
                   if parent is not None and 'id' in parent.attrs]
        self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
    """Base class providing a small fixed document for the
    next/previous-element navigation tests below."""

    def setUp(self):
        # Bug fix: this used to call super(TreeTest, self).setUp(), which
        # starts the MRO lookup *after* TreeTest and therefore skips any
        # setUp TreeTest itself defines. Name the current class instead.
        super(ProximityTest, self).setUp()
        self.tree = self.soup(
            '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
    """Test next_element navigation and the find_next/find_all_next methods."""

    def setUp(self):
        super(TestNextOperations, self).setUp()
        self.start = self.tree.b

    def test_next(self):
        # next_element follows document order: first a tag's text child,
        # then the following tag.
        self.assertEqual(self.start.next_element, "One")
        self.assertEqual(self.start.next_element.next_element['id'], "2")

    def test_next_of_last_item_is_none(self):
        last = self.tree.find(text="Three")
        self.assertEqual(last.next_element, None)

    def test_next_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        self.assertEqual(self.tree.next_element, None)

    def test_find_all_next(self):
        self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
        # Bug fix: a stray duplicate call whose return value was discarded
        # used to precede this assertion; it has been removed.
        self.assertSelects(self.start.find_all_next(id=3), ["Three"])

    def test_find_next(self):
        self.assertEqual(self.start.find_next('b')['id'], '2')
        self.assertEqual(self.start.find_next(text="Three"), "Three")

    def test_find_next_for_text_element(self):
        # NavigableStrings support the same forward-search methods as tags.
        text = self.tree.find(text="One")
        self.assertEqual(text.find_next("b").string, "Two")
        self.assertSelects(text.find_all_next("b"), ["Two", "Three"])

    def test_next_generator(self):
        start = self.tree.find(text="Two")
        successors = [node for node in start.next_elements]
        # There are two successors: the final <b> tag and its text contents.
        tag, contents = successors
        self.assertEqual(tag['id'], '3')
        self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
    """Test previous_element navigation and the find_previous methods."""

    def setUp(self):
        super(TestPreviousOperations, self).setUp()
        self.end = self.tree.find(text="Three")

    def test_previous(self):
        # previous_element walks backwards in document order.
        self.assertEqual(self.end.previous_element['id'], "3")
        self.assertEqual(self.end.previous_element.previous_element, "Two")

    def test_previous_of_first_item_is_none(self):
        first = self.tree.find('html')
        self.assertEqual(first.previous_element, None)

    def test_previous_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        # XXX This is broken!
        #self.assertEqual(self.tree.previous_element, None)
        pass

    def test_find_all_previous(self):
        # The <b> tag containing the "Three" node is the predecessor
        # of the "Three" node itself, which is why "Three" shows up
        # here.
        self.assertSelects(
            self.end.find_all_previous('b'), ["Three", "Two", "One"])
        self.assertSelects(self.end.find_all_previous(id=1), ["One"])

    def test_find_previous(self):
        self.assertEqual(self.end.find_previous('b')['id'], '3')
        self.assertEqual(self.end.find_previous(text="One"), "One")

    def test_find_previous_for_text_element(self):
        # NavigableStrings support the same backward-search methods as tags.
        text = self.tree.find(text="Three")
        self.assertEqual(text.find_previous("b").string, "Three")
        self.assertSelects(
            text.find_all_previous("b"), ["Three", "Two", "One"])

    def test_previous_generator(self):
        start = self.tree.find(text="One")
        predecessors = [node for node in start.previous_elements]

        # There are four predecessors: the <b> tag containing "One"
        # the <body> tag, the <head> tag, and the <html> tag.
        b, body, head, html = predecessors
        self.assertEqual(b['id'], '1')
        self.assertEqual(body.name, "body")
        self.assertEqual(head.name, "head")
        self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
    """Base class providing a document of nested <span>s for the
    sibling-navigation tests below."""

    def setUp(self):
        super(SiblingTest, self).setUp()
        markup = '''<html>
                    <span id="1">
                     <span id="1.1"></span>
                    </span>
                    <span id="2">
                     <span id="2.1"></span>
                    </span>
                    <span id="3">
                     <span id="3.1"></span>
                    </span>
                    <span id="4"></span>
                    </html>'''
        # All that whitespace looks good but makes the tests more
        # difficult. Get rid of it.
        # Fix: use a raw string for the regex. "\n\s*" relies on "\s" not
        # being a recognized string escape; modern Pythons emit a
        # DeprecationWarning/SyntaxWarning for such invalid escapes.
        markup = re.compile(r"\n\s*").sub("", markup)
        self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
    """Test next_sibling navigation and the find_next_sibling(s) methods."""

    def setUp(self):
        super(TestNextSibling, self).setUp()
        self.start = self.tree.find(id="1")

    def test_next_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.next_sibling, None)

    def test_next_sibling(self):
        self.assertEqual(self.start.next_sibling['id'], '2')
        self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')

        # Note the difference between next_sibling and next_element.
        self.assertEqual(self.start.next_element['id'], '1.1')

    def test_next_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.next_sibling, None)

        # The last child of a tag has no next sibling.
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.next_sibling, None)

        last_span = self.tree.find(id="4")
        self.assertEqual(last_span.next_sibling, None)

    def test_find_next_sibling(self):
        self.assertEqual(self.start.find_next_sibling('span')['id'], '2')

    def test_next_siblings(self):
        self.assertSelectsIDs(self.start.find_next_siblings("span"),
                              ['2', '3', '4'])

        self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])

    def test_next_sibling_for_text_element(self):
        # NavigableStrings have siblings too.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="Foo")
        self.assertEqual(start.next_sibling.name, 'b')
        self.assertEqual(start.next_sibling.next_sibling, 'baz')

        self.assertSelects(start.find_next_siblings('b'), ['bar'])
        self.assertEqual(start.find_next_sibling(text="baz"), "baz")
        self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
    """Test previous_sibling navigation and the find_previous_sibling(s)
    methods; mirror image of TestNextSibling."""

    def setUp(self):
        super(TestPreviousSibling, self).setUp()
        self.end = self.tree.find(id="4")

    def test_previous_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.previous_sibling, None)

    def test_previous_sibling(self):
        self.assertEqual(self.end.previous_sibling['id'], '3')
        self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')

        # Note the difference between previous_sibling and previous_element.
        self.assertEqual(self.end.previous_element['id'], '3.1')

    def test_previous_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.previous_sibling, None)

        # The first child of a tag has no previous sibling.
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.previous_sibling, None)

        first_span = self.tree.find(id="1")
        self.assertEqual(first_span.previous_sibling, None)

    def test_find_previous_sibling(self):
        self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')

    def test_previous_siblings(self):
        self.assertSelectsIDs(self.end.find_previous_siblings("span"),
                              ['3', '2', '1'])

        self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])

    def test_previous_sibling_for_text_element(self):
        # NavigableStrings have siblings too.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="baz")
        self.assertEqual(start.previous_sibling.name, 'b')
        self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')

        self.assertSelects(start.find_previous_siblings('b'), ['bar'])
        self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
        self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
    """Test the ability to create new tags."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz")
        self.assertTrue(isinstance(new_tag, Tag))
        self.assertEqual("foo", new_tag.name)
        self.assertEqual(dict(bar="baz"), new_tag.attrs)
        # A freshly created tag is not attached to any tree.
        self.assertEqual(None, new_tag.parent)

    def test_tag_inherits_self_closing_rules_from_builder(self):
        # NOTE: the XML half of this test only runs when lxml is installed.
        if XML_BUILDER_PRESENT:
            xml_soup = BeautifulSoup("", "xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")

            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())

        html_soup = BeautifulSoup("", "html")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")

        # The HTML builder users HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        self.assertEqual(b"<br/>", html_br.encode())
        self.assertEqual(b"<p></p>", html_p.encode())

    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, NavigableString))
class TestTreeModification(SoupTest):
    """Tests for destructive operations on the parse tree: attribute
    modification, insert/append, replace_with, unwrap, wrap, extract,
    clear, and .string assignment. Many assertions here verify that the
    next_element/previous_element and sibling chains are re-linked
    correctly after each mutation."""

    def test_attribute_modification(self):
        soup = self.soup('<a id="1"></a>')
        # Item assignment sets an attribute...
        soup.a['id'] = 2
        self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
        # ...del removes it...
        del(soup.a['id'])
        self.assertEqual(soup.decode(), self.document_for('<a></a>'))
        # ...and new attributes can be added the same way.
        soup.a['id2'] = 'foo'
        self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))

    def test_new_tag_creation(self):
        builder = builder_registry.lookup('html')()
        soup = self.soup("<body></body>", builder=builder)
        # Tags can also be built directly from the Tag constructor.
        a = Tag(soup, builder, 'a')
        ol = Tag(soup, builder, 'ol')
        a['href'] = 'http://foo.com/'
        soup.body.insert(0, a)
        soup.body.insert(1, ol)
        self.assertEqual(
            soup.body.encode(),
            b'<body><a href="http://foo.com/"></a><ol></ol></body>')

    def test_append_to_contents_moves_tag(self):
        doc = """<p id="1">Don't leave me <b>here</b>.</p>
        <p id="2">Don\'t leave!</p>"""
        soup = self.soup(doc)
        second_para = soup.find(id='2')
        bold = soup.b

        # Move the <b> tag to the end of the second paragraph.
        soup.find(id='2').append(soup.b)

        # The <b> tag is now a child of the second paragraph.
        self.assertEqual(bold.parent, second_para)

        # append() moves (not copies): the tag is gone from its old spot.
        self.assertEqual(
            soup.decode(), self.document_for(
                '<p id="1">Don\'t leave me .</p>\n'
                '<p id="2">Don\'t leave!<b>here</b></p>'))

    def test_replace_with_returns_thing_that_was_replaced(self):
        text = "<a></a><b><c></c></b>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.replace_with(soup.c)
        self.assertEqual(a, new_a)

    def test_unwrap_returns_thing_that_was_replaced(self):
        text = "<a><b></b><c></c></a>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.unwrap()
        self.assertEqual(a, new_a)

    def test_replace_tag_with_itself(self):
        # Replacing a tag with itself is a no-op.
        text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
        soup = self.soup(text)
        c = soup.c
        soup.c.replace_with(c)
        self.assertEqual(soup.decode(), self.document_for(text))

    def test_replace_tag_with_its_parent_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.b.replace_with, soup.a)

    def test_insert_tag_into_itself_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.a.insert, 0, soup.a)

    def test_replace_with_maintains_next_element_throughout(self):
        soup = self.soup('<p><a>one</a><b>three</b></p>')
        a = soup.a
        b = a.contents[0]
        # Make it so the <a> tag has two text children.
        a.insert(1, "two")

        # Now replace each one with the empty string.
        left, right = a.contents
        left.replaceWith('')
        right.replaceWith('')

        # The <b> tag is still connected to the tree.
        self.assertEqual("three", soup.b.string)

    def test_replace_final_node(self):
        soup = self.soup("<b>Argh!</b>")
        soup.find(text="Argh!").replace_with("Hooray!")
        new_text = soup.find(text="Hooray!")
        b = soup.b
        # The replacement string is fully wired into the element chain.
        self.assertEqual(new_text.previous_element, b)
        self.assertEqual(new_text.parent, b)
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.next_element, None)

    def test_consecutive_text_nodes(self):
        # A builder should never create two consecutive text nodes,
        # but if you insert one next to another, Beautiful Soup will
        # handle it correctly.
        soup = self.soup("<a><b>Argh!</b><c></c></a>")
        soup.b.insert(1, "Hooray!")

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Argh!Hooray!</b><c></c></a>"))

        new_text = soup.find(text="Hooray!")
        self.assertEqual(new_text.previous_element, "Argh!")
        self.assertEqual(new_text.previous_element.next_element, new_text)

        self.assertEqual(new_text.previous_sibling, "Argh!")
        self.assertEqual(new_text.previous_sibling.next_sibling, new_text)

        self.assertEqual(new_text.next_sibling, None)
        self.assertEqual(new_text.next_element, soup.c)

    def test_insert_string(self):
        soup = self.soup("<a></a>")
        soup.a.insert(0, "bar")
        soup.a.insert(0, "foo")
        # The string were added to the tag.
        self.assertEqual(["foo", "bar"], soup.a.contents)
        # And they were converted to NavigableStrings.
        self.assertEqual(soup.a.contents[0].next_element, "bar")

    def test_insert_tag(self):
        builder = self.default_builder
        soup = self.soup(
            "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
        magic_tag = Tag(soup, builder, 'magictag')
        magic_tag.insert(0, "the")
        soup.a.insert(1, magic_tag)

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))

        # Make sure all the relationships are hooked up correctly.
        b_tag = soup.b
        self.assertEqual(b_tag.next_sibling, magic_tag)
        self.assertEqual(magic_tag.previous_sibling, b_tag)

        find = b_tag.find(text="Find")
        self.assertEqual(find.next_element, magic_tag)
        self.assertEqual(magic_tag.previous_element, find)

        c_tag = soup.c
        self.assertEqual(magic_tag.next_sibling, c_tag)
        self.assertEqual(c_tag.previous_sibling, magic_tag)

        the = magic_tag.find(text="the")
        self.assertEqual(the.parent, magic_tag)
        self.assertEqual(the.next_element, c_tag)
        self.assertEqual(c_tag.previous_element, the)

    def test_append_child_thats_already_at_the_end(self):
        data = "<a><b></b></a>"
        soup = self.soup(data)
        soup.a.append(soup.b)
        self.assertEqual(data, soup.decode())

    def test_move_tag_to_beginning_of_parent(self):
        data = "<a><b></b><c></c><d></d></a>"
        soup = self.soup(data)
        soup.a.insert(0, soup.d)
        self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode())

    def test_insert_works_on_empty_element_tag(self):
        # This is a little strange, since most HTML parsers don't allow
        # markup like this to come through. But in general, we don't
        # know what the parser would or wouldn't have allowed, so
        # I'm letting this succeed for now.
        soup = self.soup("<br/>")
        soup.br.insert(1, "Contents")
        self.assertEqual(str(soup.br), "<br>Contents</br>")

    def test_insert_before(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_before("BAZ")
        soup.a.insert_before("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))

        # insert_before with a tag already in the tree moves it.
        soup.a.insert_before(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))

    def test_insert_after(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_after("BAZ")
        soup.a.insert_after("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))

        # insert_after with a tag already in the tree moves it.
        soup.b.insert_after(soup.a)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))

    def test_insert_after_raises_exception_if_after_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        # A detached string, the soup itself, and a tag relative to itself
        # all have no meaningful "after" position.
        self.assertRaises(ValueError, string.insert_after, tag)
        self.assertRaises(NotImplementedError, soup.insert_after, tag)
        self.assertRaises(ValueError, tag.insert_after, tag)

    def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        self.assertRaises(ValueError, string.insert_before, tag)
        self.assertRaises(NotImplementedError, soup.insert_before, tag)
        self.assertRaises(ValueError, tag.insert_before, tag)

    def test_replace_with(self):
        soup = self.soup(
                "<p>There's <b>no</b> business like <b>show</b> business</p>")
        no, show = soup.find_all('b')
        show.replace_with(no)
        self.assertEqual(
            soup.decode(),
            self.document_for(
                "<p>There's  business like <b>no</b> business</p>"))

        # The replaced tag is orphaned; the replacement is re-linked.
        self.assertEqual(show.parent, None)
        self.assertEqual(no.parent, soup.p)
        self.assertEqual(no.next_element, "no")
        self.assertEqual(no.next_sibling, " business")

    def test_replace_first_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.b.replace_with(soup.c)
        self.assertEqual("<a><c></c></a>", soup.decode())

    def test_replace_last_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.c.replace_with(soup.b)
        self.assertEqual("<a><b></b></a>", soup.decode())

    def test_nested_tag_replace_with(self):
        soup = self.soup(
            """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")

        # Replace the entire <b> tag and its contents ("reserve the
        # right") with the <f> tag ("refuse").
        remove_tag = soup.b
        move_tag = soup.f
        remove_tag.replace_with(move_tag)

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a>We<f>refuse</f></a><e>to<g>service</g></e>"))

        # The <b> tag is now an orphan.
        self.assertEqual(remove_tag.parent, None)
        self.assertEqual(remove_tag.find(text="right").next_element, None)
        self.assertEqual(remove_tag.previous_element, None)
        self.assertEqual(remove_tag.next_sibling, None)
        self.assertEqual(remove_tag.previous_sibling, None)

        # The <f> tag is now connected to the <a> tag.
        self.assertEqual(move_tag.parent, soup.a)
        self.assertEqual(move_tag.previous_element, "We")
        self.assertEqual(move_tag.next_element.next_element, soup.e)
        self.assertEqual(move_tag.next_sibling, None)

        # The gap where the <f> tag used to be has been mended, and
        # the word "to" is now connected to the <g> tag.
        to_text = soup.find(text="to")
        g_tag = soup.g
        self.assertEqual(to_text.next_element, g_tag)
        self.assertEqual(to_text.next_sibling, g_tag)
        self.assertEqual(g_tag.previous_element, to_text)
        self.assertEqual(g_tag.previous_sibling, to_text)

    def test_unwrap(self):
        tree = self.soup("""
            <p>Unneeded <em>formatting</em> is unneeded</p>
            """)
        tree.em.unwrap()
        # unwrap() removes the tag but keeps its contents in place.
        self.assertEqual(tree.em, None)
        self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")

    def test_wrap(self):
        soup = self.soup("I wish I was bold.")
        value = soup.string.wrap(soup.new_tag("b"))
        # wrap() returns the wrapper with the element inside it.
        self.assertEqual(value.decode(), "<b>I wish I was bold.</b>")
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))

    def test_wrap_extracts_tag_from_elsewhere(self):
        soup = self.soup("<b></b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))

    def test_wrap_puts_new_contents_at_the_end(self):
        soup = self.soup("<b>I like being bold.</b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(2, len(soup.b.contents))
        self.assertEqual(
            soup.decode(), self.document_for(
                "<b>I like being bold.I wish I was bold.</b>"))

    def test_extract(self):
        soup = self.soup(
            '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')

        self.assertEqual(len(soup.body.contents), 3)
        extracted = soup.find(id="nav").extract()

        self.assertEqual(
            soup.decode(), "<html><body>Some content.  More content.</body></html>")
        self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')

        # The extracted tag is now an orphan.
        self.assertEqual(len(soup.body.contents), 2)
        self.assertEqual(extracted.parent, None)
        self.assertEqual(extracted.previous_element, None)
        self.assertEqual(extracted.next_element.next_element, None)

        # The gap where the extracted tag used to be has been mended.
        content_1 = soup.find(text="Some content. ")
        content_2 = soup.find(text=" More content.")
        self.assertEqual(content_1.next_element, content_2)
        self.assertEqual(content_1.next_sibling, content_2)
        self.assertEqual(content_2.previous_element, content_1)
        self.assertEqual(content_2.previous_sibling, content_1)

    def test_extract_distinguishes_between_identical_strings(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        foo_1 = soup.a.string
        bar_1 = soup.b.string
        foo_2 = soup.new_string("foo")
        bar_2 = soup.new_string("bar")
        soup.a.append(foo_2)
        soup.b.append(bar_2)

        # Now there are two identical strings in the <a> tag, and two
        # in the <b> tag. Let's remove the first "foo" and the second
        # "bar".
        foo_1.extract()
        bar_2.extract()
        self.assertEqual(foo_2, soup.a.string)
        self.assertEqual(bar_2, soup.b.string)

    def test_clear(self):
        """Tag.clear()"""
        soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
        # clear using extract()
        a = soup.a
        soup.p.clear()
        self.assertEqual(len(soup.p.contents), 0)
        self.assertTrue(hasattr(a, "contents"))

        # clear using decompose()
        em = a.em
        a.clear(decompose=True)
        self.assertFalse(hasattr(em, "contents"))

    def test_string_set(self):
        """Tag.string = 'string'"""
        soup = self.soup("<a></a> <b><c></c></b>")

        # Assigning .string replaces the tag's entire contents.
        soup.a.string = "foo"
        self.assertEqual(soup.a.contents, ["foo"])

        soup.b.string = "bar"
        self.assertEqual(soup.b.contents, ["bar"])

    def test_string_set_does_not_affect_original_string(self):
        soup = self.soup("<a><b>foo</b><c>bar</c>")
        soup.b.string = soup.c.string
        self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")

    def test_set_string_preserves_class_of_string(self):
        soup = self.soup("<a></a>")
        cdata = CData("foo")
        soup.a.string = cdata
        self.assertTrue(isinstance(soup.a.string, CData))
class TestElementObjects(SoupTest):
"""Test various features of element objects."""
def test_len(self):
"""The length of an element is its number of children."""
soup = self.soup("<top>1<b>2</b>3</top>")
# The BeautifulSoup object itself contains one element: the
# <top> tag.
self.assertEqual(len(soup.contents), 1)
self.assertEqual(len(soup), 1)
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
self.assertEqual(len(soup.top), 3)
self.assertEqual(len(soup.top.contents), 3)
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo invokes find('foo')"""
soup = self.soup('<b><i></i></b>')
self.assertEqual(soup.b, soup.find('b'))
self.assertEqual(soup.b.i, soup.find('b').find('i'))
self.assertEqual(soup.a, None)
def test_deprecated_member_access(self):
soup = self.soup('<b><i></i></b>')
with warnings.catch_warnings(record=True) as w:
tag = soup.bTag
self.assertEqual(soup.b, tag)
self.assertEqual(
'.bTag is deprecated, use .find("b") instead.',
str(w[0].message))
def test_has_attr(self):
"""has_attr() checks for the presence of an attribute.
Please note note: has_attr() is different from
__in__. has_attr() checks the tag's attributes and __in__
checks the tag's chidlren.
"""
soup = self.soup("<foo attr='bar'>")
self.assertTrue(soup.foo.has_attr('attr'))
self.assertFalse(soup.foo.has_attr('attr2'))
def test_attributes_come_out_in_alphabetical_order(self):
markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
def test_string(self):
# A tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
self.assertEqual(soup.b.string, 'foo')
def test_empty_tag_has_no_string(self):
# A tag with no children has no .stirng.
soup = self.soup("<b></b>")
self.assertEqual(soup.b.string, None)
def test_tag_with_multiple_children_has_no_string(self):
# A tag with no children has no .string.
soup = self.soup("<a>foo<b></b><b></b></b>")
self.assertEqual(soup.b.string, None)
soup = self.soup("<a>foo<b></b>bar</b>")
self.assertEqual(soup.b.string, None)
# Even if all the children are strings, due to trickery,
# it won't work--but this would be a good optimization.
soup = self.soup("<a>foo</b>")
soup.a.insert(1, "bar")
self.assertEqual(soup.a.string, None)
    def test_tag_with_recursive_string_has_string(self):
        # A tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        self.assertEqual(soup.a.string, "foo")
        self.assertEqual(soup.string, "foo")
    def test_lack_of_string(self):
        """Only a tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        self.assertFalse(soup.b.string)

        soup = self.soup("<b></b>")
        self.assertFalse(soup.b.string)
    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        self.assertEqual(soup.a.text, "ar t ")
        self.assertEqual(soup.a.get_text(strip=True), "art")
        # A separator is inserted between every pair of adjacent text nodes.
        self.assertEqual(soup.a.get_text(","), "a,r, , t ")
        self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
class TestCDAtaListAttributes(SoupTest):

    """Testing cdata-list attributes like 'class'.

    Attributes declared in HTML as whitespace-separated lists are parsed
    into Python lists and re-joined with single spaces on output.
    """

    def test_single_value_becomes_list(self):
        soup = self.soup("<a class='foo'>")
        self.assertEqual(["foo"],soup.a['class'])

    def test_multiple_values_becomes_list(self):
        soup = self.soup("<a class='foo bar'>")
        self.assertEqual(["foo", "bar"], soup.a['class'])

    def test_multiple_values_separated_by_weird_whitespace(self):
        # Tabs and newlines count as separators, not just spaces.
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        self.assertEqual(["foo", "bar", "baz"],soup.a['class'])

    def test_attributes_joined_into_string_on_output(self):
        soup = self.soup("<a class='foo\tbar'>")
        self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())

    def test_accept_charset(self):
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])

    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
class TestPersistence(SoupTest):
    """Testing features like pickle and deepcopy.

    A round trip through either mechanism must yield a tree whose decoded
    markup is identical to the original's.
    """

    def setUp(self):
        super(TestPersistence, self).setUp()
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.tree = self.soup(self.page)

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        dumped = pickle.dumps(self.tree, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(loaded.decode(), self.tree.decode())

    def test_deepcopy_identity(self):
        # Making a deepcopy of a tree yields an identical tree.
        copied = copy.deepcopy(self.tree)
        self.assertEqual(copied.decode(), self.tree.decode())

    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.decode(), soup.decode())
class TestSubstitutions(SoupTest):
    """Tests of entity substitution applied at output time via formatters."""

    def test_default_formatter_is_minimal(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The angle brackets are entity-escaped but the e-with-acute is
        # left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))

    def test_formatter_html(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="html")
        self.assertEqual(
            decoded,
            self.document_for("<b><<Sacré bleu!>></b>"))

    def test_formatter_minimal(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The angle brackets are entity-escaped but the e-with-acute is
        # left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))

    def test_formatter_null(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter=None)
        # Neither the angle brackets nor the e-with-acute are converted.
        # This is not valid HTML, but it's what the user wanted.
        self.assertEqual(decoded,
                          self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))

    def test_formatter_custom(self):
        markup = u"<b><foo></b><b>bar</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter = lambda x: x.upper())
        # Instead of normal entity conversion code, the custom
        # callable is called on every string.
        self.assertEqual(
            decoded,
            self.document_for(u"<b><FOO></b><b>BAR</b>"))

    def test_formatter_is_run_on_attribute_values(self):
        markup = u'<a href="http://a.com?a=b&c=é">e</a>'
        soup = self.soup(markup)
        a = soup.a
        expect_minimal = u'<a href="http://a.com?a=b&c=é">e</a>'
        self.assertEqual(expect_minimal, a.decode())
        self.assertEqual(expect_minimal, a.decode(formatter="minimal"))
        expect_html = u'<a href="http://a.com?a=b&c=é">e</a>'
        self.assertEqual(expect_html, a.decode(formatter="html"))
        # formatter=None passes attribute values through untouched.
        self.assertEqual(markup, a.decode(formatter=None))
        expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>'
        self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper()))

    def test_prettify_accepts_formatter(self):
        soup = BeautifulSoup("<html><body>foo</body></html>")
        pretty = soup.prettify(formatter = lambda x: x.upper())
        self.assertTrue("FOO" in pretty)

    def test_prettify_outputs_unicode_by_default(self):
        soup = self.soup("<a></a>")
        self.assertEqual(unicode, type(soup.prettify()))

    def test_prettify_can_encode_data(self):
        # Passing an encoding makes prettify() return bytes instead.
        soup = self.soup("<a></a>")
        self.assertEqual(bytes, type(soup.prettify("utf-8")))

    def test_html_entity_substitution_off_by_default(self):
        markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
        soup = self.soup(markup)
        encoded = soup.b.encode("utf-8")
        self.assertEqual(encoded, markup.encode('utf-8'))

    def test_encoding_substitution(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')
        soup = self.soup(meta_tag)
        # Parse the document, and the charset appears unchanged.
        self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis')
        # Encode the document into some encoding, and the encoding is
        # substituted into the meta tag.
        utf_8 = soup.encode("utf-8")
        self.assertTrue(b"charset=utf-8" in utf_8)
        euc_jp = soup.encode("euc_jp")
        self.assertTrue(b"charset=euc_jp" in euc_jp)
        shift_jis = soup.encode("shift-jis")
        self.assertTrue(b"charset=shift-jis" in shift_jis)
        utf_16_u = soup.encode("utf-16").decode("utf-16")
        self.assertTrue("charset=utf-16" in utf_16_u)

    def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
        markup = ('<head><meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/></head><pre>foo</pre>')
        # Beautiful Soup used to try to rewrite the meta tag even if the
        # meta tag got filtered out by the strainer. This test makes
        # sure that doesn't happen.
        strainer = SoupStrainer('pre')
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
    """Test the ability to encode objects into strings."""

    def test_unicode_string_can_be_encoded(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(soup.b.string.encode("utf-8"),
                          u"\N{SNOWMAN}".encode("utf-8"))

    def test_tag_containing_unicode_string_can_be_encoded(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            soup.b.encode("utf-8"), html.encode("utf-8"))

    def test_encoding_substitutes_unrecognized_characters_by_default(self):
        # Characters the target charset cannot represent are replaced
        # rather than raising, unless errors="strict" is requested.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")

    def test_encoding_can_be_made_strict(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertRaises(
            UnicodeEncodeError, soup.encode, "ascii", errors="strict")

    def test_decode_contents(self):
        # decode_contents() renders only the children, not the tag itself.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents())

    def test_encode_contents(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents(
                encoding="utf8"))

    def test_deprecated_renderContents(self):
        # BS3 alias for encode_contents().
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())
class TestNavigableStringSubclasses(SoupTest):
    """Tests for NavigableString subclasses such as CData and Doctype."""

    def test_cdata(self):
        # None of the current builders turn CDATA sections into CData
        # objects, but you can create them manually.
        soup = self.soup("")
        cdata = CData("foo")
        soup.insert(1, cdata)
        self.assertEqual(str(soup), "<![CDATA[foo]]>")
        self.assertEqual(soup.find(text="foo"), "foo")
        self.assertEqual(soup.contents[0], "foo")

    def test_cdata_is_never_formatted(self):
        """Text inside a CData object is passed into the formatter.

        But the return value is ignored.
        """

        self.count = 0
        def increment(*args):
            # Side-effecting formatter: counts calls and returns garbage
            # that must NOT appear in the output.
            self.count += 1
            return "BITTER FAILURE"

        soup = self.soup("")
        cdata = CData("<><><>")
        soup.insert(1, cdata)
        self.assertEqual(
            b"<![CDATA[<><><>]]>", soup.encode(formatter=increment))
        self.assertEqual(1, self.count)

    def test_doctype_ends_in_newline(self):
        # Unlike other NavigableString subclasses, a DOCTYPE always ends
        # in a newline.
        doctype = Doctype("foo")
        soup = self.soup("")
        soup.insert(1, doctype)
        self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
class TestSoupSelector(TreeTest):
    """Tests for the CSS selector engine behind soup.select()."""

    # Fixture document exercising ids, classes, attribute selectors,
    # nesting and lang| matching.
    HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<div id="main">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
</span>
</div>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""

    def setUp(self):
        self.soup = BeautifulSoup(self.HTML)

    def assertSelects(self, selector, expected_ids):
        # Compare only the 'id' attributes of the matched tags; order is
        # irrelevant, so both sides are sorted first.
        el_ids = [el['id'] for el in self.soup.select(selector)]
        el_ids.sort()
        expected_ids.sort()
        self.assertEqual(expected_ids, el_ids,
            "Selector %s, expected [%s], got [%s]" % (
                selector, ', '.join(expected_ids), ', '.join(el_ids)
            )
        )

    assertSelect = assertSelects

    def assertSelectMultiple(self, *tests):
        for selector, expected_ids in tests:
            self.assertSelect(selector, expected_ids)

    def test_one_tag_one(self):
        els = self.soup.select('title')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].name, 'title')
        self.assertEqual(els[0].contents, [u'The title'])

    def test_one_tag_many(self):
        els = self.soup.select('div')
        self.assertEqual(len(els), 3)
        for div in els:
            self.assertEqual(div.name, 'div')

    def test_tag_in_tag_one(self):
        els = self.soup.select('div div')
        self.assertSelects('div div', ['inner'])

    def test_tag_in_tag_many(self):
        for selector in ('html div', 'html body div', 'body div'):
            self.assertSelects(selector, ['main', 'inner', 'footer'])

    def test_tag_no_match(self):
        self.assertEqual(len(self.soup.select('del')), 0)

    def test_invalid_tag(self):
        # An unparseable tag name simply matches nothing.
        self.assertEqual(len(self.soup.select('tag%t')), 0)

    def test_header_tags(self):
        self.assertSelectMultiple(
            ('h1', ['header1']),
            ('h2', ['header2', 'header3']),
        )

    def test_class_one(self):
        for selector in ('.onep', 'p.onep', 'html p.onep'):
            els = self.soup.select(selector)
            self.assertEqual(len(els), 1)
            self.assertEqual(els[0].name, 'p')
            self.assertEqual(els[0]['class'], ['onep'])

    def test_class_mismatched_tag(self):
        els = self.soup.select('div.onep')
        self.assertEqual(len(els), 0)

    def test_one_id(self):
        for selector in ('div#inner', '#inner', 'div div#inner'):
            self.assertSelects(selector, ['inner'])

    def test_bad_id(self):
        els = self.soup.select('#doesnotexist')
        self.assertEqual(len(els), 0)

    def test_items_in_id(self):
        els = self.soup.select('div#inner p')
        self.assertEqual(len(els), 3)
        for el in els:
            self.assertEqual(el.name, 'p')
        self.assertEqual(els[1]['class'], ['onep'])
        self.assertFalse(els[0].has_key('class'))

    def test_a_bunch_of_emptys(self):
        for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
            self.assertEqual(len(self.soup.select(selector)), 0)

    def test_multi_class_support(self):
        # A single-class selector matches a tag with multiple classes.
        for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
            '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
            self.assertSelects(selector, ['pmulti'])

    def test_multi_class_selection(self):
        for selector in ('.class1.class3', '.class3.class2',
                         '.class1.class2.class3'):
            self.assertSelects(selector, ['pmulti'])

    def test_child_selector(self):
        self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
        self.assertSelects('.s1 > a span', ['s1a2s1'])

    def test_attribute_equals(self):
        self.assertSelectMultiple(
            ('p[class="onep"]', ['p1']),
            ('p[id="p1"]', ['p1']),
            ('[class="onep"]', ['p1']),
            ('[id="p1"]', ['p1']),
            ('link[rel="stylesheet"]', ['l1']),
            ('link[type="text/css"]', ['l1']),
            ('link[href="blah.css"]', ['l1']),
            ('link[href="no-blah.css"]', []),
            ('[rel="stylesheet"]', ['l1']),
            ('[type="text/css"]', ['l1']),
            ('[href="blah.css"]', ['l1']),
            ('[href="no-blah.css"]', []),
            ('p[href="no-blah.css"]', []),
            ('[href="no-blah.css"]', []),
        )

    def test_attribute_tilde(self):
        # [attr~="v"]: attribute value, treated as a list, contains "v".
        self.assertSelectMultiple(
            ('p[class~="class1"]', ['pmulti']),
            ('p[class~="class2"]', ['pmulti']),
            ('p[class~="class3"]', ['pmulti']),
            ('[class~="class1"]', ['pmulti']),
            ('[class~="class2"]', ['pmulti']),
            ('[class~="class3"]', ['pmulti']),
            ('a[rel~="friend"]', ['bob']),
            ('a[rel~="met"]', ['bob']),
            ('[rel~="friend"]', ['bob']),
            ('[rel~="met"]', ['bob']),
        )

    def test_attribute_startswith(self):
        self.assertSelectMultiple(
            ('[rel^="style"]', ['l1']),
            ('link[rel^="style"]', ['l1']),
            ('notlink[rel^="notstyle"]', []),
            ('[rel^="notstyle"]', []),
            ('link[rel^="notstyle"]', []),
            ('link[href^="bla"]', ['l1']),
            ('a[href^="http://"]', ['bob', 'me']),
            ('[href^="http://"]', ['bob', 'me']),
            ('[id^="p"]', ['pmulti', 'p1']),
            ('[id^="m"]', ['me', 'main']),
            ('div[id^="m"]', ['main']),
            ('a[id^="m"]', ['me']),
        )

    def test_attribute_endswith(self):
        self.assertSelectMultiple(
            ('[href$=".css"]', ['l1']),
            ('link[href$=".css"]', ['l1']),
            ('link[id$="1"]', ['l1']),
            ('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']),
            ('div[id$="1"]', []),
            ('[id$="noending"]', []),
        )

    def test_attribute_contains(self):
        self.assertSelectMultiple(
            # From test_attribute_startswith
            ('[rel*="style"]', ['l1']),
            ('link[rel*="style"]', ['l1']),
            ('notlink[rel*="notstyle"]', []),
            ('[rel*="notstyle"]', []),
            ('link[rel*="notstyle"]', []),
            ('link[href*="bla"]', ['l1']),
            ('a[href*="http://"]', ['bob', 'me']),
            ('[href*="http://"]', ['bob', 'me']),
            ('[id*="p"]', ['pmulti', 'p1']),
            ('div[id*="m"]', ['main']),
            ('a[id*="m"]', ['me']),
            # From test_attribute_endswith
            ('[href*=".css"]', ['l1']),
            ('link[href*=".css"]', ['l1']),
            ('link[id*="1"]', ['l1']),
            ('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']),
            ('div[id*="1"]', []),
            ('[id*="noending"]', []),
            # New for this test
            ('[href*="."]', ['bob', 'me', 'l1']),
            ('a[href*="."]', ['bob', 'me']),
            ('link[href*="."]', ['l1']),
            ('div[id*="n"]', ['main', 'inner']),
            ('div[id*="nn"]', ['inner']),
        )

    def test_attribute_exact_or_hypen(self):
        # [attr|="v"]: value is exactly "v" or begins with "v-".
        self.assertSelectMultiple(
            ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
            ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
            ('p[lang|="fr"]', ['lang-fr']),
            ('p[lang|="gb"]', []),
        )

    def test_attribute_exists(self):
        self.assertSelectMultiple(
            ('[rel]', ['l1', 'bob', 'me']),
            ('link[rel]', ['l1']),
            ('a[rel]', ['bob', 'me']),
            ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
            ('p[class]', ['p1', 'pmulti']),
            ('[blah]', []),
            ('p[blah]', []),
        )

    def test_select_on_element(self):
        # Other tests operate on the tree; this operates on an element
        # within the tree.
        inner = self.soup.find("div", id="main")
        selected = inner.select("div")
        # The <div id="inner"> tag was selected. The <div id="footer">
        # tag was not.
        self.assertSelectsIDs(selected, ['inner'])
| gpl-3.0 |
Foxugly/MyTaxAccountant | fileupload/response.py | 1 | 1480 | # encoding: utf-8
#
# Copyright 2015, Foxugly. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
from django.http import HttpResponse
import json
MIMEANY = '*/*'
MIMEJSON = 'application/json'
MIMETEXT = 'text/plain'


def response_mimetype(request):
    """response_mimetype -- Return a proper response mimetype, accordingly to
    what the client accepts, as available in the `HTTP_ACCEPT` header.

    Returns ``application/json`` when the client accepts JSON (explicitly
    or via ``*/*``), and ``text/plain`` otherwise.

    request -- a HttpRequest instance.
    """
    # BUG FIX: the Accept header is optional, so a plain dict lookup of
    # META['HTTP_ACCEPT'] could raise KeyError; default to ''.
    accept = request.META.get('HTTP_ACCEPT', '')
    can_json = MIMEJSON in accept
    can_json |= MIMEANY in accept
    return MIMEJSON if can_json else MIMETEXT
class JSONResponse(HttpResponse):
    """An HttpResponse whose body is the JSON serialization of an object.

    Usable from any view that should return a JSON stream of data.

    Usage:

        def a_view(request):
            content = {'key': 'value'}
            return JSONResponse(content, mimetype=response_mimetype(request))
    """

    def __init__(self, obj='', json_opts=None, mimetype=MIMEJSON, *args, **kwargs):
        # Anything that is not a dict (including None) means "no options".
        if not isinstance(json_opts, dict):
            json_opts = {}
        payload = json.dumps(obj, **json_opts)
        super(JSONResponse, self).__init__(payload, mimetype, *args, **kwargs)
| agpl-3.0 |
mikehankey/fireball_camera | mknewdevice.py | 1 | 3098 | #!/usr/bin/python3
import requests, json
import sys
import netifaces
import os
import os.path
import settings
from datetime import datetime

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), './config')))
from config_func import add_to_config, read_config, get_device_info_and_config

# Registers this camera with the AMS server, records the assigned device id
# in the hostname file and seeds config.txt with network/registration info.

output = ""

# Test if config exists - create it if it doesn't
# (this way we don't have to create the config file manually)
if os.path.isfile('config.txt'):
    config = read_config()
    output = output + os.linesep + 'WARNING: config.txt already exists'
else:
    # Close the handle immediately; we only need the file to exist.
    with open('config.txt', 'a'):
        pass
    output = output + os.linesep + 'CREATION of config.txt'

# Get info to Register new device with AMS
eth0_mac = netifaces.ifaddresses('eth0')[netifaces.AF_LINK][0]['addr']
wlan0_mac = netifaces.ifaddresses('wlan0')[netifaces.AF_LINK][0]['addr']
try:
    eth0_ip = netifaces.ifaddresses('eth0')[netifaces.AF_INET][0]['addr']
except (ValueError, KeyError, IndexError):
    # Interface missing or no IPv4 address assigned.
    eth0_ip = "0.0.0.0"
try:
    wlan0_ip = netifaces.ifaddresses('wlan0')[netifaces.AF_INET][0]['addr']
except (ValueError, KeyError, IndexError):
    wlan0_ip = "0.0.0.0"

output = output + os.linesep + "Device/Network Info"
output = output + os.linesep + "ETH0 MAC: " + eth0_mac
output = output + os.linesep + "WLAN MAC: " + wlan0_mac
output = output + os.linesep + "ETH0 IP (LAN IP): " + eth0_ip
output = output + os.linesep + "WLAN IP: " + wlan0_ip

try:
    # BUG FIX: the original URL was missing the '&' before 'wlan_ip=',
    # which corrupted the lan_ip value and dropped wlan_ip entirely.
    r = requests.get(settings.API_SERVER + 'members/api/cam_api/mkdevice?format=json'
                     '&LAN_MAC=' + eth0_mac + '&WLAN_MAC=' + wlan0_mac +
                     '&lan_ip=' + eth0_ip + '&wlan_ip=' + wlan0_ip)
    with open("register.txt", "w+") as fp:
        fp.write(r.text)
except requests.exceptions.RequestException:
    # Without a registration response we cannot continue (the code below
    # would fail with a NameError on 'r'), so report and stop.
    output = output + os.linesep + "mknewdevice failed"
    print(output)
    sys.exit(1)

data = json.loads(r.text)
try:
    if data['errors']['Invalid_data'] == 'LAN_MAC WLAN_MAC combination must be unique.':
        output = output + os.linesep + "Device already exist!"
    else:
        output = output + os.linesep + "Device created."
except (KeyError, TypeError):
    # No 'errors' section in the response: the device was created.
    output = output + os.linesep + "Device Created."

# LOG IP OF DEVICE.
msg = "lan_ip=" + eth0_ip + ":wlan_ip=" + wlan0_ip
r = requests.post(settings.API_SERVER + 'members/api/cam_api/addLog',
                  data={'LAN_MAC': eth0_mac, 'WLAN_MAC': wlan0_mac, 'msg': msg})
res = r.text
# The log endpoint replies with "... device_id: <id>"; use that id to
# build the camera's hostname ("ams<id>").
_, dev_id = res.split("device_id: ")
dev_id = dev_id.rstrip("\n")
hostname = "ams" + dev_id
with open("/home/pi/fireball_camera/host", "w+") as out:
    out.write(hostname)
os.system("sudo cp /home/pi/fireball_camera/host /etc/hostname")
output = output + os.linesep + 'Hostname updated:' + " ams" + dev_id

# Here we populate the config file with the info we got (and we need...)
add_to_config('lan_ip', eth0_ip)
add_to_config('device_id', dev_id)
add_to_config('hd', 0)
add_to_config('wlan_ip', wlan0_ip)
add_to_config('wlan_mac', wlan0_mac)
add_to_config('lan_mac', eth0_mac)
i = datetime.now()
add_to_config('reg_date', i.strftime('%Y-%m-%d %H:%M:%S'))
p = read_config()
output = output + os.linesep + "Config file updated."

# Get Info from the API in case the cam already has info in the database
# update the config file accordingly
get_device_info_and_config()
print(output) | gpl-3.0 |
GirlsCodePy/girlscode-coursebuilder | tests/functional/model_data_sources.py | 3 | 26154 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests exercising the analytics internals (not individual analytics)."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import time
from webtest import app
from common import catch_and_log
from common import crypto
from common import utils as common_utils
from models import data_sources
from models import entities
from models import transforms
from models.data_sources import utils as data_sources_utils
from google.appengine.ext import db
# Data source must be registered before we import actions; actions imports
# 'main', which does all setup and registration in package scope.
class Character(entities.BaseEntity):
    """Sample datastore entity used to exercise the data-source framework."""

    user_id = db.StringProperty(indexed=True)
    goal = db.StringProperty(indexed=True)
    name = db.StringProperty(indexed=False)
    age = db.IntegerProperty(indexed=False)
    rank = db.IntegerProperty(indexed=True)

    # 'name' must never appear in exported/analytics data.
    _PROPERTY_EXPORT_BLACKLIST = [name]

    def for_export(self, transform_fn):
        """Return an export-safe copy with the PII user_id obfuscated."""
        model = super(Character, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        return model

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Return a copy of db_key with its id/name obfuscated by transform_fn."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class CharacterDataSource(data_sources.AbstractDbTableRestDataSource):
    """Exposes the Character entity via the paginated REST data-source API."""

    @classmethod
    def get_name(cls):
        # Name under which this source is served: /rest/data/character/items.
        return 'character'

    @classmethod
    def get_entity_class(cls):
        return Character
from tests.functional import actions
class DataSourceTest(actions.TestBase):
def setUp(self):
super(DataSourceTest, self).setUp()
with common_utils.Namespace(self.NAMESPACE):
self.characters = [
Character(
user_id='001', goal='L', rank=4, age=8, name='Charlie'),
Character(
user_id='002', goal='L', rank=6, age=6, name='Sally'),
Character(
user_id='003', goal='L', rank=0, age=8, name='Lucy'),
Character(
user_id='004', goal='G', rank=2, age=7, name='Linus'),
Character(
user_id='005', goal='G', rank=8, age=8, name='Max'),
Character(
user_id='006', goal='G', rank=1, age=8, name='Patty'),
Character(
user_id='007', goal='R', rank=9, age=35, name='Othmar'),
Character(
user_id='008', goal='R', rank=5, age=2, name='Snoopy'),
Character(
user_id='009', goal='R', rank=7, age=8, name='Pigpen'),
Character(
user_id='010', goal='R', rank=3, age=8, name='Violet'),
]
for c in self.characters:
c.put()
def tearDown(self):
with common_utils.Namespace(self.NAMESPACE):
db.delete(Character.all(keys_only=True).run())
super(DataSourceTest, self).tearDown()
class PiiExportTest(DataSourceTest):
    """Verifies that PII fields are only exported when explicitly requested."""

    COURSE_NAME = 'test_course'
    ADMIN_EMAIL = 'admin@foo.com'
    NAMESPACE = 'ns_' + COURSE_NAME

    def setUp(self):
        super(PiiExportTest, self).setUp()
        self.app_context = actions.simple_add_course(
            self.COURSE_NAME, self.ADMIN_EMAIL, 'The Course')
        # Default context: PII censored unless send_uncensored_pii_data set.
        self.data_source_context = (
            CharacterDataSource.get_context_class().build_blank_default({}, 20))

    def test_get_non_pii_data(self):
        data = self._get_page_data(0)
        self.assertEquals(10, len(data))
        for item in data:
            self.assertNotIn('name', item)

    def test_get_non_pii_schema(self):
        schema = self._get_schema()
        self.assertNotIn('name', schema)

    def test_get_pii_data(self):
        # Opting in to uncensored PII makes the blacklisted field appear.
        self.data_source_context.send_uncensored_pii_data = True
        data = self._get_page_data(0)
        self.assertEquals(10, len(data))
        for item in data:
            self.assertIn('name', item)

    def test_get_pii_schema(self):
        self.data_source_context.send_uncensored_pii_data = True
        schema = self._get_schema()
        self.assertIn('name', schema)

    def _get_schema(self):
        """Fetch the data source's schema under the current context."""
        log = catch_and_log.CatchAndLog()
        schema = CharacterDataSource.get_schema(
            self.app_context, log, self.data_source_context)
        return schema

    def _get_page_data(self, page_number):
        """Fetch one page of values under the current context."""
        log = catch_and_log.CatchAndLog()
        schema = self._get_schema()
        data, _ = CharacterDataSource.fetch_values(
            self.app_context, self.data_source_context, schema, log,
            page_number)
        return data
class PaginatedTableTest(DataSourceTest):
"""Verify operation of paginated access to AppEngine DB tables."""
NAMESPACE = ''
    def test_simple_read(self):
        # With no parameters, an admin fetch returns every row plus the
        # schema, log, context and (empty) filter/ordering params.
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        response = transforms.loads(self.get('/rest/data/character/items').body)
        self.assertIn('data', response)
        self._verify_data(self.characters, response['data'])
        self.assertIn('schema', response)
        self.assertIn('user_id', response['schema'])
        self.assertIn('age', response['schema'])
        self.assertIn('rank', response['schema'])
        self.assertNotIn('name', response['schema'])  # blacklisted
        self.assertIn('log', response)
        self.assertIn('source_context', response)
        self.assertIn('params', response)
        self.assertEquals([], response['params']['filters'])
        self.assertEquals([], response['params']['orderings'])
    def test_admin_required(self):
        # Unauthenticated access to the REST data source is forbidden.
        with self.assertRaisesRegexp(app.AppError, 'Bad response: 403'):
            self.get('/rest/data/character/items')
    def test_filtered_read(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        # Single greater-equal filter
        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank>=7').body)
        self.assertEquals(3, len(response['data']))
        for character in response['data']:
            self.assertTrue(character['rank'] >= 7)

        # Single less-than filter
        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank<7').body)
        self.assertEquals(7, len(response['data']))
        for character in response['data']:
            self.assertTrue(character['rank'] < 7)

        # Multiple filters finding some rows
        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank<5&filters=goal=L').body)
        self.assertEquals(2, len(response['data']))
        for character in response['data']:
            self.assertTrue(character['rank'] < 5)
            self.assertTrue(character['goal'] == 'L')
    def test_ordered_read(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        # Single ordering by rank
        response = transforms.loads(self.get(
            '/rest/data/character/items?ordering=rank').body)
        self.assertEquals(10, len(response['data']))
        prev_rank = -1
        for character in response['data']:
            self.assertTrue(character['rank'] > prev_rank)
            prev_rank = character['rank']

        # Single ordering by rank, descending ('-' prefix)
        response = transforms.loads(self.get(
            '/rest/data/character/items?ordering=-rank').body)
        self.assertEquals(10, len(response['data']))
        prev_rank = 10
        for character in response['data']:
            self.assertTrue(character['rank'] < prev_rank)
            prev_rank = character['rank']

        # Order by goal then rank
        response = transforms.loads(self.get(
            '/rest/data/character/items?ordering=goal&ordering=rank').body)
        self.assertEquals(10, len(response['data']))
        prev_goal = 'A'
        prev_rank = -1
        for character in response['data']:
            self.assertTrue(character['goal'] >= prev_goal)
            if character['goal'] != prev_goal:
                # Rank ordering restarts within each new goal group.
                prev_rank = -1
                prev_goal = character['goal']
            else:
                self.assertTrue(character['rank'] > prev_rank)
                prev_rank = character['rank']
    def test_filtered_and_ordered(self):
        # Filters and orderings can be combined in a single request.
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank<7&ordering=rank').body)
        self.assertEquals(7, len(response['data']))
        prev_rank = -1
        for character in response['data']:
            self.assertTrue(character['rank'] > prev_rank)
            self.assertTrue(character['rank'] < 7)
    def test_illegal_filters_and_orderings(self):
        # Malformed or unsupported filter/ordering specs must be reported
        # as critical log entries, not server errors.
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=foo').body)
        self._assert_have_critical_error(
            response,
            'Filter specification "foo" is not of the form: <name><op><value>')

        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=foo=9').body)
        self._assert_have_critical_error(
            response,
            'field "foo" which is not in the schema for type "Character"')

        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank=kitten').body)
        self._assert_have_critical_error(
            response,
            'invalid literal for int() with base 10: \'kitten\'')

        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank<<7').body)
        self._assert_have_critical_error(
            response,
            '"rank<<7" uses an unsupported comparison operation "<<"')

        response = transforms.loads(self.get(
            '/rest/data/character/items?ordering=foo').body)
        self._assert_have_critical_error(
            response,
            'Invalid property name \'foo\'')

        response = transforms.loads(self.get(
            '/rest/data/character/items?ordering=age').body)
        self._assert_have_critical_error(
            response,
            'Property \'age\' is not indexed')

        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=age>5').body)
        self._assert_have_critical_error(
            response,
            'Property \'age\' is not indexed')

        # Datastore restriction: inequality filter property must also be
        # the first ordering property.
        response = transforms.loads(self.get(
            '/rest/data/character/items?filters=rank<7&ordering=goal').body)
        self._assert_have_critical_error(
            response,
            'First ordering property must be the same as inequality filter')
def _assert_have_critical_error(self, response, expected_message):
email = 'admin@google.com'
actions.login(email, is_admin=True)
for log in response['log']:
if (log['level'] == 'critical' and
expected_message in log['message']):
return
self.fail('Expected a critical error containing "%s"' %
expected_message)
    def test_pii_encoding(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        token = data_sources_utils.generate_data_source_token(
            crypto.XsrfTokenManager)

        response = transforms.loads(self.get('/rest/data/character/items').body)
        for d in response['data']:
            # Ensure that field marked as needing transformation is cleared
            # when we don't pass in an XSRF token used for generating a secret
            # for encrypting.
            self.assertEquals('None', d['user_id'])
            self.assertEquals(str(db.Key.from_path(Character.kind(), 'None')),
                              d['key'])

            # Ensure that field marked for blacklist is suppressed.
            self.assertFalse('name' in d)

        response = transforms.loads(self.get(
            '/rest/data/character/items?data_source_token=' + token).body)
        for d in response['data']:
            # With a valid token, PII fields are present but obfuscated
            # (encrypted with the token-derived secret) rather than cleared.
            self.assertIsNotNone(d['user_id'])
            self.assertNotEquals('None', d['key'])

            # Ensure that field marked for blacklist is still suppressed.
            self.assertFalse('name' in d)
def test_pii_encoding_changes(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
token1 = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
time.sleep(1) # Legit: XSRF token is time-based, so will change.
token2 = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
self.assertNotEqual(token1, token2)
response1 = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token1).body)
response2 = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token2).body)
for c1, c2 in zip(response1['data'], response2['data']):
self.assertNotEquals(c1['user_id'], c2['user_id'])
self.assertNotEquals(c1['key'], c2['key'])
def test_sequential_pagination(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=0').body)
source_context = response['source_context']
self.assertEquals(0, response['page_number'])
self._verify_data(self.characters[:3], response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=1'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(1, response['page_number'])
self._verify_data(self.characters[3:6], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=2'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(2, response['page_number'])
self._verify_data(self.characters[6:9], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 2 start cursor present; end cursor missing',
'fetch page 2 using limit 3',
'fetch page 2 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=3'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(3, response['page_number'])
self._verify_data(self.characters[9:], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 3 start cursor present; end cursor missing',
'fetch page 3 using limit 3',
'fetch page 3 is partial; not saving end cursor',
])
def test_non_present_page_request(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=9&page_number=5').body)
self._verify_data(self.characters[9:], response['data'])
self.assertEquals(1, response['page_number'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 9',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 9',
'fetch page 1 is partial; not saving end cursor',
'Fewer pages available than requested. Stopping at last page 1',
])
def test_empty_last_page_request(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=10&page_number=3').body)
self._verify_data([], response['data'])
self.assertEquals(1, response['page_number'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 10',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 10',
'fetch page 1 is partial; not saving end cursor',
'Fewer pages available than requested. Stopping at last page 1',
])
def test_nonsequential_pagination(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=2').body)
source_context = response['source_context']
self.assertEquals(2, response['page_number'])
self._verify_data(self.characters[6:9], response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 saving end cursor',
'fetch page 2 start cursor present; end cursor missing',
'fetch page 2 using limit 3',
'fetch page 2 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=1'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self._verify_data(self.characters[3:6], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 1 start cursor present; end cursor present',
])
def test_pagination_filtering_and_ordering(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filters=rank>=5&ordering=rank'
'&chunk_size=3&page_number=1').body)
source_context = response['source_context']
self.assertEquals(1, response['page_number'])
self._verify_data([self.characters[4], self.characters[6]],
response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 is partial; not saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?filters=rank>=5&ordering=rank'
'&chunk_size=3&page_number=0'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(0, response['page_number'])
self._verify_data([self.characters[7], self.characters[1],
self.characters[8]], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 0 start cursor missing; end cursor present',
])
def test_parameters_can_be_omitted_if_using_source_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filters=rank>=5&ordering=rank'
'&chunk_size=3&page_number=1').body)
source_context = response['source_context']
self._verify_data([self.characters[4], self.characters[6]],
response['data'])
# This should load identical items, without having to respecify
# filters, ordering, chunk_size.
response = transforms.loads(self.get(
'/rest/data/character/items?page_number=1'
'&source_context=%s' % source_context).body)
self.assertEquals(1, response['page_number'])
self._verify_data([self.characters[4], self.characters[6]],
response['data'])
self._assert_have_only_logs(response, [
'Continuing use of existing context',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 is partial; not saving end cursor',
])
def test_build_default_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get('/rest/data/character/items').body)
self._assert_have_only_logs(response, [
'Building new default context',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 10000',
'fetch page 0 is partial; not saving end cursor',
])
def test_change_filtering_invalidates_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filters=rank>=5'
'&chunk_size=3&page_number=0').body)
source_context = response['source_context']
response = transforms.loads(self.get(
'/rest/data/character/items?filters=rank<5'
'&chunk_size=3&page_number=0'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self._verify_data([self.characters[2], self.characters[5],
self.characters[3]], response['data'])
self._assert_have_only_logs(response, [
'Existing context and parameters mismatch; '
'discarding existing and creating new context.',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
def test_change_ordering_invalidates_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=rank'
'&chunk_size=3&page_number=0').body)
source_context = response['source_context']
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=-rank'
'&chunk_size=3&page_number=0'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self._verify_data([self.characters[6], self.characters[4],
self.characters[8]], response['data'])
self._assert_have_only_logs(response, [
'Existing context and parameters mismatch; '
'discarding existing and creating new context.',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
def _assert_have_only_logs(self, response, messages):
for message in messages:
found_index = -1
for index, log in enumerate(response['log']):
if message in log['message']:
found_index = index
break
if found_index < 0:
self.fail('Expected to find message "%s" in logs' % message)
else:
del response['log'][found_index]
if response['log']:
self.fail('Unexpected message "%s"' % response['log'][0])
def _verify_data(self, characters, data):
for c, d in zip(characters, data):
self.assertEquals(c.rank, d['rank'])
self.assertEquals(c.age, d['age'])
| gpl-3.0 |
DmitryADP/diff_qc750 | external/webkit/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 15 | 2325 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import BuilderToPort, Rebaseline
from webkitpy.tool.mocktool import MockTool
class RebaselineTest(unittest.TestCase):
    """Smoke test for the Rebaseline command."""

    def test_tests_to_update(self):
        # We only verify that _tests_to_update() runs without raising;
        # OutputCapture swallows whatever it prints along the way.
        rebaseline_command = Rebaseline()
        rebaseline_command.bind_to_tool(MockTool())
        fake_build = Mock()
        OutputCapture().assert_outputs(
            self, rebaseline_command._tests_to_update, [fake_build])
class BuilderToPortTest(unittest.TestCase):
    """Checks the builder-name -> port mapping."""

    def test_port_for_builder(self):
        # The Leopard debug bot must resolve to the mac-leopard port.
        mapper = BuilderToPort()
        leopard_port = mapper.port_for_builder("Leopard Intel Debug (Tests)")
        self.assertEqual(leopard_port.name(), "mac-leopard")
| gpl-2.0 |
XiaosongWei/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py | 62 | 2696 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
from webkitpy.common.system import outputcapture
from webkitpy.common.system import stack_utils
def current_thread_id():
    """Return the identifier of an arbitrary live thread.

    sys._current_frames() maps thread id -> topmost stack frame; we take
    the first key.  The original indexed dict.items() with [0], which is
    Python-2-only (dict views are not subscriptable on Python 3);
    next(iter(...)) works on both and returns the same first key.
    """
    return next(iter(sys._current_frames()))
class StackUtilsTest(unittest.TestCase):
    """Exercises the helpers in webkitpy.common.system.stack_utils."""

    def test_find_thread_stack_found(self):
        # A live thread id must resolve to a (non-None) stack frame.
        live_thread_id = current_thread_id()
        stack = stack_utils._find_thread_stack(live_thread_id)
        self.assertIsNotNone(stack)

    def test_find_thread_stack_not_found(self):
        # Thread id 0 is never a real thread, so no stack exists for it.
        self.assertIsNone(stack_utils._find_thread_stack(0))

    def test_log_thread_state(self):
        # The logger callback must be invoked at least once.
        captured = []
        stack_utils.log_thread_state(captured.append, "test-thread",
                                     current_thread_id(), "is tested")
        self.assertTrue(captured)

    def test_log_traceback(self):
        # Raise deliberately so a live traceback object is available.
        captured = []
        try:
            raise ValueError
        except:
            stack_utils.log_traceback(captured.append, sys.exc_info()[2])
        self.assertTrue(captured)
| bsd-3-clause |
sjlehtin/django | tests/queries/tests.py | 1 | 160524 | import datetime
import pickle
import unittest
from collections import OrderedDict
from operator import attrgetter
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, F, Q
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import (
FK1, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, Company, Cover, CustomPk, CustomPkTag, Detail, DumbCategory,
Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job,
JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel,
Member, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node,
Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory,
Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program,
ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related,
RelatedIndividual, RelatedObject, Report, ReportComment, ReservedName,
Responsibility, School, SharedConnection, SimpleCategory, SingleObject,
SpecialCategory, Staff, StaffUser, Student, Tag, Task, Teacher,
Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B,
Ticket23605C, TvChef, Valid, X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
    """Build the shared fixture: a small tag tree, notes, annotations,
    authors with extra info, items, reports, a ranking, and covers.

    Object names encode their identity ('t1'..'t5', 'n1'..'n3', ...);
    many tests below assert against the reprs of these objects.
    """
    generic = NamedCategory.objects.create(name="Generic")
    # Tag tree: t1 is the root; t2/t3 are children, t4/t5 children of t3.
    cls.t1 = Tag.objects.create(name='t1', category=generic)
    cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
    cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
    t4 = Tag.objects.create(name='t4', parent=cls.t3)
    cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
    cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
    n2 = Note.objects.create(note='n2', misc='bar', id=2)
    cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
    ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
    ann1.notes.add(cls.n1)
    ann2 = Annotation.objects.create(name='a2', tag=t4)
    ann2.notes.add(n2, cls.n3)
    # Create these out of order so that sorting by 'id' will be different to sorting
    # by 'info'. Helps detect some problems later.
    cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
    e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
    cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
    cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
    a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
    cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
    # Two timestamps on 2007-12-19 and two on 2007-12-20 so date-based
    # ordering and filtering have distinct buckets to distinguish.
    cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
    cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
    time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
    time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
    cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
    cls.i1.tags.set([cls.t1, cls.t2])
    cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
    cls.i2.tags.set([cls.t1, cls.t3])
    cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
    i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
    i4.tags.set([t4])
    cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
    Report.objects.create(name='r2', creator=a3)
    Report.objects.create(name='r3')  # intentionally has no creator
    # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
    # will be rank3, rank2, rank1.
    cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
    Cover.objects.create(title="first", item=i4)
    Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
    """Nested ``__in`` subqueries reuse the subquery alias pool
    (T, U, V, ...) instead of allocating a fresh alias per level."""
    qs1 = Tag.objects.filter(pk__lte=0)
    qs2 = Tag.objects.filter(parent__in=qs1)
    qs3 = Tag.objects.filter(parent__in=qs2)
    self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
    self.assertIn('v0', str(qs3.query).lower())
    qs4 = qs3.filter(parent__in=qs1)
    self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
    # It is possible to reuse U for the second subquery, no need to use W.
    self.assertNotIn('w0', str(qs4.query).lower())
    # So, 'U0."id"' is referenced twice.  The original wrote
    # assertTrue(count, 2), which treats 2 as the failure *message* and
    # passes for any non-zero count; assertEqual actually checks it.
    self.assertEqual(str(qs4.query).lower().count('u0'), 2)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
    """Filtering on a m2m relation uses INNER (never LEFT OUTER) joins,
    and conditions within one filter() call share a single set of
    joins while separate filter() calls join the relation again."""
    # Checking that no join types are "left outer" joins.
    query = Item.objects.filter(tags=self.t2).query
    self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
    self.assertQuerysetEqual(
        Item.objects.filter(Q(tags=self.t1)).order_by('name'),
        ['<Item: one>', '<Item: two>']
    )
    self.assertQuerysetEqual(
        Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
        ['<Item: one>']
    )
    self.assertQuerysetEqual(
        Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
        ['<Item: one>']
    )
    # Each filter call is processed "at once" against a single table, so this is
    # different from the previous example as it tries to find tags that are two
    # things at once (rather than two tags).
    self.assertQuerysetEqual(
        Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
        []
    )
    self.assertQuerysetEqual(
        Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
        []
    )
    qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
    self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
    # Both conditions in one filter() call share the Ranking join: two tables.
    # The original passed a stray third argument (2) which unittest treats as
    # the failure message; it is dropped here.
    self.assertEqual(2, qs.query.count_active_tables())
    qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
    # Chained filter() calls join Ranking a second time: three active tables.
    self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name', 'foo')
.distinct()
.count()
),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name')
.distinct()
.count()
),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.alias_map if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_get_clears_ordering(self):
    """
    get() should clear ordering for optimization purposes.
    """
    # get() fetches a single row, so any ORDER BY in the generated SQL is
    # wasted work; capture the executed queries and check none was emitted.
    with CaptureQueriesContext(connection) as captured_queries:
        Author.objects.order_by('name').get(pk=self.a1.pk)
    self.assertNotIn('order by', captured_queries[0]['sql'].lower())
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
self.assertEqual(
len([
x for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_avoid_infinite_loop_on_too_many_subqueries(self):
    """Deeply nested pk__in=<queryset> filters must raise RuntimeError
    rather than recurse without bound."""
    x = Tag.objects.filter(pk=1)
    # Mirrors the ORM's internal subquery-nesting cap; nesting to twice
    # that depth guarantees the limit is hit.
    local_recursion_limit = 127
    msg = 'Maximum recursion depth exceeded: too many subqueries.'
    with self.assertRaisesMessage(RuntimeError, msg):
        for i in range(local_recursion_limit * 2):
            x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases, {
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
}
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
Note.objects.filter({'note': 'n1', 'misc': 'foo'})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
    """
    select_related() honors related-model default ordering, and each row
    carries the correct, distinct related Note instance.
    """
    qs = Item.objects.select_related().order_by('note__note', 'name')
    self.assertQuerysetEqual(
        qs,
        ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
    )
    # This is also a good select_related() test because there are multiple
    # Note entries in the SQL. The two Note items should be different.
    # Bug fix: the original used assertTrue(a, b), which treats the second
    # argument as the failure *message* and therefore always passed; use
    # assertEqual so the comparison is actually performed.
    self.assertEqual(repr(qs[0].note), '<Note: n2>')
    self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
    # OR-ing two multi-keyword Q objects spanning a relation.
    condition = Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')
    self.assertQuerysetEqual(Item.objects.filter(condition), ['<Item: four>'])
def test_tickets_5321_7070(self):
    # Ordering columns must be part of the output columns, so rows that
    # would otherwise collapse under distinct() can survive when the
    # ordering column has several values. Not a bug -- pick ordering
    # columns with care.
    self.assertSequenceEqual(
        Note.objects.values('misc').distinct().order_by('note', '-misc'),
        [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}],
    )
def test_ticket4358(self):
    # Bare values() returns relation fields under "foo_id" keys, but both
    # "foo_id" and plain "foo" may be requested explicitly.
    self.assertIn('note_id', ExtraInfo.objects.values()[0])
    self.assertSequenceEqual(
        ExtraInfo.objects.values('note_id'),
        [{'note_id': 1}, {'note_id': 2}],
    )
    self.assertSequenceEqual(
        ExtraInfo.objects.values('note'),
        [{'note': 1}, {'note': 2}],
    )
def test_ticket2902(self):
    # Parameters can be given to extra_select, *if* you use an OrderedDict.
    # (First we need to know which order the keys fall in "naturally" on
    # your system, so we can put things in the wrong way around from
    # normal. A normal dict would thus fail.)
    s = [('a', '%s'), ('b', '%s')]
    params = ['one', 'two']
    # Deliberately disagree with the dict's natural key order so only an
    # order-preserving mapping can pair each placeholder with its param.
    if list({'a': 1, 'b': 2}) == ['a', 'b']:
        s.reverse()
        params.reverse()
    d = Item.objects.extra(select=OrderedDict(s), select_params=params).values('a', 'b')[0]
    self.assertEqual(d, {'a': 'one', 'b': 'two'})
    # Order by the number of tags attached to an item.
    qs = (
        Item.objects
        .extra(select={
            'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'
        })
        .order_by('-count')
    )
    # Extra-select columns become attributes on the result instances.
    self.assertEqual([o.count for o in qs], [2, 2, 1, 0])
def test_ticket6154(self):
    # Successive filter() calls always combine with AND, no matter which
    # call carries the disjunction.
    disjunction = Q(extra__note=self.n1) | Q(item__note=self.n3)
    self.assertQuerysetEqual(
        Author.objects.filter(id=self.a1.id).filter(disjunction),
        ['<Author: a1>'],
    )
    self.assertQuerysetEqual(
        Author.objects.filter(disjunction).filter(id=self.a1.id),
        ['<Author: a1>'],
    )
def test_ticket6981(self):
    # select_related() on a nullable self-referential FK.
    expected = ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
    self.assertQuerysetEqual(
        Tag.objects.select_related('parent').order_by('name'),
        expected,
    )
def test_ticket9926(self):
    # select_related() accepts several relations, including nested paths.
    all_tags = ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
    self.assertQuerysetEqual(
        Tag.objects.select_related("parent", "category").order_by('name'),
        all_tags,
    )
    self.assertQuerysetEqual(
        Tag.objects.select_related('parent', "parent__category").order_by('name'),
        all_tags,
    )
def test_tickets_6180_6203(self):
    # datetimes() supports counts, limits, len() and indexing.
    self.assertEqual(Item.objects.count(), 4)
    self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
    day_qs = Item.objects.datetimes('created', 'day')
    self.assertEqual(day_qs.count(), 2)
    self.assertEqual(len(day_qs), 2)
    self.assertEqual(day_qs[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
    # datetimes() composes with extra(select=...) in either order.
    both_days = ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
    self.assertQuerysetEqual(
        Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
        both_days,
    )
    self.assertQuerysetEqual(
        Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
        both_days,
    )
    # The same holds for extra(where=...) restrictions.
    name = "one"
    first_day = ['datetime.datetime(2007, 12, 19, 0, 0)']
    self.assertQuerysetEqual(
        Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
        first_day,
    )
    self.assertQuerysetEqual(
        Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
        first_day,
    )
def test_ticket7155(self):
    # datetimes() copes with a nullable DateTimeField.
    self.assertQuerysetEqual(
        Item.objects.datetimes('modified', 'day'),
        ['datetime.datetime(2007, 12, 19, 0, 0)'],
    )
def test_ticket7098(self):
    # The semi-deprecated "order by raw column name" syntax still works.
    self.assertSequenceEqual(
        Item.objects.values('note__note').order_by('queries_note.note', 'id'),
        [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}],
    )
def test_ticket7096(self):
    # exclude() with several conditions negates their conjunction.
    self.assertQuerysetEqual(
        Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
        ['<Tag: t3>'],
    )
    self.assertQuerysetEqual(
        Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>'],
    )
    self.assertQuerysetEqual(
        Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
        ['<Item: four>', '<Item: three>', '<Item: two>'],
    )
    self.assertQuerysetEqual(
        Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
        ['<Item: four>', '<Item: three>'],
    )

    # Twistier cases involving nested negations.
    self.assertQuerysetEqual(
        Item.objects.exclude(~Q(tags__name='t1', name='one')),
        ['<Item: one>'],
    )
    self.assertQuerysetEqual(
        Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
        ['<Item: two>'],
    )
    self.assertQuerysetEqual(
        Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
        ['<Item: four>', '<Item: one>', '<Item: three>'],
    )
def test_tickets_7204_7506(self):
    # Pickling a queryset that involves related fields must not crash;
    # merely surviving the dump is the point.
    pickle.dumps(Item.objects.all())
def test_ticket7813(self):
    # select_related() state must survive a pickle round-trip: the
    # restored query has to compile to exactly the same SQL.
    qs = Item.objects.select_related()
    original_sql = qs.query.get_compiler(qs.db).as_sql()[0]
    restored = pickle.loads(pickle.dumps(qs.query))
    self.assertEqual(restored.get_compiler(qs.db).as_sql()[0], original_sql)
def test_deferred_load_qs_pickling(self):
    # Deferred-loading querysets round-trip through pickle with both the
    # default and the highest protocol.
    qs = Item.objects.defer('name', 'creator')
    for protocol in (pickle.DEFAULT_PROTOCOL, pickle.HIGHEST_PROTOCOL):
        clone = pickle.loads(pickle.dumps(qs, protocol))
        self.assertEqual(list(qs), list(clone))
def test_ticket7277(self):
    # Filtering a reverse-relation manager with a multi-level disjunction.
    matches_t5 = (
        Q(tag=self.t5) |
        Q(tag__children=self.t5) |
        Q(tag__children__children=self.t5)
    )
    self.assertQuerysetEqual(
        self.n1.annotation_set.filter(matches_t5),
        ['<Annotation: a1>'],
    )
def test_tickets_7448_7707(self):
    # Complex lookup values (datetimes here) are converted to strings
    # before being used in the query.
    self.assertQuerysetEqual(
        Item.objects.filter(created__in=[self.time1, self.time2]),
        ['<Item: one>', '<Item: two>'],
    )
def test_ticket7235(self):
    # An EmptyQuerySet should not raise exceptions if it is filtered.
    Eaten.objects.create(meal='m')
    q = Eaten.objects.none()
    # Every chained operation below must short-circuit without touching
    # the database at all.
    with self.assertNumQueries(0):
        self.assertQuerysetEqual(q.all(), [])
        self.assertQuerysetEqual(q.filter(meal='m'), [])
        self.assertQuerysetEqual(q.exclude(meal='m'), [])
        self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
        self.assertQuerysetEqual(q.select_related('food'), [])
        self.assertQuerysetEqual(q.annotate(Count('food')), [])
        self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
        self.assertQuerysetEqual(q.distinct(), [])
        self.assertQuerysetEqual(
            q.extra(select={'foo': "1"}),
            []
        )
        self.assertQuerysetEqual(q.reverse(), [])
        # Simulate that a slice has been taken; extra() must then refuse
        # to modify the query.
        q.query.low_mark = 1
        with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'):
            q.extra(select={'foo': "1"})
        self.assertQuerysetEqual(q.defer('meal'), [])
        self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
    # distinct() combined with ordering through a ForeignKey used to
    # misbehave; just the row count matters here.
    self.assertEqual(len(Note.objects.order_by('extrainfo__info').distinct()), 3)
    # datetimes() querysets must also survive a pickle round-trip.
    pickle.loads(pickle.dumps(Item.objects.datetimes('created', 'month')))
def test_ticket9997(self):
    # A single-column values()/values_list() queryset may serve as the
    # right-hand side of an __in lookup...
    inner = Tag.objects.filter(parent=self.t1)
    self.assertQuerysetEqual(
        Tag.objects.filter(name__in=inner.values('name')),
        ['<Tag: t2>', '<Tag: t3>'],
    )
    # ...but a multi-column one is rejected outright.
    msg = 'Cannot use multi-field values as a filter value.'
    with self.assertRaisesMessage(TypeError, msg):
        Tag.objects.filter(name__in=inner.values('name', 'id'))
    with self.assertRaisesMessage(TypeError, msg):
        Tag.objects.filter(name__in=inner.values_list('name', 'id'))
def test_ticket9985(self):
    # values_list(...).values(...) chains keep working, including when
    # used as the right-hand side of an __in lookup.
    self.assertSequenceEqual(
        Note.objects.values_list("note", flat=True).values("id").order_by("id"),
        [{'id': 1}, {'id': 2}, {'id': 3}],
    )
    inner = Note.objects.filter(note="n1").values_list('note').values('id')
    self.assertQuerysetEqual(
        Annotation.objects.filter(notes__in=inner),
        ['<Annotation: a1>'],
    )
def test_ticket10205(self):
    # Bailing out early on an empty "__in" filter must leave internal
    # state consistent enough for a follow-up update().
    self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
    # Iterators/generators work as the value of an "__in" filter, whether
    # they yield nothing or a single primary key.
    note = Note.objects.all()[0]
    self.assertQuerysetEqual(Note.objects.filter(pk__in=iter([])), [])
    self.assertEqual(list(Note.objects.filter(pk__in=iter([note.pk]))), [note])
def test_ticket10742(self):
    # A queryset used inside an __in clause is compiled as a subquery and
    # never evaluated on its own, so its result cache must stay empty.
    subq = Author.objects.filter(num__lt=3000)
    self.assertQuerysetEqual(
        Author.objects.filter(pk__in=subq),
        ['<Author: a1>', '<Author: a2>'],
    )
    self.assertIsNone(subq._result_cache)

    subq = Author.objects.filter(num__lt=3000)
    self.assertQuerysetEqual(
        Author.objects.exclude(pk__in=subq),
        ['<Author: a3>', '<Author: a4>'],
    )
    self.assertIsNone(subq._result_cache)

    subq = Author.objects.filter(num__lt=3000)
    self.assertQuerysetEqual(
        Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
        ['<Author: a1>'],
    )
    self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
    # Rows whose compared column is NULL must survive an exclude().
    self.assertQuerysetEqual(
        Item.objects.exclude(modified=self.time1).order_by('name'),
        ['<Item: four>', '<Item: three>', '<Item: two>'],
    )
    self.assertQuerysetEqual(
        Tag.objects.exclude(parent__name=self.t1.name),
        ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'],
    )
def test_ticket7181(self):
    # NULL ordering is backend-specific, so just check the row count when
    # ordering through a nullable relation.
    self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)

    # Empty querysets merge cleanly with populated ones in either order.
    all_notes = ['<Note: n1>', '<Note: n2>', '<Note: n3>']
    self.assertQuerysetEqual(Note.objects.none() | Note.objects.all(), all_notes)
    self.assertQuerysetEqual(Note.objects.all() | Note.objects.none(), all_notes)
    self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
    self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
    # bump_prefix() (an internal Query method) must leave the query
    # runnable; executing it without error is the point here.
    qs = Tag.objects.values_list('id', flat=True).order_by('id')
    qs.query.bump_prefix(qs.query)
    lowest = qs[0]
    self.assertEqual(list(qs), list(range(lowest, lowest + 5)))
def test_ticket8439(self):
    # Complex combinations of conjunctions, disjunctions and nullable
    # relations.
    self.assertQuerysetEqual(
        Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
        ['<Author: a2>']
    )
    # The same disjunction with its branches swapped must match too.
    self.assertQuerysetEqual(
        Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
        ['<Author: a2>']
    )
    self.assertQuerysetEqual(
        Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
        ['<Annotation: a1>']
    )
    xx = ExtraInfo.objects.create(info='xx', note=self.n3)
    self.assertQuerysetEqual(
        Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
        ['<Note: n1>', '<Note: n3>']
    )
    q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
    # Exactly one still-referenced join should have been promoted to a
    # LEFT OUTER JOIN by the disjunction above.
    self.assertEqual(
        len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
        1
    )
def test_ticket17429(self):
    """
    Meta.ordering=None works the same as Meta.ordering=[]
    """
    saved_ordering = Tag._meta.ordering
    Tag._meta.ordering = None
    try:
        self.assertQuerysetEqual(
            Tag.objects.all(),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
            ordered=False,
        )
    finally:
        # Always restore the class-level state for the other tests.
        Tag._meta.ordering = saved_ordering
def test_exclude(self):
    # exclude(cond) must match filter(~cond) exactly, for plain and
    # compound conditions alike.
    for condition in (
        Q(tags__name='t4'),
        Q(tags__name='t4') | Q(tags__name='t3'),
        Q(tags__name='t4') | ~Q(tags__name='t3'),
    ):
        self.assertQuerysetEqual(
            Item.objects.exclude(condition),
            [repr(obj) for obj in Item.objects.filter(~condition)],
        )
def test_nested_exclude(self):
    # Excluding a negated condition amounts to a double negation.
    inner = Q(tags__name='t4')
    expected = [repr(obj) for obj in Item.objects.filter(~~inner)]
    self.assertQuerysetEqual(Item.objects.exclude(~inner), expected)
def test_double_exclude(self):
    # Two explicit negations cancel out, whichever way they are spelled.
    positive = [repr(obj) for obj in Item.objects.filter(Q(tags__name='t4'))]
    self.assertQuerysetEqual(Item.objects.filter(~~Q(tags__name='t4')), positive)
    self.assertQuerysetEqual(Item.objects.filter(~Q(~Q(tags__name='t4'))), positive)
def test_exclude_in(self):
    # Negation distributes correctly over an __in lookup.
    cond = Q(tags__name__in=['t4', 't3'])
    self.assertQuerysetEqual(
        Item.objects.exclude(cond),
        [repr(obj) for obj in Item.objects.filter(~cond)],
    )
    self.assertQuerysetEqual(
        Item.objects.filter(cond),
        [repr(obj) for obj in Item.objects.filter(~~cond)],
    )
def test_ticket_10790_1(self):
    # isnull filtering on a direct foreign key trims the LEFT OUTER JOIN
    # entirely, without introducing an INNER JOIN instead.
    qs = Tag.objects.filter(parent__isnull=True)
    self.assertQuerysetEqual(qs, ['<Tag: t1>'])
    self.assertNotIn('JOIN', str(qs.query))

    qs = Tag.objects.filter(parent__isnull=False)
    self.assertQuerysetEqual(qs, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'])
    self.assertNotIn('JOIN', str(qs.query))

    qs = Tag.objects.exclude(parent__isnull=True)
    self.assertQuerysetEqual(qs, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'])
    self.assertNotIn('JOIN', str(qs.query))

    qs = Tag.objects.exclude(parent__isnull=False)
    self.assertQuerysetEqual(qs, ['<Tag: t1>'])
    self.assertNotIn('JOIN', str(qs.query))

    # Two levels deep, exactly one outer join survives.
    qs = Tag.objects.exclude(parent__parent__isnull=False)
    self.assertQuerysetEqual(qs, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'])
    self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    self.assertNotIn('INNER JOIN', str(qs.query))
def test_ticket_10790_2(self):
    # Across several tables only the last outer join is stripped; the
    # inner joins leading up to it are preserved.
    qs = Tag.objects.filter(parent__parent__isnull=False)
    self.assertQuerysetEqual(qs, ['<Tag: t4>', '<Tag: t5>'])
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 1)

    # Without isnull nothing is promoted to an outer join at all.
    qs = Tag.objects.filter(parent__parent=self.t1)
    self.assertQuerysetEqual(qs, ['<Tag: t4>', '<Tag: t5>'])
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 1)
def test_ticket_10790_3(self):
    # Indirect (reverse) fields do require the LEFT OUTER JOIN.
    qs = NamedCategory.objects.filter(tag__isnull=True)
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 1)
    # One inner join reaches the dumbcategory parent link (ptr_id).
    self.assertEqual(sql.count('INNER JOIN'), 1)
    self.assertQuerysetEqual(qs, [])

    # Going a step further strips only the final join, keeping the
    # preceding outer join intact.
    qs = NamedCategory.objects.filter(tag__parent__isnull=True)
    sql = str(qs.query)
    self.assertEqual(sql.count('INNER JOIN'), 1)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 1)
    self.assertQuerysetEqual(qs, ['<NamedCategory: Generic>'])
def test_ticket_10790_4(self):
    # isnull across an m2m must keep the intermediary table in the join.
    qs = Author.objects.filter(item__tags__isnull=True)
    self.assertQuerysetEqual(qs, ['<Author: a2>', '<Author: a3>'])
    self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
    self.assertNotIn('INNER JOIN', str(qs.query))

    qs = Author.objects.filter(item__tags__parent__isnull=True)
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
    )
    self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
    self.assertNotIn('INNER JOIN', str(qs.query))
def test_ticket_10790_5(self):
    # isnull=False across an m2m never needs an outer join; each extra
    # hop adds exactly one more INNER JOIN.
    qs = Author.objects.filter(item__tags__isnull=False)
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'],
    )
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 2)

    qs = Author.objects.filter(item__tags__parent__isnull=False)
    self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>', '<Author: a4>'])
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 3)

    qs = Author.objects.filter(item__tags__parent__parent__isnull=False)
    self.assertQuerysetEqual(qs, ['<Author: a4>'])
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 4)
def test_ticket_10790_6(self):
    # isnull=True across an m2m: no INNER JOIN may be generated, and only
    # the final outer join gets stripped.
    qs = Author.objects.filter(item__tags__parent__parent__isnull=True)
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
         '<Author: a2>', '<Author: a3>'],
    )
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 4)
    self.assertEqual(sql.count('INNER JOIN'), 0)

    qs = Author.objects.filter(item__tags__parent__isnull=True)
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
    )
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 3)
    self.assertEqual(sql.count('INNER JOIN'), 0)
def test_ticket_10790_7(self):
    # Reverse-FK isnull filtering must not strip the join it relies on.
    qs = Author.objects.filter(item__isnull=True)
    self.assertQuerysetEqual(qs, ['<Author: a3>'])
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 1)
    self.assertEqual(sql.count('INNER JOIN'), 0)

    qs = Author.objects.filter(item__isnull=False)
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'],
    )
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 1)
def test_ticket_10790_8(self):
    # A disjunction that includes isnull=True also sheds the outer join.
    qs = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
    self.assertQuerysetEqual(qs, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'])
    sql = str(qs.query)
    self.assertEqual(sql.count('LEFT OUTER JOIN'), 0)
    self.assertEqual(sql.count('INNER JOIN'), 0)
def test_ticket_10790_combine(self):
    # Combining querysets must not re-introduce the stripped outer join.
    def check_join_counts(qs, outer, inner):
        # Helper: assert the exact number of each join type in the SQL.
        sql = str(qs.query)
        self.assertEqual(sql.count('LEFT OUTER JOIN'), outer)
        self.assertEqual(sql.count('INNER JOIN'), inner)

    q1 = Tag.objects.filter(parent__isnull=True)
    q2 = Tag.objects.filter(parent__isnull=False)

    combined = q1 | q2
    self.assertQuerysetEqual(
        combined,
        ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
    )
    check_join_counts(combined, 0, 0)

    combined = q1 & q2
    self.assertQuerysetEqual(combined, [])
    check_join_counts(combined, 0, 0)

    q2 = Tag.objects.filter(parent=self.t1)
    combined = q1 | q2
    self.assertQuerysetEqual(combined, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'])
    check_join_counts(combined, 0, 0)

    combined = q2 | q1
    self.assertQuerysetEqual(combined, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'])
    check_join_counts(combined, 0, 0)

    # When one side legitimately needs an outer join, exactly one remains
    # after combining, in either order.
    q1 = Tag.objects.filter(parent__isnull=True)
    q2 = Tag.objects.filter(parent__parent__isnull=True)

    combined = q1 | q2
    self.assertQuerysetEqual(combined, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'])
    check_join_counts(combined, 1, 0)

    combined = q2 | q1
    self.assertQuerysetEqual(combined, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'])
    check_join_counts(combined, 1, 0)
def test_ticket19672(self):
    # Conjunction of a non-null check with a negated related lookup.
    condition = Q(creator__isnull=False) & ~Q(creator__extra__value=41)
    self.assertQuerysetEqual(Report.objects.filter(condition), ['<Report: r1>'])
def test_ticket_20250(self):
    # Django 1.4 failed on a negated Q applied to an annotated queryset.
    qs = Author.objects.annotate(Count('item')).filter(~Q(extra__value=0))
    self.assertIn('SELECT', str(qs.query))
    self.assertQuerysetEqual(
        qs,
        ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>'],
    )
def test_lookup_constraint_fielderror(self):
    # An unknown field used in a lookup produces a FieldError naming
    # every valid choice, in alphabetical order.
    msg = (
        "Cannot resolve keyword 'unknown_field' into field. Choices are: "
        "annotation, category, category_id, children, id, item, "
        "managedmodel, name, note, parent, parent_id"
    )
    with self.assertRaisesMessage(FieldError, msg):
        Tag.objects.filter(unknown_field__name='generic')
class Queries2Tests(TestCase):
    """Boundary behavior of comparison lookups on an IntegerField."""

    @classmethod
    def setUpTestData(cls):
        # Three evenly spaced values exercise every comparison boundary.
        for value in (4, 8, 12):
            Number.objects.create(num=value)

    def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
        self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=8, num__lt=13),
            ['<Number: 12>'],
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
            [],
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
            [],
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
            [],
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
            ['<Number: 8>'],
        )

    def test_ticket12239(self):
        # Custom lookups round float values in the correct direction for
        # gt/gte/lt/lte queries against an IntegerField.
        cases = [
            ('num__gt', 11.9, ['<Number: 12>']),
            ('num__gt', 12, []),
            ('num__gt', 12.0, []),
            ('num__gt', 12.1, []),
            ('num__lt', 12, ['<Number: 4>', '<Number: 8>']),
            ('num__lt', 12.0, ['<Number: 4>', '<Number: 8>']),
            ('num__lt', 12.1, ['<Number: 4>', '<Number: 8>', '<Number: 12>']),
            ('num__gte', 11.9, ['<Number: 12>']),
            ('num__gte', 12, ['<Number: 12>']),
            ('num__gte', 12.0, ['<Number: 12>']),
            ('num__gte', 12.1, []),
            ('num__gte', 12.9, []),
            ('num__lte', 11.9, ['<Number: 4>', '<Number: 8>']),
            ('num__lte', 12, ['<Number: 4>', '<Number: 8>', '<Number: 12>']),
            ('num__lte', 12.0, ['<Number: 4>', '<Number: 8>', '<Number: 12>']),
            ('num__lte', 12.1, ['<Number: 4>', '<Number: 8>', '<Number: 12>']),
            ('num__lte', 12.9, ['<Number: 4>', '<Number: 8>', '<Number: 12>']),
        ]
        for lookup, value, expected in cases:
            self.assertQuerysetEqual(
                Number.objects.filter(**{lookup: value}),
                expected,
                ordered=False,
            )

    def test_ticket7759(self):
        # count() works even while a result set is being iterated.
        total = Number.objects.count()
        qs = Number.objects.all()

        def check():
            for _ in qs:
                return qs.count() == total
        self.assertTrue(check())
class Queries3Tests(TestCase):
    def test_ticket7107(self):
        # Evaluating this queryset must not loop forever.
        self.assertQuerysetEqual(Valid.objects.all(), [])

    def test_ticket8683(self):
        # datetimes() rejects non-date fields up front.
        msg = "'name' isn't a DateField, TimeField, or DateTimeField."
        with self.assertRaisesMessage(AssertionError, msg):
            Item.objects.datetimes('name', 'month')

    def test_ticket22023(self):
        # only()/defer() are unavailable once values() has been applied.
        with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"):
            Valid.objects.values().only()
        with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"):
            Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
    # Shared fixture: reports r1/r2 have creators while r3 deliberately
    # has none (nullable FK), which several tests rely on.
    generic = NamedCategory.objects.create(name="Generic")
    cls.t1 = Tag.objects.create(name='t1', category=generic)
    note1 = Note.objects.create(note='n1', misc='foo')
    note2 = Note.objects.create(note='n2', misc='bar')
    extra1 = ExtraInfo.objects.create(info='e1', note=note1)
    extra2 = ExtraInfo.objects.create(info='e2', note=note2)
    cls.a1 = Author.objects.create(name='a1', num=1001, extra=extra1)
    cls.a3 = Author.objects.create(name='a3', num=3003, extra=extra2)
    cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
    cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
    cls.r3 = Report.objects.create(name='r3')
    Item.objects.create(name='i1', created=datetime.datetime.now(), note=note1, creator=cls.a1)
    Item.objects.create(name='i2', created=datetime.datetime.now(), note=note1, creator=cls.a3)
def test_ticket24525(self):
    # Intersecting a related manager's queryset with an exclude() over
    # an m2m relation.
    tag = Tag.objects.create()
    anth100 = tag.note_set.create(note='ANTH', misc='100')
    math101 = tag.note_set.create(note='MATH', misc='101')
    annotation1 = tag.annotation_set.create(name='1')
    annotation2 = tag.annotation_set.create(name='2')
    annotation1.notes.set([math101, anth100])
    annotation2.notes.set([math101])
    result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100])
    self.assertEqual(list(result), [annotation2])
def test_ticket11811(self):
    # update() refuses values that are unsaved model instances.
    pending_category = NamedCategory(name="Other")
    msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.'
    with self.assertRaisesMessage(ValueError, msg):
        Tag.objects.filter(pk=self.t1.pk).update(category=pending_category)
def test_ticket14876(self):
    # Note: when combining the query we need to have information available
    # about the join type of the trimmed "creator__isnull" join. If we
    # don't have that information, then the join is created as INNER JOIN
    # and results will be incorrect.
    # Each pair below builds the same logical query two ways (single
    # filter with a disjunction vs. OR of two querysets) and asserts they
    # compile to identical SQL.
    q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
    q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
    self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
    self.assertEqual(str(q1.query), str(q2.query))
    q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
    q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
    self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
    self.assertEqual(str(q1.query), str(q2.query))
    q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
    q2 = (
        Item.objects
        .filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1'))
        .order_by()
    )
    self.assertQuerysetEqual(q1, ["<Item: i1>"])
    self.assertEqual(str(q1.query), str(q2.query))
    q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
    q2 = (
        Item.objects.filter(Q(creator__report__name='e1')).order_by() |
        Item.objects.filter(Q(creator=self.a1)).order_by()
    )
    self.assertQuerysetEqual(q1, ["<Item: i1>"])
    self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
    # Joins having identical connections are correctly recreated in the
    # rhs query when two querysets are ORed together (#18748).
    Report.objects.create(name='r4', creator=self.a1)
    lhs = Author.objects.filter(report__name='r5')
    rhs = Author.objects.filter(report__name='r4').filter(report__name='r1')
    combined = lhs | rhs
    self.assertEqual(str(combined.query).count('JOIN'), 2)
    self.assertEqual(len(combined), 1)
    self.assertEqual(combined[0].name, 'a1')
def test_join_reuse_order(self):
    # Join aliases are reused in order; ORing these querysets used to
    # trip an AssertionError because change_map contained a circular
    # reference (#26522).
    school_a = School.objects.create()
    school_b = School.objects.create()
    school_c = School.objects.create()
    teacher = Teacher.objects.create()
    others = Teacher.objects.exclude(pk=teacher.pk).exclude(friends=teacher)
    qs1 = others.filter(schools=school_a).filter(schools=school_b)
    qs2 = others.filter(schools=school_a).filter(schools=school_c)
    self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
    # Updates that are filtered on the model being updated are somewhat
    # tricky in MySQL.
    ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
    self.assertEqual(ManagedModel.objects.update(data='mm'), 1)

    # A values() or values_list() query across joined models must use outer
    # joins appropriately.
    # Note: In Oracle, we expect a null CharField to return '' instead of
    # None.
    if connection.features.interprets_empty_strings_as_nulls:
        expected_null_charfield_repr = ''
    else:
        expected_null_charfield_repr = None
    # r3 has no creator, so its creator__extra__info must come back null.
    self.assertSequenceEqual(
        Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
        ['e1', 'e2', expected_null_charfield_repr],
    )

    # Similarly for select_related(), joins beyond an initial nullable join
    # must use outer joins so that all results are included.
    self.assertQuerysetEqual(
        Report.objects.select_related("creator", "creator__extra").order_by("name"),
        ['<Report: r1>', '<Report: r2>', '<Report: r3>']
    )

    # When there are multiple paths to a table from another table, we have
    # to be careful not to accidentally reuse an inappropriate join when
    # using select_related(). We used to return the parent's Detail record
    # here by mistake.
    d1 = Detail.objects.create(data="d1")
    d2 = Detail.objects.create(data="d2")
    m1 = Member.objects.create(name="m1", details=d1)
    m2 = Member.objects.create(name="m2", details=d2)
    Child.objects.create(person=m2, parent=m1)
    obj = m1.children.select_related("person__details")[0]
    # The child's own detail record, not the parent's.
    self.assertEqual(obj.person.details.data, 'd2')
def test_order_by_resetting(self):
    # A bare order_by() wipes any existing ordering, but ordering can
    # still be layered on afterwards.
    qs = Author.objects.order_by().order_by('name')
    compiled_sql = qs.query.get_compiler(qs.db).as_sql()[0]
    self.assertIn('ORDER BY', compiled_sql)
    def test_order_by_reverse_fk(self):
        # It is possible to order by reverse of foreign key, although that can lead
        # to duplicate results.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SimpleCategory.objects.create(name="category2")
        CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c1)
        # c1 appears twice because two items reference it.
        self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1])
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
[]
)
    def test_ticket15316_filter_false(self):
        """filter(...__isnull=False) across a FK to a multi-table subclass (#15316)."""
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
        CategoryItem.objects.create(category=c1)
        ci2 = CategoryItem.objects.create(category=c2)
        ci3 = CategoryItem.objects.create(category=c3)
        # Only the items whose category row has a SpecialCategory child match.
        qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])
    def test_ticket15316_exclude_false(self):
        """exclude(...__isnull=False) across a FK to a multi-table subclass (#15316)."""
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
        ci1 = CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c3)
        # Excluding subclass membership leaves only the plain-category item.
        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])
    def test_ticket15316_filter_true(self):
        """filter(...__isnull=True) across a FK to a multi-table subclass (#15316)."""
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
        ci1 = CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c3)
        # Only the item whose category has no SpecialCategory child matches.
        qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])
    def test_ticket15316_exclude_true(self):
        """exclude(...__isnull=True) across a FK to a multi-table subclass (#15316)."""
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
        CategoryItem.objects.create(category=c1)
        ci2 = CategoryItem.objects.create(category=c2)
        ci3 = CategoryItem.objects.create(category=c3)
        # Excluding "no subclass row" keeps exactly the subclass-backed items.
        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])
    def test_ticket15316_one2one_filter_false(self):
        """filter(...__isnull=False) across a FK to a reverse one-to-one (#15316)."""
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        CategoryItem.objects.create(category=c)
        ci2 = CategoryItem.objects.create(category=c0)
        ci3 = CategoryItem.objects.create(category=c1)
        # Items whose category has a OneToOneCategory counterpart match.
        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk')
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])
    def test_ticket15316_one2one_exclude_false(self):
        """exclude(...__isnull=False) across a FK to a reverse one-to-one (#15316)."""
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        ci1 = CategoryItem.objects.create(category=c)
        CategoryItem.objects.create(category=c0)
        CategoryItem.objects.create(category=c1)
        # Only the item whose category has no one-to-one counterpart remains.
        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])
    def test_ticket15316_one2one_filter_true(self):
        """filter(...__isnull=True) across a FK to a reverse one-to-one (#15316)."""
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        ci1 = CategoryItem.objects.create(category=c)
        CategoryItem.objects.create(category=c0)
        CategoryItem.objects.create(category=c1)
        # Only the item whose category lacks a one-to-one counterpart matches.
        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])
    def test_ticket15316_one2one_exclude_true(self):
        """exclude(...__isnull=True) across a FK to a reverse one-to-one (#15316)."""
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")
        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")
        CategoryItem.objects.create(category=c)
        ci2 = CategoryItem.objects.create(category=c0)
        ci3 = CategoryItem.objects.create(category=c1)
        # Excluding "no counterpart" keeps the items that do have one.
        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk')
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
    """Ordering via Meta and extra() aliases, filtered updates, empty Q()
    excludes, and literal %%s escaping in extra(select=...)."""
    @classmethod
    def setUpTestData(cls):
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.rank1 = Ranking.objects.create(rank=2, author=a2)
        Ranking.objects.create(rank=1, author=a3)
        Ranking.objects.create(rank=3, author=a1)
    def test_ordering(self):
        """Meta-declared cross-model ordering, ordering by extra() aliases,
        and dropping extra aliases in a values() projection."""
        # Cross model ordering is possible in Meta, too.
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        self.assertQuerysetEqual(
            Ranking.objects.all().order_by('rank'),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )
        # Ordering of extra() pieces is possible, too and you can mix extra
        # fields and model fields in the ordering.
        self.assertQuerysetEqual(
            Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        self.assertEqual(
            [o.good for o in qs.extra(order_by=('-good',))],
            [True, False, False]
        )
        self.assertQuerysetEqual(
            qs.extra(order_by=('-good', 'id')),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        # Despite having some extra aliases in the query, we can still omit
        # them in a values() query.
        dicts = qs.values('id', 'rank').order_by('id')
        self.assertEqual(
            [d['rank'] for d in dicts],
            [2, 1, 3]
        )
    def test_ticket7256(self):
        # An empty values() call includes all aliases, including those from an
        # extra()
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        dicts = qs.values().order_by('id')
        for d in dicts:
            del d['id']
            del d['author_id']
        self.assertEqual(
            [sorted(d.items()) for d in dicts],
            [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
        )
    def test_ticket7045(self):
        # Extra tables used to crash SQL construction on the second use.
        qs = Ranking.objects.extra(tables=['django_site'])
        qs.query.get_compiler(qs.db).as_sql()
        # test passes if this doesn't raise an exception.
        qs.query.get_compiler(qs.db).as_sql()
    def test_ticket9848(self):
        # Make sure that updates which only filter on sub-tables don't
        # inadvertently update the wrong records (bug #9848).
        author_start = Author.objects.get(name='a1')
        ranking_start = Ranking.objects.get(author__name='a1')
        # Make sure that the IDs from different tables don't happen to match.
        self.assertQuerysetEqual(
            Ranking.objects.filter(author__name='a1'),
            ['<Ranking: 3: a1>']
        )
        self.assertEqual(
            Ranking.objects.filter(author__name='a1').update(rank=4636),
            1
        )
        # The update must have hit exactly the targeted Ranking row.
        r = Ranking.objects.get(author__name='a1')
        self.assertEqual(r.id, ranking_start.id)
        self.assertEqual(r.author.id, author_start.id)
        self.assertEqual(r.rank, 4636)
        # Restore the fixture value so the final ordering check still holds.
        r.rank = 3
        r.save()
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
    def test_ticket5261(self):
        # Test different empty excludes.
        self.assertQuerysetEqual(
            Note.objects.exclude(Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q() | ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.exclude(~Q() & ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
    def test_extra_select_literal_percent_s(self):
        # Allow %%s to escape select clauses
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
            '%s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
            '%s bar %s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
            'bar %s'
        )
class SelectRelatedTests(TestCase):
    """Regression tests for select_related() over circular relations."""
    def test_tickets_3045_3288(self):
        """select_related() on a model with circular relations terminates.
        Historically this could loop forever when "depth" was not specified;
        an arbitrary default upper bound is applied now (#3045, #3288)."""
        for queryset in (X.objects.all(), X.objects.select_related()):
            self.assertQuerysetEqual(queryset, [])
class SubclassFKTests(TestCase):
    """Deleting model subclasses involved in nullable FK cycles (#7778)."""
    def test_ticket7778(self):
        """A subclass instance with related Fan rows can be deleted, and the
        deletion cascades up to its parent Celebrity row."""
        celeb_count_before = Celebrity.objects.count()
        chef = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), celeb_count_before + 1)
        for _ in range(2):
            Fan.objects.create(fan_of=chef)
        chef.delete()
        # The parent object should have been deleted as well.
        self.assertEqual(Celebrity.objects.count(), celeb_count_before)
class CustomPkTests(TestCase):
    """Ordering by a custom primary-key column (#7371)."""
    def test_ticket7371(self):
        """order_by() on the 'custom' pk field is accepted without error."""
        ordered = Related.objects.order_by('custom')
        self.assertQuerysetEqual(ordered, [])
class NullableRelOrderingTests(TestCase):
    """Ordering across nullable relations must add LEFT joins without
    demoting existing INNER joins."""
    def test_ticket10028(self):
        # Ordering by model related to nullable relations(!) should use outer
        # joins, so that all results are included.
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )
    def test_join_already_in_query(self):
        # Ordering by model related to nullable relations should not change
        # the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s, f=1)
        Plaything.objects.create(name="p2", others=r)
        # isnull=False on the FK itself needs no join at all.
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertNotIn('JOIN', str(qs.query))
        # Filtering on a field of the related model forces an INNER join.
        qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
        self.assertIn('INNER', str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__pk will add one new join (to single)
        # and that join must be LEFT join. The already existing join to related
        # objects must be kept INNER. So, we have both an INNER and a LEFT join
        # in the query.
        self.assertEqual(str(qs.query).count('LEFT'), 1)
        self.assertEqual(str(qs.query).count('INNER'), 1)
        self.assertQuerysetEqual(
            qs,
            ['<Plaything: p2>']
        )
class DisjunctiveFilterTests(TestCase):
    """Filtering with OR conditions that span (possibly absent) relations."""
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=cls.n1)
    def test_ticket7872(self):
        # Another variation on the disjunctive filtering theme.
        # For the purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
        # The row must still match via the first branch of the OR even though
        # the join__b__data branch has no related rows.
        self.assertQuerysetEqual(
            LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
            ['<LeafA: first>']
        )
    def test_ticket8283(self):
        # Checking that applying filters after a disjunction works correctly.
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )
class Queries6Tests(TestCase):
    """Iterator behaviour, nested subqueries, and exclude() across
    multi-valued relations (#8921, #9188, #11320, #3739)."""
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        t4 = Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ann1 = Annotation.objects.create(name='a1', tag=t1)
        ann1.notes.add(n1)
        Annotation.objects.create(name='a2', tag=t4)
    def test_parallel_iterators(self):
        # Parallel iterators work.
        # Two iterators over the same queryset advance independently.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), '<Tag: t1>')
        self.assertEqual(repr(next(i1)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t1>')
        self.assertEqual(repr(next(i2)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t3>')
        self.assertEqual(repr(next(i1)), '<Tag: t3>')
        # An empty queryset evaluates as falsy (checked twice on purpose).
        qs = X.objects.all()
        self.assertFalse(qs)
        self.assertFalse(qs)
    def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of constructing the
        # SQL (so we should see a nested query here, indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )
    def test_tickets_8921_9188(self):
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # preemptively discovered cases).
        self.assertQuerysetEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(children=None),
            ['<Tag: t1>', '<Tag: t3>']
        )
        # This example is tricky because the parent could be NULL, so only checking
        # parents with annotations omits some results (tag t1, in this case).
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )
        # The annotation->tag link is single values and tag->children links is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
        self.assertQuerysetEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            ['<Annotation: a2>']
        )
        # Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL.
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            ['<Annotation: a1>']
        )
    def test_ticket3739(self):
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())
    def test_ticket_11320(self):
        # Chained excludes across the same FK must share one INNER JOIN.
        qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
class RawQueriesTests(TestCase):
    """repr() of a RawQuerySet interpolates the supplied params (#14729)."""
    def setUp(self):
        Note.objects.create(note='n1', misc='foo', id=1)
    def test_ticket14729(self):
        # Test representation of raw query with one or few parameters passed as list
        query = "SELECT * FROM queries_note WHERE note = %s"
        params = ['n1']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")
        query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
        params = ['n1', 'foo']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
class GeneratorExpressionTests(TestCase):
    """Generator expressions as values for __in lookups (#10432)."""
    def test_ticket10432(self):
        """An empty generator expression is a legal rvalue for an "__in"
        lookup and yields an empty result."""
        empty_gen = (value for value in ())
        self.assertQuerysetEqual(Note.objects.filter(pk__in=empty_gen), [])
class ComparisonTests(TestCase):
    """Case-insensitive lookups on values containing '_' and '%' (#8597)."""
    def setUp(self):
        self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
        self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
    def test_ticket8597(self):
        # Regression tests for case-insensitive comparisons
        # The names deliberately contain '_' and '%', which the lookups must
        # match literally.
        Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        self.assertQuerysetEqual(
            Item.objects.filter(name__iexact="A_b"),
            ['<Item: a_b>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__iexact="x%Y"),
            ['<Item: x%y>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__istartswith="A_b"),
            ['<Item: a_b>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__iendswith="A_b"),
            ['<Item: a_b>']
        )
class ExistsSql(TestCase):
    """The SQL emitted by QuerySet.exists(), including on distinct() and
    sliced querysets (#18414)."""
    def test_exists(self):
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertFalse(Tag.objects.exists())
        # Ok - so the exist query worked - but did it include too many columns?
        self.assertEqual(len(captured_queries), 1)
        qstr = captured_queries[0]['sql']
        # NOTE(review): 'id' shadows the builtin here; harmless in this scope.
        id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name')
        self.assertNotIn(id, qstr)
        self.assertNotIn(name, qstr)
    def test_ticket_18414(self):
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.exists())
        self.assertTrue(Article.objects.distinct().exists())
        # With three rows, [1:3] is non-empty while the zero-width slice
        # [1:1] never is.
        self.assertTrue(Article.objects.distinct()[1:3].exists())
        self.assertFalse(Article.objects.distinct()[1:1].exists())
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_18414_distinct_on(self):
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        # distinct('name') collapses the three rows to two ('one', 'two').
        self.assertTrue(Article.objects.distinct('name').exists())
        self.assertTrue(Article.objects.distinct('name')[1:2].exists())
        self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
    """
    Tests for the Queryset.ordered attribute.
    """
    def test_no_default_or_explicit_ordering(self):
        """Without Meta ordering or order_by(), the queryset is unordered."""
        self.assertIs(Annotation.objects.all().ordered, False)
    def test_cleared_default_ordering(self):
        """A bare order_by() clears the default ordering flag."""
        self.assertIs(Tag.objects.all().ordered, True)
        self.assertIs(Tag.objects.all().order_by().ordered, False)
    def test_explicit_ordering(self):
        """order_by(field) marks the queryset as ordered."""
        self.assertIs(Annotation.objects.all().order_by('id').ordered, True)
    def test_order_by_extra(self):
        """extra(order_by=...) also marks the queryset as ordered."""
        self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True)
    def test_annotated_ordering(self):
        """Annotating alone does not order; ordering by the annotation does."""
        annotated = Annotation.objects.annotate(num_notes=Count('notes'))
        self.assertIs(annotated.ordered, False)
        self.assertIs(annotated.order_by('num_notes').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries')
class SubqueryTests(TestCase):
    """Sliced querysets used as __in subqueries, including in delete()."""
    @classmethod
    def setUpTestData(cls):
        # Explicit ids are relied on throughout: ordering by -id maps
        # directly onto the 'fourth'...'first' sequence.
        NamedCategory.objects.create(id=1, name='first')
        NamedCategory.objects.create(id=2, name='second')
        NamedCategory.objects.create(id=3, name='third')
        NamedCategory.objects.create(id=4, name='fourth')
    def test_ordered_subselect(self):
        "Subselects honor any manual ordering"
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
        self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})
    def test_slice_subquery_and_query(self):
        """
        Slice a query that has a sliced subquery
        """
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
        self.assertEqual({x.id for x in query}, {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
        self.assertEqual({x.id for x in query}, {3})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
        self.assertEqual({x.id for x in query}, {2})
    def test_related_sliced_subquery(self):
        """
        Related objects constraints can safely contain sliced subqueries.
        refs #22434
        """
        generic = NamedCategory.objects.create(id=5, name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', category=generic)
        ManagedModel.objects.create(data='mm1', tag=t1, public=True)
        mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)
        # The highest-id tag is t2, so only mm2 should match.
        query = ManagedModel.normal_manager.filter(
            tag__in=Tag.objects.order_by('-id')[:1]
        )
        self.assertEqual({x.id for x in query}, {mm2.id})
    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        # Each delete removes the highest remaining id targeted by the slice.
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
    def test_distinct_ordered_sliced_subquery(self):
        """distinct()/order_by()/sliced subqueries project the right column."""
        # Implicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('name')[0:2],
            ).order_by('name').values_list('name', flat=True), ['first', 'fourth']
        )
        # Explicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2],
            ).order_by('name').values_list('name', flat=True), ['second', 'third']
        )
        # Annotated value.
        self.assertSequenceEqual(
            DumbCategory.objects.filter(
                id__in=DumbCategory.objects.annotate(
                    double_id=F('id') * 2
                ).order_by('id').distinct().values('double_id')[0:2],
            ).order_by('id').values_list('id', flat=True), [2, 4]
        )
class CloneTests(TestCase):
    """Queryset cloning: evaluated querysets as query args, and cloning must
    not deepcopy model options or fields (#13227, #16759)."""
    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()
        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query cache
        list(n_list)
        # Use the note queryset in a query, and evaluate
        # that query in a way that involves cloning.
        self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
    def test_no_model_options_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta)
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        # If cloning ever deepcopies Model options, this hook fails the test.
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
        try:
            Note.objects.filter(pk__lte=F('pk') + 1).all()
        finally:
            # Restore (or remove) the patched hook even on failure.
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
    def test_no_fields_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta.get_field("misc"))
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        # If cloning ever deepcopies a model field, this hook fails the test.
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
        try:
            Note.objects.filter(note=F('misc')).all()
        finally:
            # Restore (or remove) the patched hook even on failure.
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(TestCase):
    """Behaviour of .none() querysets under values()/values_list(), both
    directly and when used as subqueries."""
    def test_emptyqueryset_values(self):
        """Calling .values() on an empty QuerySet and then cloning it works
        without error (#14366)."""
        empty_values = Number.objects.none().values('num').order_by('num')
        self.assertQuerysetEqual(empty_values, [])
    def test_values_subquery(self):
        """A values()/values_list() over .none() is usable as an __in arg."""
        none_qs = Number.objects.none()
        self.assertQuerysetEqual(Number.objects.filter(pk__in=none_qs.values("pk")), [])
        self.assertQuerysetEqual(Number.objects.filter(pk__in=none_qs.values_list("pk")), [])
    def test_ticket_19151(self):
        """values()/values_list() on .none() return empty querysets without
        raising (#19151)."""
        none_authors = Author.objects.none()
        self.assertQuerysetEqual(none_authors.values(), [])
        self.assertQuerysetEqual(none_authors.values_list(), [])
class ValuesQuerysetTests(TestCase):
    """
    values()/values_list() combined with extra(select=...) columns and
    ordering by those extra aliases (#14930, #23259, #23443).
    """
    @classmethod
    def setUpTestData(cls):
        # A single row is enough: each test only checks which columns and
        # values come back, not cardinality.
        Number.objects.create(num=72)
    def test_flat_values_list(self):
        """values_list(flat=True) can be applied on top of a values_list()."""
        qs = Number.objects.values_list("num")
        qs = qs.values_list("num", flat=True)
        self.assertSequenceEqual(qs, [72])
    def test_extra_values(self):
        """Ordering by a parameterized extra() alias leaves values() intact (#14930)."""
        qs = Number.objects.extra(
            select=OrderedDict([('value_plus_x', 'num+%s'), ('value_minus_x', 'num-%s')]),
            select_params=(1, 2),
        )
        qs = qs.order_by('value_minus_x')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
    def test_extra_values_order_twice(self):
        """Re-ordering by a second extra() alias works (#14930)."""
        qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
        qs = qs.order_by('value_minus_one').order_by('value_plus_one')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
    def test_extra_values_order_multiple(self):
        """Ordering by several extra() aliases at once works."""
        # Postgres doesn't allow constants in order by, so check for that.
        qs = Number.objects.extra(select={
            'value_plus_one': 'num+1',
            'value_minus_one': 'num-1',
            'constant_value': '1'
        })
        qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
    def test_extra_values_order_in_extra(self):
        """order_by given inside extra() itself combines with values() (#14930)."""
        qs = Number.objects.extra(
            select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
            order_by=['value_minus_one'])
        qs = qs.values('num')
        # Bug fix: this test previously built the queryset but never evaluated
        # or asserted it, so it could not fail. Evaluate and check the result.
        self.assertSequenceEqual(qs, [{'num': 72}])
    def test_extra_select_params_values_order_in_extra(self):
        """select_params survive filtering plus ordering by the alias (#23259)."""
        qs = Number.objects.extra(
            select={'value_plus_x': 'num+%s'},
            select_params=[1],
            order_by=['value_plus_x'])
        qs = qs.filter(num=72)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
    def test_extra_multiple_select_params_values_order_by(self):
        """Multiple select_params survive ordering and filtering (#23259)."""
        qs = Number.objects.extra(
            select=OrderedDict([('value_plus_x', 'num+%s'), ('value_minus_x', 'num-%s')]),
            select_params=(72, 72),
        )
        qs = qs.order_by('value_minus_x')
        # No row has num=1, so the result is empty.
        qs = qs.filter(num=1)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [])
    def test_extra_values_list(self):
        """values_list() drops extra() aliases that weren't requested (#14930)."""
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num')
        self.assertSequenceEqual(qs, [(72,)])
    def test_flat_extra_values_list(self):
        """Flat values_list() with an extra() ordering alias (#14930)."""
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num', flat=True)
        self.assertSequenceEqual(qs, [72])
    def test_field_error_values_list(self):
        """A nested lookup on a non-relational field in values_list() raises a
        helpful FieldError (#23443)."""
        # see #23443
        msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.values_list('name__foo')
class QuerySetSupportsPythonIdioms(TestCase):
    """Indexing, slicing, len(), and &/| combination on querysets."""
    @classmethod
    def setUpTestData(cls):
        some_date = datetime.datetime(2014, 5, 16, 12, 1)
        # Seven articles named "Article 1" .. "Article 7".
        for i in range(1, 8):
            Article.objects.create(
                name="Article {}".format(i), created=some_date)
    def get_ordered_articles(self):
        # Helper: the fixture articles in name order.
        return Article.objects.all().order_by('name')
    def test_can_get_items_using_index_and_slice_notation(self):
        self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
        self.assertQuerysetEqual(
            self.get_ordered_articles()[1:3],
            ["<Article: Article 2>", "<Article: Article 3>"]
        )
    def test_slicing_with_steps_can_be_used(self):
        self.assertQuerysetEqual(
            self.get_ordered_articles()[::2], [
                "<Article: Article 1>",
                "<Article: Article 3>",
                "<Article: Article 5>",
                "<Article: Article 7>"
            ]
        )
    def test_slicing_without_step_is_lazy(self):
        # A plain slice does not hit the database.
        with self.assertNumQueries(0):
            self.get_ordered_articles()[0:5]
    def test_slicing_with_tests_is_not_lazy(self):
        # A stepped slice is evaluated immediately (exactly one query).
        with self.assertNumQueries(1):
            self.get_ordered_articles()[0:5:3]
    def test_slicing_can_slice_again_after_slicing(self):
        self.assertQuerysetEqual(
            self.get_ordered_articles()[0:5][0:2],
            ["<Article: Article 1>", "<Article: Article 2>"]
        )
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])
        # Some more tests!
        self.assertQuerysetEqual(
            self.get_ordered_articles()[2:][0:2],
            ["<Article: Article 3>", "<Article: Article 4>"]
        )
        self.assertQuerysetEqual(
            self.get_ordered_articles()[2:][:2],
            ["<Article: Article 3>", "<Article: Article 4>"]
        )
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"])
        # Using an offset without a limit is also possible.
        self.assertQuerysetEqual(
            self.get_ordered_articles()[5:],
            ["<Article: Article 6>", "<Article: Article 7>"]
        )
    def test_slicing_cannot_filter_queryset_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."):
            Article.objects.all()[0:5].filter(id=1, )
    def test_slicing_cannot_reorder_queryset_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."):
            Article.objects.all()[0:5].order_by('id', )
    def test_slicing_cannot_combine_queries_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."):
            Article.objects.all()[0:1] & Article.objects.all()[4:5]
    def test_slicing_negative_indexing_not_supported_for_single_element(self):
        """hint: inverting your ordering might do what you need"""
        with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
            Article.objects.all()[-1]
    def test_slicing_negative_indexing_not_supported_for_range(self):
        """hint: inverting your ordering might do what you need"""
        with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
            Article.objects.all()[0:-5]
    def test_can_get_number_of_items_in_queryset_using_standard_len(self):
        self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)
    def test_can_combine_queries_using_and_and_or_operators(self):
        s1 = Article.objects.filter(name__exact='Article 1')
        s2 = Article.objects.filter(name__exact='Article 2')
        self.assertQuerysetEqual(
            (s1 | s2).order_by('name'),
            ["<Article: Article 1>", "<Article: Article 2>"]
        )
        self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
    """Edge cases for zero-length and provably-empty queryset slices."""
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=1)
        Number.objects.create(num=2)
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        Article.objects.create(name='three', created=datetime.datetime.now())
        Article.objects.create(name='four', created=datetime.datetime.now())
        food = Food.objects.create(name='spam')
        Eaten.objects.create(meal='spam with eggs', food=food)
    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'):
            Article.objects.all()[:0].latest('created')
    def test_empty_resultset_sql(self):
        # ticket #12192
        # A slice that is provably empty must not hit the database at all.
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
    def test_empty_sliced_subquery(self):
        self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)
    def test_empty_sliced_subquery_exclude(self):
        # Excluding against an empty subquery keeps the single Eaten row.
        self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)
    def test_zero_length_values_slicing(self):
        n = 42
        # Zero-length slices of values()/values_list() issue no queries.
        with self.assertNumQueries(0):
            self.assertQuerysetEqual(Article.objects.values()[n:n], [])
            self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
    def test_ticket_7302(self):
        """SQL-reserved names used as fields/ordering are quoted correctly."""
        ReservedName.objects.create(name='a', order=42)
        ReservedName.objects.create(name='b', order=37)
        expected = ['<ReservedName: b>', '<ReservedName: a>']
        self.assertQuerysetEqual(ReservedName.objects.all().order_by('order'), expected)
        self.assertQuerysetEqual(
            ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
            expected,
        )
class ToFieldTests(TestCase):
    """Lookups and subqueries across relations, including recursive FKs."""

    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=pear, meal="dinner")
        # Model instances can be passed directly to an __in lookup.
        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            {lunch, dinner},
        )

    def test_in_subquery(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
            {lunch}
        )
        # A values() subquery selecting a non-matching column yields nothing.
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
            set()
        )
        # Reverse direction: a queryset works as the right-hand side too.
        self.assertEqual(
            set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
            {apple}
        )

    def test_nested_in_subquery(self):
        extra = ExtraInfo.objects.create()
        author = Author.objects.create(num=42, extra=extra)
        report = Report.objects.create(creator=author)
        comment = ReportComment.objects.create(report=report)
        # Two nested levels of __in subqueries resolve correctly.
        comments = ReportComment.objects.filter(
            report__in=Report.objects.filter(
                creator__in=extra.author_set.all(),
            ),
        )
        self.assertSequenceEqual(comments, [comment])

    def test_reverse_in(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
        # Reverse FK accessor accepts a list of related instances for __in.
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
            {apple, pear}
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=apple, meal="dinner")
        # Exact-match filter on the FK returns all rows pointing at it.
        self.assertEqual(
            set(Eaten.objects.filter(food=apple)),
            {lunch, dinner}
        )

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Food.objects.filter(eaten=lunch)),
            {apple}
        )

    def test_recursive_fk(self):
        # Self-referential FK: filter children by parent instance.
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)
        self.assertEqual(
            list(Node.objects.filter(parent=node1)),
            [node2]
        )

    def test_recursive_fk_reverse(self):
        # ...and the reverse accessor finds the parent from the child.
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)
        self.assertEqual(
            list(Node.objects.filter(node=node2)),
            [node1]
        )
class IsNullTests(TestCase):
    """__isnull lookups across foreign-key relations."""

    def test_primary_key(self):
        pk_target = CustomPk.objects.create(name='pk')
        without_custom = Related.objects.create()
        with_custom = Related.objects.create(custom=pk_target)
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [with_custom])
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [without_custom])

    def test_to_field(self):
        fruit = Food.objects.create(name="apple")
        Eaten.objects.create(food=fruit, meal="lunch")
        Eaten.objects.create(meal="lunch")
        self.assertQuerysetEqual(
            Eaten.objects.filter(food__isnull=False), ['<Eaten: apple at lunch>'],
        )
        self.assertQuerysetEqual(
            Eaten.objects.filter(food__isnull=True), ['<Eaten: None at lunch>'],
        )
class ConditionalTests(TestCase):
    """Tests whose execution depend on different environment conditions like
    Python version or DB backend features"""

    @classmethod
    def setUpTestData(cls):
        # A small tag tree: t1 -> (t2, t3), t3 -> (t4, t5).
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)

    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopX.objects.all())  # Force queryset evaluation with list()
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopZ.objects.all())  # Force queryset evaluation with list()
        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)
        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'),
            []
        )

    # When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
    # portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        # Exactly one ORDER BY clause in the SQL...
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        # ...and it orders by NULL immediately after the keyword.
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))

    def test_in_list_limit(self):
        # The "in" lookup works with lists of 1000 items or more.
        # The numbers amount is picked to force three different IN batches
        # for Oracle, yet to be less than 2100 parameter limit for MSSQL.
        numbers = list(range(2050))
        max_query_params = connection.features.max_query_params
        if max_query_params is None or max_query_params >= len(numbers):
            Number.objects.bulk_create(Number(num=num) for num in numbers)
            for number in [1000, 1001, 2000, len(numbers)]:
                with self.subTest(number=number):
                    self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number)
class UnionTests(TestCase):
    """
    Tests for the union of two querysets. Bug #12252.
    """
    # NOTE(review): this class must inherit from Django's TestCase, not
    # unittest.TestCase. setUpTestData() is a hook invoked only by Django's
    # TestCase; under plain unittest it is never called, so no fixture rows
    # would exist and every check_union() comparison would pass vacuously
    # (empty set == empty set).

    @classmethod
    def setUpTestData(cls):
        # Three ObjectAs; ObjectBs link to a[0], a[0], a[2]; ObjectCs link
        # across both A and B so the unions below have rows to distinguish.
        objectas = []
        objectbs = []
        objectcs = []
        a_info = ['one', 'two', 'three']
        for name in a_info:
            o = ObjectA(name=name)
            o.save()
            objectas.append(o)
        b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
        for name, number, objecta in b_info:
            o = ObjectB(name=name, num=number, objecta=objecta)
            o.save()
            objectbs.append(o)
        c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
        for name, objecta, objectb in c_info:
            o = ObjectC(name=name, objecta=objecta, objectb=objectb)
            o.save()
            objectcs.append(o)

    def check_union(self, model, Q1, Q2):
        """Assert filter(Q1) | filter(Q2) equals filter(Q1 | Q2), both ways."""
        filter = model.objects.filter
        self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
        self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))

    def test_A_AB(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)

    def test_A_AB2(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux', objectb__num=2)
        self.check_union(ObjectA, Q1, Q2)

    def test_AB_ACB(self):
        Q1 = Q(objectb__name='deux')
        Q2 = Q(objectc__objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)

    def test_BAB_BAC(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__name='ein')
        self.check_union(ObjectB, Q1, Q2)

    def test_BAB_BACB(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)

    def test_BA_BCA__BAB_BAC_BCA(self):
        Q1 = Q(objecta__name='one', objectc__objecta__name='two')
        Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
    def test_no_extra_params(self):
        """
        A model consisting of only a PK field can be created without
        passing any parameters (#17056).
        """
        DumbCategory.objects.create()
class ExcludeTests(TestCase):
    """Tests for exclude() across foreign-key and many-to-many relations."""

    @classmethod
    def setUpTestData(cls):
        f1 = Food.objects.create(name='apples')
        Food.objects.create(name='oranges')
        Eaten.objects.create(food=f1, meal='dinner')
        j1 = Job.objects.create(name='Manager')
        r1 = Responsibility.objects.create(description='Playing golf')
        j2 = Job.objects.create(name='Programmer')
        r2 = Responsibility.objects.create(description='Programming')
        JobResponsibilities.objects.create(job=j1, responsibility=r1)
        JobResponsibilities.objects.create(job=j2, responsibility=r2)

    def test_to_field(self):
        # exclude() across the relations set up above keeps only the
        # non-matching rows.
        self.assertQuerysetEqual(
            Food.objects.exclude(eaten__meal='dinner'),
            ['<Food: oranges>'])
        self.assertQuerysetEqual(
            Job.objects.exclude(responsibilities__description='Playing golf'),
            ['<Job: Programmer>'])
        self.assertQuerysetEqual(
            Responsibility.objects.exclude(jobs__name='Manager'),
            ['<Responsibility: Programming>'])

    def test_ticket14511(self):
        alex = Person.objects.get_or_create(name='Alex')[0]
        jane = Person.objects.get_or_create(name='Jane')[0]
        oracle = Company.objects.get_or_create(name='Oracle')[0]
        google = Company.objects.get_or_create(name='Google')[0]
        microsoft = Company.objects.get_or_create(name='Microsoft')[0]
        intel = Company.objects.get_or_create(name='Intel')[0]

        def employ(employer, employee, title):
            # Idempotent helper: one Employment row per (employer, employee, title).
            Employment.objects.get_or_create(employee=employee, employer=employer, title=title)

        employ(oracle, alex, 'Engineer')
        employ(oracle, alex, 'Developer')
        employ(google, alex, 'Engineer')
        employ(google, alex, 'Manager')
        employ(microsoft, alex, 'Manager')
        employ(intel, alex, 'Manager')
        employ(microsoft, jane, 'Developer')
        employ(intel, jane, 'Manager')

        # filter() and exclude() on the through-model titles must partition
        # Alex's employers consistently.
        alex_tech_employers = alex.employers.filter(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertSequenceEqual(alex_tech_employers, [google, oracle])
        alex_nontech_employers = alex.employers.exclude(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
class ExcludeTest17600(TestCase):
    """
    Some regressiontests for ticket #17600. Some of these likely duplicate
    other existing tests.
    """
    @classmethod
    def setUpTestData(cls):
        # Create a few Orders.
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)
        # Create some OrderItems for the first order with homogeneous
        # status_id values
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
        # Create some OrderItems for the second order with heterogeneous
        # status_id values
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values, none of which is status 1
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)

    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1),
            ['<Order: 3>'])

    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)),
            ['<Order: 3>'])

    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those items not having any orders at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(~Q(items__status=1)).distinct(),
            ['<Order: 1>'])
class Exclude15786(TestCase):
    """Regression test for #15786: exclude() with an F() across relations."""

    def test_ticket15786(self):
        first_cat = SimpleCategory.objects.create(name='c1')
        second_cat = SimpleCategory.objects.create(name='c2')
        OneToOneCategory.objects.create(category=first_cat)
        OneToOneCategory.objects.create(category=second_cat)
        relationship = CategoryRelationship.objects.create(first=first_cat, second=second_cat)
        excluded = CategoryRelationship.objects.exclude(
            first__onetoonecategory=F('second__onetoonecategory')
        )
        self.assertEqual(excluded.get(), relationship)
class NullInExcludeTest(TestCase):
    """Behavior of exclude(field__in=...) when the column contains NULLs."""

    @classmethod
    def setUpTestData(cls):
        NullableName.objects.create(name='i1')
        # A second row created with no name value.
        NullableName.objects.create()

    def test_null_in_exclude_qs(self):
        # Oracle stores '' as NULL, so the "empty" row reads back differently
        # there (see interprets_empty_strings_as_nulls).
        none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[]),
            ['i1', none_val], attrgetter('name'))
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i1']),
            [none_val], attrgetter('name'))
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i3']),
            ['i1', none_val], attrgetter('name'))
        inner_qs = NullableName.objects.filter(name='i1').values_list('name')
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=inner_qs),
            [none_val], attrgetter('name'))
        # The inner queryset wasn't executed - it should be turned
        # into subquery above
        self.assertIs(inner_qs._result_cache, None)

    @unittest.expectedFailure
    def test_col_not_in_list_containing_null(self):
        """
        The following case is not handled properly because
        SQL's COL NOT IN (list containing null) handling is too weird to
        abstract away.
        """
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[None]),
            ['i1'], attrgetter('name'))

    def test_double_exclude(self):
        # ~~Q(x) must behave exactly like Q(x), and must not leave an
        # 'IS NOT NULL' check behind in the generated SQL.
        self.assertEqual(
            list(NullableName.objects.filter(~~Q(name='i1'))),
            list(NullableName.objects.filter(Q(name='i1'))))
        self.assertNotIn(
            'IS NOT NULL',
            str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
    """
    Filtering on non-null character fields works as expected.
    The reason for these tests is that Oracle treats '' as NULL, and this
    can cause problems in query construction. Refs #17957.
    """

    @classmethod
    def setUpTestData(cls):
        cls.nc = NamedCategory.objects.create(name='')

    def test_direct_exclude(self):
        remaining = NamedCategory.objects.exclude(name__in=['nonexistent'])
        self.assertQuerysetEqual(remaining, [self.nc.pk], attrgetter('pk'))

    def test_joined_exclude(self):
        remaining = DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent'])
        self.assertQuerysetEqual(remaining, [self.nc.pk], attrgetter('pk'))

    def test_21001(self):
        named = NamedCategory.objects.create(name='foo')
        remaining = NamedCategory.objects.exclude(name='')
        self.assertQuerysetEqual(remaining, [named.pk], attrgetter('pk'))
class ProxyQueryCleanupTest(TestCase):
    def test_evaluated_proxy_count(self):
        """
        Generating the query string doesn't alter the query's state
        in irreversible ways. Refs #18248.
        """
        ProxyCategory.objects.create()
        proxies = ProxyCategory.objects.all()
        self.assertEqual(proxies.count(), 1)
        # Rendering the SQL must not corrupt the queryset...
        str(proxies.query)
        # ...so a second count still returns the same result.
        self.assertEqual(proxies.count(), 1)
class WhereNodeTest(TestCase):
    """Unit tests for WhereNode.as_sql(), in particular its handling of empty
    children and always-failing (NothingNode) children under AND/OR/NOT."""

    class DummyNode:
        # Minimal leaf node: compiles to constant SQL with no parameters.
        def as_sql(self, compiler, connection):
            return 'dummy', []

    class MockCompiler:
        # Just enough of a SQLCompiler: compile() delegates to the node, and
        # calling the instance quotes a name for the active connection.
        def compile(self, node):
            return node.as_sql(self, connection)

        def __call__(self, name):
            return connection.ops.quote_name(name)

    def test_empty_full_handling_conjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        # An AND containing a NothingNode can never match -> EmptyResultSet.
        w = WhereNode(children=[NothingNode()])
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        # ...and its negation matches everything: empty WHERE clause.
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
        w = WhereNode(children=[NothingNode(), self.DummyNode()])
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))

    def test_empty_full_handling_disjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        w = WhereNode(children=[NothingNode()], connector='OR')
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
        # In an OR, the always-failing child is simply dropped.
        w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))

    def test_empty_nodes(self):
        compiler = WhereNodeTest.MockCompiler()
        # Empty WhereNodes compile to an empty clause; negating flips them
        # into "match nothing" (EmptyResultSet) and back.
        empty_w = WhereNode()
        w = WhereNode(children=[empty_w, empty_w])
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w.negate()
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.connector = 'OR'
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
class QuerySetExceptionTests(TestCase):
    """Errors raised while building or iterating invalid querysets."""

    def test_iter_exceptions(self):
        deferred = ExtraInfo.objects.only('author')
        expected = "'ManyToOneRel' object has no attribute 'attname'"
        with self.assertRaisesMessage(AttributeError, expected):
            list(deferred)

    def test_invalid_qs_list(self):
        # #19895: iterating an invalid queryset a second time raises the same
        # error instead of silently succeeding.
        broken = Article.objects.order_by('invalid_column')
        expected = "Cannot resolve keyword 'invalid_column' into field."
        for _ in range(2):
            with self.assertRaisesMessage(FieldError, expected):
                list(broken)

    def test_invalid_order_by(self):
        expected = "Invalid order_by arguments: ['*']"
        with self.assertRaisesMessage(FieldError, expected):
            list(Article.objects.order_by('*'))

    def test_invalid_queryset_model(self):
        expected = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
        with self.assertRaisesMessage(ValueError, expected):
            list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
    """Join promotion/demotion (INNER vs LEFT OUTER) for negated, OR'ed and
    isnull filters across nullable relations."""

    @classmethod
    def setUpTestData(cls):
        # a1 has a d but no b; a2 has both b (-> c) and d.
        cls.d1 = ModelD.objects.create(name='foo')
        d2 = ModelD.objects.create(name='bar')
        cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
        c = ModelC.objects.create(name='c')
        b = ModelB.objects.create(name='b', c=c)
        cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)

    def test_ticket_17886(self):
        # The first Q-object is generating the match, the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
        q_obj = (
            Q(d__name='foo') |
            Q(b__name='foo') |
            Q(b__c__name='foo')
        )
        qset = ModelA.objects.filter(q_obj)
        self.assertEqual(list(qset), [self.a1])
        # We generate one INNER JOIN to D. The join is direct and not nullable
        # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
        # for the b->c join, as a->b is nullable.
        self.assertEqual(str(qset.query).count('INNER JOIN'), 1)

    def test_isnull_filter_promotion(self):
        # isnull=True needs a LEFT OUTER join; isnull=False (and its double
        # negation) can use an INNER join. Each assertion pair checks both
        # the generated SQL and the actual result set.
        qs = ModelA.objects.filter(Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        qs = ModelA.objects.filter(~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

    def test_null_join_demotion(self):
        # ANDed isnull conditions on the same relation allow INNER JOIN;
        # ORed ones require LEFT OUTER JOIN, regardless of operand order.
        qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_ticket_21366(self):
        n = Note.objects.create(note='n', misc='m')
        e = ExtraInfo.objects.create(info='info', note=n)
        a = Author.objects.create(name='Author1', num=1, extra=e)
        Ranking.objects.create(rank=1, author=a)
        r1 = Report.objects.create(name='Foo', creator=a)
        r2 = Report.objects.create(name='Bar')
        Report.objects.create(name='Bar', creator=a)
        # Both joins in the OR must be LEFT OUTER (creator and ranking can
        # each be missing), and no extra joins may be generated.
        qs = Report.objects.filter(
            Q(creator__ranking__isnull=True) |
            Q(creator__ranking__rank=1, name='Foo')
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count(' JOIN '), 2)
        self.assertSequenceEqual(qs.order_by('name'), [r2, r1])

    def test_ticket_21748(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        i3 = Identifier.objects.create(name='i3')
        Program.objects.create(identifier=i1)
        Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # filter(rel=None) and exclude(rel=None) must be exact complements.
        self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
        self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])

    def test_ticket_21748_double_negated_and(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
        qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
        qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_double_negated_or(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Test OR + doubleneg. The expected result is that channel is LOUTER
        # joined, program INNER joined
        qs1_filter = Identifier.objects.filter(
            Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
        ).order_by('pk')
        qs1_doubleneg = Identifier.objects.exclude(
            ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_complex_filter(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case, one time in a way where each
        # NOT is pushed to lowest level in the boolean tree, and
        # another query where this isn't done.
        qs1 = Identifier.objects.filter(
            ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
        ).order_by('pk')
        qs2 = Identifier.objects.filter(
            Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1, qs2, lambda x: x)
        self.assertEqual(str(qs1.query).count('JOIN'),
                         str(qs2.query).count('JOIN'))
        self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
        self.assertEqual(str(qs1.query).count('INNER JOIN'),
                         str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
    def test_reverse_trimming(self):
        """
        Reverse joins are never trimmed: the ORM cannot know whether any row
        exists on the far side of the join, so the JOIN must stay in the SQL.
        """
        tag = Tag.objects.create()
        matches = Tag.objects.filter(annotation__tag=tag.pk)
        self.assertIn('INNER JOIN', str(matches.query))
        self.assertEqual(list(matches), [])
class JoinReuseTest(TestCase):
    """
    The queries reuse joins sensibly (for example, direct joins
    are always reused).
    """
    def test_fk_reuse(self):
        # Chained filters on the same FK path share a single join.
        qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_select_related(self):
        qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_annotation(self):
        qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_disjunction(self):
        qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_order_by(self):
        qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_revo2o_reuse(self):
        # A reverse one-to-one join is reused like a direct FK join.
        qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_revfk_noreuse(self):
        # A reverse FK join is multi-valued, so separate filter() calls must
        # each get their own join (they may match different related rows).
        qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
        self.assertEqual(str(qs.query).count('JOIN'), 2)

    def test_inverted_q_across_relations(self):
        """
        When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable joins
        are updated after trimming the query so that other lookups don't
        consider that the outer query's filters are in effect for the subquery
        (#26551).
        """
        springfield_elementary = School.objects.create()
        hogward = School.objects.create()
        Student.objects.create(school=springfield_elementary)
        hp = Student.objects.create(school=hogward)
        Classroom.objects.create(school=hogward, name='Potion')
        Classroom.objects.create(school=springfield_elementary, name='Main')
        qs = Student.objects.filter(
            ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None))
        )
        self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1='f1', f2='f2')
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(' JOIN '), 0)
qs = qs.select_related('a', 'b')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# The ANDed a__f2 filter allows us to use keep using INNER JOIN
# even inside the ORed case. If the join to a__ returns nothing,
# the ANDed filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(
Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
# Demote needed for the "a" join. It is marked as outer join by
# above filter (even if it is trimmed away).
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion5_demote(self):
        """Join promotion/demotion depends on the order filters are applied."""
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to an
        # inner join even if it is trimmed.
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # So, now the a__f1 join doesn't need promotion.
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # But b__f1 does.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # Now the join to a is created as LOUTER
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion6(self):
        """ANDed multi-join filters stay INNER even after an ORed pk filter."""
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
    def test_disjunction_promotion7(self):
        """Nested AND/OR combinations promote exactly the joins they must."""
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # a__f1 appears on both sides of the OR, so its join can stay INNER;
        # b__f1 only appears inside one branch and must be promoted.
        qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion_fexpression(self):
        """F() expressions inside ORed conditions also trigger join promotion."""
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
class ManyToManyExcludeTest(TestCase):
    """Tests for exclude() across many-to-many relations (incl. ticket 12823)."""

    def test_exclude_many_to_many(self):
        Identifier.objects.create(name='extra')
        program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
        channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
        channel.programs.add(program)
        # channel contains 'program1', so all Identifiers except that one
        # should be returned
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=channel).order_by('name'),
            ['<Identifier: channel>', '<Identifier: extra>']
        )
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=None).order_by('name'),
            ['<Identifier: program>']
        )

    def test_ticket_12823(self):
        pg3 = Page.objects.create(text='pg3')
        pg2 = Page.objects.create(text='pg2')
        pg1 = Page.objects.create(text='pg1')
        pa1 = Paragraph.objects.create(text='pa1')
        pa1.page.set([pg1, pg2])
        pa2 = Paragraph.objects.create(text='pa2')
        pa2.page.set([pg2, pg3])
        pa3 = Paragraph.objects.create(text='pa3')
        ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
        ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
        ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
        b1 = Book.objects.create(title='b1', chapter=ch1)
        b2 = Book.objects.create(title='b2', chapter=ch2)
        b3 = Book.objects.create(title='b3', chapter=ch3)
        q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
        # The exclude should not be implemented with an IS NOT NULL subclause.
        self.assertNotIn('IS NOT NULL', str(q.query))
        self.assertEqual(len(q), 2)
        self.assertNotIn(b1, q)
        self.assertIn(b2, q)
        self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
    """Regression test for ticket 19964: relabeling must not mutate a reused queryset."""

    def test_ticket_19964(self):
        my1 = MyObject.objects.create(data='foo')
        my1.parent = my1
        my1.save()
        my2 = MyObject.objects.create(data='bar', parent=my1)
        parents = MyObject.objects.filter(parent=F('id'))
        children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
        self.assertEqual(list(parents), [my1])
        # Evaluating the children query (which has parents as part of it) does
        # not change results for the parents query.
        self.assertEqual(list(children), [my2])
        self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
    """Regression test for ticket 20101 (ORing querysets with exclude subqueries)."""

    def test_ticket_20101(self):
        """
        Tests QuerySet ORed combining in exclude subquery case.
        """
        t = Tag.objects.create(name='foo')
        a1 = Annotation.objects.create(tag=t, name='a1')
        a2 = Annotation.objects.create(tag=t, name='a2')
        a3 = Annotation.objects.create(tag=t, name='a3')
        n = Note.objects.create(note='foo', misc='bar')
        qs1 = Note.objects.exclude(annotation__in=[a1, a2])
        qs2 = Note.objects.filter(annotation__in=[a3])
        # n has no annotations at all, so it is in the exclude() queryset,
        # not in the filter() one, and must survive the OR combination.
        self.assertIn(n, qs1)
        self.assertNotIn(n, qs2)
        self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(TestCase):
    """Join promotion when filtering on '' depends on backend NULL handling."""

    def test_empty_string_promotion(self):
        qs = RelatedObject.objects.filter(single__name='')
        # On backends that treat '' as NULL (e.g. Oracle), the join must be
        # promoted to an outer join for the filter to match anything.
        if connection.features.interprets_empty_strings_as_nulls:
            self.assertIn('LEFT OUTER JOIN', str(qs.query))
        else:
            self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
class ValuesSubqueryTests(TestCase):
    """Using a values() queryset as the right-hand side of an __in lookup."""

    def test_values_in_subquery(self):
        # If a values() queryset is used, then the given values
        # will be used instead of forcing use of the relation's field.
        o1 = Order.objects.create(id=-2)
        o2 = Order.objects.create(id=-1)
        oi1 = OrderItem.objects.create(order=o1, status=0)
        oi1.status = oi1.pk
        oi1.save()
        OrderItem.objects.create(order=o2, status=0)
        # The query below should match o1 as it has related order_item
        # with id == status.
        self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])
class DoubleInSubqueryTests(TestCase):
    """Nested __in subqueries built from values_list() querysets."""

    def test_double_subquery_in(self):
        lfa1 = LeafA.objects.create(data='foo')
        lfa2 = LeafA.objects.create(data='bar')
        lfb1 = LeafB.objects.create(data='lfb1')
        lfb2 = LeafB.objects.create(data='lfb2')
        Join.objects.create(a=lfa1, b=lfb1)
        Join.objects.create(a=lfa2, b=lfb2)
        # Two levels of subquery: LeafA pks -> Join rows -> LeafB ids.
        leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
        joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
        qs = LeafB.objects.filter(pk__in=joins)
        self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(TestCase):
    """Regression test for ticket 18785 (join trimming with exclude + filter)."""

    def test_ticket_18785(self):
        # Test join trimming from ticket18785
        qs = Item.objects.exclude(
            note__isnull=False
        ).filter(
            name='something', creator__extra__isnull=True
        ).order_by()
        self.assertEqual(1, str(qs.query).count('INNER JOIN'))
        self.assertEqual(0, str(qs.query).count('OUTER JOIN'))
class Ticket20788Tests(TestCase):
    """Regression test for ticket 20788 (exclude through a multi-level relation)."""

    def test_ticket_20788(self):
        Paragraph.objects.create()
        paragraph = Paragraph.objects.create()
        page = paragraph.page.create()
        chapter = Chapter.objects.create(paragraph=paragraph)
        Book.objects.create(chapter=chapter)
        paragraph2 = Paragraph.objects.create()
        Page.objects.create()
        chapter2 = Chapter.objects.create(paragraph=paragraph2)
        book2 = Book.objects.create(chapter=chapter2)
        # Only book2 has no chapter->paragraph->page chain reaching `page`.
        sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
        self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
    """Regression test for ticket 12807 (always-true ~Q(pk__in=[]) conditions)."""

    def test_ticket_12807(self):
        p1 = Paragraph.objects.create()
        p2 = Paragraph.objects.create()
        # The ORed condition below should have no effect on the query - the
        # ~Q(pk__in=[]) will always be True.
        qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
        self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
    """
    Type checking of values passed to related-field lookups: wrong model
    instances raise ValueError, while proxy/child/parent instances work.
    """
    # Message template used by the ORM when an instance of the wrong model
    # is passed to a related-field lookup.
    error = 'Cannot query "%s": Must be "%s" instance.'

    @classmethod
    def setUpTestData(cls):
        cls.oa = ObjectA.objects.create(name="oa")
        cls.poa = ProxyObjectA.objects.get(name="oa")
        cls.coa = ChildObjectA.objects.create(name="coa")
        # An Order sharing oa's pk but of an unrelated model type.
        cls.wrong_type = Order.objects.create(id=cls.oa.pk)
        cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
        ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
        cls.pob = ProxyObjectB.objects.all()
        ObjectC.objects.create(childobjecta=cls.coa)

    def test_wrong_type_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup.
        """
        # Passing incorrect object type
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.get(objecta=self.wrong_type)
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.wrong_type])
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta=self.wrong_type)
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
        # Passing an object of the class on which query is done.
        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
            ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])

    def test_wrong_backward_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup for backward relations.
        """
        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.exclude(objectb=self.oa)
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.get(objectb=self.wrong_type)

    def test_correct_lookup(self):
        """
        When passing proxy model objects, child objects, or parent objects,
        lookups work fine.
        """
        out_a = ['<ObjectA: oa>', ]
        out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
        out_c = ['<ObjectC: >']
        # proxy model objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)
        # child objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
        self.assertQuerysetEqual(
            ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
            out_b
        )
        # parent objects
        self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
        # QuerySet related object type checking shouldn't issue queries
        # (the querysets aren't evaluated here, hence zero queries) (#23266).
        with self.assertNumQueries(0):
            ObjectB.objects.filter(objecta__in=ObjectA.objects.all())

    def test_values_queryset_lookup(self):
        """
        #23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field
        """
        # Make sure the num and objecta field values match.
        ob = ObjectB.objects.get(name='ob')
        ob.num = ob.objecta.pk
        ob.save()
        pob = ObjectB.objects.get(name='pob')
        pob.num = pob.objecta.pk
        pob.save()
        self.assertQuerysetEqual(ObjectB.objects.filter(
            objecta__in=ObjectB.objects.all().values_list('num')
        ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])
class Ticket14056Tests(TestCase):
    """Regression test for ticket 14056 (ordering by a nullable reverse relation)."""

    def test_ticket_14056(self):
        s1 = SharedConnection.objects.create(data='s1')
        s2 = SharedConnection.objects.create(data='s2')
        s3 = SharedConnection.objects.create(data='s3')
        PointerA.objects.create(connection=s2)
        # The expected order of the NULL rows depends on whether the backend
        # sorts NULLs first or last.
        expected_ordering = (
            [s1, s3, s2] if connection.features.nulls_order_largest
            else [s2, s1, s3]
        )
        self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)
class Ticket20955Tests(TestCase):
    """Regression test for ticket 20955 (deep select_related join generation)."""

    def test_ticket_20955(self):
        jack = Staff.objects.create(name='jackstaff')
        jackstaff = StaffUser.objects.create(staff=jack)
        jill = Staff.objects.create(name='jillstaff')
        jillstaff = StaffUser.objects.create(staff=jill)
        task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
        task_get = Task.objects.get(pk=task.pk)
        # Load data so that assertNumQueries doesn't complain about the get
        # version's queries.
        task_get.creator.staffuser.staff
        task_get.owner.staffuser.staff
        qs = Task.objects.select_related(
            'creator__staffuser__staff', 'owner__staffuser__staff')
        self.assertEqual(str(qs.query).count(' JOIN '), 6)
        task_select_related = qs.get(pk=task.pk)
        # select_related must have fetched the full chain in the single query.
        with self.assertNumQueries(0):
            self.assertEqual(task_select_related.creator.staffuser.staff,
                             task_get.creator.staffuser.staff)
            self.assertEqual(task_select_related.owner.staffuser.staff,
                             task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
    """Regression test for ticket 21203 (select_related combined with defer)."""

    def test_ticket_21203(self):
        p = Ticket21203Parent.objects.create(parent_bool=True)
        c = Ticket21203Child.objects.create(parent=p)
        qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
        self.assertSequenceEqual(qs, [c])
        # The non-deferred parent field must still be populated correctly.
        self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
    """Join promotion behavior when values() is involved."""

    def test_values_no_promotion_for_existing(self):
        qs = Node.objects.filter(parent__parent__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        # values() must not demote a join that a filter already made INNER.
        qs = qs.values('parent__parent__id')
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Make sure there is a left outer join without the filter.
        qs = Node.objects.values('parent__parent__id')
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_non_nullable_fk_not_promoted(self):
        """A non-nullable FK traversed only in values() keeps an INNER join."""
        qs = ObjectB.objects.values('objecta__name')
        self.assertIn(' INNER JOIN ', str(qs.query))

    def test_ticket_21376(self):
        a = ObjectA.objects.create()
        ObjectC.objects.create(objecta=a)
        qs = ObjectC.objects.filter(
            Q(objecta=a) | Q(objectb__objecta=a),
        )
        qs = qs.filter(
            Q(objectb=1) | Q(objecta=a),
        )
        self.assertEqual(qs.count(), 1)
        tblname = connection.ops.quote_name(ObjectB._meta.db_table)
        self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
    """Regression test for ticket 21787 (exclude via FK to an inherited base)."""

    def test_ticket_21787(self):
        sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
        sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
        sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
        c1 = CategoryItem.objects.create(category=sc1)
        CategoryItem.objects.create(category=sc2)
        self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3])
        self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1])
class ReverseM2MCustomPkTests(TestCase):
    """Regression test for ticket 21879 (reverse M2M lookups with custom PKs)."""

    def test_ticket_21879(self):
        cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
        cp1 = CustomPk.objects.create(name='cp1', extra='extra')
        cp1.custompktag_set.add(cpt1)
        # Both directions of the relation must resolve with the custom PKs.
        self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
        self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
    """Regression test for ticket 22429 (negated Q with F() across relations)."""

    def test_ticket_22429(self):
        sc1 = School.objects.create()
        st1 = Student.objects.create(school=sc1)
        sc2 = School.objects.create()
        st2 = Student.objects.create(school=sc2)
        cr = Classroom.objects.create(school=sc1)
        cr.students.add(st1)
        # Students without a classroom in their own school: only st2.
        queryset = Student.objects.filter(~Q(classroom__school=F('school')))
        self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
    """Regression test for ticket 23605 (alias relabeling in nested subqueries)."""

    def test_ticket_23605(self):
        # Test filtering on a complicated q-object from ticket's report.
        # The query structure is such that we have multiple nested subqueries.
        # The original problem was that the inner queries weren't relabeled
        # correctly.
        # See also #24090.
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=10000.0)
        Ticket23605B.objects.create(
            field_b0=10000.0, field_b1=True,
            modelc_fk=c1, modela_fk=a1)
        complex_q = Q(pk__in=Ticket23605A.objects.filter(
            Q(
                # True for a1 as field_b0 = 10000, field_c0=10000
                # False for a2 as no ticket23605b found
                ticket23605b__field_b0__gte=1000000 /
                F("ticket23605b__modelc_fk__field_c0")
            ) &
            # True for a1 (field_b1=True)
            Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
                ~(
                    # Same filters as above commented filters, but
                    # double-negated (one for Q() above, one for
                    # parentheses). So, again a1 match, a2 not.
                    Q(field_b1=True) &
                    Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
                )
            ))).filter(ticket23605b__field_b1=True))
        qs1 = Ticket23605A.objects.filter(complex_q)
        self.assertSequenceEqual(qs1, [a1])
        qs2 = Ticket23605A.objects.exclude(complex_q)
        self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
    """Regression test for ticket 24278 (empty __in ORed with an empty Q)."""

    def test_ticket_24278(self):
        School.objects.create()
        # Q(pk__in=()) matches nothing and Q() adds no constraint; the OR of
        # the two must still produce an empty result, not everything.
        qs = School.objects.filter(Q(pk__in=()) | Q())
        self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(TestCase):
    """Invalid literal values for relation lookups raise ValueError eagerly."""

    def test_invalid_values(self):
        msg = "invalid literal for int() with base 10: 'abc'"
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag='abc')
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag__in=[123, 'abc'])
class TestTicket24605(TestCase):
    """Regression test for ticket 24605 (quoting of subquery table names)."""

    def test_ticket_24605(self):
        """
        Subquery table names should be quoted.
        """
        i1 = Individual.objects.create(alive=True)
        RelatedIndividual.objects.create(related=i1)
        i2 = Individual.objects.create(alive=False)
        RelatedIndividual.objects.create(related=i2)
        i3 = Individual.objects.create(alive=True)
        i4 = Individual.objects.create(alive=False)
        self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
        self.assertSequenceEqual(
            Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
            [i1, i2, i3]
        )
class Ticket23622Tests(TestCase):
    """Regression test for ticket 23622 (__pk__in vs __in with distinct-on subqueries)."""

    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_23622(self):
        """
        Make sure __pk__in and __in work the same for related fields when
        using a distinct on subquery.
        """
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=0.0)
        # Several B rows per A so the distinct('modela_fk') subquery actually
        # collapses rows.
        Ticket23605B.objects.create(
            modela_fk=a1, field_b0=123,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a1, field_b0=23,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a1, field_b0=234,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a1, field_b0=12,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a2, field_b0=567,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a2, field_b0=76,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a2, field_b0=7,
            field_b1=True,
            modelc_fk=c1,
        )
        Ticket23605B.objects.create(
            modela_fk=a2, field_b0=56,
            field_b1=True,
            modelc_fk=c1,
        )
        qx = (
            Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
            Q(ticket23605b__field_b0__gte=300)
        )
        qy = (
            Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
            Q(ticket23605b__field_b0__gte=300)
        )
        # Both spellings must produce identical result sets.
        self.assertEqual(
            set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)),
            set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True))
        )
        self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
| bsd-3-clause |
chrisfranzen/django | django/utils/tree.py | 109 | 5851 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
class Node(object):
    """
    A single internal node in the tree graph. A Node should be viewed as a
    connection (the root) with the children being either leaf nodes or other
    Node instances.
    """
    # Standard connector type. Clients usually won't use this at all and
    # subclasses will usually override the value.
    default = 'DEFAULT'

    def __init__(self, children=None, connector=None, negated=False):
        """
        Construct a new Node.

        The ``children`` sequence is copied, never aliased, and ``connector``
        falls back to ``self.default``. Passing ``negated`` directly is NOT
        the same as constructing a node and calling negate() on the result.
        """
        self.children = children[:] if children else []
        self.connector = connector if connector else self.default
        self.subtree_parents = []
        self.negated = negated

    # We need this because of django.db.models.query_utils.Q. Q.__init__() is
    # problematic, but it is a natural Node subclass in all other respects.
    @classmethod
    def _new_instance(cls, children=None, connector=None, negated=False):
        """
        Create a new instance of this class for internal use. Normally this
        just shadows __init__(), but subclasses whose __init__ signature is
        not an extension of Node.__init__ can override it so that Node can
        still build fresh instances of them.
        """
        fresh = Node(children, connector, negated)
        fresh.__class__ = cls
        return fresh

    def __str__(self):
        # Render as "(CONN: child, child, ...)", wrapped in NOT when negated.
        body = '%s: %s' % (self.connector,
                           ', '.join(str(child) for child in self.children))
        if self.negated:
            return '(NOT (%s))' % body
        return '(%s)' % body

    def __deepcopy__(self, memodict):
        """
        Utility method used by copy.deepcopy(): clone children and the saved
        subtree-parent states, preserving the concrete subclass.
        """
        clone = Node(connector=self.connector, negated=self.negated)
        clone.__class__ = self.__class__
        clone.children = copy.deepcopy(self.children, memodict)
        clone.subtree_parents = copy.deepcopy(self.subtree_parents, memodict)
        return clone

    def __len__(self):
        """The size of a node is the number of children it has."""
        return len(self.children)

    def __bool__(self):
        """A node is truthy when it has at least one child."""
        return bool(self.children)

    def __nonzero__(self):  # Python 2 compatibility
        # Dispatch through type(self) so subclass __bool__ overrides apply.
        return type(self).__bool__(self)

    def __contains__(self, other):
        """Return True if 'other' is a direct child of this instance."""
        return other in self.children

    def add(self, node, conn_type):
        """
        Add a new node to the tree. If conn_type matches the root's current
        connector, the node joins the first level; otherwise the whole tree
        is pushed down one level under a fresh root with conn_type.
        """
        if node in self.children and conn_type == self.connector:
            return  # Already present under the same connector: no-op.
        if len(self.children) < 2:
            # With fewer than two children the connector is irrelevant so
            # far, so adopt the requested one.
            self.connector = conn_type
        if self.connector != conn_type:
            # Push the existing tree down a level and re-root with conn_type.
            subtree = self._new_instance(self.children, self.connector,
                                         self.negated)
            self.connector = conn_type
            self.children = [subtree, node]
            return
        if isinstance(node, Node) and (node.connector == conn_type or
                                       len(node) == 1):
            # Flatten a compatible child node into this level.
            self.children.extend(node.children)
        else:
            self.children.append(node)

    def negate(self):
        """
        Negate the sense of the root connector. The children are reorganised
        so that the current node has a single child: a negated node holding
        all the previous children. This slightly odd construction makes
        adding new children behave more intuitively.

        Interpreting the meaning of this negate is up to client code; the
        method is useful for implementing "not" arrangements.
        """
        self.children = [
            self._new_instance(self.children, self.connector, not self.negated)
        ]
        self.connector = self.default

    def start_subtree(self, conn_type):
        """
        Begin collecting new nodes into a subtree of the current node;
        conn_type specifies how the subtree will join the existing children.
        The current state is saved so end_subtree() can restore it.
        """
        if len(self.children) == 1:
            self.connector = conn_type
        elif self.connector != conn_type:
            self.children = [self._new_instance(self.children, self.connector,
                                                self.negated)]
            self.connector = conn_type
            self.negated = False
        # Snapshot the current node so it can be restored later.
        self.subtree_parents.append(self.__class__(self.children,
                                                   self.connector,
                                                   self.negated))
        self.connector = self.default
        self.negated = False
        self.children = []

    def end_subtree(self):
        """
        Close off the most recent unmatched start_subtree() call: fold the
        collected state into a child node of the restored parent and make the
        parent state current again.
        """
        saved = self.subtree_parents.pop()
        collected = self.__class__(self.children, self.connector)
        self.connector = saved.connector
        self.negated = saved.negated
        self.children = saved.children
        self.children.append(collected)
pombredanne/pulp | server/test/unit/plugins/file/test_distributor.py | 4 | 13531 | from os import readlink
import copy
import csv
import errno
import os
import shutil
import tempfile
import unittest
from mock import Mock, MagicMock, patch
from pulp.common.plugins.distributor_constants import MANIFEST_FILENAME
from pulp.devel.mock_distributor import get_publish_conduit
from pulp.plugins.file.distributor import FileDistributor, FilePublishProgressReport, BUILD_DIRNAME
from pulp.plugins.model import Repository, Unit
# Location of the shared test-data directory, resolved relative to the
# test runner's working directory (NOTE(review): fragile if tests are run
# from a different cwd — confirm against the test harness).
DATA_DIR = os.path.realpath("../../../data/")
# Sample content filenames; presumably present under DATA_DIR.
SAMPLE_RPM = 'pulp-test-package-0.3.1-1.fc11.x86_64.rpm'
SAMPLE_FILE = 'test-override-pulp.conf'
class FileDistributorTest(unittest.TestCase):
"""
Tests the file distributor base class
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.target_dir = os.path.join(self.temp_dir, "target")
self.repo = MagicMock(spec=Repository)
self.repo.id = "foo"
self.repo.working_dir = self.temp_dir
self.unit = Unit('RPM', {'name': SAMPLE_RPM, 'size': 1, 'checksum': 'sum1'}, {},
os.path.join(DATA_DIR, SAMPLE_RPM))
self.publish_conduit = get_publish_conduit(existing_units=[self.unit, ])
def tearDown(self):
shutil.rmtree(self.temp_dir)
def create_distributor_with_mocked_api_calls(self):
distributor = FileDistributor()
distributor.get_hosting_locations = Mock()
distributor.get_hosting_locations.return_value = [self.target_dir, ]
distributor.post_repo_publish = Mock()
return distributor
def test_metadata_not_implemented(self):
self.assertRaises(NotImplementedError, FileDistributor.metadata)
def test_validate_config_not_implemented(self):
distributor = FileDistributor()
self.assertRaises(NotImplementedError, distributor.validate_config, None, None, None)
def test_get_hosting_locations_not_implemented(self):
distributor = FileDistributor()
host_locations = distributor.get_hosting_locations(None, None)
self.assertEquals(0, len(host_locations))
def test_post_repo_publish_not_implemented(self):
distributor = FileDistributor()
# ensure that this doesn't raise an error
distributor.post_repo_publish(None, None)
def test_repo_publish_api_calls(self):
distributor = self.create_distributor_with_mocked_api_calls()
result = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(result.success_flag)
self.assertTrue(distributor.get_hosting_locations.called)
self.assertTrue(distributor.post_repo_publish.called)
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as complete
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_COMPLETE)
def test_repo_publish_files_placed_properly(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# test if the link points to the correct place
link_target = os.readlink(target_file)
self.assertEquals(link_target, os.path.join(DATA_DIR, SAMPLE_RPM))
def test_repo_publish_metadata_writing(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
with open(os.path.join(self.target_dir, MANIFEST_FILENAME), 'rb') as f:
reader = csv.reader(f)
row = reader.next()
self.assertEquals(row[0], self.unit.unit_key['name'])
self.assertEquals(row[1], self.unit.unit_key['checksum'])
self.assertEquals(row[2], str(self.unit.unit_key['size']))
@patch('pulp.plugins.file.distributor._logger')
def test_repo_publish_handles_errors(self, mock_logger):
"""
Make sure that publish() does the right thing with the report when there is an error.
"""
distributor = self.create_distributor_with_mocked_api_calls()
distributor.post_repo_publish.side_effect = Exception('Rawr!')
report = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(mock_logger.exception.called)
self.assertFalse(report.success_flag)
self.assertEqual(report.summary['state'], FilePublishProgressReport.STATE_FAILED)
self.assertEqual(report.summary['error_message'], 'Rawr!')
self.assertTrue('Rawr!' in report.summary['traceback'])
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as failed
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_FAILED)
def test_republish_after_unit_removal(self):
"""
This test checks for an issue[0] we had where publishing an ISO repository, removing an ISO,
and then republishing would leave that removed ISO's symlink in the repository even though
it had been removed from the manifest. This test asserts that the republished repository no
longer contains the removed ISO.
[0] https://bugzilla.redhat.com/show_bug.cgi?id=970795
:param delete_protected_repo: The mocked version of delete_protected_repo
:type delete_protected_repo: function
"""
# Publish a repository
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# publish a new repo with a different unit in it
cloned_unit = copy.deepcopy(self.unit)
cloned_unit.unit_key['name'] = 'foo.rpm'
new_conduit = get_publish_conduit(existing_units=[cloned_unit, ])
distributor.publish_repo(self.repo, new_conduit, {})
# Make sure the new rpm is linked
self.assertTrue(os.path.islink(os.path.join(self.target_dir, 'foo.rpm')))
# Ensure the old rpm is no longer included
self.assertFalse(os.path.islink(target_file))
def test_distributor_removed_calls_unpublish(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.unpublish_repo = Mock()
distributor.distributor_removed(self.repo, {})
self.assertTrue(distributor.unpublish_repo.called)
def test_unpublish_repo(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(os.path.exists(self.target_dir))
distributor.unpublish_repo(self.repo, {})
self.assertFalse(os.path.exists(self.target_dir))
def test__rmtree_if_exists(self):
"""
Let's just make sure this simple thing doesn't barf.
"""
a_directory = os.path.join(self.temp_dir, 'a_directory')
test_filename = os.path.join(a_directory, 'test.txt')
os.makedirs(a_directory)
with open(test_filename, 'w') as test:
test.write("Please don't barf.")
# This should not cause any problems, and test.txt should still exist
distributor = self.create_distributor_with_mocked_api_calls()
distributor._rmtree_if_exists(os.path.join(self.temp_dir, 'fake_path'))
self.assertTrue(os.path.exists(test_filename))
# Now let's remove a_directory
distributor._rmtree_if_exists(a_directory)
self.assertFalse(os.path.exists(a_directory))
    def test__symlink_units(self):
        """
        _symlink_unit() must replace a preexisting wrong symlink with one
        pointing at the unit's storage path under DATA_DIR.
        """
        distributor = self.create_distributor_with_mocked_api_calls()
        # There's some logic in _symlink_units to handle preexisting files and
        # symlinks, so plant a symlink pointing somewhere bogus and verify it
        # gets repointed rather than left alone.
        build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
        os.makedirs(build_dir)
        os.symlink('/some/weird/path',
                   os.path.join(build_dir, self.unit.unit_key['name']))
        distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name'], ])
        expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
        self.assertTrue(os.path.islink(expected_symlink_path))
        expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
        self.assertEqual(os.path.realpath(expected_symlink_path), expected_symlink_destination)
    # Wrap the real os.symlink so we can count calls without changing behavior.
    @patch('os.symlink', side_effect=os.symlink)
    def test__symlink_units_existing_correct_link(self, symlink):
        """
        _symlink_unit() must leave an already-correct symlink untouched
        (no os.symlink call at all).
        """
        # There's some logic in _symlink_units to handle preexisting files and
        # symlinks, so pre-create the *correct* link before calling it.
        build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
        os.makedirs(build_dir)
        expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
        os.symlink(expected_symlink_destination,
                   os.path.join(build_dir, self.unit.unit_key['name']))
        # Now let's reset the Mock so that we can make sure it doesn't get
        # called during _symlink (the setup call above already counted once).
        symlink.reset_mock()
        distributor = self.create_distributor_with_mocked_api_calls()
        distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
        # The call count for symlink should be 0, because the _symlink_units
        # call should have noticed that the symlink was already correct and
        # thus should have skipped it.
        self.assertEqual(symlink.call_count, 0)
        expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
        self.assertTrue(os.path.islink(expected_symlink_path))
        self.assertEqual(os.path.realpath(expected_symlink_path),
                         os.path.realpath(expected_symlink_destination))
    @patch('os.readlink')
    def test__symlink_units_os_error(self, readlink):
        """
        _symlink_unit() must propagate an unexpected OSError from
        os.readlink (anything other than EINVAL, which is handled
        elsewhere; see test__symlink_units for the EINVAL-via-regular-file
        case).
        """
        os_error = OSError()
        # This would be an unexpected error for reading a symlink!
        os_error.errno = errno.ENOSPC
        readlink.side_effect = os_error
        # There's some logic in _symlink_units to handle preexisting files and
        # symlinks, so pre-create a correct link whose inspection will "fail".
        build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
        os.makedirs(build_dir)
        expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
        os.symlink(expected_symlink_destination,
                   os.path.join(build_dir, self.unit.unit_key['name']))
        try:
            distributor = self.create_distributor_with_mocked_api_calls()
            distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
            self.fail('An OSError should have been raised, but was not!')
        except OSError, e:
            # The original errno must survive the propagation unchanged.
            self.assertEqual(e.errno, errno.ENOSPC)
    @patch('os.readlink')
    def test__symlink_units_EINVAL_os_error(self, mock_readlink):
        """
        _symlink_unit() must treat EINVAL from os.readlink (i.e. "the path
        is not a symlink") by replacing the entry with a fresh, correct
        symlink.
        """
        os_error = OSError()
        # EINVAL is the errno readlink uses to signal "not a symlink".
        os_error.errno = errno.EINVAL
        mock_readlink.side_effect = os_error
        # There's some logic in _symlink_units to handle preexisting files and
        # symlinks, so plant a link at the expected name pointing at an old,
        # wrong target.
        build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
        os.makedirs(build_dir)
        original_link = os.path.join(build_dir, self.unit.unit_key['name'])
        old_target = os.path.join(DATA_DIR, SAMPLE_FILE)
        os.symlink(old_target, original_link)
        distributor = self.create_distributor_with_mocked_api_calls()
        distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
        # The entry must have been replaced with a new symlink (the old one
        # was deleted and re-created, so islink still holds).
        self.assertTrue(os.path.islink(original_link))
        # NOTE(review): this uses the bare name `readlink`, not os.readlink
        # (which is mocked above to raise) -- presumably imported at module
        # top as `from os import readlink`; confirm against the file header.
        created_link = readlink(original_link)
        self.assertNotEqual(old_target, created_link)
| gpl-2.0 |
tangfeixiong/nova | nova/virt/disk/mount/nbd.py | 47 | 4736 | # Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting images with qemu-nbd."""
import os
import random
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova.virt.disk.mount import api
LOG = logging.getLogger(__name__)
nbd_opts = [
cfg.IntOpt('timeout_nbd',
default=10,
help='Amount of time, in seconds, to wait for NBD '
'device start up.'),
]
CONF = cfg.CONF
CONF.register_opts(nbd_opts)
NBD_DEVICE_RE = re.compile('nbd[0-9]+')
class NbdMount(api.Mount):
    """qemu-nbd support disk images.

    Attaches a disk image to a free /dev/nbdN device via qemu-nbd so it
    can be mounted like a block device.  Errors are reported through
    ``self.error`` rather than raised, matching the api.Mount contract.
    """
    mode = 'nbd'

    def _detect_nbd_devices(self):
        """Detect nbd device files by listing /sys/block entries."""
        return filter(NBD_DEVICE_RE.match, os.listdir('/sys/block/'))

    def _find_unused(self, devices):
        """Return the first device with no qemu-nbd pid and no stale lock.

        A /sys/block/<dev>/pid file means qemu-nbd is attached; a leftover
        /var/lock/qemu-nbd-<dev> without a pid indicates an unclean prior
        unmount, which is logged and skipped.  Returns None if nothing is
        free.
        """
        for device in devices:
            if not os.path.exists(os.path.join('/sys/block/', device, 'pid')):
                if not os.path.exists('/var/lock/qemu-nbd-%s' % device):
                    return device
                else:
                    LOG.error(_LE('NBD error - previous umount did not '
                                  'cleanup /var/lock/qemu-nbd-%s.'), device)
        LOG.warning(_LW('No free nbd devices'))
        return None

    def _allocate_nbd(self):
        """Pick an unused nbd device path, or None (with self.error set)."""
        if not os.path.exists('/sys/block/nbd0'):
            LOG.error(_LE('nbd module not loaded'))
            self.error = _('nbd unavailable: module not loaded')
            return None
        devices = self._detect_nbd_devices()
        # Shuffle to spread allocations across devices and reduce races
        # between concurrent allocators picking the same free device.
        random.shuffle(devices)
        device = self._find_unused(devices)
        if not device:
            # really want to log this info, not raise
            self.error = _('No free nbd devices')
            return None
        return os.path.join('/dev', device)

    @utils.synchronized('nbd-allocation-lock')
    def _inner_get_dev(self):
        """Attach the image to a free nbd device; True on success.

        Serialized by the nbd-allocation-lock so two mounts cannot claim
        the same device between _allocate_nbd() and the qemu-nbd connect.
        """
        device = self._allocate_nbd()
        if not device:
            return False
        # NOTE(mikal): qemu-nbd will return an error if the device file is
        # already in use.
        LOG.debug('Get nbd device %(dev)s for %(imgfile)s',
                  {'dev': device, 'imgfile': self.image.path})
        _out, err = utils.trycmd('qemu-nbd', '-c', device,
                                 self.image.path,
                                 run_as_root=True)
        if err:
            self.error = _('qemu-nbd error: %s') % err
            LOG.info(_LI('NBD mount error: %s'), self.error)
            return False
        # NOTE(vish): this forks into another process, so give it a chance
        # to set up before continuing
        pidfile = "/sys/block/%s/pid" % os.path.basename(device)
        # Poll up to timeout_nbd seconds for the pid file to appear; the
        # for/else runs the else clause only if we never broke out.
        for _i in range(CONF.timeout_nbd):
            if os.path.exists(pidfile):
                self.device = device
                break
            time.sleep(1)
        else:
            self.error = _('nbd device %s did not show up') % device
            LOG.info(_LI('NBD mount error: %s'), self.error)
            # Cleanup: detach so the device is not left half-claimed.
            _out, err = utils.trycmd('qemu-nbd', '-d', device,
                                     run_as_root=True)
            if err:
                LOG.warning(_LW('Detaching from erroneous nbd device returned '
                                'error: %s'), err)
            return False
        self.error = ''
        self.linked = True
        return True

    def get_dev(self):
        """Retry requests for NBD devices."""
        return self._get_dev_retry_helper()

    def unget_dev(self):
        """Detach the image from its nbd device (no-op if not linked)."""
        if not self.linked:
            return
        LOG.debug('Release nbd device %s', self.device)
        utils.execute('qemu-nbd', '-d', self.device, run_as_root=True)
        self.linked = False
        self.device = None

    def flush_dev(self):
        """flush NBD block device buffer."""
        # Perform an explicit BLKFLSBUF to support older qemu-nbd(s).
        # Without this flush, when a nbd device gets re-used the
        # qemu-nbd intermittently hangs.
        if self.device:
            utils.execute('blockdev', '--flushbufs',
                          self.device, run_as_root=True)
| apache-2.0 |
jeffreyliu3230/osf.io | website/addons/dataverse/tests/test_views.py | 29 | 16116 | # -*- coding: utf-8 -*-
import nose
from nose.tools import * # noqa
import mock
import httplib as http
from tests.factories import AuthUserFactory
from dataverse.exceptions import UnauthorizedError
from framework.auth.decorators import Auth
from website.util import api_url_for
from website.addons.dataverse.serializer import DataverseSerializer
from website.addons.dataverse.tests.utils import (
create_mock_connection, DataverseAddonTestCase, create_external_account,
)
from website.oauth.models import ExternalAccount
class TestDataverseViewsAuth(DataverseAddonTestCase):
    """Tests for the Dataverse addon auth views (deauthorize, user config)."""

    def test_deauthorize(self):
        """Deauthorizing must clear all node settings and write a log entry."""
        url = api_url_for('dataverse_remove_user_auth',
                          pid=self.project._primary_key)
        self.app.delete(url, auth=self.user.auth)
        self.node_settings.reload()
        assert_false(self.node_settings.dataverse_alias)
        assert_false(self.node_settings.dataverse)
        assert_false(self.node_settings.dataset_doi)
        assert_false(self.node_settings.dataset)
        assert_false(self.node_settings.user_settings)
        # Log states that node was deauthorized
        self.project.reload()
        last_log = self.project.logs[-1]
        assert_equal(last_log.action, 'dataverse_node_deauthorized')
        log_params = last_log.params
        assert_equal(log_params['node'], self.project._primary_key)
        assert_equal(log_params['project'], None)

    def test_user_config_get(self):
        """userHasAuth flips to True once an external account is attached."""
        url = api_url_for('dataverse_user_config_get')
        res = self.app.get(url, auth=self.user.auth)
        result = res.json.get('result')
        assert_false(result['userHasAuth'])
        assert_in('hosts', result)
        assert_in('create', result['urls'])
        # userHasAuth is true with external accounts
        self.user.external_accounts.append(create_external_account())
        self.user.save()
        res = self.app.get(url, auth=self.user.auth)
        result = res.json.get('result')
        assert_true(result['userHasAuth'])
class TestDataverseViewsConfig(DataverseAddonTestCase):
    """Tests for the Dataverse addon config views (accounts, dataset choice)."""

    @mock.patch('website.addons.dataverse.views.config.client.connect_from_settings')
    def test_dataverse_get_datasets(self, mock_connection):
        """Listing datasets for an alias returns the mock connection's three."""
        mock_connection.return_value = create_mock_connection()
        url = api_url_for('dataverse_get_datasets', pid=self.project._primary_key)
        params = {'alias': 'ALIAS1'}
        res = self.app.post_json(url, params, auth=self.user.auth)
        assert_equal(len(res.json['datasets']), 3)
        first = res.json['datasets'][0]
        assert_equal(first['title'], 'Example (DVN/00001)')
        assert_equal(first['doi'], 'doi:12.3456/DVN/00001')

    def test_dataverse_get_user_accounts(self):
        """All of the user's external accounts are returned, serialized."""
        external_account = create_external_account()
        self.user.external_accounts.append(external_account)
        self.user.external_accounts.append(create_external_account())
        self.user.save()
        url = api_url_for('dataverse_get_user_accounts')
        res = self.app.get(url, auth=self.user.auth)
        accounts = res.json['accounts']
        assert_equal(len(accounts), 2)
        # The JSON payload must match the serializer's canonical form.
        serializer = DataverseSerializer(user_settings=self.user_settings)
        assert_equal(
            accounts[0], serializer.serialize_account(external_account),
        )

    def test_dataverse_get_user_accounts_no_accounts(self):
        """A user with no external accounts gets an empty list, not an error."""
        url = api_url_for('dataverse_get_user_accounts')
        res = self.app.get(url, auth=self.user.auth)
        accounts = res.json['accounts']
        assert_equal(len(accounts), 0)

    @mock.patch('website.addons.dataverse.views.config.client._connect')
    def test_dataverse_add_external_account(self, mock_connection):
        """Adding an account stores host/token as oauth key/secret."""
        mock_connection.return_value = create_mock_connection()
        host = 'myfakehost.data.verse'
        token = 'api-token-here'
        url = api_url_for('dataverse_add_user_account')
        params = {'host': host, 'api_token': token}
        self.app.post_json(url, params, auth=self.user.auth)
        self.user.reload()
        assert_equal(len(self.user.external_accounts), 1)
        external_account = self.user.external_accounts[0]
        assert_equal(external_account.provider, 'dataverse')
        assert_equal(external_account.oauth_key, host)
        assert_equal(external_account.oauth_secret, token)

    @mock.patch('website.addons.dataverse.views.config.client._connect')
    def test_dataverse_add_external_account_fail(self, mock_connection):
        """Bad credentials yield 401 and no account is stored."""
        mock_connection.side_effect = UnauthorizedError('Bad credentials!')
        host = 'myfakehost.data.verse'
        token = 'api-token-here'
        url = api_url_for('dataverse_add_user_account')
        params = {'host': host, 'api_token': token}
        res = self.app.post_json(
            url, params, auth=self.user.auth, expect_errors=True,
        )
        self.user.reload()
        assert_equal(len(self.user.external_accounts), 0)
        assert_equal(res.status_code, http.UNAUTHORIZED)

    @mock.patch('website.addons.dataverse.views.config.client._connect')
    def test_dataverse_add_external_account_twice(self, mock_connection):
        """Adding the same account twice must not create a duplicate."""
        mock_connection.return_value = create_mock_connection()
        host = 'myfakehost.data.verse'
        token = 'api-token-here'
        url = api_url_for('dataverse_add_user_account')
        params = {'host': host, 'api_token': token}
        self.app.post_json(url, params, auth=self.user.auth)
        self.app.post_json(url, params, auth=self.user.auth)
        self.user.reload()
        assert_equal(len(self.user.external_accounts), 1)
        external_account = self.user.external_accounts[0]
        assert_equal(external_account.provider, 'dataverse')
        assert_equal(external_account.oauth_key, host)
        assert_equal(external_account.oauth_secret, token)

    @mock.patch('website.addons.dataverse.views.config.client._connect')
    def test_dataverse_add_external_account_existing(self, mock_connection):
        """A pre-saved matching account is reused instead of re-created."""
        mock_connection.return_value = create_mock_connection()
        host = 'myfakehost.data.verse'
        token = 'dont-use-this-token-in-other-tests'
        display_name = 'loaded_version'
        # Save an existing version
        external_account = ExternalAccount(
            provider='dataverse',
            provider_name='Dataverse',
            display_name=display_name,
            oauth_key=host,
            oauth_secret=token,
            provider_id=token,
        )
        external_account.save()
        url = api_url_for('dataverse_add_user_account')
        params = {'host': host, 'api_token': token}
        self.app.post_json(url, params, auth=self.user.auth)
        self.user.reload()
        assert_equal(len(self.user.external_accounts), 1)
        external_account = self.user.external_accounts[0]
        assert_equal(external_account.provider, 'dataverse')
        assert_equal(external_account.oauth_key, host)
        assert_equal(external_account.oauth_secret, token)
        # Ensure we got the loaded version
        assert_equal(external_account.display_name, display_name)

    @mock.patch('website.addons.dataverse.views.config.client.connect_from_settings')
    def test_set_dataverse_and_dataset(self, mock_connection):
        """Selecting a dataverse/dataset updates node settings and logs it."""
        mock_connection.return_value = create_mock_connection()
        url = api_url_for('dataverse_set_config',
                          pid=self.project._primary_key)
        params = {
            'dataverse': {'alias': 'ALIAS3'},
            'dataset': {'doi': 'doi:12.3456/DVN/00003'},
        }
        # Select a different dataset
        self.app.post_json(url, params, auth=self.user.auth)
        self.project.reload()
        self.node_settings.reload()
        # New dataset was selected
        assert_equal(self.node_settings.dataverse_alias, 'ALIAS3')
        assert_equal(self.node_settings.dataset, 'Example (DVN/00003)')
        assert_equal(self.node_settings.dataset_doi, 'doi:12.3456/DVN/00003')
        assert_equal(self.node_settings.dataset_id, '18')
        # Log states that a dataset was selected
        last_log = self.project.logs[-1]
        assert_equal(last_log.action, 'dataverse_dataset_linked')
        log_params = last_log.params
        assert_equal(log_params['node'], self.project._primary_key)
        assert_is_none(log_params['project'])
        assert_equal(log_params['dataset'], 'Example (DVN/00003)')

    @mock.patch('website.addons.dataverse.views.config.client.connect_from_settings')
    def test_set_dataverse_no_dataset(self, mock_connection):
        """Selecting a dataverse with no dataset is a 400; settings untouched."""
        mock_connection.return_value = create_mock_connection()
        num_old_logs = len(self.project.logs)
        url = api_url_for('dataverse_set_config',
                          pid=self.project._primary_key)
        params = {
            'dataverse': {'alias': 'ALIAS3'},
            'dataset': {},    # The dataverse has no datasets
        }
        # Select a different dataset
        res = self.app.post_json(url, params, auth=self.user.auth,
                                 expect_errors=True)
        self.node_settings.reload()
        # Old settings did not change
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(self.node_settings.dataverse_alias, 'ALIAS2')
        assert_equal(self.node_settings.dataset, 'Example (DVN/00001)')
        assert_equal(self.node_settings.dataset_doi, 'doi:12.3456/DVN/00001')
        # Nothing was logged
        self.project.reload()
        assert_equal(len(self.project.logs), num_old_logs)
class TestDataverseViewsHgrid(DataverseAddonTestCase):
    """Tests for the file-grid root folder view under various publish states."""

    @mock.patch('website.addons.dataverse.views.hgrid.connect_from_settings')
    @mock.patch('website.addons.dataverse.views.hgrid.get_files')
    def test_dataverse_root_published(self, mock_files, mock_connection):
        """With published files: contributor may edit, others see published."""
        mock_connection.return_value = create_mock_connection()
        mock_files.return_value = ['mock_file']
        self.project.set_privacy('public')
        self.project.save()
        # Re-authorize the node with a fresh external account while keeping
        # the originally-selected dataverse/dataset (set_auth resets them).
        alias = self.node_settings.dataverse_alias
        doi = self.node_settings.dataset_doi
        external_account = create_external_account()
        self.user.external_accounts.append(external_account)
        self.user.save()
        self.node_settings.set_auth(external_account, self.user)
        self.node_settings.dataverse_alias = alias
        self.node_settings.dataset_doi = doi
        self.node_settings.save()
        url = api_url_for('dataverse_root_folder_public',
                          pid=self.project._primary_key)
        # Contributor can select between states, current state is correct
        res = self.app.get(url, auth=self.user.auth)
        assert_true(res.json[0]['permissions']['edit'])
        assert_true(res.json[0]['hasPublishedFiles'])
        assert_equal(res.json[0]['version'], 'latest-published')
        # Non-contributor gets published version, no options
        user2 = AuthUserFactory()
        res = self.app.get(url, auth=user2.auth)
        assert_false(res.json[0]['permissions']['edit'])
        assert_true(res.json[0]['hasPublishedFiles'])
        assert_equal(res.json[0]['version'], 'latest-published')

    @mock.patch('website.addons.dataverse.views.hgrid.connect_from_settings')
    @mock.patch('website.addons.dataverse.views.hgrid.get_files')
    def test_dataverse_root_not_published(self, mock_files, mock_connection):
        """Without published files: contributor sees draft, others nothing."""
        mock_connection.return_value = create_mock_connection()
        mock_files.return_value = []
        self.project.set_privacy('public')
        self.project.save()
        # Same re-authorization dance as test_dataverse_root_published.
        alias = self.node_settings.dataverse_alias
        doi = self.node_settings.dataset_doi
        external_account = create_external_account()
        self.user.external_accounts.append(external_account)
        self.user.save()
        self.node_settings.set_auth(external_account, self.user)
        self.node_settings.dataverse_alias = alias
        self.node_settings.dataset_doi = doi
        self.node_settings.save()
        url = api_url_for('dataverse_root_folder_public',
                          pid=self.project._primary_key)
        # Contributor gets draft, no options
        res = self.app.get(url, auth=self.user.auth)
        assert_true(res.json[0]['permissions']['edit'])
        assert_false(res.json[0]['hasPublishedFiles'])
        assert_equal(res.json[0]['version'], 'latest')
        # Non-contributor gets nothing
        user2 = AuthUserFactory()
        res = self.app.get(url, auth=user2.auth)
        assert_equal(res.json, [])

    @mock.patch('website.addons.dataverse.views.hgrid.connect_from_settings')
    @mock.patch('website.addons.dataverse.views.hgrid.get_files')
    def test_dataverse_root_no_connection(self, mock_files, mock_connection):
        """A failed Dataverse connection yields an empty folder list."""
        mock_connection.return_value = create_mock_connection()
        mock_files.return_value = ['mock_file']
        url = api_url_for('dataverse_root_folder_public',
                          pid=self.project._primary_key)
        mock_connection.return_value = None
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.json, [])

    def test_dataverse_root_incomplete(self):
        """A node with no dataset selected yields an empty folder list."""
        self.node_settings.dataset_doi = None
        self.node_settings.save()
        url = api_url_for('dataverse_root_folder_public',
                          pid=self.project._primary_key)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.json, [])
class TestDataverseViewsCrud(DataverseAddonTestCase):
    """Tests for the publish endpoint: dataset-only vs. dataverse+dataset."""

    @mock.patch('website.addons.dataverse.views.crud.connect_from_settings_or_401')
    @mock.patch('website.addons.dataverse.views.crud.publish_dataset')
    @mock.patch('website.addons.dataverse.views.crud.publish_dataverse')
    def test_dataverse_publish_dataset(self, mock_publish_dv, mock_publish_ds, mock_connection):
        """publish_both=False must publish the dataset but not the dataverse."""
        mock_connection.return_value = create_mock_connection()
        url = api_url_for('dataverse_publish_dataset',
                          pid=self.project._primary_key)
        self.app.put_json(url, params={'publish_both': False}, auth=self.user.auth)
        # Only dataset was published
        assert_false(mock_publish_dv.called)
        assert_true(mock_publish_ds.called)

    @mock.patch('website.addons.dataverse.views.crud.connect_from_settings_or_401')
    @mock.patch('website.addons.dataverse.views.crud.publish_dataset')
    @mock.patch('website.addons.dataverse.views.crud.publish_dataverse')
    def test_dataverse_publish_both(self, mock_publish_dv, mock_publish_ds, mock_connection):
        """publish_both=True must publish the dataverse and the dataset."""
        mock_connection.return_value = create_mock_connection()
        url = api_url_for('dataverse_publish_dataset',
                          pid=self.project._primary_key)
        self.app.put_json(url, params={'publish_both': True}, auth=self.user.auth)
        # Both Dataverse and dataset were published
        assert_true(mock_publish_dv.called)
        assert_true(mock_publish_ds.called)
class TestDataverseRestrictions(DataverseAddonTestCase):
    """Ensure non-authorizing contributors cannot change node configuration."""

    def setUp(self):
        # NOTE(review): super() is invoked with DataverseAddonTestCase (the
        # parent class) rather than TestDataverseRestrictions, which skips
        # DataverseAddonTestCase.setUp in the MRO -- verify this is
        # intentional and not a copy/paste slip.
        super(DataverseAddonTestCase, self).setUp()
        # Nasty contributor who will try to access content that he shouldn't
        # have access to
        self.contrib = AuthUserFactory()
        self.project.add_contributor(self.contrib, auth=Auth(self.user))
        self.project.save()

    @mock.patch('website.addons.dataverse.views.config.client.connect_from_settings')
    def test_restricted_set_dataset_not_owner(self, mock_connection):
        """A contributor who is not the node authorizer gets 403 on set_config."""
        mock_connection.return_value = create_mock_connection()
        # Contributor has dataverse auth, but is not the node authorizer
        self.contrib.add_addon('dataverse')
        self.contrib.save()
        url = api_url_for('dataverse_set_config', pid=self.project._primary_key)
        params = {
            'dataverse': {'alias': 'ALIAS1'},
            'dataset': {'doi': 'doi:12.3456/DVN/00002'},
        }
        res = self.app.post_json(url, params, auth=self.contrib.auth,
                                 expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)
if __name__ == '__main__':
nose.run()
| apache-2.0 |
atsolakid/edx-platform | common/djangoapps/cache_toolbox/relation.py | 239 | 3483 | """
Caching instances via ``related_name``
--------------------------------------
``cache_relation`` adds utility methods to a model to obtain ``related_name``
instances via the cache.
Usage
~~~~~
::
from django.db import models
from django.contrib.auth.models import User
class Foo(models.Model):
user = models.OneToOneField(
User,
primary_key=True,
related_name='foo',
)
name = models.CharField(max_length=20)
cache_relation(User.foo)
::
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache miss - hits the database
<Foo: >
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache hit - no database access
<Foo: >
>>> user = User.objects.get(pk=2)
>>> user.foo # Regular lookup - hits the database
<Foo: >
>>> user.foo_cache # Special-case: Will not hit cache or database.
<Foo: >
Accessing ``user_instance.foo_cache`` (note the "_cache" suffix) will now
obtain the related ``Foo`` instance via the cache. Accessing the original
``user_instance.foo`` attribute will perform the lookup as normal.
Invalidation
~~~~~~~~~~~~
Upon saving (or deleting) the instance, the cache is cleared. For example::
>>> user = User.objects.get(pk=1)
>>> foo = user.foo_cache # (Assume cache hit from previous session)
>>> foo.name = "New name"
>>> foo.save() # Cache is cleared on save
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache miss.
<Foo: >
Manual invalidation may also be performed using the following methods::
>>> user_instance.foo_cache_clear()
>>> User.foo_cache_clear_fk(user_instance_pk)
Manual invalidation is required if you use ``.update()`` methods which the
``post_save`` and ``post_delete`` hooks cannot intercept.
Support
~~~~~~~
``cache_relation`` currently only works with ``OneToOneField`` fields. Support
for regular ``ForeignKey`` fields is planned.
"""
from django.db.models.signals import post_save, post_delete
from .core import get_instance, delete_instance
def cache_relation(descriptor, timeout=None):
    """Attach cached accessors for a OneToOne ``related_name`` descriptor.

    Adds ``<related_name>_cache`` (cached lookup), ``<related_name>_cache_clear``
    and ``<related_name>_cache_clear_pk`` to the parent model, and wires
    post_save/post_delete signals on the related model to invalidate the
    cache.  ``timeout`` is forwarded to ``get_instance`` for cache expiry.
    """
    rel = descriptor.related
    related_name = '%s_cache' % rel.field.related_query_name()

    @property
    def get(self):
        # Always use the cached "real" instance if available
        try:
            return getattr(self, descriptor.cache_name)
        except AttributeError:
            pass
        # Lookup cached instance memoized on this object from a prior access
        try:
            return getattr(self, '_%s_cache' % related_name)
        except AttributeError:
            pass
        # Cache/database lookup; memoize on the instance for this request.
        instance = get_instance(rel.model, self.pk, timeout)
        setattr(self, '_%s_cache' % related_name, instance)
        return instance
    setattr(rel.parent_model, related_name, get)

    # Clearing cache: manual invalidation helpers exposed on the parent model
    # (required when .update() bypasses the save/delete signals below).
    def clear(self):
        delete_instance(rel.model, self)

    @classmethod
    def clear_pk(cls, *instances_or_pk):
        delete_instance(rel.model, *instances_or_pk)

    def clear_cache(sender, instance, *args, **kwargs):
        delete_instance(rel.model, instance)
    setattr(rel.parent_model, '%s_clear' % related_name, clear)
    setattr(rel.parent_model, '%s_clear_pk' % related_name, clear_pk)
    # weak=False: the closures above would otherwise be garbage-collected
    # and the signal connections silently dropped.
    post_save.connect(clear_cache, sender=rel.model, weak=False)
    post_delete.connect(clear_cache, sender=rel.model, weak=False)
| agpl-3.0 |
SAM-IT-SA/odoo | addons/l10n_bo/__openerp__.py | 259 | 1698 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name": "Bolivia Localization Chart Account",
    "version": "1.0",
    "description": """
Bolivian accounting chart and tax localization.
Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes
    """,
    "author": "Cubic ERP",
    "website": "http://cubicERP.com",
    "category": "Localization/Account Charts",
    "depends": [
        "account_chart",
    ],
    # BUG FIX: this manifest previously declared "data" twice; in a Python
    # dict literal the last duplicate silently wins, so the trailing empty
    # "data": [] wiped out this file list and none of the chart/tax XML was
    # ever loaded.  Keep a single "data" key with the real file list.
    "data": [
        "account_tax_code.xml",
        "l10n_bo_chart.xml",
        "account_tax.xml",
        "l10n_bo_wizard.xml",
    ],
    "demo_xml": [
    ],
    "active": False,
    "installable": True,
    "certificate": "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nagual13/wal-e | wal_e/worker/base.py | 9 | 21516 | import gevent
import re
from gevent import queue
from wal_e import exception
from wal_e import log_help
from wal_e import storage
logger = log_help.WalELogger(__name__)
generic_weird_key_hint_message = ('This means an unexpected key was found in '
'a WAL-E prefix. It can be harmless, or '
'the result a bug or misconfiguration.')
class _Deleter(object):
    """Asynchronous, batched key deleter backed by a gevent worker greenlet.

    Keys enqueued via delete() are drained by a single worker that issues
    bulk delete calls of up to PAGINATION_MAX keys.  Subclasses provide
    _delete_batch().  Exceptions in the worker are forwarded to the
    greenlet that constructed the Deleter.
    """

    def __init__(self):
        # Allow enqueuing of several API calls worth of work, which
        # right now allow 1000 key deletions per job.
        self.PAGINATION_MAX = 1000
        self._q = queue.JoinableQueue(self.PAGINATION_MAX * 10)
        self._worker = gevent.spawn(self._work)
        self._parent_greenlet = gevent.getcurrent()
        self.closing = False

    def close(self):
        """Drain the queue, then terminate the worker greenlet."""
        self.closing = True
        self._q.join()
        self._worker.kill(block=True)

    def delete(self, key):
        """Enqueue *key* for deletion; invalid once close() has begun."""
        if self.closing:
            raise exception.UserCritical(
                msg='attempt to delete while closing Deleter detected',
                hint='This should be reported as a bug.')
        self._q.put(key)

    def _work(self):
        # Worker-greenlet main loop: batch, delete, acknowledge, repeat.
        try:
            while True:
                # If _cut_batch has an error, it is responsible for
                # invoking task_done() the appropriate number of
                # times.
                page = self._cut_batch()
                # If nothing was enqueued, yield and wait around a bit
                # before looking for work again.
                if not page:
                    gevent.sleep(1)
                    continue
                # However, in event of success, the jobs are not
                # considered done until the _delete_batch returns
                # successfully.  In event an exception is raised, it
                # will be propagated to the Greenlet that created the
                # Deleter, but the tasks are marked done nonetheless.
                try:
                    self._delete_batch(page)
                finally:
                    for i in xrange(len(page)):
                        self._q.task_done()
        except KeyboardInterrupt, e:
            # Absorb-and-forward the exception instead of using
            # gevent's link_exception operator, because in gevent <
            # 1.0 there is no way to turn off the alarming stack
            # traces emitted when an exception propagates to the top
            # of a greenlet, linked or no.
            #
            # Normally, gevent.kill is ill-advised because it results
            # in asynchronous exceptions being raised in that
            # greenlet, but given that KeyboardInterrupt is nominally
            # asynchronously raised by receiving SIGINT to begin with,
            # there nothing obvious being lost from using kill() in
            # this case.
            gevent.kill(self._parent_greenlet, e)

    def _cut_batch(self):
        # Attempt to obtain as much work as possible, up to the
        # maximum able to be processed by S3 at one time,
        # PAGINATION_MAX.
        page = []
        try:
            for i in xrange(self.PAGINATION_MAX):
                page.append(self._q.get_nowait())
        except queue.Empty:
            pass
        except:
            # In event everything goes sideways while dequeuing,
            # carefully un-lock the queue (the bare except is deliberate:
            # whatever is caught here is immediately re-raised).
            for i in xrange(len(page)):
                self._q.task_done()
            raise
        return page
class _BackupList(object):
    """Iterable over base-backup sentinel files found under a storage layout.

    Subclasses supply _backup_list() for the concrete storage backend.
    With detail=True, iterating also loads per-backup detail (one extra
    request each).
    """

    def __init__(self, conn, layout, detail):
        self.conn = conn
        self.layout = layout
        self.detail = detail

    def find_all(self, query):
        """A procedure to assist in finding or detailing specific backups

        Currently supports:

        * a backup name (base_number_number)

        * the psuedo-name LATEST, which finds the backup with the most
          recent modification date

        """
        match = re.match(storage.BASE_BACKUP_REGEXP, query)
        if match is not None:
            # Exact-name query: scan and yield every backup with that name.
            for backup in iter(self):
                if backup.name == query:
                    yield backup
        elif query == 'LATEST':
            all_backups = list(iter(self))
            if not all_backups:
                return
            assert len(all_backups) > 0
            all_backups.sort(key=lambda bi: bi.last_modified)
            yield all_backups[-1]
        else:
            raise exception.UserException(
                msg='invalid backup query submitted',
                detail='The submitted query operator was "{0}."'
                .format(query))

    def _backup_list(self):
        # Backend-specific key listing; implemented by subclasses.
        raise NotImplementedError()

    def __iter__(self):
        # Try to identify the sentinel file. This is sort of a drag, the
        # storage format should be changed to put them in their own leaf
        # directory.
        #
        # TODO: change storage format
        sentinel_depth = self.layout.basebackups().count('/')
        matcher = re.compile(storage.COMPLETE_BASE_BACKUP_REGEXP).match
        for key in self._backup_list(self.layout.basebackups()):
            key_name = self.layout.key_name(key)
            # Use key depth vs. base and regexp matching to find
            # sentinel files.
            key_depth = key_name.count('/')
            if key_depth == sentinel_depth:
                backup_sentinel_name = key_name.rsplit('/', 1)[-1]
                match = matcher(backup_sentinel_name)
                if match:
                    # TODO: It's necessary to use the name of the file to
                    # get the beginning wal segment information, whereas
                    # the ending information is encoded into the file
                    # itself.  Perhaps later on it should all be encoded
                    # into the name when the sentinel files are broken out
                    # into their own directory, so that S3 listing gets
                    # all commonly useful information without doing a
                    # request-per.
                    groups = match.groupdict()
                    info = storage.get_backup_info(
                        self.layout,
                        name='base_(unknown)_{offset}'.format(**groups),
                        last_modified=self.layout.key_last_modified(key),
                        wal_segment_backup_start=groups['filename'],
                        wal_segment_offset_backup_start=groups['offset'])
                    if self.detail:
                        try:
                            # This costs one web request
                            info.load_detail(self.conn)
                        except gevent.Timeout:
                            # Best-effort: detail loading may time out;
                            # yield the undetailed info rather than fail.
                            pass
                    yield info
class _DeleteFromContext(object):
    def __init__(self, conn, layout, dry_run):
        """Hold a storage connection, layout, and the dry-run flag.

        dry_run must be exactly True or False (asserted): True logs what
        would be deleted; False performs deletions via self.deleter.
        """
        self.conn = conn
        self.dry_run = dry_run
        self.layout = layout
        self.deleter = None  # Must be set by subclass
        assert self.dry_run in (True, False)
    def _container_name(self, key):
        # Backend hook: subclasses return the bucket/container for *key*;
        # the base implementation yields None (used only in log URLs).
        pass
def _maybe_delete_key(self, key, type_of_thing):
key_name = self.layout.key_name(key)
url = '{scheme}://{bucket}/{name}'.format(
scheme=self.layout.scheme, bucket=self._container_name(key),
name=key_name)
log_message = dict(
msg='deleting {0}'.format(type_of_thing),
detail='The key being deleted is {url}.'.format(url=url))
if self.dry_run is False:
logger.info(**log_message)
self.deleter.delete(key)
elif self.dry_run is True:
log_message['hint'] = ('This is only a dry run -- no actual data '
'is being deleted')
logger.info(**log_message)
else:
assert False
def _groupdict_to_segment_number(self, d):
return storage.base.SegmentNumber(log=d['log'], seg=d['seg'])
def _delete_if_before(self, delete_horizon_segment_number,
scanned_segment_number, key, type_of_thing):
if scanned_segment_number.as_an_integer < \
delete_horizon_segment_number.as_an_integer:
self._maybe_delete_key(key, type_of_thing)
def _delete_base_backups_before(self, segment_info):
base_backup_sentinel_depth = self.layout.basebackups().count('/') + 1
version_depth = base_backup_sentinel_depth + 1
volume_backup_depth = version_depth + 1
# The base-backup sweep, deleting bulk data and metadata, but
# not any wal files.
for key in self._backup_list(prefix=self.layout.basebackups()):
key_name = self.layout.key_name(key)
url = '{scheme}://{bucket}/{name}'.format(
scheme=self.layout.scheme, bucket=self._container_name(key),
name=key_name)
key_parts = key_name.split('/')
key_depth = len(key_parts)
if key_depth not in (base_backup_sentinel_depth, version_depth,
volume_backup_depth):
# Check depth (in terms of number of
# slashes/delimiters in the key); if there exists a
# key with an unexpected depth relative to the
# context, complain a little bit and move on.
logger.warning(
msg="skipping non-qualifying key in 'delete before'",
detail=(
'The unexpected key is "{0}", and it appears to be '
'at an unexpected depth.'.format(url)),
hint=generic_weird_key_hint_message)
elif key_depth == base_backup_sentinel_depth:
# This is a key at the base-backup-sentinel file
# depth, so check to see if it matches the known form.
match = re.match(storage.COMPLETE_BASE_BACKUP_REGEXP,
key_parts[-1])
if match is None:
# This key was at the level for a base backup
# sentinel, but doesn't match the known pattern.
# Complain about this, and move on.
logger.warning(
msg="skipping non-qualifying key in 'delete before'",
detail=('The unexpected key is "{0}", and it appears '
'not to match the base-backup sentinel '
'pattern.'.format(url)),
hint=generic_weird_key_hint_message)
else:
# This branch actually might delete some data: the
# key is at the right level, and matches the right
# form. The last check is to make sure it's in
# the range of things to delete, and if that is
# the case, attempt deletion.
assert match is not None
scanned_sn = \
self._groupdict_to_segment_number(match.groupdict())
self._delete_if_before(segment_info, scanned_sn, key,
'a base backup sentinel file')
elif key_depth == version_depth:
match = re.match(
storage.BASE_BACKUP_REGEXP, key_parts[-2])
if match is None or key_parts[-1] != 'extended_version.txt':
logger.warning(
msg="skipping non-qualifying key in 'delete before'",
detail=('The unexpected key is "{0}", and it appears '
'not to match the extended-version backup '
'pattern.'.format(url)),
hint=generic_weird_key_hint_message)
else:
assert match is not None
scanned_sn = \
self._groupdict_to_segment_number(match.groupdict())
self._delete_if_before(segment_info, scanned_sn, key,
'a extended version metadata file')
elif key_depth == volume_backup_depth:
# This has the depth of a base-backup volume, so try
# to match the expected pattern and delete it if the
# pattern matches and the base backup part qualifies
# properly.
assert len(key_parts) >= 2, ('must be a logical result of the '
's3 storage layout')
match = re.match(
storage.BASE_BACKUP_REGEXP, key_parts[-3])
if match is None or key_parts[-2] != 'tar_partitions':
logger.warning(
msg="skipping non-qualifying key in 'delete before'",
detail=(
'The unexpected key is "{0}", and it appears '
'not to match the base-backup partition pattern.'
.format(url)),
hint=generic_weird_key_hint_message)
else:
assert match is not None
scanned_sn = \
self._groupdict_to_segment_number(match.groupdict())
self._delete_if_before(segment_info, scanned_sn, key,
'a base backup volume')
else:
assert False
def _delete_wals_before(self, segment_info):
"""
Delete all WAL files before segment_info.
Doesn't delete any base-backup data.
"""
wal_key_depth = self.layout.wal_directory().count('/') + 1
for key in self._backup_list(prefix=self.layout.wal_directory()):
key_name = self.layout.key_name(key)
bucket = self._container_name(key)
url = '{scm}://{bucket}/{name}'.format(scm=self.layout.scheme,
bucket=bucket,
name=key_name)
key_parts = key_name.split('/')
key_depth = len(key_parts)
if key_depth != wal_key_depth:
logger.warning(
msg="skipping non-qualifying key in 'delete before'",
detail=(
'The unexpected key is "{0}", and it appears to be '
'at an unexpected depth.'.format(url)),
hint=generic_weird_key_hint_message)
elif key_depth == wal_key_depth:
segment_match = (re.match(storage.SEGMENT_REGEXP + r'\.lzo',
key_parts[-1]))
label_match = (re.match(storage.SEGMENT_REGEXP +
r'\.[A-F0-9]{8,8}.backup.lzo',
key_parts[-1]))
history_match = re.match(r'[A-F0-9]{8,8}\.history',
key_parts[-1])
all_matches = [segment_match, label_match, history_match]
non_matches = len(list(m for m in all_matches if m is None))
# These patterns are intended to be mutually
# exclusive, so either one should match or none should
# match.
assert non_matches in (len(all_matches) - 1, len(all_matches))
if non_matches == len(all_matches):
logger.warning(
msg="skipping non-qualifying key in 'delete before'",
detail=('The unexpected key is "{0}", and it appears '
'not to match the WAL file naming pattern.'
.format(url)),
hint=generic_weird_key_hint_message)
elif segment_match is not None:
scanned_sn = self._groupdict_to_segment_number(
segment_match.groupdict())
self._delete_if_before(segment_info, scanned_sn, key,
'a wal file')
elif label_match is not None:
scanned_sn = self._groupdict_to_segment_number(
label_match.groupdict())
self._delete_if_before(segment_info, scanned_sn, key,
'a backup history file')
elif history_match is not None:
# History (timeline) files do not have any actual
# WAL position information, so they are never
# deleted.
pass
else:
assert False
else:
assert False
def delete_everything(self):
"""Delete everything in a storage layout
Named provocatively for a reason: can (and in fact intended
to) cause irrecoverable loss of data. This can be used to:
* Completely obliterate data from old WAL-E versions
(i.e. layout.VERSION is an obsolete version)
* Completely obliterate all backups (from a decommissioned
database, for example)
"""
for k in self._backup_list(prefix=self.layout.basebackups()):
self._maybe_delete_key(k, 'part of a base backup')
for k in self._backup_list(prefix=self.layout.wal_directory()):
self._maybe_delete_key(k, 'part of wal logs')
if self.deleter:
self.deleter.close()
def delete_before(self, segment_info):
"""
Delete all base backups and WAL before a given segment
This is the most commonly-used deletion operator; to delete
old backups and WAL.
"""
# This will delete all base backup data before segment_info.
self._delete_base_backups_before(segment_info)
# This will delete all WAL segments before segment_info.
self._delete_wals_before(segment_info)
if self.deleter:
self.deleter.close()
def delete_with_retention(self, num_to_retain):
"""
Retain the num_to_retain most recent backups and delete all data
before them.
"""
base_backup_sentinel_depth = self.layout.basebackups().count('/') + 1
# Sweep over base backup files, collecting sentinel files from
# completed backups.
completed_basebackups = []
for key in self._backup_list(prefix=self.layout.basebackups()):
key_name = self.layout.key_name(key)
key_parts = key_name.split('/')
key_depth = len(key_parts)
url = '{scheme}://{bucket}/{name}'.format(
scheme=self.layout.scheme,
bucket=self._container_name(key),
name=key_name)
if key_depth == base_backup_sentinel_depth:
# This is a key at the depth of a base-backup-sentinel file.
# Check to see if it matches the known form.
match = re.match(storage.COMPLETE_BASE_BACKUP_REGEXP,
key_parts[-1])
# If this isn't a base-backup-sentinel file, just ignore it.
if match is None:
continue
# This key corresponds to a base-backup-sentinel file and
# represents a completed backup. Grab its segment number.
scanned_sn = \
self._groupdict_to_segment_number(match.groupdict())
completed_basebackups.append(dict(
scanned_sn=scanned_sn,
url=url))
# Sort the base backups from newest to oldest.
basebackups = sorted(
completed_basebackups,
key=lambda backup: backup['scanned_sn'].as_an_integer,
reverse=True)
last_retained = None
if len(basebackups) <= num_to_retain:
detail = None
if len(basebackups) == 0:
msg = 'Not deleting any data.'
detail = 'No existing base backups.'
elif len(basebackups) == 1:
last_retained = basebackups[-1]
msg = 'Retaining existing base backup.'
else:
last_retained = basebackups[-1]
msg = "Retaining all %d base backups." % len(basebackups)
else:
last_retained = basebackups[num_to_retain - 1]
num_deleting = len(basebackups) - num_to_retain
msg = "Deleting %d oldest base backups." % num_deleting
detail = "Found %d total base backups." % len(basebackups)
log_message = dict(msg=msg)
if detail is not None:
log_message['detail'] = detail
if last_retained is not None:
log_message['hint'] = \
"Deleting keys older than %s." % last_retained['url']
logger.info(**log_message)
# This will delete all base backup and WAL data before
# last_retained['scanned_sn'].
if last_retained is not None:
self._delete_base_backups_before(last_retained['scanned_sn'])
self._delete_wals_before(last_retained['scanned_sn'])
if self.deleter:
self.deleter.close()
| bsd-3-clause |
jyotsna1820/django | django/core/management/commands/dumpdata.py | 53 | 11224 | import warnings
from collections import OrderedDict
from optparse import make_option
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from django.db import router, DEFAULT_DB_ALIAS
from django.utils.deprecation import RemovedInDjango19Warning
class Command(BaseCommand):
    # Django management command behind `manage.py dumpdata`.
    option_list = BaseCommand.option_list + (
        make_option('--format', default='json', dest='format',
            help='Specifies the output serialization format for fixtures.'),
        make_option('--indent', default=None, dest='indent', type='int',
            help='Specifies the indent level to use when pretty-printing output'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a specific database to dump fixtures from. '
                 'Defaults to the "default" database.'),
        make_option('-e', '--exclude', dest='exclude', action='append', default=[],
            help='An app_label or app_label.ModelName to exclude '
                 '(use multiple --exclude to exclude multiple apps/models).'),
        make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
            help='Use natural keys if they are available (deprecated: use --natural-foreign instead).'),
        make_option('--natural-foreign', action='store_true', dest='use_natural_foreign_keys', default=False,
            help='Use natural foreign keys if they are available.'),
        make_option('--natural-primary', action='store_true', dest='use_natural_primary_keys', default=False,
            help='Use natural primary keys if they are available.'),
        make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
            help="Use Django's base manager to dump all models stored in the database, "
                 "including those that would otherwise be filtered or modified by a custom manager."),
        make_option('--pks', dest='primary_keys',
            help="Only dump objects with given primary keys. "
                 "Accepts a comma separated list of keys. "
                 "This option will only work when you specify one model."),
    )
    help = ("Output the contents of the database as a fixture of the given "
            "format (using each model's default manager unless --all is "
            "specified).")
    args = '[app_label app_label.ModelName ...]'
    def handle(self, *app_labels, **options):
        """Serialize the requested apps/models to self.stdout.

        ``app_labels`` may name whole apps ("auth") or single models
        ("auth.Permission"); with no labels, every installed app that
        has a models module is dumped (minus ``--exclude`` entries).
        Raises CommandError for unknown apps/models/formats or for
        ``--pks`` used with anything other than exactly one model.
        """
        format = options.get('format')
        indent = options.get('indent')
        using = options.get('database')
        excludes = options.get('exclude')
        show_traceback = options.get('traceback')
        use_natural_keys = options.get('use_natural_keys')
        if use_natural_keys:
            # Legacy -n/--natural spelling: still honored, but deprecated
            # in favor of --natural-foreign.
            warnings.warn("``--natural`` is deprecated; use ``--natural-foreign`` instead.",
                RemovedInDjango19Warning)
        use_natural_foreign_keys = options.get('use_natural_foreign_keys') or use_natural_keys
        use_natural_primary_keys = options.get('use_natural_primary_keys')
        use_base_manager = options.get('use_base_manager')
        pks = options.get('primary_keys')
        if pks:
            primary_keys = pks.split(',')
        else:
            primary_keys = []
        # Resolve --exclude entries: dotted names exclude one model,
        # bare names exclude a whole app.
        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                try:
                    model = apps.get_model(exclude)
                except LookupError:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model)
            else:
                try:
                    app_config = apps.get_app_config(exclude)
                except LookupError:
                    raise CommandError('Unknown app in excludes: %s' % exclude)
                excluded_apps.add(app_config)
        # Build app_list: an OrderedDict mapping app_config -> list of
        # models to dump, or None meaning "all models of that app".
        if len(app_labels) == 0:
            if primary_keys:
                raise CommandError("You can only use --pks option with one model")
            app_list = OrderedDict((app_config, None)
                for app_config in apps.get_app_configs()
                if app_config.models_module is not None and app_config not in excluded_apps)
        else:
            if len(app_labels) > 1 and primary_keys:
                raise CommandError("You can only use --pks option with one model")
            app_list = OrderedDict()
            for label in app_labels:
                try:
                    app_label, model_label = label.split('.')
                    try:
                        app_config = apps.get_app_config(app_label)
                    except LookupError:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app_config.models_module is None or app_config in excluded_apps:
                        continue
                    try:
                        model = app_config.get_model(model_label)
                    except LookupError:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
                    app_list_value = app_list.setdefault(app_config, [])
                    # We may have previously seen a "all-models" request for
                    # this app (no model qualifier was given). In this case
                    # there is no need adding specific models to the list.
                    if app_list_value is not None:
                        if model not in app_list_value:
                            app_list_value.append(model)
                except ValueError:
                    if primary_keys:
                        raise CommandError("You can only use --pks option with one model")
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app_config = apps.get_app_config(app_label)
                    except LookupError:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app_config.models_module is None or app_config in excluded_apps:
                        continue
                    app_list[app_config] = None
        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            try:
                serializers.get_serializer(format)
            except serializers.SerializerDoesNotExist:
                pass
            raise CommandError("Unknown serialization format: %s" % format)
        def get_objects():
            # Collate the objects to be serialized.
            for model in sort_dependencies(app_list.items()):
                if model in excluded_models:
                    continue
                if not model._meta.proxy and router.allow_migrate(using, model):
                    if use_base_manager:
                        objects = model._base_manager
                    else:
                        objects = model._default_manager
                    queryset = objects.using(using).order_by(model._meta.pk.name)
                    if primary_keys:
                        queryset = queryset.filter(pk__in=primary_keys)
                    for obj in queryset.iterator():
                        yield obj
        try:
            self.stdout.ending = None
            serializers.serialize(format, get_objects(), indent=indent,
                    use_natural_foreign_keys=use_natural_foreign_keys,
                    use_natural_primary_keys=use_natural_primary_keys,
                    stream=self.stdout)
        except Exception as e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
def sort_dependencies(app_list):
    """Sort a list of (app_config, models) pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.

    Raises CommandError when the dependency graph contains a cycle that
    cannot be resolved.
    """
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app_config, model_list in app_list:
        if model_list is None:
            model_list = app_config.get_models()
        for model in model_list:
            models.add(model)
            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [apps.get_model(dep) for dep in deps]
            else:
                deps = []
            # Now add a dependency for any FK or M2M relation with
            # a model that defines a natural key
            for field in model._meta.fields:
                if hasattr(field.rel, 'to'):
                    rel_model = field.rel.to
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            for field in model._meta.many_to_many:
                rel_model = field.rel.to
                if hasattr(rel_model, 'natural_key') and rel_model != model:
                    deps.append(rel_model)
            model_dependencies.append((model, deps))
    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()
            # A model is ready once every dependency is either already in
            # the output or was never scheduled for serialization at all.
            if all(d not in models or d in model_list for d in deps):
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            raise CommandError("Can't resolve dependencies for %s in serialized app list." %
                ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
                for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
            )
        model_dependencies = skipped
    return model_list
| bsd-3-clause |
vikitripathi/MB-MessApp-API | messApp/env/lib/python2.7/site-packages/pip/basecommand.py | 392 | 6578 | """Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
    # Base class for every pip subcommand (install, uninstall, ...).
    # Subclasses set `name`/`usage` and implement run(options, args).
    name = None
    usage = None
    hidden = False
    def __init__(self):
        """Build the option parser for this command and attach the
        general pip-wide option group."""
        parser_kw = {
            'usage': self.usage,
            'prog': '%s %s' % (get_prog(), self.name),
            'formatter': UpdatingDefaultsHelpFormatter(),
            'add_help_option': False,
            'name': self.name,
            'description': self.__doc__,
        }
        self.parser = ConfigOptionParser(**parser_kw)
        # Commands should add options to this option group
        optgroup_name = '%s Options' % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
        # Add the general options
        gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
        self.parser.add_option_group(gen_opts)
    def _build_session(self, options):
        """Create a PipSession configured from the parsed command-line
        options (CA bundle, timeout, proxies, prompting)."""
        session = PipSession()
        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert
        # Handle timeouts
        if options.timeout:
            session.timeout = options.timeout
        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }
        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input
        return session
    def setup_logging(self):
        # Hook for subclasses; the base command configures nothing extra.
        pass
    def parse_args(self, args):
        # factored out for testability
        return self.parser.parse_args(args)
    def main(self, args):
        """Parse args, run the command, and translate outcomes
        (exceptions included) into a process exit status.

        On failure paths flagged with store_log, the accumulated debug
        log is written out so the user can inspect what happened.
        """
        options, args = self.parse_args(args)
        level = 1  # Notify
        level += options.verbose
        level -= options.quiet
        level = logger.level_for_integer(4 - level)
        complete_log = []
        # Everything at DEBUG and up is also captured in complete_log so
        # it can be dumped to a file when a failure occurs.
        logger.add_consumers(
            (level, sys.stdout),
            (logger.DEBUG, complete_log.append),
        )
        if options.log_explicit_levels:
            logger.explicit_levels = True
        self.setup_logging()
        #TODO: try to get these passing down from the command?
        #      without resorting to os.environ to hold these.
        if options.no_input:
            os.environ['PIP_NO_INPUT'] = '1'
        if options.exists_action:
            os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
        if options.require_venv:
            # If a venv is required check if it can really be found
            if not running_under_virtualenv():
                logger.fatal('Could not find an activated virtualenv (required).')
                sys.exit(VIRTUALENV_NOT_FOUND)
        if options.log:
            log_fp = open_logfile(options.log, 'a')
            logger.add_consumers((logger.DEBUG, log_fp))
        else:
            log_fp = None
        exit = SUCCESS
        store_log = False
        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                exit = status
        except PreviousBuildDirError:
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = PREVIOUS_BUILD_DIR_ERROR
        except (InstallationError, UninstallationError):
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except BadCommand:
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except CommandError:
            e = sys.exc_info()[1]
            logger.fatal('ERROR: %s' % e)
            logger.info('Exception information:\n%s' % format_exc())
            exit = ERROR
        except KeyboardInterrupt:
            logger.fatal('Operation cancelled by user')
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except:
            logger.fatal('Exception:\n%s' % format_exc())
            store_log = True
            exit = UNKNOWN_ERROR
        if store_log:
            log_file_fn = options.log_file
            text = '\n'.join(complete_log)
            try:
                log_file_fp = open_logfile(log_file_fn, 'w')
            except IOError:
                # Fall back to a temp file when the configured log file
                # cannot be opened.
                temp = tempfile.NamedTemporaryFile(delete=False)
                log_file_fn = temp.name
                log_file_fp = open_logfile(log_file_fn, 'w')
            logger.fatal('Storing debug log for failure in %s' % log_file_fn)
            log_file_fp.write(text)
            log_file_fp.close()
        if log_fp is not None:
            log_fp.close()
        return exit
def format_exc(exc_info=None):
    """Format an exception triple as a traceback string.

    :param exc_info: a ``(type, value, traceback)`` triple as returned
        by ``sys.exc_info()``; when None, the currently handled
        exception is used.
    :return: the traceback text, exactly as ``traceback.print_exception``
        would have printed it.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    # traceback.format_exception produces the same lines print_exception
    # would emit, without needing a StringIO buffer (or the old
    # ``**dict(file=out)`` keyword workaround for ancient Pythons).
    return ''.join(traceback.format_exception(*exc_info))
def open_logfile(filename, mode='a'):
    """Open the named log file (append mode by default).

    Missing parent directories are created.  If the file already
    exists, a dashed separator line plus a "<program> run on <time>"
    banner is written first, so past activity is visually separated
    from the current run.
    """
    path = os.path.abspath(os.path.expanduser(filename))
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    already_there = os.path.exists(path)
    log_fp = open(path, mode)
    if already_there:
        log_fp.write('%s\n' % ('-' * 60))
        log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
    return log_fp
| apache-2.0 |
adamchainz/mysqlclient-python | MySQLdb/connections.py | 1 | 13156 | """
This module implements connections for MySQLdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from MySQLdb import cursors
from MySQLdb.compat import unicode, PY2
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
import _mysql
import re
if not PY2:
    # See http://bugs.python.org/issue24870
    #
    # Translation table implementing the 'surrogateescape' error handler
    # by hand (PEP 383): ordinals < 0x80 map to themselves, ordinals
    # 0x80..0xFF map to lone surrogates U+DC80..U+DCFF.
    _surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)]
    def _fast_surroundescape(s):
        # Decode arbitrary bytes losslessly (latin1 never fails), then
        # shift the non-ASCII code points into the surrogate range so the
        # result round-trips via .encode('ascii', 'surrogateescape').
        return s.decode('latin1').translate(_surrogateescape_table)
def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
    """Record an error on the cursor or connection, then raise it.

    The pair ``(errorclass, errorvalue)`` is appended to
    ``cursor.messages`` when a cursor is given, otherwise to
    ``connection.messages``.  Afterwards the error is raised: an
    exception instance is re-raised as-is; otherwise ``errorclass``
    (or ``Exception`` when errorclass is None) is raised with
    ``errorvalue`` as its argument.

    You can override this with your own error handler by assigning it
    to the instance.
    """
    record = (errorclass, errorvalue)
    target = cursor if cursor else connection
    target.messages.append(record)
    # Drop the local references before raising, as the stock handler
    # always has.
    del cursor, connection
    if isinstance(errorvalue, BaseException):
        raise errorvalue
    exc_type = errorclass if errorclass is not None else Exception
    raise exc_type(errorvalue)
re_numeric_part = re.compile(r"^(\d+)")

def numeric_part(s):
    """Return the leading numeric part of a string as an int, or None
    when the string does not start with digits.

    >>> numeric_part("20-alpha")
    20
    >>> numeric_part("foo")

    >>> numeric_part("16b")
    16
    """
    match = re_numeric_part.match(s)
    return int(match.group(1)) if match else None
class Connection(_mysql.connection):
"""MySQL Database Connection Object"""
default_cursor = cursors.Cursor
waiter = None
    def __init__(self, *args, **kwargs):
        """
        Create a connection to the database. It is strongly recommended
        that you only use keyword parameters. Consult the MySQL C API
        documentation for more information.

        host
          string, host to connect
        user
          string, user to connect as
        passwd
          string, password to use
        db
          string, database to use
        port
          integer, TCP/IP port to connect to
        unix_socket
          string, location of unix_socket to use
        conv
          conversion dictionary, see MySQLdb.converters
        connect_timeout
          number of seconds to wait before the connection attempt
          fails.
        compress
          if set, compression is enabled
        named_pipe
          if set, a named pipe is used to connect (Windows only)
        init_command
          command which is run once the connection is created
        read_default_file
          file from which default client values are read
        read_default_group
          configuration group to use from the default file
        cursorclass
          class object, used to create cursors (keyword only)
        use_unicode
          If True, text-like columns are returned as unicode objects
          using the connection's character set. Otherwise, text-like
          columns are returned as normal strings. Unicode objects will
          always be encoded to the connection's character set
          regardless of this setting.
          Default to False on Python 2 and True on Python 3.
        charset
          If supplied, the connection character set will be changed
          to this character set (MySQL-4.1 and newer). This implies
          use_unicode=True.
        sql_mode
          If supplied, the session SQL mode will be changed to this
          setting (MySQL-4.1 and newer). For more details and legal
          values, see the MySQL documentation.
        client_flag
          integer, flags to use or 0
          (see MySQL docs or constants/CLIENTS.py)
        ssl
          dictionary or mapping, contains SSL connection parameters;
          see the MySQL documentation for more details
          (mysql_ssl_set()). If this is set, and the client does not
          support SSL, NotSupportedError will be raised.
        local_infile
          integer, non-zero enables LOAD LOCAL INFILE; zero disables
        autocommit
          If False (default), autocommit is disabled.
          If True, autocommit is enabled.
          If None, autocommit isn't set and server default is used.
        waiter
          Callable accepts fd as an argument. It is called after sending
          query and before reading response.
          This is useful when using with greenlet and async io.
        There are a number of undocumented, non-standard methods. See the
        documentation for the MySQL C API for some hints on what they do.
        """
        from MySQLdb.constants import CLIENT, FIELD_TYPE
        from MySQLdb.converters import conversions
        from weakref import proxy
        kwargs2 = kwargs.copy()
        if 'conv' in kwargs:
            conv = kwargs['conv']
        else:
            conv = conversions
        # Shallow-copy the converter mapping, but clone per-type converter
        # lists so later appends (see use_unicode below) don't mutate the
        # caller's dictionary.
        conv2 = {}
        for k, v in conv.items():
            if isinstance(k, int) and isinstance(v, list):
                conv2[k] = v[:]
            else:
                conv2[k] = v
        kwargs2['conv'] = conv2
        cursorclass = kwargs2.pop('cursorclass', self.default_cursor)
        charset = kwargs2.pop('charset', '')
        # An explicit charset implies unicode results; Python 3 defaults
        # to unicode regardless.
        if charset or not PY2:
            use_unicode = True
        else:
            use_unicode = False
        use_unicode = kwargs2.pop('use_unicode', use_unicode)
        sql_mode = kwargs2.pop('sql_mode', '')
        # Enable multi-statement/multi-result support when the client
        # library is new enough.
        client_flag = kwargs.get('client_flag', 0)
        client_version = tuple([ numeric_part(n) for n in _mysql.get_client_info().split('.')[:2] ])
        if client_version >= (4, 1):
            client_flag |= CLIENT.MULTI_STATEMENTS
        if client_version >= (5, 0):
            client_flag |= CLIENT.MULTI_RESULTS
        kwargs2['client_flag'] = client_flag
        # PEP-249 requires autocommit to be initially off
        autocommit = kwargs2.pop('autocommit', False)
        self.waiter = kwargs2.pop('waiter', None)
        super(Connection, self).__init__(*args, **kwargs2)
        self.cursorclass = cursorclass
        # Encoders are keyed by Python type (non-int keys of the
        # conversion dictionary).
        self.encoders = dict([ (k, v) for k, v in conv.items()
                               if type(k) is not int ])
        self._server_version = tuple([ numeric_part(n) for n in self.get_server_info().split('.')[:2] ])
        # Use a weak proxy inside the closures below to avoid a reference
        # cycle between the connection and its encoder functions.
        db = proxy(self)
        def _get_string_literal():
            # Note: string_literal() is called for bytes object on Python 3.
            def string_literal(obj, dummy=None):
                return db.string_literal(obj)
            return string_literal
        def _get_unicode_literal():
            if PY2:
                # unicode_literal is called for only unicode object.
                def unicode_literal(u, dummy=None):
                    return db.literal(u.encode(unicode_literal.charset))
            else:
                # unicode_literal() is called for arbitrary object.
                def unicode_literal(u, dummy=None):
                    return db.literal(str(u).encode(unicode_literal.charset))
            return unicode_literal
        def _get_string_decoder():
            def string_decoder(s):
                return s.decode(string_decoder.charset)
            return string_decoder
        string_literal = _get_string_literal()
        self.unicode_literal = unicode_literal = _get_unicode_literal()
        self.string_decoder = string_decoder = _get_string_decoder()
        if not charset:
            charset = self.character_set_name()
        # set_character_set also stamps the charset onto string_decoder
        # and unicode_literal.
        self.set_character_set(charset)
        if sql_mode:
            self.set_sql_mode(sql_mode)
        if use_unicode:
            self.converter[FIELD_TYPE.STRING].append((None, string_decoder))
            self.converter[FIELD_TYPE.VAR_STRING].append((None, string_decoder))
            self.converter[FIELD_TYPE.VARCHAR].append((None, string_decoder))
            self.converter[FIELD_TYPE.BLOB].append((None, string_decoder))
        self.encoders[bytes] = string_literal
        self.encoders[unicode] = unicode_literal
        self._transactional = self.server_capabilities & CLIENT.TRANSACTIONS
        if self._transactional:
            if autocommit is not None:
                self.autocommit(autocommit)
        self.messages = []
def autocommit(self, on):
on = bool(on)
if self.get_autocommit() != on:
_mysql.connection.autocommit(self, on)
def cursor(self, cursorclass=None):
"""
Create a cursor on which queries may be performed. The
optional cursorclass parameter is used to create the
Cursor. By default, self.cursorclass=cursors.Cursor is
used.
"""
return (cursorclass or self.cursorclass)(self)
def query(self, query):
if self.waiter is not None:
self.send_query(query)
self.waiter(self.fileno())
self.read_query_result()
else:
_mysql.connection.query(self, query)
def __enter__(self):
if self.get_autocommit():
self.query("BEGIN")
return self.cursor()
def __exit__(self, exc, value, tb):
if exc:
self.rollback()
else:
self.commit()
def literal(self, o):
"""If o is a single object, returns an SQL literal as a string.
If o is a non-string sequence, the items of the sequence are
converted and returned as a sequence.
Non-standard. For internal use; do not use this in your
applications.
"""
s = self.escape(o, self.encoders)
# Python 3(~3.4) doesn't support % operation for bytes object.
# We should decode it before using %.
# Decoding with ascii and surrogateescape allows convert arbitrary
# bytes to unicode and back again.
# See http://python.org/dev/peps/pep-0383/
if not PY2 and isinstance(s, (bytes, bytearray)):
return _fast_surroundescape(s)
return s
def begin(self):
    """Explicitly begin a connection. Non-standard.
    DEPRECATED: Will be removed in 1.3.
    Use an SQL BEGIN statement instead."""
    import warnings
    warnings.warn("begin() is non-standard and will be removed in 1.3",
                  DeprecationWarning, 2)
    self.query("BEGIN")
# Older _mysql builds do not expose a native warning_count(); provide a
# fallback that derives the count from the trailing token of info().
if not hasattr(_mysql.connection, 'warning_count'):
    def warning_count(self):
        """Return the number of warnings generated from the
        last query. This is derived from the info() method."""
        info = self.info()
        if info:
            # info() ends with the warning count, e.g. "... Warnings: 3".
            return int(info.split()[-1])
        else:
            return 0
def set_character_set(self, charset):
    """Set the connection character set to charset. The character
    set can only be changed in MySQL-4.1 and newer. If you try
    to change the character set from the current value in an
    older version, NotSupportedError will be raised."""
    # MySQL's utf8mb4 has no dedicated Python codec; plain utf8 covers
    # it on the Python side.
    if charset == "utf8mb4":
        py_charset = "utf8"
    else:
        py_charset = charset
    if self.character_set_name() != charset:
        try:
            super(Connection, self).set_character_set(charset)
        except AttributeError:
            # The C layer lacks set_character_set(); fall back to
            # SET NAMES, which only exists on MySQL >= 4.1.
            if self._server_version < (4, 1):
                raise NotSupportedError("server is too old to set charset")
            self.query('SET NAMES %s' % charset)
            self.store_result()
    # Keep the result decoder and unicode-literal encoder in sync with
    # the connection charset.
    self.string_decoder.charset = py_charset
    self.unicode_literal.charset = py_charset
def set_sql_mode(self, sql_mode):
    """Set the connection sql_mode. See MySQL documentation for
    legal values."""
    if self._server_version < (4, 1):
        raise NotSupportedError("server is too old to set sql_mode")
    statement = "SET SESSION sql_mode='%s'" % sql_mode
    self.query(statement)
    self.store_result()
def show_warnings(self):
    """Return detailed information about warnings as a
    sequence of tuples of (Level, Code, Message). This
    is only supported in MySQL-4.1 and up. If your server
    is an earlier version, an empty sequence is returned."""
    if self._server_version < (4, 1):
        return ()
    self.query("SHOW WARNINGS")
    result = self.store_result()
    return result.fetch_row(0)
# Re-export the DB-API 2.0 exception classes as attributes of the
# connection (the optional "Connection.Error" DB-API extension), and
# install the default error handler.
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
errorhandler = defaulterrorhandler
| gpl-2.0 |
nmospmos/mbed | workspace_tools/host_tests/host_tests_plugins/module_reset_mps2.py | 30 | 2470 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from host_test_plugins import HostTestPluginBase
# Note: This plugin is not fully functional, needs improvements
class HostTestPluginResetMethod_MPS2(HostTestPluginBase):
    """ Plugin used to reset ARM_MPS2 platform
        Supports:
           reboot.txt   - startup from standby state, reboots when in run mode.
           shutdown.txt - shutdown from run mode.
           reset.txt    - reset FPGA during run mode.

        Note: this plugin is not fully functional; the capability
        handlers are still TODO stubs.
    """
    # Plugin interface
    name = 'HostTestPluginResetMethod_MPS2'
    type = 'ResetMethod'
    capabilities = ['reboot.txt', 'shutdown.txt', 'reset.txt']
    required_parameters = ['disk']

    def touch_file(self, path):
        """ Create the file at *path* if needed and update its timestamps.
        """
        with open(path, 'a'):
            os.utime(path, None)

    def setup(self, *args, **kwargs):
        """ Prepare / configure plugin to work.
            This method can receive plugin specific parameters by kwargs and
            ignore other parameters which may affect other plugins.
        """
        # Nothing to configure for this plugin.
        return True

    def execute(self, capabilitity, *args, **kwargs):
        """ Executes capability by name.
            Each capability may directly just call some command line
            program or execute building pythonic function
        """
        result = False
        if self.check_parameters(capabilitity, *args, **kwargs) is True:
            if capabilitity in ('reboot.txt', 'shutdown.txt', 'reset.txt'):
                # TODO: touch the corresponding control file on the MPS2
                # mount point to trigger reboot / shutdown / reset.
                pass
        return result
def load_plugin():
    """Return the plugin instance exported by this module."""
    plugin = HostTestPluginResetMethod_MPS2()
    return plugin
| apache-2.0 |
ryangallen/django | django/core/files/uploadedfile.py | 471 | 4334 | """
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
    """
    An abstract uploaded file (``TemporaryUploadedFile`` and
    ``InMemoryUploadedFile`` are the built-in concrete subclasses).
    An ``UploadedFile`` object behaves somewhat like a file object and
    represents some file data that the user submitted with a form.
    """
    # Default size (in bytes) of the chunks yielded by File.chunks().
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10

    def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
        super(UploadedFile, self).__init__(file, name)
        self.size = size
        self.content_type = content_type
        self.charset = charset
        self.content_type_extra = content_type_extra

    def __repr__(self):
        return force_str("<%s: %s (%s)>" % (
            self.__class__.__name__, self.name, self.content_type))

    def _get_name(self):
        return self._name

    def _set_name(self, name):
        # Sanitize the file name so that it can't be dangerous.
        if name is not None:
            # Just use the basename of the file -- anything else is dangerous.
            name = os.path.basename(name)

            # File names longer than 255 characters can cause problems on older OSes.
            if len(name) > 255:
                # Truncate the stem while preserving (up to 255 chars of)
                # the extension.
                name, ext = os.path.splitext(name)
                ext = ext[:255]
                name = name[:255 - len(ext)] + ext

        self._name = name

    # Sanitizing property: assignment goes through _set_name() above.
    name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
    """
    A file uploaded to a temporary location (i.e. stream-to-disk).
    """
    def __init__(self, name, content_type, size, charset, content_type_extra=None):
        # Honour FILE_UPLOAD_TEMP_DIR when it is configured; otherwise let
        # the tempfile module pick the platform default directory.
        create_kwargs = {'suffix': '.upload'}
        if settings.FILE_UPLOAD_TEMP_DIR:
            create_kwargs['dir'] = settings.FILE_UPLOAD_TEMP_DIR
        file = tempfile.NamedTemporaryFile(**create_kwargs)
        super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)

    def temporary_file_path(self):
        """
        Returns the full path of this file.
        """
        return self.file.name

    def close(self):
        try:
            return self.file.close()
        except OSError as e:
            if e.errno == errno.ENOENT:
                # The file was moved or deleted before the tempfile could
                # unlink it. close() has already set self.file.close_called
                # and closed the inner file object, so this is safe to
                # ignore.
                return
            raise
class InMemoryUploadedFile(UploadedFile):
    """
    A file uploaded into memory (i.e. stream-to-memory).
    """
    def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
        super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
        self.field_name = field_name

    def open(self, mode=None):
        # Rewind so the in-memory buffer can be consumed again.
        self.file.seek(0)

    def chunks(self, chunk_size=None):
        # The whole payload already lives in memory, so yield it in one go.
        self.file.seek(0)
        yield self.read()

    def multiple_chunks(self, chunk_size=None):
        # Since it's in memory, we'll never have multiple chunks.
        return False
class SimpleUploadedFile(InMemoryUploadedFile):
    """
    A simple representation of a file, which just has content, size, and a name.
    """
    def __init__(self, name, content, content_type='text/plain'):
        data = content or b''
        super(SimpleUploadedFile, self).__init__(
            BytesIO(data), None, name, content_type, len(data), None, None)

    @classmethod
    def from_dict(cls, file_dict):
        """
        Creates a SimpleUploadedFile object from
        a dictionary object with the following keys:
           - filename
           - content-type (optional, defaults to text/plain)
           - content
        """
        return cls(file_dict['filename'],
                   file_dict['content'],
                   file_dict.get('content-type', 'text/plain'))
| bsd-3-clause |
fhirschmann/penchy | penchy/node.py | 1 | 7834 | """
This module contains classes which deal with nodes
(that is, connecting via ssh, uploading the job, starting it...).
.. moduleauthor:: Fabian Hirschmann <fabian@hirschmann.email>
:copyright: PenchY Developers 2011-2012, see AUTHORS
:license: MIT License, see LICENSE
"""
import os
import logging
import atexit
from contextlib import contextmanager
import paramiko
log = logging.getLogger(__name__)
class NodeError(Exception):
    """
    Raised when errors occur while dealing
    with :class:`Node`.
    """
class Node(object):  # pragma: no cover
    """
    This class represents a node (a system on which the benchmark
    will be run on) and provides basic ssh/sftp functionality.
    """
    # Client-side log files that get_logs() collects after a run.
    _LOGFILES = set(('penchy_bootstrap.log', 'penchy.log'))

    def __init__(self, setting, compositions):
        """
        Initialize the node.

        :param setting: the node setting
        :type setting: :class:`~penchy.jobs.job.NodeSetting`
        :param compositions: the job module to execute
        :type compositions: module
        """
        self.setting = setting
        self.log = logging.getLogger('.'.join([__name__,
                                               self.setting.identifier]))
        self.compositions = compositions
        # Compositions we still expect results for.
        self.expected = list(compositions.job.compositions_for_node(
            self.setting.identifier))
        self.ssh = self._setup_ssh()
        self.client_is_running = False
        self.was_closed = False
        self.sftp = None

    def __eq__(self, other):
        return isinstance(other, Node) and \
               self.setting.identifier == other.setting.identifier

    def __hash__(self):
        return hash(self.setting.identifier)

    def __str__(self):
        return self.setting.identifier

    def _setup_ssh(self):
        """
        Create the SSH client (the actual connection is made in
        :meth:`connect`).
        """
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        if not self.setting.keyfile:
            ssh.load_system_host_keys()
        return ssh

    @property
    def received_all_results(self):
        """
        Indicates whether we received all results. In other words,
        this is False if we are still waiting for a job which is
        running on a :class:`~penchy.jobs.job.SystemComposition`.
        """
        return len(self.expected) == 0

    def received(self, composition):
        """
        Should be called when a :class:`~penchy.jobs.job.SystemComposition`
        was received for this node.

        :param composition: composition which was received
        :type composition: :class:`~penchy.jobs.job.SystemComposition`
        """
        if composition in self.expected:
            self.expected.remove(composition)

    def connect(self):
        """
        Connect to node and open an SFTP session.
        """
        self.log.debug('Connecting')
        self.ssh.connect(self.setting.host, username=self.setting.username,
                port=self.setting.ssh_port, password=self.setting.password,
                key_filename=self.setting.keyfile)
        self.sftp = self.ssh.open_sftp()

    def disconnect(self):
        """
        Disconnect from node.
        """
        self.log.debug('Disconnecting')
        self.sftp.close()
        self.ssh.close()

    @property
    def connected(self):
        """
        Indicates whether we are connected to this node.
        """
        transport = self.ssh.get_transport()
        return bool(transport and transport.is_active())

    @contextmanager
    def connection_required(self):
        """
        Contextmanager to make sure we are connected before
        working on this node.

        :raise paramiko.AuthenticationException: if authentication fails;
            the node is then marked closed and no results are expected.
        """
        if not self.connected:
            try:
                self.connect()
            except paramiko.AuthenticationException as e:
                self.log.error('Authentication Error: %s' % e)
                self.expected = []
                self.was_closed = True
                raise
        try:
            yield
        finally:
            # Always disconnect, even when the managed block raised;
            # previously an exception inside the block leaked the
            # open SSH/SFTP connection.
            if self.connected:
                self.disconnect()

    def close(self):
        """
        Close node (disconnect, receive the logs and kill the
        client if neccessary).

        If we have not received all results from this node, the PenchY
        client will be killed on this node.
        """
        if self.was_closed:
            return
        with self.connection_required():
            if not self.received_all_results:
                self.kill()
            self.expected = []
            self.get_logs()
        self.was_closed = True

    def put(self, local, remote=None):
        """
        Upload a file to the node.

        :param local: path to the local file
        :type local: string
        :param remote: path to the remote file
        :type remote: string
        """
        local = os.path.abspath(local)
        if not remote:
            remote = os.path.basename(local)
        if not os.path.isabs(remote):
            remote = os.path.join(self.setting.path, remote)
        try:
            self.sftp.mkdir(os.path.dirname(remote))
        except IOError:
            # Directory most likely exists already; if it genuinely could
            # not be created, sftp.put below will raise a meaningful error.
            pass
        self.log.debug('Copying file %s to %s' % (local, remote))
        self.sftp.put(local, remote)

    def get_logs(self):
        """
        Read the client's log files and log them using the server's
        logging capabilities.
        """
        client_log = []
        for filename in self.__class__._LOGFILES:
            try:
                filename = os.path.join(self.setting.path, filename)
                logfile = self.sftp.open(filename)
                try:
                    client_log.append(logfile.read())
                finally:
                    # Close even if read() fails so we never leak an
                    # SFTP file handle.
                    logfile.close()
            except IOError:
                log.error('Logfile %s could not be received from %s' % \
                        (filename, self))
        log.info("""
        %(separator)s Start log for %(identifier)s %(separator)s
        %(client_log)s
        %(separator)s End log for %(identifier)s %(separator)s
        """ % {
            'separator': '-' * 10,
            'identifier': self.setting.identifier,
            'client_log': ''.join(client_log)})

    def execute(self, cmd):
        """
        Executes command on node.

        :param cmd: command to execute
        :type cmd: string
        """
        self.log.debug('Executing `%s`' % cmd)
        return self.ssh.exec_command(cmd)

    def execute_penchy(self, args):
        """
        Executes penchy on node.

        :param args: arguments to pass to penchy_bootstrap
        :type args: string
        :raise NodeError: if the client was already started on this node
        """
        if self.client_is_running:
            raise NodeError('You may not start penchy twice!')
        self.log.info('Starting PenchY client')
        self.execute('cd %s && python penchy_bootstrap %s' % (
            self.setting.path, args))
        self.client_is_running = True
        atexit.register(self.close)

    def _read_pid(self):
        """
        Return the contents of the client's `penchy.pid` file.

        A pidfile named `penchy.pid` must exist on the node.
        """
        pidfile_name = os.path.join(self.setting.path, 'penchy.pid')
        pidfile = self.sftp.open(pidfile_name)
        try:
            return pidfile.read()
        finally:
            pidfile.close()

    def kill_composition(self):
        """
        Kill the current :class:`~penchy.jobs.job.SystemComposition`
        on this node.

        A pidfile named `penchy.pid` must exist on the node.
        """
        self.execute('kill -SIGHUP ' + self._read_pid())
        self.log.warn('Current composition was terminated')

    def kill(self):
        """
        Kill PenchY on node. This will kill all processes whose
        parent id match penchy's id.

        A pidfile named `penchy.pid` must exist on the node.
        """
        self.execute('pkill -TERM -P ' + self._read_pid())
        self.log.warn('PenchY was terminated')
| mit |
aaltinisik/OCBAltinkaya | addons/website_quote/controllers/main.py | 66 | 9178 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
from openerp.tools.translate import _
class sale_quote(http.Controller):
    """Public website controller for online quotations.

    Access control pattern used throughout: anonymous visitors may read
    and act on a sale order with SUPERUSER_ID *only* when they present
    the order's private ``access_token`` in the URL.
    """

    @http.route([
        "/quote/<int:order_id>",
        "/quote/<int:order_id>/<token>"
    ], type='http', auth="public", website=True)
    def view(self, order_id, token=None, message=False, **post):
        """Render the quotation page for *order_id*."""
        # use SUPERUSER_ID allow to access/view order for public user
        # only if he knows the private token
        order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id, request.context)
        now = time.strftime('%Y-%m-%d')
        if token:
            if token != order.access_token:
                return request.website.render('website.404')
            # Log only once a day
            if request.session.get('view_quote',False)!=now:
                request.session['view_quote'] = now
                body=_('Quotation viewed by customer')
                self.__message_post(body, order_id, type='comment')
        # Remaining validity in days (inclusive of today).
        days = 0
        if order.validity_date:
            days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
        values = {
            'quotation': order,
            'message': message and int(message) or False,
            # True when at least one option is not yet turned into an
            # order line (Python 2: filter() returns a list here).
            'option': bool(filter(lambda x: not x.line_id, order.options)),
            'order_valid': (not order.validity_date) or (now <= order.validity_date),
            'days_valid': days,
        }
        return request.website.render('website_quote.so_quotation', values)

    @http.route(['/quote/accept'], type='json', auth="public", website=True)
    def accept(self, order_id, token=None, signer=None, sign=None, **post):
        """Customer accepts (signs) the quotation; confirms the order."""
        order_obj = request.registry.get('sale.order')
        order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
        if token != order.access_token:
            return request.website.render('website.404')
        if order.state != 'sent':
            return False
        # *sign* is a base64-encoded PNG of the customer's signature.
        attachments=sign and [('signature.png', sign.decode('base64'))] or []
        order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'order_confirm', context=request.context)
        message = _('Order signed by %s') % (signer,)
        self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
        return True

    @http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
    def decline(self, order_id, token, **post):
        """Customer declines the quotation; cancels the order."""
        order_obj = request.registry.get('sale.order')
        order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
        if token != order.access_token:
            return request.website.render('website.404')
        if order.state != 'sent':
            # message=4 -> "already confirmed/declined" feedback on the page.
            return werkzeug.utils.redirect("/quote/%s/%s?message=4" % (order_id, token))
        request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
        message = post.get('decline_message')
        if message:
            self.__message_post(message, order_id, type='comment', subtype='mt_comment')
        return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token))

    @http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
    def post(self, order_id, token, **post):
        """Customer posts a comment on the quotation."""
        # use SUPERUSER_ID allow to access/view order for public user
        order_obj = request.registry.get('sale.order')
        order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
        message = post.get('comment')
        if token != order.access_token:
            return request.website.render('website.404')
        if message:
            self.__message_post(message, order_id, type='comment', subtype='mt_comment')
        return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token))

    # Name-mangled helper (_sale_quote__message_post): posts *message* in
    # the order's chatter as the current user's partner, via the session
    # so the body survives a redirect.
    def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
        request.session.body = message
        cr, uid, context = request.cr, request.uid, request.context
        user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
        if 'body' in request.session and request.session.body:
            request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id,
                    body=request.session.body,
                    type=type,
                    subtype=subtype,
                    author_id=user.partner_id.id,
                    context=context,
                    attachments=attachments
                )
            request.session.body = False
        return True

    @http.route(['/quote/update_line'], type='json', auth="public", website=True)
    def update(self, line_id, remove=False, unlink=False, order_id=None, token=None, **post):
        """Change (+1/-1) or delete the quantity of an order line.

        Returns False when nothing is left to display, otherwise
        [new quantity, new order total] as strings.
        """
        order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id))
        if token != order.access_token:
            return request.website.render('website.404')
        if order.state not in ('draft','sent'):
            return False
        line_id=int(line_id)
        if unlink:
            request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context)
            return False
        number=(remove and -1 or 1)
        order_line_obj = request.registry.get('sale.order.line')
        order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0]
        quantity = order_line_val['product_uom_qty'] + number
        order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context)
        return [str(quantity), str(order.amount_total)]

    @http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True)
    def template_view(self, quote, **post):
        """Preview a quotation template (backend users only)."""
        values = { 'template': quote }
        return request.website.render('website_quote.so_template', values)

    @http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True)
    def add(self, option_id, order_id, token, **post):
        """Turn a suggested option into a real order line."""
        vals = {}
        order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id)
        if token != order.access_token:
            return request.website.render('website.404')
        if order.state not in ['draft', 'sent']:
            return request.website.render('website.http_error', {'status_code': 'Forbidden', 'status_message': _('You cannot add options to a confirmed order.')})
        option_obj = request.registry.get('sale.order.option')
        option = option_obj.browse(request.cr, SUPERUSER_ID, option_id)
        # Let the standard onchange compute taxes/uom defaults for the line.
        res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id,
            False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id,
            option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'),
            False, order.fiscal_position.id, True, dict(request.context or {}, company_id=order.company_id.id))
        vals = res.get('value', {})
        if 'tax_id' in vals:
            # (6, 0, ids) = replace the whole many2many with these ids.
            vals['tax_id'] = [(6, 0, vals['tax_id'])]
        vals.update({
            'price_unit': option.price_unit,
            'website_description': option.website_description,
            'name': option.name,
            'order_id': order.id,
            'product_id' : option.product_id.id,
            'product_uos_qty': option.quantity,
            'product_uos': option.uom_id.id,
            'product_uom_qty': option.quantity,
            'product_uom': option.uom_id.id,
            'discount': option.discount,
        })
        line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context)
        # Link the option to its new line so it is not offered twice.
        option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context)
        return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
| agpl-3.0 |
wo3kie/pchGenerator | tests/test_dag.py | 1 | 10343 | import unittest
from dag import\
DfsNode,\
Dag,\
DagNode
#
# TestDfsNode
#
class TestDfsNode(unittest.TestCase):
    """Tests for DfsNode: initial state and pre/post visit counters."""

    def test_init(self):
        fresh = DfsNode()
        self.assertEqual(fresh.getColor(), DfsNode.White)
        self.assertEqual(fresh.getPreVisit(), -1)
        self.assertEqual(fresh.getPostVisit(), -1)

    def test_preVisit(self):
        n = DfsNode()
        n.setPreVisit(11)
        self.assertEqual(n.getPreVisit(), 11)

    def test_postVisit(self):
        n = DfsNode()
        n.setPostVisit(11)
        self.assertEqual(n.getPostVisit(), 11)

    def test_pre_post(self):
        n = DfsNode()
        n.setPreVisit(11)
        n.setPostVisit(22)
        self.assertEqual(n.getPreVisit(), 11)
        self.assertEqual(n.getPostVisit(), 22)

    def test_pre_post_raise(self):
        # A post-visit time not greater than the pre-visit time is invalid.
        n = DfsNode()
        n.setPreVisit(11)
        self.assertRaises(Exception, n.setPostVisit, 10)
        self.assertRaises(Exception, n.setPostVisit, 11)
#
# TestDagNode
#
class TestDagNode( unittest.TestCase ):
    """Tests for DagNode: payload storage, value equality, parent/child
    links, root/leaf classification and recursive colouring."""
    def test_init( self ):
        node = DagNode( 11 )
        self.assertEqual( node.getData(), 11 )
        self.assertEqual( len( node.getChildren() ), 0 )
        self.assertEqual( len( node.getParents() ), 0 )
    def test_equal( self ):
        # Nodes compare equal iff their payloads are equal.
        a11 = DagNode( 11 )
        b11 = DagNode( 11 )
        a22 = DagNode( 22 )
        b22 = DagNode( 22 )
        self.assertTrue( a11 == b11 )
        self.assertTrue( a22 == b22 )
        self.assertFalse( a11 == a22 )
        self.assertFalse( b11 == b22 )
    def test_addChild( self ):
        node1 = DagNode( 11 )
        node2 = DagNode( 22 )
        node1.addChild( node2 )
        self.assertTrue( node2 in node1.getChildren() )
        node2.addChild( node1 )
        self.assertTrue( node1 in node2.getChildren() )
    def test_addParent( self ):
        node1 = DagNode( 11 )
        node2 = DagNode( 22 )
        node1.addParent( node2 )
        self.assertTrue( node2 in node1.getParents() )
        node2.addParent( node1 )
        self.assertTrue( node1 in node2.getParents() )
    def test_isRoot( self ):
        # A node stays a root until it gains a parent; adding children
        # does not affect root status.
        node1 = DagNode( 11 )
        node2 = DagNode( 22 )
        node3 = DagNode( 33 )
        self.assertTrue( node1.isRoot() )
        node1.addChild( node2 )
        self.assertTrue( node1.isRoot() )
        node3.addChild( node1 )
        self.assertTrue( node1.isRoot() )
        node1.addParent( node3 )
        self.assertFalse( node1.isRoot() )
    def test_isLeaf( self ):
        # A node stays a leaf until it gains a child; adding parents
        # does not affect leaf status.
        node1 = DagNode( 11 )
        node2 = DagNode( 22 )
        node3 = DagNode( 33 )
        self.assertTrue( node1.isLeaf() )
        self.assertTrue( node2.isLeaf() )
        self.assertTrue( node3.isLeaf() )
        node1.addChild( node2 )
        self.assertFalse( node1.isLeaf() )
        self.assertTrue( node2.isLeaf() )
        node3.addChild( node1 )
        self.assertFalse( node1.isLeaf() )
    def test_setColorRecursively( self ):
        # Colouring a node must propagate to all of its descendants.
        node1 = DagNode( 11 )
        node2 = DagNode( 22 )
        node3 = DagNode( 33 )
        node1.addChild( node2 );
        node2.addChild( node3 );
        self.assertEqual( node1.getColor(), DfsNode.White )
        self.assertEqual( node2.getColor(), DfsNode.White )
        self.assertEqual( node3.getColor(), DfsNode.White )
        node1.setColorRecursively( DfsNode.Black )
        self.assertEqual( node1.getColor(), DfsNode.Black )
        self.assertEqual( node2.getColor(), DfsNode.Black )
        self.assertEqual( node3.getColor(), DfsNode.Black )
#
# DAGTest
#
class DAGTest( unittest.TestCase ):
    """Tests for Dag: level validation when adding nodes, tree wiring
    from (level, name) pairs, and handling of repeated names."""
    def test_add_raise( self ):
        # Levels must start at 1 and may grow by at most one step.
        dag = Dag()
        self.assertRaises( Exception, dag.add, -1, "filename" )
        self.assertRaises( Exception, dag.add, 2, "filename" )
        self.assertRaises( Exception, dag.add, 3, "filename" )
        dag.add( 1, "filename" )
        self.assertRaises( Exception, dag.add, 3, "filename" )
        self.assertRaises( Exception, dag.add, 4, "filename" )
    def test_add_1( self ):
        dag = Dag()
        # filename_1_1
        # filename_2_1
        # filename_3_1
        # filename_2_2
        # filename_3_2
        # filename_1_2
        filename_1_1 = DagNode( "filename_1_1" )
        filename_2_1 = DagNode( "filename_2_1" )
        filename_3_1 = DagNode( "filename_3_1" )
        filename_2_2 = DagNode( "filename_2_2" )
        filename_3_2 = DagNode( "filename_3_2" )
        filename_1_2 = DagNode( "filename_1_2" )
        dag.add( 1, "filename_1_1" )
        dag.add( 2, "filename_2_1" )
        dag.add( 3, "filename_3_1" )
        dag.add( 2, "filename_2_2" )
        dag.add( 3, "filename_3_2" )
        dag.add( 1, "filename_1_2" )
        self.assertEqual( dag.getRoot().getChildren(), set( [ filename_1_1, filename_1_2 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1, filename_2_2 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
        self.assertEqual( dag.get( "filename_1_2" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_1_2" ).getParents(), set( [ dag.getRoot() ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set( [ filename_3_1, ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ dag.get( "filename_1_1" ) ] ) )
        self.assertEqual( dag.get( "filename_2_2" ).getChildren(), set( [ filename_3_2, ] ) )
        self.assertEqual( dag.get( "filename_2_2" ).getParents(), set( [ dag.get( "filename_1_1" ) ] ) )
        self.assertEqual( dag.get( "filename_3_1" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_3_1" ).getParents(), set( [ dag.get( "filename_2_1" ) ] ) )
        self.assertEqual( dag.get( "filename_3_2" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_3_2" ).getParents(), set( [ dag.get( "filename_2_2" ) ] ) )
    def test_add_2( self ):
        # The same name added at two levels: the single node ends up with
        # both parents.
        dag = Dag()
        # filename_1_1
        # filename_2_1
        # filename_leaf
        # filename_leaf
        filename_1_1 = DagNode( "filename_1_1" )
        filename_2_1 = DagNode( "filename_2_1" )
        filename_leaf = DagNode( "filename_leaf" )
        dag.add( 1, "filename_1_1" )
        dag.add( 2, "filename_2_1" )
        dag.add( 3, "filename_leaf" )
        dag.add( 1, "filename_leaf" )
        self.assertEqual( dag.getRoot().getChildren(), set( [ filename_1_1, filename_leaf ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set( [ filename_leaf, ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
        self.assertEqual( dag.get( "filename_leaf" ).getChildren(), set() )
        self.assertEqual( \
            dag.get( "filename_leaf" ).getParents()\
            , set( [ filename_2_1, dag.getRoot() ] )\
        )
    def test_add_3( self ):
        # A shared child referenced from two different subtrees.
        dag = Dag()
        # filename_1_1
        # filename_2_1
        # filename_3_1
        # filename_2_2
        # filename_3_1
        filename_1_1 = DagNode( "filename_1_1" )
        filename_2_1 = DagNode( "filename_2_1" )
        filename_3_1 = DagNode( "filename_3_1" )
        filename_2_2 = DagNode( "filename_2_2" )
        dag.add( 1, "filename_1_1" )
        dag.add( 2, "filename_2_1" )
        dag.add( 3, "filename_3_1" )
        dag.add( 2, "filename_2_2" )
        dag.add( 3, "filename_3_1" )
        self.assertEqual( dag.getRoot().getChildren(), set( [ filename_1_1 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1, filename_2_2 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set( [ filename_3_1, ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
        self.assertEqual( dag.get( "filename_2_2" ).getChildren(), set( [ filename_3_1, ] ) )
        self.assertEqual( dag.get( "filename_2_2" ).getParents(), set( [ filename_1_1 ] ) )
        self.assertEqual( dag.get( "filename_3_1" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_3_1" ).getParents(), set( [ filename_2_1, filename_2_2 ] ) )
    def test_cycle( self ):
        # Re-adding an ancestor must not create a cycle.
        dag = Dag()
        # filename_1_1
        # filename_2_1
        # filename_1_1
        filename_1_1 = DagNode( "filename_1_1" )
        filename_2_1 = DagNode( "filename_2_1" )
        dag.add( 1, "filename_1_1" )
        dag.add( 2, "filename_2_1" )
        dag.add( 3, "filename_1_1" )
        self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
    def test_one_node_twice( self ):
        # A node appearing twice keeps accumulating children on the one
        # underlying DagNode.
        dag = Dag()
        # filename_1_1
        # filename_2_1
        # filename_1_1
        # filename_3_1
        filename_1_1 = DagNode( "filename_1_1" )
        filename_2_1 = DagNode( "filename_2_1" )
        filename_3_1 = DagNode( "filename_3_1" )
        dag.add( 1, "filename_1_1" )
        dag.add( 2, "filename_2_1" )
        dag.add( 3, "filename_1_1" )
        dag.add( 4, "filename_3_1" )
        self.assertEqual( dag.get( "filename_1_1" ).getChildren(), set( [ filename_2_1, filename_3_1 ] ) )
        self.assertEqual( dag.get( "filename_1_1" ).getParents(), set( [ dag.getRoot() ] ) )
        self.assertEqual( dag.get( "filename_2_1" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_2_1" ).getParents(), set( [ filename_1_1 ] ) )
        self.assertEqual( dag.get( "filename_3_1" ).getChildren(), set() )
        self.assertEqual( dag.get( "filename_3_1" ).getParents(), set( [ filename_1_1 ] ) )
#
# main
#
if __name__ == "__main__":
    # Run all test cases in this module via the unittest CLI runner.
    unittest.main()
| mit |
liorvh/golismero | tools/theHarvester/discovery/bingsearchapi.py | 8 | 1060 | from bingsearch import *
class search_bing_api(search_bing):
    """Bing search via the api.search.live.net XML endpoint.

    Subclasses search_bing but issues keyed API requests instead of
    scraping the public result pages. Written for Python 2 (httplib,
    unicode); the prints below use the parenthesized single-argument
    form, which behaves identically on Python 2 and also parses on
    Python 3.
    """

    def __init__(self, word, options):
        self.apiserver = "api.search.live.net"
        # NOTE(review): the key ships blank in the public source;
        # process() refuses to run without one.
        self.bing_api_key = ""
        search_bing.__init__(self, word, options)

    def do_search(self):
        """Fetch one page of API results and append it to total_results."""
        h = httplib.HTTP(self.apiserver)
        h.putrequest('GET', "/xml.aspx?Appid=" + self.bing_api_key + "&query=%40" + self.word + "&sources=web&web.count=40&web.offset=" + str(self.counter))
        h.putheader('Host', "api.search.live.net")
        h.putheader('User-agent', self.userAgent)
        h.endheaders()
        returncode, returnmsg, response_headers = h.getreply()
        # Decode the body with the charset advertised by the server.
        encoding = response_headers['content-type'].split('charset=')[-1]
        self.total_results += unicode(h.getfile().read(), encoding)

    def process(self):
        """Page through the API results until self.limit is reached."""
        print("[-] Searching Bing using API Key:")
        if self.bing_api_key == "":
            print("Cannot perform a Bing API Search without a Key in discovery/bingsearch.py")
            return
        while (self.counter < self.limit):
            self.do_search()
            # Throttle requests to stay polite to the API.
            time.sleep(0.3)
            self.counter += self.quantity
            print("\r\tProcessed " + str(self.counter) + " results...")
laperry1/android_external_chromium_org | build/android/buildbot/bb_annotations.py | 172 | 1057 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions to print buildbot messages."""
def PrintLink(label, url):
"""Adds a link with name |label| linking to |url| to current buildbot step.
Args:
label: A string with the name of the label.
url: A string of the URL.
"""
print '@@@STEP_LINK@%s@%s@@@' % (label, url)
def PrintMsg(msg):
"""Appends |msg| to the current buildbot step text.
Args:
msg: String to be appended.
"""
print '@@@STEP_TEXT@%s@@@' % msg
def PrintSummaryText(msg):
"""Appends |msg| to main build summary. Visible from waterfall.
Args:
msg: String to be appended.
"""
print '@@@STEP_SUMMARY_TEXT@%s@@@' % msg
def PrintError():
  """Marks the current step as failed."""
  # Emits the STEP_FAILURE annotation recognized by the buildbot log parser.
  print '@@@STEP_FAILURE@@@'
def PrintWarning():
  """Marks the current step with a warning."""
  # Emits the STEP_WARNINGS annotation recognized by the buildbot log parser.
  print '@@@STEP_WARNINGS@@@'
def PrintNamedStep(step):
  """Prints a BUILD_STEP annotation naming the step |step|.

  Args:
    step: String name for the step.
  """
  print '@@@BUILD_STEP %s@@@' % step
| bsd-3-clause |
damonkohler/sl4a | python/src/Lib/compiler/visitor.py | 267 | 3896 | from compiler import ast
# XXX should probably rename ASTVisitor to ASTWalker
# XXX can it be made even more generic?
class ASTVisitor:
    """Performs a depth-first walk of the AST
    The ASTVisitor will walk the AST, performing either a preorder or
    postorder traversal depending on which method is called.
    methods:
    preorder(tree, visitor)
    postorder(tree, visitor)
        tree: an instance of ast.Node
        visitor: an instance with visitXXX methods
    The ASTVisitor is responsible for walking over the tree in the
    correct order.  For each node, it checks the visitor argument for
    a method named 'visitNodeType' where NodeType is the name of the
    node's class, e.g. Class.  If the method exists, it is called
    with the node as its sole argument.
    The visitor method for a particular node type can control how
    child nodes are visited during a preorder walk.  (It can't control
    the order during a postorder walk, because it is called _after_
    the walk has occurred.)  The ASTVisitor modifies the visitor
    argument by adding a visit method to the visitor; this method can
    be used to visit a child node of arbitrary type.
    """
    # VERBOSE > 0 enables the (commented-out) dispatch tracing below and in
    # ExampleASTVisitor.
    VERBOSE = 0
    def __init__(self):
        self.node = None    # most recently dispatched node
        self._cache = {}    # node class -> resolved visitor method
    def default(self, node, *args):
        """Fallback visit: recurse into each child of node."""
        for child in node.getChildNodes():
            self.dispatch(child, *args)
    def dispatch(self, node, *args):
        """Look up (and cache) visit<ClassName> on the visitor and call it,
        falling back to self.default when the visitor has no such method."""
        self.node = node
        klass = node.__class__
        meth = self._cache.get(klass, None)
        if meth is None:
            className = klass.__name__
            meth = getattr(self.visitor, 'visit' + className, self.default)
            self._cache[klass] = meth
##        if self.VERBOSE > 0:
##            className = klass.__name__
##            if self.VERBOSE == 1:
##                if meth == 0:
##                    print "dispatch", className
##            else:
##                print "dispatch", className, (meth and meth.__name__ or '')
        return meth(node, *args)
    def preorder(self, tree, visitor, *args):
        """Do preorder walk of tree using visitor"""
        self.visitor = visitor
        # Expose dispatch on the visitor so visitXXX methods can recurse into
        # children of arbitrary type.
        visitor.visit = self.dispatch
        self.dispatch(tree, *args) # XXX *args make sense?
class ExampleASTVisitor(ASTVisitor):
    """Prints examples of the nodes that aren't visited
    This visitor-driver is only useful for development, when it's
    helpful to develop a visitor incrementally, and get feedback on what
    you still have to do.
    """
    # Class-level: node classes already reported once, to avoid repeats.
    examples = {}
    def dispatch(self, node, *args):
        """Like ASTVisitor.dispatch, but prints one example dump for each
        node class the visitor does not handle (when VERBOSE > 0)."""
        self.node = node
        meth = self._cache.get(node.__class__, None)
        className = node.__class__.__name__
        if meth is None:
            # Note: 0 (not self.default) marks "unhandled" so the branch
            # below can detect and report it.
            meth = getattr(self.visitor, 'visit' + className, 0)
            self._cache[node.__class__] = meth
        if self.VERBOSE > 1:
            print "dispatch", className, (meth and meth.__name__ or '')
        if meth:
            meth(node, *args)
        elif self.VERBOSE > 0:
            klass = node.__class__
            if klass not in self.examples:
                self.examples[klass] = klass
                print
                print self.visitor
                print klass
                for attr in dir(node):
                    if attr[0] != '_':
                        print "\t", "%-12.12s" % attr, getattr(node, attr)
                print
            # Fall back to generic child traversal so the walk continues.
            return self.default(node, *args)
# XXX this is an API change
_walker = ASTVisitor
def walk(tree, visitor, walker=None, verbose=None):
    """Walk tree with visitor, creating the default walker on demand and
    returning the visitor afterwards."""
    active = walker if walker is not None else _walker()
    if verbose is not None:
        active.VERBOSE = verbose
    active.preorder(tree, visitor)
    return active.visitor
def dumpNode(node):
    """Debug helper: print node's class and every public attribute."""
    print node.__class__
    for attr in dir(node):
        if attr[0] != '_':
            print "\t", "%-10.10s" % attr, getattr(node, attr)
| apache-2.0 |
FescueFungiShare/hydroshare | hs_app_netCDF/migrations/0001_initial.py | 1 | 7033 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import hs_core.models
class Migration(migrations.Migration):
    """Initial schema migration for the hs_app_netCDF app.

    Creates the NetcdfMetaData, NetcdfResource, OriginalCoverage and
    Variable models, plus a (content_type, object_id) uniqueness
    constraint on OriginalCoverage. Auto-generated; do not hand-edit
    field definitions.
    """
    dependencies = [
        ('auth', '0001_initial'),
        ('pages', '__first__'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0001_initial'),
        ('hs_core', '0001_initial'),
    ]
    operations = [
        # Metadata container extending the hs_core CoreMetaData model.
        migrations.CreateModel(
            name='NetcdfMetaData',
            fields=[
                ('coremetadata_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='hs_core.CoreMetaData')),
            ],
            options={
            },
            bases=('hs_core.coremetadata',),
        ),
        # The resource page type shown as "Multidimensional (NetCDF)".
        migrations.CreateModel(
            name='NetcdfResource',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
                ('comments_count', models.IntegerField(default=0, editable=False)),
                ('rating_count', models.IntegerField(default=0, editable=False)),
                ('rating_sum', models.IntegerField(default=0, editable=False)),
                ('rating_average', models.FloatField(default=0, editable=False)),
                ('public', models.BooleanField(default=True, help_text=b'If this is true, the resource is viewable and downloadable by anyone')),
                ('frozen', models.BooleanField(default=False, help_text=b'If this is true, the resource should not be modified')),
                ('do_not_distribute', models.BooleanField(default=False, help_text=b'If this is true, the resource owner has to designate viewers')),
                ('discoverable', models.BooleanField(default=True, help_text=b'If this is true, it will turn up in searches.')),
                ('published_and_frozen', models.BooleanField(default=False, help_text=b'Once this is true, no changes can be made to the resource')),
                ('content', models.TextField()),
                ('short_id', models.CharField(default=hs_core.models.short_id, max_length=32, db_index=True)),
                ('doi', models.CharField(help_text=b"Permanent identifier. Never changes once it's been set.", max_length=1024, null=True, db_index=True, blank=True)),
                ('object_id', models.PositiveIntegerField(null=True, blank=True)),
                ('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
                ('creator', models.ForeignKey(related_name='creator_of_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL, help_text=b'This is the person who first uploaded the resource')),
                ('edit_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can edit the resource', related_name='group_editable_hs_app_netcdf_netcdfresource', null=True, to='auth.Group', blank=True)),
                ('edit_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can edit the resource', related_name='user_editable_hs_app_netcdf_netcdfresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
                ('last_changed_by', models.ForeignKey(related_name='last_changed_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL, help_text=b'The person who last changed the resource', null=True)),
                ('owners', models.ManyToManyField(help_text=b'The person who has total ownership of the resource', related_name='owns_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(related_name='netcdfresources', verbose_name='Author', to=settings.AUTH_USER_MODEL)),
                ('view_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can view the resource', related_name='group_viewable_hs_app_netcdf_netcdfresource', null=True, to='auth.Group', blank=True)),
                ('view_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can view the resource', related_name='user_viewable_hs_app_netcdf_netcdfresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'Multidimensional (NetCDF)',
            },
            bases=('pages.page', models.Model),
        ),
        # Generic-FK metadata element describing the original spatial
        # reference of the NetCDF file.
        migrations.CreateModel(
            name='OriginalCoverage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField()),
                ('_value', models.CharField(max_length=1024, null=True)),
                ('projection_string_type', models.CharField(max_length=20, null=True, choices=[(b'', b'---------'), (b'EPSG Code', b'EPSG Code'), (b'OGC WKT Projection', b'OGC WKT Projection'), (b'Proj4 String', b'Proj4 String')])),
                ('projection_string_text', models.TextField(null=True, blank=True)),
                ('content_type', models.ForeignKey(related_name='hs_app_netcdf_originalcoverage_related', to='contenttypes.ContentType')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Generic-FK metadata element describing one NetCDF variable.
        migrations.CreateModel(
            name='Variable',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField()),
                ('name', models.CharField(max_length=100)),
                ('unit', models.CharField(max_length=100)),
                ('type', models.CharField(max_length=100, choices=[(b'Char', b'Char'), (b'Byte', b'Byte'), (b'Short', b'Short'), (b'Int', b'Int'), (b'Float', b'Float'), (b'Double', b'Double'), (b'Int64', b'Int64'), (b'Unsigned Byte', b'Unsigned Byte'), (b'Unsigned Short', b'Unsigned Short'), (b'Unsigned Int', b'Unsigned Int'), (b'Unsigned Int64', b'Unsigned Int64'), (b'String', b'String'), (b'User Defined Type', b'User Defined Type'), (b'Unknown', b'Unknown')])),
                ('shape', models.CharField(max_length=100)),
                ('descriptive_name', models.CharField(max_length=100, null=True, verbose_name=b'long name', blank=True)),
                ('method', models.TextField(null=True, verbose_name=b'comment', blank=True)),
                ('missing_value', models.CharField(max_length=100, null=True, blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='originalcoverage',
            unique_together=set([('content_type', 'object_id')]),
        ),
    ]
| bsd-3-clause |
maplion/SPEED | testCalculations/time.py | 1 | 6057 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SPEED: Tests calculations for Time; those in the Time subclass in speedcalc.py
GitHub repository: https://github.com/maplion/SPEED
@author: Ryan Dammrose aka MapLion
"""
import unittest
import speedcalc
import test_logging
__author__ = "Ryan Dammrose"
__copyright__ = "Copyright 2015"
__license__ = "MIT"
log = test_logging.TestLogging()
con_Time = speedcalc.Time()
class TestCalculations_time(unittest.TestCase):
    """Unit tests for the time-unit conversions in speedcalc.Time.

    Every test logs its start, performs a single conversion via the
    module-level ``con_Time`` instance, checks the result, and logs
    success; on any failure the test name is logged and the test fails
    with a "<conversion>() FAILED" message (matching the original
    per-test boilerplate, now factored into one helper).
    """

    def _check_conversion(self, testName, conversion, value, expected):
        """Run one conversion test with begin/success/failure logging.

        Args:
            testName: full test name, e.g. "test_secondToMinute"; also used
                to derive the failure message after the last underscore.
            conversion: bound conversion method of speedcalc.Time.
            value: quantity passed to the conversion.
            expected: expected conversion result.
        """
        try:
            log.print_test_begin(testName)
            # assertEqual (assertEquals is a deprecated alias) raises
            # AssertionError on mismatch, which the handler below converts
            # into the logged failure + self.fail message.
            self.assertEqual(conversion(value), expected)
            log.print_test_success(testName)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            log.print_test_failure(testName)
            self.fail(msg=testName[testName.rfind("_") + 1:] + "() FAILED")

    def test_secondToMinute(self):
        """Are seconds correctly converted to minutes?"""
        self._check_conversion("test_secondToMinute", con_Time.second_to_minute, 60, 1)

    def test_minuteToSecond(self):
        """Are minutes correctly converted to seconds?"""
        self._check_conversion("test_minuteToSecond", con_Time.minute_to_second, 1, 60)

    def test_minuteToHour(self):
        """Are minutes correctly converted to hours?"""
        self._check_conversion("test_minuteToHour", con_Time.minute_to_hour, 60, 1)

    def test_hourToMinute(self):
        """Are hours correctly converted to minutes?"""
        self._check_conversion("test_hourToMinute", con_Time.hour_to_minute, 1, 60)

    def test_secondToHour(self):
        """Are seconds correctly converted to hours?"""
        self._check_conversion("test_secondToHour", con_Time.second_to_hour, 3600, 1)

    def test_hourToSecond(self):
        """Are hours correctly converted to seconds?"""
        self._check_conversion("test_hourToSecond", con_Time.hour_to_second, 1, 3600)

    def test_secondToDay(self):
        """Are seconds correctly converted to days?"""
        self._check_conversion("test_secondToDay", con_Time.second_to_day, 86400, 1)

    def test_dayToSecond(self):
        """Are days correctly converted to seconds?"""
        self._check_conversion("test_dayToSecond", con_Time.day_to_second, 1, 86400)
##########################################################################################
def suite():
    """
    Gather all the tests from this module in a test suite.
    """
    test_names = (
        'test_secondToMinute',
        'test_minuteToSecond',
        'test_minuteToHour',
        'test_hourToMinute',
        'test_secondToHour',
        'test_hourToSecond',
        'test_secondToDay',
        'test_dayToSecond',
    )
    _suite = unittest.TestSuite()
    for name in test_names:
        _suite.addTest(TestCalculations_time(name))
    return _suite
| mit |
motion2015/edx-platform | common/test/acceptance/pages/studio/asset_index.py | 102 | 2733 | """
The Files and Uploads page for a course in Studio
"""
import urllib
import os
from opaque_keys.edx.locator import CourseLocator
from . import BASE_URL
from .course_page import CoursePage
from bok_choy.javascript import wait_for_js, requirejs
@requirejs('js/views/assets')
class AssetIndexPage(CoursePage):
    """
    The Files and Uploads page for a course in Studio.
    """

    url_path = "assets"
    # CSS id of the asset-type column header hosting the filter dropdown.
    type_filter_element = '#js-asset-type-col'

    @property
    def url(self):
        """
        Construct a URL to the page within the course.
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        default_store = os.environ.get('DEFAULT_STORE', 'draft')
        course_key = CourseLocator(
            self.course_info['course_org'],
            self.course_info['course_num'],
            self.course_info['course_run'],
            deprecated=(default_store == 'draft')
        )
        url = "/".join([BASE_URL, self.url_path, urllib.quote_plus(unicode(course_key))])
        # Ensure a trailing slash.  Bug fix: the original used the identity
        # test `url[-1] is '/'`, which only works by accident of CPython
        # string interning; use equality instead.
        return url if url[-1] == '/' else url + '/'

    @wait_for_js
    def is_browser_on_page(self):
        """True once the uploads view body element is present."""
        return self.q(css='body.view-uploads').present

    @wait_for_js
    def type_filter_on_page(self):
        """
        Checks that type filter is in table header.
        """
        return self.q(css=self.type_filter_element).present

    @wait_for_js
    def type_filter_header_label_visible(self):
        """
        Checks type filter label is added and visible in the pagination header.
        """
        return self.q(css='span.filter-column').visible

    @wait_for_js
    def click_type_filter(self):
        """
        Clicks type filter menu.
        """
        self.q(css=".filterable-column .nav-item").click()

    @wait_for_js
    def select_type_filter(self, filter_number):
        """
        Selects Type filter from dropdown which filters the results.
        Returns False if no filter.
        """
        self.wait_for_ajax()
        if self.q(css=".filterable-column .nav-item").is_present():
            # Open the dropdown first if it is not already visible.
            if not self.q(css=self.type_filter_element + " .wrapper-nav-sub").visible:
                self.q(css=".filterable-column > .nav-item").first.click()
                self.wait_for_element_visibility(
                    self.type_filter_element + " .wrapper-nav-sub", "Type Filter promise satisfied.")
            self.q(css=self.type_filter_element + " .column-filter-link").nth(filter_number).click()
            self.wait_for_ajax()
            return True
        return False

    def return_results_set(self):
        """
        Returns the asset set from the page
        """
        return self.q(css="#asset-table-body tr").results
| agpl-3.0 |
rvykydal/blivet | examples/lvm_non_linear.py | 4 | 2168 | import os
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
# back the demo with two sparse 100GiB disk image files instead of real disks
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.disk_images["disk2"] = disk2_file
b.reset()
try:
    disk1 = b.devicetree.get_device_by_name("disk1")
    disk2 = b.devicetree.get_device_by_name("disk2")
    b.initialize_disk(disk1)
    b.initialize_disk(disk2)
    # one 50GiB LVM physical-volume partition on each image disk
    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
    b.create_device(pv)
    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
    b.create_device(pv2)
    # allocate the partitions (decide where and on which disks they'll reside)
    blivet.partitioning.do_partitioning(b)
    vg = b.new_vg(parents=[pv, pv2])
    b.create_device(vg)
    # new lv with base size 5GiB and unbounded growth and an ext4 filesystem
    dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
                   parents=[vg], name="unbounded")
    b.create_device(dev)
    # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
    dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
                   maxsize=Size("15GiB"), parents=[vg], name="bounded")
    b.create_device(dev)
    # new lv with a fixed size of 2GiB formatted as swap space -- the
    # "non-linear" part of this example: a raid1 segment spanning both PVs
    # dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="mirror", pvs=[pv, pv2])
    dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="raid1", pvs=[pv, pv2])
    b.create_device(dev)
    # allocate the growable lvs
    blivet.partitioning.grow_lvm(b)
    print(b.devicetree)
    # write the new partitions to disk and format them as specified
    b.do_it()
    print(b.devicetree)
    input("Check the state and hit ENTER to trigger cleanup")
finally:
    # always detach the loop-backed images and remove the backing files
    b.devicetree.teardown_disk_images()
    os.unlink(disk1_file)
    os.unlink(disk2_file)
Kortemme-Lab/kddg | kddg/deprecated/dbstats.py | 1 | 24839 | # This file exists as a placeholder for old code used to generate statistics. Yes, it's bad practice but it should
# save time later. The data will be pulled from the database rather than the raw files and used to create graphs.
# THESE FUNCTIONS SHOULD BE MERGED WITH analysis.py AND monomer_analysis.py TO FORM AN ANALYSIS LAYER OF THE API
def getEmptyMutationMatrix(self):
    """Return a fresh 20x20 nested dict of zero counts, keyed
    [wildtype amino acid][mutant amino acid]."""
    small = set(['A', 'C', 'D', 'G', 'N', 'P', 'S', 'T', 'V'])
    large = set(["E", "F", "H", "I", "K", "L", "M", "Q", "R", "W", "Y"])
    residues = small | large
    # One independent inner dict per wildtype residue.
    return dict((wt, dict((mt, 0) for mt in residues)) for wt in residues)
def printMutationMatrix(self, mutationMatrix, divisor = 1.0):
	"""Print mutationMatrix as CSV: a header row of the 20 amino acids,
	then one row per wildtype with each count divided by divisor (pass
	numrecords/100.0 to print percentages instead of raw counts)."""
	smallAminoAcids = set(['A', 'C', 'D', 'G', 'N', 'P', 'S', 'T', 'V'])
	largeAminoAcids = set(["E", "F", "H", "I", "K", "L", "M", "Q", "R", "W", "Y"])
	allAminoAcids = smallAminoAcids.union(largeAminoAcids)
	allAminoAcids = sorted(list(allAminoAcids))
	# NOTE(review): 'join' is the sequence-first join(list, sep) helper
	# imported elsewhere in this module, not str.join -- confirm.
	print(",%s" % join(allAminoAcids,","))
	for wt in allAminoAcids:
		print("%s,%s" % (wt, join(map(str, [float(mutationMatrix[wt][mt])/float(divisor) for mt in allAminoAcids]),",")))
def getMutationTypesFromOtherDataSets(self):
    """Tally wildtype -> mutant amino acid substitution counts for three
    published ddG benchmark sets (Guerois, Liz Kellogg, Potapov) and print
    each set's 20x20 mutation matrix, first as raw counts and then
    normalized to percentages of the number of tallied records.

    Refactored from three near-identical copy-pasted sections.  The unused
    PTids set and the per-residue from/to and small/large tallies -- which
    were only consumed by commented-out debug prints -- have been dropped;
    printed output is unchanged.  Multi-mutation records (Guerois) and the
    malformed 'LA' mutant records (Potapov) are skipped, as before.
    """
    smallAminoAcids = set(['A', 'C', 'D', 'G', 'N', 'P', 'S', 'T', 'V'])
    largeAminoAcids = set(["E", "F", "H", "I", "K", "L", "M", "Q", "R", "W", "Y"])
    allAminoAcids = smallAminoAcids.union(largeAminoAcids)

    def tallyAndPrint(name, pairs):
        # Accumulate one (wildtype, mutant) count matrix and print it raw
        # and as a percentage of the counted records.
        print(name)
        mutationMatrix = self.getEmptyMutationMatrix()
        numrecords = 0
        for wt, mt in pairs:
            assert(wt in allAminoAcids)
            assert(mt in allAminoAcids)
            mutationMatrix[wt][mt] += 1
            numrecords += 1
        self.printMutationMatrix(mutationMatrix)
        print("")
        self.printMutationMatrix(mutationMatrix, divisor = float(numrecords)/100.0)

    def guerois_pairs():
        # Guerois encodes each mutation as a single "<wt><resid><mt>" token
        # in column 7; comma-separated multi-mutation records are skipped.
        lines = rosettahelper.readFileLines("../rawdata/guerois/guerois-annotated.csv")
        assert(lines[0].split("\t")[7] == 'Mutations')
        for line in lines:
            if line[0] != "#" and line.strip():
                mutation = line.split("\t")[7].strip()
                if mutation.find(",") == -1:
                    # Residue ID may carry a one-letter insertion code.
                    assert(mutation[1:-1].isdigit() or mutation[1:-2].isdigit())
                    yield (mutation[0], mutation[-1])

    def column_pairs(filename, wtcol, mtcol, skipMutants=()):
        # Liz Kellogg and Potapov carry separate WildTypeAA/MutantAA columns;
        # only the column indices (and Potapov's bad 'LA' entries) differ.
        lines = rosettahelper.readFileLines(filename)
        assert(lines[0].split("\t")[wtcol] == 'WildTypeAA')
        assert(lines[0].split("\t")[mtcol] == 'MutantAA')
        for line in lines:
            if line[0] != "#" and line.strip():
                tokens = line.split("\t")
                wt = tokens[wtcol].strip()
                mt = tokens[mtcol].strip()
                if mt not in skipMutants:
                    yield (wt, mt)

    tallyAndPrint("Guerois", guerois_pairs())
    tallyAndPrint("Liz", column_pairs("../rawdata/liz_kellogg/ProteinsPaper-annotated.csv", 7, 9))
    tallyAndPrint("Potapov", column_pairs("../rawdata/potapov/mutants-annotated.csv", 6, 8, skipMutants=("LA",)))
def getDataForRosettaCon(self):
	"""Parse the ProTherm flat file, cross-reference it against the ddG
	database, and print mutation statistics (counts by number of mutations,
	per-residue from/to tallies, and the 20x20 mutation matrix) for the
	RosettaCon presentation.  Records without any ddG/ddG_H2O value, records
	in ptReader.skipTheseCases, and records with unparsable fields are
	skipped with console warnings."""
	# Get data to determine whether or not to store parsed data
	ddGdb = self.ddGdb
	FieldNames = ddGdb.FieldNames
	publicationSources = {}
	for r in ddGdb.execute('SELECT * FROM %s' % FieldNames.Source._name):
		publicationID = r[FieldNames.Source.ID]
		publicationSources[publicationID] = r
		publicationSources[publicationID]["DDGValueLocations"] = [location["Location"] for location in ddGdb.execute('SELECT * FROM SourceDDGValueLocation WHERE SourceID=%s', parameters=(publicationID,))]
	# Map ProTherm record number -> ExperimentAssay ID for records already
	# imported into the v25616 dataset.
	ExistingDBIDs = {}
	for r in ddGdb.execute('SELECT RecordNumber, DataSetDDGSource.ExperimentAssayID FROM DataSetDDG INNER JOIN DataSetDDGSource ON DataSetDDGID=DataSetDDG.ID INNER JOIN ExperimentAssayDDG ON DataSetDDGSource.ExperimentAssayID=ExperimentAssayDDG.ExperimentAssayID WHERE DataSetID="ProTherm_v25616_2011/12/21"', cursorClass = dbi.StdCursor):
		ExistingDBIDs[int(r[0])] = int(r[1])
	PublicationsToCheck = [r["SourceID"] for r in ddGdb.execute('SELECT SourceID FROM ProThermUnits WHERE DDGConvention IS NULL')]
	PublicationsToCheckProThermRecords = dict.fromkeys(PublicationsToCheck)
	for p in PublicationsToCheckProThermRecords.keys():
		PublicationsToCheckProThermRecords[p] = []
	PublicationsToCheck = set(PublicationsToCheck)
	setOfSuccessfullyParsedIDs = set()
	AllPDBIDs = {}
	ID = None
	mutation = {}
	experiments = {}
	count = 0
	recordcount = 0
	chains = {}
	# Accumulators for the statistics printed at the end.
	mutationsByType = {}
	smallAminoAcids = set(['A', 'C', 'D', 'G', 'N', 'P', 'S', 'T', 'V'])
	largeAminoAcids = set(["E", "F", "H", "I", "K", "L", "M", "Q", "R", "W", "Y"])
	allAminoAcids = smallAminoAcids.union(largeAminoAcids)
	singleMutationsByType = {"s" : {"s" : 0, "l" : 0}, "l" : {"s" : 0, "l" : 0}}
	mutationsToType = dict.fromkeys(list(allAminoAcids), 0)
	mutationsFromType = dict.fromkeys(list(allAminoAcids), 0)
	# Variables for the experimental conditions and publications data
	PMIDlist = {}
	ptReader = ProThermReader(os.path.join("..", "rawdata", "ProTherm", "ProTherm25616.dat"))
	#ptReader.ExistingScores = ExistingScores
	if not ptReader.test():
		return
	ptReader.ExistingDBIDs = ExistingDBIDs
	# Leave check_just_these_cases empty to process every available record.
	check_just_these_cases = []
	check_these_cases = check_just_these_cases or ptReader.list_of_available_keys
	colortext.message("Parsing ProTherm")
	requiredProThermIDs = self.readProThermIDsFromOtherDataSets()
	newPublicationsToCheck = set()
	mutationMatrix = self.getEmptyMutationMatrix()
	nummatrixrecords = 0
	colortext.printf("|" + ("*" * (int(len(ptReader.list_of_available_keys)/1000) - 2)) + "|")
	for ID in check_these_cases:
		#Progress meter
		if ID % 1000 == 0:
			colortext.write(".", "green")
			colortext.flush()
		# Skip bad cases in ProTherm
		if ID in ptReader.skipTheseCases:
			continue
		record = ptReader.readRecord(ID)
		store = True
		if record["ddG"] == None and record["ddG_H2O"] == None:
			# "No ddG value for record %d. Skipping." % ID
			continue
		# *** Experiment records ***
		# Get PDB ID
		if record["PDB_wild"]:
			pdbID = record["PDB_wild"].upper()
		# Parse chain
		chainID = None
		if record["MUTATED_CHAIN"] and len(record["MUTATED_CHAIN"]) == 1:
			chainID = record["MUTATED_CHAIN"]
			assert(len(chainID) == 1)
			chains[chainID] = True
		else:
			colortext.error("Error processing chain: ID %d, %s" % (ID, record["MUTATED_CHAIN"]))
			store = False
		# Parse mutant
		mutantlist = []
		if record["PDB_mutant"]:
			mutantlist = record["PDB_mutant"].split(",")
			for mutantID in mutantlist:
				mutantID = mutantID.strip()
				if not len(mutantID) == 4:
					raise Exception('Error parsing mutant list "%s" in record %d: ' % (mutantlist, ID))
				AllPDBIDs[mutantID] = True
		# Parse mutations
		mutations = None
		try:
			mutations = ptReader.getMutations(ID, record)
			if not mutations:
				store = False
		except Exception, e:
			colortext.error(str(e))
			colortext.error(traceback.format_exc())
			colortext.warning("An exception occurred parsing the mutation '%s' in record %d." % (record["MUTATION"], ID))
			continue
			# NOTE(review): the raise below is unreachable -- the 'continue'
			# above always exits this iteration first.
			raise Exception("An exception occurred parsing the mutation '%s' in record %d." % (record["MUTATION"], ID))
		# We have enough information to create the Experiment records
		if mutations and store:
			nummut = len(mutations)
			mutationsByType[nummut] = mutationsByType.get(nummut, {"ddG" : 0, "ddG_H2O" : 0})
			# Classify single mutations by small/large residue size.
			fromsize = None
			tosize = None
			if nummut == 1:
				if mutations[0].WildTypeAA in smallAminoAcids:
					fromsize = "s"
				else:
					fromsize = "l"
				if mutations[0].MutantAA in smallAminoAcids:
					tosize = "s"
				else:
					tosize = "l"
			if record["ddG"] != None:
				mutationsByType[nummut]["ddG"] += 1
				if nummut == 1:
					singleMutationsByType[fromsize][tosize] += 1
					mutationsFromType[mutations[0].WildTypeAA] += 1
					mutationsToType[mutations[0].MutantAA] += 1
					mutationMatrix[mutations[0].WildTypeAA][mutations[0].MutantAA] += 1
					nummatrixrecords += 1
				recordcount += 1
			if record["ddG_H2O"] != None:
				if nummut == 1:
					singleMutationsByType[fromsize][tosize] += 1
					mutationsFromType[mutations[0].WildTypeAA] += 1
					mutationsToType[mutations[0].MutantAA] += 1
					mutationMatrix[mutations[0].WildTypeAA][mutations[0].MutantAA] += 1
					nummatrixrecords += 1
				mutationsByType[nummut]["ddG_H2O"] += 1
				recordcount += 1
		# *** ExperimentAssay records ***
		# Parse references
		record["dbReferencePK"] = None
		try:
			referenceID = ptReader.getReference(ID, record)
			record["dbReferencePK"] = "PMID:%s" % referenceID
			if not referenceID:
				store = False
		except Exception, e:
			colortext.error(str(e))
			colortext.error(traceback.format_exc())
			raise Exception("An exception occurred parsing the reference '%s' in record %d." % (record["REFERENCE"], ID))
		# Parse ddG and ddG_H2O
		ddG = None
		ddG_H2O = None
		dbExperimentDDGAssay = None
		dbExperimentDDGH2OAssay = None
		assert(record.get("dbReferencePK"))
		if record["ddG"]:
			try:
				ddG = ptReader.getDDGInKcal(ID, record, useRosettaConvention = True)
			except Exception, e:
				# Parse failures are reported but not fatal here.
				colortext.error(str(e))
				colortext.error(traceback.format_exc())
				#ptReader.printRecord(ID)
				#raise Exception("An exception occurred parsing the ddG '%s' in record %d." % (record["ddG"], ID))
		if record["ddG_H2O"]:
			try:
				ddG_H2O = ptReader.getDDGH2OInKcal(ID, record, useRosettaConvention = True)
			except Exception, e:
				colortext.error(str(e))
				colortext.error(traceback.format_exc())
				#ptReader.printRecord(ID)
				#raise Exception("An exception occurred parsing the ddG_H2O '%s' in record %d." % (record["ddG_H2O"], ID))
		#if ExistingScores.get(ID) and (abs(ddG - ExistingScores[ID]) > 0.000001):
		#	colortext.error("ProTherm record %s exists as ExperimentScore entry %s but the values disagree (%s and %s respectively)." % (ID, ExistingDBIDs[ID], ddG, ExistingScores[ID]))
		if ddG == None and ddG_H2O == None:
			continue
	# Print the collected statistics.
	for k, v in sorted(mutationsByType.iteritems()):
		print("#mutations: %d" % k)
		print("#ddG: %d" % v["ddG"])
		print("#ddG_H2O: %d" % v["ddG_H2O"])
		print("")
	print("ProTherm:")
	print(recordcount)
	print(singleMutationsByType)
	print("mutationsFromType:")
	for k, v in sorted(mutationsFromType.iteritems()):
		print("%s, %d" % (k, v))
	print(sum(mutationsFromType.values()))
	print("mutationsToType:")
	for k, v in sorted(mutationsToType.iteritems()):
		print("%s, %d" % (k, v))
	print(sum(mutationsToType.values()))
	print("printMutationMatrix")
	self.printMutationMatrix(mutationMatrix)
	self.printMutationMatrix(mutationMatrix, divisor = float(nummatrixrecords)/100.0)
class ExperimentSet(DBObject):
    '''A set of experimental ddG measurements for one PDB structure, with helpers
    to validate mutations/chains and commit the whole set to the ddG database.

    NOTE(review): __init__ raises immediately ("Out of date."), so this entire
    class is dead code kept for reference. FieldNames_ is referenced in most
    methods but only bound locally in __init__; presumably a module-level
    FieldNames_ exists elsewhere in the file — verify before reviving this code.
    '''
    def __init__(self, ddGdb, pdbid, source, interface = None):
        #todo: delete
        raise Exception("Out of date.")
        # Everything below this raise is unreachable.
        self.ddGdb = ddGdb
        FieldNames_ = ddGdb.FlatFieldNames
        self.dict = {
            FieldNames_.Structure : pdbid,
            FieldNames_.Source : source,
            FieldNames_.ScoreVariance: None,
            "Interface" : interface,
            "Mutants" : {},
            "Mutations" : [],
            "ExperimentChains" : [],
            "ExperimentScores" : [],
            "StdDeviation" : None,
            "WithinStdDeviation" : None
        }
    def addMutant(self, mutant):
        # Record a mutant PDB ID associated with this experiment set (used as a set).
        self.dict["Mutants"][mutant] = True
    def addMutation(self, chainID, residueID, wildtypeAA, mutantAA, ID = None, SecondaryStructureLocation=None):
        '''Validate and record one point mutation; raises with a combined message
        if the chain letter, residue ID, or amino-acid codes are invalid.'''
        errors = []
        residueID = ("%s" % residueID).strip()
        if not chainID in AllowedChainLetters:
            errors.append("The chain '%s' is invalid." % chainID)
        if not wildtypeAA in AllowedAminoAcids:
            errors.append("The wildtype amino acid '%s' is invalid." % wildtypeAA)
        if not mutantAA in AllowedAminoAcids:
            errors.append("The mutant amino acid '%s' is invalid." % mutantAA)
        if not residueID.isdigit():
            # Non-numeric residue IDs may carry a trailing insertion code (e.g. "27A").
            if not residueID[0:-1].isdigit():
                errors.append("The residue '%s' is invalid." % residueID)
            elif residueID[-1] not in AllowedInsertionCodes:
                # NOTE(review): 'residue' is undefined here and would raise a
                # NameError if this branch ever runs; presumably residueID[-1]
                # was intended.
                errors.append("The insertion code '%s' of residue '%s' is invalid." % (residue[-1], residueID))
        if errors:
            ID = ID or ""
            if ID:
                ID = ", ID %s" % ID
            errors = join(['\t%s\n' % e for e in errors], "")
            raise Exception("An exception occurred processing a mutation in the dataset %s%s.\n%s" % (self.dict[FieldNames_.Source], ID, errors))
        self.dict["Mutations"].append({
            FieldNames_.Chain : chainID,
            FieldNames_.ResidueID : residueID,
            FieldNames_.WildTypeAA : wildtypeAA,
            FieldNames_.MutantAA : mutantAA
        })
    def addChain(self, chainID, ID = ""):
        # NOTE(review): the exception below references undefined 'errors' and
        # supplies four values for three %s placeholders, so this raise would
        # itself fail if triggered.
        if not chainID in AllowedChainLetters:
            raise Exception("An exception occurred processing a chain in the dataset %s%s.\n\tThe chain '%s' is invalid." % (self.dict[FieldNames_.Source], ID, errors, chainID))
        self.dict["ExperimentChains"].append(chainID)
    def getChains(self):
        # Return the list of chain IDs registered via addChain.
        return self.dict["ExperimentChains"]
    def setMutantIfUnset(self, mutant):
        # Only sets the mutant field when it has not been assigned yet.
        if not self.dict[FieldNames_.Mutant]:
            self.dict[FieldNames_.Mutant] = mutant
    def addExperimentalScore(self, sourceID, ddG, pdbID, numMeasurements = 1):
        '''Attach one experimental ddG value (keyed by source record ID) to the set.'''
        if pdbID != self.dict[FieldNames_.Structure]:
            raise colortext.Exception("Adding experimental score related to PDB structure %s to an experiment whose PDB structure should be %s." % (pdbID, self.dict[FieldNames_.Structure]))
        self.dict["ExperimentScores"].append({
            FieldNames_.SourceID : sourceID,
            FieldNames_.ddG : ddG,
            FieldNames_.NumberOfMeasurements : numMeasurements
        })
    def mergeScores(self, maxStdDeviation = 1.0):
        '''Compute the (population) variance and standard deviation of the individual
        scores and flag whether the spread is within maxStdDeviation.'''
        d = self.dict
        n = len(d["ExperimentScores"])
        if n > 1:
            n = float(n)
            sum = 0
            for experimentalResult in d["ExperimentScores"]:
                # Weighted merging of multi-measurement scores is not implemented.
                if experimentalResult[FieldNames_.NumberOfMeasurements] != 1:
                    raise Exception("Cannot merge scores when individual scores are from more than one measurement. Need to add logic to do proper weighting.")
                sum += experimentalResult[FieldNames_.ddG]
            mean = sum / n
            squaredsum = 0
            for experimentalResult in d["ExperimentScores"]:
                diff = (experimentalResult[FieldNames_.ddG] - mean)
                squaredsum += diff * diff
            variance = squaredsum / n
            d[FieldNames_.ScoreVariance] = variance
            stddev = sqrt(variance)
            d["StdDeviation"] = stddev
            d["WithinStdDeviation"] = stddev <= maxStdDeviation
        else:
            # Zero or one score: no spread by definition.
            d[FieldNames_.ScoreVariance] = 0
            d["WithinStdDeviation"] = True
    def isEligible(self):
        # True when the score spread is acceptable; mergeScores must run first.
        d = self.dict
        if d["WithinStdDeviation"] == None:
            raise Exception("Standard deviation not yet computed.")
        else:
            return d["WithinStdDeviation"]
    def __repr__(self):
        # NOTE(review): deliberately disabled; the code below is unreachable.
        raise Exception('''This is unlikely to work as I have not tested it in a while. In particular, ddG is not a string anymore.''')
        d = self.dict
        str = []
        str.append("%s: %s" % (FieldNames_.Structure, d[FieldNames_.Structure]))
        str.append("%ss: %s" % (FieldNames_.Mutant, join(d["Mutants"].keys(), ', ')))
        str.append("%s: %s" % (FieldNames_.Source, d[FieldNames_.Source]))
        str.append("Chains: %s" % (join([chain for chain in d["ExperimentChains"]], ", ")))
        str.append("Mutations:")
        for mutation in d["Mutations"]:
            str.append("\t%s%s: %s -> %s" % (mutation[FieldNames_.Chain], mutation[FieldNames_.ResidueID], mutation[FieldNames_.WildTypeAA], mutation[FieldNames_.MutantAA]))
        str.append("Experimental Scores:")
        for score in d["ExperimentScores"]:
            n = score[FieldNames_.NumberOfMeasurements]
            if n > 1:
                str.append("\t%s\t%0.2f (%d measurements)" % (score[FieldNames_.SourceID], score[FieldNames_.ddG], score[FieldNames_.NumberOfMeasurements]))
            else:
                str.append("\t%s\t%0.2f" % (score[FieldNames_.SourceID], score[FieldNames_.ddG]))
        return join(str, "\n")
    def commit(self, testonly = False, pdbPath = None, mutationAllowedToBeStoredDespiteMissingCoordinates = False):
        '''Commits the set of experiments associated with the mutation to the database. Returns the unique ID of the associated Experiment record.'''
        d = self.dict
        failed = False
        # Skip committing if any score from this source is already stored.
        for score in d["ExperimentScores"]:
            scoresPresent = True
            results = self.ddGdb.locked_execute("SELECT Source, SourceID FROM Experiment INNER JOIN ExperimentScore ON Experiment.ID=ExperimentID WHERE Source=%s AND SourceID=%s", parameters = (d[FieldNames_.Source], score[FieldNames_.SourceID]))
            if results:
                return
        if len(d["ExperimentScores"]) == 0:
            raise Exception("This experiment has no associated scores.")
        if not d[FieldNames_.ScoreVariance]:
            self.mergeScores()
        # Ensure mutant structures exist in the Structure table first.
        if d["Mutants"]:
            for mutant in d["Mutants"].keys():
                MutantStructure = PDBStructure(self.ddGdb, mutant)
                results = self.ddGdb.execute("SELECT PDB_ID FROM Structure WHERE PDB_ID = %s", parameters = (MutantStructure.dict[self.ddGdb.FlatFieldNames.PDB_ID]))
                if not results:
                    MutantStructure.commit()
        # Sanity check that the chain information is correct (ProTherm has issues)
        pdbID = d[FieldNames_.Structure]
        associatedRecords = sorted([score[FieldNames_.SourceID] for score in d["ExperimentScores"]])
        associatedRecordsStr = "%s (records: %s)" % (d[FieldNames_.Source], join(map(str, sorted([score[FieldNames_.SourceID] for score in d["ExperimentScores"]])),", "))
        chainsInPDB = PDBChains.get(d[FieldNames_.Structure])
        if not chainsInPDB:
            raise Exception("The chains for %s were not read in properly." % associatedRecordsStr)
        for c in self.dict["ExperimentChains"]:
            if not c in chainsInPDB:
                # A single-chain mismatch is recoverable (use the PDB's only chain);
                # anything else is a hard error.
                if len(chainsInPDB) == 1 and len(self.dict["ExperimentChains"]) == 1:
                    colortext.warning("%s: Chain '%s' of %s does not exist in the PDB %s. Chain %s exists. Use that chain instead." % (pdbID, c, associatedRecordsStr, pdbID, chainsInPDB[0]))
                    self.ddGdb.addChainWarning(pdbID, associatedRecords, c)
                    failed = True
                else:
                    self.ddGdb.addChainError(pdbID, c)
                    raise colortext.Exception("Error committing experiment:\n%s: Chain '%s' of %s does not exist in the PDB %s. Chains %s exist." % (pdbID, c, associatedRecordsStr, pdbID, join(chainsInPDB, ", ")))
        # Sanity check that the wildtypes of all mutations are correct
        if pdbPath:
            WildTypeStructure = PDBStructure(self.ddGdb, pdbID, filepath = os.path.join(pdbPath, "%s.pdb" % pdbID))
        else:
            WildTypeStructure = PDBStructure(self.ddGdb, pdbID)
        contents = WildTypeStructure.getPDBContents()
        pdb = PDB(contents.split("\n"))
        # Residues that interfere with downstream computation (selenocysteine etc.).
        badResidues = ["CSE", "MSE"]
        foundRes = pdb.CheckForPresenceOf(badResidues)
        if foundRes:
            colortext.warning("The PDB %s contains residues which could affect computation (%s)." % (pdbID, join(foundRes, ", ")))
            failed = True
            for res in foundRes:
                colortext.warning("The PDB %s contains %s. Check." % (pdbID, res))
        for mutation in d["Mutations"]:
            foundMatch = False
            for resid, wtaa in sorted(pdb.ProperResidueIDToAAMap().iteritems()):
                # resid is a chain letter followed by the (padded) residue number.
                c = resid[0]
                resnum = resid[1:].strip()
                if mutation[FieldNames_.Chain] == c and mutation[FieldNames_.ResidueID] == resnum and mutation[FieldNames_.WildTypeAA] == wtaa:
                    foundMatch = True
            if not foundMatch and not(mutationAllowedToBeStoredDespiteMissingCoordinates):
                #raise colortext.Exception("%s: Could not find a match for mutation %s %s:%s -> %s in %s." % (pdbID, mutation[FieldNames_.Chain], mutation[FieldNames_.ResidueID], mutation[FieldNames_.WildTypeAA], mutation[FieldNames_.MutantAA], associatedRecordsStr ))
                colortext.error("%s: Could not find a match for mutation %s %s:%s -> %s in %s." % (pdbID, mutation[FieldNames_.Chain], mutation[FieldNames_.ResidueID], mutation[FieldNames_.WildTypeAA], mutation[FieldNames_.MutantAA], associatedRecordsStr ))
                failed = True
                #raise Exception(colortext.make_error("%s: Could not find a match for mutation %s %s:%s -> %s in %s." % (pdbID, mutation[FieldNames_.Chain], mutation[FieldNames_.ResidueID], mutation[FieldNames_.WildTypeAA], mutation[FieldNames_.MutantAA], associatedRecordsStr )))
        # To disable adding new experiments: return here
        if failed:
            return False
        SQL = 'INSERT INTO Experiment (Structure, Source) VALUES (%s, %s);'
        vals = (d[FieldNames_.Structure], d[FieldNames_.Source])
        #print(SQL % vals)
        if not testonly:
            self.ddGdb.locked_execute(SQL, parameters = vals)
            self.databaseID = self.ddGdb.getLastRowID()
            ExperimentID = self.databaseID
            #print(ExperimentID)
        else:
            ExperimentID = None
        # Insert the dependent rows (chains, interface, mutants, mutations, scores).
        for chain in d["ExperimentChains"]:
            SQL = 'INSERT INTO ExperimentChain (ExperimentID, Chain) VALUES (%s, %s);'
            vals = (ExperimentID, chain)
            #print(SQL % vals)
            if not testonly:
                self.ddGdb.locked_execute(SQL, parameters = vals)
        interface = d["Interface"]
        if interface:
            SQL = 'INSERT INTO ExperimentInterface (ExperimentID, Interface) VALUES (%s, %s);'
            vals = (ExperimentID, interface)
            #print(SQL % vals)
            if not testonly:
                self.ddGdb.locked_execute(SQL, parameters = vals)
        for mutant in d["Mutants"].keys():
            SQL = 'INSERT INTO ExperimentMutant (ExperimentID, Mutant) VALUES (%s, %s);'
            vals = (ExperimentID, mutant)
            #print(SQL % vals)
            if not testonly:
                self.ddGdb.locked_execute(SQL, parameters = vals)
        for mutation in d["Mutations"]:
            SQL = 'INSERT INTO ExperimentMutation (ExperimentID, Chain, ResidueID, WildTypeAA, MutantAA) VALUES (%s, %s, %s, %s, %s);'
            vals = (ExperimentID, mutation[FieldNames_.Chain], mutation[FieldNames_.ResidueID], mutation[FieldNames_.WildTypeAA], mutation[FieldNames_.MutantAA])
            #print(SQL % vals)
            if not testonly:
                self.ddGdb.locked_execute(SQL, parameters = vals)
        for score in d["ExperimentScores"]:
            SQL = 'INSERT INTO ExperimentScore (ExperimentID, SourceID, ddG, NumberOfMeasurements) VALUES (%s, %s, %s, %s);'
            vals = (ExperimentID, score[FieldNames_.SourceID], score[FieldNames_.ddG], score[FieldNames_.NumberOfMeasurements])
            #print(SQL % vals)
            if not testonly:
                self.ddGdb.locked_execute(SQL, parameters = vals)
        if not testonly:
            return self.databaseID
        else:
            return None
| mit |
zzxuanyuan/root-compressor-dummy | interpreter/llvm/src/tools/clang/tools/scan-build-py/tests/unit/test_report.py | 22 | 5508 | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import libscanbuild.report as sut
import unittest
import os
import os.path
def run_bug_parse(content):
    """Write *content* into a throw-away HTML file and return the first bug
    parsed from it (or None when the parser yields nothing)."""
    with libear.TemporaryDirectory() as tmpdir:
        report = os.path.join(tmpdir, 'test.html')
        with open(report, 'w') as handle:
            handle.writelines(content)
        return next(iter(sut.parse_bug_html(report)), None)
def run_crash_parse(content, preproc):
    """Write *content* into '<preproc>.info.txt' in a throw-away directory and
    return the parsed crash-report dictionary."""
    with libear.TemporaryDirectory() as tmpdir:
        info_file = os.path.join(tmpdir, preproc + '.info.txt')
        with open(info_file, 'w') as handle:
            handle.writelines(content)
        return sut.parse_crash(info_file)
class ParseFileTest(unittest.TestCase):
    """Unit tests for the report/crash parsers in libscanbuild.report."""

    def test_parse_bug(self):
        # Minimal analyzer HTML report carrying the BUG* metadata comments.
        content = [
            "some header\n",
            "<!-- BUGDESC Division by zero -->\n",
            "<!-- BUGTYPE Division by zero -->\n",
            "<!-- BUGCATEGORY Logic error -->\n",
            "<!-- BUGFILE xx -->\n",
            "<!-- BUGLINE 5 -->\n",
            "<!-- BUGCOLUMN 22 -->\n",
            "<!-- BUGPATHLENGTH 4 -->\n",
            "<!-- BUGMETAEND -->\n",
            "<!-- REPORTHEADER -->\n",
            "some tails\n"]
        result = run_bug_parse(content)
        self.assertEqual(result['bug_category'], 'Logic error')
        self.assertEqual(result['bug_path_length'], 4)
        self.assertEqual(result['bug_line'], 5)
        self.assertEqual(result['bug_description'], 'Division by zero')
        self.assertEqual(result['bug_type'], 'Division by zero')
        self.assertEqual(result['bug_file'], 'xx')

    def test_parse_bug_empty(self):
        # An empty report must fall back to the parser defaults.
        content = []
        result = run_bug_parse(content)
        self.assertEqual(result['bug_category'], 'Other')
        self.assertEqual(result['bug_path_length'], 1)
        self.assertEqual(result['bug_line'], 0)

    def test_parse_crash(self):
        # Crash-info layout: line 1 is the source file, line 2 the problem text.
        content = [
            "/some/path/file.c\n",
            "Some very serious Error\n",
            "bla\n",
            "bla-bla\n"]
        result = run_crash_parse(content, 'file.i')
        self.assertEqual(result['source'], content[0].rstrip())
        self.assertEqual(result['problem'], content[1].rstrip())
        self.assertEqual(os.path.basename(result['file']),
                         'file.i')
        self.assertEqual(os.path.basename(result['info']),
                         'file.i.info.txt')
        self.assertEqual(os.path.basename(result['stderr']),
                         'file.i.stderr.txt')

    def test_parse_real_crash(self):
        # End-to-end: let the runner generate a genuine failure report from a
        # broken C file, then parse that report back.
        import libscanbuild.runner as sut2
        import re
        with libear.TemporaryDirectory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.c')
            with open(filename, 'w') as handle:
                handle.write('int main() { return 0')
            # produce failure report
            opts = {
                'clang': 'clang',
                'directory': os.getcwd(),
                'flags': [],
                'file': filename,
                'output_dir': tmpdir,
                'language': 'c',
                'error_type': 'other_error',
                'error_output': 'some output',
                'exit_code': 13
            }
            sut2.report_failure(opts)
            # find the info file
            pp_file = None
            for root, _, files in os.walk(tmpdir):
                keys = [os.path.join(root, name) for name in files]
                for key in keys:
                    if re.match(r'^(.*/)+clang(.*)\.i$', key):
                        pp_file = key
            self.assertIsNot(pp_file, None)
            # read the failure report back
            result = sut.parse_crash(pp_file + '.info.txt')
            self.assertEqual(result['source'], filename)
            self.assertEqual(result['problem'], 'Other Error')
            self.assertEqual(result['file'], pp_file)
            self.assertEqual(result['info'], pp_file + '.info.txt')
            self.assertEqual(result['stderr'], pp_file + '.stderr.txt')
class ReportMethodTest(unittest.TestCase):
    """Tests for the path-manipulation helper chop()."""

    def test_chop(self):
        # chop() strips a directory prefix; an empty prefix is a no-op.
        self.assertEqual('file', sut.chop('/prefix', '/prefix/file'))
        self.assertEqual('file', sut.chop('/prefix/', '/prefix/file'))
        self.assertEqual('lib/file', sut.chop('/prefix/', '/prefix/lib/file'))
        self.assertEqual('/prefix/file', sut.chop('', '/prefix/file'))

    def test_chop_when_cwd(self):
        # Paths outside the prefix are expressed relative to it via '..'.
        self.assertEqual('../src/file', sut.chop('/cwd', '/src/file'))
        self.assertEqual('../src/file', sut.chop('/prefix/cwd',
                                                 '/prefix/src/file'))
class GetPrefixFromCompilationDatabaseTest(unittest.TestCase):
    """Tests for commonprefix(), which finds the shared directory of file paths."""

    def test_with_different_filenames(self):
        self.assertEqual(
            sut.commonprefix(['/tmp/a.c', '/tmp/b.c']), '/tmp')

    def test_with_different_dirnames(self):
        # The prefix must stop at a whole path component, not mid-name.
        self.assertEqual(
            sut.commonprefix(['/tmp/abs/a.c', '/tmp/ack/b.c']), '/tmp')

    def test_no_common_prefix(self):
        self.assertEqual(
            sut.commonprefix(['/tmp/abs/a.c', '/usr/ack/b.c']), '/')

    def test_with_single_file(self):
        # A single file's prefix is its containing directory.
        self.assertEqual(
            sut.commonprefix(['/tmp/a.c']), '/tmp')

    def test_empty(self):
        # No inputs yields the empty prefix.
        self.assertEqual(
            sut.commonprefix([]), '')
| lgpl-2.1 |
aspose-barcode/Aspose.BarCode-for-Java | Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWithBarcode/UtilityFeatures/CodeText.py | 2 | 2123 | from asposebarcode import Settings
from com.aspose.barcode import BarCodeBuilder
from com.aspose.barcode import Symbology
from com.aspose.barcode import CodeLocation
from com.aspose.barcode import BarCodeImageFormat
from java.awt import Color
class CodeText:
    # Jython/Python 2 example: demonstrates Aspose.BarCode code-text styling
    # and encoding. Both demos run from the constructor.
    def __init__(self):
        # Set appearance of the code text
        self.set_appearance()
        # Set code text for Barcode
        self.set_codetext()

    def set_appearance(self):
        # Generate a Code128 barcode with red code text rendered above the bars.
        dataDir = Settings.dataDir + 'WorkingWithBarcode/UtilityFeatures/CodeText/'
        bb = BarCodeBuilder()
        # Set up code text (data to be encoded)
        bb.setCodeText("1234567")
        # Set up code text color
        # NOTE(review): 'color'/'codeLocation'/'symbology'/'barCodeImageFormat'
        # below are just aliases for the imported Java classes; the constants
        # (e.g. Color.RED) could be used directly.
        color = Color
        bb.setCodeTextColor(color.RED)
        # Set the location of the code text to above the barcode
        codeLocation= CodeLocation
        bb.setCodeLocation(codeLocation.Above)
        #Increase the space between code text and barcode to 1 point
        bb.setCodeTextSpace(1.0)
        # Set the symbology type to Code128
        symbology= Symbology
        bb.setSymbologyType(symbology.Code128)
        # Save the image to your system and set its image format to Jpeg
        barCodeImageFormat= BarCodeImageFormat
        bb.save(dataDir + "barcode.jpg", barCodeImageFormat.Jpeg)
        # Display Status
        print "Barcode with custom appearance saved as JPEG image successfully."

    def set_codetext(self):
        # Generate a Code128 barcode with custom bar width and save it.
        dataDir = Settings.dataDir + 'WorkingWithBarcode/UtilityFeatures/CodeText/'
        # Instantiate barcode object
        bb = BarCodeBuilder()
        # Set the code text for the barcode
        bb.setCodeText("Aspose-123")
        # Set the symbology type to Code128
        symbology= Symbology
        bb.setSymbologyType(symbology.Code128)
        # Set the width of the bars to 0.5 millimeter
        bb.setxDimension(0.5)
        # save the barcode image to file
        bb.save(dataDir + "codetext.out.jpg")
        # Print message
        print "Barcode image generated successfully."
if __name__ == '__main__':
CodeText() | mit |
mozilla/kitsune | kitsune/wiki/widgets.py | 1 | 2109 |
from django import forms
from django.template.loader import render_to_string
from kitsune.products.models import Topic
from kitsune.wiki.models import Document
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
class ProductTopicsAndSubtopicsWidget(forms.widgets.SelectMultiple):
    """A widget to render topics organized by product and with subtopics."""

    def render(self, name, value, attrs=None, renderer=None):
        """Render every top-level topic with its checked state and subtopics."""
        all_topics = Topic.objects.all()
        top_level = [topic for topic in all_topics if topic.parent_id is None]
        for parent in top_level:
            self.process_topic(value, parent)
            parent.my_subtopics = [
                child for child in all_topics if child.parent_id == parent.id
            ]
            for child in parent.my_subtopics:
                self.process_topic(value, child)
        return render_to_string(
            "wiki/includes/product_topics_widget.html",
            {
                "topics": top_level,
                "name": name,
            },
        )

    def process_topic(self, value, topic):
        """Set topic.checked: *value* may be a single id or a collection of ids
        (strings are deliberately excluded from the collection case)."""
        if isinstance(value, int):
            topic.checked = topic.id == value
        else:
            topic.checked = (
                not isinstance(value, str)
                and isinstance(value, Iterable)
                and topic.id in value
            )
class RelatedDocumentsWidget(forms.widgets.SelectMultiple):
    """A widget to render the related documents list and search field."""

    def render(self, name, value, attrs=None, renderer=None):
        """Render the related-documents picker; *value* may be one id, a
        collection of ids, or anything else (treated as no selection)."""
        if isinstance(value, int):
            doc_ids = [value]
        elif not isinstance(value, str) and isinstance(value, Iterable):
            doc_ids = value
        else:
            doc_ids = None

        if doc_ids is None:
            related_documents = Document.objects.none()
        else:
            related_documents = Document.objects.filter(id__in=doc_ids)

        return render_to_string(
            "wiki/includes/related_docs_widget.html",
            {"related_documents": related_documents, "name": name},
        )
| bsd-3-clause |
prodromou87/gem5 | src/mem/ruby/system/Sequencer.py | 11 | 3096 | # Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from MemObject import MemObject
class RubyPort(MemObject):
    # Abstract base SimObject for Ruby memory-system ports; concrete subclasses
    # (sequencers) connect CPU ports to the Ruby protocol machinery.
    type = 'RubyPort'
    abstract = True
    cxx_header = "mem/ruby/system/RubyPort.hh"
    slave = VectorSlavePort("CPU slave port")
    master = VectorMasterPort("CPU master port")
    version = Param.Int(0, "")
    pio_port = MasterPort("Ruby_pio_port")
    using_ruby_tester = Param.Bool(False, "")
    using_network_tester = Param.Bool(False, "")
    # When True, the port atomically updates the backing physical memory.
    access_phys_mem = Param.Bool(False,
        "should the rubyport atomically update phys_mem")
    ruby_system = Param.RubySystem("")
    system = Param.System(Parent.any, "system object")
    support_data_reqs = Param.Bool(True, "data cache requests supported")
    support_inst_reqs = Param.Bool(True, "inst cache requests supported")
class RubyPortProxy(RubyPort):
    # Thin functional-access proxy port; always updates physical memory.
    type = 'RubyPortProxy'
    cxx_header = "mem/ruby/system/RubyPortProxy.hh"
    access_phys_mem = True
class RubySequencer(RubyPort):
    # CPU-side sequencer: issues L1 I/D cache requests into the Ruby protocol.
    type = 'RubySequencer'
    cxx_class = 'Sequencer'
    cxx_header = "mem/ruby/system/Sequencer.hh"
    icache = Param.RubyCache("")
    dcache = Param.RubyCache("")
    max_outstanding_requests = Param.Int(16,
        "max requests (incl. prefetches) outstanding")
    deadlock_threshold = Param.Cycles(500000,
        "max outstanding cycles for a request before deadlock/livelock declared")
class DMASequencer(RubyPort):
    # Sequencer for DMA devices; accesses physical memory directly.
    type = 'DMASequencer'
    cxx_header = "mem/ruby/system/DMASequencer.hh"
    access_phys_mem = True
| bsd-3-clause |
ArchiveTeam/terroroftinytown | terroroftinytown/services/registry.py | 1 | 2430 | from __future__ import unicode_literals
from terroroftinytown.services.adjix import AdjixService
from terroroftinytown.services.awesm import AwesmService
from terroroftinytown.services.base import DefaultService
from terroroftinytown.services.bitly import BitlyService, Bitly6Service
from terroroftinytown.services.feedly import FeedlyService, Feedly8Service
from terroroftinytown.services.isgd import IsgdService, Isgd6Service
from terroroftinytown.services.myspac import MyspAcService
from terroroftinytown.services.owly import OwlyService
from terroroftinytown.services.sharedby import SharedBy6Service, SharedByService
from terroroftinytown.services.shares import SharesService
from terroroftinytown.services.snipurl import SnipurlService
from terroroftinytown.services.tighturl import TighturlService
from terroroftinytown.services.tinyurl import TinyurlService, Tinyurl7Service
from terroroftinytown.services.vgd import VgdService, Vgd6Service
from terroroftinytown.services.vitrue import VitrueService
from terroroftinytown.services.xco import XCOService
from terroroftinytown.services.yatuc import YatucService
from terroroftinytown.services.alturl import AlturlService
from terroroftinytown.services.tinyurlhu import TinyurlHu4Service,\
TinyurlHuService
from terroroftinytown.services.googl import GooglService
# Mapping of unicode service-name strings to BaseService classes.
registry = {
    '_default': DefaultService,
    'isgd': IsgdService,
    'isgd_6': Isgd6Service,
    'vgd': VgdService,
    'vgd_6': Vgd6Service,
    'bitly': BitlyService,
    'bitly_6': Bitly6Service,
    'xco': XCOService,
    'pub-vitrue-com': VitrueService,
    'tighturl-com': TighturlService,
    'tinyurl': TinyurlService,
    'tinyurl_7': Tinyurl7Service,
    'adjix': AdjixService,
    'yatuc': YatucService,
    'shar-es': SharesService,
    'feedly': FeedlyService,
    'feedly_8': Feedly8Service,
    'awe-sm': AwesmService,
    'ow-ly': OwlyService,
    'snipurl': SnipurlService,
    # Both snipurl keys deliberately share one service implementation.
    'snipurl_range2': SnipurlService,
    'sharedby-co': SharedByService,
    'sharedby-co_6': SharedBy6Service,
    'mysp-ac': MyspAcService,
    'alturl-com': AlturlService,
    'tinyurl-hu': TinyurlHuService,
    'tinyurl-hu_4': TinyurlHu4Service,
    'goo-gl': GooglService,
}
| mit |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/encodings/cp1256.py | 272 | 12814 | """ Python Character Mapping Codec cp1256 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1256.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp1256 codec backed by the module-level charmap tables.
    # (This file is generated by gencodec.py; keep code identical to its output.)

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Incremental encoding needs no state for a charmap codec.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Incremental decoding needs no state for a charmap codec.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines the cp1256 Codec with the stream-writer machinery; no extra state.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines the cp1256 Codec with the stream-reader machinery; no extra state.
    pass
### encodings module API
def getregentry():
    # Entry point used by the encodings package to register this codec.
    return codecs.CodecInfo(
        name='cp1256',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\u067e' # 0x81 -> ARABIC LETTER PEH
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0679' # 0x8A -> ARABIC LETTER TTEH
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\u0686' # 0x8D -> ARABIC LETTER TCHEH
'\u0698' # 0x8E -> ARABIC LETTER JEH
'\u0688' # 0x8F -> ARABIC LETTER DDAL
'\u06af' # 0x90 -> ARABIC LETTER GAF
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u06a9' # 0x98 -> ARABIC LETTER KEHEH
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0691' # 0x9A -> ARABIC LETTER RREH
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\u200c' # 0x9D -> ZERO WIDTH NON-JOINER
'\u200d' # 0x9E -> ZERO WIDTH JOINER
'\u06ba' # 0x9F -> ARABIC LETTER NOON GHUNNA
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u060c' # 0xA1 -> ARABIC COMMA
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u06be' # 0xAA -> ARABIC LETTER HEH DOACHASHMEE
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u061b' # 0xBA -> ARABIC SEMICOLON
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\u06c1' # 0xC0 -> ARABIC LETTER HEH GOAL
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0637' # 0xD8 -> ARABIC LETTER TAH
'\u0638' # 0xD9 -> ARABIC LETTER ZAH
'\u0639' # 0xDA -> ARABIC LETTER AIN
'\u063a' # 0xDB -> ARABIC LETTER GHAIN
'\u0640' # 0xDC -> ARABIC TATWEEL
'\u0641' # 0xDD -> ARABIC LETTER FEH
'\u0642' # 0xDE -> ARABIC LETTER QAF
'\u0643' # 0xDF -> ARABIC LETTER KAF
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\u0644' # 0xE1 -> ARABIC LETTER LAM
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0645' # 0xE3 -> ARABIC LETTER MEEM
'\u0646' # 0xE4 -> ARABIC LETTER NOON
'\u0647' # 0xE5 -> ARABIC LETTER HEH
'\u0648' # 0xE6 -> ARABIC LETTER WAW
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0649' # 0xEC -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xED -> ARABIC LETTER YEH
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\u064b' # 0xF0 -> ARABIC FATHATAN
'\u064c' # 0xF1 -> ARABIC DAMMATAN
'\u064d' # 0xF2 -> ARABIC KASRATAN
'\u064e' # 0xF3 -> ARABIC FATHA
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u064f' # 0xF5 -> ARABIC DAMMA
'\u0650' # 0xF6 -> ARABIC KASRA
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0651' # 0xF8 -> ARABIC SHADDA
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\u0652' # 0xFA -> ARABIC SUKUN
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/kombu/async/timer.py | 27 | 6546 | # -*- coding: utf-8 -*-
"""
kombu.async.timer
=================
Timer scheduling Python callbacks.
"""
from __future__ import absolute_import
import heapq
import sys
from collections import namedtuple
from datetime import datetime
from functools import wraps
from time import time
from weakref import proxy as weakrefproxy
from kombu.five import monotonic
from kombu.log import get_logger
from kombu.utils.compat import timedelta_seconds
try:
from pytz import utc
except ImportError:
utc = None
DEFAULT_MAX_INTERVAL = 2
EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
IS_PYPY = hasattr(sys, 'pypy_version_info')
logger = get_logger(__name__)
__all__ = ['Entry', 'Timer', 'to_timestamp']
scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))
def to_timestamp(d, default_timezone=utc):
    """Convert a :class:`~datetime.datetime` to a POSIX timestamp.

    Naive datetimes are assumed to be in ``default_timezone``.
    Non-datetime values are returned unchanged.
    """
    if not isinstance(d, datetime):
        return d
    # Attach the default zone to naive datetimes before computing the delta.
    aware = d if d.tzinfo is not None else d.replace(tzinfo=default_timezone)
    return timedelta_seconds(aware - EPOCH)
class Entry(object):
    """A single scheduled callback (``fun(*args, **kwargs)``) held by the timer."""
    if not IS_PYPY: # pragma: no cover
        # Slots keep per-entry memory low on CPython; skipped on PyPy.
        __slots__ = (
            'fun', 'args', 'kwargs', 'tref', 'cancelled',
            '_last_run', '__weakref__',
        )
    def __init__(self, fun, args=None, kwargs=None):
        # fun: callable invoked when the entry fires.
        # args/kwargs: arguments forwarded to ``fun`` on each call.
        self.fun = fun
        self.args = args or []
        self.kwargs = kwargs or {}
        # Weak proxy to self: holders of ``tref`` do not keep the entry
        # alive once the owner drops it.
        self.tref = weakrefproxy(self)
        # Timestamp of the last invocation (used by Timer.call_repeatedly).
        self._last_run = None
        self.cancelled = False
    def __call__(self):
        """Invoke the wrapped callable with the stored arguments."""
        return self.fun(*self.args, **self.kwargs)
    def cancel(self):
        """Mark this entry cancelled so the scheduler skips it."""
        try:
            self.tref.cancelled = True
        except ReferenceError: # pragma: no cover
            # The entry was already garbage collected; nothing to cancel.
            pass
    def __repr__(self):
        return '<TimerEntry: {0}(*{1!r}, **{2!r})'.format(
            self.fun.__name__, self.args, self.kwargs)
    def __hash__(self):
        # Hash on the callable plus the repr of its arguments so that
        # identical scheduling requests hash alike even when the raw
        # args/kwargs are unhashable (e.g. lists/dicts).
        return hash((self.fun, repr(self.args), repr(self.kwargs)))
    # must not use hash() to order entries
    def __lt__(self, other):
        return id(self) < id(other)
    def __gt__(self, other):
        return id(self) > id(other)
    def __le__(self, other):
        return id(self) <= id(other)
    def __ge__(self, other):
        return id(self) >= id(other)
    def __eq__(self, other):
        # Equality deliberately mirrors __hash__ rather than identity.
        return hash(self) == hash(other)
    def __ne__(self, other):
        return not self.__eq__(other)
class Timer(object):
    """ETA scheduler.

    Entries are kept in a heap ordered by (eta, priority); consumers
    drive the schedule by iterating the timer (see :meth:`__iter__`).
    """
    Entry = Entry
    on_error = None
    def __init__(self, max_interval=None, on_error=None, **kwargs):
        # max_interval caps how long a consumer may sleep between polls,
        # even when the next entry's ETA is further away.
        self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
        self.on_error = on_error or self.on_error
        # Heap of ``scheduled(eta, priority, entry)`` tuples.
        self._queue = []
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.stop()
    def call_at(self, eta, fun, args=(), kwargs={}, priority=0):
        """Schedule ``fun`` to be called at absolute time ``eta``."""
        return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)
    def call_after(self, secs, fun, args=(), kwargs={}, priority=0):
        """Schedule ``fun`` to be called ``secs`` seconds from now."""
        return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)
    def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):
        """Schedule ``fun`` every ``secs`` seconds until cancelled."""
        tref = self.Entry(fun, args, kwargs)
        @wraps(fun)
        def _reschedules(*args, **kwargs):
            # Only fire when at least ``secs`` elapsed since the last run,
            # then always re-arm (in ``finally``) unless cancelled.
            last, now = tref._last_run, monotonic()
            lsince = (now - tref._last_run) if last else secs
            try:
                if lsince and lsince >= secs:
                    tref._last_run = now
                    return fun(*args, **kwargs)
            finally:
                if not tref.cancelled:
                    last = tref._last_run
                    next = secs - (now - last) if last else secs
                    self.enter_after(next, tref, priority)
        tref.fun = _reschedules
        tref._last_run = None
        return self.enter_after(secs, tref, priority)
    def enter_at(self, entry, eta=None, priority=0, time=time):
        """Enter function into the scheduler.
        :param entry: Item to enter.
        :keyword eta: Scheduled time as a :class:`datetime.datetime` object.
        :keyword priority: Unused.
        """
        if eta is None:
            eta = time()
        if isinstance(eta, datetime):
            try:
                eta = to_timestamp(eta)
            except Exception as exc:
                # A broken eta is reported via the error handler; the
                # entry is silently dropped if the handler swallows it.
                if not self.handle_error(exc):
                    raise
                return
        return self._enter(eta, priority, entry)
    def enter_after(self, secs, entry, priority=0, time=time):
        """Enter ``entry`` to fire ``secs`` seconds from now."""
        return self.enter_at(entry, time() + secs, priority)
    def _enter(self, eta, priority, entry, push=heapq.heappush):
        # Push onto the heap; heap order is (eta, priority, entry).
        push(self._queue, scheduled(eta, priority, entry))
        return entry
    def apply_entry(self, entry):
        """Execute ``entry`` now, routing failures to the error handler."""
        try:
            entry()
        except Exception as exc:
            if not self.handle_error(exc):
                logger.error('Error in timer: %r', exc, exc_info=True)
    def handle_error(self, exc_info):
        # Returns True when a custom handler consumed the error.
        if self.on_error:
            self.on_error(exc_info)
            return True
    def stop(self):
        pass
    def __iter__(self, min=min, nowfun=time,
                 pop=heapq.heappop, push=heapq.heappush):
        """This iterator yields a tuple of ``(entry, wait_seconds)``,
        where if entry is :const:`None` the caller should wait
        for ``wait_seconds`` until it polls the schedule again."""
        max_interval = self.max_interval
        queue = self._queue
        while 1:
            if queue:
                # Peek first; only pop when the head is actually due.
                eventA = queue[0]
                now, eta = nowfun(), eventA[0]
                if now < eta:
                    yield min(eta - now, max_interval), None
                else:
                    eventB = pop(queue)
                    if eventB is eventA:
                        entry = eventA[2]
                        if not entry.cancelled:
                            yield None, entry
                        continue
                    else:
                        # Another event was popped first (heap changed
                        # between peek and pop); put it back.
                        push(queue, eventB)
            else:
                yield None, None
    def clear(self):
        self._queue[:] = [] # atomic, without creating a new list.
    def cancel(self, tref):
        tref.cancel()
    def __len__(self):
        return len(self._queue)
    def __nonzero__(self):
        # Python 2 truthiness: the timer is always truthy even when empty.
        return True
    @property
    def queue(self, _pop=heapq.heappop):
        """Snapshot of underlying datastructure."""
        # Pops from a *copy* so entries come out in heap (eta) order
        # without mutating the live schedule.
        events = list(self._queue)
        return [_pop(v) for v in [events] * len(events)]
    @property
    def schedule(self):
        return self
| bsd-3-clause |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/webpagereplay/third_party/dns/version.py | 215 | 1267 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""dnspython release version information."""
MAJOR = 1
MINOR = 8
MICRO = 0
RELEASELEVEL = 0x0f
SERIAL = 0

# Release-level markers: 0x0f is a final release, 0x00 a development
# snapshot; anything else encodes a pre-release level in the string.
_FINAL = 0x0f
_DEVELOPMENT = 0x00

if RELEASELEVEL == _FINAL:
    version = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
elif RELEASELEVEL == _DEVELOPMENT:
    version = '%d.%d.%dx%d' % (MAJOR, MINOR, MICRO, SERIAL)
else:
    version = '%d.%d.%d%x%d' % (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL)

# Single-integer form, one byte/nibble per component (like sys.hexversion).
hexversion = (
    (MAJOR << 24)
    | (MINOR << 16)
    | (MICRO << 8)
    | (RELEASELEVEL << 4)
    | SERIAL
)
| bsd-3-clause |
jmvrbanac/Specter | specter/parallel.py | 2 | 5071 | import multiprocessing as mp
import threading
from time import time
from coverage import coverage
from specter.spec import TestEvent, DescribeEvent
class ExecuteTestProcess(mp.Process):
    """Worker process that pulls test-case wrappers off a shared queue,
    executes them, and streams completed wrappers back over a pipe.

    ``all_cases`` / ``all_parents`` map ids back to the real callables and
    parent describes, because only ids survive queue serialization.
    """
    def __init__(self, work_queue, all_cases, all_parents,
                 pipe, track_coverage=False, coverage_omit=None):
        super(ExecuteTestProcess, self).__init__()
        self.work_queue = work_queue
        self.all_cases = all_cases
        self.all_parents = all_parents
        # Shared counter of executed cases, readable by the parent process.
        self.worked = mp.Value('i', 0)
        self.pipe = pipe
        self.coverage = None
        if track_coverage:
            # data_suffix=True gives each worker its own coverage data file.
            self.coverage = coverage(omit=coverage_omit, data_suffix=True)
            self.coverage._warn_no_data = False
    def run(self): # pragma: no cover
        """ Note: CI Coverage is turned off due to it not showing covered
        even with there being tests that run this function.
        """
        last_time = time()
        completed = []
        if self.coverage:
            self.coverage.start()
        while True:
            # Get item and get real function to execute
            case_wrapper = self.work_queue.get()
            if case_wrapper == 'STOP':
                # Flush any buffered results, then signal completion with
                # a None sentinel before shutting down.
                if len(completed) > 0:
                    self.pipe.send(completed)
                    # Fix: clear the local buffer (the original assigned
                    # ``self.completed``, leaving the local list stale).
                    completed = []
                self.pipe.send(None)
                if self.coverage:
                    self.coverage.stop()
                    self.coverage.save()
                return
            # Resolve ids back into the real case function and parent.
            case_wrapper.case_func = self.all_cases[case_wrapper.case_func]
            case_wrapper.parent = self.all_parents[case_wrapper.parent]
            case_wrapper.parent._state.before_each()
            case_wrapper.execute(case_wrapper.parent._state)
            case_wrapper.parent._state.after_each()
            self.worked.value += 1
            completed.append(case_wrapper)
            # Flush the completed buffer at most every 10ms to batch
            # pipe traffic instead of sending one wrapper at a time.
            if completed and time() >= (last_time + 0.01):
                self.pipe.send(completed)
                completed = []
                last_time = time()
class ParallelManager(object):
    """Coordinates a pool of ExecuteTestProcess workers.

    Cases are enqueued by id; results are synced back from worker pipes
    and re-attached to the in-process wrapper/parent objects.
    """
    def __init__(self, num_processes=6, track_coverage=False,
                 coverage_omit=None):
        self.processes = []
        self.num_processes = num_processes
        self.stops_hit = 0
        # (sic) attribute name has a typo but is part of the public surface.
        self.thead_lock = threading.Lock()
        self.work_queue = mp.Queue()
        self.active_pipes = []
        # id -> real case function / parent describe, used to rehydrate
        # wrappers coming back from worker processes.
        self.case_functions = {}
        self.case_parents = {}
        self.track_coverage = track_coverage
        self.coverage_omit = coverage_omit
    def add_to_queue(self, case_wrapper):
        """Queue a case for execution and remember its id mappings."""
        self.work_queue.put(case_wrapper)
        # Keep track of wrappers and parents
        self.case_functions[case_wrapper.id] = case_wrapper.case_func
        self.case_parents[case_wrapper.parent.id] = case_wrapper.parent
    def sync_wrappers(self, wrapper_list):
        """Re-attach returned wrappers to local objects and fire events.

        Workers send wrappers whose ``parent``/``case_func`` fields hold
        ids; this swaps the real objects back in and dispatches
        completion events up the describe chain.
        """
        for wrapper in wrapper_list:
            parent_id = wrapper.parent
            wrapper_id = wrapper.case_func
            parent = wrapper.parent = self.case_parents[parent_id]
            wrapper.case_func = self.case_functions[wrapper_id]
            wrapper.parent.cases[wrapper_id] = wrapper
            wrapper.parent.top_parent.dispatch(TestEvent(wrapper))
            parent._num_completed_cases += 1
            # Walk up the chain, completing any parents that finished.
            while parent:
                if parent.complete:
                    evt = DescribeEvent(DescribeEvent.COMPLETE, parent)
                    parent._state.after_all()
                    parent.top_parent.dispatch(evt)
                    parent = parent.parent
                else:
                    parent = None
    def sync_wrappers_from_pipes(self):
        """Poll worker pipes until every worker sent its None sentinel."""
        stops = 0
        while stops < self.num_processes:
            for pipe in self.active_pipes:
                if pipe.poll(0.01):
                    received = pipe.recv()
                    if received is None:
                        stops += 1
                    else:
                        self.sync_wrappers(received)
            if stops >= self.num_processes:
                break
    def execute_all(self):
        """Spawn workers, drain results, and join everything."""
        for i in range(0, self.num_processes):
            parent_pipe, child_pipe = mp.Pipe(duplex=False)
            test_process = ExecuteTestProcess(
                self.work_queue, self.case_functions,
                self.case_parents, child_pipe,
                track_coverage=self.track_coverage,
                coverage_omit=self.coverage_omit)
            self.active_pipes.append(parent_pipe)
            self.processes.append(test_process)
            # One STOP sentinel per worker so each loop terminates.
            self.work_queue.put('STOP')
            test_process.start()
        self.sync_wrappers_from_pipes()
        # Join processes for good measure
        total_tests = 0
        for test_process in list(self.processes):
            test_process.join()
            total_tests += test_process.worked.value
            self.processes.remove(test_process)
        for pipe in self.active_pipes:
            pipe.close()
| mit |
zerobatu/edx-platform | common/djangoapps/student/tests/test_certificates.py | 39 | 4829 | """Tests for display of certificates on the student dashboard. """
import unittest
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error
from certificates.api import get_certificate_url # pylint: disable=import-error
# pylint: disable=no-member
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTest(ModuleStoreTestCase):
    """Tests display of certificates on the student dashboard. """
    USERNAME = "test_user"
    PASSWORD = "password"
    DOWNLOAD_URL = "http://www.example.com/certificate.pdf"
    def setUp(self):
        # Create and log in a user, then create a course whose
        # certificates are shown early (before course end).
        super(CertificateDisplayTest, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(result, msg="Could not log in")
        self.course = CourseFactory()
        self.course.certificates_display_behavior = "early_with_info"
        self.update_course(self.course, self.user.username)
    @ddt.data('verified', 'professional')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    def test_display_verified_certificate(self, enrollment_mode):
        # With HTML view disabled, a generated certificate should expose
        # its PDF download link on the dashboard.
        self._create_certificate(enrollment_mode)
        self._check_can_download_certificate()
    @ddt.data('verified', 'honor')
    @override_settings(CERT_NAME_SHORT='Test_Certificate')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    def test_display_download_certificate_button(self, enrollment_mode):
        """
        Tests if CERTIFICATES_HTML_VIEW is True
        and course has enabled web certificates via cert_html_view_enabled setting
        and no active certificate configuration available
        then any of the Download certificate button should not be visible.
        """
        self.course.cert_html_view_enabled = True
        self.course.save()
        self.store.update_item(self.course, self.user.id)
        self._create_certificate(enrollment_mode)
        self._check_can_not_download_certificate()
    @ddt.data('verified')
    @override_settings(CERT_NAME_SHORT='Test_Certificate')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    def test_linked_student_to_web_view_credential(self, enrollment_mode):
        # With an active web-certificate configuration, the dashboard
        # should link to the web view rather than a PDF download.
        test_url = get_certificate_url(
            user_id=self.user.id,
            course_id=unicode(self.course.id)
        )
        self._create_certificate(enrollment_mode)
        certificates = [
            {
                'id': 0,
                'name': 'Test Name',
                'description': 'Test Description',
                'is_active': True,
                'signatories': [],
                'version': 1
            }
        ]
        self.course.certificates = {'certificates': certificates}
        self.course.cert_html_view_enabled = True
        self.course.save() # pylint: disable=no-member
        self.store.update_item(self.course, self.user.id)
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'View Test_Certificate')
        self.assertContains(response, test_url)
    def _create_certificate(self, enrollment_mode):
        """Simulate that the user has a generated certificate. """
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode)
        GeneratedCertificateFactory(
            user=self.user,
            course_id=self.course.id,
            mode=enrollment_mode,
            download_url=self.DOWNLOAD_URL,
            status="downloadable",
            grade=0.98,
        )
    def _check_can_download_certificate(self):
        # Dashboard should offer the PDF download link.
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'Download Your ID Verified')
        self.assertContains(response, self.DOWNLOAD_URL)
    def _check_can_not_download_certificate(self):
        """
        Make sure response does not have any of the download certificate buttons
        """
        response = self.client.get(reverse('dashboard'))
        self.assertNotContains(response, u'View Test_Certificate')
        self.assertNotContains(response, u'Download Your Test_Certificate (PDF)')
        self.assertNotContains(response, u'Download Test_Certificate (PDF)')
        self.assertNotContains(response, self.DOWNLOAD_URL)
| agpl-3.0 |
GenericStudent/home-assistant | script/translations/migrate.py | 21 | 12255 | """Migrate things."""
import json
import pathlib
from pprint import pprint
import re
from .const import CORE_PROJECT_ID, FRONTEND_PROJECT_ID, INTEGRATIONS_DIR
from .lokalise import get_api
FRONTEND_REPO = pathlib.Path("../frontend/")
def create_lookup(results):
    """Index Lokalise key records by their web key name."""
    lookup = {}
    for entry in results:
        lookup[entry["key_name"]["web"]] = entry
    return lookup
def rename_keys(project_id, to_migrate):
    """Rename keys.
    to_migrate is Dict[from_key] = to_key.

    Aborts without changes when any source key is missing; prompts for
    interactive confirmation before issuing the bulk update.
    """
    updates = []
    lokalise = get_api(project_id)
    from_key_data = lokalise.keys_list({"filter_keys": ",".join(to_migrate)})
    # Bail out unless every requested key was found.
    if len(from_key_data) != len(to_migrate):
        print(
            f"Lookin up keys in Lokalise returns {len(from_key_data)} results, expected {len(to_migrate)}"
        )
        return
    from_key_lookup = create_lookup(from_key_data)
    print("Gathering IDs")
    for from_key, to_key in to_migrate.items():
        updates.append(
            {"key_id": from_key_lookup[from_key]["key_id"], "key_name": to_key}
        )
    pprint(updates)
    print()
    # Require explicit confirmation before mutating the project.
    while input("Type YES to confirm: ") != "YES":
        pass
    print()
    print("Updating keys")
    pprint(lokalise.keys_bulk_update(updates))
def list_keys_helper(lokalise, keys, params=None, *, validate=True):
    """List keys in chunks so the request URL doesn't exceed max length.

    :param lokalise: API client exposing ``keys_list``.
    :param keys: key names to look up (queried 100 at a time).
    :param params: extra query parameters merged into each request.
    :param validate: when True, raise ValueError if a chunk returns
        fewer results than requested.
    """
    # Avoid a mutable default argument; the dict is only read, but a
    # shared default is still an accident waiting to happen.
    params = params or {}
    results = []
    for i in range(0, len(keys), 100):
        filter_keys = keys[i : i + 100]
        from_key_data = lokalise.keys_list(
            {
                **params,
                "filter_keys": ",".join(filter_keys),
                "limit": len(filter_keys) + 1,
            }
        )
        if len(from_key_data) == len(filter_keys) or not validate:
            results.extend(from_key_data)
            continue
        # Report the per-chunk expectation (the original printed the total
        # key count, which is wrong for every chunk after the first).
        print(
            f"Looking up keys in Lokalise returns {len(from_key_data)} results, "
            f"expected {len(filter_keys)}"
        )
        searched = set(filter_keys)
        returned = set(create_lookup(from_key_data))
        print("Not found:", ", ".join(searched - returned))
        raise ValueError
    return results
def migrate_project_keys_translations(from_project_id, to_project_id, to_migrate):
    """Migrate keys and translations from one project to another.
    to_migrate is Dict[from_key] = to_key.

    Keys already present in the target project are skipped; only the
    missing target keys are created and filled with the source
    translations.
    """
    from_lokalise = get_api(from_project_id)
    to_lokalise = get_api(to_project_id)
    # Fetch keys in target
    # We are going to skip migrating existing keys
    print("Checking which target keys exist..")
    try:
        to_key_data = list_keys_helper(
            to_lokalise, list(to_migrate.values()), validate=False
        )
    except ValueError:
        return
    existing = set(create_lookup(to_key_data))
    missing = [key for key in to_migrate.values() if key not in existing]
    if not missing:
        print("All keys to migrate exist already, nothing to do")
        return
    # Fetch keys whose translations we're importing
    print("Fetch translations that we're importing..")
    try:
        from_key_data = list_keys_helper(
            from_lokalise,
            [key for key, value in to_migrate.items() if value not in existing],
            {"include_translations": 1},
        )
    except ValueError:
        return
    from_key_lookup = create_lookup(from_key_data)
    print("Creating", ", ".join(missing))
    # Create the missing target keys first so we have their key_ids.
    to_key_lookup = create_lookup(
        to_lokalise.keys_create(
            [{"key_name": key, "platforms": ["web"]} for key in missing]
        )
    )
    updates = []
    for from_key, to_key in to_migrate.items():
        # If it is not in lookup, it already existed, skipping it.
        if to_key not in to_key_lookup:
            continue
        # Copy every language's translation, preserving review/fuzzy flags.
        updates.append(
            {
                "key_id": to_key_lookup[to_key]["key_id"],
                "translations": [
                    {
                        "language_iso": from_translation["language_iso"],
                        "translation": from_translation["translation"],
                        "is_reviewed": from_translation["is_reviewed"],
                        "is_fuzzy": from_translation["is_fuzzy"],
                    }
                    for from_translation in from_key_lookup[from_key]["translations"]
                ],
            }
        )
    print("Updating")
    pprint(updates)
    print()
    print()
    pprint(to_lokalise.keys_bulk_update(updates))
def find_and_rename_keys():
    """Find and rename keys in core.

    Moves each integration's ``config::title`` key to a top-level
    ``title`` key in the core Lokalise project.
    """
    to_migrate = {}
    for integration in INTEGRATIONS_DIR.iterdir():
        strings_file = integration / "strings.json"
        if not strings_file.is_file():
            continue
        strings = json.loads(strings_file.read_text())
        if "title" in strings.get("config", {}):
            from_key = f"component::{integration.name}::config::title"
            to_key = f"component::{integration.name}::title"
            to_migrate[from_key] = to_key
    rename_keys(CORE_PROJECT_ID, to_migrate)
def find_different_languages():
    """Find different supported languages."""
    def iso_codes(project_id):
        # Set of language ISO codes configured for one Lokalise project.
        return {entry["lang_iso"] for entry in get_api(project_id).languages_list()}

    core_languages = iso_codes(CORE_PROJECT_ID)
    frontend_languages = iso_codes(FRONTEND_PROJECT_ID)
    print("Core minus frontend", core_languages - frontend_languages)
    print("Frontend minus core", frontend_languages - core_languages)
def interactive_update():
    """Interactively prompt whether to drop the "title" key from each
    integration's strings.json.

    Answering ``1`` removes the key and rewrites the file.
    """
    for integration in INTEGRATIONS_DIR.iterdir():
        strings_file = integration / "strings.json"
        if not strings_file.is_file():
            continue
        strings = json.loads(strings_file.read_text())
        if "title" not in strings:
            continue
        manifest = json.loads((integration / "manifest.json").read_text())
        print("Processing", manifest["name"])
        print("Translation title", strings["title"])
        if input("Drop title? (1=yes, 2=no) ") == "1":
            strings.pop("title")
            # indent=2 keeps the output consistent with the other JSON
            # writers in this module (the original wrote compact JSON,
            # producing noisy diffs).
            strings_file.write_text(json.dumps(strings, indent=2))
        print()
# Frontend state strings (and old per-domain key references) that should
# be rewritten to the shared common::state:: Lokalise keys.
STATE_REWRITE = {
    "Off": "[%key:common::state::off%]",
    "On": "[%key:common::state::on%]",
    "Unknown": "[%key:common::state::unknown%]",
    "Unavailable": "[%key:common::state::unavailable%]",
    "Open": "[%key:common::state::open%]",
    "Closed": "[%key:common::state::closed%]",
    "Connected": "[%key:common::state::connected%]",
    "Disconnected": "[%key:common::state::disconnected%]",
    "Locked": "[%key:common::state::locked%]",
    "Unlocked": "[%key:common::state::unlocked%]",
    "Active": "[%key:common::state::active%]",
    "active": "[%key:common::state::active%]",
    "Standby": "[%key:common::state::standby%]",
    "Idle": "[%key:common::state::idle%]",
    "idle": "[%key:common::state::idle%]",
    "Paused": "[%key:common::state::paused%]",
    "paused": "[%key:common::state::paused%]",
    "Home": "[%key:common::state::home%]",
    "Away": "[%key:common::state::not_home%]",
    "[%key:state::default::off%]": "[%key:common::state::off%]",
    "[%key:state::default::on%]": "[%key:common::state::on%]",
    "[%key:state::cover::open%]": "[%key:common::state::open%]",
    "[%key:state::cover::closed%]": "[%key:common::state::closed%]",
    "[%key:state::lock::locked%]": "[%key:common::state::locked%]",
    "[%key:state::lock::unlocked%]": "[%key:common::state::unlocked%]",
}
# Frontend state domains that are not migrated at all.
SKIP_DOMAIN = {"default", "scene"}
# Domains whose state strings are nested one level deeper by device class.
STATES_WITH_DEV_CLASS = {"binary_sensor", "zwave"}
GROUP_DELETE = {"opening", "closing", "stopped"}  # They don't exist
def find_frontend_states():
    """Find frontend states.
    Source key -> target key
    Add key to integrations strings.json

    Reads the frontend's en.json state section, writes per-integration
    strings.json "state" blocks, and (after confirmation) migrates the
    matching Lokalise keys from the frontend project to core.
    """
    frontend_states = json.loads(
        (FRONTEND_REPO / "src/translations/en.json").read_text()
    )["state"]
    # domain => state object
    to_write = {}
    to_migrate = {}
    for domain, states in frontend_states.items():
        if domain in SKIP_DOMAIN:
            continue
        to_key_base = f"component::{domain}::state"
        from_key_base = f"state::{domain}"
        if domain in STATES_WITH_DEV_CLASS:
            # States are keyed by device class; map each to the new
            # component:: key, using "_" for the default device class.
            domain_to_write = dict(states)
            for device_class, dev_class_states in domain_to_write.items():
                to_device_class = "_" if device_class == "default" else device_class
                for key in dev_class_states:
                    to_migrate[
                        f"{from_key_base}::{device_class}::{key}"
                    ] = f"{to_key_base}::{to_device_class}::{key}"
            # Rewrite "default" device class to _
            if "default" in domain_to_write:
                domain_to_write["_"] = domain_to_write.pop("default")
        else:
            if domain == "group":
                for key in GROUP_DELETE:
                    states.pop(key)
            # Flat domains get a single "_" pseudo device class.
            domain_to_write = {"_": states}
            for key in states:
                to_migrate[f"{from_key_base}::{key}"] = f"{to_key_base}::_::{key}"
        # Map out common values with
        for dev_class_states in domain_to_write.values():
            for key, value in dev_class_states.copy().items():
                if value in STATE_REWRITE:
                    dev_class_states[key] = STATE_REWRITE[value]
                    continue
                # Rewrite old state::<domain>::<key> references to the
                # new component::<domain>::state::<key> form.
                match = re.match(r"\[\%key:state::(\w+)::(.+)\%\]", value)
                if not match:
                    continue
                dev_class_states[key] = "[%key:component::{}::state::{}%]".format(
                    *match.groups()
                )
        to_write[domain] = domain_to_write
    # Merge the new "state" section into each integration's strings.json.
    for domain, state in to_write.items():
        strings = INTEGRATIONS_DIR / domain / "strings.json"
        if strings.is_file():
            content = json.loads(strings.read_text())
        else:
            content = {}
        content["state"] = state
        strings.write_text(json.dumps(content, indent=2) + "\n")
    pprint(to_migrate)
    print()
    while input("Type YES to confirm: ") != "YES":
        pass
    migrate_project_keys_translations(FRONTEND_PROJECT_ID, CORE_PROJECT_ID, to_migrate)
def apply_data_references(to_migrate):
    """Apply references.

    For every config-flow step data field whose key appears in
    *to_migrate*, replace the literal label with the shared key
    reference — but only when the label actually mentions the field
    name (to avoid clobbering custom labels).
    """
    for strings_file in INTEGRATIONS_DIR.glob("*/strings.json"):
        strings = json.loads(strings_file.read_text())
        steps = strings.get("config", {}).get("step")
        if not steps:
            continue
        changed = False
        for step_data in steps.values():
            step_data = step_data.get("data", {})
            for key, value in step_data.items():
                if key in to_migrate and value != to_migrate[key]:
                    # Only swap when the label clearly refers to the key
                    # (first word of the key appears in the label).
                    if key.split("_")[0].lower() in value.lower():
                        step_data[key] = to_migrate[key]
                        changed = True
                    elif value.startswith("[%key"):
                        # Already a key reference; leave untouched.
                        pass
                    else:
                        print(
                            f"{strings_file}: Skipped swapping '{key}': '{value}' does not contain '{key}'"
                        )
        if not changed:
            continue
        strings_file.write_text(json.dumps(strings, indent=2))
def run():
    """Migrate translations.

    Entry point: currently applies common config-flow data-field key
    references; earlier migration steps are kept below, commented out,
    as a record of one-off runs.
    """
    apply_data_references(
        {
            "host": "[%key:common::config_flow::data::host%]",
            "username": "[%key:common::config_flow::data::username%]",
            "password": "[%key:common::config_flow::data::password%]",
            "port": "[%key:common::config_flow::data::port%]",
            "usb_path": "[%key:common::config_flow::data::usb_path%]",
            "access_token": "[%key:common::config_flow::data::access_token%]",
            "api_key": "[%key:common::config_flow::data::api_key%]",
        }
    )
    # Rename existing keys to common keys,
    # Old keys have been updated with reference to the common key
    # rename_keys(
    #     CORE_PROJECT_ID,
    #     {
    #         "component::blebox::config::step::user::data::host": "common::config_flow::data::ip",
    #     },
    # )
    # find_frontend_states()
    # find_different_languages()
    return 0
| apache-2.0 |
tdliu/hoop-picks | lib/rsa/transform.py | 81 | 6892 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data transformation functions.
From bytes to a number, number to bytes, etc.
"""
from __future__ import absolute_import
try:
# We'll use psyco if available on 32-bit architectures to speed up code.
# Using psyco (if available) cuts down the execution time on Python 2.5
# at least by half.
import psyco
psyco.full()
except ImportError:
pass
import binascii
from struct import pack
from rsa import common
from rsa._compat import is_integer, b, byte, get_word_alignment, ZERO_BYTE, EMPTY_BYTE
def bytes2int(raw_bytes):
    r"""Converts a list of bytes or an 8-bit string to an integer.
    When using unicode strings, encode it to some encoding like UTF8 first.
    >>> (((128 * 256) + 64) * 256) + 15
    8405007
    >>> bytes2int(b'\x80@\x0f')
    8405007
    """
    # Go through the hex representation; this matches the original
    # behaviour exactly, including raising ValueError on empty input.
    hexed = binascii.hexlify(raw_bytes)
    return int(hexed, 16)
def _int2bytes(number, block_size=None):
    r"""Converts a number to a string of bytes.
    Usage::
        >>> _int2bytes(123456789)
        b'\x07[\xcd\x15'
        >>> bytes2int(_int2bytes(123456789))
        123456789
        >>> _int2bytes(123456789, 6)
        b'\x00\x00\x07[\xcd\x15'
        >>> bytes2int(_int2bytes(123456789, 128))
        123456789
        >>> _int2bytes(123456789, 3)
        Traceback (most recent call last):
        ...
        OverflowError: Needed 4 bytes for number, but block size is 3
    @param number: the number to convert
    @param block_size: the number of bytes to output. If the number encoded to
        bytes is less than this, the block will be zero-padded. When not given,
        the returned block is not padded.
    @throws OverflowError when block_size is given and the number takes up more
        bytes than fit into the block.
    """
    # Type checking
    if not is_integer(number):
        raise TypeError("You must pass an integer for 'number', not %s" %
                        number.__class__)
    if number < 0:
        raise ValueError('Negative numbers cannot be used: %i' % number)
    # Zero encodes to a single NUL byte; anything else starts empty and
    # is filled below, one byte at a time.
    if number == 0:
        needed_bytes, raw_bytes = 1, [ZERO_BYTE]
    else:
        needed_bytes, raw_bytes = common.byte_size(number), []
    # Bounds check before doing any work.
    # You cannot compare None > 0 in Python 3x. It will fail with a TypeError.
    if block_size and block_size > 0 and needed_bytes > block_size:
        raise OverflowError('Needed %i bytes for number, but block size '
                            'is %i' % (needed_bytes, block_size))
    # Peel off the least-significant byte each round, prepending it so
    # the result comes out big-endian.
    remaining = number
    while remaining > 0:
        raw_bytes.insert(0, byte(remaining & 0xFF))
        remaining >>= 8
    # Pad with zeroes to fill the block
    if block_size and block_size > 0:
        padding = (block_size - needed_bytes) * ZERO_BYTE
    else:
        padding = EMPTY_BYTE
    return padding + EMPTY_BYTE.join(raw_bytes)
def bytes_leading(raw_bytes, needle=ZERO_BYTE):
    """
    Finds the number of prefixed byte occurrences in the haystack.
    Useful when you want to deal with padding.
    :param raw_bytes:
        Raw bytes.
    :param needle:
        The byte to count. Default \000.
    :returns:
        The number of leading needle bytes.
    """
    # Indexing keeps compatibility between Python 2.x and Python 3.x
    target = needle[0]
    count = 0
    for current in raw_bytes:
        if current != target:
            break
        count += 1
    return count
def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
    """
    Convert an unsigned integer to bytes (base-256 representation)::
    Does not preserve leading zeros if you don't specify a chunk size or
    fill size.
    .. NOTE:
        You must not specify both fill_size and chunk_size. Only one
        of them is allowed.
    :param number:
        Integer value
    :param fill_size:
        If the optional fill size is given the length of the resulting
        byte string is expected to be the fill size and will be padded
        with prefix zero bytes to satisfy that length.
    :param chunk_size:
        If optional chunk size is given and greater than zero, pad the front of
        the byte string with binary zeros so that the length is a multiple of
        ``chunk_size``.
    :param overflow:
        ``False`` (default). If this is ``True``, no ``OverflowError``
        will be raised when the fill_size is shorter than the length
        of the generated byte sequence. Instead the byte sequence will
        be returned as is.
    :returns:
        Raw bytes (base-256 representation).
    :raises:
        ``OverflowError`` when fill_size is given and the number takes up more
        bytes than fit into the block. This requires the ``overflow``
        argument to this function to be set to ``False`` otherwise, no
        error will be raised.
    """
    if number < 0:
        raise ValueError("Number must be an unsigned integer: %d" % number)
    if fill_size and chunk_size:
        raise ValueError("You can either fill or pad chunks, but not both")
    # Ensure these are integers.
    # (A non-integer raises TypeError here before any work is done.)
    number & 1
    raw_bytes = b('')
    # Pack the integer one machine word at a time into bytes.
    num = number
    word_bits, _, max_uint, pack_type = get_word_alignment(num)
    pack_format = ">%s" % pack_type
    while num > 0:
        raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
        num >>= word_bits
    # Obtain the index of the first non-zero byte.
    # (Word packing above left-pads the first word with zero bytes.)
    zero_leading = bytes_leading(raw_bytes)
    if number == 0:
        raw_bytes = ZERO_BYTE
    # De-padding.
    raw_bytes = raw_bytes[zero_leading:]
    length = len(raw_bytes)
    if fill_size and fill_size > 0:
        # Pad (or verify) to an exact output length.
        if not overflow and length > fill_size:
            raise OverflowError(
                "Need %d bytes for number, but fill size is %d" %
                (length, fill_size)
            )
        raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
    elif chunk_size and chunk_size > 0:
        # Pad to the next multiple of chunk_size.
        remainder = length % chunk_size
        if remainder:
            padding_size = chunk_size - remainder
            raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
    return raw_bytes
if __name__ == '__main__':
    # Run the embedded doctest examples when executed directly.
    import doctest
    doctest.testmod()
| apache-2.0 |
isnnn/Sick-Beard-TPB | lib/unidecode/x02c.py | 246 | 3596 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'L', # 0x60
'l', # 0x61
'L', # 0x62
'P', # 0x63
'R', # 0x64
'a', # 0x65
't', # 0x66
'H', # 0x67
'h', # 0x68
'K', # 0x69
'k', # 0x6a
'Z', # 0x6b
'z', # 0x6c
'', # 0x6d
'M', # 0x6e
'A', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
| gpl-3.0 |
s142857/servo | python/mozlog/mozlog/structured/scripts/unstable.py | 45 | 3604 | import argparse
from collections import defaultdict
import json
from mozlog.structured import reader
class StatusHandler(reader.LogHandler):
    """Accumulate per-test status counts from a structured log stream.

    Counts are stored as ``statuses[run_info][test_id][subtest][status] -> int``,
    where the ``None`` subtest key holds the overall test_end result.
    """

    def __init__(self):
        # run_info of the suite currently being read (None between suites).
        self.run_info = None
        # Four-level counter: run_info -> test -> subtest -> status -> count.
        self.statuses = defaultdict(
            lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int))))

    def test_id(self, test):
        """Return a hashable id for *test*: strings pass through, other
        iterables (e.g. lists from JSON) become tuples."""
        # ``str is bytes`` is only true on Python 2; the short-circuit keeps
        # the py2-only ``unicode`` name from being evaluated on Python 3.
        # isinstance (rather than an exact type() check) also accepts str
        # subclasses, which the old code would wrongly tuple()-ify.
        if isinstance(test, str) or (str is bytes and isinstance(test, unicode)):
            return test
        return tuple(test)

    def suite_start(self, item):
        # Normalise run_info to a sorted tuple of pairs so it can be a dict key.
        self.run_info = tuple(sorted(item.get("run_info", {}).items()))

    def test_status(self, item):
        # Count one subtest result.
        self.statuses[self.run_info][self.test_id(item["test"])][item["subtest"]][item["status"]] += 1

    def test_end(self, item):
        # The overall test result is stored under the None subtest key.
        self.statuses[self.run_info][self.test_id(item["test"])][None][item["status"]] += 1

    def suite_end(self, item):
        self.run_info = None
def get_statuses(filenames):
    """Feed every structured log file in *filenames* through a single
    StatusHandler and return the accumulated status counts."""
    collector = StatusHandler()
    for path in filenames:
        with open(path) as log_file:
            reader.handle_log(reader.read(log_file), collector)
    return collector.statuses
def _filter(results_cmp):
def inner(statuses):
rv = defaultdict(lambda:defaultdict(dict))
for run_info, tests in statuses.iteritems():
for test, subtests in tests.iteritems():
for name, results in subtests.iteritems():
if results_cmp(results):
rv[run_info][test][name] = results
return rv
return inner
# A (sub)test is "unstable" when it shows more than one distinct status
# across the runs, and "stable" when it always shows exactly one.
filter_unstable = _filter(lambda x: len(x) > 1)
filter_stable = _filter(lambda x: len(x) == 1)
def group_results(data):
    """Merge statuses from all run_info groups into one mapping.

    The run_info level is summed away, leaving
    ``test -> subtest -> {status: count}``.

    ``iteritems()`` was replaced with ``items()`` (identical loop behaviour
    on Python 2, also valid on Python 3).
    """
    rv = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for run_info, tests in data.items():
        for test, subtests in tests.items():
            for name, results in subtests.items():
                for status, number in results.items():
                    rv[test][name][status] += number
    return rv
def print_results(data):
    """Print the results in *data*, one underlined section per run_info group.

    Python 2 ``print`` statements were converted to single-argument
    ``print()`` calls: output is byte-identical on Python 2 and the module
    now also parses on Python 3.
    """
    for run_info, tests in data.items():
        run_str = " ".join("%s:%s" % (k, v) for k, v in run_info) if run_info else "No Run Info"
        print(run_str)
        print("=" * len(run_str))
        print_run(tests)
def print_run(tests):
    """Print each test with its per-subtest status counts, e.g.
    ``[subtest]: PASS (2) FAIL (1)``; the overall result prints as ``[]``.

    Python 2 ``print`` statements were converted to single-argument
    ``print()`` calls (identical output on Python 2, valid Python 3), and
    ``iteritems()`` became ``items()``.
    """
    for test, subtests in sorted(tests.items()):
        print("\n" + str(test))
        # NOTE(review): for tuple test ids this underlines with len(tuple)
        # dashes, not the printed width — kept as-is for compatibility.
        print("-" * len(test))
        for name, results in subtests.items():
            print("[%s]: %s" % (name if name is not None else "",
                                " ".join("%s (%i)" % (k, v) for k, v in results.items())))
def get_parser(add_help=True):
    """Build the argument parser for the ``unstable`` command."""
    description = ("List tests that don't give consistent results "
                   "from one or more runs.")
    parser = argparse.ArgumentParser("unstable",
                                     description=description,
                                     add_help=add_help)
    # Both boolean options default to off and are simple store_true flags.
    for flag, help_text in (("--json", "Output in JSON format"),
                            ("--group", "Group results from different run types")):
        parser.add_argument(flag, action="store_true", default=False,
                            help=help_text)
    parser.add_argument("log_file", nargs="+",
                        help="Log files to read")
    return parser
def main(**kwargs):
    """Entry point: report (sub)tests with inconsistent results.

    *kwargs* mirror the get_parser() options: ``log_file`` (list of paths),
    ``json`` (bool) and ``group`` (bool).

    The Python 2 ``print`` statement was converted to a ``print()`` call
    (identical output on Python 2, valid Python 3).
    """
    unstable = filter_unstable(get_statuses(kwargs["log_file"]))
    if kwargs["group"]:
        unstable = group_results(unstable)
    if kwargs["json"]:
        # NOTE(review): without --group the top-level keys are run_info
        # tuples, which json.dumps rejects — confirm --json is meant to be
        # combined with --group.
        print(json.dumps(unstable))
    else:
        if not kwargs["group"]:
            print_results(unstable)
        else:
            print_run(unstable)
if __name__ == "__main__":
    # Parse command-line arguments and forward them to main() as keywords.
    parser = get_parser()
    args = parser.parse_args()
    kwargs = vars(args)
    main(**kwargs)
| mpl-2.0 |
aarontuor/antk | docs/conf.py | 1 | 8137 | # -*- coding: utf-8 -*-
#
# sampledoc documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 11 05:04:40 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Monkey-patch functools.wraps and contextlib.wraps
# https://github.com/sphinx-doc/sphinx/issues/1711#issuecomment-93126473
import functools
# def no_op_wraps(func):
# """
# Replaces functools.wraps in order to undo wrapping when generating Sphinx documentation
# """
# import sys
# # if func.__module__ is None or 'antk' not in func.__module__:
# # return functools.orig_wraps(func)
# def wrapper(decorator):
# sys.stderr.write('patched for function signature: {0!r}\n'.format(func))
# return func
# return wrapper
# # functools.orig_wraps = functools.wraps
# # functools.wraps = no_op_wraps
# # import contextlib
# # contextlib.wraps = no_op_wraps
# import antk.lib.decorate as decorate
# decorate.node_op = no_op_wraps
# decorate.neural_net = no_op_wraps
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# Make the local Sphinx extensions and the antk packages importable by autodoc.
for _extra_path in ('sphinxext',
                    '../antk/models/',
                    '../antk/datascripts/',
                    '../antk/core/',
                    '../antk/datascripts/ml100k/'):
    sys.path.append(os.path.abspath(_extra_path))
# sys.setrecursionlimit(5000)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules enabled for this project.
extensions = [
    'sphinx.ext.mathjax',
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.inheritance_diagram',
    'sphinxarg.ext',  # third-party: sphinx-argparse
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# (A stray no-op "[extensions]" expression that followed the list — an
# editing leftover that built and discarded a list — was removed.)
# Render todo/todolist directives in the built docs (sphinx.ext.todo).
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'antk'
copyright = u'2016, Aaron Tuor'
author = u'Aaron Tuor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use for code blocks.
pygments_style = 'perldoc'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.
# NOTE(review): the 'sphinxdoc' assignment below was dead configuration —
# it is unconditionally overridden by html_theme = "sphinx_rtd_theme" near
# the end of this file — so it is commented out here to avoid confusion.
#html_theme = 'sphinxdoc'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/antlogo_cut_shrunk2.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder (the generated .hhp project).
htmlhelp_basename = 'antkdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (master_doc, 'antk.tex', u'antk Documentation',
     u'Aaron Tuor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# pygments_style = 'sphinx'
# Final HTML theme: the Read the Docs theme, looked up in the local _themes
# directory.  NOTE: this supersedes any html_theme value assigned earlier in
# this file.
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes", ]
# Monkey-patch functools.wraps
# import functools
#
# def no_op_wraps(func):
#     """Replaces functools.wraps in order to undo wrapping.
#
#     Can be used to preserve the decorated function's signature
#     in the documentation generated by Sphinx.
#
#     """
#     def wrapper(decorator):
#         return func
#     return wrapper
#functools.wraps = no_op_wraps
# Include both the class docstring and the __init__ docstring in autodoc
# class documentation.
autoclass_content = "both"
safwanrahman/readthedocs.org | readthedocs/notifications/backends.py | 2 | 3025 | """
Pluggable backends for the delivery of notifications.
Delivery of notifications to users depends on a list of backends configured in
Django settings. For example, they might be e-mailed to users as well as
displayed on the site.
"""
from __future__ import absolute_import
from builtins import object
from django.conf import settings
from django.http import HttpRequest
from django.utils.module_loading import import_string
from messages_extends.constants import INFO_PERSISTENT
from readthedocs.core.utils import send_email
from .constants import LEVEL_MAPPING, REQUIREMENT, HTML
def send_notification(request, notification):
    """
    Send notifications through all backends defined by settings.

    Backends should be listed in the settings ``NOTIFICATION_BACKENDS``, which
    should be a list of class paths to be loaded, using the standard Django
    string module loader.
    """
    for backend_path in getattr(settings, 'NOTIFICATION_BACKENDS', []):
        backend_cls = import_string(backend_path)
        backend_cls(request).send(notification)
class Backend(object):
    """Base class for notification delivery backends.

    Stores the current request; subclasses override :meth:`send`.
    """

    def __init__(self, request):
        # Kept so backends can use the request when rendering/sending.
        self.request = request

    def send(self, notification):
        """Deliver *notification*; the base implementation is a no-op."""
class EmailBackend(Backend):
    """
    Send templated notification emails through our standard email backend.

    The content body is first rendered from an on-disk template, then passed
    into the standard email templates as a string.
    """

    name = 'email'

    def send(self, notification):
        # Only sufficiently important notifications are emailed.
        if notification.level < REQUIREMENT:
            return
        content = notification.render(self.name, source_format=HTML)
        send_email(
            recipient=notification.user.email,
            subject=notification.get_subject(),
            template='core/email/common.txt',
            template_html='core/email/common.html',
            context={'content': content},
            request=self.request,
        )
class SiteBackend(Backend):
    """
    Add messages through Django messages application.

    This uses persistent messaging levels provided by :py:mod:`message_extends`
    and stores persistent messages in the database.
    """

    name = 'site'

    def send(self, notification):
        # Instead of calling the standard messages.add method, this instead
        # manipulates the storage directly. This is because we don't have a
        # request object and need to mock one out to fool the message storage
        # into saving a message for a separate user.
        cls_name = settings.MESSAGE_STORAGE
        cls = import_string(cls_name)
        # Fake request with an empty session so the storage class can be
        # instantiated outside the request/response cycle.
        req = HttpRequest()
        setattr(req, 'session', '')
        storage = cls(req)
        # Map the notification level to a persistent message level so the
        # message survives in the database rather than one request.
        storage.add(
            level=LEVEL_MAPPING.get(notification.level, INFO_PERSISTENT),
            message=notification.render(
                backend_name=self.name,
                source_format=HTML
            ),
            extra_tags='',
            user=notification.user,
        )
| mit |
miguelparaiso/OdooAccessible | openerp/addons/base/tests/test_uninstall.py | 200 | 2717 | # -*- coding: utf-8 -*-
# This assumes an existing but uninitialized database.
import unittest2
import openerp
from openerp import SUPERUSER_ID
import common
ADMIN_USER_ID = common.ADMIN_USER_ID
def registry(model):
    """Return the model class *model* from the registry of the test DB."""
    return openerp.modules.registry.RegistryManager.get(common.get_db_name())[model]

def cursor():
    """Open and return a new database cursor on the test DB."""
    return openerp.modules.registry.RegistryManager.get(common.get_db_name()).cursor()

def get_module(module_name):
    """Return the registered model/module *module_name*, or None.

    Note: the local name shadows the module-level ``registry`` function.
    """
    registry = openerp.modules.registry.RegistryManager.get(common.get_db_name())
    return registry.get(module_name)

def reload_registry():
    """Rebuild the registry, processing pending module (un)installations."""
    openerp.modules.registry.RegistryManager.new(
        common.get_db_name(), update_module=True)

def search_registry(model_name, domain):
    """Search *model_name* with *domain* as superuser; return the ids found."""
    cr = cursor()
    model = registry(model_name)
    record_ids = model.search(cr, SUPERUSER_ID, domain, {})
    cr.close()
    return record_ids
def _press_module_button(module_name, button_name):
    """Shared implementation for install/uninstall.

    Looks up *module_name* in ir.module.module (must match exactly one
    record), invokes the button method named *button_name* as the superuser,
    commits, and rebuilds the registry so the change takes effect.
    """
    ir_module_module = registry('ir.module.module')
    cr = cursor()
    module_ids = ir_module_module.search(cr, SUPERUSER_ID,
                                         [('name', '=', module_name)], {})
    assert len(module_ids) == 1
    getattr(ir_module_module, button_name)(cr, SUPERUSER_ID, module_ids, {})
    cr.commit()
    cr.close()
    reload_registry()

def install_module(module_name):
    """Install *module_name* and reload the registry."""
    _press_module_button(module_name, 'button_install')

def uninstall_module(module_name):
    """Uninstall *module_name* and reload the registry."""
    _press_module_button(module_name, 'button_uninstall')
class test_uninstall(unittest2.TestCase):
    """
    Test the install/uninstall of a test module. The module is available in
    `openerp.tests` which should be present in the addons-path.
    """
    # Methods run in alphabetical order, so test_01_install is guaranteed to
    # execute before test_02_uninstall.

    def test_01_install(self):
        """ Check a few things showing the module is installed. """
        install_module('test_uninstall')
        assert get_module('test_uninstall.model')
        # Installation must have created ir.model.data / ir.model.fields rows.
        assert search_registry('ir.model.data',
                               [('module', '=', 'test_uninstall')])
        assert search_registry('ir.model.fields',
                               [('model', '=', 'test_uninstall.model')])

    def test_02_uninstall(self):
        """ Check a few things showing the module is uninstalled. """
        uninstall_module('test_uninstall')
        assert not get_module('test_uninstall.model')
        # Uninstallation must have removed the module's metadata rows.
        assert not search_registry('ir.model.data',
                                   [('module', '=', 'test_uninstall')])
        assert not search_registry('ir.model.fields',
                                   [('model', '=', 'test_uninstall.model')])
if __name__ == '__main__':
    # Run the test case directly with the unittest2 runner.
    unittest2.main()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
crowdhackathon-transport/optimizers | crowdstance-api/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py | 164 | 10570 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir
logger = logging.getLogger(__name__)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries.
"""
def __init__(self, base=None):
"""
Initialise an instance.
:param base: The base directory where the cache should be located. If
not specified, this will be the ``resource-cache``
directory under whatever :func:`get_cache_base` returns.
"""
if base is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('resource-cache'))
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def is_stale(self, resource, path):
"""
Is the cache stale for the given resource?
:param resource: The :class:`Resource` being cached.
:param path: The path of the resource in the cache.
:return: True if the cache is stale.
"""
# Cache invalidation is a hard problem :-)
return True
def get(self, resource):
"""
Get a resource into the cache,
:param resource: A :class:`Resource` instance.
:return: The pathname of the resource in the cache.
"""
prefix, path = resource.finder.get_cache_info(resource)
if prefix is None:
result = path
else:
result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
dirname = os.path.dirname(result)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.exists(result):
stale = True
else:
stale = self.is_stale(resource, path)
if stale:
# write the bytes of the resource to the cache location
with open(result, 'wb') as f:
f.write(resource.bytes)
return result
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
cache = Cache()
class ResourceBase(object):
def __init__(self, finder, name):
self.finder = finder
self.name = name
class Resource(ResourceBase):
"""
A class representing an in-package resource, such as a data file. This is
not normally instantiated by user code, but rather by a
:class:`ResourceFinder` which manages the resource.
"""
is_container = False # Backwards compatibility
def as_stream(self):
"""
Get the resource as a stream.
This is not a property to make it obvious that it returns a new stream
each time.
"""
return self.finder.get_stream(self)
@cached_property
def file_path(self):
return cache.get(self)
@cached_property
def bytes(self):
return self.finder.get_bytes(self)
@cached_property
def size(self):
return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
is_container = True # Backwards compatibility
@cached_property
def resources(self):
return self.finder.get_resources(self)
class ResourceFinder(object):
"""
Resource finder for file system resources.
"""
def __init__(self, module):
self.module = module
self.loader = getattr(module, '__loader__', None)
self.base = os.path.dirname(getattr(module, '__file__', ''))
def _adjust_path(self, path):
return os.path.realpath(path)
def _make_path(self, resource_name):
parts = resource_name.split('/')
parts.insert(0, self.base)
result = os.path.join(*parts)
return self._adjust_path(result)
def _find(self, path):
return os.path.exists(path)
def get_cache_info(self, resource):
return None, resource.path
def find(self, resource_name):
path = self._make_path(resource_name)
if not self._find(path):
result = None
else:
if self._is_directory(path):
result = ResourceContainer(self, resource_name)
else:
result = Resource(self, resource_name)
result.path = path
return result
def get_stream(self, resource):
return open(resource.path, 'rb')
def get_bytes(self, resource):
with open(resource.path, 'rb') as f:
return f.read()
def get_size(self, resource):
return os.path.getsize(resource.path)
def get_resources(self, resource):
def allowed(f):
return f != '__pycache__' and not f.endswith(('.pyc', '.pyo'))
return set([f for f in os.listdir(resource.path) if allowed(f)])
def is_container(self, resource):
return self._is_directory(resource.path)
_is_directory = staticmethod(os.path.isdir)
class ZipResourceFinder(ResourceFinder):
"""
Resource finder for resources in .zip files.
"""
def __init__(self, module):
super(ZipResourceFinder, self).__init__(module)
archive = self.loader.archive
self.prefix_len = 1 + len(archive)
# PyPy doesn't have a _files attr on zipimporter, and you can't set one
if hasattr(self.loader, '_files'):
self._files = self.loader._files
else:
self._files = zipimport._zip_directory_cache[archive]
self.index = sorted(self._files)
def _adjust_path(self, path):
return path
def _find(self, path):
path = path[self.prefix_len:]
if path in self._files:
result = True
else:
if path and path[-1] != os.sep:
path = path + os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
if not result:
logger.debug('_find failed: %r %r', path, self.loader.prefix)
else:
logger.debug('_find worked: %r %r', path, self.loader.prefix)
return result
def get_cache_info(self, resource):
prefix = self.loader.archive
path = resource.path[1 + len(prefix):]
return prefix, path
def get_bytes(self, resource):
return self.loader.get_data(resource.path)
def get_stream(self, resource):
return io.BytesIO(self.get_bytes(resource))
def get_size(self, resource):
path = resource.path[self.prefix_len:]
return self._files[path][3]
def get_resources(self, resource):
path = resource.path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
plen = len(path)
result = set()
i = bisect.bisect(self.index, path)
while i < len(self.index):
if not self.index[i].startswith(path):
break
s = self.index[i][plen:]
result.add(s.split(os.sep, 1)[0]) # only immediate children
i += 1
return result
def _is_directory(self, path):
path = path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
return result
_finder_registry = {
type(None): ResourceFinder,
zipimport.zipimporter: ZipResourceFinder
}
try:
import _frozen_importlib
_finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
_finder_registry[_frozen_importlib.FileFinder] = ResourceFinder
except (ImportError, AttributeError):
pass
def register_finder(loader, finder_maker):
_finder_registry[type(loader)] = finder_maker
_finder_cache = {}
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result
_dummy_module = types.ModuleType(str('__dummy__'))
def finder_for_path(path):
"""
Return a resource finder for a path, which should represent a container.
:param path: The path.
:return: A :class:`ResourceFinder` instance for the path.
"""
result = None
# calls any path hooks, gets importer into cache
pkgutil.get_importer(path)
loader = sys.path_importer_cache.get(path)
finder = _finder_registry.get(type(loader))
if finder:
module = _dummy_module
module.__file__ = os.path.join(path, '')
module.__loader__ = loader
result = finder(module)
return result
| mit |
SethMMorton/input_reader | input_reader/__init__.py | 1 | 2431 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import dirname, join
from .input_reader import InputReader
from .helpers import ReaderError, SUPPRESS, Namespace
from .files import file_safety_check, abs_file_path
from ._version import __version__
# Public API of the input_reader package.
__all__ = [
    'InputReader',
    'ReaderError',
    'SUPPRESS',
    'abs_file_path',
    'file_safety_check',
    'range_check',
    'include_path',
]
# Directory of include files that ship with the installed package.
include_path = join(dirname(__file__), 'include')
def range_check(low, high, expand=False, asint=False):
    """\
    Verify that *low* is strictly less than *high* and return the range.

    If both numbers are integral and *expand* is true, a tuple of the
    expanded integer range is returned; if *asint* is true (and *expand*
    is false) the bounds are returned as ints.  Otherwise the *low* and
    *high* values are returned as floats.

    :argument low:
        The low value of the range to check.
    :type low: float, int
    :argument high:
        The high value of the range to check.
    :type high: float, int
    :keyword expand:
        If :py:obj:`True` and both *low* and *high* are integral, return
        the tuple of integers from *low* to *high*, inclusive.  Otherwise,
        just return *low* and *high*.
    :type expand: bool, optional
    :keyword asint:
        If *expand* is :py:obj:`False` and both bounds are integral,
        return *low* and *high* as ints instead of floats.
    :type asint: bool, optional
    :rtype:
        tuple -- see the explanation of *expand* and *asint*.
    :exception:
        * :py:exc:`ValueError`: *low* >= *high*.
        * :py:exc:`ValueError`: *low* or *high* cannot be converted to a
          :py:obj:`float`.
    """
    # Convert to float first. A ValueError is raised if not possible.
    low = float(low)
    high = float(high)
    # Reject empty or inverted ranges (equality is also invalid).
    if low >= high:
        raise ValueError('low >= high')
    # Special integer handling applies only when both bounds are whole numbers.
    if (expand or asint) and int(low) == low and int(high) == high:
        if expand:
            return tuple(range(int(low), int(high) + 1))
        return int(low), int(high)
    # Otherwise return the bounds as floats.
    return low, high
| mit |
ybellavance/python-for-android | python3-alpha/python3-src/Lib/threading.py | 45 | 36822 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from collections import deque
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those originaly names are not in any imminent danger of
# being deprecated (even for Py3k),so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Debug support (adapted from ihooks.py).
_VERBOSE = False
if __debug__:

    class _Verbose(object):
        # Mixin that prints debug notes to stderr when verbose mode is on.

        def __init__(self, verbose=None):
            if verbose is None:
                verbose = _VERBOSE
            self._verbose = verbose

        def _note(self, format, *args):
            # Write "<thread name>: <message>" to stderr when verbose.
            if self._verbose:
                format = format % args
                # Issue #4188: calling current_thread() can incur an infinite
                # recursion if it has to create a DummyThread on the fly.
                ident = _get_ident()
                try:
                    name = _active[ident].name
                except KeyError:
                    name = "<OS thread %d>" % ident
                format = "%s: %s\n" % (name, format)
                _sys.stderr.write(format)

else:
    # Disable this when using "python -O"
    class _Verbose(object):
        # No-op stand-in used under "python -O" (when __debug__ is False).
        def __init__(self, verbose=None):
            pass
        def _note(self, *args):
            pass
# Support for profile and trace hooks

_profile_hook = None
_trace_hook = None

def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.
    """
    global _profile_hook
    _profile_hook = func

def settrace(func):
    """Set a trace function for all threads started from the threading module.

    The func will be passed to sys.settrace() for each thread, before its
    run() method is called.
    """
    global _trace_hook
    _trace_hook = func
# Synchronization classes

# Lock is just the raw lock type from the _thread module.
Lock = _allocate_lock

def RLock(verbose=None, *args, **kwargs):
    """Factory returning a new reentrant lock.

    Uses the C implementation (_thread.RLock) when it is available and
    verbose debugging is not requested; otherwise falls back to the pure
    Python _RLock.
    """
    if verbose is None:
        verbose = _VERBOSE
    if (__debug__ and verbose) or _CRLock is None:
        return _PyRLock(verbose, *args, **kwargs)
    return _CRLock(*args, **kwargs)
class _RLock(_Verbose):
    """A reentrant lock.

    It may be acquired multiple times by the same thread; release() must be
    called once per acquire() before the lock is actually unlocked.
    """

    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        # The underlying non-reentrant lock.
        self._block = _allocate_lock()
        # Thread ident of the owner, or None when unlocked.
        self._owner = None
        # Number of unmatched acquire() calls by the owner.
        self._count = 0

    def __repr__(self):
        owner = self._owner
        try:
            # Show the thread's name instead of the raw ident when known.
            owner = _active[owner].name
        except KeyError:
            pass
        return "<%s owner=%r count=%d>" % (
            self.__class__.__name__, owner, self._count)

    def acquire(self, blocking=True, timeout=-1):
        """Acquire the lock, recursively if this thread already owns it.

        Returns 1 for a recursive re-acquire, otherwise the result of the
        underlying lock's acquire() call.
        """
        me = _get_ident()
        if self._owner == me:
            # Re-entrant acquire by the owner: just bump the recursion count.
            self._count = self._count + 1
            if __debug__:
                self._note("%s.acquire(%s): recursive success", self, blocking)
            return 1
        rc = self._block.acquire(blocking, timeout)
        if rc:
            self._owner = me
            self._count = 1
            if __debug__:
                self._note("%s.acquire(%s): initial success", self, blocking)
        else:
            if __debug__:
                self._note("%s.acquire(%s): failure", self, blocking)
        return rc

    __enter__ = acquire

    def release(self):
        """Release one level of the lock.

        The lock is only truly released when the recursion count drops to
        zero.  Raises RuntimeError if the calling thread does not own it.
        """
        if self._owner != _get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self._count = count = self._count - 1
        if not count:
            # Outermost release: give up ownership and the underlying lock.
            self._owner = None
            self._block.release()
            if __debug__:
                self._note("%s.release(): final release", self)
        else:
            if __debug__:
                self._note("%s.release(): non-final release", self)

    def __exit__(self, t, v, tb):
        self.release()

    # Internal methods used by condition variables

    def _acquire_restore(self, state):
        # Reacquire the lock and restore the (count, owner) pair saved by
        # _release_save(); used by Condition.wait().
        self._block.acquire()
        self._count, self._owner = state
        if __debug__:
            self._note("%s._acquire_restore()", self)

    def _release_save(self):
        # Fully release the lock regardless of recursion level and return
        # the (count, owner) state needed to restore it afterwards.
        if __debug__:
            self._note("%s._release_save()", self)
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)

    def _is_owned(self):
        # True if the calling thread currently holds the lock.
        return self._owner == _get_ident()

# Exported so the RLock() factory (and tests) can reach the pure-Python
# implementation explicitly.
_PyRLock = _RLock
def Condition(*args, **kwargs):
    """Factory function returning a new condition variable object."""
    return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = []
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
if __debug__:
self._note("%s.wait(): got it", self)
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self._waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
return gotit
finally:
self._acquire_restore(saved_state)
def wait_for(self, predicate, timeout=None):
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
if __debug__:
self._note("%s.wait_for(%r, %r): Timed out.",
self, predicate, timeout)
break
if __debug__:
self._note("%s.wait_for(%r, %r): Waiting with timeout=%s.",
self, predicate, timeout, waittime)
self.wait(waittime)
result = predicate()
else:
if __debug__:
self._note("%s.wait_for(%r, %r): Success.",
self, predicate, timeout)
return result
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
self.notify(len(self._waiters))
notifyAll = notify_all
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
self._cond.acquire()
while self._value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self._value)
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self._value)
rc = True
self._cond.release()
return rc
__enter__ = acquire
def release(self):
self._cond.acquire()
self._value = self._value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self._value)
self._cond.notify()
self._cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""
    def __init__(self, value=1, verbose=None):
        _Semaphore.__init__(self, value, verbose)
        # Remember the starting value so release() can detect over-release.
        self._initial_value = value
    def release(self):
        # Releasing above the initial value means acquire()/release() calls
        # are unbalanced -- raise instead of silently growing the counter
        # the way a plain _Semaphore would.
        if self._value >= self._initial_value:
            raise ValueError("Semaphore released too many times")
        return _Semaphore.release(self)
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
    # After Tim Peters' event class (without is_posted())
    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        # Internal boolean flag guarded by a condition variable; wait()
        # blocks until the flag becomes true.
        self._cond = Condition(Lock())
        self._flag = False
    def _reset_internal_locks(self):
        # private!  Called by Thread._reset_internal_locks from _after_fork()
        # to replace the condition's lock, which may be in an invalid state
        # after a fork.
        self._cond.__init__()
    def is_set(self):
        """Return True if the internal flag is true."""
        return self._flag
    isSet = is_set  # camelCase alias
    def set(self):
        """Set the internal flag to true and wake all threads waiting on it."""
        self._cond.acquire()
        try:
            self._flag = True
            self._cond.notify_all()
        finally:
            self._cond.release()
    def clear(self):
        """Reset the internal flag to false."""
        self._cond.acquire()
        try:
            self._flag = False
        finally:
            self._cond.release()
    def wait(self, timeout=None):
        """Block until the flag is true, or until *timeout* seconds elapse.

        Returns the flag's value on exit, so a False result means the wait
        timed out.
        """
        self._cond.acquire()
        try:
            if not self._flag:
                self._cond.wait(timeout)
            return self._flag
        finally:
            self._cond.release()
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier(_Verbose):
"""
Barrier. Useful for synchronizing a fixed number of threads
at known synchronization points. Threads block on 'wait()' and are
simultaneously once they have all made that call.
"""
def __init__(self, parties, action=None, timeout=None, verbose=None):
"""
Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called
by one of the threads after they have all entered the
barrier and just prior to releasing them all.
If a 'timeout' is provided, it is uses as the default for
all subsequent 'wait()' calls.
"""
_Verbose.__init__(self, verbose)
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""
Wait for the barrier. When the specified number of threads have
started waiting, they are all simultaneously awoken. If an 'action'
was provided for the barrier, one of the threads will have executed
that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are relased. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""
Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""
Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and
threads attempting to 'wait()' will have BrokenBarrierError
raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""
Return the number of threads required to trip the barrier.
"""
return self._parties
@property
def n_waiting(self):
"""
Return the number of threads that are currently waiting at the barrier.
"""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""
Return True if the barrier is in a broken state
"""
return self._state == -2
#exception raised by the Barrier class
class BrokenBarrierError(RuntimeError): pass
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# For debug and leak testing
_dangling = WeakSet()
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
#XXX __exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
self._daemonic = self._set_daemon()
self._ident = None
self._started = Event()
self._stopped = False
self._block = Condition(Lock())
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
_dangling.add(self)
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_block'): # DummyThread deletes _block
self._block.__init__()
self._started._reset_internal_locks()
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
if self._stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = _get_ident()
def _bootstrap_inner(self):
try:
self._set_ident()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if __debug__:
self._note("%s._bootstrap(): thread started", self)
if _trace_hook:
self._note("%s._bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s._bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s._bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s._bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
print((
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):"), file=self._stderr)
print((
"Traceback (most recent call last):"), file=self._stderr)
while exc_tb:
print((
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
exc_tb = exc_tb.tb_next
print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s._bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
#XXX self.__exc_clear()
pass
finally:
with _active_limbo_lock:
self._stop()
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def _stop(self):
self._block.acquire()
self._stopped = True
self._block.notify_all()
self._block.release()
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self._stopped:
self._note("%s.join(): waiting until thread stops", self)
self._block.acquire()
try:
if timeout is None:
while not self._stopped:
self._block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self._stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self._block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self._block.release()
@property
def name(self):
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
assert self._initialized, "Thread.__init__() not called"
return self._started.is_set() and not self._stopped
isAlive = is_alive
@property
def daemon(self):
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    def __init__(self):
        Thread.__init__(self, name="MainThread")
        # The main thread is already running, so mark it started and register
        # it in the active-thread table immediately.
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _set_daemon(self):
        # The main thread is never daemonic.
        return False
    def _exitfunc(self):
        # Interpreter-shutdown hook (installed below as threading._shutdown):
        # stop the main thread, join every remaining non-daemon thread, then
        # deregister ourselves.
        self._stop()
        t = _pickSomeNonDaemonThread()
        if t:
            if __debug__:
                self._note("%s: waiting for other threads", self)
        while t:
            t.join()
            t = _pickSomeNonDaemonThread()
        if __debug__:
            self._note("%s: exiting", self)
        self._delete()
def _pickSomeNonDaemonThread():
    # Return an arbitrary live non-daemon thread, or None if only daemon
    # threads remain.  Used by _MainThread._exitfunc to decide which threads
    # still need joining at interpreter shutdown.
    for t in enumerate():
        if not t.daemon and t.is_alive():
            return t
    return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"))
        # Thread._block consumes an OS-level locking primitive, which
        # can never be used by a _DummyThread.  Since a _DummyThread
        # instance is immortal, that's bad, so release this resource.
        del self._block
        # A dummy thread represents a thread that is already running, so it
        # is registered as started and active straight away.
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _set_daemon(self):
        # Dummy threads are always daemonic so interpreter shutdown does not
        # wait for them (see the module comment above this class).
        return True
    def join(self, timeout=None):
        # Dummy threads were not started by this module and can never be
        # waited for.
        assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
    """Return the Thread object for the caller's thread of control.

    If the calling thread was not created through this module, a fresh
    _DummyThread placeholder is created (and registered) for it.
    """
    try:
        return _active[_get_ident()]
    except KeyError:
        ##print "current_thread(): no current thread for", _get_ident()
        return _DummyThread()
currentThread = current_thread
def active_count():
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.values():
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._ident = ident
# Any condition variables hanging off of the active thread may
# be in an invalid state, so we reinitialize them.
thread._reset_internal_locks()
new_active[ident] = thread
else:
# All the others are already stopped.
# We don't call _Thread__stop() because it tries to acquire
# thread._Thread__block which could also have been held while
# we forked.
thread._stopped = True
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
| apache-2.0 |
laiy/Database_Project | third_party/nltk/corpus/reader/ppattach.py | 10 | 3138 | # Natural Language Toolkit: PP Attachment Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Read lines from the Prepositional Phrase Attachment Corpus.
The PP Attachment Corpus contains several files having the format:
sentence_id verb noun1 preposition noun2 attachment
For example:
42960 gives authority to administration V
46742 gives inventors of microchip N
The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.:
(VP gives (NP authority) (PP to administration))
(VP gives (NP inventors (PP of microchip)))
The corpus contains the following files:
training: training set
devset: development test set, used for algorithm development.
test: test set, used to report results
bitstrings: word classes derived from Mutual Information Clustering for the Wall Street Journal.
Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional
Phrase Attachment. Proceedings of the ARPA Human Language Technology
Conference. [http://www.cis.upenn.edu/~adwait/papers/hlt94.ps]
The PP Attachment Corpus is distributed with NLTK with the permission
of the author.
"""
from __future__ import unicode_literals
from nltk import compat
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
@compat.python_2_unicode_compatible
class PPAttachment(object):
    """A single prepositional-phrase attachment instance.

    Holds the sentence id, the (verb, noun1, prep, noun2) tuple and the
    attachment decision: 'V' for verb-phrase attachment, 'N' for noun-phrase
    attachment (see the module docstring for examples).
    """
    def __init__(self, sent, verb, noun1, prep, noun2, attachment):
        self.sent = sent
        self.verb = verb
        self.noun1 = noun1
        self.prep = prep
        self.noun2 = noun2
        self.attachment = attachment
    def __repr__(self):
        return ('PPAttachment(sent=%r, verb=%r, noun1=%r, prep=%r, '
                'noun2=%r, attachment=%r)' %
                (self.sent, self.verb, self.noun1, self.prep,
                 self.noun2, self.attachment))
class PPAttachmentCorpusReader(CorpusReader):
    """Reader for corpora whose lines have the format:

    sentence_id verb noun1 preposition noun2 attachment
    """
    def attachments(self, fileids):
        """Return a corpus view of PPAttachment objects, one per line."""
        return concat([StreamBackedCorpusView(fileid, self._read_obj_block,
                                              encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])
    def tuples(self, fileids):
        """Return a corpus view of raw whitespace-split tuples, one per line."""
        return concat([StreamBackedCorpusView(fileid, self._read_tuple_block,
                                              encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])
    def raw(self, fileids=None):
        """Return the given file(s) as a single concatenated string."""
        if fileids is None: fileids = self._fileids
        elif isinstance(fileids, compat.string_types): fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])
    def _read_tuple_block(self, stream):
        # Read one line and split it on whitespace; an empty read means EOF.
        line = stream.readline()
        if line:
            return [tuple(line.split())]
        else:
            return []
    def _read_obj_block(self, stream):
        # Like _read_tuple_block, but wraps the fields in a PPAttachment.
        line = stream.readline()
        if line:
            return [PPAttachment(*line.split())]
        else:
            return []
| apache-2.0 |
mrquim/mrquimrepo | plugin.video.mrpiracy/resources/lib/js2py/translators/friendly_nodes.py | 20 | 8484 | import binascii
from pyjsparser import PyJsParser
import six
if six.PY3:
basestring = str
long = int
xrange = range
unicode = str
REGEXP_CONVERTER = PyJsParser()
def to_hex(s):
    """Hex-encode *s* (as UTF-8 bytes) and return the digits as a text string.

    Works identically on Python 2 and 3, where the bytes/str hex helpers
    differ; used to build safe label identifiers.
    """
    utf8_bytes = s.encode('utf8')
    return binascii.hexlify(utf8_bytes).decode('utf8')
def indent(lines, ind=4):
    """Prefix every line of *lines* with *ind* spaces.

    Trailing spaces left after the substitution (e.g. following a final
    newline) are stripped from the end of the result.
    """
    pad = ind * ' '
    return pad + lines.replace('\n', '\n' + pad).rstrip(' ')
def inject_before_lval(source, lval, code):
    """Insert *code* (re-indented to match) just before the line of *source*
    that contains the unique marker *lval*.

    Raises RuntimeError if the marker is missing or occurs more than once.
    """
    # NOTE(review): the print() calls below emit debug output to stdout just
    # before raising; consider removing them.
    if source.count(lval)>1:
        print()
        print(lval)
        raise RuntimeError('To many lvals (%s)' % lval)
    elif not source.count(lval):
        print()
        print(lval)
        assert lval not in source
        raise RuntimeError('No lval found "%s"' % lval)
    end = source.index(lval)
    # Start of the line containing lval.
    inj = source.rfind('\n', 0, end)
    # Measure that line's leading indentation so the injected code lines up.
    ind = inj
    while source[ind+1]==' ':
        ind+=1
    ind -= inj
    return source[:inj+1]+ indent(code, ind) + source[inj+1:]
def get_continue_label(label):
return CONTINUE_LABEL%to_hex(label)
def get_break_label(label):
return BREAK_LABEL%to_hex(label)
def is_valid_py_name(name):
    """Return True if *name* can be used as a Python assignment target.

    Checked by compiling a trial assignment, so keywords and malformed
    identifiers are reported as invalid.
    """
    try:
        compile(name + ' = 11', 'a', 'exec')
    except (SyntaxError, TypeError, ValueError):
        # SyntaxError: not a valid target (or a keyword); TypeError /
        # ValueError: non-string input or embedded null bytes.  The original
        # bare `except:` also swallowed unrelated errors (even
        # KeyboardInterrupt); anything else should propagate.
        return False
    return True
def indent(lines, ind=4):
    # NOTE(review): exact duplicate of indent() defined earlier in this
    # module; this later definition is the one that takes effect at import
    # time -- consider removing one of the two.
    return ind*' '+lines.replace('\n', '\n'+ind*' ').rstrip(' ')
def compose_regex(val):
    """Join a (pattern, flags) pair back into JS literal form: /pattern/flags."""
    reg, flags = val
    return u'/{0}/{1}'.format(reg, flags)
def float_repr(f):
    """repr() for numbers: integral values are shown as ints ('2', not '2.0')."""
    truncated = int(f)
    return repr(truncated) if truncated == f else repr(f)
def argsplit(args, sep=','):
    """used to split JS args (it is not that simple as it seems because
    sep can be inside brackets).
    pass args *without* brackets!
    Used also to parse array and object elements, and more"""
    parsed_len = 0
    last = 0
    splits = []
    # Walk the top-level fragments produced by bracket_split; separator
    # characters are only honoured inside fragments that are not themselves
    # bracketed groups.
    for e in bracket_split(args, brackets=['()', '[]', '{}']):
        if e[0] not in ('(', '[', '{'):
            for i, char in enumerate(e):
                if char==sep:
                    # Slice from the original string so bracketed spans are
                    # preserved verbatim inside each piece.
                    splits.append(args[last:parsed_len+i])
                    last = parsed_len + i + 1
        parsed_len += len(e)
    # Trailing piece after the final separator (may be empty).
    splits.append(args[last:])
    return splits
def bracket_split(source, brackets=('()','{}','[]'), strip=False):
    """DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)

    Generator alternating between top-level text and complete bracketed
    groups, e.g. 'a(b)c' yields 'a', '(b)', 'c'.  Nesting of the pair being
    matched is tracked with a depth counter; *strip* (a bool used as a 0/1
    slice offset) drops the outer pair from each bracketed group.
    """
    starts = [e[0] for e in brackets]
    in_bracket = 0
    n = 0
    last = 0
    while n<len(source):
        e = source[n]
        if not in_bracket and e in starts:
            # Entering a bracketed group: remember where it starts and which
            # open/close pair must be balanced.
            in_bracket = 1
            start = n
            b_start, b_end = brackets[starts.index(e)]
        elif in_bracket:
            if e==b_start:
                in_bracket += 1
            elif e==b_end:
                in_bracket -= 1
                if not in_bracket:
                    # Group closed: first yield any plain text before it,
                    # then the group itself.
                    if source[last:start]:
                        yield source[last:start]
                    last = n+1
                    yield source[start+strip:n+1-strip]
        n+=1
    # Trailing plain text after the final group, if any.
    if source[last:]:
        yield source[last:]
def js_comma(a, b):
return 'PyJsComma('+a+','+b+')'
def js_or(a, b):
return '('+a+' or '+b+')'
def js_bor(a, b):
return '('+a+'|'+b+')'
def js_bxor(a, b):
return '('+a+'^'+b+')'
def js_band(a, b):
return '('+a+'&'+b+')'
def js_and(a, b):
return '('+a+' and '+b+')'
def js_strict_eq(a, b):
return 'PyJsStrictEq('+a+','+b+')'
def js_strict_neq(a, b):
return 'PyJsStrictNeq('+a+','+b+')'
#Not handled by python in the same way like JS. For example 2==2==True returns false.
# In JS above would return true so we need brackets.
def js_abstract_eq(a, b):
return '('+a+'=='+b+')'
#just like ==
def js_abstract_neq(a, b):
return '('+a+'!='+b+')'
def js_lt(a, b):
return '('+a+'<'+b+')'
def js_le(a, b):
return '('+a+'<='+b+')'
def js_ge(a, b):
return '('+a+'>='+b+')'
def js_gt(a, b):
return '('+a+'>'+b+')'
def js_in(a, b):
return b+'.contains('+a+')'
def js_instanceof(a, b):
return a+'.instanceof('+b+')'
def js_lshift(a, b):
return '('+a+'<<'+b+')'
def js_rshift(a, b):
return '('+a+'>>'+b+')'
def js_shit(a, b):
return 'PyJsBshift('+a+','+b+')'
def js_add(a, b): # To simplify later process of converting unary operators + and ++
return '(%s+%s)'%(a, b)
def js_sub(a, b): # To simplify
return '(%s-%s)'%(a, b)
def js_mul(a, b):
return '('+a+'*'+b+')'
def js_div(a, b):
return '('+a+'/'+b+')'
def js_mod(a, b):
return '('+a+'%'+b+')'
def js_typeof(a):
cand = list(bracket_split(a, ('()',)))
if len(cand)==2 and cand[0]=='var.get':
return cand[0]+cand[1][:-1]+',throw=False).typeof()'
return a+'.typeof()'
def js_void(a):
    """Translate JS 'void a': evaluate *a* for side effects, yield undefined."""
    return 'PyJsComma({0}, Js(None))'.format(a)
def js_new(a):
cands = list(bracket_split(a, ('()',)))
lim = len(cands)
if lim < 2:
return a + '.create()'
n = 0
while n < lim:
c = cands[n]
if c[0]=='(':
if cands[n-1].endswith('.get') and n+1>=lim: # last get operation.
return a + '.create()'
elif cands[n-1][0]=='(':
return ''.join(cands[:n])+'.create' + c + ''.join(cands[n+1:])
elif cands[n-1]=='.callprop':
beg = ''.join(cands[:n-1])
args = argsplit(c[1:-1],',')
prop = args[0]
new_args = ','.join(args[1:])
create = '.get(%s).create(%s)' % (prop, new_args)
return beg + create + ''.join(cands[n+1:])
n+=1
return a + '.create()'
def js_delete(a):
    """Translate JS 'delete expr' by rewriting the trailing '.get(...)' call
    on *a* into '.delete(...)'.

    Raises SyntaxError if *a* does not end with a property access.
    """
    c = list(bracket_split(a, ['()']))
    beg, arglist = ''.join(c[:-1]).strip(), c[-1].strip()
    if beg[-4:] != '.get':
        # Include the offending expression in the error message instead of
        # printing it to stdout (debug leftover in the original code).
        raise SyntaxError('Invalid delete operation: %s' % a)
    return beg[:-3] + 'delete' + arglist
def js_neg(a):
return '(-'+a+')'
def js_pos(a):
return '(+'+a+')'
def js_inv(a):
return '(~'+a+')'
def js_not(a):
return a+'.neg()'
def js_postfix(a, inc, post):
    """Translate the ++/-- operators: rewrite the trailing '.get(...)' on *a*
    into a '.put(...)' storing the old numeric value +/- 1.

    *inc* selects ++ vs --; *post* selects the postfix form, whose value is
    the pre-update number.
    """
    bra = list(bracket_split(a, ('()',)))
    meth = bra[-2]
    # ++/-- are only valid on a reference, i.e. an expression ending in a
    # get call.
    if not meth.endswith('get'):
        raise SyntaxError('Invalid ++ or -- operation.')
    bra[-2] = bra[-2][:-3] + 'put'
    bra[-1] = '(%s,Js(%s.to_number())%sJs(1))' % (bra[-1][1:-1], a, '+' if inc else '-')
    res = ''.join(bra)
    # Postfix evaluates to the original value, so undo the +/-1 in the
    # resulting expression.
    return res if not post else '(%s%sJs(1))' % (res, '-' if inc else '+')
def js_pre_inc(a):
return js_postfix(a, True, False)
def js_post_inc(a):
return js_postfix(a, True, True)
def js_pre_dec(a):
return js_postfix(a, False, False)
def js_post_dec(a):
return js_postfix(a, False, True)
# Templates for the Python label names emitted for labelled continue/break.
CONTINUE_LABEL = 'JS_CONTINUE_LABEL_%s'
BREAK_LABEL = 'JS_BREAK_LABEL_%s'
# Code templates used when translating JS try/catch: PREPARE saves whatever
# the catch parameter shadows and binds the thrown exception; RESTORE puts
# the shadowed binding back (or removes the temporary one).
PREPARE = '''HOLDER = var.own.get(NAME)\nvar.force_own_put(NAME, PyExceptionToJs(PyJsTempException))\n'''
RESTORE = '''if HOLDER is not None:\n var.own[NAME] = HOLDER\nelse:\n del var.own[NAME]\ndel HOLDER\n'''
TRY_CATCH = '''%stry:\nBLOCKfinally:\n%s''' % (PREPARE, indent(RESTORE))
# Binary operator -> translator function, one dict per precedence level.
OR = {'||': js_or}
AND = {'&&': js_and}
BOR = {'|': js_bor}
BXOR = {'^': js_bxor}
BAND = {'&': js_band}
EQS = {'===': js_strict_eq,
       '!==': js_strict_neq,
       '==': js_abstract_eq, # we need == and != too. Read a note above method
       '!=': js_abstract_neq}
# Since JS does not have chained comparisons we need to implement all cmp methods.
COMPS = {'<': js_lt,
         '<=': js_le,
         '>=': js_ge,
         '>': js_gt,
         'instanceof': js_instanceof, # TODO: change to validate
         'in': js_in}
BSHIFTS = {'<<': js_lshift,
           '>>': js_rshift,
           '>>>': js_shit}  # (sic) translator for the unsigned right shift
ADDS = {'+': js_add,
        '-': js_sub}
MULTS = {'*': js_mul,
         '/': js_div,
         '%': js_mod}
# Merged lookup table covering every binary operator.
BINARY = {}
BINARY.update(ADDS)
BINARY.update(MULTS)
BINARY.update(BSHIFTS)
BINARY.update(COMPS)
BINARY.update(EQS)
BINARY.update(BAND)
BINARY.update(BXOR)
BINARY.update(BOR)
BINARY.update(AND)
BINARY.update(OR)
# Note: ++ and -- map to None because each has distinct prefix and postfix
# translators; the correct one is chosen directly in the translate function.
UNARY = {'typeof': js_typeof,
         'void': js_void,
         'new': js_new,
         'delete': js_delete,
         '!': js_not,
         '-': js_neg,
         '+': js_pos,
         '~': js_inv,
         '++': None,
         '--': None
         }
ThoughtWorksInc/treadmill | treadmill/websocket/api/endpoint.py | 3 | 1861 | """
A WebSocket handler for Treadmill state.
"""
import os
import logging
from treadmill import schema
from treadmill.websocket import utils
_LOGGER = logging.getLogger(__name__)
class EndpointAPI(object):
    """Handler for /endpoints topic."""
    def __init__(self):
        """Install the ``subscribe`` and ``on_event`` callbacks."""
        @schema.schema({'$ref': 'websocket/endpoint.json#/message'})
        def subscribe(message):
            """Return filter based on message payload.

            ``message['filter']`` must look like ``proid.pattern``; the
            watch is placed on ``/endpoints/<proid>`` with the pattern
            ``<pattern>:<proto>:<endpoint>`` (proto and endpoint default
            to the ``*`` wildcard).
            """
            parsed_filter = utils.parse_message_filter(message['filter'])
            if '.' not in parsed_filter.filter:
                raise ValueError('Invalid filter: expect proid.pattern')
            proid, pattern = parsed_filter.filter.split('.', 1)
            proto = message.get('proto', '*')
            endpoint = message.get('endpoint', '*')
            full_pattern = ':'.join([pattern, proto, endpoint])
            return [(os.path.join('/endpoints', proid), full_pattern)]
        def on_event(filename, operation, content):
            """Translate a watched-file event into a websocket payload.

            Returns None for files outside ``/endpoints/``.  ``content``
            holds the ``host:port`` string stored in the endpoint file
            (None when absent); ``sow`` flags state-of-world events
            (``operation`` is None).
            """
            if not filename.startswith('/endpoints/'):
                return
            proid, endpoint_file = filename[len('/endpoints/'):].split('/', 1)
            host = None
            port = None
            if content is not None:
                host, port = content.split(':')
            sow = operation is None
            # endpoint files are named <app>:<proto>:<endpoint>
            app, proto, endpoint = endpoint_file.split(':')
            return {
                'topic': '/endpoints',
                'name': '.'.join([proid, app]),
                'proto': proto,
                'endpoint': endpoint,
                'host': host,
                'port': port,
                'sow': sow,
            }
        self.subscribe = subscribe
        self.on_event = on_event
def init():
    """API module init."""
    # (topic, handler instance, extra routes - none)
    return [('/endpoints', EndpointAPI(), [])]
| apache-2.0 |
ZuluPro/python-librsync | tests.py | 1 | 2603 | import os
import unittest
import librsync
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
class TraceLevelTestCase(unittest.TestCase):
    """Exercises librsync.debug(), which sets the library trace level."""
    def test_set(self):
        # default level must be accepted without raising
        librsync.debug()
    def test_set_invalid(self):
        # out-of-range levels are rejected with an assertion
        self.assertRaises(AssertionError, librsync.debug, level=40)
class SingleFileTestCase(unittest.TestCase):
    """Base fixture providing one 1 MiB random in-memory file."""
    def setUp(self):
        self.rand = StringIO(os.urandom(1024**2))
class DoubleFileTestCase(unittest.TestCase):
    """Base fixture providing two independent 1 MiB random in-memory files."""
    def setUp(self):
        self.rand1 = StringIO(os.urandom(1024**2))
        self.rand2 = StringIO(os.urandom(1024**2))
class SignatureTestCase(SingleFileTestCase):
    """Smoke test: signature generation should not raise."""
    def test_signature(self):
        s = librsync.signature(self.rand)
class DeltaTestCase(DoubleFileTestCase):
    """Tests for librsync.delta()."""
    def test_delta(self):
        """Smoke test: generating a delta from a valid signature works.

        Renamed from the copy-pasted ``test_signature`` - this test
        exercises delta generation, not signature generation.
        """
        s = librsync.signature(self.rand1)
        librsync.delta(self.rand2, s)
    def test_failure(self):
        """Ensure delta aborts when provided invalid signature."""
        self.assertRaises(librsync.LibrsyncError, librsync.delta, self.rand2,
                          self.rand1)
class PatchTestCase(DoubleFileTestCase):
    """Round-trip tests: signature + delta + patch must reproduce the target."""
    def test_patch(self):
        # delta that turns rand1 into rand2; patching rand1 must yield rand2
        s = librsync.signature(self.rand1)
        d = librsync.delta(self.rand2, s)
        self.rand1.seek(0)
        self.rand2.seek(0)
        o = librsync.patch(self.rand1, d)
        self.assertEqual(o.read(), self.rand2.read())
    def test_nonseek(self):
        # patch() requires a seekable source file and asserts otherwise
        self.assertRaises(AssertionError, librsync.patch, None, self.rand2)
    def test_failure(self):
        "Ensure patch aborts when provided invalid delta."
        self.assertRaises(librsync.LibrsyncError, librsync.patch, self.rand1,
                          self.rand2)
class BigPatchTestCase(PatchTestCase):
    """Re-runs the patch tests with 5 MiB inputs so temp files spill to disk."""
    def setUp(self):
        "Use large enough test files to cause temp files to hit disk."
        self.rand1 = StringIO(os.urandom(1024**2*5))
        self.rand2 = StringIO(os.urandom(1024**2*5))
class Issue3TestCase(PatchTestCase):
    """Regression test for issue #3, using the sample data from the report."""
    def setUp(self):
        "Use test data provided in issue #3."
        # bytes literals: on Python 3 StringIO is io.BytesIO (see imports),
        # which rejects str input; b'' also works unchanged on Python 2
        self.rand1 = StringIO(b'Text.')
        self.rand2 = StringIO(b'New text.\nText.')
class SimpleStringTestCase(unittest.TestCase):
def setUp(self):
self.src = 'FF'
self.dst = 'FF123FF'
def test_string_patch(self):
src_sig = librsync.signature(StringIO(self.src))
delta = librsync.delta(StringIO(self.dst), src_sig).read()
out = librsync.patch(StringIO(self.src), StringIO(delta))
self.assertEqual(self.dst, out.read())
if __name__ == '__main__':
    unittest.main()  # run all test cases when executed directly
| mit |
subramani95/neutron | neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py | 3 | 10070 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
import neutron.services.firewall.drivers.linux.iptables_fwaas as fwaas
from neutron.tests import base
from neutron.tests.unit import test_api_v2
# Shorthand helper and canned values shared by the tests below.
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
    """Unit tests for IptablesFwaasDriver.

    IptablesManager and the agent's execute() are mocked out, so the tests
    assert on the sequence of chain/rule calls rather than on real iptables.
    """
    def setUp(self):
        # NOTE(review): the patchers are started but never stopped here;
        # presumably base.BaseTestCase cleans them up - confirm.
        super(IptablesFwaasTestCase, self).setUp()
        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        self.iptables_cls_p.start()
        self.firewall = fwaas.IptablesFwaasDriver()
    def _fake_rules_v4(self, fwid, apply_list):
        """Return two v4 rules (allow tcp/80 from 10.24.4.2, deny tcp/22)
        and pre-register the per-router ingress/egress chain names."""
        rule_list = []
        rule1 = {'enabled': True,
                 'action': 'allow',
                 'ip_version': 4,
                 'protocol': 'tcp',
                 'destination_port': '80',
                 'source_ip_address': '10.24.4.2'}
        rule2 = {'enabled': True,
                 'action': 'deny',
                 'ip_version': 4,
                 'protocol': 'tcp',
                 'destination_port': '22'}
        # chain names are truncated to 11 characters, as the driver does
        ingress_chain = ('iv4%s' % fwid)[:11]
        egress_chain = ('ov4%s' % fwid)[:11]
        for router_info_inst in apply_list:
            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
            v4filter_inst.chains.append(ingress_chain)
            v4filter_inst.chains.append(egress_chain)
        rule_list.append(rule1)
        rule_list.append(rule2)
        return rule_list
    def _fake_firewall_no_rule(self):
        """Firewall dict with admin_state_up=True and an empty rule list."""
        rule_list = []
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': True,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst
    def _fake_firewall(self, rule_list):
        """Firewall dict with admin_state_up=True wrapping *rule_list*."""
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': True,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst
    def _fake_firewall_with_admin_down(self, rule_list):
        """Firewall dict with admin_state_up=False wrapping *rule_list*."""
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': False,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst
    def _fake_apply_list(self, router_count=1):
        """Build *router_count* mocked router_info objects, each with empty
        v4/v6 filter tables."""
        apply_list = []
        while router_count > 0:
            iptables_inst = mock.Mock()
            v4filter_inst = mock.Mock()
            v6filter_inst = mock.Mock()
            v4filter_inst.chains = []
            v6filter_inst.chains = []
            iptables_inst.ipv4 = {'filter': v4filter_inst}
            iptables_inst.ipv6 = {'filter': v6filter_inst}
            router_info_inst = mock.Mock()
            router_info_inst.iptables_manager = iptables_inst
            apply_list.append(router_info_inst)
            router_count -= 1
        return apply_list
    def _setup_firewall_with_rules(self, func, router_count=1):
        """Run *func* (create/update) with a two-rule firewall and verify the
        exact chain/rule call sequence on every router's v4 filter table."""
        apply_list = self._fake_apply_list(router_count=router_count)
        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
        firewall = self._fake_firewall(rule_list)
        func(apply_list, firewall)
        invalid_rule = '-m state --state INVALID -j DROP'
        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
        rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
        rule2 = '-p tcp --dport 22 -j DROP'
        ingress_chain = 'iv4%s' % firewall['id']
        egress_chain = 'ov4%s' % firewall['id']
        bname = fwaas.iptables_manager.binary_name
        ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
        ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
        for router_info_inst in apply_list:
            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
            calls = [mock.call.ensure_remove_chain('iv4fake-fw-uuid'),
                     mock.call.ensure_remove_chain('ov4fake-fw-uuid'),
                     mock.call.ensure_remove_chain('fwaas-default-policy'),
                     mock.call.add_chain('fwaas-default-policy'),
                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
                     mock.call.add_chain(ingress_chain),
                     mock.call.add_rule(ingress_chain, invalid_rule),
                     mock.call.add_rule(ingress_chain, est_rule),
                     mock.call.add_chain(egress_chain),
                     mock.call.add_rule(egress_chain, invalid_rule),
                     mock.call.add_rule(egress_chain, est_rule),
                     mock.call.add_rule(ingress_chain, rule1),
                     mock.call.add_rule(egress_chain, rule1),
                     mock.call.add_rule(ingress_chain, rule2),
                     mock.call.add_rule(egress_chain, rule2),
                     mock.call.add_rule('FORWARD',
                                        '-o qr-+ -j %s' % ipt_mgr_ichain),
                     mock.call.add_rule('FORWARD',
                                        '-i qr-+ -j %s' % ipt_mgr_echain),
                     mock.call.add_rule('FORWARD',
                                        '-o qr-+ -j %s-fwaas-defau' % bname),
                     mock.call.add_rule('FORWARD',
                                        '-i qr-+ -j %s-fwaas-defau' % bname)]
            v4filter_inst.assert_has_calls(calls)
    def test_create_firewall_no_rules(self):
        """A rule-less firewall still installs the default DROP policy for
        both IPv4 and IPv6."""
        apply_list = self._fake_apply_list()
        firewall = self._fake_firewall_no_rule()
        self.firewall.create_firewall(apply_list, firewall)
        invalid_rule = '-m state --state INVALID -j DROP'
        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
        bname = fwaas.iptables_manager.binary_name
        for ip_version in (4, 6):
            ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
            egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
            calls = [mock.call.ensure_remove_chain(
                     'iv%sfake-fw-uuid' % ip_version),
                     mock.call.ensure_remove_chain(
                     'ov%sfake-fw-uuid' % ip_version),
                     mock.call.ensure_remove_chain('fwaas-default-policy'),
                     mock.call.add_chain('fwaas-default-policy'),
                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
                     mock.call.add_chain(ingress_chain),
                     mock.call.add_rule(ingress_chain, invalid_rule),
                     mock.call.add_rule(ingress_chain, est_rule),
                     mock.call.add_chain(egress_chain),
                     mock.call.add_rule(egress_chain, invalid_rule),
                     mock.call.add_rule(egress_chain, est_rule),
                     mock.call.add_rule('FORWARD',
                                        '-o qr-+ -j %s-fwaas-defau' % bname),
                     mock.call.add_rule('FORWARD',
                                        '-i qr-+ -j %s-fwaas-defau' % bname)]
            if ip_version == 4:
                v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
                v4filter_inst.assert_has_calls(calls)
            else:
                v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
                v6filter_inst.assert_has_calls(calls)
    def test_create_firewall_with_rules(self):
        """create_firewall with rules installs them on a single router."""
        self._setup_firewall_with_rules(self.firewall.create_firewall)
    def test_create_firewall_with_rules_two_routers(self):
        """create_firewall installs the same rules on every router."""
        self._setup_firewall_with_rules(self.firewall.create_firewall,
                                        router_count=2)
    def test_update_firewall_with_rules(self):
        """update_firewall re-installs the rule set the same way."""
        self._setup_firewall_with_rules(self.firewall.update_firewall)
    def test_delete_firewall(self):
        """delete_firewall removes the per-firewall and default chains."""
        apply_list = self._fake_apply_list()
        firewall = self._fake_firewall_no_rule()
        self.firewall.delete_firewall(apply_list, firewall)
        ingress_chain = 'iv4%s' % firewall['id']
        egress_chain = 'ov4%s' % firewall['id']
        calls = [mock.call.ensure_remove_chain(ingress_chain),
                 mock.call.ensure_remove_chain(egress_chain),
                 mock.call.ensure_remove_chain('fwaas-default-policy')]
        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
    def test_create_firewall_with_admin_down(self):
        """With admin_state_up=False only the default DROP policy is added;
        the firewall's own rules are not installed."""
        apply_list = self._fake_apply_list()
        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
        firewall = self._fake_firewall_with_admin_down(rule_list)
        self.firewall.create_firewall(apply_list, firewall)
        calls = [mock.call.ensure_remove_chain('iv4fake-fw-uuid'),
                 mock.call.ensure_remove_chain('ov4fake-fw-uuid'),
                 mock.call.ensure_remove_chain('fwaas-default-policy'),
                 mock.call.add_chain('fwaas-default-policy'),
                 mock.call.add_rule('fwaas-default-policy', '-j DROP')]
        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
| apache-2.0 |
keedio/hue | desktop/core/ext-py/pycrypto-2.6.1/pct-speedtest.py | 36 | 7848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pct-speedtest.py: Speed test for the Python Cryptography Toolkit
#
# Written in 2009 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import time
import os
import sys
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, ARC2, ARC4, Blowfish, CAST, DES3, DES, XOR
from Crypto.Hash import MD2, MD4, MD5, SHA256, SHA
try:
from Crypto.Hash import RIPEMD
except ImportError: # Some builds of PyCrypto don't have the RIPEMD module
RIPEMD = None
class Benchmark:
    """Times PyCrypto primitives (pubkey setup, ciphers, hashes) and prints
    one result line per test to stdout."""
    def __init__(self):
        # lazily-grown cache of random bytes shared by all tests
        self.__random_data = None
    def random_keys(self, bytes):
        """Return random keys of the specified number of bytes.

        If this function has been called before with the same number of bytes,
        cached keys are used instead of randomly generating new ones.
        """
        return self.random_blocks(bytes, 10**5) # 100k
    def random_blocks(self, bytes_per_block, blocks):
        """Return *blocks* consecutive slices of cached random data, each
        *bytes_per_block* long."""
        bytes = bytes_per_block * blocks
        data = self.random_data(bytes)
        retval = []
        for i in xrange(blocks):
            p = i * bytes_per_block
            retval.append(data[p:p+bytes_per_block])
        return retval
    def random_data(self, bytes):
        """Return *bytes* random bytes, growing and reusing a shared cache so
        repeated calls are cheap."""
        if self.__random_data is None:
            self.__random_data = self._random_bytes(bytes)
            return self.__random_data
        elif bytes == len(self.__random_data):
            return self.__random_data
        elif bytes < len(self.__random_data):
            return self.__random_data[:bytes]
        else:
            # extend the cache just enough to satisfy the request
            self.__random_data += self._random_bytes(bytes - len(self.__random_data))
            return self.__random_data
    def _random_bytes(self, b):
        # raw entropy source for the cache
        return os.urandom(b)
    def announce_start(self, test_name):
        """Print the test name (no newline) before timing starts."""
        sys.stdout.write("%s: " % (test_name,))
        sys.stdout.flush()
    def announce_result(self, value, units):
        """Print the measured rate in the given units."""
        sys.stdout.write("%.2f %s\n" % (value, units))
        sys.stdout.flush()
    def test_pubkey_setup(self, pubkey_name, module, key_bytes):
        """Time public-key generation (5 iterations)."""
        self.announce_start("%s pubkey setup" % (pubkey_name,))
        keys = self.random_keys(key_bytes)[:5]
        t0 = time.time()
        for k in keys:
            module.generate(key_bytes*8)
        t = time.time()
        pubkey_setups_per_second = len(keys) / (t - t0)
        self.announce_result(pubkey_setups_per_second, "Keys/sec")
    def test_key_setup(self, cipher_name, module, key_bytes, mode):
        """Time cipher key schedule setup; mode=None means a stream cipher."""
        self.announce_start("%s key setup" % (cipher_name,))
        # Generate random keys for use with the tests
        keys = self.random_keys(key_bytes)
        # Perform key setups
        if mode is None:
            t0 = time.time()
            for k in keys:
                module.new(k)
            t = time.time()
        else:
            t0 = time.time()
            for k in keys:
                module.new(k, module.MODE_ECB)
            t = time.time()
        key_setups_per_second = len(keys) / (t - t0)
        self.announce_result(key_setups_per_second/1000, "kKeys/sec")
    def test_encryption(self, cipher_name, module, key_bytes, mode):
        """Time bulk encryption of 1000 x 16 KiB blocks."""
        self.announce_start("%s encryption" % (cipher_name,))
        # Generate random keys for use with the tests
        rand = self.random_data(key_bytes + module.block_size)
        key, iv = rand[:key_bytes], rand[key_bytes:]
        blocks = self.random_blocks(16384, 1000)
        if mode is None:
            cipher = module.new(key)
        else:
            cipher = module.new(key, mode, iv)
        # Perform encryption
        t0 = time.time()
        for b in blocks:
            cipher.encrypt(b)
        t = time.time()
        encryption_speed = (len(blocks) * len(blocks[0])) / (t - t0)
        self.announce_result(encryption_speed / 10**6, "MBps")
    def test_hash_small(self, hash_name, module):
        """Time hashing of many digest-sized inputs (per-hash overhead)."""
        self.announce_start("%s (%d-byte inputs)" % (hash_name, module.digest_size))
        blocks = self.random_blocks(module.digest_size, 10000)
        # Initialize hashes
        t0 = time.time()
        for b in blocks:
            module.new(b).digest()
        t = time.time()
        hashes_per_second = len(blocks) / (t - t0)
        self.announce_result(hashes_per_second / 1000, "kHashes/sec")
    def test_hash_large(self, hash_name, module):
        """Time streaming one large input through a single hash object."""
        self.announce_start("%s (single large input)" % (hash_name,))
        blocks = self.random_blocks(16384, 10000)
        # Perform hashing
        t0 = time.time()
        h = module.new()
        for b in blocks:
            h.update(b)
        h.digest()
        t = time.time()
        hash_speed = len(blocks) * len(blocks[0]) / (t - t0)
        self.announce_result(hash_speed / 10**6, "MBps")
    def run(self):
        """Run every benchmark: pubkeys, block ciphers (all modes), stream
        ciphers, and hashes (RIPEMD only when the build provides it)."""
        pubkey_specs = [
            ("RSA(1024)", RSA, 1024/8),
            ("RSA(2048)", RSA, 2048/8),
            ("RSA(4096)", RSA, 4096/8),
            ]
        block_specs = [
            ("DES", DES, 8),
            ("DES3", DES3, 24),
            ("AES128", AES, 16),
            ("AES192", AES, 24),
            ("AES256", AES, 32),
            ("Blowfish(256)", Blowfish, 32),
            ("CAST(40)", CAST, 5),
            ("CAST(80)", CAST, 10),
            ("CAST(128)", CAST, 16),
            ]
        stream_specs = [
            ("ARC2(128)", ARC2, 16),
            ("ARC4(128)", ARC4, 16),
            ("XOR(24)", XOR, 3),
            ("XOR(256)", XOR, 32),
            ]
        hash_specs = [
            ("MD2", MD2),
            ("MD4", MD4),
            ("MD5", MD5),
            ("SHA", SHA),
            ("SHA256", SHA256),
            ]
        if RIPEMD is not None:
            hash_specs += [("RIPEMD", RIPEMD)]
        for pubkey_name, module, key_bytes in pubkey_specs:
            self.test_pubkey_setup(pubkey_name, module, key_bytes)
        for cipher_name, module, key_bytes in block_specs:
            self.test_key_setup(cipher_name, module, key_bytes, module.MODE_CBC)
            self.test_encryption("%s-CBC" % (cipher_name,), module, key_bytes, module.MODE_CBC)
            self.test_encryption("%s-CFB-8" % (cipher_name,), module, key_bytes, module.MODE_CFB)
            self.test_encryption("%s-OFB" % (cipher_name,), module, key_bytes, module.MODE_OFB)
            self.test_encryption("%s-ECB" % (cipher_name,), module, key_bytes, module.MODE_ECB)
            self.test_encryption("%s-OPENPGP" % (cipher_name,), module, key_bytes, module.MODE_OPENPGP)
        for cipher_name, module, key_bytes in stream_specs:
            self.test_key_setup(cipher_name, module, key_bytes, None)
            self.test_encryption(cipher_name, module, key_bytes, None)
        for hash_name, module in hash_specs:
            self.test_hash_small(hash_name, module)
            self.test_hash_large(hash_name, module)
if __name__ == '__main__':
    Benchmark().run()  # run the full benchmark suite when executed directly
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
roderickmackenzie/gpvdm | gpvdm_gui/gui/check_lib_in_bash_rc.py | 1 | 1767 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package check_lib_in_bash_rc
# Check the bash rc for the lib path
#
import sys
import os
#import shutil
from pathlib import Path
from win_lin import running_on_linux
from inp import inp
from cal_path import get_home_path
from cal_path import get_exe_path
def check_lib_in_bash_rc():
	"""Ensure ~/.bashrc exports LD_LIBRARY_PATH including the gpvdm exe dir.

	On Linux: if a gpvdm LD_LIBRARY_PATH export already points at the
	current exe path, do nothing.  If a stale gpvdm export exists, rewrite
	that line in place; otherwise append a new export.  No-op on other
	platforms or when ~/.bashrc cannot be loaded.
	"""
	if not running_on_linux():
		return
	f = inp()
	if f.load(os.path.join(get_home_path(), ".bashrc")) == False:
		return
	gpvdm_installed = -1
	for i in range(len(f.lines)):
		if f.lines[i].startswith("export LD_LIBRARY_PATH") and f.lines[i].count("gpvdm") != 0:
			gpvdm_installed = i
			if f.lines[i].endswith(get_exe_path()):
				return  # already up to date, nothing to write
	line = "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:" + get_exe_path()
	if gpvdm_installed == -1:
		f.lines.append(line)
	else:
		# bug fix: replace the matched gpvdm line, not f.lines[i] - the loop
		# variable points at the *last* line of the file after the scan
		f.lines[gpvdm_installed] = line
	f.save()
| gpl-2.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/keras-0.2.0/tests/auto/keras/test_constraints.py | 7 | 2448 | import unittest
import numpy as np
from numpy.testing import assert_allclose
from theano import tensor as T
class TestConstraints(unittest.TestCase):
    """Tests for keras.constraints (maxnorm, nonneg, identity, unitnorm)."""
    def setUp(self):
        # fixed seed so the 100x100 array of values in [-50, 50) is reproducible
        self.some_values = [0.1, 0.5, 3, 8, 1e-7]
        np.random.seed(3537)
        self.example_array = np.random.random((100, 100)) * 100. - 50.
        self.example_array[0, 0] = 0. # 0 could possibly cause trouble
    def test_maxnorm(self):
        """maxnorm(m) must bound every constrained value below m."""
        from keras.constraints import maxnorm
        for m in self.some_values:
            norm_instance = maxnorm(m)
            normed = norm_instance(self.example_array)
            assert (np.all(normed.eval() < m))
        # a more explicit example
        norm_instance = maxnorm(2.0)
        x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
        x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2./np.sqrt(3), 2./np.sqrt(3), 2./np.sqrt(3)]]).T
        x_normed_actual = norm_instance(x).eval()
        assert_allclose(x_normed_actual, x_normed_target)
    def test_nonneg(self):
        """nonneg clips negatives, so each row's minimum becomes exactly 0."""
        from keras.constraints import nonneg
        nonneg_instance = nonneg()
        normed = nonneg_instance(self.example_array)
        assert (np.all(np.min(normed.eval(), axis=1) == 0.))
    def test_identity(self):
        """identity must return its input unchanged."""
        from keras.constraints import identity
        identity_instance = identity()
        normed = identity_instance(self.example_array)
        assert (np.all(normed == self.example_array))
    def test_identity_oddballs(self):
        """
        test the identity constraint on some more exotic input.
        this does not need to pass for the desired real life behaviour,
        but it should in the current implementation.
        """
        from keras.constraints import identity
        identity_instance = identity()
        oddball_examples = ["Hello", [1], -1, None]
        assert(oddball_examples == identity_instance(oddball_examples))
    def test_unitnorm(self):
        """unitnorm must make every row's L2 norm equal to 1."""
        from keras.constraints import unitnorm
        unitnorm_instance = unitnorm()
        normalized = unitnorm_instance(self.example_array)
        norm_of_normalized = np.sqrt(np.sum(normalized.eval()**2, axis=1))
        difference = norm_of_normalized - 1. # in the unit norm constraint, it should be equal to 1.
        largest_difference = np.max(np.abs(difference))
        self.assertAlmostEqual(largest_difference, 0.)
if __name__ == '__main__':
    unittest.main()  # run all constraint tests when executed directly
| bsd-3-clause |
sbuss/voteswap | lib/django/middleware/cache.py | 372 | 7303 | """
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key, get_max_age, has_vary_header, learn_cache_key,
patch_response_headers,
)
class UpdateCacheMiddleware(object):
    """
    Response-phase cache middleware that updates the cache if the response is
    cacheable.
    Must be used as part of the two-part update/fetch cache middleware.
    UpdateCacheMiddleware must be the first piece of middleware in
    MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
    """
    def __init__(self):
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]
    def _should_update_cache(self, request, response):
        # FetchFromCacheMiddleware sets _cache_update_cache during the
        # request phase; absence of the flag means "do not cache".
        return hasattr(request, '_cache_update_cache') and request._cache_update_cache
    def process_response(self, request, response):
        """Sets the cache, if needed."""
        if not self._should_update_cache(request, response):
            # We don't need to update the cache, just return.
            return response
        # streaming responses have no materialized content to store
        if response.streaming or response.status_code != 200:
            return response
        # Don't cache responses that set a user-specific (and maybe security
        # sensitive) cookie in response to a cookie-less request.
        if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
            return response
        # Try to get the timeout from the "max-age" section of the "Cache-
        # Control" header before reverting to using the default cache_timeout
        # length.
        timeout = get_max_age(response)
        if timeout is None:
            timeout = self.cache_timeout
        elif timeout == 0:
            # max-age was set to 0, don't bother caching.
            return response
        # adds ETag, Last-Modified, Expires and Cache-Control headers
        patch_response_headers(response, timeout)
        if timeout:
            cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
            if hasattr(response, 'render') and callable(response.render):
                # TemplateResponse: store only after rendering has happened
                response.add_post_render_callback(
                    lambda r: self.cache.set(cache_key, r, timeout)
                )
            else:
                self.cache.set(cache_key, response, timeout)
        return response
class FetchFromCacheMiddleware(object):
    """
    Request-phase cache middleware that fetches a page from the cache.
    Must be used as part of the two-part update/fetch cache middleware.
    FetchFromCacheMiddleware must be the last piece of middleware in
    MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
    """
    def __init__(self):
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]
    def process_request(self, request):
        """
        Checks whether the page is already cached and returns the cached
        version if available.

        Also sets request._cache_update_cache, which tells
        UpdateCacheMiddleware whether to store the eventual response.
        """
        if request.method not in ('GET', 'HEAD'):
            request._cache_update_cache = False
            return None # Don't bother checking the cache.
        # try and get the cached GET response
        cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
        if cache_key is None:
            request._cache_update_cache = True
            return None # No cache information available, need to rebuild.
        response = self.cache.get(cache_key)
        # if it wasn't found and we are looking for a HEAD, try looking just for that
        if response is None and request.method == 'HEAD':
            cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
            response = self.cache.get(cache_key)
        if response is None:
            request._cache_update_cache = True
            return None # No cache information available, need to rebuild.
        # hit, return cached response
        request._cache_update_cache = False
        return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
    """
    Cache middleware that provides basic behavior for many simple sites.
    Also used as the hook point for the cache decorator, which is generated
    using the decorator-from-middleware utility.
    """
    def __init__(self, cache_timeout=None, **kwargs):
        # Three-way fallback for each option: an argument passed as None
        # means "use the system-wide default", while an argument not passed
        # at all means "use the middleware settings".
        if 'key_prefix' in kwargs:
            key_prefix = kwargs['key_prefix']
            if key_prefix is None:
                key_prefix = ''
        else:
            key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.key_prefix = key_prefix
        if 'cache_alias' in kwargs:
            cache_alias = kwargs['cache_alias']
            if cache_alias is None:
                cache_alias = DEFAULT_CACHE_ALIAS
        else:
            cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache_alias = cache_alias
        if cache_timeout is None:
            cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.cache_timeout = cache_timeout
        self.cache = caches[self.cache_alias]
| mit |
Lh4cKg/sl4a | python/src/Tools/pynche/TypeinViewer.py | 94 | 6102 | """TypeinViewer class.
The TypeinViewer is what you see at the lower right of the main Pynche
widget. It contains three text entry fields, one each for red, green, blue.
Input into these windows is highly constrained; it only allows you to enter
values that are legal for a color axis. This usually means 0-255 for decimal
input and 0x0 - 0xff for hex input.
You can toggle whether you want to view and input the values in either decimal
or hex by clicking on Hexadecimal. By clicking on Update while typing, the
color selection will be made on every change to the text field. Otherwise,
you must hit Return or Tab to select the color.
"""
from Tkinter import *
class TypeinViewer:
def __init__(self, switchboard, master=None):
# non-gui ivars
self.__sb = switchboard
optiondb = switchboard.optiondb()
self.__hexp = BooleanVar()
self.__hexp.set(optiondb.get('HEXTYPE', 0))
self.__uwtyping = BooleanVar()
self.__uwtyping.set(optiondb.get('UPWHILETYPE', 0))
# create the gui
self.__frame = Frame(master, relief=RAISED, borderwidth=1)
self.__frame.grid(row=3, column=1, sticky='NSEW')
# Red
self.__xl = Label(self.__frame, text='Red:')
self.__xl.grid(row=0, column=0, sticky=E)
subframe = Frame(self.__frame)
subframe.grid(row=0, column=1)
self.__xox = Label(subframe, text='0x')
self.__xox.grid(row=0, column=0, sticky=E)
self.__xox['font'] = 'courier'
self.__x = Entry(subframe, width=3)
self.__x.grid(row=0, column=1)
self.__x.bindtags(self.__x.bindtags() + ('Normalize', 'Update'))
self.__x.bind_class('Normalize', '<Key>', self.__normalize)
self.__x.bind_class('Update' , '<Key>', self.__maybeupdate)
# Green
self.__yl = Label(self.__frame, text='Green:')
self.__yl.grid(row=1, column=0, sticky=E)
subframe = Frame(self.__frame)
subframe.grid(row=1, column=1)
self.__yox = Label(subframe, text='0x')
self.__yox.grid(row=0, column=0, sticky=E)
self.__yox['font'] = 'courier'
self.__y = Entry(subframe, width=3)
self.__y.grid(row=0, column=1)
self.__y.bindtags(self.__y.bindtags() + ('Normalize', 'Update'))
# Blue
self.__zl = Label(self.__frame, text='Blue:')
self.__zl.grid(row=2, column=0, sticky=E)
subframe = Frame(self.__frame)
subframe.grid(row=2, column=1)
self.__zox = Label(subframe, text='0x')
self.__zox.grid(row=0, column=0, sticky=E)
self.__zox['font'] = 'courier'
self.__z = Entry(subframe, width=3)
self.__z.grid(row=0, column=1)
self.__z.bindtags(self.__z.bindtags() + ('Normalize', 'Update'))
# Update while typing?
self.__uwt = Checkbutton(self.__frame,
text='Update while typing',
variable=self.__uwtyping)
self.__uwt.grid(row=3, column=0, columnspan=2, sticky=W)
# Hex/Dec
self.__hex = Checkbutton(self.__frame,
text='Hexadecimal',
variable=self.__hexp,
command=self.__togglehex)
self.__hex.grid(row=4, column=0, columnspan=2, sticky=W)
    def __togglehex(self, event=None):
        # Switch the three entry fields between hexadecimal and decimal
        # display: update the "0x" prefix labels and re-render the current
        # colour in the newly selected base.
        red, green, blue = self.__sb.current_rgb()
        if self.__hexp.get():
            label = '0x'
        else:
            label = '  '
        self.__xox['text'] = label
        self.__yox['text'] = label
        self.__zox['text'] = label
        self.update_yourself(red, green, blue)
    def __normalize(self, event=None):
        # Keystroke validator: reject input that would make the field value
        # illegal (unparsable in the current base, or outside 0..255) and
        # re-canonicalize the text while keeping the insertion cursor put.
        ew = event.widget
        contents = ew.get()
        icursor = ew.index(INSERT)
        if contents and contents[0] in 'xX' and self.__hexp.get():
            # Allow typing "x1f" as shorthand for "0x1f" in hex mode.
            contents = '0' + contents
        # Figure out the contents in the current base.
        try:
            if self.__hexp.get():
                v = int(contents, 16)
            else:
                v = int(contents)
        except ValueError:
            v = None
        # If value is not legal, or empty, delete the last character inserted
        # and ring the bell.  Don't ring the bell if the field is empty (it'll
        # just equal zero.
        if v is None:
            pass
        elif v < 0 or v > 255:
            i = ew.index(INSERT)
            if event.char:
                # Drop the character just typed and step the cursor back.
                contents = contents[:i-1] + contents[i:]
                icursor -= 1
            ew.bell()
        elif self.__hexp.get():
            contents = hex(v)[2:]
        else:
            contents = int(v)
        ew.delete(0, END)
        ew.insert(0, contents)
        ew.icursor(icursor)
    def __maybeupdate(self, event=None):
        # Push the colour on every keystroke only when "update while
        # typing" is enabled; otherwise wait for Return or Tab.
        if self.__uwtyping.get() or event.keysym in ('Return', 'Tab'):
            self.__update(event)
    def __update(self, event=None):
        # Read all three entry fields (an empty field counts as zero),
        # parse them in the currently selected base, and broadcast the new
        # colour through the switchboard.
        redstr = self.__x.get() or '0'
        greenstr = self.__y.get() or '0'
        bluestr = self.__z.get() or '0'
        if self.__hexp.get():
            base = 16
        else:
            base = 10
        red, green, blue = [int(x, base) for x in (redstr, greenstr, bluestr)]
        self.__sb.update_views(red, green, blue)
    def update_yourself(self, red, green, blue):
        # Refresh the three entry fields with the given colour, formatted
        # in the selected base, preserving each field's cursor position.
        if self.__hexp.get():
            sred, sgreen, sblue = [hex(x)[2:] for x in (red, green, blue)]
        else:
            sred, sgreen, sblue = red, green, blue
        x, y, z = self.__x, self.__y, self.__z
        # Remember the insertion cursors before clobbering the contents.
        xicursor = x.index(INSERT)
        yicursor = y.index(INSERT)
        zicursor = z.index(INSERT)
        x.delete(0, END)
        y.delete(0, END)
        z.delete(0, END)
        x.insert(0, sred)
        y.insert(0, sgreen)
        z.insert(0, sblue)
        x.icursor(xicursor)
        y.icursor(yicursor)
        z.icursor(zicursor)
    def hexp_var(self):
        # Expose the hex/decimal BooleanVar so other widgets can observe it.
        return self.__hexp
    def save_options(self, optiondb):
        # Persist this viewer's two settings into the option database.
        optiondb['HEXTYPE'] = self.__hexp.get()
        optiondb['UPWHILETYPE'] = self.__uwtyping.get()
| apache-2.0 |
mrGeen/eden | modules/tests/hrm/add_staff_to_office.py | 1 | 2564 | """ Sahana Eden Module Automated Tests - HRM006 Add Staff To Office
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
from tests import *
#import unittest, re, time
class AddStaffToOffice(SeleniumUnitTest):
    """Browser test: assign an existing person record to an office."""

    def test_hrm006_add_staff_to_office(self):
        """
        @case: HRM006
        @description: Add a premade made staff to an Office
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        """
        browser = self.browser
        config = self.config
        self.login(account="admin", nexturl="org/office")
        # Open the "AP Zone" office record via the data table filter.
        self.dt_filter("AP Zone")
        self.dt_action()
        url = browser.current_url
        url_parts = url.split("/")
        # The record id is normally the second-to-last URL component
        # (.../office/<id>/<method>); fall back to the last component.
        try:
            org_id = int(url_parts[-2])
        except:
            org_id = int(url_parts[-1])
        browser.get("%s/org/office/%s/human_resource" % (config.url, org_id))
        self.browser.find_element_by_id("show-add-btn").click()
        self.browser.find_element_by_id("select_from_registry").click()
        # Fill the add-staff form, picking the person via autocomplete.
        self.create("hrm_human_resource",
                    [( "person_id",
                       "Liliana Otilia",
                       "autocomplete")
                     ]
                    )
| mit |
ahnqirage/spark | examples/src/main/python/ml/decision_tree_classification_example.py | 123 | 3003 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("DecisionTreeClassificationExample")\
        .getOrCreate()

    # $example on$
    # Load the data stored in LIBSVM format as a DataFrame.
    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    # Index labels, adding metadata to the label column.
    # Fit on whole dataset to include all labels in index.
    labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
    # Automatically identify categorical features, and index them.
    # We specify maxCategories so features with > 4 distinct values are treated as continuous.
    featureIndexer =\
        VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)

    # Split the data into training and test sets (30% held out for testing)
    # NOTE: the split is not seeded, so the reported accuracy varies per run.
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a DecisionTree model.
    dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")

    # Chain indexers and tree in a Pipeline
    pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])

    # Train model.  This also runs the indexers.
    model = pipeline.fit(trainingData)

    # Make predictions.
    predictions = model.transform(testData)

    # Select example rows to display.
    predictions.select("prediction", "indexedLabel", "features").show(5)

    # Select (prediction, true label) and compute test error
    evaluator = MulticlassClassificationEvaluator(
        labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
    accuracy = evaluator.evaluate(predictions)
    print("Test Error = %g " % (1.0 - accuracy))

    # stages[2] is the DecisionTreeClassifier fitted model in the pipeline.
    treeModel = model.stages[2]
    # summary only
    print(treeModel)
    # $example off$

    spark.stop()
| apache-2.0 |
notmyname/swift | test/unit/common/test_header_key_dict.py | 7 | 4780 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.header_key_dict import HeaderKeyDict
class TestHeaderKeyDict(unittest.TestCase):
    """Unit tests for HeaderKeyDict's case-insensitive mapping behaviour."""

    def test_case_insensitive(self):
        # All spellings of a header name address the same entry.
        headers = HeaderKeyDict()
        headers['Content-Length'] = 0
        headers['CONTENT-LENGTH'] = 10
        headers['content-length'] = 20
        self.assertEqual(headers['Content-Length'], '20')
        self.assertEqual(headers['content-length'], '20')
        self.assertEqual(headers['CONTENT-LENGTH'], '20')

    def test_setdefault(self):
        headers = HeaderKeyDict()

        # it gets set
        headers.setdefault('x-rubber-ducky', 'the one')
        self.assertEqual(headers['X-Rubber-Ducky'], 'the one')

        # it has the right return value
        ret = headers.setdefault('x-boat', 'dinghy')
        self.assertEqual(ret, 'dinghy')

        ret = headers.setdefault('x-boat', 'yacht')
        self.assertEqual(ret, 'dinghy')

        # shouldn't crash
        headers.setdefault('x-sir-not-appearing-in-this-request', None)

    def test_del_contains(self):
        headers = HeaderKeyDict()
        headers['Content-Length'] = 0
        self.assertIn('Content-Length', headers)
        del headers['Content-Length']
        self.assertNotIn('Content-Length', headers)

    def test_update(self):
        # update() accepts both a mapping and an iterable of pairs.
        headers = HeaderKeyDict()
        headers.update({'Content-Length': '0'})
        headers.update([('Content-Type', 'text/plain')])
        self.assertEqual(headers['Content-Length'], '0')
        self.assertEqual(headers['Content-Type'], 'text/plain')

    def test_set_none(self):
        # Assigning None removes the entry instead of storing "None".
        headers = HeaderKeyDict()
        headers['test'] = None
        self.assertNotIn('test', headers)
        headers['test'] = 'something'
        self.assertEqual('something', headers['test'])  # sanity check
        headers['test'] = None
        self.assertNotIn('test', headers)

    def test_init_from_dict(self):
        headers = HeaderKeyDict({'Content-Length': 20,
                                 'Content-Type': 'text/plain'})
        self.assertEqual('20', headers['Content-Length'])
        self.assertEqual('text/plain', headers['Content-Type'])
        # Copy-construction from another HeaderKeyDict also works.
        headers = HeaderKeyDict(headers)
        self.assertEqual('20', headers['Content-Length'])
        self.assertEqual('text/plain', headers['Content-Type'])

    def test_set(self):
        # Every stored value comes back as its str() form.
        # mappings = ((<tuple of input vals>, <expected output val>), ...)
        mappings = (((1.618, '1.618', b'1.618', u'1.618'), '1.618'),
                    ((20, '20', b'20', u'20'), '20'),
                    ((True, 'True', b'True', u'True'), 'True'),
                    ((False, 'False', b'False', u'False'), 'False'))
        for vals, expected in mappings:
            for val in vals:
                headers = HeaderKeyDict(test=val)
                actual = headers['test']
                self.assertEqual(expected, actual,
                                 'Expected %s but got %s for val %s' %
                                 (expected, actual, val))
                self.assertIsInstance(
                    actual, str,
                    'Expected type str but got %s for val %s of type %s' %
                    (type(actual), val, type(val)))

    def test_get(self):
        headers = HeaderKeyDict()
        headers['content-length'] = 20
        self.assertEqual(headers.get('CONTENT-LENGTH'), '20')
        self.assertIsNone(headers.get('something-else'))
        self.assertEqual(headers.get('something-else', True), True)

    def test_keys(self):
        # Keys are reported in canonical (title-cased) form.
        headers = HeaderKeyDict()
        headers['content-length'] = 20
        headers['cOnTent-tYpe'] = 'text/plain'
        headers['SomeThing-eLse'] = 'somevalue'
        self.assertEqual(
            set(headers.keys()),
            set(('Content-Length', 'Content-Type', 'Something-Else')))

    def test_pop(self):
        headers = HeaderKeyDict()
        headers['content-length'] = 20
        headers['cOntent-tYpe'] = 'text/plain'
        self.assertEqual(headers.pop('content-Length'), '20')
        self.assertEqual(headers.pop('Content-type'), 'text/plain')
        self.assertEqual(headers.pop('Something-Else', 'somevalue'),
                         'somevalue')
| apache-2.0 |
Achuth17/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature before running a SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
# (np.random is unseeded here, so the noise features differ between runs)
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))

###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)

clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])

###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)

for percentile in percentiles:
    clf.set_params(anova__percentile=percentile)
    # Compute cross-validation score using all CPUs
    # NOTE(review): n_jobs=1 actually runs on a single CPU, contradicting
    # the comment above -- confirm which is intended.
    this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
    score_means.append(this_scores.mean())
    score_stds.append(this_scores.std())

plt.errorbar(percentiles, score_means, np.array(score_stds))

plt.title(
    'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')

plt.axis('tight')
plt.show()
| bsd-3-clause |
tanglu-org/merge-o-matic | manual-status.py | 1 | 15491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# manual-status.py - output status of manual merges
#
# Copyright © 2008 Canonical Ltd.
# Author: Scott James Remnant <scott@ubuntu.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, with_statement

import bz2
import os
import re
import subprocess
import time
from rfc822 import parseaddr

from momlib import *
# Order of priorities
PRIORITY = [ "unknown", "required", "important", "standard", "optional",
"extra" ]
COLOURS = [ "#ff8080", "#ffb580", "#ffea80", "#dfff80", "#abff80", "#80ff8b" ]
# Sections
SECTIONS = [ "new", "updated" ]
def options(parser):
    """Register the command-line options understood by this tool."""
    # Single-valued string options: (short, long, metavar, default, help).
    simple_opts = (
        ("-D", "--source-distro", "DISTRO", SRC_DISTRO,
         "Source distribution"),
        ("-S", "--source-suite", "SUITE", SRC_DIST,
         "Source suite (aka distrorelease)"),
        ("-d", "--dest-distro", "DISTRO", OUR_DISTRO,
         "Destination distribution"),
        ("-s", "--dest-suite", "SUITE", OUR_DIST,
         "Destination suite (aka distrorelease)"),
    )
    for short_flag, long_flag, metavar, default, help_text in simple_opts:
        parser.add_option(short_flag, long_flag, type="string",
                          metavar=metavar, default=default, help=help_text)

    # -c/--component may be given several times; values accumulate.
    parser.add_option("-c", "--component", type="string", metavar="COMPONENT",
                      action="append",
                      help="Process only these destination components")
def main(options, args):
    """Build the per-component "manual merge" status outputs.

    For every component of the destination distro (optionally filtered by
    --component), compare each destination source package against the
    source distro.  Packages for which a common merge base exists are
    skipped (they can be merged automatically); the rest are collected and
    written out as an HTML page, a JSON dump and a plain status file.
    """
    src_distro = options.source_distro
    src_dist = options.source_suite
    our_distro = options.dest_distro
    our_dist = options.dest_suite

    # For each package in the destination distribution, find out whether
    # there's an open merge, and if so add an entry to the table for it.
    for our_component in DISTROS[our_distro]["components"]:
        if options.component is not None \
               and our_component not in options.component:
            continue

        merges = []
        for our_source in get_sources(our_distro, our_dist, our_component):
            try:
                package = our_source["Package"]
                our_version = Version(our_source["Version"])
                our_pool_source = get_pool_source(our_distro, package,
                                                 our_version)
                logging.debug("%s: %s is %s", package, our_distro, our_version)
            except IndexError:
                continue

            try:
                (src_source, src_version, src_pool_source) \
                    = get_same_source(src_distro, src_dist, package)
                logging.debug("%s: %s is %s", package, src_distro, src_version)
            except IndexError:
                continue

            try:
                base = get_base(our_pool_source)
                base_source = get_nearest_source(package, base)
                base_version = Version(base_source["Version"])
                logging.debug("%s: base is %s (%s wanted)",
                              package, base_version, base)
                # A merge base exists, so this package can be merged
                # automatically; it does not belong on the manual page.
                continue
            except IndexError:
                pass

            try:
                priority_idx = PRIORITY.index(our_source["Priority"])
            except (KeyError, ValueError):
                # KeyError: the source paragraph has no "Priority" field.
                # ValueError: the priority is not in PRIORITY (list.index
                # raises ValueError, which the original code failed to
                # catch and would have crashed on).
                priority_idx = 0

            # Read the .changes file (possibly bzip2-compressed) to learn
            # who last uploaded the package and to which distribution.
            filename = changes_file(our_distro, our_source)
            if os.path.isfile(filename):
                changes = open(filename)
            elif os.path.isfile(filename + ".bz2"):
                changes = bz2.BZ2File(filename + ".bz2")
            else:
                changes = None

            if changes is not None:
                info = ControlFile(fileobj=changes,
                                   multi_para=False, signed=False).para
                user = info["Changed-By"]
                uploaded = info["Distribution"] == OUR_DIST
            else:
                user = None
                uploaded = False

            uploader = get_uploader(our_distro, our_source)

            if uploaded:
                section = "updated"
            else:
                section = "new"

            merges.append((section, priority_idx, package, user, uploader,
                           our_source, our_version, src_version))

        write_status_page(our_component, merges, our_distro, src_distro)
        write_status_json(our_component, merges, our_distro, src_distro)

        status_file = "%s/merges/tomerge-%s-manual" % (ROOT, our_component)
        remove_old_comments(status_file, merges)
        write_status_file(status_file, merges)
def write_status_page(component, merges, left_distro, right_distro):
    """Write out the manual merge status page."""
    # Tuple ordering (section, priority, package, ...) gives the page its
    # grouping and sort order.
    merges.sort()

    status_file = "%s/merges/%s-manual.html" % (ROOT, component)
    with open(status_file + ".new", "w") as status:
        print("<html>", file=status)
        print(file=status)
        print("<head>", file=status)
        print("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">", file=status)
        print("<title>Tanglu Merge-o-Matic: %s manual</title>" % component,
              file=status)
        print("<style>", file=status)
        print("img#tanglu {", file=status)
        print(" border: 0;", file=status)
        print("}", file=status)
        print("h1 {", file=status)
        print(" padding-top: 0.5em;", file=status)
        print(" font-family: sans-serif;", file=status)
        print(" font-size: 2.0em;", file=status)
        print(" font-weight: bold;", file=status)
        print("}", file=status)
        print("h2 {", file=status)
        print(" padding-top: 0.5em;", file=status)
        print(" font-family: sans-serif;", file=status)
        print(" font-size: 1.5em;", file=status)
        print(" font-weight: bold;", file=status)
        print("}", file=status)
        print("p, td {", file=status)
        print(" font-family: sans-serif;", file=status)
        print(" margin-bottom: 0;", file=status)
        print("}", file=status)
        print("li {", file=status)
        print(" font-family: sans-serif;", file=status)
        print(" margin-bottom: 1em;", file=status)
        print("}", file=status)
        print("tr.first td {", file=status)
        print(" border-top: 2px solid white;", file=status)
        print("}", file=status)
        print("</style>", file=status)
        # mod_python PSP directive, evaluated when the page is served.
        print("<% from momlib import * %>", file=status)
        print("</head>", file=status)
        print("<body>", file=status)
        print("<img src=\".img/tanglulogo-100.png\" id=\"tanglu\">",
              file=status)
        print("<h1>Tanglu Merge-o-Matic: %s manual</h1>" % component,
              file=status)

        # Per-section summary links at the top of the page.
        for section in SECTIONS:
            section_merges = [ m for m in merges if m[0] == section ]
            print("<p><a href=\"#%s\">%s %s merges</a></p>" %
                  (section, len(section_merges), section), file=status)

        print("<% comment = get_comments() %>", file=status)

        # One table per section.
        for section in SECTIONS:
            section_merges = [ m for m in merges if m[0] == section ]
            print("<h2 id=\"%s\">%s Merges</h2>" % (section, section.title()),
                  file=status)

            do_table(status, section_merges, left_distro, right_distro, component)

        print("<p><small>Generated at %s.</small></p>" %
              time.strftime('%Y-%m-%d %H:%M:%S %Z'), file=status)
        print("</body>", file=status)
        print("</html>", file=status)

    # Atomic replace so readers never see a half-written page.
    os.rename(status_file + ".new", status_file)
def get_uploader(distro, source):
    """Obtain the uploader from the dsc file signature.

    Runs ``gpg --verify`` on the package's .dsc file and extracts the name
    from the "Good signature from" line.  Returns None when the source
    lists no .dsc file or the signature output cannot be parsed.
    """
    for md5sum, size, name in files(source):
        if name.endswith(".dsc"):
            dsc_file = name
            break
    else:
        return None

    filename = "%s/pool/%s/%s/%s/%s" \
        % (ROOT, distro, pathhash(source["Package"]), source["Package"],
           dsc_file)

    # Invoke gpg with an argument list instead of a shell command string:
    # the previous "gpg --verify %s" form broke on (and could be subverted
    # by) filenames containing spaces or shell metacharacters.  gpg writes
    # its signature report to stderr.
    process = subprocess.Popen(["gpg", "--verify", filename],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stderr_lines = process.communicate()[1].splitlines()
    try:
        # Second stderr line looks like:
        #   gpg: Good signature from "Name <email>"
        return stderr_lines[1].split("Good signature from")[1].strip().strip("\"")
    except IndexError:
        return None
def do_table(status, merges, left_distro, right_distro, component):
    """Emit one HTML <table> of merges to the *status* file object.

    Each merge occupies two rows coloured by priority: package/uploader on
    the first row, binaries and versions on the second, plus PSP-rendered
    comment-form and bug-link cells.
    """
    print("<table cellspacing=0>", file=status)
    print("<tr bgcolor=#d0d0d0>", file=status)
    print("<td rowspan=2><b>Package</b></td>", file=status)
    print("<td colspan=2><b>Last Uploader</b></td>", file=status)
    print("<td rowspan=2><b>Comment</b></td>", file=status)
    print("<td rowspan=2><b>Bug</b></td>", file=status)
    print("</tr>", file=status)
    print("<tr bgcolor=#d0d0d0>", file=status)
    print("<td><b>%s Version</b></td>" % left_distro.title(), file=status)
    print("<td><b>%s Version</b></td>" % right_distro.title(), file=status)
    print("</tr>", file=status)

    # NOTE: the first tuple element is actually the section name
    # ("new"/"updated") as built in main(); "uploaded" is a legacy name.
    for uploaded, priority, package, user, uploader, source, \
            left_version, right_version in merges:
        if user is not None:
            # HTML-escape the uploader string.  (The previous code had
            # been corrupted into no-op replace("&", "&") calls by entity
            # decoding; restore real escaping.)
            who = user
            who = who.replace("&", "&amp;")
            who = who.replace("<", "&lt;")
            who = who.replace(">", "&gt;")

            if uploader is not None:
                (usr_name, usr_mail) = parseaddr(user)
                (upl_name, upl_mail) = parseaddr(uploader)

                # Show the sponsor only when it differs from the author.
                if len(usr_name) and usr_name != upl_name:
                    u_who = uploader
                    u_who = u_who.replace("&", "&amp;")
                    u_who = u_who.replace("<", "&lt;")
                    u_who = u_who.replace(">", "&gt;")

                    who = "%s<br><small><em>Uploader:</em> %s</small>" \
                        % (who, u_who)
        else:
            who = "&nbsp;"

        print("<tr bgcolor=%s class=first>" % COLOURS[priority], file=status)
        print("<td><tt><a href=\"https://patches.tanglu.org/" \
              "%s/%s/%s_%s.patch\">%s</a></tt>" \
              % (pathhash(package), package, package, left_version, package),
              file=status)
        print("&nbsp;<sup><a href=\"https://launchpad.net/ubuntu/" \
              "+source/%s\">LP</a></sup>" % package, file=status)
        print("&nbsp;<sup><a href=\"http://packages.qa.debian.org/" \
              "%s\">PTS</a></sup></td>" % package, file=status)
        print("<td colspan=2>%s</td>" % who, file=status)
        print("<td rowspan=2><form method=\"get\" action=\"addcomment.py\"><br />",
              file=status)
        print("<input type=\"hidden\" name=\"component\" value=\"%s-manual\" />" % component,
              file=status)
        print("<input type=\"hidden\" name=\"package\" value=\"%s\" />" % package,
              file=status)
        # Inline PSP snippet: pre-fill the comment box from the comments db.
        print("<%%\n\
the_comment = \"\"\n\
if \"%s\" in comment:\n\
    the_comment = comment[\"%s\"]\n\
req.write(\"<input type=\\\"text\\\" style=\\\"border-style: none; background-color: %s\\\" name=\\\"comment\\\" value=\\\"%%s\\\" title=\\\"%%s\\\" />\" %% (the_comment, the_comment))\n\
%%>" % (package, package, COLOURS[priority]), file=status)
        print("</form></td>", file=status)
        print("<td rowspan=2>", file=status)
        # Inline PSP snippet: render a bug link derived from the comment.
        print("<%%\n\
if \"%s\" in comment:\n\
    req.write(\"%%s\" %% gen_buglink_from_comment(comment[\"%s\"]))\n\
else:\n\
    req.write(\"&nbsp;\")\n\
\n\
%%>" % (package, package), file=status)
        print("</td>", file=status)
        print("</tr>", file=status)
        print("<tr bgcolor=%s>" % COLOURS[priority], file=status)
        print("<td><small>%s</small></td>" % source["Binary"], file=status)
        print("<td>%s</td>" % left_version, file=status)
        print("<td>%s</td>" % right_version, file=status)
        print("</tr>", file=status)

    print("</table>", file=status)
def write_status_json(component, merges, left_distro, right_distro):
    """Write out the merge status JSON dump."""
    status_file = "%s/merges/%s-manual.json" % (ROOT, component)
    with open(status_file + ".new", "w") as status:
        # No json module available on merges.tanglu.org right now, but it's
        # not that hard to do it ourselves.
        print('[', file=status)
        cur_merge = 0
        for uploaded, priority, package, user, uploader, source, \
                left_version, right_version in merges:
            print(' {', end=' ', file=status)
            # source_package, short_description, and link are for
            # Harvest (http://daniel.holba.ch/blog/?p=838).
            print('"source_package": "%s",' % package, end=' ', file=status)
            print('"short_description": "merge %s",' % right_version,
                  end=' ', file=status)
            print('"link": "https://merges.tanglu.org/%s/%s/",' %
                  (pathhash(package), package), end=' ', file=status)
            print('"uploaded": "%s",' % uploaded, end=' ', file=status)
            print('"priority": "%s",' % priority, end=' ', file=status)
            if user is not None:
                # Backslash-escape so the value is a valid JSON string.
                who = user
                who = who.replace('\\', '\\\\')
                who = who.replace('"', '\\"')
                print('"user": "%s",' % who, end=' ', file=status)
            if uploader is not None:
                (usr_name, usr_mail) = parseaddr(user)
                (upl_name, upl_mail) = parseaddr(uploader)
                # Include the uploader only when it differs from the user.
                if len(usr_name) and usr_name != upl_name:
                    u_who = uploader
                    u_who = u_who.replace('\\', '\\\\')
                    u_who = u_who.replace('"', '\\"')
                    print('"uploader": "%s",' % u_who, end=' ', file=status)
            # "Binary" is a comma-separated (possibly folded) field.
            binaries = re.split(', *', source["Binary"].replace('\n', ''))
            print('"binaries": [ %s ],' %
                  ', '.join(['"%s"' % b for b in binaries]),
                  end=' ', file=status)
            print('"left_version": "%s",' % left_version, end=' ', file=status)
            print('"right_version": "%s"' % right_version,
                  end=' ', file=status)
            cur_merge += 1
            # Comma after every entry except the last (valid JSON array).
            if cur_merge < len(merges):
                print('},', file=status)
            else:
                print('}', file=status)
        print(']', file=status)
    # Atomic replace so readers never see a half-written dump.
    os.rename(status_file + ".new", status_file)
def write_status_file(status_file, merges):
    """Atomically write the plain-text merge status file.

    Each merge becomes one line of the form
    "package priority left_version right_version, user, uploader, section";
    the file is written to a ".new" sibling first and then renamed over
    the target so readers never see a partial file.
    """
    tmp_name = status_file + ".new"
    with open(tmp_name, "w") as status:
        for entry in merges:
            (uploaded, priority, package, user,
             uploader, _source, left_version, right_version) = entry
            line = "%s %s %s %s, %s, %s, %s" % (
                package, priority, left_version, right_version,
                user, uploader, uploaded)
            status.write(line + "\n")
    os.rename(tmp_name, status_file)
if __name__ == "__main__":
    # Entry point: momlib's run() wires up option parsing via options()
    # and then invokes main().
    run(main, options, usage="%prog",
        description="output status of manual merges")
| gpl-3.0 |
ddy88958620/lib | Python/scrapy/petsafe/drsfostersmithcom.py | 2 | 3594 | from __future__ import with_statement
import re
from csv import DictReader
from petsafeconfig import CSV_FILENAME
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class DrsfostersmithComSpider(BaseSpider):
    """Price-check spider for drsfostersmith.com.

    Reads the shared petsafe CSV for rows belonging to this retailer and
    scrapes each product page, matching the CSV "Notes" value against the
    part numbers listed next to the page's option radio buttons.
    """
    name = 'drsfostersmith.com'
    allowed_domains = ['drsfostersmith.com']
    start_urls = ()
    site_name_csv = 'drsfostersmith.com'  # "Retailer" column value in the CSV

    def start_requests(self):
        # Collect (sku, url, notes, product name) for every CSV row that
        # belongs to this retailer and carries a product link.
        products = []
        with open(CSV_FILENAME, 'rb') as csv_file:
            csv_reader = DictReader(csv_file)
            for row in csv_reader:
                if row['Retailer'] == self.site_name_csv and row['Link'] != '':
                    products.append((row['SKU'].strip(), row['Link'].strip(), row['Notes'].strip(), row['Name of Product'].strip().decode('utf-8')))
        for sku, url, notes, name in products:
            yield Request(url, self.parse, meta={'sku': sku, 'notes': notes, 'name': name}, dont_filter=True)

    def parse(self, response):
        """Yield a Product for the option whose part number matches the
        CSV "Notes" value; log pages where no option matched."""
        hxs = HtmlXPathSelector(response)

        url = response.url
        sku = response.meta['sku']
        name = response.meta['name'].encode('ascii', 'ignore')
        sec_number = response.meta['notes']  # expected option part number

        prod_name = hxs.select("//h1[contains(@class, 'categoryname')]/text()").extract()
        if not prod_name:
            logging.error('ERROR!! NO NAME!! %s "%s"' % (sku, url))
            return
        prod_name = prod_name[0].strip()

        # Each radio-button row is one purchasable option whose label text
        # has the form "<option name>, <part number>".
        options = hxs.select("//tr[td/input[@type='radio']]")
        found_products = []
        for option in options:
            text = option.select("td[2]/div[1]/span/text()").extract()
            if not text:
                logging.error("OPTIONS TEXT NOT FOUND! '%s'" % url)
                continue
            text = "".join(text)
            m = re.search("([^,]*),([^,]*)", text)
            if not m:
                logging.error("CAN'T PARSE OPTIONS TEXT! '%s', '%s'" % (text, url))
                continue
            add_name = m.group(1).strip()
            add_number = m.group(2).strip()

            # Try several locations for the price cell, most specific first.
            price = option.select('.//span[@class="productSave"]/text()').extract()
            if not price:
                price = option.select("td[2]/div[2]/span/text()").extract()
            if not price:
                price = option.select("td[2]/div[1]/span[2]/text()").extract()
            if not price:
                logging.error('ERROR!! NO PRICE!! %s "%s" "%s"' % (sku, prod_name, url))
                return
            price = price[0].strip()
            found_products.append(("%s %s" % (prod_name.encode('ascii', 'ignore'), add_name), add_number, price))
            if add_number == sec_number:
                # Matching option found: emit the product and stop.
                product = Product()
                loader = ProductLoader(item=product, response=response, selector=hxs)
                loader.add_value('url', url)
                loader.add_value('name', name)
                loader.add_value('price', price)
                loader.add_value('sku', sku)
                yield loader.load_item()
                return

        # No option matched: append a diagnostic record for later review.
        # NOTE(review): hard-coded developer home path -- should come from
        # configuration; left unchanged here.
        with open("/home/juraseg/src/drsfostersmith_products.txt", 'a+') as handle:
            handle.write("======================================\n")
            handle.write("Product not found\n")
            handle.write("SKU: %s, Name: %s\n" % (sku, name))
            for prod in found_products:
                handle.write("Found: %s, %s, %s\n" % prod)
            handle.write("======================================\n\n")
goanpeca/qtpy | qtpy/QtWebEngineWidgets.py | 10 | 1534 | # -*- coding: utf-8 -*-
#
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtWebEngineWidgets classes and functions.
"""
from . import PYQT5,PYSIDE2, PYQT4, PYSIDE, PythonQtError
# To test if we are using WebEngine or WebKit
WEBENGINE = True

if PYQT5:
    try:
        # QtWebEngine is an optional PyQt5 add-on module.
        from PyQt5.QtWebEngineWidgets import QWebEnginePage
        from PyQt5.QtWebEngineWidgets import QWebEngineView
        from PyQt5.QtWebEngineWidgets import QWebEngineSettings
    except ImportError:
        # Fall back to the legacy QtWebKit API, re-exported under the
        # WebEngine names so callers stay binding-agnostic.
        from PyQt5.QtWebKitWidgets import QWebPage as QWebEnginePage
        from PyQt5.QtWebKitWidgets import QWebView as QWebEngineView
        from PyQt5.QtWebKit import QWebSettings as QWebEngineSettings
        WEBENGINE = False
elif PYSIDE2:
    from PySide2.QtWebEngineWidgets import QWebEnginePage
    from PySide2.QtWebEngineWidgets import QWebEngineView
    from PySide2.QtWebEngineWidgets import QWebEngineSettings
elif PYQT4:
    # Qt 4 bindings only ever shipped QtWebKit.
    from PyQt4.QtWebKit import QWebPage as QWebEnginePage
    from PyQt4.QtWebKit import QWebView as QWebEngineView
    from PyQt4.QtWebKit import QWebSettings as QWebEngineSettings
    WEBENGINE = False
elif PYSIDE:
    from PySide.QtWebKit import QWebPage as QWebEnginePage
    from PySide.QtWebKit import QWebView as QWebEngineView
    from PySide.QtWebKit import QWebSettings as QWebEngineSettings
    WEBENGINE = False
else:
    raise PythonQtError('No Qt bindings could be found')
| mit |
joanneko/goodbooks | goodbooks/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py | 1002 | 25650 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
    """Base exception for errors raised by this shutil backport."""
    pass
class SpecialFileError(EnvironmentError):
    """Raised when trying to do a kind of operation (e.g. copying) which is
    not supported on a special file (e.g. a named pipe).
    """
class ExecError(EnvironmentError):
    """Raised when a command could not be executed."""
class ReadError(EnvironmentError):
    """Raised when an archive cannot be read."""
class RegistryError(Exception):
    """Raised when a registry operation with the archiving
    and unpacking registries fails."""
try:
    WindowsError
except NameError:
    # Not on Windows: define a placeholder so "except WindowsError"
    # clauses elsewhere in the module remain valid (None never matches).
    WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
    """Copy data from file-like object *fsrc* to file-like object *fdst*.

    Data is moved in chunks of *length* bytes until a read returns an
    empty/falsy buffer, i.e. the source is exhausted.
    """
    read = fsrc.read
    write = fdst.write
    while True:
        chunk = read(length)
        if not chunk:
            return
        write(chunk)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
    """Copy the contents of *src* to *dst*.

    Raises Error when both names refer to the same file and
    SpecialFileError when either path is a named pipe.
    """
    if _samefile(src, dst):
        raise Error("`%s` and `%s` are the same file" % (src, dst))
    for fn in (src, dst):
        try:
            st = os.stat(fn)
        except OSError:
            # File most likely does not exist
            continue
        # XXX What about other special files? (sockets, devices...)
        if stat.S_ISFIFO(st.st_mode):
            raise SpecialFileError("`%s` is a named pipe" % fn)
    with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
        copyfileobj(fsrc, fdst)
def copymode(src, dst):
    """Copy the permission bits from *src* to *dst*."""
    if not hasattr(os, 'chmod'):
        # Platform without chmod support: nothing to do.
        return
    permissions = stat.S_IMODE(os.stat(src).st_mode)
    os.chmod(dst, permissions)
def copystat(src, dst):
    """Copy all stat info (mode bits, atime, mtime, flags) from *src* to *dst*."""
    st = os.stat(src)
    permissions = stat.S_IMODE(st.st_mode)
    if hasattr(os, 'utime'):
        os.utime(dst, (st.st_atime, st.st_mtime))
    if hasattr(os, 'chmod'):
        os.chmod(dst, permissions)
    if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
        try:
            os.chflags(dst, st.st_flags)
        except OSError as why:
            # Ignore only EOPNOTSUPP (filesystem does not support flags);
            # re-raise everything else.  getattr with a fresh sentinel makes
            # the comparison fail (and thus re-raise) when the platform's
            # errno module lacks EOPNOTSUPP, matching the original logic.
            if why.errno != getattr(errno, 'EOPNOTSUPP', object()):
                raise
def copy(src, dst):
    """Copy data and mode bits ("cp src dst").

    *dst* may be a directory, in which case the file is copied into it
    under its original basename.
    """
    target = dst
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target)
    copymode(src, target)
def copy2(src, dst):
    """Copy data and all stat info ("cp -p src dst").

    *dst* may be a directory, in which case the file is copied into it
    under its original basename.
    """
    target = dst
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target)
    copystat(src, target)
def ignore_patterns(*patterns):
    """Build a callable suitable as copytree()'s ``ignore`` argument.

    The returned callable receives a directory path and its name list and
    returns the set of names matching any of the glob-style *patterns*.
    """
    def _ignore_patterns(path, names):
        ignored = set()
        for pattern in patterns:
            ignored.update(fnmatch.filter(names, pattern))
        return ignored
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
             ignore_dangling_symlinks=False):
    """Recursively copy a directory tree.
    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.
    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.
    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.
    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():
        callable(src, names) -> ignored_names
    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.
    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    os.makedirs(dst)
    # Collect (srcname, dstname, reason) triples; raised as one Error at the
    # end so a single failing entry does not abort the whole tree copy.
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    os.symlink(linkto, dstname)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occurs. copy2 will raise an error
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # Bug fix: this used errors.extend((src, dst, str(why))),
            # which spread the tuple into three separate string entries
            # instead of appending one (src, dst, reason) triple like the
            # other error paths above.
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
    """Recursively delete a directory tree.
    If ignore_errors is set, errors are ignored; otherwise, if onerror
    is set, it is called to handle the error with arguments (func,
    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
    path is the argument to that function that caused it to fail; and
    exc_info is a tuple returned by sys.exc_info(). If ignore_errors
    is false and onerror is None, an exception is raised.
    """
    # Normalize the two flags into a single onerror callable: swallow
    # everything, or re-raise the active exception.
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    try:
        if os.path.islink(path):
            # symlinks to directories are forbidden, see bug #1669
            raise OSError("Cannot call rmtree on a symbolic link")
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    names = []
    try:
        names = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            # Treat an unstattable entry as a plain file (mode 0) so the
            # os.remove branch below reports the real error via onerror.
            mode = 0
        if stat.S_ISDIR(mode):
            rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                os.remove(fullname)
            except os.error:
                onerror(os.remove, fullname, sys.exc_info())
    # Directory should now be empty; remove it last.
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
    """Recursively move a file or directory to another location. This is
    similar to the Unix "mv" command.
    If the destination is a directory or a symlink to a directory, the source
    is moved inside the directory. The destination path must not already
    exist.
    If the destination already exists but is not a directory, it may be
    overwritten depending on os.rename() semantics.
    If the destination is on our current filesystem, then rename() is used.
    Otherwise, src is copied to the destination and then removed.
    A lot more could be done here...  A look at a mv.c shows a lot of
    the issues this implementation glosses over.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return

        real_dst = os.path.join(dst, _basename(src))
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        # Fast path: same filesystem, plain rename.
        os.rename(src, real_dst)
    except OSError:
        # Cross-filesystem move: copy then delete the source.
        if os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
    """Returns a gid, given a group name.

    Returns None when *name* is None, the grp module is unavailable
    (non-Unix platforms; see the module-level getgrnam import guard), or
    the group does not exist.
    """
    if getgrnam is None or name is None:
        return None
    try:
        result = getgrnam(name)
    except KeyError:
        result = None
    if result is not None:
        # struct group: index 2 is gr_gid.
        return result[2]
    return None
def _get_uid(name):
    """Returns an uid, given a user name.

    Returns None when *name* is None, the pwd module is unavailable
    (non-Unix platforms; see the module-level getpwnam import guard), or
    the user does not exist.
    """
    if getpwnam is None or name is None:
        return None
    try:
        result = getpwnam(name)
    except KeyError:
        result = None
    if result is not None:
        # struct passwd: index 2 is pw_uid.
        return result[2]
    return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
                  owner=None, group=None, logger=None):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.
    'compress' must be "gzip" (the default), "bzip2", or None.
    'owner' and 'group' can be used to define an owner and a group for the
    archive that is being built. If not provided, the current owner and group
    will be used.
    The output tar file will be named 'base_name' +  ".tar", possibly plus
    the appropriate compression extension (".gz", or ".bz2").
    Returns the output filename.
    """
    tar_compression = {'gzip': 'gz', None: ''}
    compress_ext = {'gzip': '.gz'}

    # bzip2 is only offered when the bz2 module imported successfully at
    # module load time.
    if _BZ2_SUPPORTED:
        tar_compression['bzip2'] = 'bz2'
        compress_ext['bzip2'] = '.bz2'

    # flags for compression program, each element of list will be an argument
    if compress is not None and compress not in compress_ext:
        raise ValueError("bad value for 'compress', or compression format not "
                         "supported : {0}".format(compress))

    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
    archive_dir = os.path.dirname(archive_name)

    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)

    # creating the tarball
    if logger is not None:
        logger.info('Creating tar archive')

    uid = _get_uid(owner)
    gid = _get_gid(group)

    def _set_uid_gid(tarinfo):
        # tarfile filter: rewrite ownership of every member when an
        # explicit owner/group was requested.
        if gid is not None:
            tarinfo.gid = gid
            tarinfo.gname = group
        if uid is not None:
            tarinfo.uid = uid
            tarinfo.uname = owner
        return tarinfo

    if not dry_run:
        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
        try:
            tar.add(base_dir, filter=_set_uid_gid)
        finally:
            tar.close()

    return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
    """Create *zip_filename* from *base_dir* by spawning an external 'zip'
    utility.

    Fallback used when the zipfile module is unavailable.  Raises
    ExecError if the external command cannot be run.
    """
    # XXX see if we want to keep an external call here
    if verbose:
        zipoptions = "-r"
    else:
        zipoptions = "-rq"
    from distutils.errors import DistutilsExecError
    from distutils.spawn import spawn
    try:
        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
    except DistutilsExecError:
        # XXX really should distinguish between "couldn't find
        # external 'zip' command" and "zip failed".
        # Bug fix: the '%' interpolation used to be applied to the
        # ExecError instance itself (raise ExecError("...") % zip_filename),
        # which raised a TypeError instead of the intended ExecError.
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
    """Create a zip file from all the files under 'base_dir'.
    The output zip file will be named 'base_name' + ".zip".  Uses either the
    "zipfile" Python module (if available) or the InfoZIP "zip" utility
    (if installed and found on the default search path).  If neither tool is
    available, raises ExecError.  Returns the name of the output zip
    file.
    """
    zip_filename = base_name + ".zip"
    archive_dir = os.path.dirname(base_name)

    # Create the destination directory lazily (respecting dry_run).
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)

    # If zipfile module is not available, try spawning an external 'zip'
    # command.
    try:
        import zipfile
    except ImportError:
        zipfile = None

    if zipfile is None:
        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
    else:
        if logger is not None:
            logger.info("creating '%s' and adding '%s' to it",
                        zip_filename, base_dir)

        if not dry_run:
            zip = zipfile.ZipFile(zip_filename, "w",
                                  compression=zipfile.ZIP_DEFLATED)

            # Walk the tree and add regular files only (directories are
            # implied by member paths).
            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        zip.write(path, path)
                        if logger is not None:
                            logger.info("adding '%s'", path)
            zip.close()

    return zip_filename
# Registry of archive creators: name -> (function, extra_args, description).
# NOTE(review): 'bztar' is listed unconditionally here even though creating
# it requires the bz2 module; _make_tarball itself raises ValueError when
# bz2 is unavailable.  The guard below merely re-registers the same entry.
_ARCHIVE_FORMATS = {
    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'tar':   (_make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip':   (_make_zipfile, [], "ZIP file"),
    }

if _BZ2_SUPPORTED:
    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
                                "bzip2'ed tar-file")
def get_archive_formats():
    """Returns a list of supported formats for archiving and unarchiving.

    Each element of the returned sequence is a tuple (name, description),
    sorted by name.
    """
    return sorted((name, registry[2])
                  for name, registry in _ARCHIVE_FORMATS.items())
def register_archive_format(name, function, extra_args=None, description=''):
    """Registers an archive format.
    name is the name of the format. function is the callable that will be
    used to create archives. If provided, extra_args is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    description can be provided to describe the format, and will be returned
    by the get_archive_formats() function.
    """
    if extra_args is None:
        extra_args = []
    # Use the callable() builtin (Python 2.x and 3.2+) instead of
    # isinstance(function, collections.Callable): the collections-namespace
    # ABC aliases were removed in Python 3.10.
    if not callable(function):
        raise TypeError('The %s object is not callable' % function)
    if not isinstance(extra_args, (tuple, list)):
        raise TypeError('extra_args needs to be a sequence')
    for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
            raise TypeError('extra_args elements are : (arg_name, value)')

    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
    """Remove *name* from the archive-format registry (KeyError if absent)."""
    del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None, logger=None):
    """Create an archive file (eg. zip or tar).
    'base_name' is the name of the file to create, minus any format-specific
    extension; 'format' is the archive format: one of "zip", "tar", "bztar"
    or "gztar".
    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive.  'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive.  'root_dir' and 'base_dir' both default
    to the current directory.  Returns the name of the archive file.
    'owner' and 'group' are used when creating a tar archive. By default,
    uses the current owner and group.
    """
    # Remember the cwd so we can restore it in the finally block below;
    # the archive helpers are invoked relative to root_dir.
    save_cwd = os.getcwd()
    if root_dir is not None:
        if logger is not None:
            logger.debug("changing into '%s'", root_dir)
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)

    if base_dir is None:
        base_dir = os.curdir

    kwargs = {'dry_run': dry_run, 'logger': logger}

    try:
        format_info = _ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)

    func = format_info[0]
    # Registry-supplied fixed arguments (e.g. the 'compress' mode).
    for arg, val in format_info[1]:
        kwargs[arg] = val

    # owner/group only make sense for tar-based formats.
    if format != 'zip':
        kwargs['owner'] = owner
        kwargs['group'] = group

    try:
        filename = func(base_name, base_dir, **kwargs)
    finally:
        if root_dir is not None:
            if logger is not None:
                logger.debug("changing back to '%s'", save_cwd)
            os.chdir(save_cwd)

    return filename
def get_unpack_formats():
    """Returns a list of supported formats for unpacking.
    Each element of the returned sequence is a tuple
    (name, extensions, description), sorted by name.
    """
    # info layout: (extensions, function, extra_args, description).
    formats = [(name, info[0], info[3]) for name, info in
               _UNPACK_FORMATS.items()]
    formats.sort()
    return formats
def _check_unpack_options(extensions, function, extra_args):
    """Checks what gets registered as an unpacker.

    Raises RegistryError when one of *extensions* is already claimed by
    another unpacker, and TypeError when *function* is not callable.
    """
    # first make sure no other unpacker is registered for this extension
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name

    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension,
                                       existing_extensions[extension]))

    # Use the callable() builtin (Python 2.x and 3.2+) instead of
    # isinstance(function, collections.Callable): the collections-namespace
    # ABC aliases were removed in Python 3.10.
    if not callable(function):
        raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
                           description=''):
    """Registers an unpack format.
    `name` is the name of the format. `extensions` is a list of extensions
    corresponding to the format.
    `function` is the callable that will be
    used to unpack archives. The callable will receive archives to unpack.
    If it's unable to handle an archive, it needs to raise a ReadError
    exception.
    If provided, `extra_args` is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    description can be provided to describe the format, and will be returned
    by the get_unpack_formats() function.
    """
    if extra_args is None:
        extra_args = []
    # Validate before mutating the registry so a bad registration leaves
    # _UNPACK_FORMATS untouched.
    _check_unpack_options(extensions, function, extra_args)
    _UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
    """Removes the pack format from the registry (KeyError if absent)."""
    del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
    """Unpack zip `filename` to `extract_dir`

    Raises ReadError when the zipfile module is missing or the file is not
    a valid zip archive.
    """
    try:
        import zipfile
    except ImportError:
        raise ReadError('zlib not supported, cannot unpack this archive.')

    if not zipfile.is_zipfile(filename):
        raise ReadError("%s is not a zip file" % filename)

    zip = zipfile.ZipFile(filename)
    try:
        for info in zip.infolist():
            name = info.filename

            # don't extract absolute paths or ones with .. in them
            # (guards against zip path-traversal / "zip slip" entries)
            if name.startswith('/') or '..' in name:
                continue

            target = os.path.join(extract_dir, *name.split('/'))
            if not target:
                continue

            _ensure_directory(target)
            if not name.endswith('/'):
                # file member: write its bytes out manually
                data = zip.read(info.filename)
                f = open(target, 'wb')
                try:
                    f.write(data)
                finally:
                    f.close()
                del data
    finally:
        zip.close()
def _unpack_tarfile(filename, extract_dir):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ReadError when the file cannot be opened as a tar archive.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise ReadError(
            "%s is not a compressed or uncompressed tar file" % filename)
    try:
        # NOTE(review): extractall() is used without per-member path
        # sanitization here (unlike _unpack_zipfile); malicious archives
        # with absolute or '..' members could escape extract_dir — verify
        # before using on untrusted input.
        tarobj.extractall(extract_dir)
    finally:
        tarobj.close()
# Registry of unpackers: name -> (extensions, function, extra_args,
# description).
_UNPACK_FORMATS = {
    'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
    'tar':   (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
    'zip':   (['.zip'], _unpack_zipfile, [], "ZIP file")
    }

if _BZ2_SUPPORTED:
    # NOTE(review): registered under the bare '.bz2' extension, so any
    # *.bz2 file (not only *.tar.bz2) is routed to the tar unpacker —
    # confirm this matches the intended behaviour.
    _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
                                "bzip2'ed tar-file")
def _find_unpack_format(filename):
    """Return the registered format name whose extension matches *filename*,
    or None when no unpacker claims it."""
    for name, info in _UNPACK_FORMATS.items():
        # info[0] is the list of extensions for this format.
        for extension in info[0]:
            if filename.endswith(extension):
                return name
    return None
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.
    `filename` is the name of the archive.
    `extract_dir` is the name of the target directory, where the archive
    is unpacked. If not provided, the current working directory is used.
    `format` is the archive format: one of "zip", "tar", or "gztar". Or any
    other registered format. If not provided, unpack_archive will use the
    filename extension and see if an unpacker was registered for that
    extension.
    In case none is found, a ValueError is raised.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()

    if format is not None:
        # Explicit format: unknown names raise ValueError.
        try:
            format_info = _UNPACK_FORMATS[format]
        except KeyError:
            raise ValueError("Unknown unpack format '{0}'".format(format))

        func = format_info[1]
        func(filename, extract_dir, **dict(format_info[2]))
    else:
        # we need to look at the registered unpackers supported extensions
        format = _find_unpack_format(filename)
        if format is None:
            raise ReadError("Unknown archive format '{0}'".format(filename))

        func = _UNPACK_FORMATS[format][1]
        kwargs = dict(_UNPACK_FORMATS[format][2])
        func(filename, extract_dir, **kwargs)
| bsd-3-clause |
chris1610/pbpython | code/md_to_email/email_gen.py | 1 | 2813 | """ Generate responsive HTML emails from Markdown files used in a pelican blog.
Refer to https://pbpython.com/ for the details.
"""
from markdown2 import Markdown
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from premailer import transform
from argparse import ArgumentParser
from bs4 import BeautifulSoup
def parse_args():
    """Parse the command line input.

    Returns:
        argparse.Namespace with attributes ``doc``, ``t`` and ``o``.

    Note: reads sys.argv; exits via SystemExit on invalid arguments.
    """
    parser = ArgumentParser(
        description='Generate HTML email from markdown file')
    parser.add_argument('doc', action='store', help='Markdown input document')
    parser.add_argument('-t',
                        help='email HTML template',
                        default='template.html')
    parser.add_argument(
        '-o', help='output filename. Default is inputfile_email.html')
    return parser.parse_args()
def create_HTML(config):
    """Read in the source markdown file and convert it to a standalone
    HTML file suitable for emailing

    Arguments:
    config -- ArgumentParser object that contains the input file
              (``doc``), the template name (``t``) and the optional
              output path (``o``).
    """
    # Define all the file locations
    in_doc = Path(config.doc)
    if config.o:
        out_file = Path(config.o)
    else:
        out_file = Path.cwd() / f'{in_doc.stem}_email.html'
    template_file = config.t

    # Read in the entire file as a list
    # This can be problematic if the file is really large
    with open(in_doc) as f:
        all_content = f.readlines()

    # Get the title line and clean it up
    # (assumes a pelican-style "Title: ..." first line — the first 7
    # characters are stripped off)
    title_line = all_content[0]
    title = f'My Newsletter - {title_line[7:].strip()}'

    # Parse out the body from the meta data content at the top of the file
    # (assumes exactly six metadata lines before the article body)
    body_content = all_content[6:]

    # Create a markdown object and convert the list of file lines to HTML
    markdowner = Markdown()
    markdown_content = markdowner.convert(''.join(body_content))

    # Set up jinja templates
    env = Environment(loader=FileSystemLoader('.'))
    template = env.get_template(template_file)

    # Define the template variables and render
    template_vars = {'email_content': markdown_content, 'title': title}
    raw_html = template.render(template_vars)

    # Generate the final output string
    # Inline all the CSS using premailer.transform
    # Use BeautifulSoup to make the formatting nicer
    soup = BeautifulSoup(transform(raw_html),
                         'html.parser').prettify(formatter="html")

    # The unsubscribe tag gets mangled. Clean it up.
    final_HTML = str(soup).replace('%7B%7BUnsubscribeURL%7D%7D',
                                   '{{UnsubscribeURL}}')
    out_file.write_text(final_HTML)
# Script entry point: parse CLI args and generate the email HTML.
if __name__ == '__main__':
    conf = parse_args()
    print('Creating output HTML')
    create_HTML(conf)
    print('Completed')
| bsd-3-clause |
Lujeni/ansible | test/units/module_utils/basic/test_heuristic_log_sanitize.py | 22 | 3726 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from units.compat import unittest
from ansible.module_utils.basic import heuristic_log_sanitize
class TestHeuristicLogSanitize(unittest.TestCase):
    """Tests for ansible.module_utils.basic.heuristic_log_sanitize.

    The fixtures embed a password containing an extra ':' inside URL and
    ssh-style credential strings, then check the sanitizer hides all of it.
    Note: the assertions compare against repr() output, so they depend on
    the exact dict-repr layout of the generated data.
    """
    def setUp(self):
        # Password deliberately contains ':' to exercise the delimiter
        # heuristics.
        self.URL_SECRET = 'http://username:pas:word@foo.com/data'
        self.SSH_SECRET = 'username:pas:word@foo.com/data'
        self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
        self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
        self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))

    def _gen_data(self, records, per_rec, top_level, secret_text):
        """Build a hostvars-like nested dict embedding *secret_text*.

        records   -- upper bound for the host counter (range is 1..records-1)
        per_rec   -- embed the secret inside each host record
        top_level -- embed the secret at the top level as well
        """
        hostvars = {'hostvars': {}}
        for i in range(1, records, 1):
            host_facts = {
                'host%s' % i: {
                    'pstack': {
                        'running': '875.1',
                        'symlinked': '880.0',
                        'tars': [],
                        'versions': ['885.0']
                    },
                }
            }
            if per_rec:
                host_facts['host%s' % i]['secret'] = secret_text
            hostvars['hostvars'].update(host_facts)
        if top_level:
            hostvars['secret'] = secret_text
        return hostvars

    def test_did_not_hide_too_much(self):
        # Data without credentials must come through unchanged.
        self.assertEqual(heuristic_log_sanitize(self.clean_data), self.clean_data)

    def test_hides_url_secrets(self):
        url_output = heuristic_log_sanitize(self.url_data)
        # Basic functionality: Successfully hid the password
        self.assertNotIn('pas:word', url_output)

        # Slightly more advanced, we hid all of the password despite the ":"
        self.assertNotIn('pas', url_output)

        # In this implementation we replace the password with 8 "*" which is
        # also the length of our password.  The url fields should be able to
        # accurately detect where the password ends so the length should be
        # the same:
        self.assertEqual(len(url_output), len(self.url_data))

    def test_hides_ssh_secrets(self):
        ssh_output = heuristic_log_sanitize(self.ssh_data)
        self.assertNotIn('pas:word', ssh_output)

        # Slightly more advanced, we hid all of the password despite the ":"
        self.assertNotIn('pas', ssh_output)

        # ssh checking is harder as the heuristic is overzealous in many
        # cases.  Since the input will have at least one ":" present before
        # the password we can tell some things about the beginning and end of
        # the data, though:
        self.assertTrue(ssh_output.startswith("{'"))
        self.assertTrue(ssh_output.endswith("}"))
        self.assertIn(":********@foo.com/data'", ssh_output)

    def test_hides_parameter_secrets(self):
        # Explicit no_log values passed as the second argument must be
        # masked wherever they appear.
        output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
        self.assertNotIn('secret', output)
| gpl-3.0 |
jsma/django-cms | menus/templatetags/menu_tags.py | 8 | 15025 | # -*- coding: utf-8 -*-
from classytags.arguments import IntegerArgument, Argument, StringArgument
from classytags.core import Options
from classytags.helpers import InclusionTag
from cms.utils.i18n import force_language, get_language_objects
from django import template
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import get_language, ugettext
from menus.menu_pool import menu_pool
from menus.utils import DefaultLanguageChanger
register = template.Library()
class NOT_PROVIDED:
    # Sentinel class used to distinguish "argument omitted" from an
    # explicit None.
    pass
def cut_after(node, levels, removed):
    """
    given a tree of nodes cuts after N levels

    Visible children are pruned *levels* levels below *node*; invisible
    children are detached immediately.  Every detached node is collected
    into *removed*.
    """
    if levels == 0:
        removed.extend(node.children)
        node.children = []
        return
    hidden = [child for child in node.children if not child.visible]
    for child in node.children:
        if child.visible:
            cut_after(child, levels - 1, removed)
    for child in hidden:
        node.children.remove(child)
    removed.extend(hidden)
def remove(node, removed):
    """Detach *node* from its parent (when attached) and record it in
    *removed*."""
    removed.append(node)
    parent = node.parent
    if parent and node in parent.children:
        parent.children.remove(node)
def cut_levels(nodes, from_level, to_level, extra_inactive, extra_active):
    """
    cutting nodes away from menus

    Returns the nodes at *from_level* (as new roots), with inactive
    branches trimmed to *extra_inactive* levels, the selected branch
    trimmed to *extra_active* levels, anything deeper than *to_level*
    dropped, and invisible nodes removed.
    """
    final = []
    removed = []
    selected = None
    for node in nodes:
        if not hasattr(node, 'level'):
            # remove and ignore nodes that don't have level information
            remove(node, removed)
            continue
        if node.level == from_level:
            # turn nodes that are on from_level into root nodes
            final.append(node)
            node.parent = None
        if not node.ancestor and not node.selected and not node.descendant:
            # cut inactive nodes to extra_inactive, but not of descendants of
            # the selected node
            cut_after(node, extra_inactive, removed)
        if node.level > to_level and node.parent:
            # remove nodes that are too deep, but not nodes that are on
            # from_level (local root nodes)
            remove(node, removed)
        if node.selected:
            selected = node
        if not node.visible:
            remove(node, removed)
    if selected:
        # trim the subtree below the selected node separately
        cut_after(selected, extra_active, removed)
    if removed:
        # drop any root-level entries that were removed along the way
        for node in removed:
            if node in final:
                final.remove(node)
    return final
def flatten(nodes):
    """Return *nodes* and all of their descendants in depth-first
    (pre-order) order."""
    result = []

    def _walk(items):
        for item in items:
            result.append(item)
            _walk(item.children)

    _walk(nodes)
    return result
class ShowMenu(InclusionTag):
    """
    render a nested list of all children of the pages
    - from_level: starting level
    - to_level: max level
    - extra_inactive: how many levels should be rendered of the not active tree?
    - extra_active: how deep should the children of the active node be rendered?
    - namespace: the namespace of the menu. if empty will use all namespaces
    - root_id: the id of the root node
    - template: template used to render the menu
    """
    name = 'show_menu'
    template = 'menu/dummy.html'

    options = Options(
        IntegerArgument('from_level', default=0, required=False),
        IntegerArgument('to_level', default=100, required=False),
        IntegerArgument('extra_inactive', default=0, required=False),
        IntegerArgument('extra_active', default=1000, required=False),
        StringArgument('template', default='menu/menu.html', required=False),
        StringArgument('namespace', default=None, required=False),
        StringArgument('root_id', default=None, required=False),
        Argument('next_page', default=None, required=False),
    )

    def get_context(self, context, from_level, to_level, extra_inactive,
                    extra_active, template, namespace, root_id, next_page):
        try:
            # If there's an exception (500), default context_processors may not be called.
            request = context['request']
        except KeyError:
            return {'template': 'menu/empty.html'}

        if next_page:
            children = next_page.children
        else:
            # new menu... get all the data so we can save a lot of queries
            menu_renderer = context.get('cms_menu_renderer')

            if not menu_renderer:
                menu_renderer = menu_pool.get_renderer(request)

            nodes = menu_renderer.get_nodes(namespace, root_id)
            if root_id:  # find the root id and cut the nodes
                id_nodes = menu_pool.get_nodes_by_attribute(nodes, "reverse_id", root_id)
                if id_nodes:
                    node = id_nodes[0]
                    nodes = node.children
                    for remove_parent in nodes:
                        remove_parent.parent = None
                    # shift the requested window down to the subtree's depth
                    from_level += node.level + 1
                    to_level += node.level + 1
                    nodes = flatten(nodes)
                else:
                    nodes = []
            children = cut_levels(nodes, from_level, to_level, extra_inactive, extra_active)
            children = menu_renderer.apply_modifiers(children, namespace, root_id, post_cut=True)

        try:
            context['children'] = children
            context['template'] = template
            context['from_level'] = from_level
            context['to_level'] = to_level
            context['extra_inactive'] = extra_inactive
            context['extra_active'] = extra_active
            context['namespace'] = namespace
        except Exception:
            # Bug fix: this was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.  Fall back to a bare template
            # context only on ordinary errors.
            context = {"template": template}
        return context
register.tag(ShowMenu)
class ShowMenuBelowId(ShowMenu):
    """Variant of show_menu that takes the root node's reverse_id as the
    first (positional) argument; rendering logic is inherited from
    ShowMenu.get_context."""
    name = 'show_menu_below_id'
    options = Options(
        Argument('root_id', default=None, required=False),
        IntegerArgument('from_level', default=0, required=False),
        IntegerArgument('to_level', default=100, required=False),
        IntegerArgument('extra_inactive', default=0, required=False),
        IntegerArgument('extra_active', default=1000, required=False),
        Argument('template', default='menu/menu.html', required=False),
        Argument('namespace', default=None, required=False),
        Argument('next_page', default=None, required=False),
    )
register.tag(ShowMenuBelowId)
class ShowSubMenu(InclusionTag):
    """
    show the sub menu of the current nav-node.
    - levels: how many levels deep
    - root_level: the level to start the menu at
    - nephews: the level of descendants of siblings (nephews) to show
    - template: template used to render the navigation
    """
    name = 'show_sub_menu'
    template = 'menu/dummy.html'

    options = Options(
        IntegerArgument('levels', default=100, required=False),
        Argument('root_level', default=None, required=False),
        IntegerArgument('nephews', default=100, required=False),
        Argument('template', default='menu/sub_menu.html', required=False),
    )

    def get_context(self, context, levels, root_level, nephews, template):
        # Django 1.4 doesn't accept 'None' as a tag value and resolve to ''
        # So we need to force it to None again
        if not root_level and root_level != 0:
            root_level = None
        try:
            # If there's an exception (500), default context_processors may not be called.
            request = context['request']
        except KeyError:
            return {'template': 'menu/empty.html'}
        menu_renderer = context.get('cms_menu_renderer')

        if not menu_renderer:
            menu_renderer = menu_pool.get_renderer(request)

        nodes = menu_renderer.get_nodes()
        children = []
        # adjust root_level so we cut before the specified level, not after
        include_root = False
        if root_level is not None and root_level > 0:
            root_level -= 1
        elif root_level is not None and root_level == 0:
            include_root = True
        for node in nodes:
            if root_level is None:
                if node.selected:
                    # if no root_level specified, set it to the selected nodes level
                    root_level = node.level
                    # is this the ancestor of current selected node at the root level?
            is_root_ancestor = (node.ancestor and node.level == root_level)
            # is a node selected on the root_level specified
            root_selected = (node.selected and node.level == root_level)
            if is_root_ancestor or root_selected:
                # found the subtree root: trim it and collect its children
                cut_after(node, levels, [])
                children = node.children
                for child in children:
                    if child.sibling:
                        # siblings' descendants get their own depth limit
                        cut_after(child, nephews, [])
                    # if root_level was 0 we need to give the menu the entire tree
                    # not just the children
                if include_root:
                    children = menu_renderer.apply_modifiers([node], post_cut=True)
                else:
                    children = menu_renderer.apply_modifiers(children, post_cut=True)
        context['children'] = children
        context['template'] = template
        context['from_level'] = 0
        context['to_level'] = 0
        context['extra_inactive'] = 0
        context['extra_active'] = 0
        return context
register.tag(ShowSubMenu)
class ShowBreadcrumb(InclusionTag):
    """
    Shows the breadcrumb from the node that has the same url as the current request

    - start_level: after which level should the breadcrumb start? 0=home
    - template: template used to render the breadcrumb
    - only_visible: whether invisible nodes are skipped (default True)
    """
    name = 'show_breadcrumb'
    template = 'menu/dummy.html'

    options = Options(
        Argument('start_level', default=0, required=False),
        Argument('template', default='menu/breadcrumb.html', required=False),
        Argument('only_visible', default=True, required=False),
    )

    def get_context(self, context, start_level, template, only_visible):
        try:
            # If there's an exception (500), default context_processors may not be called.
            request = context['request']
        except KeyError:
            return {'template': 'cms/content.html'}
        # Arguments are positional: if the first one is not a level number,
        # the caller skipped 'start_level', so shift the values one slot.
        if not (isinstance(start_level, int) or start_level.isdigit()):
            only_visible = template
            template = start_level
            start_level = 0
        try:
            only_visible = bool(int(only_visible))
        except (TypeError, ValueError):
            # Non-numeric values (e.g. the string 'True' or a plain bool-ish
            # object) fall back to ordinary truthiness.
            only_visible = bool(only_visible)
        ancestors = []
        menu_renderer = context.get('cms_menu_renderer')
        if not menu_renderer:
            menu_renderer = menu_pool.get_renderer(request)
        nodes = menu_renderer.get_nodes(breadcrumb=True)
        # Find home
        root_url = unquote(reverse("pages-root"))
        home = next((node for node in nodes if node.get_absolute_url() == root_url), None)
        # Find selected
        selected = next((node for node in nodes if node.selected), None)
        if selected and selected != home:
            node = selected
            # Walk up to the root, collecting (visible) ancestors.
            while node:
                if node.visible or not only_visible:
                    ancestors.append(node)
                node = node.parent
        # NOTE(review): due to operator precedence this appends `home` (even
        # when it is None) whenever `ancestors` is empty — preserved as-is to
        # keep behavior identical; confirm upstream before changing.
        if not ancestors or (ancestors and ancestors[-1] != home) and home:
            ancestors.append(home)
        ancestors.reverse()
        if len(ancestors) >= start_level:
            ancestors = ancestors[start_level:]
        else:
            ancestors = []
        context['ancestors'] = ancestors
        context['template'] = template
        return context
register.tag(ShowBreadcrumb)
def _raw_language_marker(language, lang_code):
    # Show the language name exactly as configured.
    return language


def _native_language_marker(language, lang_code):
    # Show the language name translated into that language itself.
    with force_language(lang_code):
        return force_text(ugettext(language))


def _current_language_marker(language, lang_code):
    # Show the language name translated into the currently active language.
    return force_text(ugettext(language))


def _short_language_marker(language, lang_code):
    # Show only the language code (e.g. 'en').
    return lang_code


# Maps the 'i18n_mode' argument of {% language_chooser %} to the function
# used to format each language name.
MARKERS = {
    'raw': _raw_language_marker,
    'native': _native_language_marker,
    'current': _current_language_marker,
    'short': _short_language_marker,
}
class LanguageChooser(InclusionTag):
    """
    Displays a language chooser

    - template: template used to render the language chooser
    - i18n_mode: one of the MARKERS keys ('raw', 'native', 'current',
      'short') controlling how each language name is displayed
    """
    name = 'language_chooser'
    template = 'menu/dummy.html'

    options = Options(
        Argument('template', default=NOT_PROVIDED, required=False),
        Argument('i18n_mode', default='raw', required=False),
    )

    def get_context(self, context, template, i18n_mode):
        # Both arguments are optional and positional: if the first one is a
        # known i18n mode, the caller skipped 'template', so swap them back.
        if template in MARKERS:
            _tmp = template
            if i18n_mode not in MARKERS:
                template = i18n_mode
            else:
                template = NOT_PROVIDED
            i18n_mode = _tmp
        if template is NOT_PROVIDED:
            template = "menu/language_chooser.html"
        if i18n_mode not in MARKERS:
            i18n_mode = 'raw'
        if 'request' not in context:
            # If there's an exception (500), default context_processors may not be called.
            return {'template': 'cms/content.html'}
        marker = MARKERS[i18n_mode]
        current_lang = get_language()
        site = Site.objects.get_current()
        languages = []
        # Build (code, display-name) pairs for all public languages of the site.
        for lang in get_language_objects(site.pk):
            if lang.get('public', True):
                languages.append((lang['code'], marker(lang['name'], lang['code'])))
        context['languages'] = languages
        context['current_language'] = current_lang
        context['template'] = template
        return context
register.tag(LanguageChooser)
class PageLanguageUrl(InclusionTag):
    """
    Displays the url of the current page in the defined language.
    You can set a language_changer function with the set_language_changer function in the utils.py if there is no page.
    This is needed if you have slugs in more than one language.
    """
    name = 'page_language_url'
    template = 'cms/content.html'

    options = Options(
        Argument('lang'),
    )

    def get_context(self, context, lang):
        try:
            # If there's an exception (500), default context_processors may not be called.
            request = context['request']
        except KeyError:
            return {'template': 'cms/content.html'}
        if hasattr(request, "_language_changer"):
            # A page-specific language changer is installed; prefer it, but
            # fall through to the default one if it cannot reverse the url.
            try:
                return {'content': request._language_changer(lang)}
            except NoReverseMatch:
                pass
        # use the default language changer
        return {'content': DefaultLanguageChanger(request)(lang)}
register.tag(PageLanguageUrl)
| bsd-3-clause |
rec/echomesh | code/python/external/platform/darwin/numpy/core/tests/test_defchararray.py | 10 | 25660 | from numpy.testing import *
from numpy.core import *
import numpy as np
import sys
from numpy.core.multiarray import _vec_string
from numpy.compat import asbytes, asbytes_nested
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
class TestBasic(TestCase):
    """Construction of chararrays from object, bytes and unicode arrays."""

    def test_from_object_array(self):
        A = np.array([['abc', 2],
                      ['long ', '0123456789']], dtype='O')
        B = np.char.array(A)
        # itemsize is the length of the longest stringified element
        assert_equal(B.dtype.itemsize, 10)
        assert_array_equal(B, asbytes_nested([['abc', '2'],
                                              ['long', '0123456789']]))

    def test_from_object_array_unicode(self):
        A = np.array([['abc', u'Sigma \u03a3'],
                      ['long ', '0123456789']], dtype='O')
        # Without unicode=True this must fail on the non-ASCII element.
        self.assertRaises(ValueError, np.char.array, (A,))
        B = np.char.array(A, **kw_unicode_true)
        assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
        assert_array_equal(B, [['abc', u'Sigma \u03a3'],
                               ['long', '0123456789']])

    def test_from_string_array(self):
        A = np.array(asbytes_nested([['abc', 'foo'],
                                     ['long ', '0123456789']]))
        assert_equal(A.dtype.type, np.string_)
        B = np.char.array(A)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)
        # np.char.array copies: mutating B must not affect A...
        B[0,0] = 'changed'
        assert_(B[0,0] != A[0,0])
        # ...while np.char.asarray shares the underlying data with A.
        C = np.char.asarray(A)
        assert_array_equal(C, A)
        assert_equal(C.dtype, A.dtype)
        C[0,0] = 'changed again'
        assert_(C[0,0] != B[0,0])
        assert_(C[0,0] == A[0,0])

    def test_from_unicode_array(self):
        A = np.array([['abc', u'Sigma \u03a3'],
                      ['long ', '0123456789']])
        assert_equal(A.dtype.type, np.unicode_)
        B = np.char.array(A)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)
        B = np.char.array(A, **kw_unicode_true)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)

        def fail():
            # Forcing a byte-string array from non-ASCII unicode must fail.
            B = np.char.array(A, **kw_unicode_false)
        self.assertRaises(UnicodeEncodeError, fail)

    def test_unicode_upconvert(self):
        # bytes + unicode promotes to unicode
        A = np.char.array(['abc'])
        B = np.char.array([u'\u03a3'])
        assert_(issubclass((A + B).dtype.type, np.unicode_))

    def test_from_string(self):
        A = np.char.array(asbytes('abc'))
        assert_equal(len(A), 1)
        assert_equal(len(A[0]), 3)
        assert_(issubclass(A.dtype.type, np.string_))

    def test_from_unicode(self):
        A = np.char.array(u'\u03a3')
        assert_equal(len(A), 1)
        assert_equal(len(A[0]), 1)
        # one UCS4 code point = 4 bytes
        assert_equal(A.itemsize, 4)
        assert_(issubclass(A.dtype.type, np.unicode_))
class TestVecString(TestCase):
    """Error handling of the low-level _vec_string helper."""

    def test_non_existent_method(self):
        def fail():
            _vec_string('a', np.string_, 'bogus')
        self.assertRaises(AttributeError, fail)

    def test_non_string_array(self):
        def fail():
            _vec_string(1, np.string_, 'strip')
        self.assertRaises(TypeError, fail)

    def test_invalid_args_tuple(self):
        def fail():
            # args must be a tuple, not a bare int
            _vec_string(['a'], np.string_, 'strip', 1)
        self.assertRaises(TypeError, fail)

    def test_invalid_type_descr(self):
        def fail():
            _vec_string(['a'], 'BOGUS', 'strip')
        self.assertRaises(TypeError, fail)

    def test_invalid_function_args(self):
        def fail():
            # 'strip' does not accept an integer argument
            _vec_string(['a'], np.string_, 'strip', (1,))
        self.assertRaises(TypeError, fail)

    def test_invalid_result_type(self):
        def fail():
            # 'strip' cannot produce an integer result
            _vec_string(['a'], np.integer, 'strip')
        self.assertRaises(TypeError, fail)

    def test_broadcast_error(self):
        def fail():
            # (1, 2) input cannot broadcast against a length-3 argument
            _vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
        self.assertRaises(ValueError, fail)
class TestWhitespace(TestCase):
    """Trailing whitespace is ignored by chararray comparisons."""

    def setUp(self):
        # A has trailing spaces, B does not; they must still compare equal.
        self.A = np.array([['abc ', '123 '],
                           ['789 ', 'xyz ']]).view(np.chararray)
        self.B = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)

    def test1(self):
        assert_(all(self.A == self.B))
        assert_(all(self.A >= self.B))
        assert_(all(self.A <= self.B))
        assert_(all(negative(self.A > self.B)))
        assert_(all(negative(self.A < self.B)))
        assert_(all(negative(self.A != self.B)))
class TestChar(TestCase):
    """dtype='c' (single character) arrays viewed as chararray."""

    def setUp(self):
        self.A = np.array('abc1', dtype='c').view(np.chararray)

    def test_it(self):
        # Each character becomes one element.
        assert_equal(self.A.shape, (4,))
        assert_equal(self.A.upper()[:2].tostring(), asbytes('AB'))
class TestComparisons(TestCase):
    """Element-wise rich comparisons between two chararrays."""

    def setUp(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '123 '],
                           ['051', 'tuv']]).view(np.chararray)

    def test_not_equal(self):
        assert_array_equal((self.A != self.B), [[True, False], [True, True]])

    def test_equal(self):
        # '123' == '123 ' because trailing whitespace is stripped
        assert_array_equal((self.A == self.B), [[False, True], [False, False]])

    def test_greater_equal(self):
        assert_array_equal((self.A >= self.B), [[False, True], [True, True]])

    def test_less_equal(self):
        assert_array_equal((self.A <= self.B), [[True, True], [False, False]])

    def test_greater(self):
        assert_array_equal((self.A > self.B), [[False, False], [True, True]])

    def test_less(self):
        assert_array_equal((self.A < self.B), [[True, False], [False, False]])
class TestComparisonsMixed1(TestComparisons):
    """Ticket #1276"""

    # Same comparisons as TestComparisons, but B is a unicode chararray.
    def setUp(self):
        TestComparisons.setUp(self)
        self.B = np.array([['efg', '123 '],
                           ['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
    """Ticket #1276"""

    # Same comparisons as TestComparisons, but A is a unicode chararray.
    def setUp(self):
        TestComparisons.setUp(self)
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']], np.unicode_).view(np.chararray)
class TestInformation(TestCase):
    """Query methods: str_len, count, find/index, is*, startswith/endswith."""

    def setUp(self):
        # A: byte strings, B: unicode strings with the same layout.
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
        self.B = np.array([[u' \u03a3 ', u''],
                           [u'12345', u'MixedCase'],
                           [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)

    def test_len(self):
        assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
        assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
        assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])

    def test_count(self):
        assert_(issubclass(self.A.count('').dtype.type, np.integer))
        assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
        assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
        # Python doesn't seem to like counting NULL characters
        # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
        assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
        assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
        # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])

    def test_endswith(self):
        assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
        assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
        assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            # start/end must be integers
            self.A.endswith('3', 'fdjk')
        self.assertRaises(TypeError, fail)

    def test_find(self):
        assert_(issubclass(self.A.find('a').dtype.type, np.integer))
        assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
        assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])

    def test_index(self):
        def fail():
            # unlike find(), index() raises when the substring is absent
            self.A.index('a')
        self.assertRaises(ValueError, fail)
        assert_(np.char.index('abcba', 'b') == 1)
        assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))

    def test_isalnum(self):
        assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
        assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])

    def test_isalpha(self):
        assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
        assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])

    def test_isdigit(self):
        assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
        assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])

    def test_islower(self):
        assert_(issubclass(self.A.islower().dtype.type, np.bool_))
        assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])

    def test_isspace(self):
        assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
        assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])

    def test_istitle(self):
        assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
        assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])

    def test_isupper(self):
        assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
        assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])

    def test_rfind(self):
        assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
        assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
        assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])

    def test_rindex(self):
        def fail():
            self.A.rindex('a')
        self.assertRaises(ValueError, fail)
        assert_(np.char.rindex('abcba', 'b') == 3)
        assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))

    def test_startswith(self):
        assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
        assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            # start/end must be integers
            self.A.startswith('3', 'fdjk')
        self.assertRaises(TypeError, fail)
class TestMethods(TestCase):
    """Transformation methods: capitalize, center/ljust/rjust, encode/decode,
    expandtabs, join, case ops, partition/split and friends.

    NOTE: the multi-space padding inside the expected-value string literals
    below was reconstructed from Python's str.center/ljust/rjust/expandtabs
    semantics (this copy of the file had runs of spaces collapsed).
    """

    def setUp(self):
        # A: byte strings, B: unicode strings with the same layout.
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']],
                          dtype='S').view(np.chararray)
        self.B = np.array([[u' \u03a3 ', u''],
                           [u'12345', u'MixedCase'],
                           [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)

    def test_capitalize(self):
        assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
        assert_array_equal(self.A.capitalize(), asbytes_nested([
                [' abc ', ''],
                ['12345', 'Mixedcase'],
                ['123 \t 345 \0 ', 'Upper']]))
        assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
        assert_array_equal(self.B.capitalize(), [
                [u' \u03c3 ', ''],
                ['12345', 'Mixedcase'],
                ['123 \t 345 \0 ', 'Upper']])

    def test_center(self):
        assert_(issubclass(self.A.center(10).dtype.type, np.string_))
        C = self.A.center([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.center(20, asbytes('#'))
        assert_(np.all(C.startswith(asbytes('#'))))
        assert_(np.all(C.endswith(asbytes('#'))))

        C = np.char.center(asbytes('FOO'), [[10, 20], [15, 8]])
        assert_(issubclass(C.dtype.type, np.string_))
        # str.center puts the extra pad character on the right for odd padding
        assert_array_equal(C, asbytes_nested([
                ['   FOO    ', '        FOO         '],
                ['      FOO      ', '  FOO   ']]))

    def test_decode(self):
        if sys.version_info[0] >= 3:
            A = np.char.array([asbytes('\\u03a3')])
            assert_(A.decode('unicode-escape')[0] == '\u03a3')
        else:
            A = np.char.array(['736563726574206d657373616765'])
            assert_(A.decode('hex_codec')[0] == 'secret message')

    def test_encode(self):
        B = self.B.encode('unicode_escape')
        assert_(B[0][0] == asbytes(r' \u03a3 '))

    def test_expandtabs(self):
        T = self.A.expandtabs()
        # The tab in '123 \t 345 \0 ' expands to column 8, leaving six
        # spaces between '123' and '345'.
        assert_(T[2][0] == asbytes('123      345'))

    def test_join(self):
        if sys.version_info[0] >= 3:
            # NOTE: list(b'123') == [49, 50, 51]
            # so that b','.join(b'123') results to an error on Py3
            A0 = self.A.decode('ascii')
        else:
            A0 = self.A
        A = np.char.join([',', '#'], A0)
        if sys.version_info[0] >= 3:
            assert_(issubclass(A.dtype.type, np.unicode_))
        else:
            assert_(issubclass(A.dtype.type, np.string_))
        assert_array_equal(np.char.join([',', '#'], A0),
                           [
                            [' ,a,b,c, ', ''],
                            ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
                            ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])

    def test_ljust(self):
        assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
        C = self.A.ljust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.ljust(20, asbytes('#'))
        assert_array_equal(C.startswith(asbytes('#')), [
                [False, True], [False, False], [False, False]])
        assert_(np.all(C.endswith(asbytes('#'))))

        C = np.char.ljust(asbytes('FOO'), [[10, 20], [15, 8]])
        assert_(issubclass(C.dtype.type, np.string_))
        assert_array_equal(C, asbytes_nested([
                ['FOO       ', 'FOO                 '],
                ['FOO            ', 'FOO     ']]))

    def test_lower(self):
        assert_(issubclass(self.A.lower().dtype.type, np.string_))
        assert_array_equal(self.A.lower(), asbytes_nested([
                [' abc ', ''],
                ['12345', 'mixedcase'],
                ['123 \t 345 \0 ', 'upper']]))
        assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
        assert_array_equal(self.B.lower(), [
                [u' \u03c3 ', u''],
                [u'12345', u'mixedcase'],
                [u'123 \t 345 \0 ', u'upper']])

    def test_lstrip(self):
        assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
        assert_array_equal(self.A.lstrip(), asbytes_nested([
                ['abc ', ''],
                ['12345', 'MixedCase'],
                ['123 \t 345 \0 ', 'UPPER']]))
        assert_array_equal(self.A.lstrip(asbytes_nested(['1', 'M'])),
                           asbytes_nested([
                [' abc', ''],
                ['2345', 'ixedCase'],
                ['23 \t 345 \x00', 'UPPER']]))
        assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
        assert_array_equal(self.B.lstrip(), [
                [u'\u03a3 ', ''],
                ['12345', 'MixedCase'],
                ['123 \t 345 \0 ', 'UPPER']])

    def test_partition(self):
        if sys.version_info >= (2, 5):
            P = self.A.partition(asbytes_nested(['3', 'M']))
            assert_(issubclass(P.dtype.type, np.string_))
            assert_array_equal(P, asbytes_nested([
                    [(' abc ', '', ''), ('', '', '')],
                    [('12', '3', '45'), ('', 'M', 'ixedCase')],
                    [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]]))

    def test_replace(self):
        R = self.A.replace(asbytes_nested(['3', 'a']),
                           asbytes_nested(['##########', '@']))
        assert_(issubclass(R.dtype.type, np.string_))
        assert_array_equal(R, asbytes_nested([
                [' abc ', ''],
                ['12##########45', 'MixedC@se'],
                ['12########## \t ##########45 \x00', 'UPPER']]))

        if sys.version_info[0] < 3:
            # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
            R = self.A.replace(asbytes('a'), u'\u03a3')
            assert_(issubclass(R.dtype.type, np.unicode_))
            assert_array_equal(R, [
                    [u' \u03a3bc ', ''],
                    ['12345', u'MixedC\u03a3se'],
                    ['123 \t 345 \x00', 'UPPER']])

    def test_rjust(self):
        assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
        C = self.A.rjust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.rjust(20, asbytes('#'))
        assert_(np.all(C.startswith(asbytes('#'))))
        assert_array_equal(C.endswith(asbytes('#')),
                           [[False, True], [False, False], [False, False]])

        C = np.char.rjust(asbytes('FOO'), [[10, 20], [15, 8]])
        assert_(issubclass(C.dtype.type, np.string_))
        assert_array_equal(C, asbytes_nested([
                ['       FOO', '                 FOO'],
                ['            FOO', '     FOO']]))

    def test_rpartition(self):
        if sys.version_info >= (2, 5):
            P = self.A.rpartition(asbytes_nested(['3', 'M']))
            assert_(issubclass(P.dtype.type, np.string_))
            assert_array_equal(P, asbytes_nested([
                    [('', '', ' abc '), ('', '', '')],
                    [('12', '3', '45'), ('', 'M', 'ixedCase')],
                    [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]]))

    def test_rsplit(self):
        A = self.A.rsplit(asbytes('3'))
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), asbytes_nested([
                [[' abc '], ['']],
                [['12', '45'], ['MixedCase']],
                [['12', ' \t ', '45 \x00 '], ['UPPER']]]))

    def test_rstrip(self):
        assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
        assert_array_equal(self.A.rstrip(), asbytes_nested([
                [' abc', ''],
                ['12345', 'MixedCase'],
                ['123 \t 345', 'UPPER']]))
        assert_array_equal(self.A.rstrip(asbytes_nested(['5', 'ER'])),
                           asbytes_nested([
                [' abc ', ''],
                ['1234', 'MixedCase'],
                ['123 \t 345 \x00', 'UPP']]))
        assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
        assert_array_equal(self.B.rstrip(), [
                [u' \u03a3', ''],
                ['12345', 'MixedCase'],
                ['123 \t 345', 'UPPER']])

    def test_strip(self):
        assert_(issubclass(self.A.strip().dtype.type, np.string_))
        assert_array_equal(self.A.strip(), asbytes_nested([
                ['abc', ''],
                ['12345', 'MixedCase'],
                ['123 \t 345', 'UPPER']]))
        assert_array_equal(self.A.strip(asbytes_nested(['15', 'EReM'])),
                           asbytes_nested([
                [' abc ', ''],
                ['234', 'ixedCas'],
                ['23 \t 345 \x00', 'UPP']]))
        assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
        assert_array_equal(self.B.strip(), [
                [u'\u03a3', ''],
                ['12345', 'MixedCase'],
                ['123 \t 345', 'UPPER']])

    def test_split(self):
        A = self.A.split(asbytes('3'))
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), asbytes_nested([
                [[' abc '], ['']],
                [['12', '45'], ['MixedCase']],
                [['12', ' \t ', '45 \x00 '], ['UPPER']]]))

    def test_splitlines(self):
        A = np.char.array(['abc\nfds\nwer']).splitlines()
        assert_(issubclass(A.dtype.type, np.object_))
        assert_(A.shape == (1,))
        assert_(len(A[0]) == 3)

    def test_swapcase(self):
        assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
        assert_array_equal(self.A.swapcase(), asbytes_nested([
                [' ABC ', ''],
                ['12345', 'mIXEDcASE'],
                ['123 \t 345 \0 ', 'upper']]))
        assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
        assert_array_equal(self.B.swapcase(), [
                [u' \u03c3 ', u''],
                [u'12345', u'mIXEDcASE'],
                [u'123 \t 345 \0 ', u'upper']])

    def test_title(self):
        assert_(issubclass(self.A.title().dtype.type, np.string_))
        assert_array_equal(self.A.title(), asbytes_nested([
                [' Abc ', ''],
                ['12345', 'Mixedcase'],
                ['123 \t 345 \0 ', 'Upper']]))
        assert_(issubclass(self.B.title().dtype.type, np.unicode_))
        assert_array_equal(self.B.title(), [
                [u' \u03a3 ', u''],
                [u'12345', u'Mixedcase'],
                [u'123 \t 345 \0 ', u'Upper']])

    def test_upper(self):
        assert_(issubclass(self.A.upper().dtype.type, np.string_))
        assert_array_equal(self.A.upper(), asbytes_nested([
                [' ABC ', ''],
                ['12345', 'MIXEDCASE'],
                ['123 \t 345 \0 ', 'UPPER']]))
        assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
        assert_array_equal(self.B.upper(), [
                [u' \u03a3 ', u''],
                [u'12345', u'MIXEDCASE'],
                [u'123 \t 345 \0 ', u'UPPER']])

    def test_isnumeric(self):
        def fail():
            # isnumeric() is only defined for unicode arrays
            self.A.isnumeric()
        self.assertRaises(TypeError, fail)
        assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
        assert_array_equal(self.B.isnumeric(), [
                [False, False], [True, False], [False, False]])

    def test_isdecimal(self):
        def fail():
            # isdecimal() is only defined for unicode arrays
            self.A.isdecimal()
        self.assertRaises(TypeError, fail)
        assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
        assert_array_equal(self.B.isdecimal(), [
                [False, False], [True, False], [False, False]])
class TestOperations(TestCase):
    """Operators on chararrays: +, * (repeat), % (formatting)."""

    def setUp(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '456'],
                           ['051', 'tuv']]).view(np.chararray)

    def test_add(self):
        AB = np.array([['abcefg', '123456'],
                       ['789051', 'xyztuv']]).view(np.chararray)
        assert_array_equal(AB, (self.A + self.B))
        assert_(len((self.A + self.B)[0][0]) == 6)

    def test_radd(self):
        QA = np.array([['qabc', 'q123'],
                       ['q789', 'qxyz']]).view(np.chararray)
        assert_array_equal(QA, ('q' + self.A))

    def test_mul(self):
        A = self.A
        for r in (2,3,5,7,197):
            Ar = np.array([[A[0,0]*r, A[0,1]*r],
                           [A[1,0]*r, A[1,1]*r]]).view(np.chararray)
            assert_array_equal(Ar, (self.A * r))

        # Multiplying by a non-integer must raise.
        for ob in [object(), 'qrs']:
            try:
                A * ob
            except ValueError:
                pass
            else:
                self.fail("chararray can only be multiplied by integers")

    def test_rmul(self):
        A = self.A
        for r in (2,3,5,7,197):
            Ar = np.array([[A[0,0]*r, A[0,1]*r],
                           [A[1,0]*r, A[1,1]*r]]).view(np.chararray)
            assert_array_equal(Ar, (r * self.A))

        for ob in [object(), 'qrs']:
            try:
                ob * A
            except ValueError:
                pass
            else:
                self.fail("chararray can only be multiplied by integers")

    def test_mod(self):
        """Ticket #856"""
        F = np.array([['%d', '%f'],['%s','%r']]).view(np.chararray)
        C = np.array([[3,7],[19,1]])
        FC = np.array([['3', '7.000000'],
                       ['19', '1']]).view(np.chararray)
        assert_array_equal(FC, F % C)

        A = np.array([['%.3f','%d'],['%s','%r']]).view(np.chararray)
        A1 = np.array([['1.000','1'],['1','1']]).view(np.chararray)
        assert_array_equal(A1, (A % 1))

        A2 = np.array([['1.000','2'],['3','4']]).view(np.chararray)
        assert_array_equal(A2, (A % [[1,2],[3,4]]))

    def test_rmod(self):
        assert_(("%s" % self.A) == str(self.A))
        assert_(("%r" % self.A) == repr(self.A))

        # %-formatting with a chararray on the right must fail for
        # non-string left operands.
        for ob in [42, object()]:
            try:
                ob % self.A
            except TypeError:
                pass
            else:
                self.fail("chararray __rmod__ should fail with " \
                          "non-string objects")
def test_empty_indexing():
    """Regression test for ticket 1948."""
    # Indexing a chararray with an empty list/array must produce an empty
    # chararray, not a chararray holding a single empty string.
    arr = np.chararray((4,))
    empty = arr[[]]
    assert_(empty.size == 0)
# Allow running this test file directly.
if __name__ == "__main__":
    run_module_suite()
| mit |
caronc/nzbget-subliminal | Subliminal/guessit/transfo/expected_title.py | 6 | 2517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.containers import PropertiesContainer
from guessit.matcher import GuessFinder
from guessit.plugins.transformers import Transformer
import re
class ExpectedTitle(Transformer):
    """Transformer that matches titles explicitly supplied via --expected-title."""

    def __init__(self):
        Transformer.__init__(self, 225)

    def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
        naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title',
                                 help='Expected title (can be used multiple times)')

    def should_process(self, mtree, options=None):
        # Only run when the user actually supplied at least one expected title.
        return options and options.get('expected_title')

    def expected_titles(self, string, node=None, options=None):
        container = PropertiesContainer(enhance=True, canonical_from_pattern=False)
        for candidate in options.get('expected_title'):
            if candidate.startswith('re:'):
                # 're:'-prefixed values are treated as regular expressions
                # (spaces are normalized to separator dashes).
                pattern = candidate[3:].replace(' ', '-')
                container.register_property('title', pattern, enhance=True)
            else:
                # Plain values are matched literally.
                container.register_property('title', re.escape(candidate), enhance=False)
        found = container.find_properties(string, node, options)
        return container.as_guess(found, string)

    def supported_properties(self):
        return ['title']

    def process(self, mtree, options=None):
        GuessFinder(self.expected_titles, None, self.log, options).process_nodes(mtree.unidentified_leaves())
| gpl-3.0 |
natanovia/zulip | zerver/lib/socket.py | 120 | 12506 | from __future__ import absolute_import
from django.conf import settings
from django.utils.importlib import import_module
from django.utils import timezone
from django.contrib.sessions.models import Session as djSession
import sockjs.tornado
import tornado.ioloop
import ujson
import logging
import time
from zerver.models import UserProfile, get_user_profile_by_id, get_client
from zerver.lib.queue import queue_json_publish
from zerver.lib.actions import check_send_message, extract_recipients
from zerver.decorator import JsonableError
from zerver.lib.utils import statsd
from zerver.lib.event_queue import get_client_descriptor
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.session_user import get_session_user
logger = logging.getLogger('zulip.socket')
djsession_engine = import_module(settings.SESSION_ENGINE)
def get_user_profile(session_id):
    """Resolve a Django session key to its UserProfile.

    Returns None when the session id is missing, the session is unknown or
    expired, or it does not map to an existing user.
    """
    if session_id is None:
        return None
    try:
        session = djSession.objects.get(expire_date__gt=timezone.now(),
                                        session_key=session_id)
        return UserProfile.objects.get(pk=get_session_user(session))
    except (djSession.DoesNotExist, UserProfile.DoesNotExist, KeyError):
        return None
# Registry of active socket connections, keyed by event queue id.
connections = {}


def get_connection(id):
    # Return the connection registered under this queue id, or None.
    return connections.get(id)


def register_connection(id, conn):
    # Kill any old connections if they exist
    existing = connections.get(id)
    if existing is not None:
        existing.close()
    conn.client_id = id
    connections[id] = conn


def deregister_connection(conn):
    connections.pop(conn.client_id)
redis_client = get_redis_client()
def req_redis_key(req_id):
    # Redis key under which the delivery status of request `req_id` is kept.
    return 'socket_req_status:{0}'.format(req_id)
class SocketAuthError(Exception):
    """Raised when a socket client fails authentication.

    The message is exposed as `.msg` (read when building error responses)
    and, via Exception.__init__, also through str() and logging.
    """
    def __init__(self, msg):
        # Pass the message to the base class so str(e) is not empty.
        super(SocketAuthError, self).__init__(msg)
        self.msg = msg
class CloseErrorInfo(object):
    """Records why a connection is being closed, for logging at close time."""
    def __init__(self, status_code, err_msg):
        # HTTP-style status code (e.g. 403, 408) and a human-readable message.
        self.status_code = status_code
        self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
    def on_open(self, info):
        """Handle a new sockjs connection.

        Reads the session and CSRF cookies needed for later authentication
        and arms a timeout that closes the connection if the client does not
        authenticate within 10 seconds.
        """
        log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
        record_request_start_data(log_data)
        ioloop = tornado.ioloop.IOLoop.instance()

        self.authenticated = False
        self.session.user_profile = None
        self.close_info = None
        self.did_close = False

        try:
            self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
            self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
        except AttributeError:
            # The request didn't contain the necessary cookie values. We can't
            # close immediately because sockjs-tornado doesn't expect a close
            # inside on_open(), so do it on the next tick.
            self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
            ioloop.add_callback(self.close)
            return

        def auth_timeout():
            self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
            self.close()

        self.timeout_handle = ioloop.add_timeout(time.time() + 10, auth_timeout)
        write_log_line(log_data, path='/socket/open', method='SOCKET',
                       remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg):
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error', 'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise SocketAuthError('Unknown or missing session')
self.session.user_profile = user_profile
if msg['request']['csrf_token'] != self.csrf_token:
raise SocketAuthError('CSRF token does not match that in cookie')
if not 'queue_id' in msg['request']:
raise SocketAuthError("Missing 'queue_id' argument")
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise SocketAuthError('Bad event queue id: %s' % (queue_id,))
if user_profile.id != client.user_profile_id:
raise SocketAuthError("You are not the owner of the queue with id '%s'" % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {}
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry))
if len(status) == 0:
status['status'] = 'not_received'
if 'response' in status:
status['response'] = ujson.loads(status['response'])
results[str(inquiry)] = status
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg):
    """Handle one incoming frame: either an 'auth' message or a service request.

    Every message is acked immediately.  'auth' messages are delegated to
    authenticate_client(); anything else (once authenticated) is recorded
    in redis and forwarded to the message_sender queue for processing.
    """
    log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
    record_request_start_data(log_data)
    msg = ujson.loads(msg)

    if self.did_close:
        # NOTE(review): we only log here; the message is still acked and
        # processed below even though on_close() has already run.
        logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
                    % (self.session.transport_name,
                       self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
                       self.client_id))

    # Ack receipt before doing any work so the client can stop resending.
    self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})

    if msg['type'] == 'auth':
        log_data['extra'] += ']'
        try:
            self.authenticate_client(msg)
            # TODO: Fill in the correct client
            write_log_line(log_data, path='/socket/auth', method='SOCKET',
                           remote_ip=self.session.conn_info.ip,
                           email=self.session.user_profile.email,
                           client_name='?')
        except SocketAuthError as e:
            response = {'result': 'error', 'msg': e.msg}
            self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
                                       'response': response})
            write_log_line(log_data, path='/socket/auth', method='SOCKET',
                           remote_ip=self.session.conn_info.ip,
                           email='unknown', client_name='?',
                           status_code=403, error_content=ujson.dumps(response))
        return
    else:
        if not self.authenticated:
            response = {'result': 'error', 'msg': "Not yet authenticated"}
            self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
                                       'response': response})
            write_log_line(log_data, path='/socket/service_request', method='SOCKET',
                           remote_ip=self.session.conn_info.ip,
                           email='unknown', client_name='?',
                           status_code=403, error_content=ujson.dumps(response))
            return

    # Mark the request as received so authenticate_client() can answer
    # later status inquiries about it; expire the record after a day.
    redis_key = req_redis_key(msg['req_id'])
    with redis_client.pipeline() as pipeline:
        pipeline.hmset(redis_key, {'status': 'received'})
        pipeline.expire(redis_key, 60 * 60 * 24)
        pipeline.execute()

    record_request_stop_data(log_data)
    queue_json_publish("message_sender",
                       dict(request=msg['request'],
                            req_id=msg['req_id'],
                            server_meta=dict(user_id=self.session.user_profile.id,
                                             client_id=self.client_id,
                                             return_queue="tornado_return",
                                             log_data=log_data,
                                             request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
                       fake_message_sender)
def on_close(self):
    """Log the disconnect and deregister the connection.

    If close_info was set (auth failure or auth timeout) the close was
    server-initiated and is logged with its error status; otherwise the
    client went away and the connection is deregistered normally.
    """
    log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
    record_request_start_data(log_data)
    if self.close_info is not None:
        write_log_line(log_data, path='/socket/close', method='SOCKET',
                       remote_ip=self.session.conn_info.ip, email='unknown',
                       client_name='?', status_code=self.close_info.status_code,
                       error_content=self.close_info.err_msg)
    else:
        deregister_connection(self)
        email = self.session.user_profile.email \
            if self.session.user_profile is not None else 'unknown'
        write_log_line(log_data, path='/socket/close', method='SOCKET',
                       remote_ip=self.session.conn_info.ip, email=email,
                       client_name='?')
    self.did_close = True
def fake_message_sender(event):
    """Process a message-send request synchronously, in place of the
    message_sender queue worker.

    Sends the message described by event['request'] on behalf of the user
    identified in event['server_meta'], then feeds the success/error result
    (plus timing metadata) straight into respond_send_message().
    """
    log_data = dict()
    record_request_start_data(log_data)

    req = event['request']
    try:
        sender = get_user_profile_by_id(event['server_meta']['user_id'])
        client = get_client(req['client'])
        msg_id = check_send_message(sender, client, req['type'],
                                    extract_recipients(req['to']),
                                    req['subject'], req['content'],
                                    local_id=req.get('local_id', None),
                                    sender_queue_id=req.get('queue_id', None))
        resp = {"result": "success", "msg": "", "id": msg_id}
    except JsonableError as e:
        # Expected user-facing failures become error responses, not crashes.
        resp = {"result": "error", "msg": str(e)}

    server_meta = event['server_meta']
    server_meta.update({'worker_log_data': log_data,
                        'time_request_finished': time.time()})
    result = {'response': resp, 'req_id': event['req_id'],
              'server_meta': server_meta}
    respond_send_message(result)
def respond_send_message(data):
    """Deliver a message-send result back over the originating socket.

    Computes queue/service timing figures for the log line, looks the
    connection up by the client_id recorded when the request was forwarded,
    and sends the response frame if the socket is still connected.
    """
    log_data = data['server_meta']['log_data']
    record_request_restart_data(log_data)

    worker_log_data = data['server_meta']['worker_log_data']
    forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
    return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
    service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
    log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
        format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
        format_timedelta(service_time))

    client_id = data['server_meta']['client_id']
    connection = get_connection(client_id)
    if connection is None:
        # The socket may have closed while the request was in flight.
        logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
    else:
        connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
                                         'response': data['response']})
        # TODO: Fill in client name
        # TODO: Maybe fill in the status code correctly
        write_log_line(log_data, path='/socket/service_request', method='SOCKET',
                       remote_ip=connection.session.conn_info.ip,
                       email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the zulip.com cookie, which we use as part of our
# authentication scheme.
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
                                            {'sockjs_url': 'https://%s/static/third/sockjs/sockjs-0.3.4.js' % (settings.EXTERNAL_HOST,),
                                             'disabled_transports': ['eventsource', 'htmlfile']})


def get_sockjs_router():
    """Return the module-level SockJS router (created once at import time)."""
    return sockjs_router
| apache-2.0 |
palkeo/nebulosa | django_extensions/management/commands/create_command.py | 8 | 3331 | import os
import sys
from django.core.management.base import AppCommand
from django_extensions.management.utils import _make_writeable
from optparse import make_option
class Command(AppCommand):
    """Management command that scaffolds a new custom management command
    inside a given app by copying the bundled 'command_template' tree.
    """
    option_list = AppCommand.option_list + (
        make_option('--name', '-n', action='store', dest='command_name', default='sample',
                    help='The name to use for the management command'),
        make_option('--base', '-b', action='store', dest='base_command', default='Base',
                    help='The base class used for implementation of this command. Should be one of Base, App, Label, or NoArgs'),
    )
    help = ("Creates a Django management command directory structure for the given app name"
            " in the app's directory.")
    args = "[appname]"
    label = 'application name'
    requires_model_validation = False

    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = True

    def handle_app(self, app, **options):
        # Scaffold next to the app's __init__.py; '%sCommand' turns the
        # --base value into a class name (e.g. 'App' -> 'AppCommand').
        app_dir = os.path.dirname(app.__file__)
        copy_template('command_template', app_dir, options.get('command_name'), '%sCommand' % options.get('base_command'))
def copy_template(template_name, copy_to, command_name, base_command):
    """Copy the bundled template directory *template_name* into *copy_to*.

    File names containing 'sample' are renamed using *command_name*,
    '.tmpl' suffixes are dropped, and the placeholders
    {{ command_name }}, {{ base_command }} and {{ handle_method }} are
    substituted in file contents.  Existing files are never overwritten.
    """
    import django_extensions
    import shutil
    template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)

    # Pick the handler signature matching the chosen base class.
    handle_method = "handle(self, *args, **options)"
    if base_command == 'AppCommand':
        handle_method = "handle_app(self, app, **options)"
    elif base_command == 'LabelCommand':
        handle_method = "handle_label(self, label, **options)"
    elif base_command == 'NoArgsCommand':
        handle_method = "handle_noargs(self, **options)"

    # walks the template structure and copies it
    for d, subdirs, files in os.walk(template_dir):
        relative_dir = d[len(template_dir) + 1:]
        if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
            os.mkdir(os.path.join(copy_to, relative_dir))
        # Prune hidden directories in place so os.walk skips them.
        # BUG FIX: the old code deleted entries while iterating with
        # enumerate(), which silently skipped the entry after each deletion.
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
        for f in files:
            if f.endswith('.pyc') or f.startswith('.DS_Store'):
                continue
            path_old = os.path.join(d, f)
            path_new = os.path.join(copy_to, relative_dir, f.replace('sample', command_name))
            if os.path.exists(path_new):
                # The renamed target exists; fall back to the original name,
                # and skip entirely if that exists too.
                path_new = os.path.join(copy_to, relative_dir, f)
                if os.path.exists(path_new):
                    continue
            # BUG FIX: rstrip(".tmpl") strips any trailing run of the
            # characters '.', 't', 'm', 'p', 'l' (e.g. 'html.tmpl' -> 'h');
            # remove the '.tmpl' suffix explicitly instead.
            if path_new.endswith('.tmpl'):
                path_new = path_new[:-len('.tmpl')]
            # Use context managers so handles are closed even on errors.
            with open(path_old, 'r') as fp_old:
                content = fp_old.read()
            content = (content.replace('{{ command_name }}', command_name)
                              .replace('{{ base_command }}', base_command)
                              .replace('{{ handle_method }}', handle_method))
            with open(path_new, 'w') as fp_new:
                fp_new.write(content)
            try:
                shutil.copymode(path_old, path_new)
                _make_writeable(path_new)
            except OSError:
                sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
| unlicense |
peterdemartini/KrakenMaster | kraken/grade/views.py | 1 | 1318 | # -*- coding: utf-8 -*-
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required
from kraken.grade.models import Grade
import json
from kraken.helpers.Skynet import Skynet
blueprint = Blueprint("grade", __name__, url_prefix='/grades',
static_folder="../static")
@blueprint.route("/recent/")
@login_required
def recent():
    """Render the 100 most recently created grades."""
    latest = Grade.get_recent(100, "created_at DESC")
    return render_template("grades/recent.html", grades=latest)
@blueprint.route("/create/", methods=['POST'])
@login_required
def create():
    """Create a grade from the posted form fields and render the result page."""
    form = request.form
    grade = Grade.create(
        start=int(form['start']),
        end=int(form['end']),
        snoozes=int(form['snooze_count']),
    )
    return render_template("grades/result.html", grade=grade)
@blueprint.route("/api/create", methods=['GET'])
def api_create():
    """API endpoint: create a grade from query-string parameters.

    Requires 'start', 'end' and 'snooze_count' integer arguments; returns
    a JSON payload with a 'success' flag and, on failure, an 'error'
    message.  Removed a leftover debug print, and malformed (non-integer)
    values now produce the JSON error response instead of an unhandled
    ValueError (HTTP 500).
    """
    required = ('start', 'end', 'snooze_count')
    if not request.args or any(key not in request.args for key in required):
        return json.dumps({'success': False, 'error': 'Invalid Request'})
    try:
        Grade.create(start=int(request.args['start']),
                     end=int(request.args['end']),
                     snoozes=int(request.args['snooze_count']))
    except ValueError:
        return json.dumps({'success': False, 'error': 'Invalid Request'})
    return json.dumps({'success': True})
| bsd-3-clause |
nchammas/click | examples/repo/repo.py | 44 | 4802 | import os
import sys
import posixpath
import click
class Repo(object):
    """Per-invocation state shared by every repo subcommand.

    Holds the repository home path, the collected --config overrides,
    and the verbosity flag set by the top-level cli() group.
    """

    def __init__(self, home):
        self.home = home
        self.config = dict()
        self.verbose = False

    def set_config(self, key, value):
        """Record a config override; echo it to stderr in verbose mode."""
        self.config[key] = value
        if not self.verbose:
            return
        click.echo(' config[%s] = %s' % (key, value), file=sys.stderr)

    def __repr__(self):
        return '<Repo %r>' % self.home
# Decorator that injects the Repo created in cli() into subcommands.
pass_repo = click.make_pass_decorator(Repo)


@click.group()
@click.option('--repo-home', envvar='REPO_HOME', default='.repo',
              metavar='PATH', help='Changes the repository folder location.')
@click.option('--config', nargs=2, multiple=True,
              metavar='KEY VALUE', help='Overrides a config key/value pair.')
@click.option('--verbose', '-v', is_flag=True,
              help='Enables verbose mode.')
@click.version_option('1.0')
@click.pass_context
def cli(ctx, repo_home, config, verbose):
    """Repo is a command line tool that showcases how to build complex
    command line interfaces with Click.

    This tool is supposed to look like a distributed version control
    system to show how something like this can be structured.
    """
    # Create a repo object and remember it as the context object. From
    # this point onwards other commands can refer to it by using the
    # @pass_repo decorator.
    ctx.obj = Repo(os.path.abspath(repo_home))
    ctx.obj.verbose = verbose
    for key, value in config:
        ctx.obj.set_config(key, value)
@cli.command()
@click.argument('src')
@click.argument('dest', required=False)
@click.option('--shallow/--deep', default=False,
              help='Makes a checkout shallow or deep. Deep by default.')
@click.option('--rev', '-r', default='HEAD',
              help='Clone a specific revision instead of HEAD.')
@pass_repo
def clone(repo, src, dest, shallow, rev):
    """Clones a repository.

    This will clone the repository at SRC into the folder DEST. If DEST
    is not provided this will automatically use the last path component
    of SRC and create that folder.
    """
    # Default the destination to the last path component of SRC.
    target = dest if dest is not None else (posixpath.split(src)[-1] or '.')
    click.echo('Cloning repo %s to %s' % (src, os.path.abspath(target)))
    repo.home = target
    if shallow:
        click.echo('Making shallow checkout')
    click.echo('Checking out revision %s' % rev)
@cli.command()
@click.confirmation_option()
@pass_repo
def delete(repo):
    """Deletes a repository.

    This will throw away the current repository.
    """
    # confirmation_option() already prompted the user before we get here.
    click.echo('Destroying repo %s' % repo.home)
    click.echo('Deleted!')
@cli.command()
@click.option('--username', prompt=True,
              help='The developer\'s shown username.')
@click.option('--email', prompt='E-Mail',
              help='The developer\'s email address')
@click.password_option(help='The login password.')
@pass_repo
def setuser(repo, username, email, password):
    """Sets the user credentials.

    This will override the current user config.
    """
    # Store each credential; the password is masked before it is recorded.
    for key, value in (('username', username),
                       ('email', email),
                       ('password', '*' * len(password))):
        repo.set_config(key, value)
    click.echo('Changed credentials.')
@cli.command()
@click.option('--message', '-m', multiple=True,
              help='The commit message. If provided multiple times each '
                   'argument gets converted into a new line.')
@click.argument('files', nargs=-1, type=click.Path())
@pass_repo
def commit(repo, files, message):
    """Commits outstanding changes.

    Commit changes to the given files into the repository. You will need to
    "repo push" to push up your changes to other repositories.

    If a list of files is omitted, all changes reported by "repo status"
    will be committed.
    """
    if not message:
        # No -m given: open the user's editor, pre-seeded with a commented
        # list of the files to be committed.
        marker = '# Files to be committed:'
        hint = ['', '', marker, '#']
        for file in files:
            hint.append('# U %s' % file)
        message = click.edit('\n'.join(hint))
        if message is None:
            # Editor was closed without saving.
            click.echo('Aborted!')
            return
        # Keep only the text above the marker; the hint block is discarded.
        msg = message.split(marker)[0].rstrip()
        if not msg:
            click.echo('Aborted! Empty commit message')
            return
    else:
        # Each -m argument becomes one line of the message.
        msg = '\n'.join(message)
    click.echo('Files to be committed: %s' % (files,))
    click.echo('Commit message:\n' + msg)
@cli.command(short_help='Copies files.')
@click.option('--force', is_flag=True,
              help='forcibly copy over an existing managed file')
@click.argument('src', nargs=-1, type=click.Path())
@click.argument('dst', type=click.Path())
@pass_repo
def copy(repo, src, dst, force):
    """Copies one or multiple files to a new location. This copies all
    files from SRC to DST.
    """
    # Announce each copy; one line per source file.
    messages = ('Copy from %s -> %s' % (source, dst) for source in src)
    for message in messages:
        click.echo(message)
| bsd-3-clause |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/ndlstm/python/lstm2d_test.py | 14 | 2985 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for 2D LSTMs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
# Module under test, aliased for brevity.
lstm2d = tf.contrib.ndlstm.lstm2d
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class Lstm2DTest(test_util.TensorFlowTestCase):
  """Shape checks for the ops in tf.contrib.ndlstm.lstm2d."""

  def testImagesToSequenceDims(self):
    # A (2, 7, 11, 5) image batch becomes a (11, 2*7, 5) sequence.
    with self.test_session():
      inputs = tf.constant(_rand(2, 7, 11, 5))
      outputs = lstm2d.images_to_sequence(inputs)
      tf.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (11, 14, 5))

  def testSequenceToImagesDims(self):
    # Inverse of the above: (11, 14, 5) with batch size 2 -> (2, 7, 11, 5).
    with self.test_session():
      inputs = tf.constant(_rand(11, 14, 5))
      outputs = lstm2d.sequence_to_images(inputs, 2)
      tf.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 7, 11, 5))

  def testImagesAndSequenceDims(self):
    # Round trip: images -> sequence -> images restores the original shape.
    with self.test_session():
      size = (2, 7, 11, 5)
      inputs = tf.constant(_rand(*size))
      sequence = lstm2d.images_to_sequence(inputs)
      outputs = lstm2d.sequence_to_images(sequence, size[0])
      tf.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), size)

  def testSeparableLstmDims(self):
    # separable_lstm keeps spatial dims and maps depth 5 -> 8 units.
    with self.test_session():
      inputs = tf.constant(_rand(2, 7, 11, 5))
      outputs = lstm2d.separable_lstm(inputs, 8)
      tf.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 7, 11, 8))

  def testReduceToSequenceDims(self):
    # reduce_to_sequence collapses the height axis: (2, 7, 11, 5) -> (2, 11, 8).
    with self.test_session():
      inputs = tf.constant(_rand(2, 7, 11, 5))
      outputs = lstm2d.reduce_to_sequence(inputs, 8)
      tf.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 11, 8))

  def testReduceToFinalDims(self):
    # reduce_to_final collapses both spatial axes to a (batch, 8) output.
    with self.test_session():
      inputs = tf.constant(_rand(2, 7, 11, 5))
      outputs = lstm2d.reduce_to_final(inputs, 8, 12)
      tf.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 8))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
citrix-openstack/build-python-troveclient | troveclient/limits.py | 2 | 1461 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import base
import exceptions
class Limit(base.Resource):
    """A single limit entry returned by the API.

    The 'verb' attribute (presumably the HTTP method the limit applies
    to -- set by base.Resource from the response data; TODO confirm) is
    used as the display identity.
    """
    def __repr__(self):
        return "<Limit: %s>" % self.verb
class Limits(base.ManagerWithFind):
    """
    Manages :class `Limit` resources
    """
    resource_class = Limit

    def __repr__(self):
        return "<Limit Manager at %s>" % id(self)

    def _list(self, url, response_key):
        # GET *url* and wrap each item found under *response_key* in a
        # Limit.  Non-200 responses are converted via exceptions
        # .from_response; an empty body is treated as a hard error.
        resp, body = self.api.client.get(url)
        if resp is None or resp.status != 200:
            raise exceptions.from_response(resp, body)
        if not body:
            raise Exception("Call to " + url + " did not return a body.")
        return [self.resource_class(self, res) for res in body[response_key]]

    def list(self):
        """
        Retrieve the limits
        """
        return self._list("/limits", "limits")
| apache-2.0 |
koyuawsmbrtn/eclock | windows/Python27/Lib/site-packages/Cython/Compiler/UtilNodes.py | 97 | 11384 | #
# Nodes used as utilities and support for transforms etc.
# These often make up sets including both Nodes and ExprNodes
# so it is convenient to have them in a seperate module.
#
import Nodes
import ExprNodes
from Nodes import Node
from ExprNodes import AtomicExprNode
from PyrexTypes import c_ptr_type
class TempHandle(object):
    # THIS IS DEPRECATED, USE LetRefNode instead
    """Allocation descriptor for one temporary used by TempsBlockNode.

    'temp' (the C name) is filled in at code-generation time; the handle
    records the temp's type and whether it needs cleanup on scope exit.
    """

    temp = None
    needs_xdecref = False

    def __init__(self, type, needs_cleanup=None):
        self.type = type
        # Python objects default to needing cleanup unless told otherwise.
        self.needs_cleanup = type.is_pyobject if needs_cleanup is None else needs_cleanup

    def ref(self, pos):
        """Expression node that reads/writes this temp."""
        return TempRefNode(pos, handle=self, type=self.type)

    def cleanup_ref(self, pos):
        """Expression node that clears this temp when executed."""
        return CleanupTempRefNode(pos, handle=self, type=self.type)
class TempRefNode(AtomicExprNode):
    # THIS IS DEPRECATED, USE LetRefNode instead
    # handle TempHandle
    #
    # Expression node referring to the temporary owned by its TempHandle;
    # the temp's C name is filled into the handle by TempsBlockNode at
    # code-generation time.

    def analyse_types(self, env):
        # The type was fixed when the handle was created.
        assert self.type == self.handle.type
        return self

    def analyse_target_types(self, env):
        assert self.type == self.handle.type
        return self

    def analyse_target_declaration(self, env):
        pass

    def calculate_result_code(self):
        result = self.handle.temp
        if result is None: result = "<error>" # might be called and overwritten
        return result

    def generate_result_code(self, code):
        # Nothing to evaluate: the temp already holds the value.
        pass

    def generate_assignment_code(self, rhs, code):
        if self.type.is_pyobject:
            rhs.make_owned_reference(code)
            # TODO: analyse control flow to see if this is necessary
            code.put_xdecref(self.result(), self.ctype())
        code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
        rhs.generate_post_assignment_code(code)
        rhs.free_temps(code)
class CleanupTempRefNode(TempRefNode):
    # THIS IS DEPRECATED, USE LetRefNode instead
    # handle TempHandle
    #
    # Statement-position node that decrefs/clears the temp early and
    # marks the handle so TempsBlockNode skips its end-of-block cleanup.

    def generate_assignment_code(self, rhs, code):
        # Not assignable; assignment is a no-op.
        pass

    def generate_execution_code(self, code):
        if self.type.is_pyobject:
            code.put_decref_clear(self.result(), self.type)
            self.handle.needs_cleanup = False
class TempsBlockNode(Node):
    # THIS IS DEPRECATED, USE LetNode instead

    """
    Creates a block which allocates temporary variables.
    This is used by transforms to output constructs that need
    to make use of a temporary variable. Simply pass the types
    of the needed temporaries to the constructor.

    The variables can be referred to using a TempRefNode
    (which can be constructed by calling get_ref_node).
    """

    # temps   [TempHandle]
    # body    StatNode

    child_attrs = ["body"]

    def generate_execution_code(self, code):
        # Allocate each temp, run the body, then release/clean up the
        # temps that still need it (CleanupTempRefNode may have cleared
        # needs_cleanup early).
        for handle in self.temps:
            handle.temp = code.funcstate.allocate_temp(
                handle.type, manage_ref=handle.needs_cleanup)
        self.body.generate_execution_code(code)
        for handle in self.temps:
            if handle.needs_cleanup:
                if handle.needs_xdecref:
                    code.put_xdecref_clear(handle.temp, handle.type)
                else:
                    code.put_decref_clear(handle.temp, handle.type)
            code.funcstate.release_temp(handle.temp)

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        self.body.annotate(code)
class ResultRefNode(AtomicExprNode):
    #  A reference to the result of an expression.  The result_code
    #  must be set externally (usually a temp name).

    subexprs = []
    lhs_of_first_assignment = False

    def __init__(self, expression=None, pos=None, type=None, may_hold_none=True, is_temp=False):
        self.expression = expression
        self.pos = None
        self.may_hold_none = may_hold_none
        # Position and type come from the wrapped expression when given,
        # but explicit pos/type arguments take precedence.
        if expression is not None:
            self.pos = expression.pos
            if hasattr(expression, "type"):
                self.type = expression.type
        if pos is not None:
            self.pos = pos
        if type is not None:
            self.type = type
        if is_temp:
            self.is_temp = True
        assert self.pos is not None

    def clone_node(self):
        # nothing to do here
        return self

    def type_dependencies(self, env):
        if self.expression:
            return self.expression.type_dependencies(env)
        else:
            return ()

    def analyse_types(self, env):
        # Re-read the (possibly updated) type of the wrapped expression.
        if self.expression is not None:
            self.type = self.expression.type
        return self

    def infer_type(self, env):
        if self.type is not None:
            return self.type
        if self.expression is not None:
            if self.expression.type is not None:
                return self.expression.type
            return self.expression.infer_type(env)
        assert False, "cannot infer type of ResultRefNode"

    def may_be_none(self):
        if not self.type.is_pyobject:
            return False
        return self.may_hold_none

    def _DISABLED_may_be_none(self):
        # not sure if this is safe - the expression may not be the
        # only value that gets assigned
        if self.expression is not None:
            return self.expression.may_be_none()
        if self.type is not None:
            return self.type.is_pyobject
        return True # play safe

    def is_simple(self):
        return True

    def result(self):
        # result_code is normally injected by LetNodeMixin; fall back to
        # the wrapped expression's result if it was never set.
        try:
            return self.result_code
        except AttributeError:
            if self.expression is not None:
                self.result_code = self.expression.result()
            return self.result_code

    def generate_evaluation_code(self, code):
        # No-op: the referenced value is evaluated by the owning Let node.
        pass

    def generate_result_code(self, code):
        pass

    def generate_disposal_code(self, code):
        pass

    def generate_assignment_code(self, rhs, code):
        if self.type.is_pyobject:
            rhs.make_owned_reference(code)
            # The very first assignment has nothing to decref.
            if not self.lhs_of_first_assignment:
                code.put_decref(self.result(), self.ctype())
        code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
        rhs.generate_post_assignment_code(code)
        rhs.free_temps(code)

    def allocate_temps(self, env):
        pass

    def release_temp(self, env):
        pass

    def free_temps(self, code):
        pass
class LetNodeMixin:
    """Shared setup/teardown for nodes that bind a LetRefNode to a temp."""

    def set_temp_expr(self, lazy_temp):
        self.lazy_temp = lazy_temp
        self.temp_expression = lazy_temp.expression

    def setup_temp_expr(self, code):
        # Evaluate the bound expression and make its value available under
        # a stable C name, injected into the LetRefNode as result_code.
        self.temp_expression.generate_evaluation_code(code)
        self.temp_type = self.temp_expression.type
        if self.temp_type.is_array:
            # Arrays decay to a pointer to their element type.
            self.temp_type = c_ptr_type(self.temp_type.base_type)
        self._result_in_temp = self.temp_expression.result_in_temp()
        if self._result_in_temp:
            # Already in a temp: reuse it directly.
            self.temp = self.temp_expression.result()
        else:
            # Copy the value into a managed temp of our own.
            self.temp_expression.make_owned_reference(code)
            self.temp = code.funcstate.allocate_temp(
                self.temp_type, manage_ref=True)
            code.putln("%s = %s;" % (self.temp, self.temp_expression.result()))
            self.temp_expression.generate_disposal_code(code)
            self.temp_expression.free_temps(code)
        self.lazy_temp.result_code = self.temp

    def teardown_temp_expr(self, code):
        # Release whichever temp setup_temp_expr() decided to use.
        if self._result_in_temp:
            self.temp_expression.generate_disposal_code(code)
            self.temp_expression.free_temps(code)
        else:
            if self.temp_type.is_pyobject:
                code.put_decref_clear(self.temp, self.temp_type)
            code.funcstate.release_temp(self.temp)
class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
    # A wrapper around a subexpression that moves an expression into a
    # temp variable and provides it to the subexpression.

    subexprs = ['temp_expression', 'subexpression']

    def __init__(self, lazy_temp, subexpression):
        self.set_temp_expr(lazy_temp)
        self.pos = subexpression.pos
        self.subexpression = subexpression
        # if called after type analysis, we already know the type here
        self.type = self.subexpression.type

    def infer_type(self, env):
        return self.subexpression.infer_type(env)

    def result(self):
        # The wrapper's result is the inner expression's result.
        return self.subexpression.result()

    def analyse_types(self, env):
        self.temp_expression = self.temp_expression.analyse_types(env)
        self.subexpression = self.subexpression.analyse_types(env)
        self.type = self.subexpression.type
        return self

    def free_subexpr_temps(self, code):
        self.subexpression.free_temps(code)

    def generate_subexpr_disposal_code(self, code):
        self.subexpression.generate_disposal_code(code)

    def generate_evaluation_code(self, code):
        # Bind the temp, evaluate the wrapped expression with it in
        # scope, then release the temp again.
        self.setup_temp_expr(code)
        self.subexpression.generate_evaluation_code(code)
        self.teardown_temp_expr(code)
# Preferred (non-deprecated) name for ResultRefNode, used with LetNode
# and EvalWithTempExprNode.
LetRefNode = ResultRefNode
class LetNode(Nodes.StatNode, LetNodeMixin):
    # Implements a local temporary variable scope. Imagine this
    # syntax being present:
    # let temp = VALUE:
    #     BLOCK (can modify temp)
    #     if temp is an object, decref
    #
    # Usually used after analysis phase, but forwards analysis methods
    # to its children

    child_attrs = ['temp_expression', 'body']

    def __init__(self, lazy_temp, body):
        self.set_temp_expr(lazy_temp)
        self.pos = body.pos
        self.body = body

    def analyse_declarations(self, env):
        self.temp_expression.analyse_declarations(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.temp_expression = self.temp_expression.analyse_expressions(env)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        # Bind the temp for the duration of the body, then release it.
        self.setup_temp_expr(code)
        self.body.generate_execution_code(code)
        self.teardown_temp_expr(code)

    def generate_function_definitions(self, env, code):
        self.temp_expression.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
class TempResultFromStatNode(ExprNodes.ExprNode):
    # An ExprNode wrapper around a StatNode that executes the StatNode
    # body.  Requires a ResultRefNode that it sets up to refer to its
    # own temp result.  The StatNode must assign a value to the result
    # node, which then becomes the result of this node.

    subexprs = []
    child_attrs = ['body']

    def __init__(self, result_ref, body):
        self.result_ref = result_ref
        self.pos = body.pos
        self.body = body
        self.type = result_ref.type
        self.is_temp = 1

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)

    def analyse_types(self, env):
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_result_code(self, code):
        # Point the ResultRefNode at our own temp, then run the body,
        # which is expected to assign through that reference.
        self.result_ref.result_code = self.result()
        self.body.generate_execution_code(code)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.