repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
dwaynebailey/poedit | deps/boost/libs/python/test/pickle2.py | 45 | 1514 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
r'''>>> import pickle2_ext
>>> import pickle
>>> pickle2_ext.world.__module__
'pickle2_ext'
>>> pickle2_ext.world.__safe_for_unpickling__
1
>>> pickle2_ext.world.__name__
'world'
>>> pickle2_ext.world('Hello').__reduce__()
(<class 'pickle2_ext.world'>, ('Hello',), (0,))
>>> for number in (24, 42):
... wd = pickle2_ext.world('California')
... wd.set_secret_number(number)
... pstr = pickle.dumps(wd)
... wl = pickle.loads(pstr)
... print wd.greet(), wd.get_secret_number()
... print wl.greet(), wl.get_secret_number()
Hello from California! 24
Hello from California! 24
Hello from California! 42
Hello from California! 0
# Now show that the __dict__ is not taken care of.
>>> wd = pickle2_ext.world('California')
>>> wd.x = 1
>>> wd.__dict__
{'x': 1}
>>> try: pstr = pickle.dumps(wd)
... except RuntimeError, err: print err
...
Incomplete pickle support (__getstate_manages_dict__ not set)
'''
def run(args=None):
    """Run this module's doctests, optionally overriding sys.argv with *args*.

    Returns the doctest.testmod() result tuple (failures, attempted).
    """
    import doctest
    import sys
    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
# Script entry point: run the doctest suite and exit with the number of
# failed doctests (0 means success).  NOTE(review): Python 2 print
# statements -- this file targets Python 2 only.
if __name__ == '__main__':
    print "running..."
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
| mit |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/win32/twisted/test/test_unix.py | 3 | 8599 | # -*- test-case-name: twisted.test.test_unix -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import stat, os, sys
from twisted.internet import interfaces, reactor, protocol, error, address, defer, utils
from twisted.python import components, lockfile, failure
from twisted.protocols import loopback
from twisted.trial import unittest, assertions
from twisted.trial.util import spinWhile, spinUntil, wait
class MyProtocol(protocol.Protocol):
    """Stream protocol that records connection events and received bytes."""
    made = closed = failed = 0
    data = ""
    def connectionMade(self):
        # Remember that the connection came up.
        self.made = 1
    def dataReceived(self, data):
        # Accumulate everything the peer sends.
        self.data = self.data + data
    def connectionLost(self, reason):
        self.closed = 1
class TestClientFactory(protocol.ClientFactory):
    """Client factory that verifies the peer address and keeps its protocol."""
    protocol = None
    def __init__(self, testcase, name):
        self.testcase = testcase
        self.name = name
    def buildProtocol(self, addr):
        # The reactor must hand us the UNIX address we connected to.
        expected = address.UNIXAddress(self.name)
        self.testcase.assertEquals(expected, addr)
        self.protocol = MyProtocol()
        return self.protocol
class Factory(protocol.Factory):
    """Server factory that records its protocol and whether it was stopped."""
    protocol = stopped = None
    def __init__(self, testcase, name):
        self.testcase = testcase
        self.name = name
    def stopFactory(self):
        self.stopped = True
    def buildProtocol(self, addr):
        # Server-side UNIX connections report no peer address.
        self.testcase.assertEquals(None, addr)
        proto = MyProtocol()
        self.protocol = proto
        return proto
class FailedConnectionClientFactory(protocol.ClientFactory):
    """Client factory that errbacks a Deferred when the connection fails."""
    def __init__(self, onFail):
        self.onFail = onFail
    def clientConnectionFailed(self, connector, reason):
        # Forward the failure to whoever is waiting on the Deferred.
        self.onFail.errback(reason)
class PortCleanerUpper(unittest.TestCase):
    """TestCase base class that disconnects any ports registered by a test."""
    # Name of the method used to tear a connection down.
    callToLoseCnx = 'loseConnection'
    def setUp(self):
        self.ports = []
    def tearDown(self):
        self.cleanPorts(*self.ports)
    def _addPorts(self, *ports):
        # Register ports for cleanup in tearDown.
        self.ports.extend(ports)
    def cleanPorts(self, *ports):
        for port in ports:
            if not hasattr(port, 'disconnected'):
                raise RuntimeError("You handed something to cleanPorts that"
                                   " doesn't have a disconnected attribute, dummy!")
            if port.disconnected:
                continue
            result = getattr(port, self.callToLoseCnx)()
            if isinstance(result, defer.Deferred):
                wait(result)
            else:
                try:
                    spinUntil(lambda: port.disconnected)
                except:
                    # Best effort: report but do not abort remaining cleanup.
                    failure.Failure().printTraceback()
class UnixSocketTestCase(PortCleanerUpper):
    """Test unix sockets."""
    def testDumber(self):
        # Basic connect: both server and client protocols must come up.
        filename = self.mktemp()
        f = Factory(self, filename)
        l = reactor.listenUNIX(filename, f)
        tcf = TestClientFactory(self, filename)
        c = reactor.connectUNIX(filename, tcf)
        spinUntil(lambda :getattr(f.protocol, 'made', None) and
                  getattr(tcf.protocol, 'made', None))
        self._addPorts(l, c.transport, tcf.protocol.transport, f.protocol.transport)
    def testMode(self):
        # The socket file must be created with the requested permissions.
        filename = self.mktemp()
        f = Factory(self, filename)
        l = reactor.listenUNIX(filename, f, mode = 0600)
        self.assertEquals(stat.S_IMODE(os.stat(filename)[0]), 0600)
        tcf = TestClientFactory(self, filename)
        c = reactor.connectUNIX(filename, tcf)
        self._addPorts(l, c.transport)
    def testPIDFile(self):
        # wantPID=1 creates a .lock file next to the socket; it must be
        # held while listening and released after cleanup.
        filename = self.mktemp()
        f = Factory(self, filename)
        l = reactor.listenUNIX(filename, f, mode = 0600, wantPID=1)
        self.failUnless(lockfile.isLocked(filename + ".lock"))
        tcf = TestClientFactory(self, filename)
        c = reactor.connectUNIX(filename, tcf, checkPID=1)
        spinUntil(lambda :getattr(f.protocol, 'made', None) and
                  getattr(tcf.protocol, 'made', None))
        self._addPorts(l, c.transport, tcf.protocol.transport, f.protocol.transport)
        self.cleanPorts(*self.ports)
        self.failIf(lockfile.isLocked(filename + ".lock"))
    def testSocketLocking(self):
        # A second listener on a locked path must fail until the first
        # one stops listening.
        filename = self.mktemp()
        f = Factory(self, filename)
        l = reactor.listenUNIX(filename, f, wantPID=True)
        self.assertRaises(
            error.CannotListenError,
            reactor.listenUNIX, filename, f, wantPID=True)
        def stoppedListening(ign):
            l = reactor.listenUNIX(filename, f, wantPID=True)
            return l.stopListening()
        return l.stopListening().addCallback(stoppedListening)
    def _uncleanSocketTest(self, callback):
        # Spawn a child process that listens with a PID lock and exits
        # without cleaning up, then hand control to `callback`.
        self.filename = self.mktemp()
        source = ("from twisted.internet import protocol, reactor\n"
                  "reactor.listenUNIX(%r, protocol.ServerFactory(), wantPID=True)\n") % (self.filename,)
        env = {'PYTHONPATH': os.pathsep.join(sys.path)}
        d = utils.getProcessOutput(sys.executable, ("-u", "-c", source), env=env)
        d.addCallback(callback)
        return d
    def testUncleanServerSocketLocking(self):
        def ranStupidChild(ign):
            # If this next call succeeds, our lock handling is correct.
            p = reactor.listenUNIX(self.filename, Factory(self, self.filename), wantPID=True)
            return p.stopListening()
        return self._uncleanSocketTest(ranStupidChild)
    def testUncleanSocketLockingFromThePerspectiveOfAClientConnectingToTheDeadServerSocket(self):
        def ranStupidChild(ign):
            # Connecting to the stale (dead) socket must fail with
            # error.BadFileError.
            d = defer.Deferred()
            f = FailedConnectionClientFactory(d)
            c = reactor.connectUNIX(self.filename, f, checkPID=True)
            return assertions.assertFailure(d, error.BadFileError)
        return self._uncleanSocketTest(ranStupidChild)
    def testRepr(self):
        # str(port) mentions the socket path while listening, not after.
        filename = self.mktemp()
        f = Factory(self, filename)
        p = reactor.listenUNIX(filename, f)
        self.failIf(str(p).find(filename) == -1)
        def stoppedListening(ign):
            self.failIf(str(p).find(filename) != -1)
        return defer.maybeDeferred(p.stopListening).addCallback(stoppedListening)
class ClientProto(protocol.ConnectedDatagramProtocol):
    """Connected datagram protocol that records lifecycle and the reply."""
    started = stopped = False
    gotback = None
    def startProtocol(self):
        self.started = True
    def stopProtocol(self):
        self.stopped = True
    def datagramReceived(self, data):
        # Keep the last datagram the server sent back.
        self.gotback = data
class ServerProto(protocol.DatagramProtocol):
    """Datagram server that records the sender/payload and acknowledges."""
    started = stopped = False
    gotwhat = gotfrom = None
    def startProtocol(self):
        self.started = True
    def stopProtocol(self):
        self.stopped = True
    def datagramReceived(self, data, addr):
        # Remember who sent what, then reply to the sender.
        self.gotfrom = addr
        self.gotwhat = data
        self.transport.write("hi back", addr)
class DatagramUnixSocketTestCase(PortCleanerUpper):
    """Test datagram UNIX sockets."""
    def testExchange(self):
        # Round-trip a datagram: client writes "hi", server replies
        # "hi back", and the server sees the client's bind address.
        clientaddr = self.mktemp()
        serveraddr = self.mktemp()
        sp = ServerProto()
        cp = ClientProto()
        s = reactor.listenUNIXDatagram(serveraddr, sp)
        c = reactor.connectUNIXDatagram(serveraddr, cp, bindAddress = clientaddr)
        spinUntil(lambda:sp.started and cp.started)
        cp.transport.write("hi")
        spinUntil(lambda:sp.gotwhat == "hi" and cp.gotback == "hi back")
        s.stopListening()
        c.stopListening()
        os.unlink(clientaddr)
        os.unlink(serveraddr)
        spinWhile(lambda:s.connected and c.connected)
        self.failUnlessEqual("hi", sp.gotwhat)
        self.failUnlessEqual(clientaddr, sp.gotfrom)
        self.failUnlessEqual("hi back", cp.gotback)
    def testCannotListen(self):
        # Two datagram listeners on the same path must not be allowed.
        addr = self.mktemp()
        p = ServerProto()
        s = reactor.listenUNIXDatagram(addr, p)
        self.failUnlessRaises(error.CannotListenError, reactor.listenUNIXDatagram, addr, p)
        s.stopListening()
        os.unlink(addr)
    # TODO: test connecting to bound and connected (somewhere else) address
    def testRepr(self):
        # str(port) mentions the socket path while listening, not after.
        filename = self.mktemp()
        f = ServerProto()
        p = reactor.listenUNIXDatagram(filename, f)
        self.failIf(str(p).find(filename) == -1)
        def stoppedListening(ign):
            self.failIf(str(p).find(filename) != -1)
        return defer.maybeDeferred(p.stopListening).addCallback(stoppedListening)
# Skip these suites when the installed reactor lacks UNIX-socket support.
if not interfaces.IReactorUNIX(reactor, None):
    UnixSocketTestCase.skip = "This reactor does not support UNIX domain sockets"
if not interfaces.IReactorUNIXDatagram(reactor, None):
    DatagramUnixSocketTestCase.skip = "This reactor does not support UNIX datagram sockets"
| gpl-2.0 |
google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.10/Lib/importlib/__init__.py | 63 | 1365 | """Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
    """Return the absolute name of the module to be imported.

    `level` is the number of leading dots in the relative import; the
    result anchors `name` `level - 1` packages above `package`.
    """
    # 'package' must be a string; probing for a str method keeps this
    # backport compatible with very old Python 2 (no isinstance tricks).
    if not hasattr(package, 'rindex'):
        raise ValueError("'package' not set to a string")
    dot = len(package)
    # Walk up one package per extra dot by finding successive '.'
    # separators from the right.  NOTE: xrange is Python 2 only -- this
    # file is explicitly a backport for Python 2.x.
    for x in xrange(level, 1, -1):
        try:
            dot = package.rindex('.', 0, dot)
        except ValueError:
            raise ValueError("attempted relative import beyond top-level "
                      "package")
    return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
    """Import and return a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.
    """
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        # Count the leading dots to determine the relative level.
        level = 0
        while level < len(name) and name[level] == '.':
            level += 1
        name = _resolve_name(name[level:], package, level)
    __import__(name)
    return sys.modules[name]
| apache-2.0 |
Changaco/oh-mainline | vendor/packages/Django/django/utils/unittest/util.py | 751 | 2821 | """Various utility functions."""
__unittest = True

# Maximum repr length shown in failure messages before truncation.
_MAX_LENGTH = 80


def safe_repr(obj, short=False):
    """Return repr(obj) without ever raising; truncate when short=True."""
    try:
        text = repr(obj)
    except Exception:
        # A broken __repr__ must not break test reporting.
        text = object.__repr__(obj)
    if short and len(text) >= _MAX_LENGTH:
        return text[:_MAX_LENGTH] + ' [truncated]...'
    return text
def safe_str(obj):
    """Return str(obj), tolerating objects whose __str__ raises."""
    try:
        result = str(obj)
    except Exception:
        # Fall back to the default object rendering.
        result = object.__str__(obj)
    return result
def strclass(cls):
    """Return the fully qualified 'module.ClassName' string for *cls*."""
    return "{0}.{1}".format(cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    # Classic two-pointer merge walk; IndexError marks list exhaustion.
    i = j = 0
    missing = []
    unexpected = []
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # e occurs only in 'expected'; skip its duplicates.
                missing.append(e)
                i += 1
                while expected[i] == e:
                    i += 1
            elif e > a:
                # a occurs only in 'actual'; skip its duplicates.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # Present in both: skip the item and its duplicates on
                # both sides.  The finally clause guarantees the actual-
                # side pointer advances even if the expected-side scan
                # runs off the end of the list (raising IndexError).
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            # One list is exhausted: whatever remains in the other is
            # missing / unexpected respectively.
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual, ignore_duplicate=False):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    As it does a linear search per item (remove) it
    has O(n*n) performance.  Note: both input lists are consumed.
    """
    missing = []
    unexpected = []
    while expected:
        item = expected.pop()
        try:
            actual.remove(item)
        except ValueError:
            # Present only in 'expected'.
            missing.append(item)
        if ignore_duplicate:
            # Strip any further copies of this item from both lists.
            for seq in (expected, actual):
                while item in seq:
                    seq.remove(item)
    if not ignore_duplicate:
        # Anything left in actual is unexpected.
        return missing, actual
    # Deduplicate the leftovers on the actual side as well.
    while actual:
        item = actual.pop()
        unexpected.append(item)
        while item in actual:
            actual.remove(item)
    return missing, unexpected
| agpl-3.0 |
imageio/imageio | tests/test_format.py | 1 | 12094 | from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir
import os
import gc
import shutil
import numpy as np
import imageio
from imageio.core import Format, FormatManager, Request
from imageio.core import get_remote_file
test_dir = get_test_dir()
def setup_module():
    # Reset to the default format ordering before this module's tests run.
    imageio.formats.sort()
def teardown_module():
    # Restore the default format ordering so other test modules see a
    # clean state.
    imageio.formats.sort()
class MyFormat(Format):
    """TEST DOCS"""
    # ids of readers/writers that were closed (used to verify cleanup).
    _closed = []
    def _can_read(self, request):
        # Beyond the declared extensions, also claim ".haha" files.
        return request.filename.lower().endswith(self.extensions + (".haha",))
    def _can_write(self, request):
        return request.filename.lower().endswith(self.extensions + (".haha",))
    class Reader(Format.Reader):
        # _failmode: False -> normal; truthy -> return invalid data/meta;
        # 2 -> raise IndexError from _get_data.
        _failmode = False
        # _stream_mode: report infinite length; stop after 5 frames.
        _stream_mode = False
        def _open(self):
            self._read_frames = 0
        def _close(self):
            self.format._closed.append(id(self))
        def _get_length(self):
            if self._stream_mode:
                return np.inf
            return 3
        def _get_data(self, index):
            if self._failmode == 2:
                raise IndexError()
            elif self._failmode:
                return "not an array", {}
            elif self._stream_mode and self._read_frames >= 5:
                raise IndexError()  # Mark end of stream
            else:
                self._read_frames += 1
                return np.ones((10, 10)) * index, self._get_meta_data(index)
        def _get_meta_data(self, index):
            if self._failmode:
                return "not a dict"
            return {"index": index}
    class Writer(Format.Writer):
        def _open(self):
            # Record appended images and meta so tests can inspect them.
            self._written_data = []
            self._written_meta = []
            self._meta = None
        def _close(self):
            self.format._closed.append(id(self))
        def _append_data(self, im, meta):
            self._written_data.append(im)
            self._written_meta.append(meta)
        def _set_meta_data(self, meta):
            self._meta = meta
def test_format():
    """Test the working of the Format class"""
    filename1 = get_remote_file("images/chelsea.png", test_dir)
    filename2 = filename1 + ".out"
    # Test basic format creation
    F = Format("testname", "test description", "foo bar spam")
    assert F.name == "TESTNAME"
    assert F.description == "test description"
    assert F.name in repr(F)
    assert F.name in F.doc
    assert str(F) == F.doc
    assert set(F.extensions) == {".foo", ".bar", ".spam"}
    # Test setting extensions: space/comma separated, list, dotted
    F1 = Format("test", "", "foo bar spam")
    F2 = Format("test", "", "foo, bar,spam")
    F3 = Format("test", "", ["foo", "bar", "spam"])
    F4 = Format("test", "", ".foo .bar .spam")
    for F in (F1, F2, F3, F4):
        assert set(F.extensions) == {".foo", ".bar", ".spam"}
    # Fail
    raises(ValueError, Format, "test", "", 3)  # not valid ext
    raises(ValueError, Format, "test", "", "", 3)  # not valid mode
    raises(ValueError, Format, "test", "", "", "x")  # not valid mode
    # Test subclassing
    F = MyFormat("test", "", modes="i")
    assert "TEST DOCS" in F.doc
    # Get and check reader and write classes
    R = F.get_reader(Request(filename1, "ri"))
    W = F.get_writer(Request(filename2, "wi"))
    assert isinstance(R, MyFormat.Reader)
    assert isinstance(W, MyFormat.Writer)
    assert R.format is F
    assert W.format is F
    assert R.request.filename == filename1
    assert W.request.filename == filename2
    # Fail: "I" (multi-image) modes are not supported by this format
    raises(RuntimeError, F.get_reader, Request(filename1, "rI"))
    raises(RuntimeError, F.get_writer, Request(filename2, "wI"))
    # Use as context manager
    with R:
        pass
    with W:
        pass
    # Objects are now closed, cannot be used
    assert R.closed
    assert W.closed
    #
    raises(RuntimeError, R.__enter__)
    raises(RuntimeError, W.__enter__)
    #
    raises(RuntimeError, R.get_data, 0)
    raises(RuntimeError, W.append_data, np.zeros((10, 10)))
    # Test __del__: dropping the last references must close both objects
    R = F.get_reader(Request(filename1, "ri"))
    W = F.get_writer(Request(filename2, "wi"))
    ids = id(R), id(W)
    F._closed[:] = []
    del R
    del W
    gc.collect()  # Invoke __del__
    assert set(ids) == set(F._closed)
def test_reader_and_writer():
    """Exercise MyFormat's Reader and Writer wrappers end to end."""
    # Prepare
    filename1 = get_remote_file("images/chelsea.png", test_dir)
    filename2 = filename1 + ".out"
    F = MyFormat("test", "", modes="i")
    # Test using reader
    n = 3
    R = F.get_reader(Request(filename1, "ri"))
    assert len(R) == n
    ims = [im for im in R]
    assert len(ims) == n
    for i in range(3):
        assert ims[i][0, 0] == i
        assert ims[i].meta["index"] == i
    for i in range(3):
        assert R.get_meta_data(i)["index"] == i
    # Read next
    assert R.get_data(0)[0, 0] == 0
    assert R.get_next_data()[0, 0] == 1
    assert R.get_next_data()[0, 0] == 2
    # Fail: invalid data/meta must be rejected with ValueError
    R._failmode = 1
    raises(ValueError, R.get_data, 0)
    raises(ValueError, R.get_meta_data, 0)
    R._failmode = 2
    with raises(IndexError):
        [im for im in R]
    # Test writer no format
    raises(ValueError, imageio.get_writer, "foo.unknownext")
    # Test streaming reader: infinite length, ends after 5 frames
    R = F.get_reader(Request(filename1, "ri"))
    R._stream_mode = True
    assert R.get_length() == np.inf
    ims = [im for im in R]
    assert len(ims) == 5
    # Test using writer
    im1 = np.zeros((10, 10))
    im2 = imageio.core.Image(im1, {"foo": 1})
    W = F.get_writer(Request(filename2, "wi"))
    W.append_data(im1)
    W.append_data(im2)
    W.append_data(im1, {"bar": 1})
    W.append_data(im2, {"bar": 1})
    # Test that no data is copied (written images may be different views,
    # but must share memory with im1)
    assert len(W._written_data) == 4
    for im in W._written_data:
        assert (im == im1).all()
    # BUG FIX: this line was `im1[2, 2] == 99`, a no-op comparison.  The
    # intent (see the comment above) is to mutate im1 and check that the
    # written images observe the change, proving they were not copied.
    im1[2, 2] = 99
    for im in W._written_data:
        assert (im == im1).all()
    # Test meta: explicit meta is merged with the Image's own meta
    assert W._written_meta[0] == {}
    assert W._written_meta[1] == {"foo": 1}
    assert W._written_meta[2] == {"bar": 1}
    assert W._written_meta[3] == {"foo": 1, "bar": 1}
    #
    W.set_meta_data({"spam": 1})
    assert W._meta == {"spam": 1}
    # Fail: invalid arguments must be rejected with ValueError
    raises(ValueError, W.append_data, "not an array")
    raises(ValueError, W.append_data, im, "not a dict")
    raises(ValueError, W.set_meta_data, "not a dict")
def test_default_can_read_and_can_write():
    """The default extension/mode based can_read and can_write checks."""
    # DummyFormat declares extensions "foo bar" and volume mode "v" only.
    F = imageio.plugins.example.DummyFormat("test", "", "foo bar", "v")
    # Prepare files
    filename1 = os.path.join(test_dir, "test")
    open(filename1 + ".foo", "wb")
    open(filename1 + ".bar", "wb")
    open(filename1 + ".spam", "wb")
    # Test _can_read(): known extension with compatible ("v" or "?") mode
    assert F.can_read(Request(filename1 + ".foo", "rv"))
    assert F.can_read(Request(filename1 + ".bar", "r?"))
    assert not F.can_read(Request(filename1 + ".spam", "r?"))
    assert not F.can_read(Request(filename1 + ".foo", "ri"))
    # Test _can_write(): same rules on the write side
    assert F.can_write(Request(filename1 + ".foo", "wv"))
    assert F.can_write(Request(filename1 + ".bar", "w?"))
    assert not F.can_write(Request(filename1 + ".spam", "w?"))
    assert not F.can_write(Request(filename1 + ".foo", "wi"))
def test_format_selection():
    """Formats are selected by extension, including custom registrations."""
    formats = imageio.formats
    fname1 = get_remote_file("images/chelsea.png", test_dir)
    fname2 = os.path.join(test_dir, "test.selectext1")
    fname3 = os.path.join(test_dir, "test.haha")
    open(fname2, "wb")
    open(fname3, "wb")
    # Test searching for read / write format
    F = formats.search_read_format(Request(fname1, "ri"))
    assert F is formats["PNG"]
    F = formats.search_write_format(Request(fname1, "wi"))
    assert F is formats["PNG"]
    # Now with custom format
    format = MyFormat("test_selection", "xx", "selectext1", "i")
    formats.add_format(format)
    # Select this format for files it said it could handle in extensions
    assert ".selectext1" in fname2
    F = formats.search_read_format(Request(fname2, "ri"))
    assert F is format
    F = formats.search_write_format(Request(fname2, "ri"))
    assert F is format
    # But this custom format also can deal with .haha files
    # (via MyFormat._can_read/_can_write, beyond its declared extensions)
    assert ".haha" in fname3
    F = formats.search_read_format(Request(fname3, "ri"))
    assert F is format
    F = formats.search_write_format(Request(fname3, "ri"))
    assert F is format
# Format manager
def test_format_manager():
    """Test working of the format manager"""
    formats = imageio.formats
    # Test basics of FormatManager
    assert isinstance(formats, FormatManager)
    assert len(formats) > 0
    assert "FormatManager" in repr(formats)
    # Get docs
    smalldocs = str(formats)
    # fulldocs = formats.create_docs_for_all_formats()
    # Check each format ...
    for format in formats:
        # That each format is indeed a Format
        assert isinstance(format, Format)
        # That they are mentioned
        assert format.name in smalldocs
        # assert format.name in fulldocs
    fname = get_remote_file("images/chelsea.png", test_dir)
    fname2 = fname[:-3] + "noext"
    shutil.copy(fname, fname2)
    # Check getting: by name, by extension, and by sniffing file content
    F1 = formats["PNG"]
    F2 = formats[".png"]
    F3 = formats[fname2]  # will look in file itself
    assert F1 is F2
    assert F1 is F3
    # Check getting
    F1 = formats["DICOM"]
    F2 = formats[".dcm"]
    F3 = formats["dcm"]  # If omitting dot, format is smart enough to try with
    assert F1 is F2
    assert F1 is F3
    # Fail
    raises(ValueError, formats.__getitem__, 678)  # must be str
    raises(IndexError, formats.__getitem__, ".nonexistentformat")
    # Adding a format registers it under all of its extensions
    myformat = Format("test", "test description", "testext1 testext2")
    formats.add_format(myformat)
    assert myformat in [f for f in formats]
    assert formats["testext1"] is myformat
    assert formats["testext2"] is myformat
    # Fail
    raises(ValueError, formats.add_format, 678)  # must be Format
    raises(ValueError, formats.add_format, myformat)  # cannot add twice
    # Adding a format with the same name requires overwrite=True
    myformat2 = Format("test", "other description", "foo bar")
    raises(ValueError, formats.add_format, myformat2)  # same name
    formats.add_format(myformat2, True)  # overwrite
    assert formats["test"] is not myformat
    assert formats["test"] is myformat2
    # Test show (we assume it shows correctly)
    formats.show()
    # # Potential
    # bytes = b'x' * 300
    # F = formats.search_read_format(Request(bytes, 'r?', dummy_potential=1))
    # assert F is formats['DUMMY']
def test_sorting_errors():
    """Invalid arguments to formats.sort raise TypeError / ValueError."""
    # Non-string arguments are a TypeError.
    with raises(TypeError):
        imageio.formats.sort(3)
    # Names with separators or extensions are a ValueError.
    for bad_name in ("foo,bar", "foo.png"):
        with raises(ValueError):
            imageio.formats.sort(bad_name)
def test_default_order():
    # With the default sort, these formats win their extensions.
    assert imageio.formats[".tiff"].name == "TIFF"
    assert imageio.formats[".png"].name == "PNG-PIL"
    assert imageio.formats[".pfm"].name == "PFM-FI"
def test_preferring_fi():
    # Prefer FI (FreeImage) plugins all the way
    imageio.formats.sort("-FI")
    assert imageio.formats[".tiff"].name == "TIFF-FI"
    assert imageio.formats[".png"].name == "PNG-FI"
    assert imageio.formats[".pfm"].name == "PFM-FI"
    # This would be better: TIFF first, then FreeImage for the rest
    imageio.formats.sort("TIFF", "-FI")
    assert imageio.formats[".tiff"].name == "TIFF"
    assert imageio.formats[".png"].name == "PNG-FI"
    assert imageio.formats[".pfm"].name == "PFM-FI"
def test_preferring_arbitrary():
    # Normally, these exotic formats are somewhere in the back
    imageio.formats.sort()
    names = [f.name for f in imageio.formats]
    assert "DICOM" not in names[:10]
    assert "FFMPEG" not in names[:10]
    assert "NPZ" not in names[:10]
    # But we can move them forward (in the given order)
    imageio.formats.sort("DICOM", "FFMPEG", "NPZ")
    names = [f.name for f in imageio.formats]
    assert names[0] == "DICOM"
    assert names[1] == "FFMPEG"
    assert names[2] == "NPZ"
    # And back to normal ..
    imageio.formats.sort()
    names = [f.name for f in imageio.formats]
    assert "DICOM" not in names[:10]
    assert "FFMPEG" not in names[:10]
    assert "NPZ" not in names[:10]
# Run this module's tests when executed as a script.
run_tests_if_main()
| bsd-2-clause |
jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gru/seq_with_cursor.py | 3 | 1933 | #
# Copyright 2003,2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# misc utilities
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import types
class seq_with_cursor (object):
    """A non-empty sequence paired with a movable cursor index.

    next()/prev() move the cursor one step and return (item, moved),
    where moved is False when the cursor was already at that end (the
    boundary item is returned again in that case).
    """
    __slots__ = [ 'items', 'index' ]
    def __init__ (self, items, initial_index = None, initial_value = None):
        assert len (items) > 0, "seq_with_cursor: len (items) == 0"
        self.items = items
        self.set_index (initial_index)
        if initial_value is not None:
            self.set_index_by_value(initial_value)
    def set_index (self, initial_index):
        if initial_index is None:
            # BUG FIX: use floor division.  Under 'from __future__ import
            # division' (imported at the top of this file) plain '/'
            # yields a float, so items[self.index] raised TypeError.
            self.index = len (self.items) // 2
        elif initial_index >= 0 and initial_index < len (self.items):
            self.index = initial_index
        else:
            raise ValueError
    def set_index_by_value(self, v):
        """
        Set index to the smallest value such that items[index] >= v.
        If there is no such item, set index to the maximum value.
        """
        self.set_index(0)                # side effect!
        cv = self.current()
        more = True
        while cv < v and more:
            cv, more = next(self)        # side effect!
    def __next__ (self):
        new_index = self.index + 1
        if new_index < len (self.items):
            self.index = new_index
            return self.items[new_index], True
        else:
            # Already at the end: stay put and report no movement.
            return self.items[self.index], False
    # Python 2 compatibility: the builtin next() calls obj.next() there,
    # and set_index_by_value relies on it.
    next = __next__
    def prev (self):
        new_index = self.index - 1
        if new_index >= 0:
            self.index = new_index
            return self.items[new_index], True
        else:
            # Already at the start: stay put and report no movement.
            return self.items[self.index], False
    def current (self):
        return self.items[self.index]
    def get_seq (self):
        return self.items[:]  # copy of items
| gpl-3.0 |
azureplus/hue | desktop/core/ext-py/Paste-2.0.1/paste/url.py | 33 | 14673 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
This module implements a class for handling URLs.
"""
from six.moves.urllib.parse import quote, unquote, urlencode
import cgi
from paste import request
import six
# Imported lazily from FormEncode (see URLResource.coerce_vars):
variabledecode = None
__all__ = ["URL", "Image"]
def html_quote(v):
    # Render v as HTML-escaped text; None becomes the empty string.
    # NOTE(review): cgi.escape(s, 1) escapes &, <, > and double quotes;
    # it was removed in Python 3.8 -- this module targets Python 2.
    if v is None:
        return ''
    return cgi.escape(str(v), 1)
def url_quote(v):
    """Return str(v) percent-encoded for use in a URL ('' for None)."""
    return '' if v is None else quote(str(v))
def js_repr(v):
    """Return a Javascript source representation of the Python value *v*.

    Supports None, booleans, lists, dicts, strings, numbers and any
    object providing a __js_repr__() method; raises ValueError for
    anything else.
    """
    if v is None:
        return 'null'
    elif v is False:
        return 'false'
    elif v is True:
        return 'true'
    elif isinstance(v, list):
        return '[%s]' % ', '.join(map(js_repr, v))
    elif isinstance(v, dict):
        # BUG FIX: iterate v.items(); iterating the dict itself yields
        # only keys, so the 2-tuple unpacking raised ValueError for any
        # non-empty dict.
        return '{%s}' % ', '.join(
            ['%s: %s' % (js_repr(key), js_repr(value))
             for key, value in v.items()])
    elif isinstance(v, str):
        return repr(v)
    elif isinstance(v, unicode):
        # @@: how do you do Unicode literals in Javascript?
        return repr(v.encode('UTF-8'))
    elif isinstance(v, (float, int)):
        return repr(v)
    elif isinstance(v, long):
        # Strip the Python 2 'L' suffix from long literals.
        return repr(v).lstrip('L')
    elif hasattr(v, '__js_repr__'):
        return v.__js_repr__()
    else:
        raise ValueError(
            "I don't know how to turn %r into a Javascript representation"
            % v)
class URLResource(object):
    """
    This is an abstract superclass for different kinds of URLs.

    Instances behave immutably: each refinement method (attr, param,
    var, setvar, setvars, addpath, ...) returns a new instance of the
    same class rather than modifying self.
    """
    # Per-subclass rendering defaults, merged under explicit params.
    default_params = {}
    def __init__(self, url, vars=None, attrs=None,
                 params=None):
        # vars: list of (name, value) query pairs; attrs: HTML tag
        # attributes; params: rendering options (tag, content, ...).
        self.url = url or '/'
        self.vars = vars or []
        self.attrs = attrs or {}
        self.params = self.default_params.copy()
        self.original_params = params or {}
        if params:
            self.params.update(params)
    #@classmethod
    def from_environ(cls, environ, with_query_string=True,
                     with_path_info=True, script_name=None,
                     path_info=None, querystring=None):
        # Build an instance from a WSGI environ; an explicit querystring
        # overrides the one found in the environ.
        url = request.construct_url(
            environ, with_query_string=False,
            with_path_info=with_path_info, script_name=script_name,
            path_info=path_info)
        if with_query_string:
            if querystring is None:
                vars = request.parse_querystring(environ)
            else:
                vars = cgi.parse_qsl(
                    querystring,
                    keep_blank_values=True,
                    strict_parsing=False)
        else:
            vars = None
        v = cls(url, vars=vars)
        return v
    from_environ = classmethod(from_environ)
    def __call__(self, *args, **kw):
        # url(seg1, seg2, key=value) adds path segments, then variables.
        res = self._add_positional(args)
        res = res._add_vars(kw)
        return res
    def __getitem__(self, item):
        # url['name=value'] adds a (URL-unquoted) query variable;
        # url['segment'] adds a path segment.
        if '=' in item:
            name, value = item.split('=', 1)
            return self._add_vars({unquote(name): unquote(value)})
        return self._add_positional((item,))
    def attr(self, **kw):
        # A trailing underscore lets callers pass reserved words (class_).
        # NOTE(review): mutating kw while iterating kw.keys() is fine on
        # Python 2 (keys() returns a list) but would raise on Python 3.
        for key in kw.keys():
            if key.endswith('_'):
                kw[key[:-1]] = kw[key]
                del kw[key]
        new_attrs = self.attrs.copy()
        new_attrs.update(kw)
        return self.__class__(self.url, vars=self.vars,
                              attrs=new_attrs,
                              params=self.original_params)
    def param(self, **kw):
        # Return a copy with extra rendering params (tag, content, ...).
        new_params = self.original_params.copy()
        new_params.update(kw)
        return self.__class__(self.url, vars=self.vars,
                              attrs=self.attrs,
                              params=new_params)
    def coerce_vars(self, vars):
        # Strip trailing underscores from keys and flatten nested dict
        # values via FormEncode's variable_encode (imported lazily).
        global variabledecode
        need_variable_encode = False
        for key, value in vars.items():
            if isinstance(value, dict):
                need_variable_encode = True
            if key.endswith('_'):
                vars[key[:-1]] = vars[key]
                del vars[key]
        if need_variable_encode:
            if variabledecode is None:
                from formencode import variabledecode
            vars = variabledecode.variable_encode(vars)
        return vars
    def var(self, **kw):
        # Append query variables; existing pairs are kept.
        kw = self.coerce_vars(kw)
        new_vars = self.vars + list(kw.items())
        return self.__class__(self.url, vars=new_vars,
                              attrs=self.attrs,
                              params=self.original_params)
    def setvar(self, **kw):
        """
        Like ``.var(...)``, except overwrites keys, where .var simply
        extends the keys. Setting a variable to None here will
        effectively delete it.
        """
        kw = self.coerce_vars(kw)
        new_vars = []
        for name, values in self.vars:
            if name in kw:
                continue
            new_vars.append((name, values))
        new_vars.extend(kw.items())
        return self.__class__(self.url, vars=new_vars,
                              attrs=self.attrs,
                              params=self.original_params)
    def setvars(self, **kw):
        """
        Creates a copy of this URL, but with all the variables set/reset
        (like .setvar(), except clears past variables at the same time)
        """
        return self.__class__(self.url, vars=kw.items(),
                              attrs=self.attrs,
                              params=self.original_params)
    def addpath(self, *paths):
        # Append path segments, inserting '/' separators as needed.
        u = self
        for path in paths:
            path = str(path).lstrip('/')
            new_url = u.url
            if not new_url.endswith('/'):
                new_url += '/'
            u = u.__class__(new_url+path, vars=u.vars,
                            attrs=u.attrs,
                            params=u.original_params)
        return u
    # `url / 'segment'` is an alias for addpath on both Python versions.
    if six.PY3:
        __truediv__ = addpath
    else:
        __div__ = addpath
    def become(self, OtherClass):
        # Re-wrap this URL as another resource type (e.g. URL -> Button).
        return OtherClass(self.url, vars=self.vars,
                          attrs=self.attrs,
                          params=self.original_params)
    def href__get(self):
        # The URL string with the encoded query string appended.  None
        # values are dropped; list/tuple values become repeated keys.
        s = self.url
        if self.vars:
            s += '?'
            vars = []
            for name, val in self.vars:
                if isinstance(val, (list, tuple)):
                    val = [v for v in val if v is not None]
                elif val is None:
                    continue
                vars.append((name, val))
            s += urlencode(vars, True)
        return s
    href = property(href__get)
    def __repr__(self):
        base = '<%s %s' % (self.__class__.__name__,
                           self.href or "''")
        if self.attrs:
            base += ' attrs(%s)' % (
                ' '.join(['%s="%s"' % (html_quote(n), html_quote(v))
                          for n, v in self.attrs.items()]))
        if self.original_params:
            # NOTE(review): this renders self.attrs rather than
            # self.original_params -- looks like a copy/paste slip.
            base += ' params(%s)' % (
                ', '.join(['%s=%r' % (n, v)
                           for n, v in self.attrs.items()]))
        return base + '>'
    def html__get(self):
        # Render the complete tag; requires the 'tag' param to be set.
        if not self.params.get('tag'):
            raise ValueError(
                "You cannot get the HTML of %r until you set the "
                "'tag' param'" % self)
        content = self._get_content()
        tag = '<%s' % self.params.get('tag')
        attrs = ' '.join([
            '%s="%s"' % (html_quote(n), html_quote(v))
            for n, v in self._html_attrs()])
        if attrs:
            tag += ' ' + attrs
        tag += self._html_extra()
        if content is None:
            return tag + ' />'
        else:
            return '%s>%s</%s>' % (tag, content, self.params.get('tag'))
    html = property(html__get)
    def _html_attrs(self):
        # Hook: (name, value) attribute pairs for the rendered tag.
        return self.attrs.items()
    def _html_extra(self):
        # Hook: extra text inserted after the attributes.
        return ''
    def _get_content(self):
        """
        Return the content for a tag (for self.html); return None
        for an empty tag (like ``<img />``)
        """
        raise NotImplementedError
    def _add_vars(self, vars):
        # Hook: how keyword args to __call__/__getitem__ are applied.
        raise NotImplementedError
    def _add_positional(self, args):
        # Hook: how positional args to __call__ are applied.
        raise NotImplementedError
class URL(URLResource):
    r"""
    An anchor (``<a>``) link resource.

    >>> u = URL('http://localhost')
    >>> u
    <URL http://localhost>
    >>> u = u['view']
    >>> str(u)
    'http://localhost/view'
    >>> u['//foo'].param(content='view').html
    '<a href="http://localhost/view/foo">view</a>'
    >>> u.param(confirm='Really?', content='goto').html
    '<a href="http://localhost/view" onclick="return confirm(\'Really?\')">goto</a>'
    >>> u(title='See "it"', content='goto').html
    '<a href="http://localhost/view?title=See+%22it%22">goto</a>'
    >>> u('another', var='fuggetaboutit', content='goto').html
    '<a href="http://localhost/view/another?var=fuggetaboutit">goto</a>'
    >>> u.attr(content='goto').html
    Traceback (most recent call last):
        ....
    ValueError: You must give a content param to <URL http://localhost/view attrs(content="goto")> generate anchor tags
    >>> str(u['foo=bar%20stuff'])
    'http://localhost/view?foo=bar+stuff'
    """
    default_params = {'tag': 'a'}
    def __str__(self):
        return self.href
    def _get_content(self):
        # Anchor text is mandatory; there is no sensible default.
        if not self.params.get('content'):
            raise ValueError(
                "You must give a content param to %r generate anchor tags"
                % self)
        return self.params['content']
    def _add_vars(self, vars):
        # 'confirm'/'content' become params and 'target' an attribute;
        # everything else becomes a query variable.
        url = self
        for name in ('confirm', 'content'):
            if name in vars:
                url = url.param(**{name: vars.pop(name)})
        if 'target' in vars:
            url = url.attr(target=vars.pop('target'))
        return url.var(**vars)
    def _add_positional(self, args):
        return self.addpath(*args)
    def _html_attrs(self):
        attrs = list(self.attrs.items())
        attrs.insert(0, ('href', self.href))
        if self.params.get('confirm'):
            # Guard navigation behind a Javascript confirm() prompt.
            attrs.append(('onclick', 'return confirm(%s)'
                          % js_repr(self.params['confirm'])))
        return attrs
    def onclick_goto__get(self):
        # JS snippet that navigates to this URL and cancels the default.
        return 'location.href=%s; return false' % js_repr(self.href)
    onclick_goto = property(onclick_goto__get)
    def button__get(self):
        # This URL re-wrapped as a <button> resource.
        return self.become(Button)
    button = property(button__get)
    def js_popup__get(self):
        # This URL re-wrapped as a Javascript popup link.
        return self.become(JSPopup)
    js_popup = property(js_popup__get)
class Image(URLResource):
    r"""
    A URL resource that renders as an ``<img />`` tag.

    >>> i = Image('/images')
    >>> i = i / '/foo.png'
    >>> i.html
    '<img src="/images/foo.png" />'
    >>> str(i['alt=foo'])
    '<img src="/images/foo.png" alt="foo" />'
    >>> i.href
    '/images/foo.png'
    """
    default_params = {'tag': 'img'}
    def __str__(self):
        # An image is best represented by its rendered tag.
        return self.html
    def _get_content(self):
        # <img> is an empty element; it never carries inner content.
        return None
    def _add_vars(self, vars):
        # Keyword arguments become tag attributes (e.g. alt='...').
        return self.attr(**vars)
    def _add_positional(self, args):
        # Positional arguments extend the path.
        return self.addpath(*args)
    def _html_attrs(self):
        # src comes first, followed by any explicitly-set attributes.
        return [('src', self.href)] + list(self.attrs.items())
class Button(URLResource):
    r"""
    A URL resource that renders as a ``<button>`` navigating via
    ``location.href``.

    >>> u = URL('/')
    >>> u = u / 'delete'
    >>> b = u.button['confirm=Sure?'](id=5, content='del')
    >>> str(b)
    '<button onclick="if (confirm(\'Sure?\')) {location.href=\'/delete?id=5\'}; return false">del</button>'
    """
    default_params = {'tag': 'button'}
    def __str__(self):
        return self.html
    def _get_content(self):
        # Button label: prefer the 'content' param, then the 'value' attr.
        if self.params.get('content'):
            return self.params['content']
        if self.attrs.get('value'):
            # Bug fix: previously returned self.attrs['content'], which
            # raised KeyError whenever only 'value' was supplied.
            return self.attrs['value']
        # @@: Error?
        return None
    def _add_vars(self, vars):
        button = self
        # 'confirm' and 'content' are tag parameters, not query variables.
        if 'confirm' in vars:
            button = button.param(confirm=vars.pop('confirm'))
        if 'content' in vars:
            button = button.param(content=vars.pop('content'))
        return button.var(**vars)
    def _add_positional(self, args):
        return self.addpath(*args)
    def _html_attrs(self):
        attrs = list(self.attrs.items())
        onclick = 'location.href=%s' % js_repr(self.href)
        if self.params.get('confirm'):
            # Guard navigation behind a JS confirm() dialog.
            onclick = 'if (confirm(%s)) {%s}' % (
                js_repr(self.params['confirm']), onclick)
        onclick += '; return false'
        attrs.insert(0, ('onclick', onclick))
        return attrs
class JSPopup(URLResource):
    r"""
    An anchor that opens its target in a popup window via
    ``window.open()``.

    >>> u = URL('/')
    >>> u = u / 'view'
    >>> j = u.js_popup(content='view')
    >>> j.html
    '<a href="/view" onclick="window.open(\'/view\', \'_blank\'); return false" target="_blank">view</a>'
    """
    default_params = {'tag': 'a', 'target': '_blank'}
    def _add_vars(self, vars):
        button = self
        # Popup geometry/behavior keywords become tag parameters.
        for var in ('width', 'height', 'stripped', 'content'):
            if var in vars:
                button = button.param(**{var: vars.pop(var)})
        return button.var(**vars)
    def _window_args(self):
        # Build the JS argument list for window.open(url, target, features).
        p = self.params
        features = []
        if p.get('stripped'):
            # 'stripped' is shorthand for hiding the browser chrome.
            p['location'] = p['status'] = p['toolbar'] = '0'
        for param in 'channelmode directories fullscreen location menubar resizable scrollbars status titlebar'.split():
            if param not in p:
                continue
            v = p[param]
            if v not in ('yes', 'no', '1', '0'):
                # Normalize arbitrary truthy/falsy values to '1'/'0'.
                if v:
                    v = '1'
                else:
                    v = '0'
            features.append('%s=%s' % (param, v))
        # Bug fix: these keywords must be split into words; the original
        # iterated the bare string character-by-character, so
        # height/left/top/width settings were silently dropped.
        for param in 'height left top width'.split():
            if not p.get(param):
                continue
            features.append('%s=%s' % (param, p[param]))
        args = [self.href, p['target']]
        if features:
            args.append(','.join(features))
        return ', '.join(map(js_repr, args))
    def _html_attrs(self):
        attrs = list(self.attrs.items())
        onclick = ('window.open(%s); return false'
                   % self._window_args())
        attrs.insert(0, ('target', self.params['target']))
        attrs.insert(0, ('onclick', onclick))
        attrs.insert(0, ('href', self.href))
        return attrs
    def _get_content(self):
        # Anchor tags need inner text; require an explicit 'content' param.
        # (Message grammar fixed: "to %r to generate".)
        if not self.params.get('content'):
            raise ValueError(
                "You must give a content param to %r to generate anchor tags"
                % self)
        return self.params['content']
    def _add_positional(self, args):
        return self.addpath(*args)
# Run the module's embedded doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| apache-2.0 |
theknightorc/p2pool-candycoin | p2pool/bitcoin/script.py | 282 | 2589 | from p2pool.util import math, pack
def reads_nothing(f):
    """Argument reader for opcodes that carry no payload.

    Returns ``None`` as the argument and the stream state unchanged.
    """
    return (None, f)
def protoPUSH(length):
    """Build a reader that consumes exactly ``length`` bytes of push data."""
    def read_push(f):
        return pack.read(f, length)
    return read_push
def protoPUSHDATA(size_len):
    """Build a reader for OP_PUSHDATA1/2/4 payloads.

    The returned reader consumes a ``size_len``-byte little-endian length
    prefix, then that many bytes of data.
    """
    def read_pushdata(f):
        prefix, f = pack.read(f, size_len)
        # The prefix is little-endian: reverse it and drop leading zero
        # bytes before interpreting it as a natural number.
        n = math.string_to_natural(prefix[::-1].lstrip(chr(0)))
        payload, f = pack.read(f, n)
        return payload, f
    return read_pushdata
# Table mapping each opcode byte to (name, argument-reader). Python 2 code:
# script bytes are handled as one-character strings.
opcodes = {}
# Default: any opcode not overridden below is UNK_<n> with no payload.
for i in xrange(256):
    opcodes[i] = 'UNK_' + str(i), reads_nothing
opcodes[0] = 'PUSH', lambda f: ('', f)  # OP_0 pushes an empty string
# 0x01-0x4b: the opcode value itself is the number of data bytes to push.
for i in xrange(1, 76):
    opcodes[i] = 'PUSH', protoPUSH(i)
opcodes[76] = 'PUSH', protoPUSHDATA(1)  # OP_PUSHDATA1: 1-byte length prefix
opcodes[77] = 'PUSH', protoPUSHDATA(2)  # OP_PUSHDATA2: 2-byte length prefix
opcodes[78] = 'PUSH', protoPUSHDATA(4)  # OP_PUSHDATA4: 4-byte length prefix
opcodes[79] = 'PUSH', lambda f: ('\x81', f)  # OP_1NEGATE pushes -1 (0x81)
# OP_1..OP_16 (0x51-0x60) push the small integer (opcode - 80); _i=i binds
# the loop variable at definition time to avoid the late-binding pitfall.
for i in xrange(81, 97):
    opcodes[i] = 'PUSH', lambda f, _i=i: (chr(_i - 80), f)
opcodes[172] = 'CHECKSIG', reads_nothing
opcodes[173] = 'CHECKSIGVERIFY', reads_nothing
opcodes[174] = 'CHECKMULTISIG', reads_nothing
opcodes[175] = 'CHECKMULTISIGVERIFY', reads_nothing
def parse(script):
    """Decode a raw script, yielding (opcode_name, opcode_arg) pairs."""
    stream = script, 0
    while pack.size(stream):
        first, stream = pack.read(stream, 1)
        name, read_arg = opcodes[ord(first)]
        arg, stream = read_arg(stream)
        yield name, arg
def get_sigop_count(script):
    """Return the total signature-operation cost of a script.

    CHECKSIG/CHECKSIGVERIFY count 1 each; the CHECKMULTISIG variants
    count 20 each; everything else counts 0.
    """
    costs = {
        'CHECKSIG': 1,
        'CHECKSIGVERIFY': 1,
        'CHECKMULTISIG': 20,
        'CHECKMULTISIGVERIFY': 20,
    }
    total = 0
    for name, _ in parse(script):
        total += costs.get(name, 0)
    return total
def create_push_script(datums): # datums can be ints or strs
    """Serialize a list of ints/byte-strings into a script of pushes.

    -1 and 1..16 use the dedicated one-byte opcodes (OP_1NEGATE,
    OP_1..OP_16); other integers are encoded as minimal little-endian
    sign-and-magnitude byte strings. Each string is pushed with the
    shortest push opcode that fits its length. (Python 2 code: data are
    byte strings.)
    """
    res = []
    for datum in datums:
        if isinstance(datum, (int, long)):
            if datum == -1 or 1 <= datum <= 16:
                # Small-integer opcodes: value + 80 (OP_1NEGATE is 0x4f).
                res.append(chr(datum + 80))
                continue
            negative = datum < 0
            datum = math.natural_to_string(abs(datum))
            if datum and ord(datum[0]) & 128:
                # High bit of the top byte is the sign flag; prepend a
                # zero byte so the magnitude is not misread as negative.
                datum = '\x00' + datum
            if negative:
                datum = chr(ord(datum[0]) + 128) + datum[1:]
            datum = datum[::-1]  # script numbers are little-endian
        if len(datum) < 76:
            # Direct push: the opcode byte is the data length itself.
            res.append(chr(len(datum)))
        elif len(datum) <= 0xff:
            # Bug fix: opcode bytes must be characters, not raw ints --
            # ''.join(res) raised TypeError on the integers 76/77/78.
            res.append(chr(76))  # OP_PUSHDATA1
            res.append(chr(len(datum)))
        elif len(datum) <= 0xffff:
            res.append(chr(77))  # OP_PUSHDATA2
            res.append(pack.IntType(16).pack(len(datum)))
        elif len(datum) <= 0xffffffff:
            res.append(chr(78))  # OP_PUSHDATA4
            res.append(pack.IntType(32).pack(len(datum)))
        else:
            raise ValueError('string too long')
        res.append(datum)
    return ''.join(res)
| gpl-3.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_8_0/device_cpu_stat_broker.py | 16 | 40187 | from ..broker import Broker
class DeviceCpuStatBroker(Broker):
controller = "device_cpu_stats"
def index(self, **kwargs):
"""Lists the available device cpu stats. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The ending date/time for the sample interval.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The ending date/time for the sample interval.
:type EndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The starting date/time for the sample interval.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The starting date/time for the sample interval.
:type StartTime: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the device cpu stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the device cpu stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device cpu stat methods. The listed methods will be called on each device cpu stat returned and included in the output. Available methods are: data_source, device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceCpuStatsID
:param sort: The data field(s) to use for sorting the output. Default is DeviceCpuStatsID. Valid values are DeviceCpuStatsID, DataSourceID, DeviceID, StartTime, EndTime, CpuIndex, CpuBusy.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceCpuStat. Valid values are DeviceCpuStatsID, DataSourceID, DeviceID, StartTime, EndTime, CpuIndex, CpuBusy. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_cpu_stats: An array of the DeviceCpuStat objects that match the specified input criteria.
:rtype device_cpu_stats: Array of DeviceCpuStat
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified device cpu stat.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device cpu stat methods. The listed methods will be called on each device cpu stat returned and included in the output. Available methods are: data_source, device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_cpu_stat: The device cpu stat identified by the specified DeviceCpuStatsID.
:rtype device_cpu_stat: DeviceCpuStat
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available device cpu stats matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CpuBusy: The CPU busy reading for the time period.
:type CpuBusy: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CpuBusy: The CPU busy reading for the time period.
:type CpuBusy: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CpuIndex: The CPU number.
:type CpuIndex: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CpuIndex: The CPU number.
:type CpuIndex: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The ending date/time for the sample interval.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The ending date/time for the sample interval.
:type EndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The starting date/time for the sample interval.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The starting date/time for the sample interval.
:type StartTime: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the device cpu stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the device cpu stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device cpu stat methods. The listed methods will be called on each device cpu stat returned and included in the output. Available methods are: data_source, device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceCpuStatsID
:param sort: The data field(s) to use for sorting the output. Default is DeviceCpuStatsID. Valid values are DeviceCpuStatsID, DataSourceID, DeviceID, StartTime, EndTime, CpuIndex, CpuBusy.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceCpuStat. Valid values are DeviceCpuStatsID, DataSourceID, DeviceID, StartTime, EndTime, CpuIndex, CpuBusy. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device cpu stats, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: CpuBusy, CpuIndex, DataSourceID, DeviceCpuStatsID, DeviceID, EndTime, StartTime.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_cpu_stats: An array of the DeviceCpuStat objects that match the specified input criteria.
:rtype device_cpu_stats: Array of DeviceCpuStat
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available device cpu stats matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: CpuBusy, CpuIndex, DataSourceID, DeviceCpuStatsID, DeviceID, EndTime, StartTime.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CpuBusy: The operator to apply to the field CpuBusy. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CpuBusy: The CPU busy reading for the time period. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CpuBusy: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CpuBusy: If op_CpuBusy is specified, the field named in this input will be compared to the value in CpuBusy using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CpuBusy must be specified if op_CpuBusy is specified.
:type val_f_CpuBusy: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CpuBusy: If op_CpuBusy is specified, this value will be compared to the value in CpuBusy using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CpuBusy must be specified if op_CpuBusy is specified.
:type val_c_CpuBusy: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CpuIndex: The operator to apply to the field CpuIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CpuIndex: The CPU number. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CpuIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CpuIndex: If op_CpuIndex is specified, the field named in this input will be compared to the value in CpuIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CpuIndex must be specified if op_CpuIndex is specified.
:type val_f_CpuIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CpuIndex: If op_CpuIndex is specified, this value will be compared to the value in CpuIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CpuIndex must be specified if op_CpuIndex is specified.
:type val_c_CpuIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceCpuStatsID: The operator to apply to the field DeviceCpuStatsID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceCpuStatsID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceCpuStatsID: If op_DeviceCpuStatsID is specified, the field named in this input will be compared to the value in DeviceCpuStatsID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceCpuStatsID must be specified if op_DeviceCpuStatsID is specified.
:type val_f_DeviceCpuStatsID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceCpuStatsID: If op_DeviceCpuStatsID is specified, this value will be compared to the value in DeviceCpuStatsID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceCpuStatsID must be specified if op_DeviceCpuStatsID is specified.
:type val_c_DeviceCpuStatsID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which this record was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The ending date/time for the sample interval. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified.
:type val_f_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified.
:type val_c_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The starting date/time for the sample interval. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified.
:type val_f_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified.
:type val_c_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the device cpu stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the device cpu stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device cpu stat methods. The listed methods will be called on each device cpu stat returned and included in the output. Available methods are: data_source, device, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceCpuStatsID
:param sort: The data field(s) to use for sorting the output. Default is DeviceCpuStatsID. Valid values are DeviceCpuStatsID, DataSourceID, DeviceID, StartTime, EndTime, CpuIndex, CpuBusy.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceCpuStat. Valid values are DeviceCpuStatsID, DataSourceID, DeviceID, StartTime, EndTime, CpuIndex, CpuBusy. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_cpu_stats: An array of the DeviceCpuStat objects that match the specified input criteria.
:rtype device_cpu_stats: Array of DeviceCpuStat
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def infradevice(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceCpuStatsID: The internal NetMRI for this device CPU statistics record.
:type DeviceCpuStatsID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
| apache-2.0 |
Osmose/trephub | vendor-local/lib/python/south/management/commands/migrationcheck.py | 23 | 2648 | from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command, CommandError
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.models import loading
from django.test import simple
from south.migration import Migrations
from south.exceptions import NoMigrations
from south.hacks import hacks
class Command(BaseCommand):
    # Management command that migrates each app against a fresh test database
    # to smoke out missing ``depends_on`` declarations in South migrations.
    # NOTE: Python 2 / Django-with-South era code (``except Exception, e``).
    help = "Runs migrations for each app in turn, detecting missing depends_on values."
    usage_str = "Usage: ./manage.py migrationcheck"
    def handle(self, check_app_name=None, **options):
        """Migrate every installed app (or just `check_app_name`) in isolation.

        Each app is migrated against a freshly created test database; a failure
        during migrate or during a basic query of each model is counted as a
        missing-dependency symptom. Raises CommandError if any app failed.
        """
        runner = simple.DjangoTestSuiteRunner(verbosity=0)
        err_msg = "Failed to migrate %s; see output for hints at missing dependencies:\n"
        hacks.patch_flush_during_test_db_creation()
        failures = 0
        # Check either one explicitly requested app or everything installed.
        if check_app_name is None:
            app_names = settings.INSTALLED_APPS
        else:
            app_names = [check_app_name]
        for app_name in app_names:
            app_label = app_name.split(".")[-1]
            if app_name == 'south':
                continue
            # Apps without migrations (or misconfigured ones) cannot be checked.
            try:
                Migrations(app_name)
            except (NoMigrations, ImproperlyConfigured):
                continue
            app = loading.get_app(app_label)
            verbosity = int(options.get('verbosity', 1))
            if verbosity >= 1:
                self.stderr.write("processing %s\n" % app_name)
            # Fresh test DB per app so failures are attributable to this app alone.
            old_config = runner.setup_databases()
            try:
                call_command('migrate', app_label, noinput=True, verbosity=verbosity)
                # Touch each model's default manager; a broken schema surfaces here.
                for model in loading.get_models(app):
                    dummy = model._default_manager.exists()
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user interruption / interpreter shutdown.
                raise
            except Exception, e:
                failures += 1
                if verbosity >= 1:
                    self.stderr.write(err_msg % app_name)
                    self.stderr.write("%s\n" % e)
            finally:
                # Always tear the test database down, even after a failure.
                runner.teardown_databases(old_config)
        if failures > 0:
            raise CommandError("Missing depends_on found in %s app(s)." % failures)
        self.stderr.write("No missing depends_on found.\n")
#
#for each app:
# start with blank db.
# syncdb only south (and contrib?)
#
# migrate a single app all the way up. any errors is missing depends_on.
# for all models of that app, try the default manager:
# from django.db.models import loading
# for m in loading.get_models(loading.get_app('a')):
# m._default_manager.exists()
# Any error is also a missing depends on.
| bsd-3-clause |
M4sse/chromium.src | content/test/gpu/gpu_tests/webgl_conformance_expectations.py | 9 | 14631 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class WebGLConformanceExpectations(GpuTestExpectations):
  """Expected failures/skips for the WebGL conformance suite.

  This is a purely declarative table: self.Fail(...) marks a test as expected
  to fail, self.Skip(...) prevents it from running at all. Condition lists mix
  OS tags ('win', 'mac', 'linux', 'android', 'chromeos', specific releases),
  GPU vendor names, and (vendor, device-id) tuples; each entry usually carries
  a crbug (or, where noted, an ANGLE bug) number.
  """
  def SetExpectations(self):
    # Fails on all platforms
    self.Fail('conformance/glsl/misc/shaders-with-invariance.html',
        bug=421710)
    self.Fail('conformance/glsl/bugs/essl3-shaders-with-webgl1.html',
        bug=428845)
    self.Fail('conformance/glsl/misc/expression-list-in-declarator-initializer.html',
        bug=428845)
    self.Fail('conformance/uniforms/gl-uniform-arrays.html',
        bug=433385)
    # Win failures
    self.Fail('conformance/glsl/misc/struct-equals.html',
        ['win'], bug=391957)
    self.Fail('conformance/glsl/bugs/conditional-discard-in-loop.html',
        ['win'], bug=402195)
    self.Fail('conformance/glsl/misc/ternary-operators-in-global-initializers.html',
        ['win'], bug=415694)
    self.Fail('conformance/glsl/misc/struct-specifiers-in-uniforms.html',
        ['win'], bug=433412)
    # This test still causes itself and any tests afterwards to time out
    # in Win Debug bots.
    # NOTE(review): 'Win' is capitalized here while every other entry uses
    # lowercase 'win' -- presumably condition matching is case-insensitive;
    # verify against GpuTestExpectations before relying on it.
    self.Skip('conformance/textures/texture-copying-feedback-loops.html',
        ['Win'], bug=421695)
    self.Fail('conformance/rendering/framebuffer-switch.html',
        ['win'], bug=428849)
    self.Fail('conformance/rendering/framebuffer-texture-switch.html',
        ['win'], bug=428849)
    # Win7 / Intel failures
    self.Fail('conformance/rendering/gl-scissor-test.html',
        ['win7', 'intel'], bug=314997)
    self.Fail('conformance/context/premultiplyalpha-test.html',
        ['win7', 'intel'])
    self.Fail('conformance/textures/copy-tex-image-and-sub-image-2d.html',
        ['win7', 'intel'])
    self.Fail('conformance/rendering/gl-viewport-test.html',
        ['win7', 'intel'], bug=372511)
    self.Fail('conformance/glsl/misc/shader-with-array-of-structs-uniform.html',
        ['win7', 'intel', 'nvidia'], bug=373972)
    # Win / AMD failures
    self.Fail('conformance/textures/texparameter-test.html',
        ['win', 'amd', 'd3d9'], bug=839) # angle bug ID
    # Mac / Intel failures
    # Radar 13499466
    self.Fail('conformance/limits/gl-max-texture-dimensions.html',
        ['mac', 'intel'], bug=225642)
    # Radar 13499623
    self.Fail('conformance/textures/texture-size.html',
        ['mac', 'intel'], bug=225642)
    # Mac / Intel HD 3000 failures
    self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html',
        ['mac', ('intel', 0x116)], bug=322795)
    # Radar 13499677
    self.Fail('conformance/glsl/functions/' +
        'glsl-function-smoothstep-gentype.html',
        ['mac', ('intel', 0x116)], bug=225642)
    self.Fail('conformance/extensions/webgl-draw-buffers.html',
        ['mac', ('intel', 0x116)], bug=369349)
    # Mac 10.8 / Intel HD 3000 failures
    self.Fail('conformance/rendering/gl-scissor-test.html',
        ['mountainlion', ('intel', 0x116)], bug=314997)
    self.Fail('conformance/ogles/GL/operators/operators_009_to_016.html',
        ['mountainlion', ('intel', 0x116)], bug=322795)
    # Mac 10.9 / Intel HD 3000 failures
    self.Fail('conformance/ogles/GL/operators/operators_009_to_016.html',
        ['mavericks', ('intel', 0x116)], bug=417415)
    self.Fail('conformance/rendering/gl-scissor-test.html',
        ['mavericks', ('intel', 0x116)], bug=417415)
    # Mac Retina failures
    self.Fail(
        'conformance/glsl/bugs/array-of-struct-with-int-first-position.html',
        ['mac', ('nvidia', 0xfd5), ('nvidia', 0xfe9)], bug=368912)
    # Mac 10.8 / ATI failures
    self.Fail(
        'conformance/rendering/' +
        'point-with-gl-pointcoord-in-fragment-shader.html',
        ['mountainlion', 'amd'])
    # Mac 10.7 / Intel failures
    self.Skip('conformance/glsl/functions/glsl-function-asin.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-dot.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-faceforward.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-length.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-normalize.html',
        ['lion', 'intel'])
    self.Skip('conformance/glsl/functions/glsl-function-reflect.html',
        ['lion', 'intel'])
    self.Skip('conformance/rendering/line-loop-tri-fan.html',
        ['lion', 'intel'])
    self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
        ['lion', 'intel'], bug=345575)
    self.Skip('conformance/ogles/GL/dot/dot_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/faceforward/faceforward_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/length/length_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/normalize/normalize_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/reflect/reflect_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/refract/refract_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    self.Skip('conformance/ogles/GL/tan/tan_001_to_006.html',
        ['lion', 'intel'], bug=323736)
    # Two flaky tests.
    self.Fail('conformance/ogles/GL/functions/functions_049_to_056.html',
        ['lion', 'intel'], bug=393331)
    self.Fail('conformance/extensions/webgl-compressed-texture-size-limit.html',
        ['lion', 'intel'], bug=393331)
    # Linux failures
    self.Fail('conformance/textures/default-texture.html',
        ['linux', ('nvidia', 0x104a)], bug=422152)
    self.Fail('conformance/programs/program-test.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/rendering/multisample-corruption.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/default-texture.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-video.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-webgl-canvas.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-webgl-canvas-rgb565.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-webgl-canvas-rgba4444.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-webgl-canvas-rgba5551.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/texture-mips.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/texture-npot-video.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/textures/texture-size.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/more/functions/copyTexSubImage2D.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/more/functions/drawArraysOutOfBounds.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/more/functions/texImage2DHTML.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    self.Fail('conformance/more/functions/texSubImage2DHTML.html',
        ['linux', ('amd', 0x68f9)], bug=436212)
    # Android failures
    # The following test is very slow and therefore times out on Android bot.
    self.Skip('conformance/rendering/multisample-corruption.html',
        ['android'])
    # The following test times out on Android bot.
    self.Fail('conformance/uniforms/gl-uniform-arrays.html',
        ['android'], bug=369300)
    self.Fail('conformance/glsl/misc/empty_main.vert.html',
        ['android'], bug=315976)
    self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
        ['android'], bug=315976)
    # The following tests are disabled due to security issues.
    self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-video.html',
        ['android'], bug=334204)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-video-rgb565.html',
        ['android'], bug=334204)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-video-rgba4444.html',
        ['android'], bug=334204)
    self.Fail('conformance/textures/' +
        'tex-image-and-sub-image-2d-with-video-rgba5551.html',
        ['android'], bug=334204)
    self.Fail('conformance/textures/texture-npot-video.html',
        ['android'], bug=334204)
    # ChromeOS: affecting all devices.
    self.Fail('conformance/extensions/webgl-depth-texture.html',
        ['chromeos'], bug=382651)
    # ChromeOS: all Intel except for pinetrail (stumpy, parrot, peppy,...)
    # We will just include pinetrail here for now as we don't want to list
    # every single Intel device ID.
    self.Fail('conformance/glsl/misc/empty_main.vert.html',
        ['chromeos', 'intel'], bug=375556)
    self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
        ['chromeos', 'intel'], bug=375556)
    self.Fail('conformance/glsl/misc/shaders-with-varyings.html',
        ['chromeos', 'intel'], bug=375556)
    self.Fail('conformance/renderbuffers/framebuffer-object-attachment.html',
        ['chromeos', 'intel'], bug=375556)
    self.Fail('conformance/textures/texture-size-limit.html',
        ['chromeos', 'intel'], bug=385361)
    # ChromeOS: pinetrail (alex, mario, zgb).
    self.Fail('conformance/attribs/gl-vertex-attrib-render.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/functions/glsl-function-atan-xy.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/functions/glsl-function-cos.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/functions/glsl-function-sin.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/misc/empty_main.vert.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/misc/shaders-with-varyings.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/glsl/variables/gl-frontfacing.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/limits/gl-max-texture-dimensions.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/acos/acos_001_to_006.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/asin/asin_001_to_006.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/atan/atan_001_to_008.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/build/build_009_to_016.html',
        ['chromeos', ('intel', 0xa011)], bug=378938)
    self.Fail('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/cos/cos_001_to_006.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/discard/discard_001_to_002.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_001_to_008.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_065_to_072.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_081_to_088.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_097_to_104.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_105_to_112.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_113_to_120.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/functions/functions_121_to_126.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail(
        'conformance/ogles/GL/gl_FrontFacing/gl_FrontFacing_001_to_001.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/log/log_001_to_008.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/log2/log2_001_to_008.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/normalize/normalize_001_to_006.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/ogles/GL/sin/sin_001_to_006.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/rendering/point-size.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/rendering/polygon-offset.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/textures/texture-mips.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/textures/texture-npot.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/textures/texture-npot-video.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/textures/texture-size.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/textures/texture-size-limit.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Fail('conformance/uniforms/gl-uniform-arrays.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    self.Skip('conformance/uniforms/uniform-default-values.html',
        ['chromeos', ('intel', 0xa011)], bug=375554)
    # Flaky on Mac & Linux
    self.Fail('conformance/textures/texture-upload-size.html',
        ['mac'], bug=436493)
    self.Fail('conformance/textures/texture-upload-size.html',
        ['linux'], bug=436493)
    # Temporary suppression while updating this test.
    self.Fail('conformance/misc/bad-arguments-test.html', bug=441997)
| bsd-3-clause |
AuyaJackie/odoo | addons/point_of_sale/wizard/pos_box.py | 381 | 2211 |
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.account.wizard.pos_box import CashBox
class PosBox(CashBox):
    # Abstract base for PoS cash in/out wizards. _register = False keeps this
    # intermediate class out of the OpenERP model registry.
    _register = False
    def run(self, cr, uid, ids, context=None):
        """Apply the cash box operation.

        When launched from a PoS session, the operation targets the session's
        cash register statement(s); otherwise defer to the generic CashBox
        behavior. Raises except_osv if the session has no cash register.
        """
        if not context:
            context = dict()
        # 'or False' / 'or []' normalize falsy values stored in the context.
        active_model = context.get('active_model', False) or False
        active_ids = context.get('active_ids', []) or []
        if active_model == 'pos.session':
            records = self.pool[active_model].browse(cr, uid, active_ids, context=context)
            # Only sessions that actually have a cash register can be operated on.
            bank_statements = [record.cash_register_id for record in records if record.cash_register_id]
            if not bank_statements:
                raise osv.except_osv(_('Error!'),
                                     _("There is no cash register for this PoS Session"))
            return self._run(cr, uid, ids, bank_statements, context=context)
        else:
            # Not invoked from a PoS session: fall back to the generic wizard.
            return super(PosBox, self).run(cr, uid, ids, context=context)
class PosBoxIn(PosBox):
    _inherit = 'cash.box.in'
    def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
        """Build the statement-line values, tagging the PoS session name as reference."""
        context = {} if context is None else context
        vals = super(PosBoxIn, self)._compute_values_for_statement_line(
            cr, uid, box, record, context=context)
        model = context.get('active_model', False) or False
        ids = context.get('active_ids', []) or []
        # When launched from a PoS session, use its name as the line reference.
        if model == 'pos.session':
            session = self.pool[model].browse(cr, uid, ids, context=context)[0]
            vals['ref'] = session.name
        return vals
class PosBoxOut(PosBox):
    _inherit = 'cash.box.out'
    def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
        """Build the statement-line values, tagging the PoS session name as reference."""
        # Guard against context=None for parity with PosBoxIn: without it the
        # context.get() calls below raise AttributeError when the wizard is
        # invoked with the default context.
        if context is None:
            context = {}
        values = super(PosBoxOut, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
        active_model = context.get('active_model', False) or False
        active_ids = context.get('active_ids', []) or []
        # When launched from a PoS session, use its name as the line reference.
        if active_model == 'pos.session':
            session = self.pool[active_model].browse(cr, uid, active_ids, context=context)[0]
            values['ref'] = session.name
        return values
onaio/rapidpro | temba/flows/migrations/0098_flowpathrecentmessage.py | 3 | 1055 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-26 14:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the FlowPathRecentMessage table,
    # which stores recent messages seen along a specific flow-graph edge.
    dependencies = [
        ('flows', '0097_interrupt_runs_for_archived_flows'),
    ]
    operations = [
        migrations.CreateModel(
            name='FlowPathRecentMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # from_uuid/to_uuid identify the flow-graph edge the message traversed.
                ('from_uuid', models.UUIDField(help_text='Which flow node they came from')),
                ('to_uuid', models.UUIDField(help_text='Which flow node they went to')),
                ('text', models.CharField(max_length=640)),
                ('created_on', models.DateTimeField(help_text='When the message arrived')),
                ('run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recent_messages', to='flows.FlowRun')),
            ],
        ),
    ]
| agpl-3.0 |
CBMM/CBaaS | democlients/deepdream.py | 1 | 4018 | # imports and basic notebook setup
from cStringIO import StringIO
import numpy as np
import scipy.ndimage as nd
import PIL.Image
# from IPython.display import clear_output, Image, display
from google.protobuf import text_format
import caffe
import cbaas
# Select the Caffe compute mode: prefer GPU, fall back to CPU.
# Fixes two defects in the original:
#   1. `print "CPU mode` was an unterminated string literal (SyntaxError,
#      so the module could not even be imported).
#   2. `caffe.set_gpu_mode()` is not the pycaffe API -- the correct call is
#      caffe.set_mode_gpu() -- so the bare `except` always swallowed the
#      AttributeError and the script silently ran on CPU.
try:
    caffe.set_mode_gpu()
except Exception:
    print("CPU mode")

# Paths to the GoogLeNet model shipped with Caffe.
caffe_root = '/Users/greghale/Programming/caffe/'
model_path = caffe_root + 'models/bvlc_googlenet/'
net_fn = model_path + 'deploy.prototxt'
param_fn = model_path + 'bvlc_googlenet.caffemodel'

# Patching model to be able to compute gradients.
# Note that you can also manually add "force_backward: true" line to "deploy.prototxt".
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(net_fn).read(), model)
model.force_backward = True
open('tmp.prototxt', 'w').write(str(model))

net = caffe.Classifier('tmp.prototxt', param_fn,
                       mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent
                       channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
# a couple of utility functions for converting to and from Caffe's input image layout
def preprocess(net, img):
    """Convert an HxWxC RGB(A) image into Caffe's mean-subtracted CxHxW BGR layout."""
    # An RGBA input keeps only its first three (color) channels.
    if np.shape(img)[2] == 4:
        img = img[:, :, :3]
    chw = np.rollaxis(img, 2)      # HxWxC -> CxHxW
    bgr = np.float32(chw[::-1])    # reverse the channel axis: RGB -> BGR
    return bgr - net.transformer.mean['data']
def deprocess(net, img):
    """Invert preprocess(): mean-subtracted CxHxW BGR -> HxWxC RGB scaled to [0, 1]."""
    restored = img + net.transformer.mean['data']  # add the channel mean back
    return np.dstack(restored[::-1]) / 255         # BGR -> RGB, stack to HxWxC
def objective_L2(dst):
    """L2 objective: the gradient of ||data||^2 / 2 is the blob's own activations,
    so simply copy the activation data into the diff buffer in place."""
    dst.diff[...] = dst.data
def make_step(net, step_size=1.5, end='inception_4c/output',
              jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.

    Mutates net.blobs['data'] in place: jitters the image, runs a forward pass
    to layer `end`, seeds the backward pass with `objective`, then nudges the
    image along the mean-normalized gradient so the objective increases.
    '''
    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]
    # Random per-step jitter de-correlates the gradient from the pixel grid,
    # reducing high-frequency artifacts in the dreamed image.
    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift
    net.forward(end=end)
    objective(dst) # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g
    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image
    if clip:
        # Clamp pixels to the displayable [0, 255] range expressed in
        # mean-subtracted space (i.e. [-mean, 255-mean]).
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
              end='inception_4c/output', clip=True, **step_params):
    """Run the DeepDream gradient-ascent loop over a multi-scale image pyramid.

    :param net: caffe.Classifier whose 'data' blob is optimized in place.
    :param base_img: input image as an HxWxC numpy array (RGB).
    :param iter_n: gradient-ascent steps per octave.
    :param octave_n: number of pyramid levels.
    :param octave_scale: downscale factor between successive levels.
    :param end: layer whose activations are amplified.
    :param clip: keep pixel values inside the displayable range.
    :param step_params: extra keyword arguments forwarded to make_step().
    :return: the dreamed image in deprocess() format (HxWxC, roughly [0, 1]).
    """
    # prepare base images for all octaves (octaves[0] is full resolution)
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    # Process octaves coarse-to-fine, carrying the generated detail upward.
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)
        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            # showarray(vis)
            print octave, i, end, vis.shape
            # clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
def do_work(img):
    """CBaaS job callback: run a default deepdream pass over one incoming image
    using the module-level GoogLeNet classifier."""
    return deepdream(net,img)
if __name__ == "__main__":
    # Register do_work as a remote "deepdream" worker on the CBaaS broker;
    # the constructor presumably blocks while serving jobs -- TODO confirm
    # against the cbaas.Listener API.
    l = cbaas.Listener(domain='greghale.io', on_job=do_work, function_name="deepdream", type="TModelImage -> TModelImage")
    print "Finished (why?)"
arnif/CouchPotatoServer | libs/bs4/builder/_lxml.py | 36 | 6297 | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
@property
def default_parser(self):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
def __init__(self, parser=None, empty_element_tags=None):
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
if parser is None:
# Use the default parser.
parser = self.default_parser
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False)
self.parser = parser
self.soup = None
self.nsmaps = None
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 3-tuple (markup, original encoding, encoding
declared within markup).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
if isinstance(markup, basestring):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
self.parser.feed(data)
while data != '':
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if data != '':
self.parser.feed(data)
self.parser.close()
def close(self):
self.nsmaps = None
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(nsmap) == 0 and self.nsmaps != None:
# There are no new namespaces for this tag, but namespaces
# are in play, so we need a separate tag stack to know
# when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
if self.nsmaps is None:
self.nsmaps = []
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
namespace, name = self._getNsTag(name)
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if self.nsmaps != None:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
if len(self.nsmaps) == 0:
# Namespaces are no longer in play, so don't bother keeping
# track of the namespace stack.
self.nsmaps = None
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
    """lxml target callback: record the document's DOCTYPE declaration."""
    # Flush any pending character data before inserting the Doctype node.
    self.soup.endData()
    doctype = Doctype.for_name_and_ids(name, pubid, system)
    self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavor of the lxml tree builder (no namespace handling)."""

    # Capability flags used by BeautifulSoup's builder registry.
    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    @property
    def default_parser(self):
        # lxml's forgiving HTML parser (vs. the strict XML parser).
        return etree.HTMLParser

    def feed(self, markup):
        # HTML markup arrives as one string; no chunked reads needed here.
        self.parser.feed(markup)
        self.parser.close()

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| gpl-3.0 |
sodafree/backend | build/ipython/build/lib.linux-i686-2.7/IPython/parallel/util.py | 3 | 11405 | """some generic utilities for dealing with classes, urls, and serialization
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports.
import logging
import os
import re
import stat
import socket
import sys
from signal import signal, SIGINT, SIGABRT, SIGTERM
try:
from signal import SIGKILL
except ImportError:
SIGKILL=None
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
# System library imports
import zmq
from zmq.log import handlers
from IPython.external.decorator import decorator
# IPython imports
from IPython.config.application import Application
from IPython.utils import py3compat
from IPython.utils.pickleutil import can, uncan, canSequence, uncanSequence
from IPython.utils.newserialized import serialize, unserialize
from IPython.zmq.log import EnginePUBHandler
from IPython.zmq.serialize import (
unserialize_object, serialize_object, pack_apply_message, unpack_apply_message
)
if py3compat.PY3:
buffer = memoryview
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class Namespace(dict):
    """Subclass of dict for attribute access to keys."""

    def __getattr__(self, key):
        """getattr aliased to getitem"""
        # Fix: `key in self` replaces the Python-2-only iterkeys() call
        # (which was also an O(n) scan); this keeps the class working on
        # Python 3, matching the module's explicit py3compat support.
        if key in self:
            return self[key]
        else:
            # NOTE(review): raises NameError rather than AttributeError,
            # which breaks hasattr(); preserved for compatibility.
            raise NameError(key)

    def __setattr__(self, key, value):
        """setattr aliased to setitem, with strict"""
        # Refuse to shadow dict's own API (keys, items, ...).
        if hasattr(dict, key):
            raise KeyError("Cannot override dict keys %r"%key)
        self[key] = value
class ReverseDict(dict):
    """simple double-keyed subset of dict methods.

    Lookups fall back to an inverted mapping, so both ``d[key] -> value``
    and ``d[value] -> key`` work.  Values must therefore be hashable, and
    the key and value spaces must not overlap.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self._reverse = dict()
        # Fix: items() replaces the Python-2-only iteritems(), keeping
        # this class usable on Python 3 as well (same iteration behavior).
        for key, value in self.items():
            self._reverse[value] = key

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # Fall back to the reverse direction.
            return self._reverse[key]

    def __setitem__(self, key, value):
        if key in self._reverse:
            raise KeyError("Can't have key %r on both sides!"%key)
        dict.__setitem__(self, key, value)
        self._reverse[value] = key

    def pop(self, key):
        # Forward-direction pop only; keeps the reverse map in sync.
        value = dict.pop(self, key)
        self._reverse.pop(value)
        return value

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
@decorator
def log_errors(f, self, *args, **kwargs):
    """decorator to log unhandled exceptions raised in a method.

    For use wrapping on_recv callbacks, so that exceptions
    do not cause the stream to be closed.
    """
    try:
        return f(self, *args, **kwargs)
    except Exception:
        # Swallow deliberately after logging: callbacks must not kill the
        # stream.  Requires the wrapped object to expose ``self.log``.
        self.log.error("Uncaught exception in %r" % f, exc_info=True)
def is_url(url):
    """Return True if *url* looks like a zmq url (scheme://address)."""
    if '://' not in url:
        return False
    scheme = url.split('://', 1)[0]
    # zmq understands exactly these transports.
    return scheme.lower() in ('tcp', 'pgm', 'epgm', 'ipc', 'inproc')
def validate_url(url):
    """validate a url for zeromq

    Raises TypeError for non-strings and AssertionError for malformed
    urls; returns True otherwise.  Only tcp urls are fully validated.
    """
    if not isinstance(url, basestring):
        raise TypeError("url must be a string, not %r"%type(url))
    url = url.lower()

    proto_addr = url.split('://')
    assert len(proto_addr) == 2, 'Invalid url: %r'%url
    proto, addr = proto_addr
    assert proto in ['tcp','pgm','epgm','ipc','inproc'], "Invalid protocol: %r"%proto

    # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391
    # author: Remi Sabourin
    pat = re.compile(r'^([\w\d]([\w\d\-]{0,61}[\w\d])?\.)*[\w\d]([\w\d\-]{0,61}[\w\d])?$')

    if proto == 'tcp':
        lis = addr.split(':')
        assert len(lis) == 2, 'Invalid url: %r'%url
        addr,s_port = lis
        try:
            port = int(s_port)
        except ValueError:
            # Bug fix: the original formatted this message with ``port``,
            # which is unbound when int() fails, so the real error was
            # masked by a NameError.  Report the offending string instead.
            raise AssertionError("Invalid port %r in url: %r"%(s_port, url))
        assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url
    else:
        # only validate tcp urls currently
        pass

    return True
def validate_url_container(container):
    """validate a potentially nested collection of urls."""
    if isinstance(container, basestring):  # Python-2-only basestring
        url = container
        return validate_url(url)
    elif isinstance(container, dict):
        # For dicts, only the values are urls.  Python-2-only itervalues().
        container = container.itervalues()

    # Recurse into lists/tuples/generators of urls (or of containers).
    for element in container:
        validate_url_container(element)
def split_url(url):
    """split a zmq url (tcp://ip:port) into ('tcp','ip','port')."""
    pieces = url.split('://')
    assert len(pieces) == 2, 'Invalid url: %r'%url
    transport, location = pieces
    host_port = location.split(':')
    assert len(host_port) == 2, 'Invalid url: %r'%url
    host, port = host_port
    return transport, host, port
def disambiguate_ip_address(ip, location=None):
    """Resolve the multi-interface wildcards '0.0.0.0' / '*' to a
    connectable address, interpreting *location* relative to this machine
    (default interpretation of location is localhost)."""
    if ip not in ('0.0.0.0', '*'):
        # Already a concrete address; nothing to disambiguate.
        return ip
    try:
        external_ips = socket.gethostbyname_ex(socket.gethostname())[2]
    except (socket.gaierror, IndexError):
        # couldn't identify this machine, assume localhost
        external_ips = []
    if location is None or location in external_ips or not external_ips:
        # Unspecified or local location: connect over loopback.
        return '127.0.0.1'
    if location:
        # A remote location was given: connect back to it directly.
        return location
    return ip
def disambiguate_url(url, location=None):
    """Rewrite a zmq url whose host is the wildcard '0.0.0.0' or '*' into a
    connectable one, based on *location* (default interpretation is
    localhost).  Non-tcp urls (ipc, inproc, ...) pass through untouched.

    This is for zeromq urls, such as tcp://*:10101."""
    try:
        transport, ip, port = split_url(url)
    except AssertionError:
        # probably not tcp url; could be ipc, etc.
        return url
    return "%s://%s:%s"%(transport, disambiguate_ip_address(ip, location), port)
#--------------------------------------------------------------------------
# helpers for implementing old MEC API via view.apply
#--------------------------------------------------------------------------
def interactive(func):
    """decorator for making functions appear as interactively defined.

    This results in the function being linked to the user_ns as globals()
    instead of the module globals().
    """
    func.__module__ = '__main__'
    return func
@interactive
def _push(**ns):
    """helper method for implementing `client.push` via `client.apply`"""
    # Runs on the engine: merge the pushed names straight into the
    # interactive user namespace (globals() is __main__ thanks to
    # the @interactive decorator above).
    globals().update(ns)
@interactive
def _pull(keys):
    """helper method for implementing `client.pull` via `client.apply`"""
    user_ns = globals()
    if isinstance(keys, (list,tuple, set)):
        # Validate every name first so a partial result is never returned.
        for key in keys:
            if not user_ns.has_key(key):  # Python-2-only dict.has_key
                raise NameError("name '%s' is not defined"%key)
        return map(user_ns.get, keys)
    else:
        if not user_ns.has_key(keys):
            raise NameError("name '%s' is not defined"%keys)
        return user_ns.get(keys)
@interactive
def _execute(code):
    """helper method for implementing `client.execute` via `client.apply`"""
    # NOTE: Python-2 exec-statement syntax (a SyntaxError on Python 3);
    # executes the code string in the engine's user namespace.
    exec code in globals()
#--------------------------------------------------------------------------
# extra process management utilities
#--------------------------------------------------------------------------
# Ports already handed out by select_random_ports in this process.
_random_ports = set()

def select_random_ports(n):
    """Selects and return n random ports that are available.

    Binds *n* throwaway sockets to OS-assigned ports, then closes them
    and returns the port numbers.  Ports previously returned in this
    process (tracked in ``_random_ports``) are skipped, so repeated
    calls never hand out duplicates.

    Note: the ports are free at return time but not reserved; another
    process could still grab one before the caller binds it.
    """
    ports = []
    # Fix: ``range`` instead of the Python-2-only ``xrange`` — identical
    # behavior on Python 2, and it keeps this helper working on Python 3,
    # which this module otherwise explicitly supports (py3compat shims).
    for i in range(n):
        sock = socket.socket()
        sock.bind(('', 0))
        while sock.getsockname()[1] in _random_ports:
            # Collided with a previously returned port; try again.
            sock.close()
            sock = socket.socket()
            sock.bind(('', 0))
        ports.append(sock)
    for i, sock in enumerate(ports):
        port = sock.getsockname()[1]
        sock.close()
        ports[i] = port
        _random_ports.add(port)
    return ports
def signal_children(children):
    """Relay interupt/term signals to children, for more solid process cleanup."""
    def terminate_children(sig, frame):
        # Installed as the handler for INT/ABRT/TERM: terminate every
        # child, then exit (status 0 only for a plain Ctrl-C).
        log = Application.instance().log
        log.critical("Got signal %i, terminating children..."%sig)
        for child in children:
            child.terminate()

        sys.exit(sig != SIGINT)
        # sys.exit(sig)
    for sig in (SIGINT, SIGABRT, SIGTERM):
        signal(sig, terminate_children)
def generate_exec_key(keyfile):
    """Write a freshly generated random UUID execution key to *keyfile*."""
    import uuid
    key = str(uuid.uuid4())
    with open(keyfile, 'w') as stream:
        # f.write('ipython-key ')
        stream.write(key + '\n')
    # set user-only RW permissions (0600)
    # this will have no effect on Windows
    os.chmod(keyfile, stat.S_IRUSR | stat.S_IWUSR)
def integer_loglevel(loglevel):
    """Coerce a log level given as an int, digit-string, or level name
    (e.g. 'DEBUG') to its integer value."""
    try:
        return int(loglevel)
    except ValueError:
        pass
    if isinstance(loglevel, str):
        # A symbolic name such as 'INFO': look it up on the logging module.
        loglevel = getattr(logging, loglevel)
    return loglevel
def connect_logger(logname, context, iface, root="ip", loglevel=logging.DEBUG):
    """Publish records from logger *logname* over a zmq PUB socket
    connected to *iface*, under topic *root*."""
    logger = logging.getLogger(logname)
    if any([isinstance(h, handlers.PUBHandler) for h in logger.handlers]):
        # don't add a second PUBHandler
        return
    loglevel = integer_loglevel(loglevel)
    lsock = context.socket(zmq.PUB)
    lsock.connect(iface)
    handler = handlers.PUBHandler(lsock)
    handler.setLevel(loglevel)
    handler.root_topic = root
    logger.addHandler(handler)
    logger.setLevel(loglevel)
def connect_engine_logger(context, iface, engine, loglevel=logging.DEBUG):
    """Attach an EnginePUBHandler to the root logger, publishing this
    engine's records over a zmq PUB socket connected to *iface*.

    Returns the root logger, or None if a PUBHandler is already attached.
    """
    logger = logging.getLogger()
    if any([isinstance(h, handlers.PUBHandler) for h in logger.handlers]):
        # don't add a second PUBHandler
        return
    loglevel = integer_loglevel(loglevel)
    lsock = context.socket(zmq.PUB)
    lsock.connect(iface)
    handler = EnginePUBHandler(engine, lsock)
    handler.setLevel(loglevel)
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger
def local_logger(logname, loglevel=logging.DEBUG):
    """Attach a timestamped console StreamHandler to logger *logname*.

    Returns the configured logger, or None when a StreamHandler is
    already attached (avoids duplicate console output).
    """
    level = integer_loglevel(loglevel)
    logger = logging.getLogger(logname)
    if any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        # don't add a second StreamHandler
        return
    handler = logging.StreamHandler()
    handler.setLevel(level)
    formatter = logging.Formatter("%(asctime)s.%(msecs).03d [%(name)s] %(message)s",
                                  datefmt="%Y-%m-%d %H:%M:%S")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
| bsd-3-clause |
sysadmin75/ansible-modules-core | network/junos/junos_config.py | 19 | 6663 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on remote devices running Junos
description:
- The M(junos_config) module provides an abstraction for working
with the configuration running on remote devices. It can perform
operations that influence the configuration state.
- This module provides an implementation for configuring Juniper
JUNOS devices. The configuration statements must start with either
`set` or `delete` and are compared against the current device
configuration and only changes are pushed to the device.
extends_documentation_fragment: junos
options:
lines:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
file in role or playbook root folder in templates directory.
required: false
default: null
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0
required: false
default: null
zeroize:
description:
- The C(zeroize) argument is used to completely ssantaize the
remote device configuration back to initial defaults. This
argument will effectively remove all current configuration
statements on the remote device
required: false
default: null
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_config
replace:
description:
- The C(replace) argument will instruct the remote device to
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuraiton loaded
from this module.
required: true
default: false
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
- name: load configuration lines in device
junos_config:
lines:
- set system host-name {{ inventory_hostname }}
- delete interfaces ge-0/0/0 description
comment: update config
- name: rollback the configuration to id 10
junos_config:
rollback: 10
- name: zero out the current configuration
junos_config:
zeroize: yes
- name: confirm a candidate configuration
junos_config:
"""
import re
DEFAULT_COMMENT = 'configured by junos_config'
def diff_config(candidate, config, module=None):
    """Return the candidate lines that would change *config*.

    *candidate* is a list of `set ...` / `delete ...` statements;
    *config* is the device's current config in `set` format.  `set`
    lines already present are dropped; `delete` lines are kept when
    they would remove something (or when config is empty).

    Raises ValueError on a line that starts with neither `set` nor
    `delete` (or calls ``module.fail_json`` when *module* is given).
    """
    updates = set()

    for line in candidate:
        parts = line.split()
        action = parts[0]
        cfgline = ' '.join(parts[1:])

        if action not in ['set', 'delete']:
            # Bug fix: the original called ``module.fail_json`` here, but
            # ``module`` was only ever a *local* inside main(), so invalid
            # input crashed with NameError instead of failing cleanly.
            msg = 'line must start with either `set` or `delete`'
            if module is not None:
                module.fail_json(msg=msg)
            raise ValueError(msg)
        elif action == 'set' and cfgline not in config:
            updates.add(line)
        elif action == 'delete' and not config:
            updates.add(line)
        elif action == 'delete':
            for cfg in config:
                if cfg.startswith(cfgline):
                    # NOTE(review): adds the bare statement without the
                    # `delete ` prefix, unlike the other branches — looks
                    # suspicious but preserved; confirm intent upstream.
                    updates.add(cfgline)

    return list(updates)
def main():
    """Ansible entry point: load lines, rollback, or zeroize a Junos device."""
    argument_spec = dict(
        lines=dict(type='list'),
        rollback=dict(type='int'),
        zeroize=dict(default=False, type='bool'),
        confirm=dict(default=0, type='int'),
        comment=dict(default=DEFAULT_COMMENT),
        replace=dict(default=False, type='bool'),
        transport=dict(default='netconf', choices=['netconf'])
    )

    # The three operations are alternatives; Ansible enforces this.
    mutually_exclusive = [('lines', 'rollback'), ('lines', 'zeroize'),
                          ('rollback', 'zeroize')]

    module = get_module(argument_spec=argument_spec,
                        mutually_exclusive=mutually_exclusive,
                        supports_check_mode=True)

    rollback = module.params['rollback']
    zeroize = module.params['zeroize']
    comment = module.params['comment']
    confirm = module.params['confirm']

    if module.params['replace']:
        action = 'replace'
    else:
        action = 'merge'

    lines = module.params['lines']
    # In check mode, compute the diff but never commit.
    commit = not module.check_mode

    results = dict(changed=False)

    if lines:
        # Only push statements that actually differ from the running config.
        config = str(module.get_config(config_format='set')).split('\n')
        updates = diff_config(lines, config)
        if updates:
            updates = '\n'.join(updates)
            diff = module.load_config(updates, action=action, comment=comment,
                                      format='set', commit=commit, confirm=confirm)
            if diff:
                results['changed'] = True
                results['diff'] = dict(prepared=diff)
    elif rollback is not None:
        diff = module.rollback_config(rollback, commit=commit)
        if diff:
            results['changed'] = True
            results['diff'] = dict(prepared=diff)
    elif zeroize:
        # Destructive: wipe the device back to factory defaults.
        if not module.check_mode:
            module.run_commands('request system zeroize')
        results['changed'] = True

    module.exit_json(**results)
from ansible.module_utils.basic import *
from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
| gpl-3.0 |
quentinlautischer/291MiniProject2 | lib/python3.5/site-packages/bsddb3/tests/test_compat.py | 1 | 6164 | """
Copyright (c) 2008-2015, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import os, string
import unittest
from .test_all import db, hashopen, btopen, rnopen, verbose, \
get_new_database_path
class CompatibilityTestCase(unittest.TestCase):
    """Exercise the legacy bsddb-style btopen/hashopen/rnopen interfaces."""

    def setUp(self):
        # Fresh database file path per test.
        self.filename = get_new_database_path()

    def tearDown(self):
        try:
            os.remove(self.filename)
        except os.error:
            pass

    def test01_btopen(self):
        self.do_bthash_test(btopen, 'btopen')

    def test02_hashopen(self):
        self.do_bthash_test(hashopen, 'hashopen')

    def test03_rnopen(self):
        # Recno databases are keyed by 1-based record numbers.
        data = "The quick brown fox jumped over the lazy dog.".split()
        if verbose:
            print("\nTesting: rnopen")

        f = rnopen(self.filename, 'c')
        for x in range(len(data)):
            f[x+1] = data[x]

        getTest = (f[1], f[2], f[3])
        if verbose:
            print('%s %s %s' % getTest)

        self.assertEqual(getTest[1], 'quick', 'data mismatch!')

        rv = f.set_location(3)
        if rv != (3, 'brown'):
            self.fail('recno database set_location failed: '+repr(rv))

        f[25] = 'twenty-five'
        f.close()
        del f

        # Reopen read-write and check key-type/missing-key behavior.
        f = rnopen(self.filename, 'w')
        f[20] = 'twenty'

        def noRec(f):
            rec = f[15]
        self.assertRaises(KeyError, noRec, f)

        def badKey(f):
            rec = f['a string']
        self.assertRaises(TypeError, badKey, f)

        del f[3]

        # Iterate the full cursor; next() raises KeyError at the end.
        rec = f.first()
        while rec:
            if verbose:
                print(rec)
            try:
                rec = next(f)
            except KeyError:
                break

        f.close()

    def test04_n_flag(self):
        # 'n' truncates/creates a new database.
        f = hashopen(self.filename, 'n')
        f.close()

    def do_bthash_test(self, factory, what):
        """Shared btree/hash scenario: create, populate, iterate, reopen."""
        if verbose:
            print('\nTesting: ', what)

        f = factory(self.filename, 'c')
        if verbose:
            print('creation...')

        # truth test
        if f:
            if verbose: print("truth test: true")
        else:
            if verbose: print("truth test: false")

        f['0'] = ''
        f['a'] = 'Guido'
        f['b'] = 'van'
        f['c'] = 'Rossum'
        f['d'] = 'invented'
        # 'e' intentionally left out
        f['f'] = 'Python'
        if verbose:
            print('%s %s %s' % (f['a'], f['b'], f['c']))

        if verbose:
            print('key ordering...')
        start = f.set_location(f.first()[0])
        if start != ('0', ''):
            self.fail("incorrect first() result: "+repr(start))
        while 1:
            try:
                rec = next(f)
            except KeyError:
                self.assertEqual(rec, f.last(), 'Error, last <> last!')
                f.previous()
                break
            if verbose:
                print(rec)

        self.assertTrue('f' in f, 'Error, missing key!')

        # test that set_location() returns the next nearest key, value
        # on btree databases and raises KeyError on others.
        if factory == btopen:
            e = f.set_location('e')
            if e != ('f', 'Python'):
                self.fail('wrong key,value returned: '+repr(e))
        else:
            try:
                e = f.set_location('e')
            except KeyError:
                pass
            else:
                self.fail("set_location on non-existent key did not raise KeyError")

        f.sync()
        f.close()

        # truth test
        # A closed handle must raise DBError even on a bool check.
        try:
            if f:
                if verbose: print("truth test: true")
            else:
                if verbose: print("truth test: false")
        except db.DBError:
            pass
        else:
            self.fail("Exception expected")

        del f

        if verbose:
            print('modification...')
        f = factory(self.filename, 'w')
        f['d'] = 'discovered'

        if verbose:
            print('access...')
        for key in list(f.keys()):
            word = f[key]
            if verbose:
                print(word)

        def noRec(f):
            rec = f['no such key']
        self.assertRaises(KeyError, noRec, f)

        def badKey(f):
            rec = f[15]
        self.assertRaises(TypeError, badKey, f)

        f.close()
#----------------------------------------------------------------------
def test_suite():
    """Build the TestSuite for this module (hook for the package test runner)."""
    return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| apache-2.0 |
ipafw/version1.0 | lib/ipafw_protocol_check.py | 2 | 6061 | import yaml
import pyshark
import time
import os
import re
import pyshark
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from collections import OrderedDict
from sip_message_validation import SipMessageValidation
from http_message_validation import HTTPMessageValidation
from cmn_Pass_Fail import Pass_Fail,html_fail,html_pass,FAIL,PASS
from parse_global_variables import parse_TcVariable, ParseGlobalVariable
from ipafw_mmse_proto import MMSEMessageValidation
class ipafwProtocolCheck () :
    """Apply a list of PROTOCOL_CHECK rules to a captured pyshark packet.

    Two rule TYPEs are supported: "method_match" (compare a protocol
    message/method id) and "parse_header" (extract a header value per
    LAYER and optionally SETVAR it or MATCH it against a value/variable).
    """

    def __init__ (self, ipafw_self) :
        # Back-reference to the framework object providing per-layer
        # header extractors (frame/http/mmse/diameter).
        self.ipafw = ipafw_self
        self.mmse_mv_instance = MMSEMessageValidation ()

    def doProtocolCheck (self, pkt, in_dict, protocol_check_list, identify=False) :
        """Run every rule in *protocol_check_list* against *pkt*.

        With identify=True, returns True only when ALL rules matched
        (used to identify a packet); otherwise returns True after
        processing.  Returns False/None early on definite mismatches.
        """
        pstr = "frameworkTestDriver: doProtocolCheck: "
        result = 0  # count of rules that matched
        print "\t-----------------------------------------------------------------\n"
        result_max = len (protocol_check_list)
        for protocol_check_dict in protocol_check_list :
            print pstr, "PROTOCOL CHECK DICT: ", protocol_check_dict
            ptype = protocol_check_dict ["TYPE"]
            print pstr, "PROTOCOL CHECK TYPE IS: ", ptype
            if ptype == "method_match" :
                if protocol_check_dict ["LAYER"] == "mmse" :
                    # Packet must carry an MMSE layer with a message type.
                    if hasattr (pkt, "mmse") == False:
                        return False
                    if hasattr (pkt.mmse, "message_type") == False:
                        return False
                    mname = protocol_check_dict ["METHOD_NAME"]
                    mID = protocol_check_dict ["METHOD_ID"]
                    packet_mID = self.mmse_mv_instance.getMMSEMessageType (pkt)
                    if packet_mID == mID :
                        result = result + 1
                        print pstr, "Method name: ", mname, " MATCH FOUND"
                        print pstr, "frame number is: ", pkt.frame_info.number, "result: ", result
                    else :
                        print pstr, "Method name: ", mname, " MATCH NOT FOUND"
                        return False
            elif ptype == "parse_header" :
                # Dispatch the header lookup to the layer-specific helper.
                if protocol_check_dict ["LAYER"] == "frame" :
                    hdr_value = self.ipafw.frame.getFrameHeaderValue (pkt, in_dict, protocol_check_dict)
                if protocol_check_dict ["LAYER"] == "http" :
                    hdr_value = self.ipafw.http.getHTTPHeaderValue (pkt, in_dict, protocol_check_dict)
                if protocol_check_dict ["LAYER"] == "mmse" :
                    hdr_value = self.ipafw.mmse.getMMSEHeaderValue (pkt, in_dict, protocol_check_dict)
                if protocol_check_dict ["LAYER"] == "diameter" :
                    hdr_value = self.ipafw.dia.getDiameterHeaderValue (pkt, in_dict, protocol_check_dict)
                if "SETVAR" in protocol_check_dict :
                    # Stash the extracted value for later GETVAR rules.
                    hdr_set_var = protocol_check_dict ["SETVAR"]
                    in_dict ["VARIABLES"][hdr_set_var] = hdr_value
                #NOTE: Check this below 'if' condition with 'MATCH' value in validation file
                if "MATCH" in protocol_check_dict :
                    if hdr_value == None :
                        return None
                    match = protocol_check_dict ["MATCH"]
                    print pstr, "MATCH VALUE IS: ", match
                    # Expected value comes from a saved variable or a literal.
                    if "GETVAR" in protocol_check_dict :
                        hdr_get_var = protocol_check_dict ["GETVAR"]
                        hdr_get_val = in_dict ["VARIABLES"][hdr_get_var]
                        print pstr, "HEADER GETVAR IS: ", hdr_get_var, " HEADER GET VALUE IS: ", hdr_get_val
                    elif "VALUE" in protocol_check_dict :
                        hdr_get_val = protocol_check_dict ["VALUE"]
                        print pstr, "HEADER VALUE IS: ", hdr_get_val
                    if match == "exists" :
                        # substring containment test
                        if hdr_get_val in hdr_value :
                            result = result + 1
                            print pstr, "HEADER NAME: ", protocol_check_dict ["HEADER_NAME"], " MATCHED: ", hdr_value
                        else :
                            print pstr, "HEADER NAME: ", protocol_check_dict ["HEADER_NAME"], " NOT MATCHED: ", hdr_value
                            continue
                    if match == "equals" :
                        if hdr_get_val == hdr_value :
                            result = result + 1
                            print pstr, "HEADER NAME: ", protocol_check_dict ["HEADER_NAME"], " MATCHED: ", hdr_value
                        else :
                            print pstr, "HEADER NAME: ", protocol_check_dict ["HEADER_NAME"], " NOT MATCHED: ", hdr_value
                            continue
        # We matched necessary src/dst checks. Skip rest of the packets
        # Check results, only for IDENTIFY_PACKET section of PROTOCOL_CHECKs
        if identify :
            if result == result_max :
                print pstr, "result: ", result, " matched with result_max: ", result_max
                print pstr, "frame number is: ", pkt.frame_info.number
                return True
            print "\t-----------------------------------------------------------------\n"
            return False
        else :
            return True
if __name__ == '__main__':
'''
obj.resetTestContext ()
print " Reset MSISDN A current dict: ", obj.current_dict ["YAML_VARIABLE_INPUTS"][0]["YAML_MSISDN_A"]
print " Reset MSISDN A context dict: ", obj.context_dict ["YAML_VARIABLE_INPUTS"]["YAML_MSISDN_A"]
print " Reset IMSI A context dict: ", obj.context_dict ["YAML_VARIABLE_INPUTS"]["YAML_IMSI_A"]
print " Reset MSISDN B context dict: ", obj.context_dict ["YAML_VARIABLE_INPUTS"]["YAML_MSISDN_B"]
print " Reset IMSI B context dict: ", obj.context_dict ["YAML_VARIABLE_INPUTS"]["YAML_IMSI_B"]
'''
pass
| gpl-2.0 |
nashve/mythbox | resources/lib/twisted/twisted/protocols/finger.py | 81 | 1246 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""The Finger User Information Protocol (RFC 1288)"""
from twisted.protocols import basic
import string
class Finger(basic.LineReceiver):
    """Server side of the Finger User Information Protocol (RFC 1288).

    Parses one query line and dispatches to getUser / getDomain /
    forwardQuery.  The default implementations refuse every query;
    subclass and override them to serve real data.
    """

    def lineReceived(self, line):
        # A query looks like "[/W] [user][@host]"; a leading /W token
        # requests verbose output (signalled via slash_w).
        parts = string.split(line)
        if not parts:
            parts = ['']
        if len(parts) == 1:
            slash_w = 0
        else:
            slash_w = 1
        user = parts[-1]
        if '@' in user:
            host_place = string.rfind(user, '@')
            # Bug fix: extract the host *before* truncating user; the
            # original sliced host out of the already-shortened string,
            # so the forwarded host was always empty.
            host = user[host_place+1:]
            user = user[:host_place]
            return self.forwardQuery(slash_w, user, host)
        if user:
            return self.getUser(slash_w, user)
        else:
            return self.getDomain(slash_w)

    def _refuseMessage(self, message):
        # Send a one-line refusal and drop the connection.
        self.transport.write(message+"\n")
        self.transport.loseConnection()

    def forwardQuery(self, slash_w, user, host):
        """Handle a user@host forwarding query; refused by default."""
        self._refuseMessage('Finger forwarding service denied')

    def getDomain(self, slash_w):
        """Handle an empty query (list online users); refused by default."""
        self._refuseMessage('Finger online list denied')

    def getUser(self, slash_w, user):
        """Handle a plain user query; refused by default."""
        self.transport.write('Login: '+user+'\n')
        self._refuseMessage('No such user')
| gpl-2.0 |
pipermerriam/django | tests/gis_tests/geos_tests/test_geos_mutation.py | 183 | 5388 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from unittest import skipUnless
from django.contrib.gis.geos import (
HAS_GEOS, LinearRing, LineString, MultiPoint, Point, Polygon, fromstr,
)
# Thin wrappers over the read-only GEOSGeometry API, used to compare two
# geometries attribute-by-attribute; collected by name prefix below.
def api_get_distance(x):
    return x.distance(Point(-200, -200))

def api_get_buffer(x):
    return x.buffer(10)

def api_get_geom_typeid(x):
    return x.geom_typeid

def api_get_num_coords(x):
    return x.num_coords

def api_get_centroid(x):
    return x.centroid

def api_get_empty(x):
    return x.empty

def api_get_valid(x):
    return x.valid

def api_get_simple(x):
    return x.simple

def api_get_ring(x):
    return x.ring

def api_get_boundary(x):
    return x.boundary

def api_get_convex_hull(x):
    return x.convex_hull

def api_get_extent(x):
    return x.extent

def api_get_area(x):
    return x.area

def api_get_length(x):
    return x.length

# Every api_get_* callable defined above, gathered from the module namespace.
geos_function_tests = [val for name, val in vars().items()
                       if hasattr(val, '__call__')
                       and name.startswith('api_get_')]
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSMutationTest(unittest.TestCase):
    """
    Tests Pythonic Mutability of Python GEOS geometry wrappers
    get/set/delitem on a slice, normal list methods
    """

    def test00_GEOSIndexException(self):
        'Testing Geometry IndexError'
        p = Point(1, 2)
        # Valid coordinate indices for a 2D point are -2..1.
        for i in range(-2, 2):
            p._checkindex(i)
        self.assertRaises(IndexError, p._checkindex, 2)
        self.assertRaises(IndexError, p._checkindex, -3)

    def test01_PointMutations(self):
        'Testing Point mutations'
        # Same checks for a constructed and a WKT-parsed point.
        for p in (Point(1, 2, 3), fromstr('POINT (1 2 3)')):
            self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')

            # _set_single
            p._set_single(0, 100)
            self.assertEqual(p.coords, (100.0, 2.0, 3.0), 'Point _set_single')

            # _set_list
            p._set_list(2, (50, 3141))
            self.assertEqual(p.coords, (50.0, 3141.0), 'Point _set_list')

    def test02_PointExceptions(self):
        'Testing Point exceptions'
        # A point takes 2 or 3 coordinates; 1 or 4 must fail.
        self.assertRaises(TypeError, Point, range(1))
        self.assertRaises(TypeError, Point, range(4))

    def test03_PointApi(self):
        'Testing Point API'
        q = Point(4, 5, 3)
        for p in (Point(1, 2, 3), fromstr('POINT (1 2 3)')):
            p[0:2] = [4, 5]
            # After mutation, p must agree with q on every API helper.
            for f in geos_function_tests:
                self.assertEqual(f(q), f(p), 'Point ' + f.__name__)

    def test04_LineStringMutations(self):
        'Testing LineString mutations'
        for ls in (LineString((1, 0), (4, 1), (6, -1)),
                   fromstr('LINESTRING (1 0,4 1,6 -1)')):
            self.assertEqual(ls._get_single_external(1), (4.0, 1.0), 'LineString _get_single_external')

            # _set_single
            ls._set_single(0, (-50, 25))
            self.assertEqual(ls.coords, ((-50.0, 25.0), (4.0, 1.0), (6.0, -1.0)), 'LineString _set_single')

            # _set_list
            ls._set_list(2, ((-50.0, 25.0), (6.0, -1.0)))
            self.assertEqual(ls.coords, ((-50.0, 25.0), (6.0, -1.0)), 'LineString _set_list')

            lsa = LineString(ls.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)

    def test05_Polygon(self):
        'Testing Polygon mutations'
        for pg in (Polygon(((1, 0), (4, 1), (6, -1), (8, 10), (1, 0)),
                           ((5, 4), (6, 4), (6, 3), (5, 4))),
                   fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
            self.assertEqual(pg._get_single_external(0),
                             LinearRing((1, 0), (4, 1), (6, -1), (8, 10), (1, 0)),
                             'Polygon _get_single_external(0)')
            self.assertEqual(pg._get_single_external(1),
                             LinearRing((5, 4), (6, 4), (6, 3), (5, 4)),
                             'Polygon _get_single_external(1)')

            # _set_list
            pg._set_list(2, (((1, 2), (10, 0), (12, 9), (-1, 15), (1, 2)),
                             ((4, 2), (5, 2), (5, 3), (4, 2))))
            self.assertEqual(
                pg.coords,
                (((1.0, 2.0), (10.0, 0.0), (12.0, 9.0), (-1.0, 15.0), (1.0, 2.0)),
                 ((4.0, 2.0), (5.0, 2.0), (5.0, 3.0), (4.0, 2.0))),
                'Polygon _set_list')

            lsa = Polygon(*pg.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)

    def test06_Collection(self):
        'Testing Collection mutations'
        for mp in (MultiPoint(*map(Point, ((3, 4), (-1, 2), (5, -4), (2, 8)))),
                   fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
            self.assertEqual(mp._get_single_external(2), Point(5, -4), 'Collection _get_single_external')

            mp._set_list(3, map(Point, ((5, 5), (3, -2), (8, 1))))
            self.assertEqual(mp.coords, ((5.0, 5.0), (3.0, -2.0), (8.0, 1.0)), 'Collection _set_list')

            lsa = MultiPoint(*map(Point, ((5, 5), (3, -2), (8, 1))))
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
| bsd-3-clause |
bala4901/odoo | openerpcommand/module.py | 10 | 2435 | """
Show module information for a given database or from the file-system.
"""
import os
import sys
import textwrap
from . import common
# TODO provide a --rpc flag to use XML-RPC (with a specific username) instead
# of server-side library.
def run(args):
    """List OpenERP modules, either from the addons path or from a database.

    args.filesystem selects filesystem mode; otherwise a registry is
    opened for args.database and ir.module.module rows are printed.
    """
    assert args.database
    import openerp
    config = openerp.tools.config
    # Keep server-side logging quiet while we print to stdout.
    config['log_handler'] = [':CRITICAL']
    if args.addons:
        args.addons = args.addons.split(':')
    else:
        args.addons = []
    config['addons_path'] = ','.join(args.addons)
    openerp.netsvc.init_logger()
    if args.filesystem:
        module_names = common.get_addons_from_paths(args.addons, [])
        print "Modules (addons path %s):" % (', '.join(args.addons),)
        for x in sorted(module_names):
            print x
    else:
        # Server-side registry access (no XML-RPC); do not update modules.
        registry = openerp.modules.registry.RegistryManager.get(
            args.database, update_module=False)
        xs = []
        ir_module_module = registry.get('ir.module.module')
        with registry.cursor() as cr:
            ids = ir_module_module.search(cr, openerp.SUPERUSER_ID, [], {})
            xs = ir_module_module.read(cr, openerp.SUPERUSER_ID, ids, [], {})
        if xs:
            print "Modules (database `%s`):" % (args.database,)
            for x in xs:
                if args.short:
                    print '%3d %s' % (x['id'], x['name'])
                else:
                    print '%3d %s %s' % (x['id'], x['name'], {'installed': '(installed)'}.get(x['state'], ''))
        else:
            print "No module found (database `%s`)." % (args.database,)
def add_parser(subparsers):
    """Register the `module` sub-command and its options on *subparsers*."""
    p = subparsers.add_parser('module',
        description='Display modules known from a given database or on file-system.')
    # Database selection (required unless the DATABASE env default applies).
    p.add_argument('-d', '--database', metavar='DATABASE',
        **common.required_or_default('DATABASE', 'the database to modify'))
    common.add_addons_argument(p)
    p.add_argument('-m', '--module', metavar='MODULE', required=False,
        help='the module for which information should be shown')
    # The three boolean flags share the same action; wire them in a loop.
    for flags, text in (
            (('-v', '--verbose'), 'display more information'),
            (('--short',), 'display less information'),
            (('-f', '--filesystem'), 'display module in the addons path, not in db')):
        p.add_argument(*flags, action='store_true', help=text)
    # Dispatch target invoked by the command framework.
    p.set_defaults(run=run)
| agpl-3.0 |
xylar/acciv | tests/convertImages/Vicar.py | 2 | 6017 | #!/usr/bin/python
import numpy
# based on the vicar2png module by Jessica McKellar (jesstess at mit.edu)
# substantial modifications have been made to the code. However for
# thoroughness, I am including her Copyright under the MIT License below:
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Jessica McKellar
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
'''
class VICARMetadata(object):
    """
    Contains VICAR metadata accessible as uppercase class attributes,
    e.g.:

        vicar.RECSIZE
        vicar.FORMAT
    """

    def __init__(self, metadata):
        """
        metadata: A dictionary of VICAR label/value pairs.
        """
        # .items() works on both Python 2 and Python 3; the original used
        # .iteritems(), which does not exist on Python 3.
        for key, value in metadata.items():
            # Purely numeric values (e.g. LBLSIZE, RECSIZE) become ints.
            # Negative numbers and floats fail isdigit() and stay strings.
            if value.isdigit():
                value = int(value)
            setattr(self, key.upper(), value)
def addMetadataToDict(metadata, metadata_dict):
    """Parse VICAR label text ("TAG=VALUE TAG=VALUE ...") into metadata_dict.

    Values may be quoted ('...') or parenthesized lists; spaces inside
    quotes/parens are kept.  A value is only committed when a terminating
    space is seen, so a trailing unterminated pair is dropped (VICAR labels
    are space-padded, so in practice every pair terminates).
    Returns metadata_dict (mutated in place).
    """
    # Simple two-state scanner: collecting a tag name, or collecting a value.
    gettingTag = True
    has_lparen = False   # currently inside ( ... )
    has_lquote = False   # currently inside ' ... '
    tag_buf = ''
    for char in metadata:
        if gettingTag:
            if char == '=':
                # Tag complete; switch to value-collection state.
                tag = tag_buf
                tag_buf = ''
                gettingTag = False
                has_lparen = False
                has_lquote = False
            elif char != ' ':
                tag_buf += char
        else: # getting value
            if char == "'":
                # Toggle quote state; keep the quote char only when nested
                # inside parentheses.
                has_lquote = not has_lquote
                if has_lparen:
                    tag_buf += char
            elif char == "(" and not has_lquote:
                has_lparen = True
                tag_buf += char
            elif char == ")" and not has_lquote:
                has_lparen = False
                tag_buf += char
            elif char == " " and tag_buf and not (has_lquote or has_lparen):
                # We have a full value, save it.
                value = tag_buf
                metadata_dict[tag] = value
                gettingTag = True
                has_lparen = False
                has_lquote = False
                tag_buf = ""
            elif char == " " and not has_lquote:
                # Padding space before the value starts: skip it.
                continue
            else:
                tag_buf += char
    return metadata_dict
def process_metadata(metadata_fd):
    """Read the VICAR label (and optional end-of-file label) from metadata_fd.

    Returns a VICARMetadata object.  Closes metadata_fd before returning.
    Raises ValueError if the file does not start with a valid LBLSIZE field.
    """
    # A VICAR file must start with 'LBLSIZE=<integer label size>'.
    lblsize_field = metadata_fd.read(len("LBLSIZE="))
    if lblsize_field.upper() != "LBLSIZE=":
        raise ValueError("Malformed VICAR file: doesn't start with LBLSIZE.")
    # Accumulate digits until the terminating space.
    lblsize = ""
    while True:
        char = metadata_fd.read(1)
        if char == " ":
            break
        else:
            lblsize += char
    try:
        lblsize = int(lblsize)
    except ValueError:
        raise ValueError("Malformed VICAR file: contains non-integer LBLSIZE.")
    # Read in the rest of the VICAR metadata.
    metadata_fd.seek(0)
    metadata = metadata_fd.read(lblsize)
    metadata_dict = {}
    metadata_dict = addMetadataToDict(metadata, metadata_dict)
    vicar = VICARMetadata(metadata_dict)
    # EOL == 1 means a second label block follows the pixel data; merge it in.
    if(hasattr(vicar, 'EOL')):
        if vicar.EOL == 1:
            # Bytes per pixel, needed to locate the end-of-file label.
            if vicar.FORMAT == 'BYTE':
                byteCount = 1
            elif vicar.FORMAT == 'HALF':
                byteCount = 2
            elif vicar.FORMAT == 'FULL':
                byteCount = 4
            elif vicar.FORMAT == 'REAL':
                byteCount = 4
            elif vicar.FORMAT == 'DOUB':
                byteCount = 8
            else:
                raise ValueError('Unrecognized Vicar FORMAT: %s in file: %s'%(vicar.FORMAT,metadata_fd.name))
            # Read in the VICAR metadata from the end of the file:
            # label + binary header + N1*N2*N3 pixels.
            metadata_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE
                             + byteCount*vicar.N1*vicar.N2*vicar.N3)
            # A VICAR file must start with 'LBLSIZE=<integer label size>'.
            lblsize_field = metadata_fd.read(len("LBLSIZE="))
            if lblsize_field.upper() != "LBLSIZE=":
                raise ValueError("Malformed VICAR file: EOL doesn't start with LBLSIZE.")
            lblsize = ""
            while True:
                char = metadata_fd.read(1)
                if char == " ":
                    break
                else:
                    lblsize += char
            try:
                lblsize = int(lblsize)
            except ValueError:
                raise ValueError("Malformed VICAR file: contains non-integer LBLSIZE.")
            # Re-seek to the start of the EOL label and merge its pairs
            # (later pairs overwrite earlier ones in metadata_dict).
            metadata_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE
                             + byteCount*vicar.N1*vicar.N2*vicar.N3)
            metadata = metadata_fd.read(lblsize)
            metadata_dict = addMetadataToDict(metadata, metadata_dict)
    metadata_fd.close()
    return VICARMetadata(metadata_dict)
def extract_image(vicar, image_fd):
    """Read the first image band from an open binary VICAR file.

    vicar:    metadata object with LBLSIZE, NLB, RECSIZE, FORMAT, ORG,
              NB, NS, NL attributes (as produced by process_metadata).
    image_fd: file object opened in binary mode.

    Returns a 2-D numpy array of shape (NL, NS).
    Raises ValueError for an unsupported FORMAT or a non-BSQ ORG.
    """
    # Skip the ASCII label and the NLB binary-header records.
    image_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE)
    # Map the VICAR pixel format to the matching numpy dtype.
    if vicar.FORMAT == 'BYTE':
        outType = numpy.int8
    elif vicar.FORMAT == 'HALF':
        outType = numpy.int16
    elif vicar.FORMAT == 'FULL':
        outType = numpy.int32
    elif vicar.FORMAT == 'REAL':
        outType = numpy.float32
    elif vicar.FORMAT == 'DOUB':
        outType = numpy.float64
    else:
        raise ValueError('Unrecognized Vicar FORMAT: %s in file: %s'%(vicar.FORMAT,image_fd.name))
    # Only band-sequential organization is supported.
    if vicar.ORG != 'BSQ':
        raise ValueError('Vicar ORG: %i is not supported.'%vicar.ORG)
    if vicar.NB > 1:
        # Single-argument print() is valid on both Python 2 and 3; the
        # original used a Python-2-only print statement.
        print('Reading only the first image of %i images in the file'%vicar.NB)
    nx = vicar.NS
    ny = vicar.NL
    # Read exactly one band (nx*ny pixels) and reshape row-major.
    image = numpy.fromfile(image_fd,dtype=outType,count=nx*ny).reshape(ny,nx)
    return image
def readVicar(infile):
    """Read the VICAR file at path *infile*.

    Returns (image, metadata): the first image band as a numpy array and
    the parsed VICARMetadata.
    """
    metadata_fd = open(infile, "r")
    # process_metadata closes metadata_fd itself before returning.
    vicar_metadata = process_metadata(metadata_fd)
    image_fd = open(infile, "rb")
    try:
        image = extract_image(vicar_metadata, image_fd)
    finally:
        # The original leaked this handle; always close it.
        image_fd.close()
    return (image,vicar_metadata)
| mit |
RobertABT/heightmap | build/matplotlib/examples/axes_grid/make_room_for_ylabel_using_axesgrid.py | 15 | 1723 | from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    def ex1():
        # Single axes: auto-adjust so a long y tick label fits in the figure.
        plt.figure(1)
        ax = plt.axes([0,0,1,1])
        # ax = plt.subplot(111)
        ax.set_yticks([0.5])
        ax.set_yticklabels(["very long label"])

        make_axes_area_auto_adjustable(ax)

    def ex2():
        # Two stacked axes adjusted together: both share the same margins.
        plt.figure(2)
        ax1 = plt.axes([0,0,1,0.5])
        ax2 = plt.axes([0,0.5,1,0.5])

        ax1.set_yticks([0.5])
        ax1.set_yticklabels(["very long label"])
        ax1.set_ylabel("Y label")

        ax2.set_title("Title")

        make_axes_area_auto_adjustable(ax1, pad=0.1, use_axes=[ax1, ax2])
        make_axes_area_auto_adjustable(ax2, pad=0.1, use_axes=[ax1, ax2])

    def ex3():
        # Divider-based layout: a second axes appended to the right,
        # with per-side auto-adjustable areas.
        fig = plt.figure(3)
        ax1 = plt.axes([0,0,1,1])
        divider = make_axes_locatable(ax1)

        ax2 = divider.new_horizontal("100%", pad=0.3, sharey=ax1)
        ax2.tick_params(labelleft="off")
        fig.add_axes(ax2)

        divider.add_auto_adjustable_area(use_axes=[ax1], pad=0.1,
                                         adjust_dirs=["left"])
        divider.add_auto_adjustable_area(use_axes=[ax2], pad=0.1,
                                         adjust_dirs=["right"])
        divider.add_auto_adjustable_area(use_axes=[ax1, ax2], pad=0.1,
                                         adjust_dirs=["top", "bottom"])

        ax1.set_yticks([0.5])
        ax1.set_yticklabels(["very long label"])

        ax2.set_title("Title")
        ax2.set_xlabel("X - Label")

    ex1()
    ex2()
    ex3()

    plt.show()
| mit |
eventql/eventql | deps/3rdparty/spidermonkey/mozjs/python/pyyaml/lib/yaml/serializer.py | 560 | 4171 |
__all__ = ['Serializer', 'SerializerError']
from error import YAMLError
from events import *
from nodes import *
class SerializerError(YAMLError):
    """Raised when the Serializer is used in an invalid state
    (e.g. serializing before open() or after close())."""
    pass
class Serializer(object):
    """Turns a node graph into a stream of YAML events via self.emit().

    Designed as a mixin: emit(), descend_resolver(), ascend_resolver() and
    resolve() are provided by the emitter/resolver classes it is combined
    with.  Lifecycle: open() -> serialize(node)* -> close().
    """

    # Template for generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = u'id%03d'

    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # Nodes already emitted in the current document (emitted again as
        # aliases), and node -> anchor-name (or None) mapping.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # closed is a tri-state: None = never opened, False = open,
        # True = closed.
        self.closed = None

    def open(self):
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")

    def close(self):
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True

    #def __del__(self):
    #    self.close()

    def serialize(self, node):
        """Emit one document containing *node* and its descendants."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        # First pass assigns anchors to shared nodes, second pass emits.
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset per-document state.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        """Record *node*; on a second visit give it a real anchor name."""
        if node in self.anchors:
            if self.anchors[node] is None:
                # Seen twice: the node is shared, so it needs an anchor.
                self.anchors[node] = self.generate_anchor(node)
        else:
            # First visit: mark as seen and recurse into children.
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id

    def serialize_node(self, node, parent, index):
        """Emit the event(s) for *node*, or an alias if already emitted."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit = (matches plain-style resolution,
                #             matches quoted-style resolution); tells the
                # emitter whether the tag can be omitted.
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
| agpl-3.0 |
chiller/pytest | _pytest/standalonetemplate.py | 203 | 3281 | #! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# a fully functional basic pytest script.
#
# Pytest is a thing that tests packages, pytest itself is a package that some-
# one might want to install, especially if they're looking to run tests inside
# some package they want to install. Pytest has a lot of code to collect and
# execute tests, and other such sort of "tribal knowledge" that has been en-
# coded in its code base. Because of this we basically include a basic copy
# of pytest inside this blob. We do this because it let's you as a maintainer
# or application developer who wants people who don't deal with python much to
# easily run tests without installing the complete pytest package.
#
# If you're wondering how this is created: you can create it yourself if you
# have a complete pytest installation by using this command on the command-
# line: ``py.test --genscript=runtests.py``.
sources = """
@SOURCES@"""
import sys
import base64
import zlib
class DictImporter(object):
    """PEP-302 meta-path importer that loads modules from an in-memory
    dict of {dotted module name: source string}.  Packages are stored
    under the key "<package>.__init__"."""

    def __init__(self, sources):
        self.sources = sources

    def find_module(self, fullname, path=None):
        if fullname == "argparse" and sys.version_info >= (2,7):
            # we were generated with <python2.7 (which pulls in argparse)
            # but we are running now on a stdlib which has it, so use that.
            return None
        if fullname in self.sources:
            return self
        if fullname + '.__init__' in self.sources:
            # Package module: its source lives under "<pkg>.__init__".
            return self
        return None

    def load_module(self, fullname):
        # print "load_module:",  fullname
        from types import ModuleType
        try:
            s = self.sources[fullname]
            is_pkg = False
        except KeyError:
            s = self.sources[fullname + '.__init__']
            is_pkg = True

        co = compile(s, fullname, 'exec')
        module = sys.modules.setdefault(fullname, ModuleType(fullname))
        module.__file__ = "%s/%s" % (__file__, fullname)
        module.__loader__ = self
        if is_pkg:
            module.__path__ = [fullname]

        # do_exec is defined at runtime by the __main__ bootstrap below
        # (exec is a statement on py2 and a function on py3).
        do_exec(co, module.__dict__) # noqa

        return sys.modules[fullname]

    def get_source(self, name):
        res = self.sources.get(name)
        if res is None:
            res = self.sources.get(name + '.__init__')
        return res
if __name__ == "__main__":
    try:
        import pkg_resources  # noqa
    except ImportError:
        sys.stderr.write("ERROR: setuptools not installed\n")
        sys.exit(2)
    if sys.version_info >= (3, 0):
        # Python 3: exec is a function, and the embedded blob must be
        # bytes before base64 decoding.
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        # Python 2: exec is a statement, so do_exec must be defined with
        # the statement form; cPickle is the fast pickle implementation.
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))

    # Install the in-memory importer ahead of the normal import machinery,
    # then run the embedded entry-point script.
    importer = DictImporter(sources)
    sys.meta_path.insert(0, importer)
    entry = "@ENTRY@"
    do_exec(entry, locals()) # noqa
| mit |
awkspace/ansible | lib/ansible/plugins/terminal/ironware.py | 49 | 2700 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    """Terminal plugin for Brocade Ironware devices: prompt and error
    detection, pager control, and enable-mode (become) handling."""

    # Prompts look like "user@host(config-if)# " or "host> ".
    terminal_stdout_re = [
        re.compile(br"[\r\n]?(?:\w+@)?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$")
    ]

    terminal_stderr_re = [
        re.compile(br"[\r\n]Error - "),
        re.compile(br"[\r\n](?:incomplete|ambiguous|unrecognised|invalid) (?:command|input)", re.I)
    ]

    def on_open_shell(self):
        # Disable paging immediately so command output is never interrupted
        # by --More-- prompts.
        self.disable_pager()

    def disable_pager(self):
        # (The original built an unused `cmd` dict here; removed.)
        try:
            self._exec_cli_command(u'terminal length 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to disable terminal pager')

    def on_become(self, passwd=None):
        # A prompt ending in '#' means we are already in enable mode.
        if self._get_prompt().strip().endswith(b'#'):
            return

        cmd = {u'command': u'enable'}
        if passwd:
            # Note: python-3.5 cannot combine u"" and r"" together.  Thus make
            # an r string and use to_text to ensure it's text on both py2 and py3.
            cmd[u'prompt'] = to_text(r"[\r\n]?password: ?$", errors='surrogate_or_strict')
            cmd[u'answer'] = passwd

        try:
            self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')

    def on_unbecome(self):
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return

        # Leave configuration mode first, then drop out of enable mode.
        if b'(config' in prompt:
            self._exec_cli_command(b'end')
            self._exec_cli_command(b'exit')
        elif prompt.endswith(b'#'):
            self._exec_cli_command(b'exit')
| gpl-3.0 |
PlunderKnowledge/SumCrawler | SumCrawlerProducer/SumCrawlerSpider/spiders/__init__.py | 1 | 1529 | # This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import scrapy
import functools
import requests
def _link_is_http(l):
return l.startswith('https://') or l.startswith('http://')
def _link_is_archive(l):
return l.endswith('.tar.gz')
class GnuSpider(scrapy.Spider):
    """Crawl gnu.org, yielding one item per .tar.gz archive found together
    with its detached GPG signature (empty string if unavailable)."""

    name = "gnu-barrymore"
    allowed_domains = ["gnu.org"]
    start_urls = ["http://www.gnu.org"]
    keyring_url = "http://ftp.gnu.org/gnu/gnu-keyring.gpg"
    # URLs already scheduled, so the same page is never crawled twice.
    visited = set()

    def parse(self, response):
        # All absolute http(s) links on the page.
        all_urls = [sel.extract() for sel in response.xpath('//a/@href') if _link_is_http(sel.extract())]
        # BUG FIX: the original filtered on `sel.extract()` — a stale
        # variable leaked from the previous comprehension — so the archive
        # filter never examined `url`.
        archive_urls = [url for url in all_urls if _link_is_archive(url)]
        sig_urls = [url + ".sig" for url in archive_urls]
        for (archive_url, sig_url) in zip(archive_urls, sig_urls):
            # NOTE(review): SumcrawlerspiderItem is not imported anywhere in
            # this module — presumably `from ..items import
            # SumcrawlerspiderItem` is required; confirm against the project.
            item = SumcrawlerspiderItem()
            item['fileUrl'] = archive_url
            # BUG FIX: was `sig_reponse = ...` then `sig_response.code`
            # (NameError typo), and requests exposes the HTTP status as
            # `.status_code`, not `.code`.
            sig_response = requests.get(sig_url)
            if sig_response.status_code == 200:
                item['signature'] = sig_response.text
            else:
                item['signature'] = ''
            item['signatureType'] = 'gpg'
            yield item

        # Follow every non-archive link we have not visited yet.
        follow_links = [url for url in all_urls if not _link_is_archive(url)]
        for link in follow_links:
            if link not in self.visited:
                self.visited.add(link)
                yield scrapy.Request(link, callback=self.parse)
HiSPARC/station-software | user/python/Lib/site-packages/win32comext/adsi/adsicon.py | 29 | 12541 | ADS_ATTR_CLEAR = ( 1 )
ADS_ATTR_UPDATE = ( 2 )
ADS_ATTR_APPEND = ( 3 )
ADS_ATTR_DELETE = ( 4 )
ADS_EXT_MINEXTDISPID = ( 1 )
ADS_EXT_MAXEXTDISPID = ( 16777215 )
ADS_EXT_INITCREDENTIALS = ( 1 )
ADS_EXT_INITIALIZE_COMPLETE = ( 2 )
ADS_SEARCHPREF_ASYNCHRONOUS = 0
ADS_SEARCHPREF_DEREF_ALIASES = 1
ADS_SEARCHPREF_SIZE_LIMIT = 2
ADS_SEARCHPREF_TIME_LIMIT = 3
ADS_SEARCHPREF_ATTRIBTYPES_ONLY = 4
ADS_SEARCHPREF_SEARCH_SCOPE = 5
ADS_SEARCHPREF_TIMEOUT = 6
ADS_SEARCHPREF_PAGESIZE = 7
ADS_SEARCHPREF_PAGED_TIME_LIMIT = 8
ADS_SEARCHPREF_CHASE_REFERRALS = 9
ADS_SEARCHPREF_SORT_ON = 10
ADS_SEARCHPREF_CACHE_RESULTS = 11
ADS_SEARCHPREF_DIRSYNC = 12
ADS_SEARCHPREF_TOMBSTONE = 13
ADS_SCOPE_BASE = 0
ADS_SCOPE_ONELEVEL = 1
ADS_SCOPE_SUBTREE = 2
ADS_SECURE_AUTHENTICATION = 0x1
ADS_USE_ENCRYPTION = 0x2
ADS_USE_SSL = 0x2
ADS_READONLY_SERVER = 0x4
ADS_PROMPT_CREDENTIALS = 0x8
ADS_NO_AUTHENTICATION = 0x10
ADS_FAST_BIND = 0x20
ADS_USE_SIGNING = 0x40
ADS_USE_SEALING = 0x80
ADS_USE_DELEGATION = 0x100
ADS_SERVER_BIND = 0x200
ADSTYPE_INVALID = 0
ADSTYPE_DN_STRING = ADSTYPE_INVALID + 1
ADSTYPE_CASE_EXACT_STRING = ADSTYPE_DN_STRING + 1
ADSTYPE_CASE_IGNORE_STRING = ADSTYPE_CASE_EXACT_STRING + 1
ADSTYPE_PRINTABLE_STRING = ADSTYPE_CASE_IGNORE_STRING + 1
ADSTYPE_NUMERIC_STRING = ADSTYPE_PRINTABLE_STRING + 1
ADSTYPE_BOOLEAN = ADSTYPE_NUMERIC_STRING + 1
ADSTYPE_INTEGER = ADSTYPE_BOOLEAN + 1
ADSTYPE_OCTET_STRING = ADSTYPE_INTEGER + 1
ADSTYPE_UTC_TIME = ADSTYPE_OCTET_STRING + 1
ADSTYPE_LARGE_INTEGER = ADSTYPE_UTC_TIME + 1
ADSTYPE_PROV_SPECIFIC = ADSTYPE_LARGE_INTEGER + 1
ADSTYPE_OBJECT_CLASS = ADSTYPE_PROV_SPECIFIC + 1
ADSTYPE_CASEIGNORE_LIST = ADSTYPE_OBJECT_CLASS + 1
ADSTYPE_OCTET_LIST = ADSTYPE_CASEIGNORE_LIST + 1
ADSTYPE_PATH = ADSTYPE_OCTET_LIST + 1
ADSTYPE_POSTALADDRESS = ADSTYPE_PATH + 1
ADSTYPE_TIMESTAMP = ADSTYPE_POSTALADDRESS + 1
ADSTYPE_BACKLINK = ADSTYPE_TIMESTAMP + 1
ADSTYPE_TYPEDNAME = ADSTYPE_BACKLINK + 1
ADSTYPE_HOLD = ADSTYPE_TYPEDNAME + 1
ADSTYPE_NETADDRESS = ADSTYPE_HOLD + 1
ADSTYPE_REPLICAPOINTER = ADSTYPE_NETADDRESS + 1
ADSTYPE_FAXNUMBER = ADSTYPE_REPLICAPOINTER + 1
ADSTYPE_EMAIL = ADSTYPE_FAXNUMBER + 1
ADSTYPE_NT_SECURITY_DESCRIPTOR = ADSTYPE_EMAIL + 1
ADSTYPE_UNKNOWN = ADSTYPE_NT_SECURITY_DESCRIPTOR + 1
ADSTYPE_DN_WITH_BINARY = ADSTYPE_UNKNOWN + 1
ADSTYPE_DN_WITH_STRING = ADSTYPE_DN_WITH_BINARY + 1
ADS_PROPERTY_CLEAR = 1
ADS_PROPERTY_UPDATE = 2
ADS_PROPERTY_APPEND = 3
ADS_PROPERTY_DELETE = 4
ADS_SYSTEMFLAG_DISALLOW_DELETE = -2147483648
ADS_SYSTEMFLAG_CONFIG_ALLOW_RENAME = 0x40000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_MOVE = 0x20000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_LIMITED_MOVE = 0x10000000
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_RENAME = -2147483648
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_MOVE = 0x4000000
ADS_SYSTEMFLAG_CR_NTDS_NC = 0x1
ADS_SYSTEMFLAG_CR_NTDS_DOMAIN = 0x2
ADS_SYSTEMFLAG_ATTR_NOT_REPLICATED = 0x1
ADS_SYSTEMFLAG_ATTR_IS_CONSTRUCTED = 0x4
ADS_GROUP_TYPE_GLOBAL_GROUP = 0x2
ADS_GROUP_TYPE_DOMAIN_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_UNIVERSAL_GROUP = 0x8
ADS_GROUP_TYPE_SECURITY_ENABLED = -2147483648
ADS_UF_SCRIPT = 0x1
ADS_UF_ACCOUNTDISABLE = 0x2
ADS_UF_HOMEDIR_REQUIRED = 0x8
ADS_UF_LOCKOUT = 0x10
ADS_UF_PASSWD_NOTREQD = 0x20
ADS_UF_PASSWD_CANT_CHANGE = 0x40
ADS_UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED = 0x80
ADS_UF_TEMP_DUPLICATE_ACCOUNT = 0x100
ADS_UF_NORMAL_ACCOUNT = 0x200
ADS_UF_INTERDOMAIN_TRUST_ACCOUNT = 0x800
ADS_UF_WORKSTATION_TRUST_ACCOUNT = 0x1000
ADS_UF_SERVER_TRUST_ACCOUNT = 0x2000
ADS_UF_DONT_EXPIRE_PASSWD = 0x10000
ADS_UF_MNS_LOGON_ACCOUNT = 0x20000
ADS_UF_SMARTCARD_REQUIRED = 0x40000
ADS_UF_TRUSTED_FOR_DELEGATION = 0x80000
ADS_UF_NOT_DELEGATED = 0x100000
ADS_UF_USE_DES_KEY_ONLY = 0x200000
ADS_UF_DONT_REQUIRE_PREAUTH = 0x400000
ADS_UF_PASSWORD_EXPIRED = 0x800000
ADS_UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION = 0x1000000
ADS_RIGHT_DELETE = 0x10000
ADS_RIGHT_READ_CONTROL = 0x20000
ADS_RIGHT_WRITE_DAC = 0x40000
ADS_RIGHT_WRITE_OWNER = 0x80000
ADS_RIGHT_SYNCHRONIZE = 0x100000
ADS_RIGHT_ACCESS_SYSTEM_SECURITY = 0x1000000
ADS_RIGHT_GENERIC_READ = -2147483648
ADS_RIGHT_GENERIC_WRITE = 0x40000000
ADS_RIGHT_GENERIC_EXECUTE = 0x20000000
ADS_RIGHT_GENERIC_ALL = 0x10000000
ADS_RIGHT_DS_CREATE_CHILD = 0x1
ADS_RIGHT_DS_DELETE_CHILD = 0x2
ADS_RIGHT_ACTRL_DS_LIST = 0x4
ADS_RIGHT_DS_SELF = 0x8
ADS_RIGHT_DS_READ_PROP = 0x10
ADS_RIGHT_DS_WRITE_PROP = 0x20
ADS_RIGHT_DS_DELETE_TREE = 0x40
ADS_RIGHT_DS_LIST_OBJECT = 0x80
ADS_RIGHT_DS_CONTROL_ACCESS = 0x100
ADS_ACETYPE_ACCESS_ALLOWED = 0
ADS_ACETYPE_ACCESS_DENIED = 0x1
ADS_ACETYPE_SYSTEM_AUDIT = 0x2
ADS_ACETYPE_ACCESS_ALLOWED_OBJECT = 0x5
ADS_ACETYPE_ACCESS_DENIED_OBJECT = 0x6
ADS_ACETYPE_SYSTEM_AUDIT_OBJECT = 0x7
ADS_ACETYPE_SYSTEM_ALARM_OBJECT = 0x8
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK = 0x9
ADS_ACETYPE_ACCESS_DENIED_CALLBACK = 0xa
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK_OBJECT = 0xb
ADS_ACETYPE_ACCESS_DENIED_CALLBACK_OBJECT = 0xc
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK = 0xd
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK = 0xe
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK_OBJECT = 0xf
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK_OBJECT = 0x10
ADS_ACEFLAG_INHERIT_ACE = 0x2
ADS_ACEFLAG_NO_PROPAGATE_INHERIT_ACE = 0x4
ADS_ACEFLAG_INHERIT_ONLY_ACE = 0x8
ADS_ACEFLAG_INHERITED_ACE = 0x10
ADS_ACEFLAG_VALID_INHERIT_FLAGS = 0x1f
ADS_ACEFLAG_SUCCESSFUL_ACCESS = 0x40
ADS_ACEFLAG_FAILED_ACCESS = 0x80
ADS_FLAG_OBJECT_TYPE_PRESENT = 0x1
ADS_FLAG_INHERITED_OBJECT_TYPE_PRESENT = 0x2
ADS_SD_CONTROL_SE_OWNER_DEFAULTED = 0x1
ADS_SD_CONTROL_SE_GROUP_DEFAULTED = 0x2
ADS_SD_CONTROL_SE_DACL_PRESENT = 0x4
ADS_SD_CONTROL_SE_DACL_DEFAULTED = 0x8
ADS_SD_CONTROL_SE_SACL_PRESENT = 0x10
ADS_SD_CONTROL_SE_SACL_DEFAULTED = 0x20
ADS_SD_CONTROL_SE_DACL_AUTO_INHERIT_REQ = 0x100
ADS_SD_CONTROL_SE_SACL_AUTO_INHERIT_REQ = 0x200
ADS_SD_CONTROL_SE_DACL_AUTO_INHERITED = 0x400
ADS_SD_CONTROL_SE_SACL_AUTO_INHERITED = 0x800
ADS_SD_CONTROL_SE_DACL_PROTECTED = 0x1000
ADS_SD_CONTROL_SE_SACL_PROTECTED = 0x2000
ADS_SD_CONTROL_SE_SELF_RELATIVE = 0x8000
ADS_SD_REVISION_DS = 4
ADS_NAME_TYPE_1779 = 1
ADS_NAME_TYPE_CANONICAL = 2
ADS_NAME_TYPE_NT4 = 3
ADS_NAME_TYPE_DISPLAY = 4
ADS_NAME_TYPE_DOMAIN_SIMPLE = 5
ADS_NAME_TYPE_ENTERPRISE_SIMPLE = 6
ADS_NAME_TYPE_GUID = 7
ADS_NAME_TYPE_UNKNOWN = 8
ADS_NAME_TYPE_USER_PRINCIPAL_NAME = 9
ADS_NAME_TYPE_CANONICAL_EX = 10
ADS_NAME_TYPE_SERVICE_PRINCIPAL_NAME = 11
ADS_NAME_TYPE_SID_OR_SID_HISTORY_NAME = 12
ADS_NAME_INITTYPE_DOMAIN = 1
ADS_NAME_INITTYPE_SERVER = 2
ADS_NAME_INITTYPE_GC = 3
ADS_OPTION_SERVERNAME = 0
ADS_OPTION_REFERRALS = ADS_OPTION_SERVERNAME + 1
ADS_OPTION_PAGE_SIZE = ADS_OPTION_REFERRALS + 1
ADS_OPTION_SECURITY_MASK = ADS_OPTION_PAGE_SIZE + 1
ADS_OPTION_MUTUAL_AUTH_STATUS = ADS_OPTION_SECURITY_MASK + 1
ADS_OPTION_QUOTA = ADS_OPTION_MUTUAL_AUTH_STATUS + 1
ADS_OPTION_PASSWORD_PORTNUMBER = ADS_OPTION_QUOTA + 1
ADS_OPTION_PASSWORD_METHOD = ADS_OPTION_PASSWORD_PORTNUMBER + 1
ADS_SECURITY_INFO_OWNER = 0x1
ADS_SECURITY_INFO_GROUP = 0x2
ADS_SECURITY_INFO_DACL = 0x4
ADS_SECURITY_INFO_SACL = 0x8
ADS_SETTYPE_FULL = 1
ADS_SETTYPE_PROVIDER = 2
ADS_SETTYPE_SERVER = 3
ADS_SETTYPE_DN = 4
ADS_FORMAT_WINDOWS = 1
ADS_FORMAT_WINDOWS_NO_SERVER = 2
ADS_FORMAT_WINDOWS_DN = 3
ADS_FORMAT_WINDOWS_PARENT = 4
ADS_FORMAT_X500 = 5
ADS_FORMAT_X500_NO_SERVER = 6
ADS_FORMAT_X500_DN = 7
ADS_FORMAT_X500_PARENT = 8
ADS_FORMAT_SERVER = 9
ADS_FORMAT_PROVIDER = 10
ADS_FORMAT_LEAF = 11
ADS_DISPLAY_FULL = 1
ADS_DISPLAY_VALUE_ONLY = 2
ADS_ESCAPEDMODE_DEFAULT = 1
ADS_ESCAPEDMODE_ON = 2
ADS_ESCAPEDMODE_OFF = 3
ADS_ESCAPEDMODE_OFF_EX = 4
ADS_PATH_FILE = 1
ADS_PATH_FILESHARE = 2
ADS_PATH_REGISTRY = 3
ADS_SD_FORMAT_IID = 1
ADS_SD_FORMAT_RAW = 2
ADS_SD_FORMAT_HEXSTRING = 3
# Generated by h2py from AdsErr.h
# Mirrors the _HRESULT_TYPEDEF_ macro from the C headers: an identity
# function, kept so the h2py-generated constants below read like the
# original C source.
def _HRESULT_TYPEDEF_(_sc): return _sc
E_ADS_BAD_PATHNAME = _HRESULT_TYPEDEF_((-2147463168))
E_ADS_INVALID_DOMAIN_OBJECT = _HRESULT_TYPEDEF_((-2147463167))
E_ADS_INVALID_USER_OBJECT = _HRESULT_TYPEDEF_((-2147463166))
E_ADS_INVALID_COMPUTER_OBJECT = _HRESULT_TYPEDEF_((-2147463165))
E_ADS_UNKNOWN_OBJECT = _HRESULT_TYPEDEF_((-2147463164))
E_ADS_PROPERTY_NOT_SET = _HRESULT_TYPEDEF_((-2147463163))
E_ADS_PROPERTY_NOT_SUPPORTED = _HRESULT_TYPEDEF_((-2147463162))
E_ADS_PROPERTY_INVALID = _HRESULT_TYPEDEF_((-2147463161))
E_ADS_BAD_PARAMETER = _HRESULT_TYPEDEF_((-2147463160))
E_ADS_OBJECT_UNBOUND = _HRESULT_TYPEDEF_((-2147463159))
E_ADS_PROPERTY_NOT_MODIFIED = _HRESULT_TYPEDEF_((-2147463158))
E_ADS_PROPERTY_MODIFIED = _HRESULT_TYPEDEF_((-2147463157))
E_ADS_CANT_CONVERT_DATATYPE = _HRESULT_TYPEDEF_((-2147463156))
E_ADS_PROPERTY_NOT_FOUND = _HRESULT_TYPEDEF_((-2147463155))
E_ADS_OBJECT_EXISTS = _HRESULT_TYPEDEF_((-2147463154))
E_ADS_SCHEMA_VIOLATION = _HRESULT_TYPEDEF_((-2147463153))
E_ADS_COLUMN_NOT_SET = _HRESULT_TYPEDEF_((-2147463152))
S_ADS_ERRORSOCCURRED = _HRESULT_TYPEDEF_(0x00005011)
S_ADS_NOMORE_ROWS = _HRESULT_TYPEDEF_(0x00005012)
S_ADS_NOMORE_COLUMNS = _HRESULT_TYPEDEF_(0x00005013)
E_ADS_INVALID_FILTER = _HRESULT_TYPEDEF_((-2147463148))
# ADS_DEREFENUM enum
ADS_DEREF_NEVER = 0
ADS_DEREF_SEARCHING = 1
ADS_DEREF_FINDING = 2
ADS_DEREF_ALWAYS = 3
# ADS_PREFERENCES_ENUM
ADSIPROP_ASYNCHRONOUS = 0
ADSIPROP_DEREF_ALIASES = 0x1
ADSIPROP_SIZE_LIMIT = 0x2
ADSIPROP_TIME_LIMIT = 0x3
ADSIPROP_ATTRIBTYPES_ONLY = 0x4
ADSIPROP_SEARCH_SCOPE = 0x5
ADSIPROP_TIMEOUT = 0x6
ADSIPROP_PAGESIZE = 0x7
ADSIPROP_PAGED_TIME_LIMIT = 0x8
ADSIPROP_CHASE_REFERRALS = 0x9
ADSIPROP_SORT_ON = 0xa
ADSIPROP_CACHE_RESULTS = 0xb
ADSIPROP_ADSIFLAG = 0xc
# ADSI_DIALECT_ENUM
ADSI_DIALECT_LDAP = 0
ADSI_DIALECT_SQL = 0x1
# ADS_CHASE_REFERRALS_ENUM
ADS_CHASE_REFERRALS_NEVER = 0
ADS_CHASE_REFERRALS_SUBORDINATE = 0x20
ADS_CHASE_REFERRALS_EXTERNAL = 0x40
ADS_CHASE_REFERRALS_ALWAYS = ADS_CHASE_REFERRALS_SUBORDINATE | ADS_CHASE_REFERRALS_EXTERNAL
# Generated by h2py from ObjSel.h
DSOP_SCOPE_TYPE_TARGET_COMPUTER = 0x00000001
DSOP_SCOPE_TYPE_UPLEVEL_JOINED_DOMAIN = 0x00000002
DSOP_SCOPE_TYPE_DOWNLEVEL_JOINED_DOMAIN = 0x00000004
DSOP_SCOPE_TYPE_ENTERPRISE_DOMAIN = 0x00000008
DSOP_SCOPE_TYPE_GLOBAL_CATALOG = 0x00000010
DSOP_SCOPE_TYPE_EXTERNAL_UPLEVEL_DOMAIN = 0x00000020
DSOP_SCOPE_TYPE_EXTERNAL_DOWNLEVEL_DOMAIN = 0x00000040
DSOP_SCOPE_TYPE_WORKGROUP = 0x00000080
DSOP_SCOPE_TYPE_USER_ENTERED_UPLEVEL_SCOPE = 0x00000100
DSOP_SCOPE_TYPE_USER_ENTERED_DOWNLEVEL_SCOPE = 0x00000200
DSOP_SCOPE_FLAG_STARTING_SCOPE = 0x00000001
DSOP_SCOPE_FLAG_WANT_PROVIDER_WINNT = 0x00000002
DSOP_SCOPE_FLAG_WANT_PROVIDER_LDAP = 0x00000004
DSOP_SCOPE_FLAG_WANT_PROVIDER_GC = 0x00000008
DSOP_SCOPE_FLAG_WANT_SID_PATH = 0x00000010
DSOP_SCOPE_FLAG_WANT_DOWNLEVEL_BUILTIN_PATH = 0x00000020
DSOP_SCOPE_FLAG_DEFAULT_FILTER_USERS = 0x00000040
DSOP_SCOPE_FLAG_DEFAULT_FILTER_GROUPS = 0x00000080
DSOP_SCOPE_FLAG_DEFAULT_FILTER_COMPUTERS = 0x00000100
DSOP_SCOPE_FLAG_DEFAULT_FILTER_CONTACTS = 0x00000200
DSOP_FILTER_INCLUDE_ADVANCED_VIEW = 0x00000001
DSOP_FILTER_USERS = 0x00000002
DSOP_FILTER_BUILTIN_GROUPS = 0x00000004
DSOP_FILTER_WELL_KNOWN_PRINCIPALS = 0x00000008
DSOP_FILTER_UNIVERSAL_GROUPS_DL = 0x00000010
DSOP_FILTER_UNIVERSAL_GROUPS_SE = 0x00000020
DSOP_FILTER_GLOBAL_GROUPS_DL = 0x00000040
DSOP_FILTER_GLOBAL_GROUPS_SE = 0x00000080
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_DL = 0x00000100
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_SE = 0x00000200
DSOP_FILTER_CONTACTS = 0x00000400
DSOP_FILTER_COMPUTERS = 0x00000800
DSOP_DOWNLEVEL_FILTER_USERS = (-2147483647)
DSOP_DOWNLEVEL_FILTER_LOCAL_GROUPS = (-2147483646)
DSOP_DOWNLEVEL_FILTER_GLOBAL_GROUPS = (-2147483644)
DSOP_DOWNLEVEL_FILTER_COMPUTERS = (-2147483640)
DSOP_DOWNLEVEL_FILTER_WORLD = (-2147483632)
DSOP_DOWNLEVEL_FILTER_AUTHENTICATED_USER = (-2147483616)
DSOP_DOWNLEVEL_FILTER_ANONYMOUS = (-2147483584)
DSOP_DOWNLEVEL_FILTER_BATCH = (-2147483520)
DSOP_DOWNLEVEL_FILTER_CREATOR_OWNER = (-2147483392)
DSOP_DOWNLEVEL_FILTER_CREATOR_GROUP = (-2147483136)
DSOP_DOWNLEVEL_FILTER_DIALUP = (-2147482624)
DSOP_DOWNLEVEL_FILTER_INTERACTIVE = (-2147481600)
DSOP_DOWNLEVEL_FILTER_NETWORK = (-2147479552)
DSOP_DOWNLEVEL_FILTER_SERVICE = (-2147475456)
DSOP_DOWNLEVEL_FILTER_SYSTEM = (-2147467264)
DSOP_DOWNLEVEL_FILTER_EXCLUDE_BUILTIN_GROUPS = (-2147450880)
DSOP_DOWNLEVEL_FILTER_TERMINAL_SERVER = (-2147418112)
DSOP_DOWNLEVEL_FILTER_ALL_WELLKNOWN_SIDS = (-2147352576)
DSOP_DOWNLEVEL_FILTER_LOCAL_SERVICE = (-2147221504)
DSOP_DOWNLEVEL_FILTER_NETWORK_SERVICE = (-2146959360)
DSOP_DOWNLEVEL_FILTER_REMOTE_LOGON = (-2146435072)
DSOP_FLAG_MULTISELECT = 0x00000001
DSOP_FLAG_SKIP_TARGET_COMPUTER_DC_CHECK = 0x00000002
CFSTR_DSOP_DS_SELECTION_LIST = "CFSTR_DSOP_DS_SELECTION_LIST"
| gpl-3.0 |
arborh/tensorflow | tensorflow/python/keras/engine/data_adapter_test.py | 3 | 30720 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DummyArrayLike(object):
  """Dummy array-like object.

  Wraps an ndarray and exposes only the minimal slicing protocol
  (`__len__`, `__getitem__`, `shape`, `dtype`), so adapters must slice it
  rather than convert it to a tensor wholesale.
  """

  def __init__(self, data):
    self.data = data

  def __len__(self):
    return len(self.data)

  def __getitem__(self, key):
    return self.data[key]

  # Delegate the ndarray metadata attributes to the wrapped data.
  shape = property(lambda self: self.data.shape)
  dtype = property(lambda self: self.data.dtype)
def fail_on_convert(x, **kwargs):
  """Tensor-conversion hook that unconditionally refuses.

  Registered for DummyArrayLike so that any attempt to convert the whole
  array-like to a tensor (instead of slicing it first) fails loudly.
  """
  del x  # Unused: conversion is rejected regardless of the value.
  del kwargs  # Unused.
  raise TypeError('Cannot convert DummyArrayLike to a tensor')
# Install the refusing converter so TF treats DummyArrayLike as opaque.
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
  """Base fixture providing the same 50-sample dataset in many formats.

  Subclasses pick an adapter class and verify `can_handle`, sizing and
  iteration behavior against numpy arrays, eager tensors, a generic
  array-like, a tf.data Dataset, a generator/iterator pair, and a
  `keras.utils.Sequence`.
  """

  def setUp(self):
    super(DataAdapterTestBase, self).setUp()
    self.batch_size = 5
    # 50 samples x 10 features; targets are all ones.
    self.numpy_input = np.zeros((50, 10))
    self.numpy_target = np.ones(50)
    self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
    self.tensor_target = array_ops.ones((50,))
    # Same data wrapped so it cannot be converted to a tensor wholesale.
    self.arraylike_input = DummyArrayLike(self.numpy_input)
    self.arraylike_target = DummyArrayLike(self.numpy_target)
    self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
        (self.numpy_input, self.numpy_target)).shuffle(50).batch(
            self.batch_size)
    # Infinite generator of (features, targets) batches.
    def generator():
      while True:
        yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
    self.generator_input = generator()
    self.iterator_input = data_utils.threadsafe_generator(generator)()
    self.sequence_input = TestSequence(batch_size=self.batch_size,
                                       feature_shape=10)
    # Tiny model used by the `test_training*` cases.
    self.model = keras.models.Sequential(
        [keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
  """A ten-batch `Sequence` whose batches are constant (zeros, ones) pairs."""

  def __init__(self, batch_size, feature_shape):
    self.batch_size = batch_size
    self.feature_shape = feature_shape

  def __getitem__(self, item):
    features = np.zeros((self.batch_size, self.feature_shape))
    labels = np.ones((self.batch_size,))
    return (features, labels)

  def __len__(self):
    # Fixed number of batches per epoch.
    return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
  """Tests for `TensorLikeDataAdapter` (numpy arrays, eager tensors, pandas)."""

  def setUp(self):
    super(TensorLikeDataAdapterTest, self).setUp()
    self.adapter_cls = data_adapter.TensorLikeDataAdapter

  def test_can_handle_numpy(self):
    self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
    self.assertTrue(
        self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
    self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
    self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
    self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))

  def test_iterator_expect_batch_size_numpy(self):
    with self.assertRaisesRegexp(
        ValueError, r'`batch_size` or `steps` is required'):
      self.adapter_cls(self.numpy_input, self.numpy_target)

  def test_size_numpy(self):
    adapter = self.adapter_cls(
        self.numpy_input, self.numpy_target, batch_size=5)
    self.assertEqual(adapter.get_size(), 10)
    self.assertFalse(adapter.has_partial_batch())

  def test_batch_size_numpy(self):
    adapter = self.adapter_cls(
        self.numpy_input, self.numpy_target, batch_size=5)
    self.assertEqual(adapter.batch_size(), 5)

  def test_partial_batch_numpy(self):
    adapter = self.adapter_cls(
        self.numpy_input, self.numpy_target, batch_size=4)
    self.assertEqual(adapter.get_size(), 13)  # 50/4
    self.assertTrue(adapter.has_partial_batch())
    self.assertEqual(adapter.partial_batch_size(), 2)

  def test_epochs(self):
    num_epochs = 3
    adapter = self.adapter_cls(
        self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
    ds_iter = iter(adapter.get_dataset())
    num_batches_per_epoch = self.numpy_input.shape[0] // 5
    for _ in range(num_batches_per_epoch * num_epochs):
      next(ds_iter)
    # The dataset is exhausted after exactly epochs * batches elements.
    with self.assertRaises(StopIteration):
      next(ds_iter)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training_numpy(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)

  def test_can_handle_pandas(self):
    try:
      import pandas as pd  # pylint: disable=g-import-not-at-top
    except ImportError:
      self.skipTest('Skipping test because pandas is not installed.')
    self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
    self.assertTrue(
        self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
    self.assertTrue(
        self.adapter_cls.can_handle(
            pd.DataFrame(self.numpy_input),
            pd.DataFrame(self.numpy_input)[0]))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training_pandas(self):
    try:
      import pandas as pd  # pylint: disable=g-import-not-at-top
    except ImportError:
      self.skipTest('Skipping test because pandas is not installed.')
    input_a = keras.Input(shape=(3,), name='input_a')
    input_b = keras.Input(shape=(3,), name='input_b')
    # Bug fix: this input was mistakenly also named 'input_b' (copy/paste
    # slip); it must carry its own name.
    input_c = keras.Input(shape=(1,), name='input_c')
    x = keras.layers.Dense(4, name='dense_1')(input_a)
    y = keras.layers.Dense(3, name='dense_2')(input_b)
    z = keras.layers.Dense(1, name='dense_3')(input_c)
    model_1 = keras.Model(inputs=input_a, outputs=x)
    model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
    # model_3 is only used for predict() below, so it is never compiled.
    model_3 = keras.Model(inputs=input_c, outputs=z)
    model_1.compile(optimizer='rmsprop', loss='mse')
    model_2.compile(optimizer='rmsprop', loss='mse')
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    input_a_df = pd.DataFrame(input_a_np)
    input_b_df = pd.DataFrame(input_b_np)
    output_a_df = pd.DataFrame(np.random.random((10, 4)))
    output_b_df = pd.DataFrame(np.random.random((10, 3)))
    model_1.fit(input_a_df,
                output_a_df)
    model_2.fit([input_a_df, input_b_df],
                [output_a_df, output_b_df])
    model_1.fit([input_a_df],
                [output_a_df])
    model_1.fit({'input_a': input_a_df},
                output_a_df)
    model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
                [output_a_df, output_b_df])
    model_1.evaluate(input_a_df,
                     output_a_df)
    model_2.evaluate([input_a_df, input_b_df],
                     [output_a_df, output_b_df])
    model_1.evaluate([input_a_df],
                     [output_a_df])
    model_1.evaluate({'input_a': input_a_df},
                     output_a_df)
    model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
                     [output_a_df, output_b_df])
    # Verify predicting on pandas vs numpy returns the same result
    predict_1_pandas = model_1.predict(input_a_df)
    predict_2_pandas = model_2.predict([input_a_df, input_b_df])
    predict_3_pandas = model_3.predict(input_a_df[0])
    predict_1_numpy = model_1.predict(input_a_np)
    predict_2_numpy = model_2.predict([input_a_np, input_b_np])
    predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
    self.assertAllClose(predict_1_numpy, predict_1_pandas)
    self.assertAllClose(predict_2_numpy, predict_2_pandas)
    self.assertAllClose(predict_3_numpy, predict_3_pandas)
    # Extra ways to pass in dataframes
    model_1.predict([input_a_df])
    model_1.predict({'input_a': input_a_df})
    model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})

  def test_can_handle(self):
    self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
    self.assertTrue(
        self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
    self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
    self.assertFalse(
        self.adapter_cls.can_handle(self.arraylike_input,
                                    self.arraylike_target))
    self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
    self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
    self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)

  def test_size(self):
    adapter = self.adapter_cls(
        self.tensor_input, self.tensor_target, batch_size=5)
    self.assertEqual(adapter.get_size(), 10)
    self.assertFalse(adapter.has_partial_batch())

  def test_shuffle_correctness(self):
    with context.eager_mode():
      num_samples = 100
      batch_size = 32
      x = np.arange(num_samples)
      np.random.seed(99)
      adapter = self.adapter_cls(
          x, y=None, batch_size=batch_size, shuffle=True, epochs=2)

      def _get_epoch(ds_iter):
        ds_data = []
        for _ in range(int(math.ceil(num_samples / batch_size))):
          ds_data.append(next(ds_iter)[0].numpy())
        return np.concatenate(ds_data)

      ds_iter = iter(adapter.get_dataset())
      # First epoch.
      epoch_data = _get_epoch(ds_iter)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(epoch_data))
      # Second epoch.
      second_epoch_data = _get_epoch(ds_iter)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, second_epoch_data)
      # Check that shuffling is different across epochs.
      self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(second_epoch_data))

  def test_batch_shuffle_correctness(self):
    with context.eager_mode():
      num_samples = 100
      batch_size = 6
      x = np.arange(num_samples)
      np.random.seed(99)
      adapter = self.adapter_cls(
          x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)

      def _get_epoch_batches(ds_iter):
        ds_data = []
        for _ in range(int(math.ceil(num_samples / batch_size))):
          ds_data.append(next(ds_iter)[0].numpy())
        return ds_data

      ds_iter = iter(adapter.get_dataset())
      # First epoch.
      epoch_batch_data = _get_epoch_batches(ds_iter)
      epoch_data = np.concatenate(epoch_batch_data)

      def _verify_batch(batch):
        # Verify that a batch contains only contiguous data, and that it has
        # been shuffled.
        shuffled_batch = np.sort(batch)
        self.assertNotAllClose(batch, shuffled_batch)
        for i in range(1, len(batch)):
          self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])

      # Assert that the data within each batch remains contiguous
      for batch in epoch_batch_data:
        _verify_batch(batch)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(epoch_data))
      # Second epoch.
      second_epoch_batch_data = _get_epoch_batches(ds_iter)
      second_epoch_data = np.concatenate(second_epoch_batch_data)
      # Assert that the data within each batch remains contiguous
      for batch in second_epoch_batch_data:
        _verify_batch(batch)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, second_epoch_data)
      # Check that shuffling is different across epochs.
      self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(second_epoch_data))

  @parameterized.named_parameters(
      ('batch_size_5', 5, None, 5),
      ('batch_size_50', 50, 4, 50),  # Sanity check: batch_size takes precedence
      ('steps_1', None, 1, 50),
      ('steps_4', None, 4, 13),
  )
  def test_batch_size(self, batch_size_in, steps, batch_size_out):
    adapter = self.adapter_cls(
        self.tensor_input, self.tensor_target, batch_size=batch_size_in,
        steps=steps)
    self.assertEqual(adapter.batch_size(), batch_size_out)

  @parameterized.named_parameters(
      ('batch_size_5', 5, None, 10, 0),
      ('batch_size_4', 4, None, 13, 2),
      ('steps_1', None, 1, 1, 0),
      ('steps_5', None, 5, 5, 0),
      ('steps_4', None, 4, 4, 11),
  )
  def test_partial_batch(
      self, batch_size_in, steps, size, partial_batch_size):
    adapter = self.adapter_cls(
        self.tensor_input, self.tensor_target, batch_size=batch_size_in,
        steps=steps)
    self.assertEqual(adapter.get_size(), size)  # 50/steps
    self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
    self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
  """Tests for `GenericArrayLikeDataAdapter` (sliceable non-tensor inputs)."""

  def setUp(self):
    super(GenericArrayLikeDataAdapterTest, self).setUp()
    self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter

  def test_can_handle_some_numpy(self):
    self.assertTrue(self.adapter_cls.can_handle(
        self.arraylike_input))
    self.assertTrue(
        self.adapter_cls.can_handle(self.arraylike_input,
                                    self.arraylike_target))
    # Because adapters are mutually exclusive, don't handle cases
    # where all the data is numpy or an eagertensor
    self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
    self.assertFalse(
        self.adapter_cls.can_handle(self.numpy_input,
                                    self.numpy_target))
    self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
    self.assertFalse(
        self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
    # But do handle mixes that include generic arraylike data
    self.assertTrue(
        self.adapter_cls.can_handle(self.numpy_input,
                                    self.arraylike_target))
    self.assertTrue(
        self.adapter_cls.can_handle(self.arraylike_input,
                                    self.numpy_target))
    self.assertTrue(
        self.adapter_cls.can_handle(self.arraylike_input,
                                    self.tensor_target))
    self.assertTrue(
        self.adapter_cls.can_handle(self.tensor_input,
                                    self.arraylike_target))
    self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
    self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
    self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))

  def test_iterator_expect_batch_size_generic_arraylike(self):
    with self.assertRaisesRegexp(
        ValueError, r'`batch_size` or `steps` is required'):
      self.adapter_cls(self.arraylike_input,
                       self.arraylike_target)

  def test_size(self):
    adapter = self.adapter_cls(
        self.arraylike_input,
        self.arraylike_target, batch_size=5)
    self.assertEqual(adapter.get_size(), 10)
    self.assertFalse(adapter.has_partial_batch())

  def test_epochs(self):
    num_epochs = 3
    adapter = self.adapter_cls(
        self.arraylike_input,
        self.numpy_target, batch_size=5, epochs=num_epochs)
    ds_iter = iter(adapter.get_dataset())
    num_batches_per_epoch = self.numpy_input.shape[0] // 5
    for _ in range(num_batches_per_epoch * num_epochs):
      next(ds_iter)
    # The dataset is exhausted after exactly epochs * batches elements.
    with self.assertRaises(StopIteration):
      next(ds_iter)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training(self):
    # First verify that DummyArrayLike can't be converted to a Tensor
    with self.assertRaises(TypeError):
      ops.convert_to_tensor(self.arraylike_input)
    # Then train on the array like.
    # It should not be converted to a tensor directly (which would force it into
    # memory), only the sliced data should be converted.
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.arraylike_input,
                   self.arraylike_target, batch_size=5)
    self.model.fit(self.arraylike_input,
                   self.arraylike_target,
                   shuffle=True, batch_size=5)
    self.model.fit(self.arraylike_input,
                   self.arraylike_target,
                   shuffle='batch', batch_size=5)
    self.model.evaluate(self.arraylike_input,
                        self.arraylike_target, batch_size=5)
    self.model.predict(self.arraylike_input, batch_size=5)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training_numpy_target(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.arraylike_input,
                   self.numpy_target, batch_size=5)
    self.model.fit(self.arraylike_input,
                   self.numpy_target, shuffle=True,
                   batch_size=5)
    self.model.fit(self.arraylike_input,
                   self.numpy_target, shuffle='batch',
                   batch_size=5)
    self.model.evaluate(self.arraylike_input,
                        self.numpy_target, batch_size=5)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training_tensor_target(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.arraylike_input,
                   self.tensor_target, batch_size=5)
    self.model.fit(self.arraylike_input,
                   self.tensor_target, shuffle=True,
                   batch_size=5)
    self.model.fit(self.arraylike_input,
                   self.tensor_target, shuffle='batch',
                   batch_size=5)
    self.model.evaluate(self.arraylike_input,
                        self.tensor_target, batch_size=5)

  def test_shuffle_correctness(self):
    with context.eager_mode():
      num_samples = 100
      batch_size = 32
      x = DummyArrayLike(np.arange(num_samples))
      np.random.seed(99)
      adapter = self.adapter_cls(
          x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
      def _get_epoch(ds_iter):
        ds_data = []
        for _ in range(int(math.ceil(num_samples / batch_size))):
          ds_data.append(next(ds_iter)[0].numpy())
        return np.concatenate(ds_data)
      ds_iter = iter(adapter.get_dataset())
      # First epoch.
      epoch_data = _get_epoch(ds_iter)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(epoch_data))
      # Second epoch.
      second_epoch_data = _get_epoch(ds_iter)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, second_epoch_data)
      # Check that shuffling is different across epochs.
      self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(second_epoch_data))

  def test_batch_shuffle_correctness(self):
    with context.eager_mode():
      num_samples = 100
      batch_size = 6
      x = DummyArrayLike(np.arange(num_samples))
      np.random.seed(99)
      adapter = self.adapter_cls(
          x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
      def _get_epoch_batches(ds_iter):
        ds_data = []
        for _ in range(int(math.ceil(num_samples / batch_size))):
          ds_data.append(next(ds_iter)[0].numpy())
        return ds_data
      ds_iter = iter(adapter.get_dataset())
      # First epoch.
      epoch_batch_data = _get_epoch_batches(ds_iter)
      epoch_data = np.concatenate(epoch_batch_data)
      def _verify_batch(batch):
        # Verify that a batch contains only contiguous data, but that it has
        # been shuffled.
        shuffled_batch = np.sort(batch)
        self.assertNotAllClose(batch, shuffled_batch)
        for i in range(1, len(batch)):
          self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
      # Assert that the data within each batch is shuffled contiguous data
      for batch in epoch_batch_data:
        _verify_batch(batch)
      # Check that individual batches are unshuffled
      # Check that shuffling occurred.
      self.assertNotAllClose(x, epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(epoch_data))
      # Second epoch.
      second_epoch_batch_data = _get_epoch_batches(ds_iter)
      second_epoch_data = np.concatenate(second_epoch_batch_data)
      # Assert that the data within each batch remains contiguous
      for batch in second_epoch_batch_data:
        _verify_batch(batch)
      # Check that shuffling occurred.
      self.assertNotAllClose(x, second_epoch_data)
      # Check that shuffling is different across epochs.
      self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each elements appears, and only once.
      self.assertAllClose(x, np.sort(second_epoch_data))

  @parameterized.named_parameters(
      ('batch_size_5', 5, None, 5),
      ('batch_size_50', 50, 4, 50),  # Sanity check: batch_size takes precedence
      ('steps_1', None, 1, 50),
      ('steps_4', None, 4, 13),
  )
  def test_batch_size(self, batch_size_in, steps, batch_size_out):
    adapter = self.adapter_cls(
        self.arraylike_input,
        self.arraylike_target, batch_size=batch_size_in,
        steps=steps)
    self.assertEqual(adapter.batch_size(), batch_size_out)

  @parameterized.named_parameters(
      ('batch_size_5', 5, None, 10, 0),
      ('batch_size_4', 4, None, 13, 2),
      ('steps_1', None, 1, 1, 0),
      ('steps_5', None, 5, 5, 0),
      ('steps_4', None, 4, 4, 11),
  )
  def test_partial_batch(
      self, batch_size_in, steps, size, partial_batch_size):
    adapter = self.adapter_cls(
        self.arraylike_input, self.arraylike_target,
        batch_size=batch_size_in,
        steps=steps)
    self.assertEqual(adapter.get_size(), size)  # 50/steps
    self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
    self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class DatasetAdapterTest(DataAdapterTestBase):
  """Tests for `DatasetAdapter`, which wraps an existing tf.data Dataset."""

  def setUp(self):
    super(DatasetAdapterTest, self).setUp()
    self.adapter_cls = data_adapter.DatasetAdapter

  def test_can_handle(self):
    self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
    self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
    self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
    self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
    self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training(self):
    dataset = self.adapter_cls(self.dataset_input).get_dataset()
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(dataset)

  def test_size(self):
    # An arbitrary dataset's cardinality is unknown up front.
    adapter = self.adapter_cls(self.dataset_input)
    self.assertIsNone(adapter.get_size())

  def test_batch_size(self):
    adapter = self.adapter_cls(self.dataset_input)
    self.assertIsNone(adapter.batch_size())

  def test_partial_batch(self):
    adapter = self.adapter_cls(self.dataset_input)
    self.assertFalse(adapter.has_partial_batch())
    self.assertIsNone(adapter.partial_batch_size())

  def test_invalid_targets_argument(self):
    # Targets must be bundled inside the dataset itself.
    with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
      self.adapter_cls(self.dataset_input, y=self.dataset_input)

  def test_invalid_sample_weights_argument(self):
    with self.assertRaisesRegexp(ValueError,
                                 r'`sample_weight` argument is not supported'):
      self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
class GeneratorDataAdapterTest(DataAdapterTestBase):
  """Tests for `GeneratorDataAdapter`, which wraps a Python generator."""

  def setUp(self):
    super(GeneratorDataAdapterTest, self).setUp()
    self.adapter_cls = data_adapter.GeneratorDataAdapter

  def test_can_handle(self):
    self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
    self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
    self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
    self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
    self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.generator_input, steps_per_epoch=10)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @test_util.run_v2_only
  @data_utils.dont_use_multiprocessing_pool
  def test_with_multiprocessing_training(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
                   max_queue_size=10, steps_per_epoch=10)
    # Fit twice to ensure there isn't any duplication that prevent the worker
    # from starting.
    self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
                   max_queue_size=10, steps_per_epoch=10)

  def test_size(self):
    # A generator's length is unknowable up front.
    adapter = self.adapter_cls(self.generator_input)
    self.assertIsNone(adapter.get_size())

  def test_batch_size(self):
    # No static batch size, but the first yielded batch has 5 samples.
    adapter = self.adapter_cls(self.generator_input)
    self.assertEqual(adapter.batch_size(), None)
    self.assertEqual(adapter.representative_batch_size(), 5)

  def test_partial_batch(self):
    adapter = self.adapter_cls(self.generator_input)
    self.assertFalse(adapter.has_partial_batch())
    self.assertIsNone(adapter.partial_batch_size())

  def test_invalid_targets_argument(self):
    # Targets must be yielded by the generator itself.
    with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
      self.adapter_cls(self.generator_input, y=self.generator_input)

  def test_invalid_sample_weights_argument(self):
    with self.assertRaisesRegexp(ValueError,
                                 r'`sample_weight` argument is not supported'):
      self.adapter_cls(
          self.generator_input, sample_weights=self.generator_input)
class KerasSequenceAdapterTest(DataAdapterTestBase):
  """Tests for `KerasSequenceAdapter`, which wraps a `keras.utils.Sequence`."""

  def setUp(self):
    super(KerasSequenceAdapterTest, self).setUp()
    self.adapter_cls = data_adapter.KerasSequenceAdapter

  def test_can_handle(self):
    self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
    self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
    self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
    self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
    self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_training(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.sequence_input)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @test_util.run_v2_only
  @data_utils.dont_use_multiprocessing_pool
  def test_with_multiprocessing_training(self):
    self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                       run_eagerly=testing_utils.should_run_eagerly())
    self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
                   max_queue_size=10, steps_per_epoch=10)
    # Fit twice to ensure there isn't any duplication that prevent the worker
    # from starting.
    self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
                   max_queue_size=10, steps_per_epoch=10)

  def test_size(self):
    # TestSequence declares 10 batches via __len__.
    adapter = self.adapter_cls(self.sequence_input)
    self.assertEqual(adapter.get_size(), 10)

  def test_batch_size(self):
    # No static batch size, but the first batch has 5 samples.
    adapter = self.adapter_cls(self.sequence_input)
    self.assertEqual(adapter.batch_size(), None)
    self.assertEqual(adapter.representative_batch_size(), 5)

  def test_partial_batch(self):
    adapter = self.adapter_cls(self.sequence_input)
    self.assertFalse(adapter.has_partial_batch())
    self.assertIsNone(adapter.partial_batch_size())

  def test_invalid_targets_argument(self):
    # Targets must be returned by the Sequence itself.
    with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
      self.adapter_cls(self.sequence_input, y=self.sequence_input)

  def test_invalid_sample_weights_argument(self):
    with self.assertRaisesRegexp(ValueError,
                                 r'`sample_weight` argument is not supported'):
      self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
if __name__ == '__main__':
  # The shuffle tests call .numpy() on dataset elements, which requires
  # eager execution.
  ops.enable_eager_execution()
  test.main()
| apache-2.0 |
voutilad/courtlistener | cl/audio/tasks.py | 1 | 7928 | import hashlib
import json
import subprocess
import traceback
from tempfile import NamedTemporaryFile
import httplib2
from celery.canvas import chain
from django.conf import settings
from google.cloud import storage
from google.cloud.exceptions import Forbidden, NotFound
from google.cloud.storage import Blob
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from cl.audio.models import Audio
from cl.celery import app
# GCS bucket where re-encoded audio is staged for the Speech API; a 7-day
# delete lifecycle rule is applied when the bucket is first created.
TRANSCRIPTS_BUCKET_NAME = 'freelawproject-transcripts'
def make_transcription_chain(path, phrases, af_id):
    """Build the Celery chain that runs the whole transcription workflow.

    The return value of each task is appended as a positional argument to
    the task that follows it in the chain.

    :type path: str
    :param path: The path to the input audio file.
    :type phrases: list
    :param phrases: Phrases or words expected to be in the audio, used as
        recognition hints. Typically proper nouns such as the case name.
    :type af_id: int
    :param af_id: The ID of the Audio item that will be updated in the end.
    """
    workflow = [
        upload_item_as_raw_file.s(path),
        do_speech_to_text.s(phrases),
        poll_for_result_and_save.s(af_id),
        delete_blob_from_google.s(),
    ]
    return chain(*workflow)
def get_storage_client():
    """Return a Google Cloud Storage client for our service account."""
    auth_conf = settings.GOOGLE_AUTH
    return storage.Client.from_service_account_json(
        auth_conf['PATH'],
        project=auth_conf['PROJECT'],
    )
def get_speech_service():
    """Make a speech service that we can use to make requests.

    This is lifted from the API examples provided here:
    https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/speech/api-client/transcribe_async.py#L35
    """
    scopes = ['https://www.googleapis.com/auth/cloud-platform']
    credentials = GoogleCredentials.from_stream(
        settings.GOOGLE_AUTH['PATH'],
    ).create_scoped(scopes)
    # oauth2client's authorize() patches and returns the Http object.
    authorized_http = credentials.authorize(httplib2.Http())
    return discovery.build('speech', 'v1beta1', http=authorized_http)
def encode_as_linear16(path, tmp):
    """Re-encode the audio at `path` as LINEAR16 into the open file `tmp`.

    From: https://cloud.google.com/speech/support#troubleshooting:
    "The LINEAR16 encoding must be 16-bits, signed-integer, little-endian."
    In avconv, this translates to "s16le". See also:
    http://stackoverflow.com/a/4854627/64911 and
    https://trac.ffmpeg.org/wiki/audio%20types

    :param path: Path to the source audio file.
    :param tmp: An open NamedTemporaryFile whose name avconv writes into.
    :raises subprocess.CalledProcessError: If avconv exits non-zero.
    """
    # NOTE(review): `basestring` is Python-2-only; replace with `str` when
    # this module is ported to Python 3.
    assert isinstance(path, basestring), "path argument is not a str."
    avconv_command = [
        'avconv',
        '-y',  # Assume yes (clobber existing files)
        '-i', path,  # Input file
        '-f', 's16le',  # Force output format
        '-ac', '1',  # Mono
        '-ar', '16k',  # Sample rate of 16000Mhz
        tmp.name,  # Output file
    ]
    try:
        subprocess.check_output(
            avconv_command,
            stderr=subprocess.STDOUT,
        )
    except subprocess.CalledProcessError as e:
        # "except ... as e" and print() (already used two lines below in the
        # original) keep this parseable on both Python 2.6+ and Python 3;
        # the old "except E, e" / "print x" forms were Python-2-only.
        print('avconv failed command: %s\n'
              'error code: %s\n'
              'output: %s\n' % (avconv_command, e.returncode, e.output))
        print(traceback.format_exc())
        # Bare raise preserves the original traceback (raise e discarded it).
        raise
@app.task
def upload_item_as_raw_file(path, client=None):
    """Set things up, convert the file, and upload it.

    Re-encodes the audio at `path` to LINEAR16 and uploads it to the
    transcripts bucket, creating the bucket (public, 7-day delete
    lifecycle) if it does not exist yet.

    :param path: Path to the source audio file.
    :param client: Optional storage client (mainly for testing); a real
        one is created when omitted.
    :return: Dict with the blob and bucket names, consumed by the next
        task in the chain.
    """
    if client is None:
        client = get_storage_client()
    # Check that the bucket exists, make it if not.
    try:
        b = client.get_bucket(TRANSCRIPTS_BUCKET_NAME)
    except Forbidden as e:
        print("Received Forbidden (403) error while getting bucket. This could "
              "mean that you do not have billing set up for this "
              "account/project, or that somebody else has taken this bucket "
              "from the global namespace.")
        raise e
    except NotFound:
        # Bucket names are global; ours may simply not have been created yet.
        b = client.bucket(TRANSCRIPTS_BUCKET_NAME)
        b.lifecycle_rules = [{
            'action': {'type': 'Delete'},
            'condition': {'age': 7},
        }]
        b.create()
        b.make_public(future=True)
    # Re-encode the file as a temp file and upload it. When we leave the context
    # manager, the temp file gets automatically deleted.
    with NamedTemporaryFile(prefix='transcode_', suffix='.raw') as tmp:
        # avconv writes to tmp.name, so this handle is still at offset 0 and
        # the read() below consumes the freshly encoded bytes.
        encode_as_linear16(path, tmp)
        # Name it after a SHA2 hash of the item, to avoid collisions.
        file_name = 'transcripts-%s' % hashlib.sha256(tmp.read()).hexdigest()
        blob = Blob(file_name, b)
        # rewind=True: the read() above advanced the handle to EOF.
        blob.upload_from_file(tmp, rewind=True)
    return {'blob_name': blob.name, 'bucket_name': blob.bucket.name}
@app.task
def do_speech_to_text(returned_info, phrases, service=None):
    """Kick off asynchronous conversion of the uploaded audio to text.

    This creates an operation on Google's servers to convert the item to text.
    In general, this process takes about as long as the file to complete (so a
    10 minute MP3 will take about 10 minutes to complete).

    When this task is completed, it hands off the remote operation name to
    another Celery task that polls for the final result.

    :param returned_info: Dict from `upload_item_as_raw_file` carrying the
        blob and bucket names.
    :param phrases: Extra recognition hints (e.g. the case name) appended to
        the default legal vocabulary below.
    :param service: Optional speech service (mainly for testing).
    :return: `returned_info`, augmented with the remote `operation_name`.
    """
    if service is None:
        service = get_speech_service()
    # Common legal vocabulary handed to the recognizer as hints.
    default_phrases = [
        'remand', 'appellant', 'appellee', 'et al.', 'deposition', 'officer',
        'factual', 'reasonable', 'claimant', 'complainant', 'defendant',
        'devisee', 'executor', 'executrix', 'petitioner', 'plaintiff',
        'respondent',  # Bug fix: was misspelled as 'respondant'.
    ]
    default_phrases.extend(phrases)
    # The Speech API caps speechContext.phrases at 500 entries.
    assert len(default_phrases) <= 500, "phrase API limit exceeded."
    response = service.speech().asyncrecognize(
        body={
            'config': {
                'encoding': 'LINEAR16',
                'sampleRate': 16000,
                'maxAlternatives': 10,
                'speechContext': {'phrases': default_phrases},
            },
            'audio': {
                'uri': 'gs://%s/%s' % (returned_info['bucket_name'],
                                       returned_info['blob_name']),
            }
        }).execute()
    # Use a dict to send all values to the next task as a single var
    returned_info.update({'operation_name': response['name']})
    return returned_info
@app.task(bind=True, max_retries=6)
def poll_for_result_and_save(self, returned_info, af_id, service=None):
    """Poll Google for the completed STT and save it to the DB.

    Using an exponential backoff, ask google for the completed operation. If
    it's complete, save the results.

    :param returned_info: Dict carrying `operation_name` from the previous
        task; passed through unchanged for the next task in the chain.
    :param af_id: PK of the Audio row to update.
    :param service: Optional speech service (mainly for testing).
    """
    # 5, 10, 20, 40, 80, 160, 320 minutes (longest item is currently 240 min.)
    countdown = 5 * 60 * (2 ** self.request.retries)
    if service is None:
        service = get_speech_service()
    af = Audio.objects.get(pk=af_id)
    polling_response = (service.operations()
                        .get(name=returned_info['operation_name'])
                        .execute())
    if 'done' in polling_response and polling_response['done']:
        # Operation finished: persist the full JSON response and mark done.
        af.stt_google_response = json.dumps(polling_response, indent=2)
        af.stt_status = af.STT_COMPLETE
        af.save()
        return returned_info
    else:
        last_try = (self.request.retries == self.max_retries)
        if last_try:
            # Out of retries; give up and mark the item as failed.
            af.stt_status = af.STT_FAILED
            af.save(index=False)  # Skip indexing if we have no new content.
            return returned_info
        else:
            # Raise/catch so the retry carries a real exception instance for
            # Celery's error bookkeeping, then reschedule with backoff.
            try:
                raise Exception("STT not yet complete.")
            except Exception as exc:
                raise self.retry(exc=exc, countdown=countdown)
@app.task
def delete_blob_from_google(returned_info, client=None):
    """Remove the staged audio blob from Google Storage.

    If the bucket is set up properly, the lifecycle rules will automatically
    delete items, however, the sooner we do so the better.
    """
    if client is None:
        client = get_storage_client()
    bucket = client.get_bucket(returned_info['bucket_name'])
    bucket.get_blob(returned_info['blob_name']).delete()
| agpl-3.0 |
jmetzen/scikit-learn | sklearn/externals/joblib/func_inspect.py | 239 | 11338 | """
My own variation on function-specific inspect-like features.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from itertools import islice
import inspect
import warnings
import re
import os
from ._compat import _basestring
from .logger import pformat
from ._memory_helpers import open_py_source
def get_func_code(func):
    """ Attempts to retrieve a reliable function code hash.
    The reason we don't use inspect.getsource is that it caches the
    source, whereas we want this to be modified on the fly when the
    function is modified.
    Returns
    -------
    func_code: string
        The function code
    source_file: string
        The path to the file in which the function is defined.
    first_line: int
        The first line of the code in the source file.
    Notes
    ------
    This function does a bit more magic than inspect, and is thus
    more robust.
    """
    source_file = None
    try:
        code = func.__code__
        source_file = code.co_filename
        if not os.path.exists(source_file):
            # Use inspect for lambda functions and functions defined in an
            # interactive shell, or in doctests
            source_code = ''.join(inspect.getsourcelines(func)[0])
            line_no = 1
            if source_file.startswith('<doctest '):
                # Raw string: the original non-raw literal contained the
                # invalid escape sequences '\<' and '\>', which emit
                # DeprecationWarning/SyntaxWarning on modern Pythons.
                source_file, line_no = re.match(
                    r'\<doctest (.*\.rst)\[(.*)\]\>',
                    source_file).groups()
                line_no = int(line_no)
                source_file = '<doctest %s>' % source_file
            return source_code, source_file, line_no
        # Try to retrieve the source code.
        with open_py_source(source_file) as source_file_obj:
            first_line = code.co_firstlineno
            # All the lines after the function definition:
            source_lines = list(islice(source_file_obj, first_line - 1, None))
            return ''.join(inspect.getblock(source_lines)), source_file, first_line
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the fallback below.
        # If the source code fails, we use the hash. This is fragile and
        # might change from one session to another.
        if hasattr(func, '__code__'):
            # Python 3.X
            return str(func.__code__.__hash__()), source_file, -1
        else:
            # Weird objects like numpy ufunc don't have __code__
            # This is fragile, as quite often the id of the object is
            # in the repr, so it might not persist across sessions,
            # however it will work for ufuncs.
            return repr(func), source_file, -1
def _clean_win_chars(string):
"""Windows cannot encode some characters in filename."""
import urllib
if hasattr(urllib, 'quote'):
quote = urllib.quote
else:
# In Python 3, quote is elsewhere
import urllib.parse
quote = urllib.parse.quote
for char in ('<', '>', '!', ':', '\\'):
string = string.replace(char, quote(char))
return string
def get_func_name(func, resolv_alias=True, win_characters=True):
    """ Return the function import path (as a list of module names), and
    a name for the function.
    Parameters
    ----------
    func: callable
        The func to inspect
    resolv_alias: boolean, optional
        If true, possible local aliases are indicated.
    win_characters: boolean, optional
        If true, substitute special characters using urllib.quote
        This is useful in Windows, as it cannot encode some filenames
    Returns
    -------
    (module, name): a list of module-path components and the function name.
    """
    if hasattr(func, '__module__'):
        module = func.__module__
    else:
        try:
            module = inspect.getmodule(func)
        except TypeError:
            if hasattr(func, '__class__'):
                module = func.__class__.__module__
            else:
                module = 'unknown'
    if module is None:
        # Happens in doctests, eg
        module = ''
    if module == '__main__':
        try:
            filename = os.path.abspath(inspect.getsourcefile(func))
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # propagate instead of being treated as "no source file".
            filename = None
        if filename is not None:
            # mangling of full path to filename
            parts = filename.split(os.sep)
            if parts[-1].startswith('<ipython-input'):
                # function is defined in an IPython session. The filename
                # will change with every new kernel instance. This hack
                # always returns the same filename
                parts[-1] = '__ipython-input__'
            filename = '-'.join(parts)
            if filename.endswith('.py'):
                filename = filename[:-3]
            module = module + '-' + filename
    module = module.split('.')
    if hasattr(func, 'func_name'):
        # Python 2 function object
        name = func.func_name
    elif hasattr(func, '__name__'):
        name = func.__name__
    else:
        name = 'unknown'
    # Hack to detect functions not defined at the module-level
    if resolv_alias:
        # TODO: Maybe add a warning here?
        if hasattr(func, 'func_globals') and name in func.func_globals:
            if not func.func_globals[name] is func:
                name = '%s-alias' % name
    if inspect.ismethod(func):
        # We need to add the name of the class
        if hasattr(func, 'im_class'):
            klass = func.im_class
            module.append(klass.__name__)
    if os.name == 'nt' and win_characters:
        # Stupid windows can't encode certain characters in filenames
        name = _clean_win_chars(name)
        module = [_clean_win_chars(s) for s in module]
    return module, name
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
    """ Filters the given args and kwargs using a list of arguments to
    ignore, and a function specification.
    Parameters
    ----------
    func: callable
        Function giving the argument specification
    ignore_lst: list of strings
        List of arguments to ignore (either a name of an argument
        in the function spec, or '*', or '**')
    *args: list
        Positional arguments passed to the function.
    **kwargs: dict
        Keyword arguments passed to the function
    Returns
    -------
    filtered_args: list
        List of filtered positional and keyword arguments.
    """
    # NOTE: the mutable default `kwargs=dict()` is safe here only because the
    # dict is never mutated when the default is used (kwargs.pop only runs on
    # caller-supplied dicts) -- do not add mutation of the default.
    args = list(args)
    if isinstance(ignore_lst, _basestring):
        # Catch a common mistake
        raise ValueError('ignore_lst must be a list of parameters to ignore '
            '%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
    # Special case for functools.partial objects
    if (not inspect.ismethod(func) and not inspect.isfunction(func)):
        if ignore_lst:
            warnings.warn('Cannot inspect object %s, ignore list will '
                'not work.' % func, stacklevel=2)
        # Uninspectable callable: pass everything through unfiltered.
        return {'*': args, '**': kwargs}
    # NOTE(review): inspect.getargspec was removed in Python 3.11; this
    # vendored copy targets older interpreters -- confirm before reuse.
    arg_spec = inspect.getargspec(func)
    # We need to if/them to account for different versions of Python
    if hasattr(arg_spec, 'args'):
        arg_names = arg_spec.args
        arg_defaults = arg_spec.defaults
        arg_keywords = arg_spec.keywords
        arg_varargs = arg_spec.varargs
    else:
        arg_names, arg_varargs, arg_keywords, arg_defaults = arg_spec
    arg_defaults = arg_defaults or {}
    if inspect.ismethod(func):
        # First argument is 'self', it has been removed by Python
        # we need to add it back:
        args = [func.__self__, ] + args
    # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
    # as on ndarrays.
    _, name = get_func_name(func, resolv_alias=False)
    arg_dict = dict()
    arg_position = -1
    for arg_position, arg_name in enumerate(arg_names):
        if arg_position < len(args):
            # Positional argument or keyword argument given as positional
            arg_dict[arg_name] = args[arg_position]
        else:
            # Negative index into the defaults tuple: defaults align with the
            # tail of arg_names.
            position = arg_position - len(arg_names)
            if arg_name in kwargs:
                arg_dict[arg_name] = kwargs.pop(arg_name)
            else:
                try:
                    arg_dict[arg_name] = arg_defaults[position]
                except (IndexError, KeyError):
                    # Missing argument
                    raise ValueError('Wrong number of arguments for %s%s:\n'
                                     '     %s(%s, %s) was called.'
                        % (name,
                           inspect.formatargspec(*inspect.getargspec(func)),
                           name,
                           repr(args)[1:-1],
                           ', '.join('%s=%s' % (k, v)
                                     for k, v in kwargs.items())
                           )
                        )
    varkwargs = dict()
    for arg_name, arg_value in sorted(kwargs.items()):
        if arg_name in arg_dict:
            arg_dict[arg_name] = arg_value
        elif arg_keywords is not None:
            varkwargs[arg_name] = arg_value
        else:
            raise TypeError("Ignore list for %s() contains an unexpected "
                            "keyword argument '%s'" % (name, arg_name))
    if arg_keywords is not None:
        arg_dict['**'] = varkwargs
    if arg_varargs is not None:
        # Whatever positional args were not consumed by named parameters.
        varargs = args[arg_position + 1:]
        arg_dict['*'] = varargs
    # Now remove the arguments to be ignored
    for item in ignore_lst:
        if item in arg_dict:
            arg_dict.pop(item)
        else:
            raise ValueError("Ignore list: argument '%s' is not defined for "
                             "function %s%s" %
                             (item, name,
                              inspect.formatargspec(arg_names,
                                                    arg_varargs,
                                                    arg_keywords,
                                                    arg_defaults,
                                                    )))
    # XXX: Return a sorted list of pairs?
    return arg_dict
def format_signature(func, *args, **kwargs):
    """Return ``(module_path, signature)`` strings describing a call to
    *func* with the given arguments, for human-readable logging."""
    # XXX: Should this use inspect.formatargvalues/formatargspec?
    module, name = get_func_name(func)
    module_parts = [m for m in module if m]
    if module_parts:
        module_parts.append(name)
        module_path = '.'.join(module_parts)
    else:
        module_path = name
    formatted = []
    previous_length = 0
    for value in args:
        text = pformat(value, indent=2)
        # Truncate very long reprs, and start a new line after a long one.
        if len(text) > 1500:
            text = '%s...' % text[:700]
        if previous_length > 80:
            text = '\n%s' % text
        previous_length = len(text)
        formatted.append(text)
    formatted.extend('%s=%s' % (key, pformat(val))
                     for key, val in kwargs.items())
    signature = '%s(%s)' % (name, ', '.join(formatted))
    return module_path, signature
def format_call(func, args, kwargs, object_name="Memory"):
    """ Returns a nicely formatted statement displaying the function
    call with the given arguments.
    """
    path, signature = format_signature(func, *args, **kwargs)
    separator = 80 * '_'
    msg = '%s\n[%s] Calling %s...\n%s' % (separator, object_name,
                                          path, signature)
    return msg
# XXX: Not using logging framework
#self.debug(msg)
| bsd-3-clause |
dana-i2cat/felix | optin_manager/src/python/openflow/optin_manager/opts/forms.py | 4 | 5414 | from django import forms
from openflow.optin_manager.flowspace.utils import dotted_ip_to_int, mac_to_int
from django.forms.util import ErrorList
import re
from openflow.optin_manager.opts.models import AdminFlowSpace
class MACAddressForm(forms.Field):
    """Form field accepting MAC addresses such as ``00:1a:2b:3c:4d:5e``.

    Segments may be 1 or 2 hex digits and may be separated by ``:`` or ``-``.
    """
    # Compiled once at class-creation time instead of on every clean() call.
    _MAC_PATTERN = re.compile(r"^([0-9a-fA-F]{1,2}[:-]){5}[0-9a-fA-F]{1,2}$")

    def clean(self, value):
        """Return *value* unchanged if it is a valid MAC address.

        :raises forms.ValidationError: if *value* does not match the pattern.
        """
        if not self._MAC_PATTERN.match(value):
            raise forms.ValidationError("Not a valid MAC Address")
        return value
class UploadFileForm(forms.Form):
    """Single-field form used to bulk-load opt-in rules from an uploaded file."""
    # The uploaded rules file; label is shown in the admin UI.
    file = forms.FileField(label="Add Rules from File:")
class AdminOptInForm(forms.Form):
    """Form describing an OpenFlow flowspace as inclusive [start, end] ranges.

    Each flowspace dimension (MAC src/dst, ethertype, VLAN, IP src/dst,
    IP protocol, transport src/dst port) is captured as a pair of fields
    suffixed ``_s`` (start) and ``_e`` (end). Defaults cover the full range
    of each dimension (i.e. "match everything").
    """
    mac_from_s = MACAddressForm(initial = "00:00:00:00:00:00")
    mac_from_e = MACAddressForm(initial = "FF:FF:FF:FF:FF:FF")
    mac_to_s = MACAddressForm(initial = "00:00:00:00:00:00")
    mac_to_e = MACAddressForm(initial = "FF:FF:FF:FF:FF:FF")
    eth_type_s = forms.IntegerField(max_value = 0xFFFF, initial = 0)
    eth_type_e = forms.IntegerField(max_value = 0xFFFF, initial = 0xFFFF)
    vlan_id_s = forms.IntegerField(max_value = 4095, initial = 0)
    vlan_id_e = forms.IntegerField(max_value = 4095, initial = 4095)
    ip_from_s = forms.IPAddressField(initial = "0.0.0.0")
    ip_from_e = forms.IPAddressField(initial = "255.255.255.255")
    ip_to_s = forms.IPAddressField(initial = "0.0.0.0")
    ip_to_e = forms.IPAddressField(initial = "255.255.255.255")
    ip_proto_s = forms.IntegerField(max_value = 255, initial = 0)
    ip_proto_e = forms.IntegerField(max_value = 255, initial = 255)
    tp_from_s = forms.IntegerField(max_value = 0xFFFF, initial = 0)
    tp_from_e = forms.IntegerField(max_value = 0xFFFF, initial = 0xFFFF)
    tp_to_s = forms.IntegerField(max_value = 0xFFFF, initial = 0)
    tp_to_e = forms.IntegerField(max_value = 0xFFFF, initial = 0xFFFF)
    def clean(self):
        """Cross-field validation: every ``*_s`` must be <= its ``*_e``.

        Also stashes ``cleaned_data`` on the instance for get_flowspace().
        :raises forms.ValidationError: if any range is empty (start > end).
        """
        if self._errors:
            # Per-field validation already failed; skip range checks.
            return self.cleaned_data
        cleaned_data = self.cleaned_data
        # Saved so get_flowspace() can be called after is_valid().
        self.saved_cleaned_data = cleaned_data
        mac_from_s = cleaned_data.get("mac_from_s")
        mac_from_e = cleaned_data.get("mac_from_e")
        mac_to_s = cleaned_data.get("mac_to_s")
        mac_to_e = cleaned_data.get("mac_to_e")
        eth_type_s = cleaned_data.get("eth_type_s")
        eth_type_e = cleaned_data.get("eth_type_e")
        vlan_id_s = cleaned_data.get("vlan_id_s")
        vlan_id_e = cleaned_data.get("vlan_id_e")
        ip_from_s = cleaned_data.get("ip_from_s")
        ip_from_e = cleaned_data.get("ip_from_e")
        ip_to_s = cleaned_data.get("ip_to_s")
        ip_to_e = cleaned_data.get("ip_to_e")
        ip_proto_s = cleaned_data.get("ip_proto_s")
        ip_proto_e = cleaned_data.get("ip_proto_e")
        tp_from_s = cleaned_data.get("tp_from_s")
        tp_from_e = cleaned_data.get("tp_from_e")
        tp_to_s = cleaned_data.get("tp_to_s")
        tp_to_e = cleaned_data.get("tp_to_e")
        # MACs and IPs are compared numerically after conversion.
        if (mac_to_int(mac_from_s) > mac_to_int(mac_from_e)):
            raise forms.ValidationError("Empty Source MAC Range")
        if (mac_to_int(mac_to_s) > mac_to_int(mac_to_e)):
            raise forms.ValidationError("Empty Destination MAC Range")
        if (eth_type_s > eth_type_e):
            raise forms.ValidationError("Empty Ethernet Type Range")
        if (vlan_id_s > vlan_id_e):
            raise forms.ValidationError("Empty VLAN Range")
        if (dotted_ip_to_int(ip_from_s) > dotted_ip_to_int(ip_from_e)):
            raise forms.ValidationError("Empty Source IP Range")
        if (dotted_ip_to_int(ip_to_s) > dotted_ip_to_int(ip_to_e)):
            raise forms.ValidationError("Empty Destination IP Range")
        if (ip_proto_s > ip_proto_e):
            raise forms.ValidationError("Empty IP Protocol Range")
        if (tp_from_s > tp_from_e):
            raise forms.ValidationError("Empty Source Transport Port Range")
        if (tp_to_s > tp_to_e):
            raise forms.ValidationError("Empty Destination Transport Port Range")
        return cleaned_data
    def get_flowspace(self,FS_Object):
        """Instantiate *FS_Object* (a FlowSpace model class) from this form.

        Must be called after validation; returns None if the form had errors.
        MAC/IP strings are converted to their integer representations.
        """
        cleaned_data = self.saved_cleaned_data
        if self._errors:
            return None
        return FS_Object(
            mac_src_s = mac_to_int(cleaned_data.get("mac_from_s")),
            mac_src_e = mac_to_int(cleaned_data.get("mac_from_e")),
            mac_dst_s = mac_to_int(cleaned_data.get("mac_to_s")),
            mac_dst_e = mac_to_int(cleaned_data.get("mac_to_e")),
            eth_type_s = int(cleaned_data.get("eth_type_s")),
            eth_type_e = int(cleaned_data.get("eth_type_e")),
            vlan_id_s = int(cleaned_data.get("vlan_id_s")),
            vlan_id_e = int(cleaned_data.get("vlan_id_e")),
            ip_src_s = dotted_ip_to_int(cleaned_data.get("ip_from_s")),
            ip_src_e = dotted_ip_to_int(cleaned_data.get("ip_from_e")),
            ip_dst_s = dotted_ip_to_int(cleaned_data.get("ip_to_s")),
            ip_dst_e = dotted_ip_to_int(cleaned_data.get("ip_to_e")),
            ip_proto_s = int(cleaned_data.get("ip_proto_s")),
            ip_proto_e = int(cleaned_data.get("ip_proto_e")),
            tp_src_s = int(cleaned_data.get("tp_from_s")),
            tp_src_e = int(cleaned_data.get("tp_from_e")),
            tp_dst_s = int(cleaned_data.get("tp_to_s")),
            tp_dst_e = int(cleaned_data.get("tp_to_e")),
            )
| apache-2.0 |
pfarnach/altvote | migrations/versions/4517f9cbe1f9_.py | 1 | 1246 | """empty message
Revision ID: 4517f9cbe1f9
Revises: 42ef4a834d23
Create Date: 2016-04-13 22:34:37.727821
"""
# revision identifiers, used by Alembic.
revision = '4517f9cbe1f9'
down_revision = '42ef4a834d23'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate ballot_option/ballot_vote foreign keys with ON DELETE CASCADE."""
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the auto-named FKs and recreate them with ondelete='cascade' so
    # deleting a ballot removes its options, and deleting an option removes
    # its votes.
    op.drop_constraint(u'ballot_option_ballot_id_fkey', 'ballot_option', type_='foreignkey')
    op.create_foreign_key(None, 'ballot_option', 'ballot', ['ballot_id'], ['id'], ondelete='cascade')
    op.drop_constraint(u'ballot_vote_ballot_option_fkey', 'ballot_vote', type_='foreignkey')
    op.create_foreign_key(None, 'ballot_vote', 'ballot_option', ['ballot_option'], ['id'], ondelete='cascade')
    ### end Alembic commands ###
def downgrade():
    """Restore the original (non-cascading) foreign keys."""
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the cascading FKs (created unnamed, hence
    # constraint name None) and recreate the original named constraints.
    op.drop_constraint(None, 'ballot_vote', type_='foreignkey')
    op.create_foreign_key(u'ballot_vote_ballot_option_fkey', 'ballot_vote', 'ballot_option', ['ballot_option'], ['id'])
    op.drop_constraint(None, 'ballot_option', type_='foreignkey')
    op.create_foreign_key(u'ballot_option_ballot_id_fkey', 'ballot_option', 'ballot', ['ballot_id'], ['id'])
    ### end Alembic commands ###
| gpl-3.0 |
TomBaxter/waterbutler | waterbutler/providers/figshare/provider.py | 2 | 42750 | import json
import asyncio
import hashlib
from http import HTTPStatus
import aiohttp
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.providers.figshare.path import FigsharePath
from waterbutler.providers.figshare import metadata, settings
class FigshareProvider:
    """Factory that dispatches to the concrete Figshare provider.

    **On paths:** Figshare does not have any notion of paths and has a very flat
    structure. Top level objects are one of the following.
    A 'project' that contains 'articles'. The project can be either public or private.
    A 'collection' that points to private and/or public 'articles' and can itself be
    either public or private.
    An 'article' that contains 0 or more 'files' and may or may not be associated
    with a project. Articles may be either public or private.
    'Articles' are one of (currently) ten types. All but one of these 'defined_types'
    may contain no more than one file. The exception is the 'fileset' 'defined_type'
    which may contain more than one 'file'.

    The FigshareProvider allows for the possibility of a private 'article', a private
    'project', or a private 'collection' as the root of a waterbutler provider
    instance. The default configuration treats 'articles' with a 'defined_type' of
    'fileset' as a folder, and all other 'defined_type's as a file. In practice, when
    returning the metadata for the root(/) folder, only 'articles' of 'defined_type'
    'fileset' will be returned as a folder; all other 'articles' will be returned as
    a file if they contain a file and ignored if they do not.

    Valid FigsharePaths for root project/collection::
        /
        /<article_id for type fileset>/ (default configuration)
        /<article_id of any type>/<file_id>
    Valid FigsharePaths for root article::
        /
        /<file_id>
    Invalid paths: any article id without trailing slash, any non-fileset article
    with a trailing slash (default configuration), any path deeper than two
    segments under a project/collection root or one segment under an article root.

    API docs: https://docs.figshare.com/
    """
    def __new__(cls, auth, credentials, settings):
        container_type = settings['container_type']
        if container_type == 'project':
            concrete_class = FigshareProjectProvider
        elif container_type in ('article', 'fileset'):
            concrete_class = FigshareArticleProvider
        else:
            raise exceptions.ProviderError(
                'Invalid "container_type" {0}'.format(container_type)
            )
        return concrete_class(
            auth, credentials, dict(settings, container_id=settings['container_id'])
        )
class BaseFigshareProvider(provider.BaseProvider):
    """Behavior shared by the project- and article-rooted Figshare providers."""
    NAME = 'figshare'
    BASE_URL = settings.BASE_URL
    VIEW_URL = settings.VIEW_URL
    DOWNLOAD_URL = settings.DOWNLOAD_URL
    VALID_CONTAINER_TYPES = settings.VALID_CONTAINER_TYPES
    def __init__(self, auth, credentials, settings):
        super().__init__(auth, credentials, settings)
        self.token = self.credentials['token']
        self.container_type = self.settings['container_type']
        if self.container_type not in self.VALID_CONTAINER_TYPES:
            raise exceptions.ProviderError('{} is not a valid container type.'.format(self.container_type))
        if self.container_type == 'fileset':
            # A 'fileset' is addressed through the article endpoints.
            self.container_type = 'article'
        self.container_id = self.settings['container_id']
        self.metrics.add('container', {
            'given_type': self.settings['container_type'],
            'actual_type': self.container_type,
        })
    @property
    def root_path_parts(self):
        # URN prefix for this root, e.g. ('projects', <id>) or ('articles', <id>).
        return (self.container_type + 's', self.container_id)
    @property
    def default_headers(self):
        return {
            'Authorization': 'token {}'.format(self.token),
        }
    def build_url(self, is_public: bool, *segments, **query) -> str:  # type: ignore
        """A nice wrapper around furl, builds urls based on self.BASE_URL
        :param bool is_public: ``True`` if addressing public resource
        :param tuple \*segments: A tuple of strings joined into ``/foo/bar/``
        :param dict \*\*query: A dictionary that will be turned into query parameters ``?foo=bar``
        :rtype: str
        Subclassed to include handling of ``is_public`` argument. ``collection`` containers may
        contain public articles which are accessed through an URN with a different prefix.
        """
        if not is_public:
            # Private resources live under the /account/ prefix.
            segments = ('account', (*segments))
        return (super().build_url(*segments, **query))
    async def make_request(self, method, url, *args, **kwargs):
        """JSONifies ``data`` kwarg, if present and a ``dict``.
        :param str method: HTTP method
        :param str url: URL
        :param tuple \*args:
        :param dict \*\*kwargs:
        """
        if isinstance(kwargs.get('data'), dict):
            kwargs['data'] = json.dumps(kwargs['data'])
        return await super().make_request(method, url, *args, **kwargs)
    def can_duplicate_names(self):
        """Figshare allows articles to have duplicate titles and files to have duplicate names, but
        does not allow the creation of duplicate files and folders.
        """
        return False
    async def _get_url_super(self, url):
        # Use super to avoid is_public logic
        # Allows for taking advantage of asyncio.gather
        response = await super().make_request('GET', url, expects=(200, ))
        return await response.json()
    def _path_split(self, path):
        """Strip trailing slash from path string, then split on remaining slashes.
        :param str path: url path string to be split.
        """
        return path.rstrip('/').split('/')
    async def download(self, path, **kwargs):
        """Download the file identified by ``path`` from this project.
        :param FigsharePath path: FigsharePath to file you want to download
        :rtype ResponseStreamReader:
        """
        if not path.is_file:
            raise exceptions.NotFoundError(str(path))
        file_metadata = await self.metadata(path)
        download_url = file_metadata.extra['downloadUrl']
        if download_url is None:
            raise exceptions.DownloadError('Download not available', code=HTTPStatus.FORBIDDEN)
        # Private downloads must carry the OAuth token as a query param.
        params = {} if file_metadata.is_public else {'token': self.token}
        resp = await aiohttp.request('GET', download_url, params=params)
        if resp.status == 404:
            await resp.release()
            # NOTE(review): a remote 404 is surfaced as FORBIDDEN here --
            # presumably deliberate, but confirm against the callers.
            raise exceptions.DownloadError('Download not available', code=HTTPStatus.FORBIDDEN)
        return streams.ResponseStreamReader(resp)
    def path_from_metadata(self, parent_path, metadata):
        """Build FigsharePath for child entity given child's metadata and parent's path object.
        :param FigsharePath parent_path: path obj for child's parent
        :param metadata: Figshare*Metadata object for child
        """
        return parent_path.child(metadata.name, _id=str(metadata.id),
                                 folder=(metadata.kind == 'folder'))
    async def revisions(self, path, **kwargs):
        # Public articles have revisions, but projects, collections, and private articles do not.
        # For now, return a single Revision labeled "latest".
        return [metadata.FigshareFileRevisionMetadata()]
    async def _upload_file(self, article_id, name, stream):
        """Uploads a file to Figshare and returns the file id.
        :param str article_id: the id of the parent article
        :param str name: the name of the file
        :param stream: the file stream to upload
        :rtype: `str`
        :return: id of new file
        """
        # Process for creating a file:
        # 1. Get file ID
        file_id = await self._make_file_placeholder(article_id, name, stream.size)
        # 2. Get upload url and file parts info
        # added sleep() as file was not availble right away after getting 201 back.
        # polling with HEADs is another possible solution
        await asyncio.sleep(settings.FILE_CREATE_WAIT)
        upload_url, parts = await self._get_file_upload_url(article_id, file_id)
        # 3. Upload parts
        self.metrics.add('upload.parts.count', len(parts))
        await self._upload_file_parts(stream, upload_url, parts)
        # 4. Mark upload complete
        await self._mark_upload_complete(article_id, file_id)
        return file_id
    async def _make_file_placeholder(self, article_id, name, size):
        """Create a placeholder for a file to be uploaded later. Takes the id of the parent
        article, a name for the file, and the size. Returns the id set aside for the file.
        :param str article_id: the id of the parent article
        :param str name: the name of the file
        :param int size: the size of the file
        :returns str: the id of the file placeholder
        """
        file_resp = await self.make_request(
            'POST',
            self.build_url(False, 'articles', article_id, 'files'),
            data=json.dumps({'name': name, 'size': size}),
            expects=(201, ),
        )
        file_json = await file_resp.json()
        # The new file id is the last segment of the returned location URL.
        return file_json['location'].rsplit('/', 1)[1]
    async def _get_file_upload_url(self, article_id, file_id):
        """Request an upload url and partitioning spec from Figshare.
        See: https://docs.figshare.com/api/file_uploader/
        :param str article_id: the id of the parent article
        :param str file_id: the name of the file
        :returns (str, list): the upload url and the parts specification
        """
        # TODO: retry with backoff
        resp = await self.make_request(
            'GET',
            self.build_url(False, 'articles', article_id, 'files', file_id),
            expects=(200, 404),
        )
        if resp.status == 404:
            await resp.release()
            raise exceptions.ProviderError(
                'Could not get upload_url. File creation may have taken more '
                'than {} seconds to finish.'.format(str(settings.FILE_CREATE_WAIT)))
        upload_json = await resp.json()
        upload_url = upload_json['upload_url']
        parts_resp = await self.make_request('GET', upload_url, expects=(200, ),)
        parts_json = await parts_resp.json()
        return upload_url, parts_json['parts']  # str, list
    async def _upload_file_parts(self, stream, upload_url, parts):
        """Takes a stream, the upload url, and a list of parts to upload, and send the chunks
        dictated by ``parts`` to figshare.
        See: https://docs.figshare.com/api/file_uploader/
        :param stream: the file stream to upload
        :param str upload_url: the base url to upload to
        :param list parts: a structure describing the expected partitioning of the file
        """
        for part in parts:
            # Offsets are inclusive, hence the +1.
            size = part['endOffset'] - part['startOffset'] + 1
            part_number = part['partNo']
            upload_response = await self.make_request(
                'PUT',
                upload_url + '/' + str(part_number),
                data=stream.read(size),
                expects=(200, ),
            )
            await upload_response.release()
    async def _mark_upload_complete(self, article_id, file_id):
        """Signal to Figshare that all of the parts of the file have been uploaded successfully.
        See: https://docs.figshare.com/api/file_uploader/
        :param str article_id: the id of the parent article
        :param str file_id: the name of the file
        """
        resp = await self.make_request(
            'POST',
            self.build_url(False, 'articles', article_id, 'files', file_id),
            expects=(202, ),
        )
        await resp.release()
class FigshareProjectProvider(BaseFigshareProvider):
    def __init__(self, *args, **kwargs):
        # No project-specific initialization; defers entirely to the base class.
        super().__init__(*args, **kwargs)
    async def validate_v1_path(self, path, **kwargs):
        """Take a string path from the url and attempt to map it to an entity within this project.
        If the entity is found, returns a FigsharePath object with the entity identifiers included.
        Otherwise throws a 404 Not Found. Will also assert that the entity type inferred from the
        path matches the type of the entity at that url.
        :param str path: entity path from the v1 API
        :rtype FigsharePath:
        """
        if path == '/':
            return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
        path_parts = self._path_split(path)
        if len(path_parts) not in (2, 3):
            raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
        article_id = path_parts[1]
        file_id = path_parts[2] if len(path_parts) == 3 else None
        articles = await self._get_all_articles()
        # TODO: need better way to get public/private
        # This call's return value is currently busted at figshare for collections. Figshare always
        # returns private-looking urls.
        is_public = False
        for item in articles:
            if '/articles/' + article_id in item['url']:
                article_name = item['title']
                if settings.PRIVATE_IDENTIFIER not in item['url']:
                    is_public = True
        # NOTE(review): if no listed article matches article_id, article_name
        # is never bound and the code below raises NameError instead of
        # NotFoundError -- confirm and consider raising explicitly here.
        article_segments = (*self.root_path_parts, 'articles', article_id)
        if file_id:
            file_response = await self.make_request(
                'GET',
                self.build_url(is_public, *article_segments, 'files', file_id),
                expects=(200, ),
            )
            file_json = await file_response.json()
            file_name = file_json['name']
            if path[-1] == '/':
                # A trailing slash means the caller asked for a folder.
                raise exceptions.NotFoundError('File paths must not end with "/". '
                                               '{} not found.'.format(path))
            return FigsharePath('/' + article_name + '/' + file_name,
                                _ids=(self.container_id, article_id, file_id),
                                folder=False,
                                is_public=is_public)
        article_response = await self.make_request(
            'GET',
            self.build_url(is_public, *article_segments),
            expects=(200, ),
        )
        article_json = await article_response.json()
        if article_json['defined_type'] in settings.FOLDER_TYPES:
            if not path[-1] == '/':
                raise exceptions.NotFoundError('Folder paths must end with "/". {} not found.'.format(path))
            return FigsharePath('/' + article_name + '/', _ids=(self.container_id, article_id),
                                folder=True, is_public=is_public)
        raise exceptions.NotFoundError('This article is not configured as a folder defined_type. '
                                       '{} not found.'.format(path))
    async def validate_path(self, path, **kwargs):
        """Take a string path from the url and attempt to map it to an entity within this project.
        If the entity is found, returns a FigsharePath object with the entity identifiers included.
        Otherwise returns a FigsharePath with empty identifiers.
        :param str path: identifier_path URN as passed through the v0 API
        :rtype FigsharePath:
        Quirks:
        * v0 may pass an identifier_path whose last part is a name and not an identifier, in the
          case of file/folder creation calls.
        * validate_path validates parent and returns a FigsharePath as accurately as possible.
        """
        if path == '/':
            return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
        path_parts = self._path_split(path)
        if len(path_parts) not in (2, 3):
            raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
        article_id = path_parts[1]
        file_id = path_parts[2] if len(path_parts) == 3 else None
        articles = await self._get_all_articles()
        # TODO: need better way to get public/private
        # This call's return value is currently busted at figshare for collections. Figshare always
        # returns private-looking urls.
        is_public = False
        for item in articles:
            if '/articles/' + article_id in item['url']:
                article_name = item['title']
                if settings.PRIVATE_IDENTIFIER not in item['url']:
                    is_public = True
        # NOTE(review): article_name is unbound if no article matched; the
        # branches below that use it would then raise NameError -- confirm.
        article_segments = (*self.root_path_parts, 'articles', article_id)
        if file_id:
            file_response = await self.make_request(
                'GET',
                self.build_url(is_public, *article_segments, 'files', file_id),
                expects=(200, 404, ),
            )
            if file_response.status == 200:
                file_response_json = await file_response.json()
                file_name = file_response_json['name']
                return FigsharePath('/' + article_name + '/' + file_name,
                                    _ids=(self.container_id, article_id, file_id),
                                    folder=False,
                                    is_public=is_public)
            await file_response.release()
        article_response = await self.make_request(
            'GET',
            self.build_url(is_public, *article_segments),
            expects=(200, 404, ),
        )
        if article_response.status == 200:
            article_json = await article_response.json()
            if article_json['defined_type'] in settings.FOLDER_TYPES:
                # Case of v0 file creation
                if file_id:
                    # File does not exist yet; last segment is a name, not an id.
                    ids = ('', article_id, '')
                    folder = False
                    path_urn = '/' + article_name + '/' + file_id
                else:
                    ids = ('', article_id)
                    folder = True
                    path_urn = '/' + article_name + '/'
                return FigsharePath(path_urn, _ids=ids, folder=folder, is_public=is_public)
        else:
            await article_response.release()
        if file_id:
            # Catch for if neither file nor article exist
            raise exceptions.NotFoundError(path)
        # Return for v0 folder creation
        return FigsharePath(path, _ids=('', ''), folder=True, is_public=False)
    async def revalidate_path(self, parent_path, child_name, folder):
        """Look for file or folder named ``child_name`` under ``parent_path``. If it finds a match,
        it returns a FigsharePath object with the appropriate ids set. Otherwise, it returns a
        FigsharePath where the ids are set to ``None``.
        Due to the fact that figshare allows duplicate titles/names for
        articles/files, revalidate_path can not be relied on to always return
        the correct id of an existing child_name. It will return the first id that
        matches the folder and child_name arguments or '' if no match.
        :param FigsharePath parent_path: Path of parent
        :param str child_name: Name of child
        :param bool folder: ``True`` if child is folder
        :rtype: ``FigsharePath``
        :return: a FigsharePath object, with ids set if a match was found
        """
        parent_is_folder = False
        urn_parts = (*self.root_path_parts, 'articles')
        child_id = None
        if not parent_path.is_root:  # parent is fileset or article
            if not folder:  # child is article/file
                # Fetch the parent article and scan its files for child_name.
                list_children_response = await self.make_request(
                    'GET',
                    self.build_url(False, *urn_parts, parent_path.identifier),
                    expects=(200, ),
                )
                article_json = await list_children_response.json()
                for file in article_json['files']:
                    if file['name'] == child_name:
                        child_id = str(file['id'])
                        break
            return parent_path.child(child_name, _id=child_id, folder=folder,
                                     parent_is_folder=parent_is_folder)
        # parent is root
        children = await self._get_all_articles()
        # Fetch all article details concurrently.
        articles = await asyncio.gather(*[
            self._get_url_super(article_json['url'])
            for article_json in children
        ])
        for article in articles:
            is_folder = article['defined_type'] in settings.FOLDER_TYPES
            article_id = str(article['id'])
            article_name = str(article['title'])
            if folder != is_folder:
                continue
            elif folder:
                if article_name == child_name:
                    child_id = article_id
                    break
            else:
                parent_is_folder = False
                for file in article['files']:
                    if file['name'] == child_name:
                        # Re-root the parent path at the matching article.
                        parent_path = parent_path.child(article_name, _id=article_id, folder=False)
                        child_id = str(file['id'])
                        break
        return parent_path.child(child_name, _id=child_id, folder=folder,
                                 parent_is_folder=parent_is_folder)
async def upload(self, stream, path, conflict='replace', **kwargs):
    """Upload a file to provider root or to an article whose defined_type is
    configured to represent a folder.

    :param asyncio.StreamReader stream: stream to upload
    :param FigsharePath path: FigsharePath to upload the file to.
    :param str conflict: conflict resolution policy; 'replace' on an existing
        file is rejected because figshare files cannot be updated in place
    :param dict \*\*kwargs: Will be passed to returned metadata object
    :return: tuple of (metadata object, created flag)
    :raises: :class:`waterbutler.core.exceptions.UnsupportedOperationError`
    :raises: :class:`waterbutler.core.exceptions.UploadChecksumMismatchError`
    """
    # figshare has no "update file" operation, so replacing an existing
    # entity (path already has an identifier) is unsupported.
    if path.identifier and conflict == 'replace':
        raise exceptions.UnsupportedOperationError('Files in Figshare cannot be updated')

    path, exists = await self.handle_name_conflict(path, conflict=conflict)
    if not path.parent.is_root:
        parent_resp = await self.make_request(
            'GET',
            self.build_url(False, *self.root_path_parts, 'articles', path.parent.identifier),
            expects=(200, ),
        )
        parent_json = await parent_resp.json()
        # If the parent article is not a folder-like type, drop it from the
        # path so the new file is treated as a direct child of the root.
        if not parent_json['defined_type'] in settings.FOLDER_TYPES:
            del path._parts[1]

    # Create article or retrieve article_id from existing article
    if not path.parent.is_root:
        article_id = path.parent.identifier
    else:
        article_name = json.dumps({'title': path.name})
        if self.container_type == 'project':
            article_id = await self._create_article(article_name)
        elif self.container_type == 'collection':
            # TODO don't think this is correct. Probably should POST to /accounts/articles
            article_id = await self._create_article(article_name)
            article_list = json.dumps({'articles': [article_id]})
            # Attach the freshly created article to the collection.
            await self.make_request(
                'POST',
                self.build_url(False, *self.root_path_parts, 'articles'),
                data=article_list,
                expects=(201, ),
            )

    # Hash the stream while uploading so the checksum can be verified below.
    stream.add_writer('md5', streams.HashStreamWriter(hashlib.md5))
    file_id = await self._upload_file(article_id, path.name, stream)

    # Build new file path and return metadata
    path = FigsharePath('/' + article_id + '/' + file_id,
                        _ids=(self.container_id, article_id, file_id),
                        folder=False,
                        is_public=False)
    metadata = await self.metadata(path, **kwargs)
    if stream.writers['md5'].hexdigest != metadata.extra['hashes']['md5']:
        raise exceptions.UploadChecksumMismatchError()

    return metadata, True
async def create_folder(self, path, **kwargs):
    """Create a folder at ``path``. Returns a `FigshareFolderMetadata` object if successful.

    Folders are modeled in figshare as articles with a ``defined_type`` of
    ``fileset`` and may only exist directly under the project/collection root.

    :param FigsharePath path: FigsharePath representing the folder to create
    :rtype: :class:`waterbutler.core.metadata.FigshareFolderMetadata`
    :raises: :class:`waterbutler.core.exceptions.CreateFolderError`
    """
    # A valid folder path has exactly two parts (root + folder name).
    if (len(path.parts) == 2) and path.is_folder:
        article_name = path.parts[-1].value
    else:
        raise exceptions.CreateFolderError(
            'Only projects and collections may contain folders. Unable to create '
            '"{}/"'.format(path.name),
            code=400,
        )

    article_data = json.dumps({'title': article_name, 'defined_type': 'fileset'})
    article_id = await self._create_article(article_data)
    # Fetch the article back so full metadata can be returned to the caller.
    get_article_response = await self.make_request(
        'GET',
        self.build_url(False, *self.root_path_parts, 'articles', article_id),
        expects=(200, ),
        throws=exceptions.CreateFolderError,
    )
    article_json = await get_article_response.json()

    return metadata.FigshareFolderMetadata(article_json)
async def delete(self, path, confirm_delete=0, **kwargs):
    """Delete the entity at ``path``.

    :param FigsharePath path: Path to be deleted
    :param int confirm_delete: Must be 1 to confirm root folder delete
    :rtype: None
    :raises: :class:`waterbutler.core.exceptions.NotFoundError`
    :raises: :class:`waterbutler.core.exceptions.DeleteError`

    Quirks:

    * If the FigsharePath given is for the provider root path, then the contents of the
      provider root path will be deleted, but not the provider root itself.
    """
    if not path.identifier:
        raise exceptions.NotFoundError(str(path))
    if path.is_root:
        if confirm_delete == 1:
            return await self._delete_container_contents()
        raise exceptions.DeleteError(
            'confirm_delete=1 is required for deleting root provider folder',
            code=400
        )

    if len(path.parts) == 2:
        # two-part path: must be a folder (fileset/folder-typed article)
        if not path.is_folder:
            raise exceptions.NotFoundError(str(path))
        delete_path = (*self.root_path_parts, 'articles', path.parts[1]._id)
    elif len(path.parts) == 3:
        # three-part path: must be a file within an article
        if path.is_folder:
            raise exceptions.NotFoundError(str(path))
        article_response = await self.make_request(
            'GET',
            self.build_url(False, *self.root_path_parts, 'articles', path.parts[1]._id),
            expects=(200, ),
        )
        article_json = await article_response.json()
        if article_json['defined_type'] in settings.FOLDER_TYPES:
            # file inside a folder-article: delete just the file
            delete_path = ('articles', path.parts[1]._id, 'files', path.parts[2]._id)
        else:
            # lone file: delete its containing article
            delete_path = (*self.root_path_parts, 'articles', path.parts[1]._id)
    else:
        # BUGFIX: previously paths of any other length fell through with
        # ``delete_path`` unbound, raising UnboundLocalError instead of a
        # meaningful error.  Mirror metadata()'s handling of invalid paths.
        raise exceptions.NotFoundError(str(path))

    delete_article_response = await self.make_request(
        'DELETE',
        self.build_url(False, *delete_path),
        expects=(204, ),
    )
    await delete_article_response.release()
async def metadata(self, path, **kwargs):
    """Return metadata for entity identified by ``path`` under the parent project.

    :param FigsharePath path: entity whose metadata will be returned
    :rtype: FigshareFileMetadata obj or list of Metadata objs
    :raises: :class:`waterbutler.core.exceptions.NotFoundError`
    """
    if path.is_root:
        path.is_public = False
        # Fetch all article metadata concurrently.
        contents = await asyncio.gather(*[
            # TODO: collections may need to use each['url'] for correct URN
            # Use _get_url_super ? figshare API needs to get fixed first.
            self._get_article_metadata(str(each['id']), path.is_public)
            for each in await self._get_all_articles()
        ])
        # _get_article_metadata returns None for file-less non-fileset
        # articles; filter those out.
        return [each for each in contents if each]

    if not path.parts[-1].identifier:
        raise exceptions.NotFoundError(str(path))
    if len(path.parts) > 3:
        raise exceptions.NotFoundError(str(path))

    article_response = await self.make_request(
        'GET',
        self.build_url(path.is_public, *self.root_path_parts,
                       'articles', path.parts[1].identifier),
        expects=(200, 404),
    )
    if article_response.status == 404:
        raise exceptions.NotFoundError(str(path))
    article_json = await article_response.json()

    if len(path.parts) == 2:  # folder (fileset-type article): list its files
        if article_json['defined_type'] not in settings.FOLDER_TYPES:
            raise exceptions.NotFoundError(str(path))
        contents = []
        for file in article_json['files']:
            contents.append(metadata.FigshareFileMetadata(article_json, raw_file=file))
        return contents
    elif len(path.parts) == 3:  # single file within an article
        for file in article_json['files']:
            if file['id'] == int(path.parts[2].identifier):
                return metadata.FigshareFileMetadata(article_json, raw_file=file)
        raise exceptions.NotFoundError(path.path)
    else:
        raise exceptions.NotFoundError('{} is not a valid path.'.format(path))
async def _get_article_metadata(self, article_id, is_public: bool):
    """Return Figshare*Metadata object for given article_id. Returns a FolderMetadata object
    for filesets, a FileMetadata object for other article types, and ``None`` if the article
    is not a fileset and has no files attached.

    Defined separately to allow for taking advantage of ``asyncio.gather``.

    :param str article_id: id of article whose metadata is requested
    :param bool is_public: ``True`` if article is accessed through public URN
    """
    response = await self.make_request(
        'GET',
        self.build_url(is_public, *self.root_path_parts, 'articles', article_id),
        expects=(200, ),
    )
    article_json = await response.json()
    if article_json['defined_type'] in settings.FOLDER_TYPES:
        return metadata.FigshareFolderMetadata(article_json)
    elif article_json['files']:
        return metadata.FigshareFileMetadata(article_json)

    return None  # article without attached file
async def _delete_container_contents(self):
    """Delete all articles within this Project or Collection.

    Articles are deleted one at a time; each DELETE is expected to
    return 204 No Content.
    """
    # TODO: Needs logic for skipping public articles in collections
    articles = await self._get_all_articles()
    for article in articles:
        delete_article_response = await self.make_request(
            'DELETE',
            self.build_url(False, *self.root_path_parts, 'articles', str(article['id'])),
            expects=(204, ),
        )
        # Release the (empty) response body so the connection can be reused.
        await delete_article_response.release()
async def _get_all_articles(self):
    """Get all articles under a project or collection. This endpoint is paginated and does not
    provide limit metadata, so we keep querying until we receive an empty array response.

    See https://docs.figshare.com/api/#searching-filtering-and-pagination for details.

    :return: list of article json objects
    :rtype: `list`
    """
    all_articles, keep_going, page = [], True, 1
    while keep_going:
        resp = await self.make_request(
            'GET',
            self.build_url(False, *self.root_path_parts, 'articles'),
            params={'page': str(page), 'page_size': str(settings.MAX_PAGE_SIZE)},
            expects=(200, ),
        )
        articles = await resp.json()
        all_articles.extend(articles)
        page += 1
        # An empty page signals we have consumed every article.
        keep_going = len(articles) > 0

    return all_articles
async def _create_article(self, data):
    """Create an article placeholder with the properties given in ``data``. Returns the id of
    the new article. See https://docs.figshare.com/api/articles/#create-a-new-article for
    valid properties.

    :param dict data: properties to set for new article (pre-serialized JSON string)
    :return: the id of the newly created article
    :rtype: `str`
    """
    resp = await self.make_request(
        'POST',
        self.build_url(False, *self.root_path_parts, 'articles'),
        data=data,
        expects=(201, ),
        throws=exceptions.CreateFolderError,
    )
    articles_json = await resp.json()
    # The API returns the new article's URL in 'location'; the id is its
    # final path segment.
    article_id = articles_json['location'].rsplit('/', 1)[1]
    return article_id
class FigshareArticleProvider(BaseFigshareProvider):
    """Provider whose container is a single figshare article; entities within it
    are the article's files."""

    def __init__(self, auth, credentials, settings, child=False):
        # NOTE(review): ``child`` is accepted but unused here — presumably kept
        # for signature compatibility with callers; confirm before removing.
        super().__init__(auth, credentials, settings)

    async def validate_v1_path(self, path, **kwargs):
        """Take a string path from the url and attempt to map it to an entity within this article.
        If the entity is found, returns a FigsharePath object with the entity identifiers included.
        Otherwise throws a 404 Not Found. Will also assert that the entity type inferred from the
        path matches the type of the entity at that url.

        :param str path: entity path from the v1 API
        :rtype FigsharePath:
        """
        if path == '/':
            return FigsharePath('/', _ids=('', ), folder=True, is_public=False)

        path_parts = self._path_split(path)
        if len(path_parts) != 2:
            raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
        file_id = path_parts[1]

        # 404 from this request propagates as the Not Found required by v1.
        resp = await self.make_request(
            'GET',
            self.build_url(False, *self.root_path_parts, 'files', file_id),
            expects=(200, ),
        )
        file_json = await resp.json()
        return FigsharePath('/' + file_json['name'], _ids=('', file_id), folder=False,
                            is_public=False)

    async def validate_path(self, path, **kwargs):
        """Take a string path from the url and attempt to map it to an entity within this article.
        If the entity is found, returns a FigsharePath object with the entity identifiers included.
        Otherwise returns a FigsharePath with empty identifiers.

        :param str path: identifier path URN as passed through the v0 API
        :rtype FigsharePath:

        Quirks:

        * v0 may pass an identifier_path whose last part is a name and not an identifier, in the
          case of file/folder creation calls.

        * validate_path validates parent and returns a FigsharePath as accurately as possible.
        """
        if path == '/':
            return FigsharePath('/', _ids=('', ), folder=True, is_public=False)

        path_parts = self._path_split(path)
        if len(path_parts) != 2:
            raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
        file_id = path_parts[1]

        resp = await self.make_request(
            'GET',
            self.build_url(False, *self.root_path_parts, 'files', file_id),
            expects=(200, 404, ),
        )

        if resp.status == 200:
            file_json = await resp.json()
            file_name = file_json['name']
            return FigsharePath('/' + file_name, _ids=('', file_id), folder=False, is_public=False)

        # catch for create file in article root: no such file yet, so the
        # last path part is a name rather than an id.
        await resp.release()
        return FigsharePath('/' + file_id, _ids=('', ''), folder=False, is_public=False)

    async def revalidate_path(self, parent_path, child_name, folder: bool=False):
        """Attempt to get child's id and return FigsharePath of child.

        ``revalidate_path`` is used to check for the existance of a child_name/folder
        within the parent. Returning a FigsharePath of child. Child will have _id
        if conflicting child_name/folder exists otherwise _id will be ''.

        :param FigsharePath parent_path: Path of parent
        :param str child_name: Name of child
        :param bool folder: ``True`` if child is folder

        Code notes:

        Due to the fact that figshare allows duplicate titles/names for
        articles/files, revalidate_path can not be relied on to always return
        the correct id of an existing child_name. It will return the first id that
        matches the folder and child_name arguments or '' if no match.
        """
        parent_is_folder = False
        urn_parts = self.root_path_parts
        if not parent_path.is_root:
            if folder:
                # Articles cannot nest folders; only the root may hold one.
                raise exceptions.NotFoundError(
                    '{} is not a valid parent path of folder={}. Folders can only exist at the '
                    'root level.'.format(parent_path.identifier_path, str(folder)))
            else:
                urn_parts = (*urn_parts, (parent_path.identifier))

        list_children_response = await self.make_request(
            'GET',
            self.build_url(False, *urn_parts),
            expects=(200, ),
        )

        child_id = ''
        article_json = await list_children_response.json()
        # First file whose name matches wins (duplicates are possible).
        for file in article_json['files']:
            if file['name'] == child_name:
                child_id = str(file['id'])
                break

        return parent_path.child(child_name, _id=child_id, folder=folder,
                                 parent_is_folder=parent_is_folder)

    async def upload(self, stream, path, conflict='replace', **kwargs):
        """Upload a file to provider root or to an article whose defined_type is
        configured to represent a folder.

        :param asyncio.StreamReader stream: stream to upload
        :param FigsharePath path: FigsharePath to upload the file to.
        :param dict \*\*kwargs: Will be passed to returned metadata object
        """
        path, exists = await self.handle_name_conflict(path, conflict=conflict)
        if not path.parent.is_root:
            parent_resp = await self.make_request(
                'GET',
                self.build_url(False, *self.root_path_parts, 'articles', path.parent.identifier),
                expects=(200, ),
            )
            parent_json = await parent_resp.json()
            # Non-folder parent article: treat the file as a root child.
            if not parent_json['defined_type'] in settings.FOLDER_TYPES:
                del path._parts[1]

        # Hash the stream while uploading so the checksum can be verified.
        stream.add_writer('md5', streams.HashStreamWriter(hashlib.md5))
        file_id = await self._upload_file(self.container_id, path.name, stream)

        # Build new file path and return metadata
        path = FigsharePath('/' + file_id, _ids=('', file_id), folder=False, is_public=False)
        metadata = await self.metadata(path, **kwargs)
        if stream.writers['md5'].hexdigest != metadata.extra['hashes']['md5']:
            raise exceptions.UploadChecksumMismatchError()

        return metadata, True

    async def create_folder(self, path, **kwargs):
        # Articles cannot contain folders; always a 400 for this provider.
        raise exceptions.CreateFolderError('Cannot create folders within articles.', code=400)

    async def delete(self, path, confirm_delete=0, **kwargs):
        """Delete the file at ``path``. If ``path`` is ``/`` and ``confirm_delete`` is ``1``, then
        delete all of the files within the article, but not the article itself.

        :param FigsharePath path: Path to be deleted
        :param int confirm_delete: Must be 1 to confirm root folder delete
        :rtype: None
        :raises: :class:`waterbutler.core.exceptions.NotFoundError`
        :raises: :class:`waterbutler.core.exceptions.DeleteError`

        Quirks:

        * If the FigsharePath given is for the provider root path, then the contents of the
          provider root path will be deleted, but not the provider root itself.
        """
        if path.is_root:
            if confirm_delete == 1:
                return await self._delete_container_contents()
            raise exceptions.DeleteError(
                'confirm_delete=1 is required for deleting root provider folder',
                code=400
            )

        await self._delete_file(path.parts[-1]._id)

    async def metadata(self, path, **kwargs):
        """Return metadata for entity identified by ``path``. May be the containing article or
        a file in a fileset article.

        :param FigsharePath path: entity whose metadata will be returned
        :rtype FigshareFileMetadata obj or list of Metadata objs:
        """
        article = await self._get_article(not path.is_public)
        if path.is_root:  # list files in article
            contents = []
            for file in article['files']:
                contents.append(metadata.FigshareFileMetadata(article, raw_file=file))
            return contents
        elif len(path.parts) == 2:  # metadata for a particular file
            for file in article['files']:
                if str(file['id']) == path.parts[1].identifier:
                    return metadata.FigshareFileMetadata(article, raw_file=file)

        # Invalid path, e.g. /422313/67709/1234
        raise exceptions.NotFoundError(str(path))

    async def _delete_container_contents(self):
        """Delete files within the containing article."""
        article = await self._get_article()
        for file in article['files']:
            await self._delete_file(str(file['id']))

    async def _get_article(self, is_owned=True):
        """Get the metadata for the container article. If the article is a public article not owned
        by the credentialed user, the request must be sent to a different endpoint.

        :param bool is_owned: Is this article owned by the credentialed user? Default: ``True``
        """
        resp = await self.make_request(
            'GET',
            self.build_url(not is_owned, *self.root_path_parts),
            expects=(200, ),
        )
        return await resp.json()

    async def _delete_file(self, file_id):
        """Delete a file from the root article. Docs:
        https://docs.figshare.com/api/articles/#delete-file-from-article

        :param str file_id: the id of the file to delete
        """
        resp = await self.make_request(
            'DELETE',
            self.build_url(False, *self.root_path_parts, 'files', file_id),
            expects=(204, ),
        )
        await resp.release()
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-relay/azure/mgmt/relay/models/relay_management_client_enums.py | 2 | 1280 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class Relaytype(Enum):
    """Relay type values exposed by the API (``NetTcp`` or ``Http``)."""

    net_tcp = "NetTcp"
    http = "Http"
class SkuTier(Enum):
    """SKU tier values; only ``Standard`` is defined."""

    standard = "Standard"
class ProvisioningStateEnum(Enum):
    """Provisioning lifecycle states reported by the service."""

    created = "Created"
    succeeded = "Succeeded"
    deleted = "Deleted"
    failed = "Failed"
    updating = "Updating"
    unknown = "Unknown"
class AccessRights(Enum):
    """Authorization-rule access rights (``Manage``, ``Send``, ``Listen``)."""

    manage = "Manage"
    send = "Send"
    listen = "Listen"
class KeyType(Enum):
    """Which access key of a rule is targeted (primary or secondary)."""

    primary_key = "PrimaryKey"
    secondary_key = "SecondaryKey"
class UnavailableReason(Enum):
    """Reasons a requested name may be unavailable."""

    none = "None"
    invalid_name = "InvalidName"
    subscription_is_disabled = "SubscriptionIsDisabled"
    name_in_use = "NameInUse"
    name_in_lockdown = "NameInLockdown"
    too_many_namespace_in_current_subscription = "TooManyNamespaceInCurrentSubscription"
| mit |
vicky2135/lucious | oscar/lib/python2.7/site-packages/django/utils/feedgenerator.py | 42 | 17789 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import warnings
from django.utils import datetime_safe, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
    """Format a datetime as an RFC 2822 date string (the format used by RSS).

    Naive datetimes are assumed to be in UTC and rendered with a '-0000'
    offset; aware datetimes use their actual UTC offset.
    """
    # We can't use strftime() because it produces locale-dependent results, so
    # we have to map english month and day names manually
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')

    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)

    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))

    if six.PY2:             # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')

    offset = date.utcoffset()
    # Historically, this function assumes that naive datetimes are in UTC.
    if offset is None:
        return time_str + '-0000'
    else:
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        hour, minute = divmod(timezone, 60)
        return time_str + '%+03d%02d' % (hour, minute)
def rfc3339_date(date):
    """Format a datetime as an RFC 3339 date string (the format used by Atom).

    Naive datetimes are assumed to be in UTC and suffixed with 'Z';
    aware datetimes use their actual UTC offset.
    """
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    time_str = date.strftime('%Y-%m-%dT%H:%M:%S')

    if six.PY2:             # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')

    offset = date.utcoffset()
    # Historically, this function assumes that naive datetimes are in UTC.
    if offset is None:
        return time_str + 'Z'
    else:
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        hour, minute = divmod(timezone, 60)
        return time_str + '%+03d:%02d' % (hour, minute)
def get_tag_uri(url, date):
    """
    Create a TagURI for the given URL, optionally dated.

    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    parsed = urlparse(url)
    if date is None:
        date_part = ''
    else:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    return 'tag:%s%s:%s/%s' % (parsed.hostname, date_part, parsed.path, parsed.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"

    def __init__(self, title, link, description, language=None, author_email=None,
                 author_name=None, author_link=None, subtitle=None, categories=None,
                 feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        def to_unicode(s):
            # Coerce to text but leave non-string objects (e.g. None) alone.
            return force_text(s, strings_only=True)
        if categories:
            categories = [force_text(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        # Channel-level attributes; extra kwargs are merged in for subclasses.
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        self.feed.update(kwargs)
        self.items = []

    def add_item(self, title, link, description, author_email=None,
                 author_name=None, author_link=None, pubdate=None, comments=None,
                 unique_id=None, unique_id_is_permalink=None, enclosure=None,
                 categories=(), item_copyright=None, ttl=None, updateddate=None,
                 enclosures=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate and updateddate, which are datetime.datetime
        objects, and enclosures, which is an iterable of instances of the
        Enclosure class.
        """
        def to_unicode(s):
            return force_text(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        if enclosure is None:
            enclosures = [] if enclosures is None else enclosures
        else:
            # Single-enclosure keyword is deprecated in favor of the
            # plural 'enclosures' iterable.
            warnings.warn(
                "The enclosure keyword argument is deprecated, "
                "use enclosures instead.",
                RemovedInDjango20Warning,
                stacklevel=2,
            )
            enclosures = [enclosure]
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'updateddate': updateddate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'unique_id_is_permalink': unique_id_is_permalink,
            'enclosures': enclosures,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        item.update(kwargs)
        self.items.append(item)

    def num_items(self):
        # Number of items currently added to the feed.
        return len(self.items)

    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}

    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass

    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}

    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass

    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')

    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()

    def latest_post_date(self):
        """
        Returns the latest item's pubdate or updateddate. If no items
        have either of these attributes this returns the current date/time.
        """
        latest_date = None
        date_keys = ('updateddate', 'pubdate')

        for item in self.items:
            for date_key in date_keys:
                item_date = item.get(date_key)
                if item_date:
                    if latest_date is None or item_date > latest_date:
                        latest_date = item_date

        return latest_date or datetime.datetime.now()
class Enclosure(object):
    """Represents an RSS enclosure."""

    def __init__(self, url, length, mime_type):
        """All args are expected to be Python Unicode objects."""
        self.url = iri_to_uri(url)
        self.length = length
        self.mime_type = mime_type
class RssFeed(SyndicationFeed):
    # Shared base for the RSS flavors; subclasses set _version and the
    # per-item element output.
    content_type = 'application/rss+xml; charset=utf-8'

    def write(self, outfile, encoding):
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")

    def rss_attributes(self):
        # Attributes on the <rss> root element.
        return {"version": self._version,
                "xmlns:atom": "http://www.w3.org/2005/Atom"}

    def write_items(self, handler):
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")

    def add_root_elements(self, handler):
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            # Self-referencing atom:link, per RSS best practice.
            handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])

    def endChannelElement(self, handler):
        handler.endElement("channel")

    @property
    def mime_type(self):
        # Deprecated alias for content_type.
        warnings.warn(
            'The mime_type attribute of RssFeed is deprecated. '
            'Use content_type instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        return self.content_type
class RssUserland091Feed(RssFeed):
    # RSS 0.91 items carry only title/link/description.
    _version = "0.91"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])

        # Author information.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            # RSS has no name-only author element; fall back to Dublin Core.
            handler.addQuickElement(
                "dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"}
            )

        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            guid_attrs = {}
            if isinstance(item.get('unique_id_is_permalink'), bool):
                guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower()
            handler.addQuickElement("guid", item['unique_id'], guid_attrs)
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])

        # Enclosure.
        if item['enclosures']:
            enclosures = list(item['enclosures'])
            if len(enclosures) > 1:
                raise ValueError(
                    "RSS feed items may only have one enclosure, see "
                    "http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
                )
            enclosure = enclosures[0]
            handler.addQuickElement('enclosure', '', {
                'url': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: https://tools.ietf.org/html/rfc4287
content_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosures.
for enclosure in item['enclosures']:
handler.addQuickElement('link', '', {
'rel': 'enclosure',
'href': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
    @property
    def mime_type(self):
        # Deprecated alias; prefer `content_type` (presumably defined on the
        # feed base class outside this chunk -- confirm before removing).
        warnings.warn(
            'The mime_type attribute of Atom1Feed is deprecated. '
            'Use content_type instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        return self.content_type
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause |
cjh1/StarCluster | starcluster/commands/completers.py | 19 | 4704 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
from starcluster import completion
from starcluster.logger import log
from base import CmdBase
class Completer(CmdBase):
    """
    Base class for all completer classes
    """
    @property
    def completer(self):
        # Subclasses implement _completer() to build the concrete completer.
        return self._completer()
class ClusterCompleter(Completer):
"""
Returns a list of all cluster names as completion options
"""
def _completer(self):
try:
cm = self.cm
clusters = cm.get_cluster_security_groups()
completion_list = [cm.get_tag_from_sg(sg.name)
for sg in clusters]
return completion.ListCompleter(completion_list)
except Exception, e:
log.error('something went wrong fix me: %s' % e)
class NodeCompleter(Completer):
"""
Returns a list of all node names as completion options
"""
def _completer(self):
try:
cm = self.cm
clusters = cm.get_cluster_security_groups()
compl_list = [cm.get_tag_from_sg(sg.name) for sg in clusters]
max_num_nodes = 0
for scluster in clusters:
num_instances = len(scluster.instances())
if num_instances > max_num_nodes:
max_num_nodes = num_instances
compl_list.extend(['master'])
compl_list.extend([str(i) for i in range(0, num_instances)])
compl_list.extend(["node%03d" % i
for i in range(1, num_instances)])
return completion.ListCompleter(compl_list)
except Exception, e:
print e
log.error('something went wrong fix me: %s' % e)
class ImageCompleter(Completer):
    """
    Returns a list of all registered image ids as completion options
    """
    def _completer(self):
        try:
            rimages = self.ec2.registered_images
            completion_list = [i.id for i in rimages]
            return completion.ListCompleter(completion_list)
        except Exception, e:
            # Best-effort: a failed lookup simply yields no completions.
            log.error('something went wrong fix me: %s' % e)
class EBSImageCompleter(Completer):
"""
Returns a list of all registered EBS image ids as completion options
"""
def _completer(self):
try:
rimages = self.ec2.registered_images
completion_list = [i.id for i in rimages if
i.root_device_type == "ebs"]
return completion.ListCompleter(completion_list)
except Exception, e:
log.error('something went wrong fix me: %s' % e)
class S3ImageCompleter(Completer):
    """
    Returns a list of all registered S3 image ids as completion options
    """
    def _completer(self):
        try:
            rimages = self.ec2.registered_images
            # S3-backed AMIs use the "instance-store" root device type.
            completion_list = [i.id for i in rimages if
                               i.root_device_type == "instance-store"]
            return completion.ListCompleter(completion_list)
        except Exception, e:
            log.error('something went wrong fix me: %s' % e)
class InstanceCompleter(Completer):
    """
    Returns a list of all instance ids as completion options
    """
    # When True, instance DNS names are offered in addition to ids.
    show_dns_names = False
    def _completer(self):
        try:
            instances = self.ec2.get_all_instances()
            completion_list = [i.id for i in instances]
            if self.show_dns_names:
                completion_list.extend([i.dns_name for i in instances])
            return completion.ListCompleter(completion_list)
        except Exception, e:
            log.error('something went wrong fix me: %s' % e)
class VolumeCompleter(Completer):
    """
    Returns a list of all volume ids as completion options
    """
    def _completer(self):
        try:
            completion_list = [v.id for v in self.ec2.get_volumes()]
            return completion.ListCompleter(completion_list)
        except Exception, e:
            # Best-effort: a failed lookup simply yields no completions.
            log.error('something went wrong fix me: %s' % e)
| gpl-3.0 |
jdramani/servo | tests/wpt/css-tests/tools/manifest/item.py | 148 | 5139 | import urlparse
from abc import ABCMeta, abstractmethod, abstractproperty
item_types = ["testharness", "reftest", "manual", "stub", "wdspec"]
def get_source_file(source_files, tests_root, manifest, path):
    """Return a SourceFile for `path`, using `source_files` as a cache
    when one is supplied (None means always build a fresh object)."""
    def build():
        from sourcefile import SourceFile
        return SourceFile(tests_root, path, manifest.url_base)

    if source_files is None:
        return build()
    try:
        return source_files[path]
    except KeyError:
        created = source_files[path] = build()
        return created
class ManifestItem(object):
    """Abstract base class for a single entry in a wpt test manifest."""
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration
    item_type = None  # overridden by each subclass with its manifest key
    def __init__(self, source_file, manifest=None):
        self.manifest = manifest
        self.source_file = source_file
    @abstractproperty
    def id(self):
        """The test's id (usually its url)"""
        pass
    @property
    def path(self):
        """The test path relative to the test_root"""
        return self.source_file.rel_path
    @property
    def https(self):
        """Whether the source file carries the "https" metadata flag."""
        return "https" in self.source_file.meta_flags
    def key(self):
        """A unique identifier for the test"""
        return (self.item_type, self.id)
    def meta_key(self):
        """Extra metadata that doesn't form part of the test identity, but for
        which changes mean regenerating the manifest (e.g. the test timeout."""
        return ()
    def __eq__(self, other):
        # Equality is key()-based: any object exposing `key` may compare equal.
        if not hasattr(other, "key"):
            return False
        return self.key() == other.key()
    def __hash__(self):
        # NOTE(review): the hash mixes in meta_key() while __eq__ ignores it,
        # so equal items with differing meta data hash differently -- confirm
        # no set/dict usage relies on the eq/hash contract here.
        return hash(self.key() + self.meta_key())
    def to_json(self):
        """Serialize to the JSON-compatible dict stored in the manifest."""
        return {"path": self.path}
    @classmethod
    def from_json(self, manifest, tests_root, obj, source_files=None):
        # NOTE(review): first parameter of a classmethod is conventionally
        # named `cls`; `self` here is actually the class object.
        raise NotImplementedError
class URLManifestItem(ManifestItem):
    """Manifest item identified by the URL it is served from."""
    def __init__(self, source_file, url, url_base="/", manifest=None):
        ManifestItem.__init__(self, source_file, manifest=manifest)
        self._url = url  # stored relative to url_base
        self.url_base = url_base
    @property
    def id(self):
        return self.url
    @property
    def url(self):
        # Absolute URL: relative _url joined onto the manifest url_base.
        return urlparse.urljoin(self.url_base, self._url)
    def to_json(self):
        rv = ManifestItem.to_json(self)
        rv["url"] = self._url
        return rv
    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        """Recreate an item from its to_json() dict."""
        source_file = get_source_file(source_files, tests_root, manifest, obj["path"])
        return cls(source_file,
                   obj["url"],
                   url_base=manifest.url_base,
                   manifest=manifest)
class TestharnessTest(URLManifestItem):
    """Manifest item for a testharness.js test, with an optional timeout."""
    item_type = "testharness"
    def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
        URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
        self.timeout = timeout
    def meta_key(self):
        # A timeout change should force manifest regeneration (see base class).
        return (self.timeout,)
    def to_json(self):
        rv = URLManifestItem.to_json(self)
        if self.timeout is not None:
            rv["timeout"] = self.timeout
        return rv
    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, obj["path"])
        return cls(source_file,
                   obj["url"],
                   url_base=manifest.url_base,
                   timeout=obj.get("timeout"),
                   manifest=manifest)
class RefTest(URLManifestItem):
    """Manifest item for a reftest: a page rendered and compared (== / !=)
    against one or more reference pages."""
    item_type = "reftest"
    def __init__(self, source_file, url, references, url_base="/", timeout=None,
                 manifest=None):
        URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
        for _, ref_type in references:
            if ref_type not in ["==", "!="]:
                # Parenthesized raise works on both Python 2 and 3; the old
                # "raise ValueError, msg" statement is Python-2-only syntax.
                raise ValueError("Unrecognised ref_type %s" % ref_type)
        self.references = tuple(references)
        self.timeout = timeout
    @property
    def is_reference(self):
        """True when this file is itself a reference page."""
        return self.source_file.name_is_reference
    def meta_key(self):
        # A timeout change should force manifest regeneration (see base class).
        return (self.timeout,)
    def to_json(self):
        rv = URLManifestItem.to_json(self)
        rv["references"] = self.references
        if self.timeout is not None:
            rv["timeout"] = self.timeout
        return rv
    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, obj["path"])
        return cls(source_file,
                   obj["url"],
                   obj["references"],
                   url_base=manifest.url_base,
                   timeout=obj.get("timeout"),
                   manifest=manifest)
class ManualTest(URLManifestItem):
    """URL-based manifest item with item_type "manual"."""
    item_type = "manual"
class Stub(URLManifestItem):
    """URL-based manifest item with item_type "stub"."""
    item_type = "stub"
class WebdriverSpecTest(ManifestItem):
    """Manifest item for a wdspec test; identified by path rather than URL."""
    item_type = "wdspec"
    @property
    def id(self):
        return self.path
    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, obj["path"])
        return cls(source_file, manifest=manifest)
| mpl-2.0 |
haoxli/web-testing-service | tools/pywebsocket/src/test/test_util.py | 449 | 7538 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for util module."""
import os
import random
import sys
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import util
_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')
class UtilTest(unittest.TestCase):
    """A unittest for util module."""
    def test_get_stack_trace(self):
        self.assertEqual('None\n', util.get_stack_trace())
        try:
            a = 1 / 0  # Intentionally raise exception.
        except Exception:
            trace = util.get_stack_trace()
            # assertTrue replaces the long-deprecated failUnless alias.
            self.assertTrue(trace.startswith('Traceback'))
            self.assertTrue(trace.find('ZeroDivisionError') != -1)
    def test_prepend_message_to_exception(self):
        exc = Exception('World')
        self.assertEqual('World', str(exc))
        util.prepend_message_to_exception('Hello ', exc)
        self.assertEqual('Hello World', str(exc))
    def test_get_script_interp(self):
        cygwin_path = 'c:\\cygwin\\bin'
        cygwin_perl = os.path.join(cygwin_path, 'perl')
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README')))
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README'), cygwin_path))
        self.assertEqual('/usr/bin/perl -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl')))
        self.assertEqual(cygwin_perl + ' -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl'), cygwin_path))
    def test_hexify(self):
        self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
                         util.hexify('azAZ09 \t\r\n\x00\xff'))
class RepeatedXorMaskerTest(unittest.TestCase):
    """A unittest for RepeatedXorMasker class."""
    def test_mask(self):
        # Sample input e6,97,a5 is U+65e5 in UTF-8
        # XOR with all-ones inverts every bit.
        masker = util.RepeatedXorMasker('\xff\xff\xff\xff')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x19\x68\x5a', result)
        # XOR with zeros is the identity.
        masker = util.RepeatedXorMasker('\x00\x00\x00\x00')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\xe6\x97\xa5', result)
        # XOR with the input itself yields zeros.
        masker = util.RepeatedXorMasker('\xe6\x97\xa5\x20')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x00\x00\x00', result)
    def test_mask_twice(self):
        """The mask offset must persist across successive mask() calls."""
        masker = util.RepeatedXorMasker('\x00\x7f\xff\x20')
        # mask[0], mask[1], ... will be used.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x00\x7f\xff\x20\x00', result)
        # mask[2], mask[0], ... will be used for the next call.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x7f\xff\x20\x00\x7f', result)
    def test_mask_large_data(self):
        """Masking must match a byte-by-byte XOR over a large buffer."""
        masker = util.RepeatedXorMasker('mASk')
        original = ''.join([chr(i % 256) for i in xrange(1000)])
        result = masker.mask(original)
        expected = ''.join(
            [chr((i % 256) ^ ord('mASk'[i % 4])) for i in xrange(1000)])
        self.assertEqual(expected, result)
        masker = util.RepeatedXorMasker('MaSk')
        first_part = 'The WebSocket Protocol enables two-way communication.'
        result = masker.mask(first_part)
        self.assertEqual(
            '\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
            '\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
            '\x08<\x05c',
            result)
        second_part = 'It has two parts: a handshake and the data transfer.'
        result = masker.mask(second_part)
        self.assertEqual(
            "('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
            "\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c",
            result)
def get_random_section(source, min_num_chunks):
    """Split `source` into randomly-sized non-empty chunks whose
    concatenation equals `source`.

    Each chunk is at most len(source) // min_num_chunks long, so at
    least `min_num_chunks` chunks are produced (assumes
    len(source) >= min_num_chunks, as in the callers above).
    """
    chunks = []
    bytes_chunked = 0
    while bytes_chunked < len(source):
        # Floor division keeps the upper bound an int on Python 3 too;
        # plain `/` would produce a float and break random.randint there.
        chunk_size = random.randint(
            1,
            min(len(source) // min_num_chunks, len(source) - bytes_chunked))
        chunk = source[bytes_chunked:bytes_chunked + chunk_size]
        chunks.append(chunk)
        bytes_chunked += chunk_size
    return chunks
class InflaterDeflaterTest(unittest.TestCase):
    """A unittest for _Inflater and _Deflater class."""
    def test_inflate_deflate_default(self):
        # A long run of '-' is highly compressible, exercising real deflate.
        input = b'hello' + '-' * 30000 + b'hello'
        inflater15 = util._Inflater(15)
        deflater15 = util._Deflater(15)
        inflater8 = util._Inflater(8)
        deflater8 = util._Deflater(8)
        compressed15 = deflater15.compress_and_finish(input)
        compressed8 = deflater8.compress_and_finish(input)
        inflater15.append(compressed15)
        inflater8.append(compressed8)
        # Different window bits must give different streams, but both
        # must round-trip back to the original input.
        self.assertNotEqual(compressed15, compressed8)
        self.assertEqual(input, inflater15.decompress(-1))
        self.assertEqual(input, inflater8.decompress(-1))
    def test_random_section(self):
        """Round-trip 100KiB of random bytes through deflate/inflate using
        randomly-sized write and read chunks (seeded for reproducibility)."""
        random.seed(a=0)
        source = ''.join(
            [chr(random.randint(0, 255)) for i in xrange(100 * 1024)])
        chunked_input = get_random_section(source, 10)
        print "Input chunk sizes: %r" % [len(c) for c in chunked_input]
        deflater = util._Deflater(15)
        compressed = []
        for chunk in chunked_input:
            compressed.append(deflater.compress(chunk))
        compressed.append(deflater.compress_and_finish(''))
        # Read back using an *independent* random chunking of the source.
        chunked_expectation = get_random_section(source, 10)
        print ("Expectation chunk sizes: %r" %
               [len(c) for c in chunked_expectation])
        inflater = util._Inflater(15)
        inflater.append(''.join(compressed))
        for chunk in chunked_expectation:
            decompressed = inflater.decompress(len(chunk))
            self.assertEqual(chunk, decompressed)
        self.assertEqual('', inflater.decompress(-1))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
# vi:sts=4 sw=4 et
| bsd-3-clause |
doismellburning/edx-platform | common/djangoapps/track/tracker.py | 239 | 2374 | """
Module that tracks analytics events by sending them to different
configurable backends.
The backends can be configured using Django settings as the example
below::
TRACKING_BACKENDS = {
'tracker_name': {
'ENGINE': 'class.name.for.backend',
'OPTIONS': {
'host': ... ,
'port': ... ,
...
}
}
}
"""
import inspect
from importlib import import_module
from dogapi import dog_stats_api
from django.conf import settings
from track.backends import BaseBackend
__all__ = ['send']
backends = {}
def _initialize_backends_from_django_settings():
    """
    Initialize the event tracking backends according to the
    configuration in django settings
    """
    backends.clear()
    config = getattr(settings, 'TRACKING_BACKENDS', {})
    for name, values in config.iteritems():
        # Ignore empty values to turn-off default tracker backends
        if not values:
            continue
        engine = values['ENGINE']
        options = values.get('OPTIONS', {})
        backends[name] = _instantiate_backend_from_name(engine, options)
def _instantiate_backend_from_name(name, options):
"""
Instantiate an event tracker backend from the full module path to
the backend class. Useful when setting backends from configuration
files.
"""
# Parse backend name
try:
parts = name.split('.')
module_name = '.'.join(parts[:-1])
class_name = parts[-1]
except IndexError:
raise ValueError('Invalid event track backend %s' % name)
# Get and verify the backend class
try:
module = import_module(module_name)
cls = getattr(module, class_name)
if not inspect.isclass(cls) or not issubclass(cls, BaseBackend):
raise TypeError
except (ValueError, AttributeError, TypeError, ImportError):
raise ValueError('Cannot find event track backend %s' % name)
backend = cls(**options)
return backend
@dog_stats_api.timed('track.send')
def send(event):
    """
    Send an event object to all the initialized backends.
    """
    dog_stats_api.increment('track.send.count')
    for name, backend in backends.iteritems():
        # Time each backend separately so slow trackers are visible.
        with dog_stats_api.timer('track.send.backend.{0}'.format(name)):
            backend.send(event)
# Populate the backends registry once at import time.
_initialize_backends_from_django_settings()
| agpl-3.0 |
llvm-mirror/lldb | packages/Python/lldbsuite/test/functionalities/tty/TestTerminal.py | 9 | 1924 | """
Test lldb command aliases.
"""
from __future__ import print_function
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class LaunchInTerminalTestCase(TestBase):
    """Verify launching a debuggee in a separate terminal window (Darwin only)."""
    mydir = TestBase.compute_mydir(__file__)
    # Darwin is the only platform that I know of that supports optionally launching
    # a program in a separate terminal window. It would be great if other platforms
    # added support for this.
    @skipUnlessDarwin
    # If the test is being run under sudo, the spawned terminal won't retain that elevated
    # privilege so it can't open the socket to talk back to the test case
    @unittest2.skipIf(hasattr(os, 'geteuid') and os.geteuid()
                      == 0, "test cannot be run as root")
    # Do we need to disable this test if the testsuite is being run on a remote system?
    # This env var is only defined when the shell is running in a local mac
    # terminal window
    @unittest2.skipUnless(
        'TERM_PROGRAM' in os.environ,
        "test must be run on local system")
    @no_debug_info_test
    def test_launch_in_terminal(self):
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        # Launch in a new TTY and close it when the process exits.
        launch_info = lldb.SBLaunchInfo(["-lAF", "/tmp/"])
        launch_info.SetLaunchFlags(
            lldb.eLaunchFlagLaunchInTTY | lldb.eLaunchFlagCloseTTYOnExit)
        error = lldb.SBError()
        process = target.Launch(launch_info, error)
        print("Error was: %s."%(error.GetCString()))
        self.assertTrue(
            error.Success(),
            "Make sure launch happened successfully in a terminal window")
        # Running in synchronous mode our process should have run and already
        # exited by the time target.Launch() returns
        self.assertTrue(process.GetState() == lldb.eStateExited)
| apache-2.0 |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py | 1 | 3139 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList, Committer, Reviewer
class CommitInfoTest(unittest.TestCase):
    def test_commit_info_creation(self):
        """CommitInfo should expose the changelog fields verbatim and resolve
        the committer account from the commit email via the CommitterList."""
        author = Committer("Author", "author@example.com")
        committer = Committer("Committer", "committer@example.com")
        reviewer = Reviewer("Reviewer", "reviewer@example.com")
        committer_list = CommitterList(committers=[author, committer], reviewers=[reviewer])
        changelog_data = {
            "bug_id": 1234,
            "author_name": "Committer",
            "author_email": "author@example.com",
            "author": author,
            "reviewer_text": "Reviewer",
            "reviewer": reviewer,
            "bug_description": "Bug description",
        }
        # Note: the commit email belongs to `committer`, not the changelog author.
        commit = CommitInfo(123, "committer@example.com", changelog_data, committer_list)
        self.assertEqual(commit.revision(), 123)
        self.assertEqual(commit.bug_id(), 1234)
        self.assertEqual(commit.author_name(), "Committer")
        self.assertEqual(commit.author_email(), "author@example.com")
        self.assertEqual(commit.author(), author)
        self.assertEqual(commit.reviewer_text(), "Reviewer")
        self.assertEqual(commit.reviewer(), reviewer)
        self.assertEqual(commit.committer(), committer)
        self.assertEqual(commit.committer_email(), "committer@example.com")
        self.assertEqual(commit.responsible_parties(), set([author, committer, reviewer]))
        self.assertEqual(commit.bug_description(), "Bug description")
| gpl-2.0 |
isida/vi | plugins/event.py | 1 | 4035 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------- #
# #
# iSida bot VI plugin #
# Copyright (C) diSabler <dsy@dsy.name> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# --------------------------------------------------------------------------- #
# In-memory registry of event locks: {chat_id: {event_name: from_dict}}.
EVENTS = {}
def make_name(FROM):
    """Build a display name from a Telegram-style `from` dict.

    Preference order: "First Last [@username]", then "@username",
    then "First Last", and finally "id<numeric id>".
    """
    USERNAME = FROM.get('username', '')
    FIRST = FROM.get('first_name', '')
    LAST = FROM.get('last_name', '')
    NAME = ' '.join(t for t in [FIRST, LAST] if t)
    if NAME and USERNAME:
        return '%s [@%s]' % (NAME, USERNAME)
    elif USERNAME:
        return '@%s' % USERNAME
    elif NAME:
        return NAME
    else:
        # Bug fix: was "'id%' % FROM['id']" -- an incomplete format spec
        # that raises ValueError at runtime; '%s' is the intended placeholder.
        return 'id%s' % FROM['id']
def cmd_event(raw_in, less):
    """Create or remove an event lock in a group chat.

    `less` is the event name; when empty and the command is a reply,
    the replied-to message text is used instead.
    """
    global EVENTS
    MESSAGE = raw_in.get('message', {})
    CHAT = MESSAGE.get('chat', {})
    if CHAT.get('type', '') in ['supergroup', 'group']:
        if not less and 'reply_to_message' in raw_in['message']:
            if 'text' in raw_in['message']['reply_to_message']:
                less = raw_in['message']['reply_to_message']['text']
        if less:
            CHAT_ID = CHAT['id']
            FROM = MESSAGE['from']
            USER_ID = FROM['id']
            # Normalize the name: "my event" -> "My_event".
            EVENT_NAME = less.capitalize().replace(' ', '_')
            EVENTS[CHAT_ID] = EVENTS.get(CHAT_ID, {})
            if EVENT_NAME in EVENTS[CHAT_ID]:
                # Only the user who created the event may remove it.
                if USER_ID == EVENTS[CHAT_ID][EVENT_NAME]['id']:
                    msg = '❎ Event `%s` removed' % EVENT_NAME
                    _ = EVENTS[CHAT_ID].pop(EVENT_NAME)
                else:
                    name = make_name(EVENTS[CHAT_ID][EVENT_NAME])
                    msg = '⛔️ Event `%s` already exists\nCreated by %s' % (EVENT_NAME, name)
            else:
                # Store the full `from` dict so the creator can be displayed.
                EVENTS[CHAT_ID][EVENT_NAME] = FROM
                msg = '✅ Created new event `%s`\nUse %s for remove it' % (EVENT_NAME, MESSAGE['text'].lower().replace(' ', '_'))
        else:
            msg = '⚠️ Required parameter missed!'
    else:
        msg = 'Works only in groups!'
    send_msg(raw_in, msg)
def cmd_events(raw_in):
    """Reply with all event locks registered in the current group chat;
    the requester's own events are marked differently from others'."""
    global EVENTS
    MESSAGE = raw_in.get('message', {})
    CHAT = MESSAGE.get('chat', {})
    if CHAT.get('type', '') in ['supergroup', 'group']:
        CHAT_ID = CHAT['id']
        USER_ID = MESSAGE['from']['id']
        chat_events = EVENTS.setdefault(CHAT_ID, {})
        if chat_events:
            lines = []
            for event_name, owner in chat_events.items():
                if owner['id'] == USER_ID:
                    lines.append('✅ /event_%s' % event_name)
                else:
                    lines.append('⛔️ /event_%s - %s' % (event_name, make_name(owner)))
            lines.sort()
            msg = '\n'.join(lines)
        else:
            msg = '✅ No events in current group'
    else:
        msg = 'Works only in groups!'
    send_msg(raw_in, msg)
# Command registry consumed by the bot core:
# [name, handler, <flag -- TODO confirm meaning>, arg mode ('all'/'raw'), help].
commands = [
    ['event', cmd_event, False, 'all', 'Create lock for event'],
    ['events', cmd_events, False, 'raw', 'Show current events']
]
# The end is near!
| gpl-3.0 |
psiinon/addons-server | src/olympia/tags/models.py | 3 | 3249 | from django.db import models
from django.urls import NoReverseMatch
from olympia import activity, amo
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.urlresolvers import reverse
class TagManager(ManagerBase):
    """Manager for Tag adding a filter for non-denied tags."""
    def not_denied(self):
        """Get allowed tags only"""
        return self.filter(denied=False)
class Tag(ModelBase):
    """An add-on tag; linked to addons through the AddonTag join model."""
    id = PositiveAutoField(primary_key=True)
    tag_text = models.CharField(max_length=128)
    # Denied tags are skipped by update_stat() and by TagManager.not_denied().
    denied = models.BooleanField(default=False)
    restricted = models.BooleanField(default=False)
    addons = models.ManyToManyField(
        'addons.Addon', through='AddonTag', related_name='tags')
    # Denormalized count of related addons, refreshed via update_stat().
    num_addons = models.IntegerField(default=0)
    objects = TagManager()
    class Meta:
        db_table = 'tags'
        ordering = ('tag_text',)
        indexes = [
            models.Index(fields=('denied', 'num_addons'),
                         name='tag_blacklisted_num_addons_idx')
        ]
        constraints = [
            models.UniqueConstraint(fields=('tag_text',), name='tag_text')
        ]
    def __str__(self):
        return self.tag_text
    @property
    def popularity(self):
        # Alias for the denormalized addon count.
        return self.num_addons
    def can_reverse(self):
        """Whether a detail URL can be built for this tag."""
        try:
            self.get_url_path()
            return True
        except NoReverseMatch:
            return False
    def get_url_path(self):
        return reverse('tags.detail', args=[self.tag_text])
    def save_tag(self, addon):
        """Attach this tag text to `addon`, creating rows and logging as needed."""
        tag, created = Tag.objects.get_or_create(tag_text=self.tag_text)
        AddonTag.objects.get_or_create(addon=addon, tag=tag)
        activity.log_create(amo.LOG.ADD_TAG, tag, addon)
        return tag
    def remove_tag(self, addon):
        """Detach this tag text from `addon` and log the removal."""
        tag, created = Tag.objects.get_or_create(tag_text=self.tag_text)
        for addon_tag in AddonTag.objects.filter(addon=addon, tag=tag):
            addon_tag.delete()
        activity.log_create(amo.LOG.REMOVE_TAG, tag, addon)
    def update_stat(self):
        """Recompute and persist num_addons; no-op for denied tags."""
        if self.denied:
            return
        self.num_addons = self.addons.count()
        self.save()
class AddonTag(ModelBase):
    """Join table linking an Addon to a Tag (unique per pair)."""
    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(
        'addons.Addon', related_name='addon_tags', on_delete=models.CASCADE)
    tag = models.ForeignKey(
        Tag, related_name='addon_tags', on_delete=models.CASCADE)
    class Meta:
        db_table = 'users_tags_addons'
        indexes = [
            models.Index(fields=('tag',), name='tag_id'),
            models.Index(fields=('addon',), name='addon_id'),
        ]
        constraints = [
            models.UniqueConstraint(fields=('tag', 'addon'), name='tag_id_2'),
        ]
def update_tag_stat_signal(sender, instance, **kw):
    """Signal handler: queue a num_addons refresh for the affected tag."""
    from .tasks import update_tag_stat
    if kw.get('raw'):
        # Raw saves (e.g. fixture loading): skip the stat update.
        return
    try:
        update_tag_stat.delay(instance.tag.pk)
    except Tag.DoesNotExist:
        pass
# Refresh tag stats (via the update_tag_stat task) on AddonTag save/delete.
models.signals.post_save.connect(update_tag_stat_signal, sender=AddonTag,
                                 dispatch_uid='update_tag_stat')
models.signals.post_delete.connect(update_tag_stat_signal, sender=AddonTag,
                                   dispatch_uid='delete_tag_stat')
| bsd-3-clause |
AniruddhaSAtre/dd-agent | checks.d/vsphere.py | 27 | 33304 | # stdlib
from copy import deepcopy
from datetime import datetime, timedelta
from hashlib import md5
from Queue import Empty, Queue
import re
import time
import traceback
# 3p
from pyVim import connect
from pyVmomi import vim
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from checks.libs.vmware.basic_metrics import BASIC_METRICS
from util import Timer
SOURCE_TYPE = 'vsphere'
REAL_TIME_INTERVAL = 20  # Default vCenter sampling interval
# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of jobs batched at the same time in the queue to query available metrics
BATCH_MORLIST_SIZE = 50
# Time after which we reap the jobs that clog the queue
# TODO: use it
JOB_TIMEOUT = 10
# Only event types listed here are forwarded; the per-type regex lists drop
# individual messages that match (see VSphereEvent._is_filtered).
EXCLUDE_FILTERS = {
    'AlarmStatusChangedEvent': [r'Gray'],
    'TaskEvent': [
        r'Initialize powering On',
        r'Power Off virtual machine',
        r'Power On virtual machine',
        r'Reconfigure virtual machine',
        r'Relocate virtual machine',
        r'Suspend virtual machine',
        r'Migrate virtual machine',
    ],
    'VmBeingHotMigratedEvent': [],
    'VmMessageEvent': [],
    'VmMigratedEvent': [],
    'VmPoweredOnEvent': [],
    'VmPoweredOffEvent': [],
    'VmReconfiguredEvent': [],
    'VmResumedEvent': [],
    'VmSuspendedEvent': [],
}
# Dictionary key names -- presumably for per-instance caches used later in
# the check (usage is outside this chunk; confirm before relying on this).
MORLIST = 'morlist'
METRICS_METADATA = 'metrics_metadata'
LAST = 'last'
INTERVAL = 'interval'
class VSphereEvent(object):
UNKNOWN = 'unknown'
    def __init__(self, raw_event, event_config=None):
        """Wrap a pyVmomi event object and pre-build the common payload."""
        self.raw_event = raw_event
        # Event classes are named like "vim.event.FooEvent"; slicing off the
        # first 10 characters strips the "vim.event." prefix.
        if self.raw_event and self.raw_event.__class__.__name__.startswith('vim.event'):
            self.event_type = self.raw_event.__class__.__name__[10:]
        else:
            self.event_type = VSphereEvent.UNKNOWN
        # Epoch seconds; tzinfo is dropped before subtracting (assumes
        # createdTime is UTC -- TODO confirm). NOTE(review): this line
        # dereferences raw_event even though the check above tolerates None.
        self.timestamp = int((self.raw_event.createdTime.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds())
        self.payload = {
            "timestamp": self.timestamp,
            "event_type": SOURCE_TYPE,
            "source_type_name": SOURCE_TYPE,
        }
        if event_config is None:
            self.event_config = {}
        else:
            self.event_config = event_config
    def _is_filtered(self):
        """Return True when this event must be dropped.

        Despite its name, EXCLUDE_FILTERS acts as an allow-list of event
        types; the per-type regex lists then drop individual messages.
        """
        # Filter the unwanted types
        if self.event_type not in EXCLUDE_FILTERS:
            return True
        filters = EXCLUDE_FILTERS[self.event_type]
        for f in filters:
            if re.search(f, self.raw_event.fullFormattedMessage):
                return True
        return False
def get_datadog_payload(self):
if self._is_filtered():
return None
transform_method = getattr(self, 'transform_%s' % self.event_type.lower(), None)
if callable(transform_method):
return transform_method()
# Default event transformation
self.payload["msg_title"] = u"{0}".format(self.event_type)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
return self.payload
    def transform_vmbeinghotmigratedevent(self):
        """Build the payload for a hot-migration event, summarizing host,
        datacenter and datastore changes (actual changes listed first)."""
        self.payload["msg_title"] = u"VM {0} is being migrated".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"{user} has launched a hot migration of this virtual machine:\n".format(user=self.raw_event.userName)
        changes = []
        pre_host = self.raw_event.host.name
        new_host = self.raw_event.destHost.name
        pre_dc = self.raw_event.datacenter.name
        new_dc = self.raw_event.destDatacenter.name
        pre_ds = self.raw_event.ds.name
        new_ds = self.raw_event.destDatastore.name
        if pre_host == new_host:
            changes.append(u"- No host migration: still {0}".format(new_host))
        else:
            # Insert in front if it's a change
            changes = [u"- Host MIGRATION: from {0} to {1}".format(pre_host, new_host)] + changes
        if pre_dc == new_dc:
            changes.append(u"- No datacenter migration: still {0}".format(new_dc))
        else:
            # Insert in front if it's a change
            changes = [u"- Datacenter MIGRATION: from {0} to {1}".format(pre_dc, new_dc)] + changes
        if pre_ds == new_ds:
            changes.append(u"- No datastore migration: still {0}".format(new_ds))
        else:
            # Insert in front if it's a change
            changes = [u"- Datastore MIGRATION: from {0} to {1}".format(pre_ds, new_ds)] + changes
        self.payload["msg_text"] += "\n".join(changes)
        # Tag with both source and destination so either side's dashboards match.
        self.payload['host'] = self.raw_event.vm.name
        self.payload['tags'] = [
            'vsphere_host:%s' % pre_host,
            'vsphere_host:%s' % new_host,
            'vsphere_datacenter:%s' % pre_dc,
            'vsphere_datacenter:%s' % new_dc,
        ]
        return self.payload
def transform_alarmstatuschangedevent(self):
if self.event_config.get('collect_vcenter_alarms') is None:
return None
def get_transition(before, after):
vals = {
'gray': -1,
'green': 0,
'yellow': 1,
'red': 2
}
before = before.lower()
after = after.lower()
if before not in vals or after not in vals:
return None
if vals[before] < vals[after]:
return 'Triggered'
else:
return 'Recovered'
TO_ALERT_TYPE = {
'green': 'success',
'yellow': 'warning',
'red': 'error'
}
def get_agg_key(alarm_event):
return 'h:{0}|dc:{1}|a:{2}'.format(
md5(alarm_event.entity.name).hexdigest()[:10],
md5(alarm_event.datacenter.name).hexdigest()[:10],
md5(alarm_event.alarm.name).hexdigest()[:10]
)
# Get the entity type/name
if self.raw_event.entity.entity.__class__ == vim.VirtualMachine:
host_type = 'VM'
elif self.raw_event.entity.entity.__class__ == vim.HostSystem:
host_type = 'host'
else:
return None
host_name = self.raw_event.entity.name
# Need a getattr because from is a reserved keyword...
trans_before = getattr(self.raw_event, 'from')
trans_after = self.raw_event.to
transition = get_transition(trans_before, trans_after)
# Bad transition, we shouldn't have got this transition
if transition is None:
return None
self.payload['msg_title'] = u"[{transition}] {monitor} on {host_type} {host_name} is now {status}".format(
transition=transition,
monitor=self.raw_event.alarm.name,
host_type=host_type,
host_name=host_name,
status=trans_after
)
self.payload['alert_type'] = TO_ALERT_TYPE[trans_after]
self.payload['event_object'] = get_agg_key(self.raw_event)
self.payload['msg_text'] = u"""vCenter monitor status changed on this alarm, it was {before} and it's now {after}.""".format(
before=trans_before,
after=trans_after
)
self.payload['host'] = host_name
return self.payload
    def transform_vmmessageevent(self):
        """Payload for VmMessageEvent: forward the raw vCenter message,
        fenced by @@@ markers, attributed to the VM as host."""
        self.payload["msg_title"] = u"VM {0} is reporting".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmmigratedevent(self):
        """Payload for VmMigratedEvent (migration completed): forward the raw
        vCenter message, attributed to the VM as host."""
        self.payload["msg_title"] = u"VM {0} has been migrated".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmpoweredoffevent(self):
        """Payload for VmPoweredOffEvent: who powered the VM off and where
        (datacenter / host) it was running."""
        self.payload["msg_title"] = u"VM {0} has been powered OFF".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has powered off this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmpoweredonevent(self):
        """Payload for VmPoweredOnEvent: who powered the VM on and where
        (datacenter / host) it is running."""
        self.payload["msg_title"] = u"VM {0} has been powered ON".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has powered on this virtual machine. It is running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmresumingevent(self):
        """Payload for VmResumingEvent: the VM was resumed from suspension."""
        self.payload["msg_title"] = u"VM {0} is RESUMING".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has resumed {vm}. It will soon be powered on.""".format(
            user=self.raw_event.userName,
            vm=self.raw_event.vm.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmsuspendedevent(self):
        """Payload for VmSuspendedEvent: who suspended the VM and where
        (datacenter / host) it was running."""
        self.payload["msg_title"] = u"VM {0} has been SUSPENDED".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has suspended this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
def transform_vmreconfiguredevent(self):
self.payload["msg_title"] = u"VM {0} configuration has been changed".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} saved the new configuration:\n@@@\n".format(user=self.raw_event.userName)
# Add lines for configuration change don't show unset, that's hacky...
config_change_lines = [line for line in self.raw_event.configSpec.__repr__().splitlines() if 'unset' not in line]
self.payload["msg_text"] += u"\n".join(config_change_lines)
self.payload["msg_text"] += u"\n@@@"
self.payload['host'] = self.raw_event.vm.name
return self.payload
def atomic_method(method):
    """ Decorator to catch the exceptions that happen in detached thread atomic tasks
    and display them in the logs.

    The wrapped callable is expected to take the check instance as its first
    positional argument: on failure, the formatted traceback is pushed onto
    ``args[0].exceptionq`` so the main thread can surface it, instead of the
    worker thread dying silently.
    """
    # Local import keeps the decorator self-contained (functools is stdlib).
    from functools import wraps

    @wraps(method)  # preserve __name__/__doc__ of the wrapped method
    def wrapper(*args, **kwargs):
        try:
            # Propagate the return value (it was previously discarded).
            return method(*args, **kwargs)
        except Exception:
            args[0].exceptionq.put("A worker thread crashed:\n" + traceback.format_exc())
    return wrapper
class VSphereCheck(AgentCheck):
    """ Get performance metrics from a vCenter server and upload them to Datadog
    References:
        http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html
    *_atomic jobs perform one single task asynchronously in the ThreadPool, we
    don't know exactly when they will finish, but we reap them if they're stuck.
    The other calls are performed synchronously.
    """
    # Service check emitted on every (attempted) connection to vCenter.
    SERVICE_CHECK_NAME = 'vcenter.can_connect'
    def __init__(self, name, init_config, agentConfig, instances):
        """Set up per-instance caches, refresh timers and the exception queue
        used by the worker threads."""
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        self.time_started = time.time()
        self.pool_started = False
        # Worker threads report their crashes here (see atomic_method).
        self.exceptionq = Queue()
        # Connections open to vCenter instances
        self.server_instances = {}
        # Event configuration
        self.event_config = {}
        # Caching resources, timeouts
        self.cache_times = {}
        for instance in self.instances:
            i_key = self._instance_key(instance)
            self.cache_times[i_key] = {
                MORLIST: {
                    LAST: 0,
                    INTERVAL: init_config.get('refresh_morlist_interval',
                                      REFRESH_MORLIST_INTERVAL)
                },
                METRICS_METADATA: {
                    LAST: 0,
                    INTERVAL: init_config.get('refresh_metrics_metadata_interval',
                                      REFRESH_METRICS_METADATA_INTERVAL)
                }
            }
            self.event_config[i_key] = instance.get('event_config')
        # First layer of cache (get entities from the tree)
        self.morlist_raw = {}
        # Second layer, processed from the first one
        self.morlist = {}
        # Metrics metadata, basically perfCounterId -> {name, group, description}
        self.metrics_metadata = {}
        self.latest_event_query = {}
    def stop(self):
        """Shut the check down by terminating the worker pool."""
        self.stop_pool()
    def start_pool(self):
        """Spawn the thread pool used by the *_atomic jobs."""
        self.log.info("Starting Thread Pool")
        self.pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))
        self.pool = Pool(self.pool_size)
        self.pool_started = True
        # job name -> start timestamp, used by _clean() to reap stuck jobs.
        self.jobs_status = {}
    def stop_pool(self):
        """Terminate and join the pool (if started) and clear job tracking."""
        self.log.info("Stopping Thread Pool")
        if self.pool_started:
            self.pool.terminate()
            self.pool.join()
            self.jobs_status.clear()
            assert self.pool.get_nworkers() == 0
            self.pool_started = False
    def restart_pool(self):
        """Tear down and recreate the worker pool."""
        self.stop_pool()
        self.start_pool()
    def _clean(self):
        """Restart the pool when any tracked job has exceeded JOB_TIMEOUT."""
        now = time.time()
        # TODO: use that
        for name in self.jobs_status.keys():
            start_time = self.jobs_status[name]
            if now - start_time > JOB_TIMEOUT:
                self.log.critical("Restarting Pool. One check is stuck.")
                self.restart_pool()
                break
    def _query_event(self, instance):
        """Fetch vCenter events newer than the previous poll and forward the
        non-filtered ones to Datadog."""
        i_key = self._instance_key(instance)
        last_time = self.latest_event_query.get(i_key)
        server_instance = self._get_server_instance(instance)
        event_manager = server_instance.content.eventManager
        # Be sure we don't duplicate any event, never query the "past"
        if not last_time:
            last_time = self.latest_event_query[i_key] = \
                event_manager.latestEvent.createdTime + timedelta(seconds=1)
        query_filter = vim.event.EventFilterSpec()
        time_filter = vim.event.EventFilterSpec.ByTime(beginTime=self.latest_event_query[i_key])
        query_filter.time = time_filter
        try:
            new_events = event_manager.QueryEvents(query_filter)
            self.log.debug("Got {0} events from vCenter event manager".format(len(new_events)))
            for event in new_events:
                normalized_event = VSphereEvent(event, self.event_config[i_key])
                # Can return None if the event if filtered out
                event_payload = normalized_event.get_datadog_payload()
                if event_payload is not None:
                    self.event(event_payload)
                last_time = event.createdTime + timedelta(seconds=1)
        except Exception as e:
            # Don't get stuck on a failure to fetch an event
            # Ignore them for next pass
            self.log.warning("Unable to fetch Events %s", e)
            last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
        self.latest_event_query[i_key] = last_time
    def _instance_key(self, instance):
        """Return the unique 'name' of a configured instance (required)."""
        i_key = instance.get('name')
        if i_key is None:
            raise Exception("Must define a unique 'name' per vCenter instance")
        return i_key
    def _should_cache(self, instance, entity):
        """True when the cache for `entity` (MORLIST or METRICS_METADATA) is
        older than its configured refresh interval."""
        i_key = self._instance_key(instance)
        now = time.time()
        return now - self.cache_times[i_key][entity][LAST] > self.cache_times[i_key][entity][INTERVAL]
    def _get_server_instance(self, instance):
        """Return a (cached) live pyVmomi connection for this instance,
        reporting the `vcenter.can_connect` service check along the way."""
        i_key = self._instance_key(instance)
        service_check_tags = [
            'vcenter_server:{0}'.format(instance.get('name')),
            'vcenter_host:{0}'.format(instance.get('host')),
        ]
        if i_key not in self.server_instances:
            try:
                server_instance = connect.SmartConnect(
                    host=instance.get('host'),
                    user=instance.get('username'),
                    pwd=instance.get('password')
                )
            except Exception as e:
                err_msg = "Connection to %s failed: %s" % (instance.get('host'), e)
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                        tags=service_check_tags, message=err_msg)
                raise Exception(err_msg)
            self.server_instances[i_key] = server_instance
        # Test if the connection is working
        try:
            self.server_instances[i_key].RetrieveContent()
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                    tags=service_check_tags)
        except Exception as e:
            err_msg = "Connection to %s died unexpectedly: %s" % (instance.get('host'), e)
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                    tags=service_check_tags, message=err_msg)
            raise Exception(err_msg)
        return self.server_instances[i_key]
    def _compute_needed_metrics(self, instance, available_metrics):
        """ Compare the available metrics for one MOR we have computed and intersect them
        with the set of metrics we want to report
        """
        if instance.get('all_metrics', False):
            return available_metrics
        i_key = self._instance_key(instance)
        wanted_metrics = []
        # Get only the basic metrics
        for metric in available_metrics:
            # No cache yet, skip it for now
            if (i_key not in self.metrics_metadata
                    or metric.counterId not in self.metrics_metadata[i_key]):
                continue
            if self.metrics_metadata[i_key][metric.counterId]['name'] in BASIC_METRICS:
                wanted_metrics.append(metric)
        return wanted_metrics
    def get_external_host_tags(self):
        """ Returns a list of tags for every host that is detected by the vSphere
        integration.
        List of pairs (hostname, list_of_tags)
        """
        self.log.info("Sending external_host_tags now")
        external_host_tags = []
        for instance in self.instances:
            i_key = self._instance_key(instance)
            mor_list = self.morlist[i_key].items()
            for mor_name, mor in mor_list:
                external_host_tags.append((mor['hostname'], {SOURCE_TYPE: mor['tags']}))
        return external_host_tags
    @atomic_method
    def _cache_morlist_raw_atomic(self, i_key, obj_type, obj, tags, regexes=None):
        """ Compute tags for a single node in the vCenter rootFolder
        and queue other such jobs for children nodes.
        Usual hierarchy:
        rootFolder
        - datacenter1
            - compute_resource1 == cluster
                - host1
                - host2
                - host3
            - compute_resource2
                - host5
                    - vm1
                    - vm2
        If it's a node we want to query metric for, queue it in self.morlist_raw
        that will be processed by another job.
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        self.log.debug("job_atomic: Exploring MOR {0} (type={1})".format(obj, obj_type))
        ### </TEST-INSTRUMENTATION>
        # Each recursion level gets its own copy so sibling branches don't
        # pollute each other's tag lists.
        tags_copy = deepcopy(tags)
        if obj_type == 'rootFolder':
            for datacenter in obj.childEntity:
                # Skip non-datacenter
                if not hasattr(datacenter, 'hostFolder'):
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'datacenter', datacenter, tags_copy, regexes)
                )
        elif obj_type == 'datacenter':
            dc_tag = "vsphere_datacenter:%s" % obj.name
            tags_copy.append(dc_tag)
            for compute_resource in obj.hostFolder.childEntity:
                # Skip non-compute resource
                if not hasattr(compute_resource, 'host'):
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'compute_resource', compute_resource, tags_copy, regexes)
                )
        elif obj_type == 'compute_resource':
            if obj.__class__ == vim.ClusterComputeResource:
                cluster_tag = "vsphere_cluster:%s" % obj.name
                tags_copy.append(cluster_tag)
            for host in obj.host:
                # Skip non-host
                if not hasattr(host, 'vm'):
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'host', host, tags_copy, regexes)
                )
        elif obj_type == 'host':
            if regexes and regexes.get('host_include') is not None:
                match = re.search(regexes['host_include'], obj.name)
                if not match:
                    self.log.debug(u"Filtered out VM {0} because of host_include_only_regex".format(obj.name))
                    return
            watched_mor = dict(mor_type='host', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:host'])
            self.morlist_raw[i_key].append(watched_mor)
            host_tag = "vsphere_host:%s" % obj.name
            tags_copy.append(host_tag)
            for vm in obj.vm:
                # Only powered-on VMs report real-time metrics.
                if vm.runtime.powerState != 'poweredOn':
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'vm', vm, tags_copy, regexes)
                )
        elif obj_type == 'vm':
            if regexes and regexes.get('vm_include') is not None:
                match = re.search(regexes['vm_include'], obj.name)
                if not match:
                    self.log.debug(u"Filtered out VM {0} because of vm_include_only_regex".format(obj.name))
                    return
            watched_mor = dict(mor_type='vm', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:vm'])
            self.morlist_raw[i_key].append(watched_mor)
        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.morlist_raw_atomic.time', t.total())
        ### </TEST-INSTRUMENTATION>
    def _cache_morlist_raw(self, instance):
        """ Initiate the first layer to refresh self.morlist by queueing
        _cache_morlist_raw_atomic on the rootFolder in a recursive/asncy approach
        """
        i_key = self._instance_key(instance)
        self.log.debug("Caching the morlist for vcenter instance %s" % i_key)
        if i_key in self.morlist_raw and len(self.morlist_raw[i_key]) > 0:
            self.log.debug(
                "Skipping morlist collection now, RAW results "
                "processing not over (latest refresh was {0}s ago)".format(
                    time.time() - self.cache_times[i_key][MORLIST][LAST])
            )
            return
        self.morlist_raw[i_key] = []
        server_instance = self._get_server_instance(instance)
        root_folder = server_instance.content.rootFolder
        instance_tag = "vcenter_server:%s" % instance.get('name')
        regexes = {
            'host_include': instance.get('host_include_only_regex'),
            'vm_include': instance.get('vm_include_only_regex')
        }
        self.pool.apply_async(
            self._cache_morlist_raw_atomic,
            args=(i_key, 'rootFolder', root_folder, [instance_tag], regexes)
        )
        self.cache_times[i_key][MORLIST][LAST] = time.time()
    @atomic_method
    def _cache_morlist_process_atomic(self, instance, mor):
        """ Process one item of the self.morlist_raw list by querying the available
        metrics for this MOR and then putting it in self.morlist
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        ### </TEST-INSTRUMENTATION>
        i_key = self._instance_key(instance)
        server_instance = self._get_server_instance(instance)
        perfManager = server_instance.content.perfManager
        self.log.debug(
            "job_atomic: Querying available metrics"
            " for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
        )
        available_metrics = perfManager.QueryAvailablePerfMetric(
            mor['mor'], intervalId=REAL_TIME_INTERVAL)
        mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)
        mor_name = str(mor['mor'])
        if mor_name in self.morlist[i_key]:
            # Was already here last iteration
            self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
        else:
            self.morlist[i_key][mor_name] = mor
        self.morlist[i_key][mor_name]['last_seen'] = time.time()
        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
        ### </TEST-INSTRUMENTATION>
    def _cache_morlist_process(self, instance):
        """ Empties the self.morlist_raw by popping items and running asynchronously
        the _cache_morlist_process_atomic operation that will get the available
        metrics for this MOR and put it in self.morlist
        """
        i_key = self._instance_key(instance)
        if i_key not in self.morlist:
            self.morlist[i_key] = {}
        batch_size = self.init_config.get('batch_morlist_size', BATCH_MORLIST_SIZE)
        for i in xrange(batch_size):
            try:
                mor = self.morlist_raw[i_key].pop()
                self.pool.apply_async(self._cache_morlist_process_atomic, args=(instance, mor))
            except (IndexError, KeyError):
                self.log.debug("No more work to process in morlist_raw")
                return
    def _vacuum_morlist(self, instance):
        """ Check if self.morlist doesn't have some old MORs that are gone, ie
        we cannot get any metrics from them anyway (or =0)
        """
        i_key = self._instance_key(instance)
        morlist = self.morlist[i_key].items()
        for mor_name, mor in morlist:
            last_seen = mor['last_seen']
            if (time.time() - last_seen) > 2 * REFRESH_MORLIST_INTERVAL:
                del self.morlist[i_key][mor_name]
    def _cache_metrics_metadata(self, instance):
        """ Get from the server instance, all the performance counters metadata
        meaning name/group/description... attached with the corresponding ID
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        ### </TEST-INSTRUMENTATION>
        i_key = self._instance_key(instance)
        self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
        server_instance = self._get_server_instance(instance)
        perfManager = server_instance.content.perfManager
        new_metadata = {}
        for counter in perfManager.perfCounter:
            d = dict(
                name = "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
                unit = counter.unitInfo.key,
                instance_tag = 'instance' # FIXME: replace by what we want to tag!
            )
            new_metadata[counter.key] = d
        self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()
        self.log.info("Finished metadata collection for instance {0}".format(i_key))
        # Reset metadata
        self.metrics_metadata[i_key] = new_metadata
        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
        ### </TEST-INSTRUMENTATION>
    def _transform_value(self, instance, counter_id, value):
        """ Given the counter_id, look up for the metrics metadata to check the vsphere
        type of the counter and apply pre-reporting transformation if needed.
        """
        i_key = self._instance_key(instance)
        if counter_id in self.metrics_metadata[i_key]:
            unit = self.metrics_metadata[i_key][counter_id]['unit']
            if unit == 'percent':
                return float(value) / 100
        # Defaults to return the value without transformation
        return value
    @atomic_method
    def _collect_metrics_atomic(self, instance, mor):
        """ Task that collects the metrics listed in the morlist for one MOR
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        ### </TEST-INSTRUMENTATION>
        i_key = self._instance_key(instance)
        server_instance = self._get_server_instance(instance)
        perfManager = server_instance.content.perfManager
        query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                                 entity=mor['mor'],
                                                 metricId=mor['metrics'],
                                                 intervalId=20,
                                                 format='normal')
        results = perfManager.QueryPerf(querySpec=[query])
        if results:
            for result in results[0].value:
                if result.id.counterId not in self.metrics_metadata[i_key]:
                    self.log.debug("Skipping this metric value, because there is no metadata about it")
                    continue
                instance_name = result.id.instance or "none"
                value = self._transform_value(instance, result.id.counterId, result.value[0])
                self.gauge(
                    "vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
                    value,
                    hostname=mor['hostname'],
                    tags=['instance:%s' % instance_name]
                )
        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())
        ### </TEST-INSTRUMENTATION>
    def collect_metrics(self, instance):
        """ Calls asynchronously _collect_metrics_atomic on all MORs, as the
        job queue is processed the Aggregator will receive the metrics.
        """
        i_key = self._instance_key(instance)
        if i_key not in self.morlist:
            self.log.debug("Not collecting metrics for this instance, nothing to do yet: {0}".format(i_key))
            return
        mors = self.morlist[i_key].items()
        self.log.debug("Collecting metrics of %d mors" % len(mors))
        vm_count = 0
        for mor_name, mor in mors:
            if mor['mor_type'] == 'vm':
                vm_count += 1
            if 'metrics' not in mor:
                # self.log.debug("Skipping entity %s collection because we didn't cache its metrics yet" % mor['hostname'])
                continue
            self.pool.apply_async(self._collect_metrics_atomic, args=(instance, mor))
        self.gauge('vsphere.vm.count', vm_count, tags=["vcenter_server:%s" % instance.get('name')])
    def check(self, instance):
        """Main entry point: refresh the caches, queue metric collection,
        forward events, and surface any worker-thread crash."""
        if not self.pool_started:
            self.start_pool()
        ### <TEST-INSTRUMENTATION>
        self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:initial'])
        ### </TEST-INSTRUMENTATION>
        # First part: make sure our object repository is neat & clean
        if self._should_cache(instance, METRICS_METADATA):
            self._cache_metrics_metadata(instance)
        if self._should_cache(instance, MORLIST):
            self._cache_morlist_raw(instance)
        self._cache_morlist_process(instance)
        self._vacuum_morlist(instance)
        # Second part: do the job
        self.collect_metrics(instance)
        self._query_event(instance)
        # For our own sanity
        self._clean()
        thread_crashed = False
        try:
            # Drain every traceback the worker threads reported.
            while True:
                self.log.critical(self.exceptionq.get_nowait())
                thread_crashed = True
        except Empty:
            pass
        if thread_crashed:
            self.stop_pool()
            raise Exception("One thread in the pool crashed, check the logs")
        ### <TEST-INSTRUMENTATION>
        self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:final'])
        ### </TEST-INSTRUMENTATION>
if __name__ == '__main__':
    # Ad-hoc manual test harness (Python 2): build the check from a local
    # config file and run it in a loop, dumping events and metric counts.
    check, _instances = VSphereCheck.from_yaml('conf.d/vsphere.yaml')
    try:
        for i in xrange(200):
            print "Loop %d" % i
            for instance in check.instances:
                check.check(instance)
                if check.has_events():
                    print 'Events: %s' % (check.get_events())
                print 'Metrics: %d' % (len(check.get_metrics()))
            time.sleep(10)
    except Exception as e:
        print "Whoops something happened {0}".format(traceback.format_exc())
    finally:
        # Always tear down the worker pool, even on crash or Ctrl-C.
        check.stop()
| bsd-3-clause |
jannopet/LEGO-WeDo-2.0-Python-SDK | wedo2/bluetooth/service_manager.py | 1 | 1809 |
from wedo2.bluetooth import bluetooth_helper
from wedo2.bluetooth.connect_info import ConnectInfo
from wedo2.services.lego_service_factory import LegoServiceFactory
HUB_CHARACTERISTIC_ATTACHED_IO = "0x1527"
class ServiceManager:
    """Discovers and tracks the LEGO services (motors, sensors, RGB light...)
    attached to the ports of a WeDo 2.0 hub.

    The hub reports attach/detach notifications on the "Attached I/O" GATT
    characteristic; each attach notification is stored as a ConnectInfo and
    later turned into a concrete service object by LegoServiceFactory.
    """

    def __init__(self, io):
        # io: bluetooth I/O object used to (un)subscribe to characteristics.
        self.io = io
        self.services = set()
        # connect_id -> ConnectInfo for every attached I/O seen so far.
        self.services_data = {}
        self.find_available_services()

    def find_available_services(self):
        """Subscribe to the Attached-I/O characteristic, collect ConnectInfo
        for every port until the internal RGB LED (port 6) is reported, then
        build the service objects."""
        attached_io_uuid = bluetooth_helper.uuid_with_prefix_custom_base(HUB_CHARACTERISTIC_ATTACHED_IO)
        self.io.subscribe_to_char(attached_io_uuid, self.handle_attached_io_data)
        # End subscription when the service for port 6 (RGB LED light) has
        # been found.
        # NOTE(review): busy-wait relying on the notification callback running
        # on another thread; it never terminates if port 6 is not reported.
        while 6 not in self.services_data:
            pass
        self.io.unsubscribe_from_char(attached_io_uuid)
        self.create_services(self.services_data)

    def create_services(self, services_data):
        """Instantiate a service object for every collected ConnectInfo."""
        for connect_info in services_data.values():
            self.services.add(LegoServiceFactory.create(connect_info, self.io))

    def find_service(self, io_type):
        """Return the first known service whose connect_info matches the
        given IO type enum, or None when no such service is attached."""
        for service in self.services:
            if service.connect_info.type_id == io_type.value:
                return service
        return None

    def handle_attached_io_data(self, handle, data):
        """Notification callback for the Attached-I/O characteristic.

        Observed layout (from the indices used below):
        data[0] = connect id, data[1] = attached flag,
        data[2] = hub index, data[3] = io type.
        """
        if len(data) < 2:
            print("Something went wrong when retrieving attached io data")
            # BUGFIX: previously fell through and raised IndexError below.
            return
        connect_id = data[0]
        attached = data[1]
        if attached == 1:
            if len(data) < 4:
                # Attach frame too short to carry hub index / io type.
                print("Something went wrong when retrieving attached io data")
                return
            hub_index = data[2]
            io_type = data[3]
            self.services_data[connect_id] = ConnectInfo(connect_id, hub_index, io_type)
| mit |
holmes/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/win32/context_i386.py | 102 | 16108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
CONTEXT structure for i386.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
from winappdbg.win32.version import ARCH_I386
#==============================================================================
# This is used later on to calculate the list of exported symbols.
# (NOTE(review): _all is assigned None first so the name itself appears in the
# vars() snapshot -- presumably so it is excluded from the computed exports.)
_all = None
_all = set(vars().keys())
#==============================================================================
#--- CONTEXT structures and constants -----------------------------------------
# The following values specify the type of access in the first parameter
# of the exception record when the exception code specifies an access
# violation.
EXCEPTION_READ_FAULT = 0 # exception caused by a read
EXCEPTION_WRITE_FAULT = 1 # exception caused by a write
EXCEPTION_EXECUTE_FAULT = 8 # exception caused by an instruction fetch
CONTEXT_i386 = 0x00010000 # this assumes that i386 and
CONTEXT_i486 = 0x00010000 # i486 have identical context records
CONTEXT_CONTROL = (CONTEXT_i386 | long(0x00000001)) # SS:SP, CS:IP, FLAGS, BP
CONTEXT_INTEGER = (CONTEXT_i386 | long(0x00000002)) # AX, BX, CX, DX, SI, DI
CONTEXT_SEGMENTS = (CONTEXT_i386 | long(0x00000004)) # DS, ES, FS, GS
CONTEXT_FLOATING_POINT = (CONTEXT_i386 | long(0x00000008)) # 387 state
CONTEXT_DEBUG_REGISTERS = (CONTEXT_i386 | long(0x00000010)) # DB 0-3,6,7
CONTEXT_EXTENDED_REGISTERS = (CONTEXT_i386 | long(0x00000020)) # cpu specific extensions
# CONTEXT_FULL leaves out the debug, floating point and extended sections;
# CONTEXT_ALL requests every section of the thread context.
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS)
CONTEXT_ALL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | \
                CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS | \
                CONTEXT_EXTENDED_REGISTERS)
SIZE_OF_80387_REGISTERS = 80
MAXIMUM_SUPPORTED_EXTENSION = 512
# typedef struct _FLOATING_SAVE_AREA {
# DWORD ControlWord;
# DWORD StatusWord;
# DWORD TagWord;
# DWORD ErrorOffset;
# DWORD ErrorSelector;
# DWORD DataOffset;
# DWORD DataSelector;
# BYTE RegisterArea[SIZE_OF_80387_REGISTERS];
# DWORD Cr0NpxState;
# } FLOATING_SAVE_AREA;
class FLOATING_SAVE_AREA(Structure):
    """
    Win32 FLOATING_SAVE_AREA structure: the legacy x87 FPU state captured
    as part of an i386 thread context.
    """
    _pack_ = 1
    _fields_ = [
        ('ControlWord', DWORD),
        ('StatusWord', DWORD),
        ('TagWord', DWORD),
        ('ErrorOffset', DWORD),
        ('ErrorSelector', DWORD),
        ('DataOffset', DWORD),
        ('DataSelector', DWORD),
        ('RegisterArea', BYTE * SIZE_OF_80387_REGISTERS),
        ('Cr0NpxState', DWORD),
    ]
    # Scalar members copied verbatim between the structure and a dict.
    _integer_members = ('ControlWord', 'StatusWord', 'TagWord', 'ErrorOffset', 'ErrorSelector', 'DataOffset', 'DataSelector', 'Cr0NpxState')
    @classmethod
    def from_dict(cls, fsa):
        'Instance a new structure from a Python dictionary.'
        data = dict(fsa)
        instance = cls()
        for member in cls._integer_members:
            setattr(instance, member, data.get(member))
        area = data.get('RegisterArea', None)
        if area is not None:
            # Copy the raw x87 register bytes element by element.
            for position in compat.xrange(0, SIZE_OF_80387_REGISTERS):
                instance.RegisterArea[position] = area[position]
        return instance
    def to_dict(self):
        'Convert a structure into a Python dictionary.'
        result = dict((member, getattr(self, member)) for member in self._integer_members)
        result['RegisterArea'] = tuple(
            self.RegisterArea[position]
            for position in compat.xrange(0, SIZE_OF_80387_REGISTERS)
        )
        return result
# Pointer typedefs mirroring the Win32 naming convention (P*/LP*).
PFLOATING_SAVE_AREA = POINTER(FLOATING_SAVE_AREA)
LPFLOATING_SAVE_AREA = PFLOATING_SAVE_AREA
# typedef struct _CONTEXT {
# DWORD ContextFlags;
# DWORD Dr0;
# DWORD Dr1;
# DWORD Dr2;
# DWORD Dr3;
# DWORD Dr6;
# DWORD Dr7;
# FLOATING_SAVE_AREA FloatSave;
# DWORD SegGs;
# DWORD SegFs;
# DWORD SegEs;
# DWORD SegDs;
# DWORD Edi;
# DWORD Esi;
# DWORD Ebx;
# DWORD Edx;
# DWORD Ecx;
# DWORD Eax;
# DWORD Ebp;
# DWORD Eip;
# DWORD SegCs;
# DWORD EFlags;
# DWORD Esp;
# DWORD SegSs;
# BYTE ExtendedRegisters[MAXIMUM_SUPPORTED_EXTENSION];
# } CONTEXT;
class CONTEXT(Structure):
arch = ARCH_I386
_pack_ = 1
# Context Frame
#
# This frame has a several purposes: 1) it is used as an argument to
# NtContinue, 2) is is used to constuct a call frame for APC delivery,
# and 3) it is used in the user level thread creation routines.
#
# The layout of the record conforms to a standard call frame.
_fields_ = [
# The flags values within this flag control the contents of
# a CONTEXT record.
#
# If the context record is used as an input parameter, then
# for each portion of the context record controlled by a flag
# whose value is set, it is assumed that that portion of the
# context record contains valid context. If the context record
# is being used to modify a threads context, then only that
# portion of the threads context will be modified.
#
# If the context record is used as an IN OUT parameter to capture
# the context of a thread, then only those portions of the thread's
# context corresponding to set flags will be returned.
#
# The context record is never used as an OUT only parameter.
('ContextFlags', DWORD),
# This section is specified/returned if CONTEXT_DEBUG_REGISTERS is
# set in ContextFlags. Note that CONTEXT_DEBUG_REGISTERS is NOT
# included in CONTEXT_FULL.
('Dr0', DWORD),
('Dr1', DWORD),
('Dr2', DWORD),
('Dr3', DWORD),
('Dr6', DWORD),
('Dr7', DWORD),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_FLOATING_POINT.
('FloatSave', FLOATING_SAVE_AREA),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_SEGMENTS.
('SegGs', DWORD),
('SegFs', DWORD),
('SegEs', DWORD),
('SegDs', DWORD),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_INTEGER.
('Edi', DWORD),
('Esi', DWORD),
('Ebx', DWORD),
('Edx', DWORD),
('Ecx', DWORD),
('Eax', DWORD),
# This section is specified/returned if the
# ContextFlags word contains the flag CONTEXT_CONTROL.
('Ebp', DWORD),
('Eip', DWORD),
('SegCs', DWORD), # MUST BE SANITIZED
('EFlags', DWORD), # MUST BE SANITIZED
('Esp', DWORD),
('SegSs', DWORD),
# This section is specified/returned if the ContextFlags word
# contains the flag CONTEXT_EXTENDED_REGISTERS.
# The format and contexts are processor specific.
('ExtendedRegisters', BYTE * MAXIMUM_SUPPORTED_EXTENSION),
]
_ctx_debug = ('Dr0', 'Dr1', 'Dr2', 'Dr3', 'Dr6', 'Dr7')
_ctx_segs = ('SegGs', 'SegFs', 'SegEs', 'SegDs', )
_ctx_int = ('Edi', 'Esi', 'Ebx', 'Edx', 'Ecx', 'Eax')
_ctx_ctrl = ('Ebp', 'Eip', 'SegCs', 'EFlags', 'Esp', 'SegSs')
@classmethod
def from_dict(cls, ctx):
'Instance a new structure from a Python dictionary.'
ctx = Context(ctx)
s = cls()
ContextFlags = ctx['ContextFlags']
setattr(s, 'ContextFlags', ContextFlags)
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in s._ctx_debug:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT:
fsa = ctx['FloatSave']
s.FloatSave = FLOATING_SAVE_AREA.from_dict(fsa)
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in s._ctx_segs:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in s._ctx_int:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in s._ctx_ctrl:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_EXTENDED_REGISTERS) == CONTEXT_EXTENDED_REGISTERS:
er = ctx['ExtendedRegisters']
for index in compat.xrange(0, MAXIMUM_SUPPORTED_EXTENSION):
s.ExtendedRegisters[index] = er[index]
return s
def to_dict(self):
    """Convert this structure into a native Python dictionary.

    Only the register groups selected by self.ContextFlags are
    copied into the returned Context dictionary.
    """
    result = Context()
    flags = self.ContextFlags
    result['ContextFlags'] = flags
    if (flags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
        for name in self._ctx_debug:
            result[name] = getattr(self, name)
    if (flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT:
        # Recursively convert the nested FPU state.
        result['FloatSave'] = self.FloatSave.to_dict()
    if (flags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
        for name in self._ctx_segs:
            result[name] = getattr(self, name)
    if (flags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
        for name in self._ctx_int:
            result[name] = getattr(self, name)
    if (flags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
        for name in self._ctx_ctrl:
            result[name] = getattr(self, name)
    if (flags & CONTEXT_EXTENDED_REGISTERS) == CONTEXT_EXTENDED_REGISTERS:
        # Snapshot the extended register bytes as an immutable tuple.
        result['ExtendedRegisters'] = tuple(
            self.ExtendedRegisters[index]
            for index in compat.xrange(0, MAXIMUM_SUPPORTED_EXTENSION))
    return result
# Pointer aliases for the CONTEXT structure, matching the Win32 API's
# PCONTEXT / LPCONTEXT typedefs.
PCONTEXT = POINTER(CONTEXT)
LPCONTEXT = PCONTEXT
class Context(dict):
    """
    Register context dictionary for the i386 architecture.

    Exposes the program counter, stack pointer and frame pointer as
    properties mapped onto the 'Eip', 'Esp' and 'Ebp' entries.
    """
    arch = CONTEXT.arch

    @property
    def pc(self):
        # Program counter (instruction pointer).
        return self['Eip']

    @pc.setter
    def pc(self, value):
        self['Eip'] = value

    @property
    def sp(self):
        # Stack pointer.
        return self['Esp']

    @sp.setter
    def sp(self, value):
        self['Esp'] = value

    @property
    def fp(self):
        # Frame pointer.
        return self['Ebp']

    @fp.setter
    def fp(self, value):
        self['Ebp'] = value
#--- LDT_ENTRY structure ------------------------------------------------------
# typedef struct _LDT_ENTRY {
# WORD LimitLow;
# WORD BaseLow;
# union {
# struct {
# BYTE BaseMid;
# BYTE Flags1;
# BYTE Flags2;
# BYTE BaseHi;
# } Bytes;
# struct {
# DWORD BaseMid :8;
# DWORD Type :5;
# DWORD Dpl :2;
# DWORD Pres :1;
# DWORD LimitHi :4;
# DWORD Sys :1;
# DWORD Reserved_0 :1;
# DWORD Default_Big :1;
# DWORD Granularity :1;
# DWORD BaseHi :8;
# } Bits;
# } HighWord;
# } LDT_ENTRY,
# *PLDT_ENTRY;
class _LDT_ENTRY_BYTES_(Structure):
    """Byte-wise view of the high word of an LDT entry (the 'Bytes'
    member of the LDT_ENTRY HighWord union)."""
    _pack_ = 1  # byte-aligned, to mirror the Win32 layout exactly
    _fields_ = [
        ('BaseMid', BYTE),
        ('Flags1', BYTE),
        ('Flags2', BYTE),
        ('BaseHi', BYTE),
    ]
class _LDT_ENTRY_BITS_(Structure):
    """Bit-field view of the high word of an LDT entry (the 'Bits'
    member of the LDT_ENTRY HighWord union). The third tuple element
    is the bit width of each field."""
    _pack_ = 1  # byte-aligned, to mirror the Win32 layout exactly
    _fields_ = [
        ('BaseMid', DWORD, 8),
        ('Type', DWORD, 5),
        ('Dpl', DWORD, 2),
        ('Pres', DWORD, 1),
        ('LimitHi', DWORD, 4),
        ('Sys', DWORD, 1),
        ('Reserved_0', DWORD, 1),
        ('Default_Big', DWORD, 1),
        ('Granularity', DWORD, 1),
        ('BaseHi', DWORD, 8),
    ]
class _LDT_ENTRY_HIGHWORD_(Union):
    """Union overlaying the byte-wise and bit-field views of the high
    word of an LDT entry."""
    _pack_ = 1  # byte-aligned, to mirror the Win32 layout exactly
    _fields_ = [
        ('Bytes', _LDT_ENTRY_BYTES_),
        ('Bits', _LDT_ENTRY_BITS_),
    ]
class LDT_ENTRY(Structure):
    """ctypes mirror of the Win32 LDT_ENTRY structure (a local
    descriptor table segment descriptor)."""
    _pack_ = 1  # byte-aligned, to mirror the Win32 layout exactly
    _fields_ = [
        ('LimitLow', WORD),
        ('BaseLow', WORD),
        ('HighWord', _LDT_ENTRY_HIGHWORD_),
    ]
# Pointer aliases for LDT_ENTRY, matching the Win32 API's
# PLDT_ENTRY / LPLDT_ENTRY typedefs.
PLDT_ENTRY = POINTER(LDT_ENTRY)
LPLDT_ENTRY = PLDT_ENTRY
###############################################################################
# BOOL WINAPI GetThreadSelectorEntry(
# __in HANDLE hThread,
# __in DWORD dwSelector,
# __out LPLDT_ENTRY lpSelectorEntry
# );
def GetThreadSelectorEntry(hThread, dwSelector):
    """Wrapper for the Win32 GetThreadSelectorEntry API.

    Returns the LDT_ENTRY descriptor for the given selector of the
    given thread. Raises on failure (via RaiseIfZero).
    """
    _api = windll.kernel32.GetThreadSelectorEntry
    _api.argtypes = [HANDLE, DWORD, LPLDT_ENTRY]
    _api.restype = bool
    _api.errcheck = RaiseIfZero

    # Output parameter: filled in by the system call.
    entry = LDT_ENTRY()
    _api(hThread, dwSelector, byref(entry))
    return entry
# BOOL WINAPI GetThreadContext(
# __in HANDLE hThread,
# __inout LPCONTEXT lpContext
# );
def GetThreadContext(hThread, ContextFlags = None, raw = False):
    """Wrapper for the Win32 GetThreadContext API.

    @param hThread: handle to the target thread.
    @param ContextFlags: which register groups to retrieve; defaults to
        CONTEXT_ALL | CONTEXT_i386 when not given.
    @param raw: when True return the raw CONTEXT structure, otherwise
        return it converted to a Context dictionary.
    """
    _api = windll.kernel32.GetThreadContext
    _api.argtypes = [HANDLE, LPCONTEXT]
    _api.restype = bool
    _api.errcheck = RaiseIfZero

    if ContextFlags is None:
        ContextFlags = CONTEXT_ALL | CONTEXT_i386
    # Note: local renamed from "Context" to avoid shadowing the
    # module-level Context class.
    lpContext = CONTEXT()
    lpContext.ContextFlags = ContextFlags
    _api(hThread, byref(lpContext))
    return lpContext if raw else lpContext.to_dict()
# BOOL WINAPI SetThreadContext(
# __in HANDLE hThread,
# __in const CONTEXT* lpContext
# );
def SetThreadContext(hThread, lpContext):
    """Wrapper for the Win32 SetThreadContext API.

    Accepts either a raw CONTEXT structure or a Context dictionary
    (which is converted before the call).
    """
    _api = windll.kernel32.SetThreadContext
    _api.argtypes = [HANDLE, LPCONTEXT]
    _api.restype = bool
    _api.errcheck = RaiseIfZero

    context = lpContext
    if isinstance(context, dict):
        # Convert a Context dictionary back into the ctypes structure.
        context = CONTEXT.from_dict(context)
    _api(hThread, byref(context))
#==============================================================================
# Compute the list of exported symbols: everything defined since the
# initial snapshot in _all, excluding private (underscore-prefixed) names.
_all = set(vars().keys()).difference(_all)
__all__ = sorted(name for name in _all if not name.startswith('_'))
#==============================================================================
| apache-2.0 |
ctrlaltdylan/CouchPotato | app/lib/provider/yarr/sources/x264.py | 8 | 2981 | from app.config.cplog import CPLog
from app.lib.provider.yarr.base import torrentBase
from imdb.parser.http.bsouplxml._bsoup import SoupStrainer, BeautifulSoup
from urllib import quote_plus
from urllib2 import URLError
import time
import urllib
import urllib2
log = CPLog(__name__)
class x264(torrentBase):
    """Provider for #alt.binaries.hdtv.x264 @ EFnet

    Searches the request tracker for filled NZB requests matching a
    movie and returns them as feed items.
    """

    name = 'x264'

    searchUrl = 'http://85.214.105.230/x264/requests.php?release=%s&status=FILLED&age=1300&sort=ID'
    # Fixed: the query string had been corrupted by HTML-entity mangling
    # ("&sect" rendered as a section sign); the real parameter is "section".
    downloadUrl = 'http://85.214.105.230/get_nzb.php?id=%s&section=hd'

    def __init__(self, config):
        log.info('Using #alt.binaries.hdtv.x264@EFnet provider')
        self.config = config

    def conf(self, option):
        # Read a provider-specific option from the [x264] config section.
        return self.config.get('x264', option)

    def enabled(self):
        # Active only when both this provider and NZB support are enabled.
        return self.conf('enabled') and self.config.get('NZB', 'enabled')

    def find(self, movie, quality, type):
        """Search the tracker for filled requests matching the movie.

        Returns a list of feed items (possibly empty).
        """
        results = []
        if not self.enabled() or not self.isAvailable(self.searchUrl):
            return results

        url = self.searchUrl % quote_plus(self.toSearchString(movie.name + ' ' + quality))

        log.info('Searching: %s' % url)

        try:
            data = urllib2.urlopen(url, timeout = self.timeout).read()
        except (IOError, URLError):
            log.error('Failed to open %s.' % url)
            return results

        try:
            # Restrict parsing to <table> elements for speed.
            tables = SoupStrainer('table')
            html = BeautifulSoup(data, parseOnlyThese = tables)
            resultTable = html.find('table', attrs = {'class':'requests'})
            for result in resultTable.findAll('tr', attrs = {'class':'req_filled'}):
                new = self.feedItem()

                id = result.find('td', attrs = {'class':'reqid'})
                new.id = id.contents[0]

                name = result.find('td', attrs = {'class':'release'})
                new.name = self.toSaveString(name.contents[0])
                # The tracker does not expose sizes; use a placeholder.
                new.size = 9999
                new.content = 'x264'
                new.type = 'nzb'
                new.url = self.downloadUrl % (new.id)
                new.date = time.time()
                new.score = self.calcScore(new, movie)

                if self.isCorrectMovie(new, movie, type):
                    results.append(new)
                    log.info('Found: %s' % new.name)

            return results
        except AttributeError:
            # resultTable was None: no results table in the page.
            log.debug('No search results found.')

        return results

    def makeIgnoreString(self, type):
        # This provider has no ignore-word support.
        return ''

    def getInfo(self, url):
        """Fetch a request page and return its movie information block
        as a unicode string (empty string on failure)."""
        log.debug('Getting info: %s' % url)
        try:
            data = urllib2.urlopen(url, timeout = self.timeout).read()
        except IOError:
            log.error('Failed to open %s.' % url)
            return ''

        # Fixed: removed a SoupStrainer that was built but never passed to
        # BeautifulSoup (and would have hidden the <div> we look for), plus
        # a stray "pass" statement.
        html = BeautifulSoup(data)
        movieInformation = html.find('div', attrs = {'class':'i_info'})

        return str(movieInformation).decode("utf-8", "replace")
| gpl-3.0 |
repotvsupertuga/tvsupertuga.repository | script.module.schism.common/lib/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Typical distribution ratio for Big5 text; about 25% of the ideal ratio
# derived in the comment block above.
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table: number of entries in Big5CharToFreqOrder below.
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purpose
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| gpl-2.0 |
DHI-GRAS/processing_workflow | ProcessingWorkflowPlugin.py | 1 | 3497 | """
***************************************************************************
ProcessingWorkflowPlugin.py
-------------------------------------
Copyright (C) 2014 TIGER-NET (www.tiger-net.org)
***************************************************************************
* This plugin is part of the Water Observation Information System (WOIS) *
* developed under the TIGER-NET project funded by the European Space *
* Agency as part of the long-term TIGER initiative aiming at promoting *
* the use of Earth Observation (EO) for improved Integrated Water *
* Resources Management (IWRM) in Africa. *
* *
* WOIS is a free software i.e. you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* WOIS is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
* for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************
"""
from builtins import object
import os
import sys
import inspect
from qgis.core import QgsApplication
from processing_workflow.WorkflowProvider import WorkflowProvider
from processing_workflow.WorkflowAlgListListener import WorkflowAlgListListener
from processing_workflow.WorkflowOnlyAlgorithmProvider import WorkflowOnlyAlgorithmProvider
# Directory containing this plugin file; prepend it to sys.path so the
# plugin's sibling modules can be imported regardless of install location.
cmd_folder = os.path.dirname(inspect.getfile(inspect.currentframe()))
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
class ProcessingWorkflowPlugin(object):
    """QGIS plugin entry point that registers the workflow algorithm
    providers with the Processing framework.
    """

    def __init__(self, iface):
        # Providers to be registered with the Processing registry.
        self.workflowOnlyAlgorithmProvider = WorkflowOnlyAlgorithmProvider()
        self.provider = WorkflowProvider()
        # Save reference to the QGIS interface
        self.iface = iface

    def initGui(self):
        """Hook called by QGIS on plugin load: register both providers and
        start listening for changes to the registered-provider list.
        """
        self.algListener = WorkflowAlgListListener(self.provider,
                                                   self.workflowOnlyAlgorithmProvider)
        registry = QgsApplication.processingRegistry()
        registry.providerAdded.connect(self.algListener.algsListHasChanged)
        registry.providerRemoved.connect(self.algListener.algsListHasChanged)
        registry.addProvider(self.workflowOnlyAlgorithmProvider)
        registry.addProvider(self.provider)

    def unload(self):
        """Hook called by QGIS on plugin unload: disconnect the listener
        signals and deregister both providers.
        """
        registry = QgsApplication.processingRegistry()
        registry.providerAdded.disconnect(self.algListener.algsListHasChanged)
        registry.providerRemoved.disconnect(self.algListener.algsListHasChanged)
        registry.removeProvider(self.provider)
        registry.removeProvider(self.workflowOnlyAlgorithmProvider)
bokeh/bokeh | bokeh/util/__init__.py | 1 | 2529 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a collection of general utilities useful for implementing Bokeh
functionality.
.. _bokeh.util.browser:
``bokeh.util.browser``
----------------------
.. automodule:: bokeh.util.browser
:members:
.. _bokeh.util.callback_manager:
``bokeh.util.callback_manager``
-------------------------------
.. automodule:: bokeh.util.callback_manager
:members:
.. _bokeh.util.compiler:
``bokeh.util.compiler``
-----------------------
.. automodule:: bokeh.util.compiler
:members:
.. _bokeh.util.dependencies:
``bokeh.util.dependencies``
---------------------------
.. automodule:: bokeh.util.dependencies
:members:
.. _bokeh.util.deprecation:
``bokeh.util.deprecation``
--------------------------
.. automodule:: bokeh.util.deprecation
.. _bokeh.util.functions:
``bokeh.util.functions``
------------------------
.. automodule:: bokeh.util.functions
:members:
``bokeh.util.hex``
------------------
.. automodule:: bokeh.util.hex
:members:
.. _bokeh.util.logconfig:
``bokeh.util.logconfig``
------------------------
.. automodule:: bokeh.util.logconfig
:members:
.. _bokeh.util.options:
``bokeh.util.options``
----------------------
.. automodule:: bokeh.util.options
:members:
.. _bokeh.util.paths:
``bokeh.util.paths``
--------------------
.. automodule:: bokeh.util.paths
:members:
.. _bokeh.util.serialization:
``bokeh.util.serialization``
----------------------------
.. automodule:: bokeh.util.serialization
:members:
.. _bokeh.util.token:
``bokeh.util.token``
-------------------------
.. automodule:: bokeh.util.token
:members:
.. _bokeh.util.string:
``bokeh.util.string``
---------------------
.. automodule:: bokeh.util.string
:members:
.. _bokeh.util.tornado:
``bokeh.util.tornado``
----------------------
.. automodule:: bokeh.util.tornado
:members:
.. _bokeh.util.terminal:
``bokeh.util.terminal``
-----------------------
.. automodule:: bokeh.util.terminal
:members:
.. _bokeh.util.version:
``bokeh.util.version``
----------------------
.. automodule:: bokeh.util.version
:members:
.. _bokeh.util.warnings:
``bokeh.util.warnings``
-----------------------
.. automodule:: bokeh.util.warnings
:members:
'''
| bsd-3-clause |
AloneRoad/Inforlearn | vendor/django/contrib/gis/utils/layermapping.py | 1 | 29869 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
This grew out of my personal needs, specifically the code repetition
that went into pulling geometries and fields out of an OGR layer,
converting to another coordinate system (e.g. WGS84), and then inserting
into a GeoDjango model.
Please report any bugs encountered using this utility.
Requirements: OGR C Library (from GDAL) required.
Usage:
lm = LayerMapping(model, source_file, mapping) where,
model:
GeoDjango model (not an instance)
data:
OGR-supported data source file (e.g. a shapefile) or
gdal.DataSource instance
mapping:
A python dictionary, keys are strings corresponding
to the GeoDjango model field, and values correspond to
string field names for the OGR feature, or if the model field
is a geographic then it should correspond to the OGR
geometry type, e.g. 'POINT', 'LINESTRING', 'POLYGON'.
Keyword Args:
layer:
The index of the layer to use from the Data Source (defaults to 0)
source_srs:
Use this to specify the source SRS manually (for example,
some shapefiles don't come with a '.prj' file). An integer SRID,
a string WKT, and SpatialReference objects are valid parameters.
encoding:
Specifies the encoding of the string in the OGR data source.
For example, 'latin-1', 'utf-8', and 'cp437' are all valid
encoding parameters.
transaction_mode:
May be 'commit_on_success' (default) or 'autocommit'.
transform:
Setting this to False will disable all coordinate transformations.
unique:
Setting this to the name, or a tuple of names, from the given
model will create models unique only to the given name(s).
Geometries from each feature will be added into the collection
associated with the unique model. Forces transaction mode to
be 'autocommit'.
Example:
1. You need a GDAL-supported data source, like a shapefile.
Assume we're using the test_poly SHP file:
>>> from django.contrib.gis.gdal import DataSource
>>> ds = DataSource('test_poly.shp')
>>> layer = ds[0]
>>> print layer.fields # Exploring the fields in the layer, we only want the 'str' field.
['float', 'int', 'str']
>>> print len(layer) # getting the number of features in the layer (should be 3)
3
>>> print layer.geom_type # Should be 3 (a Polygon)
3
>>> print layer.srs # WGS84
GEOGCS["GCS_WGS_1984",
DATUM["WGS_1984",
SPHEROID["WGS_1984",6378137,298.257223563]],
PRIMEM["Greenwich",0],
UNIT["Degree",0.017453292519943295]]
2. Now we define our corresponding Django model (make sure to use syncdb):
from django.contrib.gis.db import models
class TestGeo(models.Model, models.GeoMixin):
name = models.CharField(maxlength=25) # corresponds to the 'str' field
poly = models.PolygonField(srid=4269) # we want our model in a different SRID
objects = models.GeoManager()
def __str__(self):
return 'Name: %s' % self.name
3. Use LayerMapping to extract all the features and place them in the database:
>>> from django.contrib.gis.utils import LayerMapping
>>> from geoapp.models import TestGeo
>>> mapping = {'name' : 'str', # The 'name' model field maps to the 'str' layer field.
'poly' : 'POLYGON', # For geometry fields use OGC name.
} # The mapping is a dictionary
>>> lm = LayerMapping(TestGeo, 'test_poly.shp', mapping)
>>> lm.save(verbose=True) # Save the layermap, imports the data.
Saved: Name: 1
Saved: Name: 2
Saved: Name: 3
LayerMapping just transformed the three geometries from the SHP file from their
source spatial reference system (WGS84) to the spatial reference system of
the GeoDjango model (NAD83). If no spatial reference system is defined for
the layer, use the `source_srs` keyword with a SpatialReference object to
specify one.
"""
import sys
from datetime import date, datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.gdal import CoordTransform, DataSource, \
OGRException, OGRGeometry, OGRGeomType, SpatialReference
from django.contrib.gis.gdal.field import \
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
from django.contrib.gis.models import GeometryColumns, SpatialRefSys
from django.db import models, transaction
from django.contrib.localflavor.us.models import USStateField
# LayerMapping exceptions.
class LayerMapError(Exception):
    "Base class for all errors raised during a layer mapping."

class InvalidString(LayerMapError):
    "Raised when a string value does not fit the target model field."

class InvalidDecimal(LayerMapError):
    "Raised when a value cannot be represented by the model's DecimalField."

class InvalidInteger(LayerMapError):
    "Raised when a value cannot be coerced into an integer."

class MissingForeignKey(LayerMapError):
    "Raised when no related model matches a ForeignKey sub-mapping."
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."

    # Acceptable 'base' types for a multi-geometry type, keyed by the OGR
    # geometry type number of the corresponding single-geometry type.
    MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
                   2 : OGRGeomType('MultiLineString'),
                   3 : OGRGeomType('MultiPolygon'),
                   }

    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.
    FIELD_TYPES = {
        models.AutoField : OFTInteger,
        models.IntegerField : (OFTInteger, OFTReal, OFTString),
        models.FloatField : (OFTInteger, OFTReal),
        models.DateField : OFTDate,
        models.DateTimeField : OFTDateTime,
        models.EmailField : OFTString,
        models.TimeField : OFTTime,
        models.DecimalField : (OFTInteger, OFTReal),
        models.CharField : OFTString,
        models.SlugField : OFTString,
        models.TextField : OFTString,
        models.URLField : OFTString,
        USStateField : OFTString,
        models.XMLField : OFTString,
        models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
        }

    # The acceptable transaction modes, mapping the keyword string to the
    # Django transaction decorator used to wrap the save loop.
    TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
                         'commit_on_success' : transaction.commit_on_success,
                         }
def __init__(self, model, data, mapping, layer=0,
             source_srs=None, encoding=None,
             transaction_mode='commit_on_success',
             transform=True, unique=None):
    """
    A LayerMapping object is initialized using the given Model (not an instance),
    a DataSource (or string path to an OGR-supported data file), and a mapping
    dictionary.  See the module level docstring for more details and keyword
    argument usage.
    """
    # Getting the DataSource and the associated Layer.
    if isinstance(data, basestring):
        # A string is treated as a path and opened as an OGR data source.
        self.ds = DataSource(data)
    else:
        # Otherwise an already-constructed DataSource instance is expected.
        self.ds = data
    self.layer = self.ds[layer]

    # Setting the mapping
    self.mapping = mapping

    # Setting the model, and getting the geometry column associated
    # with the model (an exception will be raised if there is no
    # geometry column).
    self.model = model
    self.geo_col = self.geometry_column()

    # Checking the source spatial reference system, and getting
    # the coordinate transformation object (unless the `transform`
    # keyword is set to False)
    if transform:
        self.source_srs = self.check_srs(source_srs)
        self.transform = self.coord_transform()
    else:
        # NOTE: when disabled, self.transform holds the boolean False,
        # which verify_geom() relies on via a truthiness test.
        self.transform = transform

    # Checking the layer -- initialization of the object will fail if
    # things don't check out before hand.
    self.check_layer()

    # Setting the encoding for OFTString fields, if specified.
    if encoding:
        # Making sure the encoding exists, if not a LookupError
        # exception will be thrown.
        from codecs import lookup
        lookup(encoding)
        self.encoding = encoding
    else:
        self.encoding = None

    if unique:
        self.check_unique(unique)
        transaction_mode = 'autocommit' # Has to be set to autocommit.
        self.unique = unique
    else:
        self.unique = None

    # Setting the transaction decorator with the function in the
    # transaction modes dictionary.
    if transaction_mode in self.TRANSACTION_MODES:
        self.transaction_decorator = self.TRANSACTION_MODES[transaction_mode]
        self.transaction_mode = transaction_mode
    else:
        raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
    """
    Check the `fid_range` keyword: a tuple/list is converted into a
    slice, a slice is passed through unchanged, and a falsy value
    yields None (meaning "all features").

    Raises TypeError for any other type.  Previously the TypeError was
    raised with no message, which made the failure hard to diagnose.
    """
    if fid_range:
        if isinstance(fid_range, (tuple, list)):
            # (begin, end) pairs become half-open slices.
            return slice(*fid_range)
        elif isinstance(fid_range, slice):
            return fid_range
        else:
            raise TypeError('Unable to handle the `fid_range` keyword argument type: %s' % type(fid_range))
    else:
        return None
def check_layer(self):
    """
    This checks the Layer metadata, and ensures that it is compatible
    with the mapping information and model.  Unlike previous revisions,
    there is no need to increment through each feature in the Layer.

    Side effects: populates `self.fields` (mapping field name -> model
    field or related model) and `self.geom_field` (name of the single
    geometry field, or False when none is mapped).
    """
    # The geometry field of the model is set here.
    # TODO: Support more than one geometry field / model.
    self.geom_field = False
    self.fields = {}

    # Getting lists of the field names and the field types available in
    # the OGR Layer.
    ogr_fields = self.layer.fields
    ogr_field_types = self.layer.field_types

    # Function for determining if the OGR mapping field is in the Layer.
    def check_ogr_fld(ogr_map_fld):
        try:
            idx = ogr_fields.index(ogr_map_fld)
        except ValueError:
            raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
        return idx

    # No need to increment through each feature in the model, simply check
    # the Layer metadata against what was given in the mapping dictionary.
    for field_name, ogr_name in self.mapping.items():
        # Ensuring that a corresponding field exists in the model
        # for the given field name in the mapping.
        try:
            model_field = self.model._meta.get_field(field_name)
        except models.fields.FieldDoesNotExist:
            raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)

        # Getting the string name for the Django field class (e.g., 'PointField').
        fld_name = model_field.__class__.__name__

        if isinstance(model_field, GeometryField):
            if self.geom_field:
                raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
            try:
                gtype = OGRGeomType(ogr_name)
            except OGRException:
                raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)

            # Making sure that the OGR Layer's Geometry is compatible.
            ltype = self.layer.geom_type
            if not (gtype == ltype or self.make_multi(ltype, model_field)):
                raise LayerMapError('Invalid mapping geometry; model has %s, feature has %s.' % (fld_name, gtype))

            # Setting the `geom_field` attribute w/the name of the model field
            # that is a Geometry.
            self.geom_field = field_name
            fields_val = model_field
        elif isinstance(model_field, models.ForeignKey):
            if isinstance(ogr_name, dict):
                # Is every given related model mapping field in the Layer?
                rel_model = model_field.rel.to
                for rel_name, ogr_field in ogr_name.items():
                    idx = check_ogr_fld(ogr_field)
                    try:
                        # NOTE(review): rel_field is only fetched to confirm
                        # existence of the related field; the value is unused.
                        rel_field = rel_model._meta.get_field(rel_name)
                    except models.fields.FieldDoesNotExist:
                        raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                            (rel_name, rel_model.__class__.__name__))
                # Store the related model itself -- feature_kwargs() keys
                # off this to call verify_fk().
                fields_val = rel_model
            else:
                raise TypeError('ForeignKey mapping must be of dictionary type.')
        else:
            # Is the model field type supported by LayerMapping?
            if not model_field.__class__ in self.FIELD_TYPES:
                raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)

            # Is the OGR field in the Layer?
            idx = check_ogr_fld(ogr_name)
            ogr_field = ogr_field_types[idx]

            # Can the OGR field type be mapped to the Django field type?
            if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                    (ogr_field, ogr_field.__name__, fld_name))
            fields_val = model_field

        self.fields[field_name] = fields_val
def check_srs(self, source_srs):
    """
    Resolve the `source_srs` keyword into a usable spatial reference.

    Accepts a SpatialReference, a SpatialRefSys model instance, an SRID
    integer, or a string; otherwise falls back to the spatial reference
    advertised by the layer itself.  Raises LayerMapError when nothing
    usable can be determined.
    """
    if isinstance(source_srs, SpatialReference):
        srs_obj = source_srs
    elif isinstance(source_srs, SpatialRefSys):
        srs_obj = source_srs.srs
    elif isinstance(source_srs, (int, basestring)):
        srs_obj = SpatialReference(source_srs)
    else:
        # No explicit SRS given -- use whatever the layer provides.
        srs_obj = self.layer.srs
    if srs_obj:
        return srs_obj
    raise LayerMapError('No source reference system defined.')
def check_unique(self, unique):
    """
    Check the `unique` keyword parameter -- may be a sequence of mapping
    field names or a single field name string.

    Raises ValueError when a named field is absent from the mapping
    dictionary (previously raised with no message, obscuring which
    field was bad), and TypeError for unsupported types.
    """
    if isinstance(unique, (list, tuple)):
        # List of fields to determine uniqueness with
        for attr in unique:
            if not attr in self.mapping:
                raise ValueError('Unique keyword argument field "%s" not in the mapping dictionary.' % attr)
    elif isinstance(unique, basestring):
        # Only a single field passed in.
        if unique not in self.mapping:
            raise ValueError('Unique keyword argument field "%s" not in the mapping dictionary.' % unique)
    else:
        raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
#### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
    """
    Return a dictionary of keyword arguments, suitable for constructing
    the mapped model, from the given OGR Feature.
    """
    model_kwargs = {}
    # Walk the mapping, verifying each OGR value against the
    # corresponding Django model field.
    for fld_name, ogr_fld in self.mapping.items():
        mapped = self.fields[fld_name]
        if isinstance(mapped, GeometryField):
            # Geometry fields verify the feature's geometry.
            value = self.verify_geom(feat.geom, mapped)
        elif isinstance(mapped, models.base.ModelBase):
            # A related _model_ (not a field) was stored by check_layer(),
            # indicating a ForeignKey sub-mapping.
            value = self.verify_fk(feat, mapped, ogr_fld)
        else:
            # Plain attribute field: verify the raw OGR field value.
            value = self.verify_ogr_field(feat[ogr_fld], mapped)
        model_kwargs[fld_name] = value
    return model_kwargs
def unique_kwargs(self, kwargs):
    """
    Given the full feature keyword arguments (from `feature_kwargs`),
    return the subset used to look up an existing unique model.
    """
    unique = self.unique
    if isinstance(unique, basestring):
        # A single field name was given as the uniqueness criterion.
        return {unique: kwargs[unique]}
    # Otherwise a sequence of mapping field names.
    return dict([(fld_name, kwargs[fld_name]) for fld_name in unique])
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
    """
    Verifies if the OGR Field contents are acceptable to the Django
    model field.  If they are, the verified value is returned,
    otherwise the proper exception is raised (InvalidString,
    InvalidDecimal, or InvalidInteger).
    """
    if (isinstance(ogr_field, OFTString) and
        isinstance(model_field, (models.CharField, models.TextField))):
        if self.encoding:
            # The encoding for OGR data sources may be specified here
            # (e.g., 'cp437' for Census Bureau boundary files).
            val = unicode(ogr_field.value, self.encoding)
        else:
            val = ogr_field.value
        # Only enforce a maximum length when the model field defines one.
        # TextField has max_length of None; under Python 2 the previous
        # unguarded `len(val) > None` comparison evaluated True and
        # spuriously raised InvalidString for every TextField value.
        if model_field.max_length and len(val) > model_field.max_length:
            raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                (model_field.name, model_field.max_length, len(val)))
    elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
        try:
            # Creating an instance of the Decimal value to use.
            d = Decimal(str(ogr_field.value))
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed.
            raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)

        # Getting the decimal value as a tuple.
        dtup = d.as_tuple()
        digits = dtup[1]
        d_idx = dtup[2] # index where the decimal is

        # Maximum amount of precision, or digits to the left of the decimal.
        max_prec = model_field.max_digits - model_field.decimal_places

        # Getting the digits to the left of the decimal place for the
        # given decimal.
        if d_idx < 0:
            n_prec = len(digits[:d_idx])
        else:
            n_prec = len(digits) + d_idx

        # If we have more than the maximum digits allowed, then throw an
        # InvalidDecimal exception.
        if n_prec > max_prec:
            raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
                                 (model_field.max_digits, model_field.decimal_places, max_prec))
        val = d
    elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
        # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
        try:
            val = int(ogr_field.value)
        except Exception:
            # Narrowed from a bare `except:` (see above).
            raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
    else:
        val = ogr_field.value
    return val
def verify_fk(self, feat, rel_model, rel_mapping):
    """
    Given an OGR Feature, the related model, and its sub-mapping
    dictionary, retrieve the related model instance for the ForeignKey
    mapping.  Raises MissingForeignKey when no instance matches.
    """
    # TODO: It is expensive to retrieve a model for every record --
    # explore if an efficient mechanism exists for caching related
    # ForeignKey models.

    # Build and verify the lookup keyword arguments for the related model.
    lookup_kwargs = {}
    for fld_name, ogr_fld in rel_mapping.items():
        rel_field = rel_model._meta.get_field(fld_name)
        lookup_kwargs[fld_name] = self.verify_ogr_field(feat[ogr_fld], rel_field)

    # Attempt to retrieve and return the related model instance.
    try:
        return rel_model.objects.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, lookup_kwargs))
def verify_geom(self, geom, model_field):
    """
    Verify the geometry, constructing a collection geometry when
    required (e.g., when the model field is a MultiPolygonField while
    the mapped shapefile only contains Polygons).

    Returns the WKT of the (possibly transformed) geometry.
    """
    if self.make_multi(geom.geom_type, model_field):
        # Promote the single geometry into its multi-geometry container.
        collection = OGRGeometry(self.MULTI_TYPES[geom.geom_type.num])
        collection.add(geom)
    else:
        collection = geom

    # Apply the coordinate transformation, but only when `self.transform`
    # was initialized with a CoordTransform object (it holds False when
    # transformations are disabled).
    if self.transform:
        collection.transform(self.transform)

    # The geometry is handed back as WKT.
    return collection.wkt
#### Other model methods ####
def coord_transform(self):
    "Returns the coordinate transformation object."
    try:
        # Getting the target spatial reference system from the SRID of
        # the model's geometry column.
        target_srs = SpatialRefSys.objects.get(srid=self.geo_col.srid).srs

        # Creating the CoordTransform object
        return CoordTransform(self.source_srs, target_srs)
    except Exception, msg:
        # Any failure (missing SRID row, GDAL error, ...) is re-raised
        # as a LayerMapError so callers deal with one exception type.
        raise LayerMapError('Could not translate between the data source and model geometry: %s' % msg)
def geometry_column(self):
    "Returns the GeometryColumn model associated with the geographic column."
    # Getting the GeometryColumn object.
    try:
        db_table = self.model._meta.db_table
        # Oracle requires the upper-cased table name for this lookup.
        if SpatialBackend.name == 'oracle': db_table = db_table.upper()
        gc_kwargs = {GeometryColumns.table_name_col() : db_table}
        return GeometryColumns.objects.get(**gc_kwargs)
    except Exception, msg:
        raise LayerMapError('Geometry column does not exist for model. (did you run syncdb?):\n %s' % msg)
def make_multi(self, geom_type, model_field):
    """
    Given the OGRGeomType for a geometry and its associated
    GeometryField, determine whether the geometry should be promoted
    into a collection type.
    """
    if geom_type.num not in self.MULTI_TYPES:
        return False
    # The model field must be the 'Multi' counterpart of the geometry.
    return model_field.__class__.__name__ == 'Multi%s' % geom_type.django
def save(self, verbose=False, fid_range=False, step=False,
         progress=False, silent=False, stream=sys.stdout, strict=False):
    """
    Saves the contents from the OGR DataSource Layer into the database
    according to the mapping dictionary given at initialization.

    Keyword Parameters:
     verbose:
       If set, information will be printed subsequent to each model save
       executed on the database.

     fid_range:
       May be set with a slice or tuple of (begin, end) feature ID's to map
       from the data source.  In other words, this keyword enables the user
       to selectively import a subset range of features in the geographic
       data source.

     step:
       If set with an integer, transactions will occur at every step
       interval.  For example, if step=1000, a commit would occur after
       the 1,000th feature, the 2,000th feature etc.

     progress:
       When this keyword is set, status information will be printed giving
       the number of features processed and successfully saved.  By default,
       progress information will be printed every 1000 features processed,
       however, this default may be overridden by setting this keyword with an
       integer for the desired interval.

     stream:
       Status information will be written to this file handle.  Defaults to
       using `sys.stdout`, but any object with a `write` method is supported.

     silent:
       By default, non-fatal error notifications are printed to stdout, but
       this keyword may be set to disable these notifications.

     strict:
       Execution of the model mapping will cease upon the first error
       encountered.  The default behavior is to attempt to continue.
    """
    # Getting the default Feature ID range.
    default_range = self.check_fid_range(fid_range)

    # Setting the progress interval, if requested.
    if progress:
        if progress is True or not isinstance(progress, int):
            progress_interval = 1000
        else:
            progress_interval = progress

    # Defining the 'real' save method, utilizing the transaction
    # decorator created during initialization.
    @self.transaction_decorator
    def _save(feat_range=default_range, num_feat=0, num_saved=0):
        if feat_range:
            layer_iter = self.layer[feat_range]
        else:
            layer_iter = self.layer

        for feat in layer_iter:
            num_feat += 1
            # Getting the keyword arguments
            try:
                kwargs = self.feature_kwargs(feat)
            except LayerMapError, msg:
                # Something borked the validation
                if strict: raise
                elif not silent:
                    stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
            else:
                # Constructing the model using the keyword args
                is_update = False
                if self.unique:
                    # If we want unique models on a particular field, handle the
                    # geometry appropriately.
                    try:
                        # Getting the keyword arguments and retrieving
                        # the unique model.
                        u_kwargs = self.unique_kwargs(kwargs)
                        m = self.model.objects.get(**u_kwargs)
                        is_update = True

                        # Getting the geometry (in OGR form), creating
                        # one from the kwargs WKT, adding in additional
                        # geometries, and update the attribute with the
                        # just-updated geometry WKT.
                        geom = getattr(m, self.geom_field).ogr
                        new = OGRGeometry(kwargs[self.geom_field])
                        for g in new: geom.add(g)
                        setattr(m, self.geom_field, geom.wkt)
                    except ObjectDoesNotExist:
                        # No unique model exists yet, create.
                        m = self.model(**kwargs)
                else:
                    m = self.model(**kwargs)

                try:
                    # Attempting to save.
                    m.save()
                    num_saved += 1
                    if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                except SystemExit:
                    raise
                except Exception, msg:
                    if self.transaction_mode == 'autocommit':
                        # Rolling back the transaction so that other model saves
                        # will work.
                        transaction.rollback_unless_managed()
                    if strict:
                        # Bailing out if the `strict` keyword is set.
                        if not silent:
                            stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                            stream.write('%s\n' % kwargs)
                        raise
                    elif not silent:
                        stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

            # Printing progress information, if requested.
            if progress and num_feat % progress_interval == 0:
                stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

        # Only used for status output purposes -- incremental saving uses the
        # values returned here.
        return num_saved, num_feat

    nfeat = self.layer.num_feat
    if step and isinstance(step, int) and step < nfeat:
        # Incremental saving is requested at the given interval (step)
        if default_range:
            raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
        beg, num_feat, num_saved = (0, 0, 0)
        indices = range(step, nfeat, step)
        n_i = len(indices)

        for i, end in enumerate(indices):
            # Constructing the slice to use for this step; the last slice is
            # special (e.g, [100:] instead of [90:100]).
            if i+1 == n_i: step_slice = slice(beg, None)
            else: step_slice = slice(beg, end)

            try:
                num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                beg = end
            # NOTE(review): bare `except` also intercepts KeyboardInterrupt
            # and SystemExit before re-raising; consider narrowing.
            except:
                stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                raise
    else:
        # Otherwise, just calling the previously defined _save() function.
        _save()
| apache-2.0 |
christophlsa/odoo | addons/sales_team/res_config.py | 366 | 1922 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class sales_team_configuration(osv.TransientModel):
    _name = 'sale.config.settings'
    _inherit = ['sale.config.settings']

    def set_group_multi_salesteams(self, cr, uid, ids, context=None):
        """ This method is automatically called by res_config as it begins
            with set.  It is used to implement the 'one group or another'
            behavior.  We have to perform some group manipulation by hand
            because in res_config.execute(), set_* methods are called
            after group_*; therefore writing on a hidden res_config field
            could not work.
            If group_multi_salesteams is checked: remove group_mono_salesteams
            from group_user, remove the users.  Otherwise, just add
            group_mono_salesteams in group_user.
            The inverse logic about group_multi_salesteams is managed by the
            normal behavior of the 'group_multi_salesteams' field.
        """
        def ref(xml_id):
            # Resolve an external XML id ('module.name') to a browse record.
            mod, xml = xml_id.split('.', 1)
            return self.pool['ir.model.data'].get_object(cr, uid, mod, xml, context)

        for obj in self.browse(cr, uid, ids, context=context):
            config_group = ref('base.group_mono_salesteams')
            base_group = ref('base.group_user')
            if obj.group_multi_salesteams:
                # Multi-team mode: detach the mono-team group from the base
                # user group and remove its users (per docstring above).
                base_group.write({'implied_ids': [(3, config_group.id)]})
                config_group.write({'users': [(3, u.id) for u in base_group.users]})
            else:
                # Mono-team mode: re-add the mono-team group as implied.
                base_group.write({'implied_ids': [(4, config_group.id)]})
        return True

    _columns = {
        'group_multi_salesteams': fields.boolean("Organize Sales activities into multiple Sales Teams",
            implied_group='base.group_multi_salesteams',
            help="""Allows you to use Sales Teams to manage your leads and opportunities."""),
    }
| agpl-3.0 |
krintoxi/NoobSec-Toolkit | NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/chardet/langbulgarianmodel.py | 235 | 12820 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
# Maps each ISO-8859-5 (Latin/Cyrillic) byte value to a frequency-order
# index consumed by the Bulgarian language model.  Values 252-255 are the
# special categories documented above; per the note above, this table was
# adapted from the win1251 map, so only entries < 64 are known-reliable.
Latin5_BulgarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,  # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,  # 90
 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238,  # a0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # b0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56,  # c0
  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # d0
  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16,  # e0
 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253,  # f0
)
# Maps each windows-1251 byte value to a frequency-order index consumed by
# the Bulgarian language model; values 252-255 are the special categories
# documented above.
win1251BulgarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220,  # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229,  # 90
 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240,  # a0
 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250,  # b0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # c0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56,  # d0
  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # e0
  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = { \
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': constants.False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = { \
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': constants.False,
'charsetName': "windows-1251"
}
| gpl-2.0 |
kartoza/geonode | geonode/contrib/metadataxsl/tests.py | 8 | 1090 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test import TestCase
class MetadataXSLTest(TestCase):
    """
    Tests geonode.contrib.metadataxsl app/module
    """

    def setUp(self):
        # Credentials of the stock admin account used by the tests.
        self.adm_un = "admin"
        self.adm_pw = "admin"
        # create_models(type="layer")
| gpl-3.0 |
jhawkesworth/ansible-modules-core | cloud/amazon/ec2_snapshot.py | 53 | 9982 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
- whether to add or create a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    # boto not installed; main() reports this cleanly via fail_json
    # instead of crashing with an ImportError traceback.
    HAS_BOTO = False
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
    """Return the snapshot's start time as a naive UTC datetime.

    AWS reports ``start_time`` like ``2015-06-01T10:20:30.000Z``.  Using the
    ``%f`` directive accepts any fractional-second value rather than only the
    literal ``.000`` the previous format required (which raised ValueError for
    e.g. ``.500Z``).
    """
    return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
    """
    Gets the most recently created snapshot and optionally filters the result
    if the snapshot is too old

    :param snapshots: list of snapshots to search
    :param max_snapshot_age_secs: filter the result if its older than this
    :param now: simulate time -- used for unit testing
    :return: youngest snapshot, or None when there is none or it is too old
    """
    if not snapshots:
        return None

    if not now:
        now = datetime.datetime.utcnow()

    youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)

    # Reuse the shared parser instead of duplicating the strptime format
    # string (the previous copy here could drift out of sync with the helper).
    snapshot_start = _get_snapshot_starttime(youngest_snapshot)
    snapshot_age = now - snapshot_start

    # See if the snapshot is younger than the given max age
    if max_snapshot_age_secs is not None:
        if snapshot_age.total_seconds() > max_snapshot_age_secs:
            return None

    return youngest_snapshot
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
    """
    Wait for the snapshot to be created

    :param snapshot: snapshot object exposing update() and a status attribute
    :param wait_timeout_secs: fail this step after this many seconds
    :param sleep_func: injectable sleep, overridable for unit testing
    :return: True once the snapshot completes, False on timeout
    """
    poll_interval = 3
    elapsed = 0
    snapshot.update()
    while True:
        if snapshot.status == 'completed':
            return True
        sleep_func(poll_interval)
        snapshot.update()
        elapsed += poll_interval
        if wait_timeout_secs and elapsed > wait_timeout_secs:
            return False
def create_snapshot(module, ec2, state=None, description=None, wait=None,
                    wait_timeout=None, volume_id=None, instance_id=None,
                    snapshot_id=None, device_name=None, snapshot_tags=None,
                    last_snapshot_min_age=None):
    """Create or delete an EBS snapshot, then exit the Ansible module.

    Exactly one of volume_id, instance_id or snapshot_id must be given.
    state='absent' deletes snapshot_id; state='present' creates a snapshot
    (or reuses a recent one when last_snapshot_min_age is set).  This
    function always terminates the process via module.exit_json() or
    module.fail_json().
    """
    snapshot = None
    changed = False

    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1:  # only 1 must be set
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')

    if instance_id:
        # Resolve (instance, device) to the backing volume id.
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))

        volume_id = volumes[0].id

    if state == 'absent':
        if not snapshot_id:
            module.fail_json(msg = 'snapshot_id must be set when state is absent')
        try:
            ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError as e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                # Already gone: deletion is idempotent, report no change.
                module.exit_json(changed=False)
            else:
                module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

        # successful delete
        module.exit_json(changed=True)

    if last_snapshot_min_age > 0:
        # Look for an existing snapshot young enough to reuse.
        try:
            current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
        last_snapshot_min_age = last_snapshot_min_age * 60  # Convert to seconds
        snapshot = _get_most_recent_snapshot(current_snapshots,
                                             max_snapshot_age_secs=last_snapshot_min_age)
    try:
        # Create a new snapshot if we didn't find an existing one to use
        if snapshot is None:
            snapshot = ec2.create_snapshot(volume_id, description=description)
            changed = True
        if wait:
            if not _create_with_wait(snapshot, wait_timeout):
                module.fail_json(msg='Timed out while creating snapshot.')
        if snapshot_tags:
            # NOTE(review): tags are applied even to a reused snapshot.
            for k, v in snapshot_tags.items():
                snapshot.add_tag(k, v)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    module.exit_json(changed=changed,
                     snapshot_id=snapshot.id,
                     volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size,
                     tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
    """
    Build the AnsibleModule describing ec2_snapshot's parameters.

    Extends the common EC2 argument spec with the snapshot-specific options.
    """
    spec = ec2_argument_spec()
    spec.update(dict(
        volume_id=dict(),
        description=dict(),
        instance_id=dict(),
        snapshot_id=dict(),
        device_name=dict(),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(type='int', default=0),
        last_snapshot_min_age=dict(type='int', default=0),
        snapshot_tags=dict(type='dict', default=dict()),
        state=dict(choices=['absent', 'present'], default='present'),
    ))
    return AnsibleModule(argument_spec=spec)
def main():
    """Entry point: parse module params and create/delete the snapshot."""
    module = create_snapshot_ansible_module()

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    volume_id = module.params.get('volume_id')
    snapshot_id = module.params.get('snapshot_id')
    description = module.params.get('description')
    instance_id = module.params.get('instance_id')
    device_name = module.params.get('device_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    last_snapshot_min_age = module.params.get('last_snapshot_min_age')
    snapshot_tags = module.params.get('snapshot_tags')
    state = module.params.get('state')

    ec2 = ec2_connect(module)

    # create_snapshot() exits the process via exit_json/fail_json.
    create_snapshot(
        module=module,
        state=state,
        description=description,
        wait=wait,
        wait_timeout=wait_timeout,
        ec2=ec2,
        volume_id=volume_id,
        instance_id=instance_id,
        snapshot_id=snapshot_id,
        device_name=device_name,
        snapshot_tags=snapshot_tags,
        last_snapshot_min_age=last_snapshot_min_age
    )

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
Taranys/Sick-Beard | lib/hachoir_core/error.py | 90 | 1350 | """
Functions to display an error (error, warning or information) message.
"""
from lib.hachoir_core.log import log
from lib.hachoir_core.tools import makePrintable
import sys, traceback
def getBacktrace(empty="Empty backtrace."):
    """
    Try to get backtrace as string.
    Returns "Error while trying to get backtrace" on failure.

    :param empty: string returned when there is no pending exception.
    """
    try:
        info = sys.exc_info()
        trace = traceback.format_exception(*info)
        # Python 2 only API (removed in Python 3): clear the current exception.
        sys.exc_clear()
        # format_exception() yields ["None\n"] when no exception is pending.
        if trace[0] != "None\n":
            return "".join(trace)
    except:
        # Intentionally bare: this runs inside error reporting itself, so it
        # must never raise.
        # No i18n here (imagine if i18n function calls error...)
        return "Error while trying to get backtrace"
    return empty
class HachoirError(Exception):
    """
    Parent of all errors in Hachoir library
    """

    def __init__(self, message):
        # Python 2: Exception args are kept as printable ASCII bytes; the
        # original (possibly unicode) text is preserved for __unicode__.
        message_bytes = makePrintable(message, "ASCII")
        Exception.__init__(self, message_bytes)
        # Original, un-mangled message text.
        self.text = message

    def __unicode__(self):
        return self.text
# Error classes which may be raised by Hachoir core
# FIXME: Add EnvironmentError (IOError or OSError) and AssertionError?
# FIXME: Remove ArithmeticError and RuntimeError?
HACHOIR_ERRORS = (HachoirError, LookupError, NameError, AttributeError,
    TypeError, ValueError, ArithmeticError, RuntimeError)

# Convenience aliases so callers can write info()/warning()/error()
# instead of going through the log object.
info = log.info
warning = log.warning
error = log.error
| gpl-3.0 |
LethusTI/supportcenter | vendor/django/django/contrib/sitemaps/tests/https.py | 77 | 2364 | from datetime import date
from django.test.utils import override_settings
from .base import SitemapTestsBase
class HTTPSSitemapTests(SitemapTestsBase):
    # Serve the sitemap views over HTTPS via a dedicated URLconf.
    protocol = 'https'
    urls = 'django.contrib.sitemaps.tests.urls.https'

    def test_secure_sitemap_index(self):
        "A secure sitemap index can be rendered"
        response = self.client.get('/secure/index.xml')
        # Expected XML is compared byte-for-byte, trailing newline included.
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/secure/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)

    def test_secure_sitemap_section(self):
        "A secure sitemap section can be rendered"
        response = self.client.get('/secure/sitemap-simple.xml')
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today()))
#@override_settings(SECURE_PROXY_SSL_HEADER=False)
class HTTPSDetectionSitemapTests(SitemapTestsBase):
    # Simulate HTTPS through the WSGI url scheme instead of a separate
    # URLconf; generated links must switch to https accordingly.
    extra = {'wsgi.url_scheme': 'https'}

    def test_sitemap_index_with_https_request(self):
        "A sitemap index requested in HTTPS is rendered with HTTPS links"
        response = self.client.get('/simple/index.xml', **self.extra)
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url.replace('http://', 'https://'))

    def test_sitemap_section_with_https_request(self):
        "A sitemap section requested in HTTPS is rendered with HTTPS links"
        response = self.client.get('/simple/sitemap-simple.xml', **self.extra)
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url.replace('http://', 'https://'), date.today()))

# Decorator applied as a call (matching the commented-out decorator above).
HTTPSDetectionSitemapTests = override_settings(SECURE_PROXY_SSL_HEADER=False)(HTTPSDetectionSitemapTests)
| gpl-3.0 |
blrm/openshift-tools | ansible/roles/lib_zabbix/library/zbx_triggerprototype.py | 13 | 5976 | #!/usr/bin/env python
'''
ansible module for zabbix triggerprototypes
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix triggerprototypes ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
    ''' Check if key exists in content or the size of content[key] > 0
    '''
    # Use `in` instead of the deprecated dict.has_key() (removed in
    # Python 3); behaviour is identical on Python 2.
    if key not in content:
        return False

    if not content[key]:
        return False

    return True
def get_priority(priority):
    ''' Map a human-readable priority string to its Zabbix numeric code.

    Unrecognised values fall through to 0 (not classified).
    '''
    if 'info' in priority:
        return 1
    if 'warn' in priority:
        return 2
    if 'avg' == priority or 'ave' in priority:
        return 3
    if 'high' in priority:
        return 4
    if 'dis' in priority:
        return 5
    return 0
def get_trigger_status(inc_status):
    ''' Determine the trigger's status
        0 is enabled
        1 is disabled
    '''
    return 1 if inc_status == 'disabled' else 0
def main():
    '''
    Create a triggerprototype in zabbix

    Ansible entry point: reads the module parameters, talks to the Zabbix
    API and exits via exit_json/fail_json.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
            zbx_debug=dict(default=False, type='bool'),
            name=dict(default=None, type='str'),
            expression=dict(default=None, type='str'),
            description=dict(default=None, type='str'),
            priority=dict(default='avg', type='str'),
            url=dict(default=None, type='str'),
            status=dict(default=None, type='str'),
            state=dict(default='present', type='str'),
        ),
        #supports_check_mode=True
    )

    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
                                      module.params['zbx_user'],
                                      module.params['zbx_password'],
                                      module.params['zbx_debug']))

    #Set the instance and the template for the rest of the calls
    zbx_class_name = 'triggerprototype'
    idname = "triggerid"
    state = module.params['state']
    tname = module.params['name']

    # Look up any existing trigger prototype with this description (name).
    content = zapi.get_content(zbx_class_name,
                               'get',
                               {'filter': {'description': tname},
                                'expandExpression': True,
                                'selectDependencies': 'triggerid',
                               })

    # Get
    if state == 'list':
        module.exit_json(changed=False, results=content['result'], state="list")

    # Delete
    if state == 'absent':
        if not exists(content):
            module.exit_json(changed=False, state="absent")
        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
        module.exit_json(changed=True, results=content['result'], state="absent")

    # Create and Update
    if state == 'present':
        params = {'description': tname,
                  'comments': module.params['description'],
                  'expression': module.params['expression'],
                  'priority': get_priority(module.params['priority']),
                  'url': module.params['url'],
                  'status': get_trigger_status(module.params['status']),
                 }

        # Remove any None valued params
        # (Python 2 file -- see has_key below; keys() returns a list, so
        # popping while iterating over it is safe here.)
        _ = [params.pop(key, None) for key in params.keys() if params[key] is None]

        #******#
        # CREATE
        #******#
        if not exists(content):
            # if we didn't find it, create it
            content = zapi.get_content(zbx_class_name, 'create', params)
            if content.has_key('error'):
                module.fail_json(msg=content['error'])
            module.exit_json(changed=True, results=content['result'], state='present')

        ########
        # UPDATE
        ########
        # Diff the desired params against the existing object; the str()
        # comparison covers values the API returns as strings.
        differences = {}
        zab_results = content['result'][0]
        for key, value in params.items():
            if zab_results[key] != value and zab_results[key] != str(value):
                differences[key] = value

        if not differences:
            module.exit_json(changed=False, results=zab_results, state="present")

        # We have differences and need to update
        differences[idname] = zab_results[idname]
        content = zapi.get_content(zbx_class_name, 'update', differences)
        if content.has_key('error'):
            module.fail_json(msg=content['error'])
        module.exit_json(changed=True, results=content['result'], state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")

# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets.  These are required
from ansible.module_utils.basic import *

main()
| apache-2.0 |
sergiocorato/partner-contact | partner_multi_relation/tests/test_partner_relation.py | 5 | 10771 | # -*- coding: utf-8 -*-
# Copyright 2016 Therp BV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from datetime import date
from dateutil.relativedelta import relativedelta
from openerp import fields
from openerp.exceptions import ValidationError
from .test_partner_relation_common import TestPartnerRelationCommon
class TestPartnerRelation(TestPartnerRelationCommon):
    """Tests for partner relations and relation-type constraints.

    NOTE(review): indentation of the do_in_draft/do_in_onchange blocks in
    test_symmetric was reconstructed -- confirm against upstream.
    """

    def test_selection_name_search(self):
        """Test whether we can find type selection on reverse name."""
        selection_types = self.selection_model.name_search(
            name=self.selection_person2company.name
        )
        self.assertTrue(selection_types)
        self.assertTrue(
            (self.selection_person2company.id,
             self.selection_person2company.name) in selection_types
        )

    def test_self_allowed(self):
        """Test creation of relation to same partner when type allows."""
        type_allow = self.type_model.create({
            'name': 'allow',
            'name_inverse': 'allow_inverse',
            'contact_type_left': 'p',
            'contact_type_right': 'p',
            'allow_self': True
        })
        self.assertTrue(type_allow)
        reflexive_relation = self.relation_model.create({
            'type_id': type_allow.id,
            'left_partner_id': self.partner_01_person.id,
            'right_partner_id': self.partner_01_person.id,
        })
        self.assertTrue(reflexive_relation)

    def test_self_disallowed(self):
        """Test creating relation to same partner when disallowed.

        Attempt to create a relation of a partner to the same partner should
        raise an error when the type of relation explicitly disallows this.
        """
        type_disallow = self.type_model.create({
            'name': 'disallow',
            'name_inverse': 'disallow_inverse',
            'contact_type_left': 'p',
            'contact_type_right': 'p',
            'allow_self': False
        })
        self.assertTrue(type_disallow)
        with self.assertRaises(ValidationError):
            self.relation_model.create({
                'type_id': type_disallow.id,
                'left_partner_id': self.partner_01_person.id,
                'right_partner_id': self.partner_01_person.id,
            })

    def test_self_default(self):
        """Test default not to allow relation with same partner.

        Attempt to create a relation of a partner to the same partner should
        raise an error when the type of relation does not explicitly allow
        this.
        """
        type_default = self.type_model.create({
            'name': 'default',
            'name_inverse': 'default_inverse',
            'contact_type_left': 'p',
            'contact_type_right': 'p',
        })
        self.assertTrue(type_default)
        with self.assertRaises(ValidationError):
            self.relation_model.create({
                'type_id': type_default.id,
                'left_partner_id': self.partner_01_person.id,
                'right_partner_id': self.partner_01_person.id,
            })

    def test_self_mixed(self):
        """Test creation of relation with wrong types.

        Trying to create a relation between partners with an inappropriate
        type should raise an error.
        """
        with self.assertRaises(ValidationError):
            self.relation_model.create({
                'type_id': self.type_company2person.id,
                'left_partner_id': self.partner_01_person.id,
                'right_partner_id': self.partner_02_company.id,
            })

    def test_symmetric(self):
        """Test creating symmetric relation."""
        # Start out with non symmetric relation:
        type_symmetric = self.type_model.create({
            'name': 'not yet symmetric',
            'name_inverse': 'the other side of not symmetric',
            'is_symmetric': False,
            'contact_type_left': False,
            'contact_type_right': 'p',
        })
        # not yet symmetric relation should result in two records in
        # selection:
        selection_symmetric = self.selection_model.search([
            ('type_id', '=', type_symmetric.id),
        ])
        self.assertEqual(len(selection_symmetric), 2)
        # Now change to symmetric and test name and inverse name:
        with self.env.do_in_draft():
            type_symmetric.write(
                vals={
                    'name': 'sym',
                    'is_symmetric': True,
                }
            )
            with self.env.do_in_onchange():
                type_symmetric.onchange_is_symmetric()
        self.assertEqual(type_symmetric.is_symmetric, True)
        self.assertEqual(
            type_symmetric.name_inverse,
            type_symmetric.name
        )
        self.assertEqual(
            type_symmetric.contact_type_right,
            type_symmetric.contact_type_left
        )
        # now update the database:
        type_symmetric.write(
            vals={
                'name': type_symmetric.name,
                'is_symmetric': type_symmetric.is_symmetric,
                'name_inverse': type_symmetric.name_inverse,
                'contact_type_right': type_symmetric.contact_type_right,
            }
        )
        # symmetric relation should result in only one record in
        # selection:
        selection_symmetric = self.selection_model.search([
            ('type_id', '=', type_symmetric.id),
        ])
        self.assertEqual(len(selection_symmetric), 1)
        relation = self.relation_all_model.create({
            'type_selection_id': selection_symmetric.id,
            'this_partner_id': self.partner_02_company.id,
            'other_partner_id': self.partner_01_person.id,
        })
        # Both sides of a symmetric relation must be findable through it:
        partners = self.partner_model.search([
            ('search_relation_type_id', '=', relation.type_selection_id.id)
        ])
        self.assertTrue(self.partner_01_person in partners)
        self.assertTrue(self.partner_02_company in partners)

    def test_category_domain(self):
        """Test check on category in relations."""
        # Check on left side:
        with self.assertRaises(ValidationError):
            self.relation_model.create({
                'type_id': self.type_ngo2volunteer.id,
                'left_partner_id': self.partner_02_company.id,
                'right_partner_id': self.partner_04_volunteer.id,
            })
        # Check on right side:
        with self.assertRaises(ValidationError):
            self.relation_model.create({
                'type_id': self.type_ngo2volunteer.id,
                'left_partner_id': self.partner_03_ngo.id,
                'right_partner_id': self.partner_01_person.id,
            })

    def test_relation_type_change(self):
        """Test change in relation type conditions."""
        # First create a relation type having no particular conditions.
        (type_school2student,
         school2student,
         school2student_inverse) = (
            self._create_relation_type_selection({
                'name': 'school has student',
                'name_inverse': 'studies at school',
            })
        )
        # Second create relations based on those conditions.
        partner_school = self.partner_model.create({
            'name': 'Test School',
            'is_company': True,
            'ref': 'TS',
        })
        partner_bart = self.partner_model.create({
            'name': 'Bart Simpson',
            'is_company': False,
            'ref': 'BS',
        })
        partner_lisa = self.partner_model.create({
            'name': 'Lisa Simpson',
            'is_company': False,
            'ref': 'LS',
        })
        relation_school2bart = self.relation_all_model.create({
            'this_partner_id': partner_school.id,
            'type_selection_id': school2student.id,
            'other_partner_id': partner_bart.id,
        })
        self.assertTrue(relation_school2bart)
        relation_school2lisa = self.relation_all_model.create({
            'this_partner_id': partner_school.id,
            'type_selection_id': school2student.id,
            'other_partner_id': partner_lisa.id,
        })
        self.assertTrue(relation_school2lisa)
        relation_bart2lisa = self.relation_all_model.create({
            'this_partner_id': partner_bart.id,
            'type_selection_id': school2student.id,
            'other_partner_id': partner_lisa.id,
        })
        self.assertTrue(relation_bart2lisa)
        # Third create a category and make it a condition for the
        # relation type.
        # - Test restriction
        # - Test ignore
        category_student = self.category_model.create({
            'name': 'Student',
        })
        with self.assertRaises(ValidationError):
            type_school2student.write({
                'partner_category_right': category_student.id,
            })
        self.assertFalse(type_school2student.partner_category_right.id)
        type_school2student.write({
            'handle_invalid_onchange': 'ignore',
            'partner_category_right': category_student.id,
        })
        self.assertEqual(
            type_school2student.partner_category_right.id,
            category_student.id
        )
        # Fourth make company type a condition for left partner
        # - Test ending
        # - Test deletion
        partner_bart.write({
            'category_id': [(4, category_student.id)],
        })
        partner_lisa.write({
            'category_id': [(4, category_student.id)],
        })
        # Future student to be deleted by end action:
        partner_homer = self.partner_model.create({
            'name': 'Homer Simpson',
            'is_company': False,
            'ref': 'HS',
            'category_id': [(4, category_student.id)],
        })
        relation_lisa2homer = self.relation_all_model.create({
            'this_partner_id': partner_lisa.id,
            'type_selection_id': school2student.id,
            'other_partner_id': partner_homer.id,
            'date_start': fields.Date.to_string(
                date.today() + relativedelta(months=+6)
            ),
        })
        self.assertTrue(relation_lisa2homer)
        # 'end' should close currently invalid relations today and remove
        # relations that would only start in the future:
        type_school2student.write({
            'handle_invalid_onchange': 'end',
            'contact_type_left': 'c',
        })
        self.assertEqual(
            relation_bart2lisa.date_end,
            fields.Date.today()
        )
        self.assertFalse(relation_lisa2homer.exists())
        # 'delete' should remove invalid relations outright:
        type_school2student.write({
            'handle_invalid_onchange': 'delete',
            'contact_type_left': 'c',
            'contact_type_right': 'p',
        })
        self.assertFalse(relation_bart2lisa.exists())
| agpl-3.0 |
bright-sparks/chromium-spacewalk | tools/perf/measurements/thread_times.py | 32 | 2068 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import timeline_controller
from metrics import timeline
from telemetry.core.platform import tracing_category_filter
from telemetry.page import page_test
class ThreadTimes(page_test.PageTest):
    """Measurement reporting per-thread time breakdowns for smoothness runs.

    Records a trace around the page's RunSmoothness action and feeds it to
    timeline.ThreadTimesTimelineMetric, optionally reporting silk-specific
    results and details.
    """

    def __init__(self):
        super(ThreadTimes, self).__init__('RunSmoothness')
        self._timeline_controller = None

    @classmethod
    def AddCommandLineArgs(cls, parser):
        parser.add_option('--report-silk-results', action='store_true',
                          help='Report results relevant to silk.')
        parser.add_option('--report-silk-details', action='store_true',
                          help='Report details relevant to silk.')

    def WillNavigateToPage(self, page, tab):
        self._timeline_controller = timeline_controller.TimelineController()
        if self.options.report_silk_details:
            # We need the other traces in order to have any details to report.
            # Bug fix: this previously assigned through `self.timeline_controller`
            # (no leading underscore), which raised AttributeError whenever
            # --report-silk-details was passed; the attribute set in __init__
            # and used everywhere else is `_timeline_controller`.
            self._timeline_controller.trace_categories = None
        else:
            self._timeline_controller.trace_categories = \
                tracing_category_filter.CreateNoOverheadFilter().filter_string
        self._timeline_controller.SetUp(page, tab)

    def WillRunActions(self, page, tab):
        self._timeline_controller.Start(tab)

    def DidRunActions(self, page, tab):
        self._timeline_controller.Stop(tab)

    def ValidateAndMeasurePage(self, page, tab, results):
        metric = timeline.ThreadTimesTimelineMetric()
        renderer_thread = \
            self._timeline_controller.model.GetRendererThreadFromTabId(tab.id)
        if self.options.report_silk_results:
            metric.results_to_report = timeline.ReportSilkResults
        if self.options.report_silk_details:
            metric.details_to_report = timeline.ReportSilkDetails
        metric.AddResults(self._timeline_controller.model, renderer_thread,
                          self._timeline_controller.smooth_records, results)

    def CleanUpAfterPage(self, _, tab):
        self._timeline_controller.CleanUp(tab)
| bsd-3-clause |
jenalgit/django | tests/view_tests/tests/test_defaults.py | 286 | 5307 | from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import TestCase
from django.test.utils import override_settings
from ..models import Article, Author, UrlArticle
@override_settings(ROOT_URLCONF='view_tests.urls')
class DefaultsTests(TestCase):
    """Test django views in django/views/defaults.py"""
    # URLs expected to produce a 404: the first matches a pattern in urls.py
    # (its view raises Http404); the second matches no pattern at all.
    non_existing_urls = ['/non_existing_url/',  # this is in urls.py
                         '/other_non_existing_url/']  # this NOT in urls.py

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: one user, one author, and articles dated in the
        # past, present and future, plus a UrlArticle and the default Site.
        User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        Author.objects.create(name='Boris')
        Article.objects.create(
            title='Old Article', slug='old_article', author_id=1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        Article.objects.create(
            title='Current Article', slug='current_article', author_id=1,
            date_created=datetime.datetime(2007, 9, 17, 21, 22, 23)
        )
        Article.objects.create(
            title='Future Article', slug='future_article', author_id=1,
            date_created=datetime.datetime(3000, 1, 1, 21, 22, 23)
        )
        UrlArticle.objects.create(
            title='Old Article', slug='old_article', author_id=1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        Site(id=1, domain='testserver', name='testserver').save()

    def test_page_not_found(self):
        "A 404 status is returned by the page_not_found view"
        for url in self.non_existing_urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)

    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '404.html': '{{ csrf_token }}',
                }),
            ],
        },
    }])
    def test_csrf_token_in_404(self):
        """
        The 404 page should have the csrf_token available in the context
        """
        # See ticket #14565
        # The in-memory 404 template renders only {{ csrf_token }}; if the
        # token were unavailable the body would be 'NOTPROVIDED' or empty.
        for url in self.non_existing_urls:
            response = self.client.get(url)
            self.assertNotEqual(response.content, 'NOTPROVIDED')
            self.assertNotEqual(response.content, '')

    def test_server_error(self):
        "The server_error view raises a 500 status"
        response = self.client.get('/server_error/')
        self.assertEqual(response.status_code, 500)

    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '404.html': 'This is a test template for a 404 error '
                                '(path: {{ request_path }}, exception: {{ exception }}).',
                    '500.html': 'This is a test template for a 500 error.',
                }),
            ],
        },
    }])
    def test_custom_templates(self):
        """
        Test that 404.html and 500.html templates are picked by their respective
        handler.
        """
        response = self.client.get('/server_error/')
        self.assertContains(response, "test template for a 500 error", status_code=500)
        response = self.client.get('/no_such_url/')
        self.assertContains(response, 'path: /no_such_url/', status_code=404)
        self.assertContains(response, 'exception: Resolver404', status_code=404)
        response = self.client.get('/technical404/')
        self.assertContains(response, 'exception: Testing technical 404.', status_code=404)

    def test_get_absolute_url_attributes(self):
        "A model can set attributes on the get_absolute_url method"
        # 'purge' is set on UrlArticle.get_absolute_url by a decorator; it
        # must survive both on the class and on instances.
        self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
        article = UrlArticle.objects.get(pk=1)
        self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')

    @override_settings(DEFAULT_CONTENT_TYPE="text/xml")
    def test_default_content_type_is_text_html(self):
        """
        Content-Type of the default error responses is text/html. Refs #20822.
        """
        # Even with DEFAULT_CONTENT_TYPE overridden to text/xml, the built-in
        # error views must still emit text/html.
        response = self.client.get('/raises400/')
        self.assertEqual(response['Content-Type'], 'text/html')
        response = self.client.get('/raises403/')
        self.assertEqual(response['Content-Type'], 'text/html')
        response = self.client.get('/non_existing_url/')
        self.assertEqual(response['Content-Type'], 'text/html')
        response = self.client.get('/server_error/')
        self.assertEqual(response['Content-Type'], 'text/html')
| bsd-3-clause |
thrasher-/litecoin | test/functional/interface_zmq.py | 6 | 4367 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import struct
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
hash256,
)
from io import BytesIO
class ZMQSubscriber:
    """A ZMQ SUB socket bound to one topic, verifying message ordering."""

    def __init__(self, socket, topic):
        self.sequence = 0
        self.socket = socket
        self.topic = topic

        import zmq
        self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)

    def receive(self):
        """Return the body of the next message, checking topic and sequence."""
        received_topic, body, raw_seq = self.socket.recv_multipart()
        # Topic should match the subscriber topic.
        assert_equal(received_topic, self.topic)
        # Sequence numbers are little-endian uint32 and must be incremental.
        (seq_number,) = struct.unpack('<I', raw_seq)
        assert_equal(seq_number, self.sequence)
        self.sequence += 1
        return body
class ZMQTest (BitcoinTestFramework):
    def set_test_params(self):
        # Node 0 publishes ZMQ notifications; node 1 generates wallet traffic.
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_py3_zmq()
        self.skip_if_no_bitcoind_zmq()
        self.skip_if_no_wallet()

    def setup_nodes(self):
        # Import keys
        self.add_nodes(self.num_nodes)
        self.start_nodes()
        super().import_deterministic_coinbase_privkeys()
        self.stop_nodes()

        import zmq

        # Initialize ZMQ context and socket.
        # All messages are received in the same socket which means
        # that this test fails if the publishing order changes.
        # Note that the publishing order is not defined in the documentation and
        # is subject to change.
        address = "tcp://127.0.0.1:28332"
        self.zmq_context = zmq.Context()
        socket = self.zmq_context.socket(zmq.SUB)
        socket.set(zmq.RCVTIMEO, 60000)
        socket.connect(address)

        # Subscribe to all available topics.
        self.hashblock = ZMQSubscriber(socket, b"hashblock")
        self.hashtx = ZMQSubscriber(socket, b"hashtx")
        self.rawblock = ZMQSubscriber(socket, b"rawblock")
        self.rawtx = ZMQSubscriber(socket, b"rawtx")

        # Restart node 0 with one -zmqpub option per subscribed topic.
        self.nodes[0].extra_args = ["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [self.hashblock, self.hashtx, self.rawblock, self.rawtx]]
        self.start_nodes()

    def import_deterministic_coinbase_privkeys(self):
        # Keys were already imported in setup_nodes(); make the base-class
        # hook a no-op so they are not imported twice.
        pass

    def run_test(self):
        try:
            self._zmq_test()
        finally:
            # Destroy the ZMQ context.
            self.log.debug("Destroying ZMQ context")
            self.zmq_context.destroy(linger=None)

    def _zmq_test(self):
        num_blocks = 5
        self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
        genhashes = self.nodes[0].generate(num_blocks)
        self.sync_all()

        for x in range(num_blocks):
            # Should receive the coinbase txid.
            txid = self.hashtx.receive()

            # Should receive the coinbase raw transaction.
            hex = self.rawtx.receive()
            tx = CTransaction()
            tx.deserialize(BytesIO(hex))
            tx.calc_sha256()
            assert_equal(tx.hash, bytes_to_hex_str(txid))

            # Should receive the generated block hash.
            hash = bytes_to_hex_str(self.hashblock.receive())
            assert_equal(genhashes[x], hash)
            # The block should only have the coinbase txid.
            assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"])

            # Should receive the generated raw block.
            # The first 80 bytes are the header; its hash256 is the block hash.
            block = self.rawblock.receive()
            assert_equal(genhashes[x], bytes_to_hex_str(hash256(block[:80])))

        self.log.info("Wait for tx from second node")
        payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        self.sync_all()

        # Should receive the broadcasted txid.
        txid = self.hashtx.receive()
        assert_equal(payment_txid, bytes_to_hex_str(txid))

        # Should receive the broadcasted raw transaction.
        hex = self.rawtx.receive()
        assert_equal(payment_txid, bytes_to_hex_str(hash256(hex)))
# Standard entry point: run the ZMQ interface test when executed directly.
if __name__ == '__main__':
    ZMQTest().main()
| mit |
Jay-Jay-D/LeanSTP | Algorithm.Python/EmaCrossUniverseSelectionAlgorithm.py | 3 | 4365 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Data import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
from System.Collections.Generic import List
### <summary>
### In this algorithm we demonstrate how to perform some technical analysis as
### part of your coarse fundamental universe selection
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="indicators" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
class EmaCrossUniverseSelectionAlgorithm(QCAlgorithm):
    """Selects a coarse universe of up-trending securities by EMA cross."""

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''

        self.SetStartDate(2010,1,1)  #Set Start Date
        self.SetEndDate(2015,1,1)    #Set End Date
        self.SetCash(100000)         #Set Strategy Cash

        self.UniverseSettings.Resolution = Resolution.Daily
        self.UniverseSettings.Leverage = 2

        # Number of symbols to keep from the coarse selection each day.
        self.coarse_count = 10
        self.averages = { };

        # this add universe method accepts two parameters:
        # - coarse selection function: accepts an IEnumerable<CoarseFundamental> and returns an IEnumerable<Symbol>
        self.AddUniverse(self.CoarseSelectionFunction)

    # sort the data by daily dollar volume and take the top 'NumberOfSymbols'
    def CoarseSelectionFunction(self, coarse):
        """Return the symbols of the top up-trending securities by EMA spread."""

        # We are going to use a dictionary to refer the object that will keep the moving averages
        for cf in coarse:
            if cf.Symbol not in self.averages:
                self.averages[cf.Symbol] = SymbolData(cf.Symbol)

            # Updates the SymbolData object with current EOD price
            avg = self.averages[cf.Symbol]
            avg.update(cf.EndTime, cf.AdjustedPrice)

        # Filter the values of the dict: we only want up-trending securities
        values = list(filter(lambda x: x.is_uptrend, self.averages.values()))

        # Sorts the values of the dict: we want those with greater difference between the moving averages
        values.sort(key=lambda x: x.scale, reverse=True)

        for x in values[:self.coarse_count]:
            self.Log('symbol: ' + str(x.symbol.Value) + '  scale: ' + str(x.scale))

        # we need to return only the symbol objects
        return [ x.symbol for x in values[:self.coarse_count] ]

    # this event fires whenever we have changes to our universe
    def OnSecuritiesChanged(self, changes):
        # liquidate removed securities
        for security in changes.RemovedSecurities:
            if security.Invested:
                self.Liquidate(security.Symbol)

        # we want 10% allocation in each security in our universe
        # (0.1 x coarse_count of 10 = fully invested)
        for security in changes.AddedSecurities:
            self.SetHoldings(security.Symbol, 0.1)
class SymbolData(object):
    """Tracks fast/slow EMAs for one symbol and flags up-trending securities.

    ``scale`` measures the relative spread between the two averages and is
    used by the algorithm to rank candidates.
    """

    def __init__(self, symbol):
        self.symbol = symbol
        self.tolerance = 1.01
        self.fast = ExponentialMovingAverage(100)
        self.slow = ExponentialMovingAverage(300)
        self.is_uptrend = False
        self.scale = 0

    def update(self, time, value):
        """Feed one end-of-day price into both EMAs and refresh the signal."""
        # ``and`` short-circuits: the slow EMA is only fed once the fast EMA
        # reports ready, matching the original update cadence.
        both_ready = self.fast.Update(time, value) and self.slow.Update(time, value)
        if not both_ready:
            return
        fast_value = self.fast.Current.Value
        slow_value = self.slow.Current.Value
        # Up-trend requires the fast average to exceed the slow one by the
        # tolerance margin (1%).
        self.is_uptrend = fast_value > slow_value * self.tolerance
        if self.is_uptrend:
            # Relative spread: difference normalized by the midpoint.
            self.scale = (fast_value - slow_value) / ((fast_value + slow_value) / 2.0)
django-leonardo/horizon | tools/install_venv_common.py | 166 | 5958 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Creates a project virtualenv and installs its pip dependencies.

    Kept strictly compatible with Python 2.6 since it bootstraps from the
    system interpreter (see module docstring).
    """

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        """Print ``message % args`` to stderr and exit with status 1."""
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns an (output, returncode) tuple. Working directory is
        self.root.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None

        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        """Like run_command_with_code() but return only the output."""
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        """Return the distro-specific helper (Fedora family or generic)."""
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
        else:
            return Distro(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)

    def check_dependencies(self):
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
        else:
            # Nothing to do: the venv directory already exists.
            # (A dead `pass` statement that followed this print was removed.)
            print("venv already exists...")

    def pip_install(self, *args):
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')

        self.pip_install('-r', self.requirements, '-r', self.test_requirements)

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distribution: falls back to easy_install for virtualenv."""

    def check_cmd(self, cmd):
        # `which` prints nothing (and exits non-zero) when the command is
        # absent, so a non-empty stripped output means the command exists.
        return bool(self.run_command(['which', cmd],
                    check_exit_code=False).strip())

    def install_virtualenv(self):
        """Ensure `virtualenv` is available, or abort with a helpful message."""
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')

        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        # `rpm -q` exits 0 only when the package is installed.
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def install_virtualenv(self):
        """On RPM systems, require the python-virtualenv package."""
        if self.check_cmd('virtualenv'):
            return

        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()
| apache-2.0 |
jaechankim/namebench | nb_third_party/dns/tsig.py | 215 | 7851 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import hmac
import struct
import dns.exception
import dns.rdataclass
import dns.name
class BadTime(dns.exception.DNSException):
    """Raised if the current time is not within the TSIG's validity time."""
    pass

class BadSignature(dns.exception.DNSException):
    """Raised if the TSIG signature fails to verify."""
    pass

class PeerError(dns.exception.DNSException):
    """Base class for all TSIG errors generated by the remote peer"""
    pass

class PeerBadKey(PeerError):
    """Raised if the peer didn't know the key we used"""
    pass

class PeerBadSignature(PeerError):
    """Raised if the peer didn't like the signature we sent"""
    pass

class PeerBadTime(PeerError):
    """Raised if the peer didn't like the time we sent"""
    pass

class PeerBadTruncation(PeerError):
    """Raised if the peer didn't like amount of truncation in the TSIG we sent"""
    pass

# Algorithm used when the caller does not specify one (RFC 2845 HMAC-MD5).
default_algorithm = "HMAC-MD5.SIG-ALG.REG.INT"

# TSIG-specific RCODE values (RFC 2845 section 1.7; BADTRUNC from RFC 4635).
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
def sign(wire, keyname, secret, time, fudge, original_id, error,
         other_data, request_mac, ctx=None, multi=False, first=True,
         algorithm=default_algorithm):
    """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
    for the input parameters, the HMAC MAC calculated by applying the
    TSIG signature algorithm, and the TSIG digest context.
    @rtype: (string, string, hmac.HMAC object)
    @raises ValueError: I{other_data} is too long
    @raises NotImplementedError: I{algorithm} is not supported
    """

    (algorithm_name, digestmod) = get_algorithm(algorithm)
    if first:
        ctx = hmac.new(secret, digestmod=digestmod)
        # When responding, the request's MAC (length-prefixed) is digested
        # before the message itself.
        ml = len(request_mac)
        if ml > 0:
            ctx.update(struct.pack('!H', ml))
            ctx.update(request_mac)
    # The message is digested with its original (pre-TSIG) ID.
    id = struct.pack('!H', original_id)
    ctx.update(id)
    ctx.update(wire[2:])
    if first:
        # TSIG RR fixed fields: owner name, class ANY, TTL 0 (RFC 2845 3.4.2).
        ctx.update(keyname.to_digestable())
        ctx.update(struct.pack('!H', dns.rdataclass.ANY))
        ctx.update(struct.pack('!I', 0))
    # 48-bit "time signed" is split into 16 high bits and 32 low bits.
    long_time = time + 0L
    upper_time = (long_time >> 32) & 0xffffL
    lower_time = long_time & 0xffffffffL
    time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
    pre_mac = algorithm_name + time_mac
    ol = len(other_data)
    if ol > 65535:
        raise ValueError('TSIG Other Data is > 65535 bytes')
    post_mac = struct.pack('!HH', error, ol) + other_data
    if first:
        ctx.update(pre_mac)
        ctx.update(post_mac)
    else:
        # Continuations of a multi-message exchange digest only the timers.
        ctx.update(time_mac)
    mac = ctx.digest()
    mpack = struct.pack('!H', len(mac))
    tsig_rdata = pre_mac + mpack + mac + id + post_mac
    if multi:
        # Seed the context for the next message with this message's MAC.
        # NOTE(review): digestmod is not passed here, so the continuation
        # context falls back to HMAC-MD5 regardless of *algorithm* --
        # verify against upstream dnspython before relying on non-MD5
        # multi-message TSIG.
        ctx = hmac.new(secret)
        ml = len(mac)
        ctx.update(struct.pack('!H', ml))
        ctx.update(mac)
    else:
       ctx = None
    return (tsig_rdata, mac, ctx)
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
             other_data, request_mac, ctx=None, multi=False, first=True,
             algorithm=default_algorithm):
    # Backwards-compatible alias for sign(); despite the name it honours
    # whatever *algorithm* is passed (defaulting to HMAC-MD5).
    return sign(wire, keyname, secret, time, fudge, original_id, error,
                other_data, request_mac, ctx, multi, first, algorithm)
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
             tsig_rdlen, ctx=None, multi=False, first=True):
    """Validate the specified TSIG rdata against the other input parameters.

    @raises FormError: The TSIG is badly formed.
    @raises BadTime: There is too much time skew between the client and the
    server.
    @raises BadSignature: The TSIG signature did not validate
    @rtype: hmac.HMAC object"""

    # Rebuild the message as it was before the TSIG RR was appended:
    # decrement ARCOUNT and truncate the wire at the start of the TSIG RR.
    (adcount,) = struct.unpack("!H", wire[10:12])
    if adcount == 0:
        raise dns.exception.FormError
    adcount -= 1
    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
    # Parse the TSIG rdata: algorithm name, 48-bit time, fudge, MAC,
    # original ID, error, and other data (RFC 2845 2.3).
    current = tsig_rdata
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    time = ((upper_time + 0L) << 32) + (lower_time + 0L)
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    (original_id, error, other_size) = \
        struct.unpack("!HHH", wire[current:current + 6])
    current += 6
    other_data = wire[current:current + other_size]
    current += other_size
    if current != tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    # Map peer-reported TSIG error codes to specific exceptions.
    if error != 0:
        if error == BADSIG:
            raise PeerBadSignature
        elif error == BADKEY:
            raise PeerBadKey
        elif error == BADTIME:
            raise PeerBadTime
        elif error == BADTRUNC:
            raise PeerBadTruncation
        else:
            raise PeerError('unknown TSIG error code %d' % error)
    # Reject messages signed outside the fudge window around *now*.
    time_low = time - fudge
    time_high = time + fudge
    if now < time_low or now > time_high:
        raise BadTime
    # Re-sign the reconstructed message with the algorithm named in the
    # TSIG (aname) and compare MACs.
    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
                                original_id, error, other_data,
                                request_mac, ctx, multi, first, aname)
    # NOTE(review): plain != is not a constant-time comparison; later
    # dnspython releases use hmac.compare_digest here -- confirm whether
    # timing attacks are a concern for this deployment.
    if (our_mac != mac):
        raise BadSignature
    return ctx
def get_algorithm(algorithm):
    """Returns the wire format string and the hash module to use for the
    specified TSIG algorithm

    @rtype: (string, hash constructor)
    @raises NotImplementedError: I{algorithm} is not supported
    """

    # Map of supported algorithm names (as dns.name objects) to hash
    # constructors, built lazily to cope with older Pythons.
    hashes = {}
    try:
        import hashlib
        hashes[dns.name.from_text('hmac-sha224')] = hashlib.sha224
        hashes[dns.name.from_text('hmac-sha256')] = hashlib.sha256
        hashes[dns.name.from_text('hmac-sha384')] = hashlib.sha384
        hashes[dns.name.from_text('hmac-sha512')] = hashlib.sha512
        hashes[dns.name.from_text('hmac-sha1')] = hashlib.sha1
        hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = hashlib.md5

        import sys
        if sys.hexversion < 0x02050000:
            # hashlib doesn't conform to PEP 247: API for
            # Cryptographic Hash Functions, which hmac before python
            # 2.5 requires, so add the necessary items.
            class HashlibWrapper:
                def __init__(self, basehash):
                    self.basehash = basehash
                    self.digest_size = self.basehash().digest_size

                def new(self, *args, **kwargs):
                    return self.basehash(*args, **kwargs)

            for name in hashes:
                hashes[name] = HashlibWrapper(hashes[name])

    except ImportError:
        # Pre-hashlib Pythons: only MD5 and SHA1 are available.
        import md5, sha
        hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] =  md5.md5
        hashes[dns.name.from_text('hmac-sha1')] = sha.sha

    if isinstance(algorithm, (str, unicode)):
        algorithm = dns.name.from_text(algorithm)

    if algorithm in hashes:
        return (algorithm.to_digestable(), hashes[algorithm])

    raise NotImplementedError("TSIG algorithm " + str(algorithm) +
        " is not supported")
| apache-2.0 |
Ictp/indico | indico/MaKaC/plugins/Collaboration/RecordingManager/micala.py | 1 | 24361 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from MaKaC.plugins.Collaboration.RecordingManager.exceptions import RecordingManagerException
from MaKaC.plugins.Collaboration.collaborationTools import CollaborationTools
from MaKaC.common.logger import Logger
import re
try:
import MySQLdb
except ImportError:
Logger.get("RecMan").debug("Cannot import MySQLdb")
class MicalaCommunication(object):
@classmethod
def getIp(cls):
'''This should return the IP of the current machine.'''
return("")
    @classmethod
    def getIdMachine(cls, machine_name):
        '''Look up ID of this machine in database.

        Returns the numeric idMachine, or the empty string if the hostname
        is not present in the machines table.'''
#        Logger.get('RecMan').debug('in getIdMachine(), machine_name = %s' % machine_name)

        try:
            connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
                                         port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
                                         user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderUser"),
                                         passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderPW"),
                                         db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
        except MySQLdb.Error, e:
            raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))

        cursor = connection.cursor()
        # believe it or not, the comma following machine_name is supposed to be there for MySQLdb's sake
        cursor.execute("""SELECT idMachine, hostname FROM machines WHERE hostname = %s""",
            (machine_name,))
        connection.commit()

        result_set = cursor.fetchone()

        if result_set is not None and len(result_set) > 0:
            idMachine = result_set[0]
        else:
            idMachine = ''

        cursor.close()
        connection.close()

        return(idMachine)
@classmethod
def getIdTask(cls, task_name):
'''Look up ID of this task in database'''
# Logger.get('RecMan').debug('task_name = [%s]' % task_name)
try:
connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderUser"),
passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderPW"),
db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
except MySQLdb.Error, e:
raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))
cursor = connection.cursor()
cursor.execute("""SELECT idTask, name FROM tasks WHERE name = %s""",
(task_name,))
connection.commit()
result_set = cursor.fetchone()
if result_set is not None and len(result_set) > 0:
idTask = result_set[0]
else:
idTask = result_set[0]
cursor.close()
connection.close()
return(idTask)
    @classmethod
    def getIdLecture(cls, lecture_name):
        '''Look up internal database ID of the given lecture.

        Matches either the Michigan-style LOID column or the CERN-style
        IndicoID column; returns '' when the lecture is unknown.'''
#        Logger.get('RecMan').debug('lecture_name = [%s]' % lecture_name)

        try:
            connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
                                         port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
                                         user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderUser"),
                                         passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderPW"),
                                         db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
        except MySQLdb.Error, e:
            raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))

        # Depending on style of lecture ID, search under Michigan style column or CERN style column
        cursor = connection.cursor()
        cursor.execute("""SELECT idLecture, LOID, IndicoID FROM lectures WHERE LOID = %s OR IndicoID = %s""",
            (lecture_name, lecture_name))
        connection.commit()

        result_set = cursor.fetchone()

        if result_set is not None and len(result_set) > 0:
#            Logger.get('RecMan').debug("result_set: %s" % str(result_set))
            idLecture = result_set[0]
        else:
            idLecture = ''

        cursor.close()
        connection.close()

        return(idLecture)
    @classmethod
    def isTaskComplete(cls, idLecture, idTask):
        '''Check to see if given task has been completed for the given lecture.

        Returns True only when the lecture's latest status for the task is
        'COMPLETE'; otherwise (including unknown lecture/task) False.'''

        flagComplete = False

        try:
            connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
                                         port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
                                         user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderUser"),
                                         passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderPW"),
                                         db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
        except MySQLdb.Error, e:
            raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))

        # Check to see if the given task for the given lecture has status 'COMPLETE'.
        # If not then an empty set will be returned.
        cursor = connection.cursor()
        cursor.execute("""SELECT L.LOID
            FROM lectures L, lectureLatestStatus LS, status S
            WHERE L.idLecture = %s
            AND L.idLecture = LS.idLecture
            AND LS.idTask = %s
            AND LS.idStatus = S.idStatus
            AND S.status = 'COMPLETE'""",
            (idLecture, idTask))
        connection.commit()

        result_set = cursor.fetchone()

        if result_set is not None and len(result_set) > 0:
            flagComplete = True
        else:
            flagComplete = False

        cursor.close()
        connection.close()

        return(flagComplete)
    @classmethod
    def createNewMicalaLecture(cls, lecture_name, contentType):
        '''insert a record into the micala database for a new lecture.

        contentType is either "plain_video" (stored under IndicoID) or
        "web_lecture" (stored under LOID). Returns the new row's idLecture
        via getIdLecture.'''
#        Logger.get('RecMan').debug('createNewMicalaLecture for [%s]' % lecture_name)

        try:
            connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
                                         port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
                                         user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBUser"),
                                         passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBPW"),
                                         db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
        except MySQLdb.Error, e:
            raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))

        cursor = connection.cursor()

        # NOTE(review): if contentType is neither recognized value, no INSERT
        # is executed but commit/close still run and getIdLecture will return
        # '' -- confirm callers never pass other values.
        if contentType == "plain_video":
            micalaContentType = "PLAINVIDEO"
#            Logger.get('RecMan').debug("""INSERT INTO Lectures (IndicoID, contentType, dateCreated) VALUES(%s, %s, NOW());""" % (lecture_name, micalaContentType))
            cursor.execute("""INSERT INTO lectures (IndicoID, contentType, dateCreated) VALUES(%s, %s, NOW());""", (lecture_name, micalaContentType))
        elif contentType == "web_lecture":
            micalaContentType = "WEBLECTURE"
#            Logger.get('RecMan').debug("""INSERT INTO Lectures (LOID, contentType, dateCreated) VALUES(%s, %s, NOW());""" % (lecture_name, micalaContentType))
            cursor.execute("""INSERT INTO lectures (LOID, contentType, dateCreated) VALUES(%s, %s, NOW());""", (lecture_name, micalaContentType))

        connection.commit()
        connection.close()

        return cls.getIdLecture(lecture_name)
    @classmethod
    def updateLectureInfo(cls, idLecture, lectureTitle, lectureSpeakers):
        '''Update basic info in micala DB for convenience in identifying records.

        Title and speakers are passed through cls.cleanSQLData (defined
        elsewhere in this class) before being stored.'''

        try:
            connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
                                         port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
                                         user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBUser"),
                                         passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBPW"),
                                         db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
        except MySQLdb.Error, e:
            raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))

        cursor = connection.cursor()

        cleanedLectureTitle = cls.cleanSQLData(lectureTitle)
        cleanedLectureSpeakers = cls.cleanSQLData(lectureSpeakers)

        # The log line interpolates values for readability only; the actual
        # query below uses MySQLdb parameter binding.
        Logger.get('RecMan').info("""UPDATE lectures SET title = %s, creator = %s WHERE idLecture = %s""" % (cleanedLectureTitle, cleanedLectureSpeakers, idLecture))
        cursor.execute("""UPDATE lectures SET title = %s, creator = %s WHERE idLecture = %s""", \
                       (cleanedLectureTitle, cleanedLectureSpeakers, idLecture))
        connection.commit()
        connection.close()

        return ()
@classmethod
def getMatches(cls, confID):
'''For the current conference, get list from the database of IndicoID's already matched to Lecture Objects.'''
try:
connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderUser"),
passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderPW"),
db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
except MySQLdb.Error, e:
raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))
cursor = connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
cursor.execute('''SELECT IndicoID, LOID, contentType FROM lectures WHERE IndicoID LIKE "%s%%"''' % confID)
connection.commit()
rows = cursor.fetchall()
cursor.close()
connection.close()
# Build dictionary of matches
match_array = {}
for row in rows:
# We are only interested in reporting on lecture object matches here,
# not whether plain video talks are in the micala database.
# Also, it shouldn't ever happen, but ignore records in the micala DB with ContentType WEBLECTURE
# that don't have a LOID.
# LOID = NULL in the MySQL database translates in Python to being None.
if row["contentType"] == 'WEBLECTURE' and row["LOID"] is not None:
match_array[row["IndicoID"]] = row["LOID"]
return (match_array)
@classmethod
def reportStatus(cls, status, message, idMachine, idTask, idLecture):
    '''Make status report to the database.

    status    -- status string (e.g. 'START', 'COMPLETE').
    message   -- free-form text stored alongside the status.
    idMachine -- key of the reporting machine in the micala DB.
    idTask    -- key of the task being reported on.
    idLecture -- key of the lecture; '' is normalised to None so that the
                 DB stores NULL instead of an empty string.

    Raises RecordingManagerException if the MySQL connection cannot be opened.
    '''
    # Logger.get('RecMan').debug('in reportStatus()')
    if idLecture == '':
        idLecture = None
    try:
        connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
            port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
            user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBUser"),
            passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBPW"),
            db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
    except MySQLdb.Error, e:
        raise RecordingManagerException(_("MySQL database error %d: %s") % (e.args[0], e.args[1]))
    cursor = connection.cursor()
    # Parameterised insert; NOW() records the report time on the DB server.
    cursor.execute("""INSERT INTO status
        (idMachine, idTask, idLecture, status, message, dateReported)
        VALUES(%s, %s, %s, %s, %s, NOW());""", \
        (idMachine, idTask, idLecture, status, message))
    cursor.close()
    connection.commit()
    connection.close()
@classmethod
def associateIndicoIDToLOID(cls, IndicoID, LODBID):
"""Update the micala DB to associate the given talk with the given LOID"""
# Initialize success flag and result string
flagSuccess = True
result = ""
# Logger.get('RecMan').debug("in associateIndicoIDToLOID()")
try:
connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBUser"),
passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBPW"),
db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
except MySQLdb.Error, e:
flagSuccess = False
result += _("MySQL error %d: %s") % (e.args[0], e.args[1])
except Exception, e:
flagSuccess = False
result += _("Unknown error %d: %s") % (e.args[0], e.args[1])
cursor = connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
Logger.get('RecMan').info("UPDATE lectures SET IndicoID=%s, contentType=%s WHERE idLecture=%s" % (IndicoID, "WEBLECTURE", LODBID))
try:
cursor.execute("UPDATE lectures SET IndicoID=%s, contentType=%s WHERE idLecture=%s",
(IndicoID, "WEBLECTURE", LODBID))
connection.commit()
except MySQLdb.Error, e:
flagSuccess = False
result += _("MySQL error %d: %s") % (e.args[0], e.args[1])
except Exception, e:
flagSuccess = False
result += _("Unknown error %d: %s") % (e.args[0], e.args[1])
cursor.close()
connection.close()
return {"success": flagSuccess, "result": result}
@classmethod
def associateCDSRecordToLOID(cls, CDSID, LODBID):
"""Update the micala DB to associate the CDS record number with the given lecture.
Note: if you are using cdsdev, the CDSID stored in the micala database will be the cdsdev record, not the cds record.
The micala database doesn't know the difference between cds and cdsdev. So if you create a bunch of test records and
then want to go back and create them again in CDS, you'll have to tinker around with the micala database,
deleting some status records and probably re-publish those lectures from the beginning."""
# Initialize success flag and result string
flagSuccess = True
result = ""
# Logger.get('RecMan').debug("in associateIndicoIDToLOID()")
try:
connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBUser"),
passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBPW"),
db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
except MySQLdb.Error, e:
flagSuccess = False
result += _("MySQL error %d: %s") % (e.args[0], e.args[1])
except Exception, e:
flagSuccess = False
result += _("Unknown error %d: %s") % (e.args[0], e.args[1])
cursor = connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
try:
cursor.execute("UPDATE lectures SET CDSRecord=%s WHERE idLecture=%s",
(CDSID, LODBID))
connection.commit()
except MySQLdb.Error, e:
flagSuccess = False
result += _("MySQL error %d: %s") % (e.args[0], e.args[1])
except Exception, e:
flagSuccess = False
result += _("Unknown error %d: %s") % (e.args[0], e.args[1])
cursor.close()
connection.close()
return {"success": flagSuccess, "result": result}
@classmethod
def getCDSPending(cls, confId):
"""Query the Micala database to find Indico IDs whose MARC has been exported to CDS, but not marked as completed in the micala DB.
(Note: they may have just been completed, but we'll deal with newly completed tasks separately)
Return a list of these Indico IDs."""
try:
connection = MySQLdb.connect(host = CollaborationTools.getOptionValue("RecordingManager", "micalaDBServer"),
port = int(CollaborationTools.getOptionValue("RecordingManager", "micalaDBPort")),
user = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderUser"),
passwd = CollaborationTools.getOptionValue("RecordingManager", "micalaDBReaderPW"),
db = CollaborationTools.getOptionValue("RecordingManager", "micalaDBName"))
except MySQLdb.Error, e:
raise RecordingManagerException("MySQL database error %d: %s" % (e.args[0], e.args[1]))
cursorTaskStarted = connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
# The following query returns the IndicoID's for which the metadata export task was started.
# Whether it was finished we will find out separately by querying CDS to see what records have been created.
cursorTaskStarted.execute('''SELECT IndicoID, LOID, name, status FROM viewStatusComprehensive
WHERE status = 'START'
AND name = "%s"
AND IndicoID LIKE "%s%%"''' % \
(CollaborationTools.getOptionValue("RecordingManager", "micalaDBStatusExportCDS"),
confId))
connection.commit()
rowsStarted = cursorTaskStarted.fetchall()
cursorTaskStarted.close()
# Do another query to get list of IndicoIDs marked as completed
cursorTaskComplete = connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
# The following query returns the IndicoID's for which the metadata export task is COMPLETE.
cursorTaskComplete.execute('''SELECT IndicoID, LOID, name, status FROM viewStatusComprehensive
WHERE status = 'COMPLETE'
AND name = "%s"
AND IndicoID LIKE "%s%%"''' % \
(CollaborationTools.getOptionValue("RecordingManager", "micalaDBStatusExportCDS"),
confId))
connection.commit()
rowsComplete = cursorTaskComplete.fetchall()
cursorTaskComplete.close()
connection.close()
# Now from these queries, build two sets
setStarted = set()
setComplete = set()
for row in rowsStarted:
setStarted.add(row["IndicoID"])
for row in rowsComplete:
setComplete.add(row["IndicoID"])
# Return a list containing the IndicoID's for whom the task was marked as started but not finished.
return list(setStarted.difference(setComplete))
@classmethod
def updateMicalaCDSExport(cls, cds_indico_matches, cds_indico_pending):
    '''If there are records found in CDS but not yet listed in the micala database as COMPLETE, then update it.

    cds_indico_matches is a dictionary of key-value pairs { IndicoID1: CDSID1, IndicoID2: CDSID2, ... }
    cds_indico_pending is a list of IndicoIDs (for whom the CDS export task has been started but not completed).
    '''
    # Logger.get('RecMan').debug('in updateMicalaCDSExport()')
    # debugging:
    # for matched in cds_indico_matches.keys():
    #     Logger.get('RecMan').debug('Looping through cds_indico_matches: %s -> %s' % (matched, cds_indico_matches[matched]))
    # for pending in cds_indico_pending:
    #     Logger.get('RecMan').debug('Looping through cds_indico_pending: %s' % pending)
    for pending in cds_indico_pending:
        # Logger.get('RecMan').debug('Looping through cds_indico_pending: %s (and looking up in cds_indico_matches)' % pending)
        try:
            # KeyError here means the CDS record has not shown up yet.
            newRecord = cds_indico_matches[pending]
            idMachine = cls.getIdMachine(CollaborationTools.getOptionValue("RecordingManager", "micalaDBMachineName"))
            idTask = cls.getIdTask(CollaborationTools.getOptionValue("RecordingManager", "micalaDBStatusExportCDS"))
            idLecture = cls.getIdLecture(pending)
            # Mark the export task COMPLETE for this lecture.
            cls.reportStatus("COMPLETE", "CDS record: %s" % newRecord, idMachine, idTask, idLecture)
            # add the CDS record number to the Lectures table
            resultAssociateCDSRecord = cls.associateCDSRecordToLOID(newRecord, idLecture)
            if not resultAssociateCDSRecord["success"]:
                Logger.get('RecMan').error("Unable to update Lectures table in micala database: %s" % resultAssociateCDSRecord["result"])
            # NOTE(review): this return exits after the FIRST matched lecture,
            # so any further pending lectures are not processed in this call.
            # The comment below says the value is unused -- confirm whether
            # the early return is intentional.
            # this is not currently used:
            return resultAssociateCDSRecord["result"]
        except KeyError:
            # current pending lecture still not found in CDS so do nothing.
            Logger.get('RecMan').debug('%s listed as pending and not found in cds_indico_matches, so it must still be pending.' % pending)
@classmethod
def cleanSQLData(cls, uncleanString):
    """Return *uncleanString* truncated to 500 characters with every
    non-alphanumeric character replaced by a single space."""
    # Slicing is a no-op for short strings, so no length check is needed.
    truncated = uncleanString[:500]
    return re.sub("[^0-9a-zA-Z]", " ", truncated)
| gpl-3.0 |
benschmaus/catapult | telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py | 1 | 9477 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from telemetry.core import exceptions
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.backends.chrome import misc_web_contents_backend
from telemetry.internal import forwarders
import py_utils
class CrOSBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
  """Browser backend for Chrome running on Chrome OS.

  Drives Chrome on a CrOS device (or local chroot) through 'cri', the CrOS
  remote interface: restarts Chrome via the session manager's dbus testing
  hook, performs login (fake / GAIA / guest) and, for remote devices,
  forwards the remote debugging port to a local one.
  """

  def __init__(self, cros_platform_backend, browser_options, cri, is_guest):
    """Set up the backend, push extensions to the device and restart the UI.

    cri      -- CrOS remote interface used to run commands on the device.
    is_guest -- whether to use a guest (incognito) session; extensions are
                not supported in guest mode.
    """
    super(CrOSBrowserBackend, self).__init__(
        cros_platform_backend, supports_tab_control=True,
        supports_extensions=not is_guest,
        browser_options=browser_options)
    assert browser_options.IsCrosBrowserOptions()
    # Initialize fields so that an explosion during init doesn't break in Close.
    self._cri = cri
    self._is_guest = is_guest
    self._forwarder = None
    # Port Chrome listens on on the device; _port is the local end and only
    # differs when port forwarding is set up for a remote device (see Start).
    self._remote_debugging_port = self._cri.GetRemotePort()
    self._port = self._remote_debugging_port

    extensions_to_load = browser_options.extensions_to_load

    # Copy extensions to temp directories on the device.
    # Note that we also perform this copy locally to ensure that
    # the owner of the extensions is set to chronos.
    for e in extensions_to_load:
      extension_dir = cri.RunCmdOnDevice(
          ['mktemp', '-d', '/tmp/extension_XXXXX'])[0].rstrip()
      e.local_path = os.path.join(extension_dir, os.path.basename(e.path))
      cri.PushFile(e.path, extension_dir)
      cri.Chown(extension_dir)

    # Restart the UI so Chrome comes up in a known, logged-out state.
    self._cri.RestartUI(self.browser_options.clear_enterprise_policy)
    py_utils.WaitFor(self.IsBrowserRunning, 20)

    # Delete test user's cryptohome vault (user data directory).
    if not self.browser_options.dont_override_profile:
      self._cri.RunCmdOnDevice(['cryptohome', '--action=remove', '--force',
                                '--user=%s' % self._username])

  @property
  def log_file_path(self):
    """No log file is collected on CrOS."""
    return None

  def GetBrowserStartupArgs(self):
    """Return the Chrome command line flags to use when restarting Chrome."""
    args = super(CrOSBrowserBackend, self).GetBrowserStartupArgs()

    # Verbose (vmodule) logging patterns for network/login debugging.
    logging_patterns = ['*/chromeos/net/*',
                        '*/chromeos/login/*',
                        'chrome_browser_main_posix']
    vmodule = '--vmodule='
    for pattern in logging_patterns:
      vmodule += '%s=2,' % pattern
    vmodule = vmodule.rstrip(',')

    args.extend([
        '--enable-smooth-scrolling',
        '--enable-threaded-compositing',
        # Allow devtools to connect to chrome.
        '--remote-debugging-port=%i' % self._remote_debugging_port,
        # Open a maximized window.
        '--start-maximized',
        # Disable system startup sound.
        '--ash-disable-system-sounds',
        # Ignore DMServer errors for policy fetches.
        '--allow-failed-policy-fetch-for-test',
        # Skip user image selection screen, and post login screens.
        '--oobe-skip-postlogin',
        # Disable chrome logging redirect. crbug.com/724273.
        '--disable-logging-redirect',
        # Debug logging.
        vmodule])

    # Disable GAIA services unless we're using GAIA login, or if there's an
    # explicit request for it.
    if (self.browser_options.disable_gaia_services and
        not self.browser_options.gaia_login):
      args.append('--disable-gaia-services')

    # Pass the chrome trace config along if tracing is active.
    trace_config_file = (self.platform_backend.tracing_controller_backend
                         .GetChromeTraceConfigFile())
    if trace_config_file:
      args.append('--trace-config-file=%s' % trace_config_file)

    return args

  @property
  def pid(self):
    """Pid of the Chrome browser process on the device (None-ish if absent)."""
    return self._cri.GetChromePid()

  @property
  def browser_directory(self):
    """Directory containing the Chrome executable on the device, if known."""
    result = self._cri.GetChromeProcess()
    if result and 'path' in result:
      return os.path.dirname(result['path'])
    return None

  @property
  def profile_directory(self):
    """Location of the logged-in user's profile on the device."""
    return '/home/chronos/Default'

  def __del__(self):
    # Best-effort cleanup; Close is idempotent.
    self.Close()

  def Start(self):
    """Restart Chrome with test flags, wait for OOBE, and log in."""
    # Escape all commas in the startup arguments we pass to Chrome
    # because dbus-send delimits array elements by commas
    startup_args = [a.replace(',', '\\,') for a in self.GetBrowserStartupArgs()]

    # Restart Chrome with the login extension and remote debugging.
    pid = self.pid
    logging.info('Restarting Chrome (pid=%d) with remote port', pid)
    args = ['dbus-send', '--system', '--type=method_call',
            '--dest=org.chromium.SessionManager',
            '/org/chromium/SessionManager',
            'org.chromium.SessionManagerInterface.EnableChromeTesting',
            'boolean:true',
            'array:string:"%s"' % ','.join(startup_args)]
    logging.info(' '.join(args))
    self._cri.RunCmdOnDevice(args)

    if not self._cri.local:
      # TODO(crbug.com/404771): Move port forwarding to network_controller.
      self._port = util.GetUnreservedAvailableLocalPort()
      self._forwarder = self._platform_backend.forwarder_factory.Create(
          forwarders.PortPair(self._port, self._remote_debugging_port),
          use_remote_port_forwarding=False)

    # Wait for new chrome and oobe. The pid change proves the restart
    # actually happened.
    py_utils.WaitFor(lambda: pid != self.pid, 15)
    self._WaitForBrowserToComeUp()
    self._InitDevtoolsClientBackend(
        remote_devtools_port=self._remote_debugging_port)
    py_utils.WaitFor(lambda: self.oobe_exists, 30)

    if self.browser_options.auto_login:
      if self._is_guest:
        pid = self.pid
        self.oobe.NavigateGuestLogin()
        # Guest browsing shuts down the current browser and launches an
        # incognito browser in a separate process, which we need to wait for.
        try:
          # TODO(achuith): Reduce this timeout to 15 sec after crbug.com/631640
          # is resolved.
          py_utils.WaitFor(lambda: pid != self.pid, 60)
        except py_utils.TimeoutException:
          self._RaiseOnLoginFailure(
              'Failed to restart browser in guest mode (pid %d).' % pid)
      elif self.browser_options.gaia_login:
        self.oobe.NavigateGaiaLogin(self._username, self._password)
      else:
        self.oobe.NavigateFakeLogin(self._username, self._password,
            self._gaia_id, not self.browser_options.disable_gaia_services)

      try:
        self._WaitForLogin()
      except py_utils.TimeoutException:
        self._RaiseOnLoginFailure('Timed out going through login screen. '
                                  + self._GetLoginStatus())

    logging.info('Browser is up!')

  def Background(self):
    raise NotImplementedError

  def Close(self):
    """Log out, tear down the forwarder and remove pushed extensions."""
    super(CrOSBrowserBackend, self).Close()

    if self._cri:
      self._cri.RestartUI(False) # Logs out.
      self._cri.CloseConnection()
      py_utils.WaitFor(lambda: not self._IsCryptohomeMounted(), 180)

    if self._forwarder:
      self._forwarder.Close()
      self._forwarder = None

    if self._cri:
      # NOTE(review): _extensions_to_load is presumably populated by the
      # parent class from browser_options -- it is not set in this file.
      for e in self._extensions_to_load:
        self._cri.RmRF(os.path.dirname(e.local_path))

    self._cri = None

  def IsBrowserRunning(self):
    """True if a Chrome process currently exists on the device."""
    return bool(self.pid)

  def GetStandardOutput(self):
    return 'Cannot get standard output on CrOS'

  def GetStackTrace(self):
    return (False, 'Cannot get stack trace on CrOS')

  # Minidump handling is not supported on CrOS; all of these return None.
  def GetMostRecentMinidumpPath(self):
    return None

  def GetAllMinidumpPaths(self):
    return None

  def GetAllUnsymbolizedMinidumpPaths(self):
    return None

  def SymbolizeMinidump(self, minidump_path):
    return None

  @property
  @decorators.Cache
  def misc_web_contents_backend(self):
    """Access to chrome://oobe/login page."""
    return misc_web_contents_backend.MiscWebContentsBackend(self)

  @property
  def oobe(self):
    """Web contents of the OOBE (out-of-box-experience / login) page."""
    return self.misc_web_contents_backend.GetOobe()

  @property
  def oobe_exists(self):
    """Whether the OOBE page is currently present."""
    return self.misc_web_contents_backend.oobe_exists

  @property
  def _username(self):
    return self.browser_options.username

  @property
  def _password(self):
    return self.browser_options.password

  @property
  def _gaia_id(self):
    return self.browser_options.gaia_id

  def _IsCryptohomeMounted(self):
    """True when the (guest or test user) cryptohome vault is mounted."""
    username = '$guest' if self._is_guest else self._username
    return self._cri.IsCryptohomeMounted(username, self._is_guest)

  def _GetLoginStatus(self):
    """Returns login status. If logged in, empty string is returned."""
    status = ''
    if not self._IsCryptohomeMounted():
      status += 'Cryptohome not mounted. '
    if not self.HasBrowserFinishedLaunching():
      status += 'Browser didn\'t launch. '
    if self.oobe_exists:
      status += 'OOBE not dismissed.'
    return status

  def _IsLoggedIn(self):
    """Returns True if cryptohome has mounted, the browser is
    responsive to devtools requests, and the oobe has been dismissed."""
    return not self._GetLoginStatus()

  def _WaitForLogin(self):
    # Wait for cryptohome to mount.
    py_utils.WaitFor(self._IsLoggedIn, 900)

    # For incognito mode, the session manager actually relaunches chrome with
    # new arguments, so we have to wait for the browser to come up.
    self._WaitForBrowserToComeUp()

    # Wait for extensions to load.
    if self._supports_extensions:
      self._WaitForExtensionsToLoad()

  def _RaiseOnLoginFailure(self, error):
    """Capture a screenshot (when possible) and raise LoginException."""
    if self._platform_backend.CanTakeScreenshot():
      self._cri.TakeScreenshotWithPrefix('login-screen')
    raise exceptions.LoginException(error)
| bsd-3-clause |
raw1z/ultisnips | pythonx/UltiSnips/snippet/parsing/_lexer.py | 29 | 10109 | #!/usr/bin/env python
# encoding: utf-8
"""Not really a lexer in the classical sense, but code to convert snippet
definitions into logical units called Tokens."""
import string
import re
from UltiSnips.compatibility import as_unicode
from UltiSnips.position import Position
from UltiSnips.text import unescape
class _TextIterator(object):
    """Character iterator over snippet text that tracks line/column.

    'offset' gives the starting position; 'pos' always reflects the
    position of the next character to be returned.
    """

    def __init__(self, text, offset):
        self._text = as_unicode(text)
        self._line = offset.line
        self._col = offset.col
        self._idx = 0

    def __iter__(self):
        """Iterator interface."""
        return self

    def __next__(self):
        """Consume and return the next character, updating line/column."""
        if self._idx >= len(self._text):
            raise StopIteration

        char = self._text[self._idx]
        if char == '\n':
            # A newline moves us to column 0 of the following line.
            self._line += 1
            self._col = 0
        else:
            self._col += 1
        self._idx += 1

        return char
    next = __next__  # for python2

    def peek(self, count=1):
        """Returns the next 'count' characters without advancing the stream."""
        if count > 1:  # Slicing may return '' / fewer chars near the end.
            return self._text[self._idx:self._idx + count]
        if self._idx < len(self._text):
            return self._text[self._idx]
        return None

    @property
    def pos(self):
        """Current position in the text."""
        return Position(self._line, self._col)
def _parse_number(stream):
"""Expects the stream to contain a number next, returns the number without
consuming any more bytes."""
rv = ''
while stream.peek() and stream.peek() in string.digits:
rv += next(stream)
return int(rv)
def _parse_till_closing_brace(stream):
    """
    Returns all chars till a non-escaped } is found. Other
    non escaped { are taken into account and skipped over.

    Will also consume the closing }, but not return it
    """
    chunks = []
    depth = 1
    while True:
        if EscapeCharToken.starts_here(stream, '{}'):
            # Keep escaped braces verbatim (backslash included).
            chunks.append(next(stream) + next(stream))
            continue
        char = next(stream)
        if char == '{':
            depth += 1
        elif char == '}':
            depth -= 1
            if depth == 0:
                break
        chunks.append(char)
    return ''.join(chunks)
def _parse_till_unescaped_char(stream, chars):
    """
    Returns all chars till a non-escaped char out of 'chars' is found.

    Also consumes the closing char and returns it as the second
    element of the returned (text, char) tuple.
    """
    collected = []
    while True:
        saw_escape = False
        for candidate in chars:
            if EscapeCharToken.starts_here(stream, candidate):
                # Keep the escape sequence verbatim and look again.
                collected.append(next(stream) + next(stream))
                saw_escape = True
        if saw_escape:
            continue
        char = next(stream)
        if char in chars:
            break
        collected.append(char)
    return ''.join(collected), char
class Token(object):
    """Represents a Token as parsed from a snippet definition.

    Instantiating a token consumes its text from 'gen' (a _TextIterator):
    'start' and 'end' record the stream position before and after parsing.
    """

    def __init__(self, gen, indent):
        self.initial_text = as_unicode('')
        self.start = gen.pos
        self._parse(gen, indent)  # subclass hook; advances the stream
        self.end = gen.pos

    def _parse(self, stream, indent):
        """Parses the token from 'stream' with the current 'indent'."""
        pass  # Does nothing
class TabStopToken(Token):
    """A tabstop with optional default text, e.g. ${1:blub}."""

    CHECK = re.compile(r'^\${\d+[:}]')

    @classmethod
    def starts_here(cls, stream):
        """Return True if a tabstop definition begins at the stream position."""
        return cls.CHECK.match(stream.peek(10)) is not None

    def _parse(self, stream, indent):
        # Consume the leading '${'.
        next(stream)
        next(stream)

        self.number = _parse_number(stream)
        # A ':' separates the number from the default text; skip it if present.
        if stream.peek() == ':':
            next(stream)
        self.initial_text = _parse_till_closing_brace(stream)

    def __repr__(self):
        return 'TabStopToken(%r,%r,%r,%r)' % (
            self.start, self.end, self.number, self.initial_text
        )
class VisualToken(Token):
    """${VISUAL} -- placeholder for the last visual selection, with an
    optional alternative text and an optional sed-like transformation."""

    CHECK = re.compile(r"^\${VISUAL[:}/]")

    @classmethod
    def starts_here(cls, stream):
        """Returns true if this token starts at the current position in
        'stream'."""
        return cls.CHECK.match(stream.peek(10)) is not None

    def _parse(self, stream, indent):
        for _ in range(8):  # ${VISUAL
            next(stream)

        if stream.peek() == ':':
            next(stream)
        # Text up to '/' or '}' is the fallback used when there is no
        # visual selection.
        self.alternative_text, char = _parse_till_unescaped_char(stream, '/}')
        self.alternative_text = unescape(self.alternative_text)

        if char == '/':  # Transformation going on
            try:
                self.search = _parse_till_unescaped_char(stream, '/')[0]
                self.replace = _parse_till_unescaped_char(stream, '/')[0]
                self.options = _parse_till_closing_brace(stream)
            except StopIteration:
                # Running out of input mid-transformation means the '/'
                # separators were unbalanced (likely an unescaped '/').
                raise RuntimeError(
                    "Invalid ${VISUAL} transformation! Forgot to escape a '/'?")
        else:
            self.search = None
            self.replace = None
            self.options = None

    def __repr__(self):
        return 'VisualToken(%r,%r)' % (
            self.start, self.end
        )
class TransformationToken(Token):
    """A tabstop transformation, e.g. ${1/match/replace/options}."""

    CHECK = re.compile(r'^\${\d+\/')

    @classmethod
    def starts_here(cls, stream):
        """Return True if a transformation begins at the stream position."""
        return cls.CHECK.match(stream.peek(10)) is not None

    def _parse(self, stream, indent):
        next(stream)  # consume '$'
        next(stream)  # consume '{'

        self.number = _parse_number(stream)

        next(stream)  # the '/' separating the number from the pattern
        self.search = _parse_till_unescaped_char(stream, '/')[0]
        self.replace = _parse_till_unescaped_char(stream, '/')[0]
        self.options = _parse_till_closing_brace(stream)

    def __repr__(self):
        return 'TransformationToken(%r,%r,%r,%r,%r)' % (
            self.start, self.end, self.number, self.search, self.replace
        )
class MirrorToken(Token):
    """A mirror of a tabstop, e.g. $1."""

    CHECK = re.compile(r'^\$\d+')

    @classmethod
    def starts_here(cls, stream):
        """Return True if a mirror reference begins at the stream position."""
        return cls.CHECK.match(stream.peek(10)) is not None

    def _parse(self, stream, indent):
        next(stream)  # consume the '$'
        self.number = _parse_number(stream)

    def __repr__(self):
        return 'MirrorToken(%r,%r,%r)' % (
            self.start, self.end, self.number
        )
class EscapeCharToken(Token):
    r"""An escaped character such as \n or \$."""

    @classmethod
    def starts_here(cls, stream, chars=r'{}\$`'):
        """Return True when a backslash escaping one of 'chars' is next."""
        lookahead = stream.peek(2)
        if len(lookahead) == 2 and lookahead[0] == '\\' and lookahead[1] in chars:
            return True

    def _parse(self, stream, indent):
        next(stream)  # skip the backslash
        self.initial_text = next(stream)

    def __repr__(self):
        return 'EscapeCharToken(%r,%r,%r)' % (
            self.start, self.end, self.initial_text
        )
class ShellCodeToken(Token):
    """Shell interpolation, e.g. `echo "hi"`."""

    @classmethod
    def starts_here(cls, stream):
        """Shell code starts at any (unescaped) backtick."""
        return stream.peek(1) == '`'

    def _parse(self, stream, indent):
        next(stream)  # consume the opening backtick
        self.code = _parse_till_unescaped_char(stream, '`')[0]

    def __repr__(self):
        return 'ShellCodeToken(%r,%r,%r)' % (
            self.start, self.end, self.code
        )
class PythonCodeToken(Token):
    """Embedded python code, e.g. `!p snip.rv = "Hi"`."""

    CHECK = re.compile(r'^`!p\s')

    @classmethod
    def starts_here(cls, stream):
        """Returns true if this token starts at the current position in
        'stream'."""
        return cls.CHECK.match(stream.peek(4)) is not None

    def _parse(self, stream, indent):
        for _ in range(3):
            next(stream)  # `!p
        if stream.peek() in '\t ':
            next(stream)

        code = _parse_till_unescaped_char(stream, '`')[0]

        # Strip the indent if any: the first line is kept as-is, the
        # snippet's leading whitespace is removed from all following lines
        # so the embedded Python code starts at column zero.
        if len(indent):
            lines = code.splitlines()
            self.code = lines[0] + '\n'
            self.code += '\n'.join([l[len(indent):]
                                    for l in lines[1:]])
        else:
            self.code = code
        self.indent = indent

    def __repr__(self):
        return 'PythonCodeToken(%r,%r,%r)' % (
            self.start, self.end, self.code
        )
class VimLCodeToken(Token):
    """Embedded VimL code, e.g. `!v g:hi`."""

    CHECK = re.compile(r'^`!v\s')

    @classmethod
    def starts_here(cls, stream):
        """Return True if a VimL interpolation begins at the stream position."""
        return cls.CHECK.match(stream.peek(4)) is not None

    def _parse(self, stream, indent):
        for _ in range(4):  # skip the '`!v ' prefix
            next(stream)
        self.code = _parse_till_unescaped_char(stream, '`')[0]

    def __repr__(self):
        return 'VimLCodeToken(%r,%r,%r)' % (
            self.start, self.end, self.code
        )
class EndOfTextToken(Token):
    """Sentinel token emitted once the input text is exhausted."""

    def __repr__(self):
        return 'EndOfText(%r)' % self.end
def tokenize(text, indent, offset, allowed_tokens):
    """Yield tokens of 'text'['offset':], assumed to have 'indent' as the
    leading whitespace of its lines. Only 'allowed_tokens' classes are
    considered valid tokens; everything else is skipped character by
    character. Always ends with an EndOfTextToken.
    """
    stream = _TextIterator(text, offset)
    try:
        while True:
            for token_class in allowed_tokens:
                if token_class.starts_here(stream):
                    yield token_class(stream, indent)
                    break
            else:
                # No token matched here: drop one character and retry.
                next(stream)
    except StopIteration:
        yield EndOfTextToken(stream, indent)
| gpl-3.0 |
joebowen/LogMyRocket_API | LogMyRocket/libraries/sys_packages/_pytest/helpconfig.py | 180 | 5120 | """ version info, help messages, tracing configuration. """
import py
import pytest
import os, sys
def pytest_addoption(parser):
    """Register the version/help/plugin-loading/tracing command line options."""
    group = parser.getgroup('debugconfig')
    group.addoption('--version', action="store_true",
                    help="display pytest lib version and import information.")
    group._addoption("-h", "--help", action="store_true", dest="help",
                     help="show help message and configuration info")
    group._addoption('-p', action="append", dest="plugins", default = [],
                     metavar="name",
                     help="early-load given plugin (multi-allowed). "
                          "To avoid loading of plugins, use the `no:` prefix, e.g. "
                          "`no:doctest`.")
    # Bug fix: a stray trailing comma after this call used to turn the
    # statement into a useless one-element tuple expression.
    group.addoption('--traceconfig', '--trace-config',
                    action="store_true", default=False,
                    help="trace considerations of conftest.py files.")
    group.addoption('--debug',
                    action="store_true", dest="debug", default=False,
                    help="store internal tracing debug information in 'pytestdebug.log'.")
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
    """Wrap command line parsing to honour --debug.

    When --debug is given, write version information to 'pytestdebug.log'
    in the current directory and route all internal tracing there until
    the config is torn down again.
    """
    outcome = yield
    config = outcome.get_result()
    if config.option.debug:
        path = os.path.abspath("pytestdebug.log")
        debugfile = open(path, 'w')
        debugfile.write("versions pytest-%s, py-%s, "
                        "python-%s\ncwd=%s\nargs=%s\n\n" %(
            pytest.__version__, py.__version__,
            ".".join(map(str, sys.version_info)),
            os.getcwd(), config._origargs))
        config.trace.root.setwriter(debugfile.write)
        undo_tracing = config.pluginmanager.enable_tracing()
        sys.stderr.write("writing pytestdebug information to %s\n" % path)

        def unset_tracing():
            # Close the log file and restore the previous (null) trace writer.
            debugfile.close()
            sys.stderr.write("wrote pytestdebug information to %s\n" %
                             debugfile.name)
            config.trace.root.setwriter(None)
            undo_tracing()

        config.add_cleanup(unset_tracing)
def pytest_cmdline_main(config):
    """Handle --version and --help before normal test running starts.

    Returns 0 (exit code) when either option was handled, None otherwise.
    """
    if config.option.version:
        location = py.path.local(pytest.__file__)
        sys.stderr.write("This is pytest version %s, imported from %s\n" %
                         (pytest.__version__, location))
        for line in getpluginversioninfo(config) or []:
            sys.stderr.write(line + "\n")
        return 0
    if config.option.help:
        config._do_configure()
        showhelp(config)
        config._ensure_unconfigure()
        return 0
def showhelp(config):
    """Write full option, ini-option and environment help to the terminal
    reporter's writer, followed by any collected warnings."""
    reporter = config.pluginmanager.get_plugin('terminalreporter')
    tw = reporter._tw
    # Regular command line options, as formatted by argparse.
    tw.write(config._parser.optparser.format_help())
    tw.line()
    tw.line()
    #tw.sep( "=", "config file settings")
    tw.line("[pytest] ini-options in the next "
            "pytest.ini|tox.ini|setup.cfg file:")
    tw.line()

    # One line per registered ini option: "name (type)  help".
    for name in config._parser._ininames:
        help, type, default = config._parser._inidict[name]
        if type is None:
            type = "string"
        spec = "%s (%s)" % (name, type)
        line = "  %-24s %s" %(spec, help)
        tw.line(line[:tw.fullwidth])

    tw.line()
    tw.line("environment variables:")
    vars = [
        ("PYTEST_ADDOPTS", "extra command line options"),
        ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
        ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
    ]
    for name, help in vars:
        tw.line("  %-24s %s" % (name, help))
    tw.line()
    tw.line()

    tw.line("to see available markers type: py.test --markers")
    tw.line("to see available fixtures type: py.test --fixtures")
    tw.line("(shown according to specified file_or_dir or current dir "
            "if not specified)")

    # Echo any warnings gathered so far so they are visible next to the help.
    for warningreport in reporter.stats.get('warnings', []):
        tw.line("warning : " + warningreport.message, red=True)
    return
# Conftest.py-level options listed in --traceconfig output (informational).
conftest_options = [
    ('pytest_plugins', 'list of plugin names to load'),
]
def getpluginversioninfo(config):
    """Return a list of text lines describing setuptools-registered plugins.

    The list is empty when no such plugins are installed.
    """
    plugininfo = config.pluginmanager.list_plugin_distinfo()
    if not plugininfo:
        return []
    lines = ["setuptools registered plugins:"]
    for plugin, dist in plugininfo:
        loc = getattr(plugin, '__file__', repr(plugin))
        lines.append(" " + "%s-%s at %s" % (dist.project_name, dist.version, loc))
    return lines
def pytest_report_header(config):
    """Contribute version and plugin information lines to the report header.

    Version lines appear under --debug or --traceconfig; the active plugin
    listing only under --traceconfig.
    """
    lines = []
    if config.option.debug or config.option.traceconfig:
        lines.append("using: pytest-%s pylib-%s" %
                     (pytest.__version__, py.__version__))
        # extend() with an empty list is a no-op, so no guard is needed.
        lines.extend(getpluginversioninfo(config))

    if config.option.traceconfig:
        lines.append("active plugins:")
        for name, plugin in config.pluginmanager.list_name_plugin():
            if hasattr(plugin, '__file__'):
                location = plugin.__file__
            else:
                location = repr(plugin)
            lines.append(" %-20s: %s" % (name, location))
    return lines
| gpl-3.0 |
pandel/Marlin | buildroot/share/scripts/createTemperatureLookupMarlin.py | 6 | 6204 | #!/usr/bin/python
"""Thermistor Value Lookup Table Generator
Generates lookup to temperature values for use in a microcontroller in C format based on:
http://en.wikipedia.org/wiki/Steinhart-Hart_equation
The main use is for Arduino programs that read data from the circuit board described here:
http://reprap.org/wiki/Temperature_Sensor_v2.0
Usage: python createTemperatureLookupMarlin.py [options]
Options:
-h, --help show this help
--rp=... pull-up resistor
--t1=ttt:rrr low temperature temperature:resistance point (around 25 degC)
--t2=ttt:rrr middle temperature temperature:resistance point (around 150 degC)
--t3=ttt:rrr high temperature temperature:resistance point (around 250 degC)
--num-temps=... the number of temperature points to calculate (default: 36)
"""
from math import *
import sys
import getopt
"Constants"
ZERO = 273.15 # zero point of Kelvin scale
VADC = 5 # ADC voltage
VCC = 5 # supply voltage
ARES = pow(2,10) # 10 Bit ADC resolution
VSTEP = VADC / ARES # ADC voltage resolution
TMIN = 0 # lowest temperature in table
TMAX = 350 # highest temperature in table
class Thermistor:
    "Class to do the thermistor maths"
    def __init__(self, rp, t1, r1, t2, r2, t3, r3):
        # Fit the three (temperature degC, resistance Ohm) calibration
        # points to the Steinhart-Hart equation
        #   1/T = c1 + c2*ln(R) + c3*ln(R)^3
        # rp is the pull-up resistor of the measuring divider.
        l1 = log(r1)
        l2 = log(r2)
        l3 = log(r3)
        y1 = 1.0 / (t1 + ZERO)              # adjust scale: degC -> 1/Kelvin
        y2 = 1.0 / (t2 + ZERO)
        y3 = 1.0 / (t3 + ZERO)
        x = (y2 - y1) / (l2 - l1)
        y = (y3 - y1) / (l3 - l1)
        c = (y - x) / ((l3 - l2) * (l1 + l2 + l3))
        b = x - c * (l1**2 + l2**2 + l1*l2)
        a = y1 - (b + l1**2 *c)*l1
        if c < 0:
            # A negative cubic coefficient is physically implausible for an
            # NTC thermistor, so warn and flip the sign rather than abort.
            print "//////////////////////////////////////////////////////////////////////////////////////"
            print "// WARNING: negative coefficient 'c'! Something may be wrong with the measurements! //"
            print "//////////////////////////////////////////////////////////////////////////////////////"
            c = -c
        self.c1 = a                         # Steinhart-Hart coefficients
        self.c2 = b
        self.c3 = c
        self.rp = rp                        # pull-up resistance
    def resol(self, adc):
        "Convert ADC reading into a resolution"
        # Resolution = temperature difference between adjacent ADC counts.
        res = self.temp(adc)-self.temp(adc+1)
        return res
    def voltage(self, adc):
        "Convert ADC reading into a Voltage"
        return adc * VSTEP                  # convert the 10 bit ADC value to a voltage
    def resist(self, adc):
        "Convert ADC reading into a resistance in Ohms"
        # Voltage-divider equation solved for the thermistor leg.
        r = self.rp * self.voltage(adc) / (VCC - self.voltage(adc))  # resistance of thermistor
        return r
    def temp(self, adc):
        "Convert ADC reading into a temperature in Celcius"
        l = log(self.resist(adc))
        Tinv = self.c1 + self.c2*l + self.c3* l**3  # inverse temperature (1/K)
        return (1/Tinv) - ZERO              # temperature in degC
    def adc(self, temp):
        "Convert temperature into a ADC reading"
        # Invert the Steinhart-Hart cubic analytically (Cardano-style) to
        # recover R, then map R back through the divider to an ADC count.
        x = (self.c1 - (1.0 / (temp+ZERO))) / (2*self.c3)
        y = sqrt((self.c2 / (3*self.c3))**3 + x**2)
        r = exp((y-x)**(1.0/3) - (y+x)**(1.0/3))
        return (r / (self.rp + r)) * ARES
def main(argv):
    # Parse command-line options, fit the thermistor model, and print the
    # Marlin C lookup table to stdout.  Python 2 script (print statements).
    "Default values"
    t1 = 25             # low temperature calibration point (degC)
    r1 = 100000         # resistance at low temperature (100 kOhm)
    t2 = 150            # middle temperature calibration point (degC)
    r2 = 1641.9         # resistance at middle temperature (1.6 kOhm)
    t3 = 250            # high temperature calibration point (degC)
    r3 = 226.15         # resistance at high temperature (226.15 Ohm)
    rp = 4700;          # pull-up resistor (4.7 kOhm)
    num_temps = 36;     # number of entries for look-up table
    try:
        opts, args = getopt.getopt(argv, "h", ["help", "rp=", "t1=", "t2=", "t3=", "num-temps="])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt == "--rp":
            rp = int(arg)
        elif opt == "--t1":
            # calibration points are given as "temperature:resistance"
            arg = arg.split(':')
            t1 = float(arg[0])
            r1 = float(arg[1])
        elif opt == "--t2":
            arg = arg.split(':')
            t2 = float(arg[0])
            r2 = float(arg[1])
        elif opt == "--t3":
            arg = arg.split(':')
            t3 = float(arg[0])
            r3 = float(arg[1])
        elif opt == "--num-temps":
            num_temps = int(arg)
    t = Thermistor(rp, t1, r1, t2, r2, t3, r3)
    increment = int((ARES-1)/(num_temps-1));
    # step is negative: the table runs from hot to cold.
    step = (TMIN-TMAX) / (num_temps-1)
    low_bound = t.temp(ARES-1);     # coldest temperature the ADC can represent
    up_bound = t.temp(1);           # hottest temperature the ADC can represent
    min_temp = int(TMIN if TMIN > low_bound else low_bound)
    max_temp = int(TMAX if TMAX < up_bound else up_bound)
    # NOTE(review): the range end uses TMIN+step, not min_temp, so min_temp
    # is computed but unused — presumably intentional clamping leftovers;
    # verify against upstream before changing.
    temps = range(max_temp, TMIN+step, step);
    print "// Thermistor lookup table for Marlin"
    print "// ./createTemperatureLookupMarlin.py --rp=%s --t1=%s:%s --t2=%s:%s --t3=%s:%s --num-temps=%s" % (rp, t1, r1, t2, r2, t3, r3, num_temps)
    print "// Steinhart-Hart Coefficients: a=%.15g, b=%.15g, c=%.15g " % (t.c1, t.c2, t.c3)
    print "// Theoretical limits of thermistor: %.2f to %.2f degC" % (low_bound, up_bound)
    print
    print "const short temptable[][2] PROGMEM = {"
    for temp in temps:
        adc = t.adc(temp)
        # last row gets a space instead of a trailing comma
        print " { (short) (%7.2f * OVERSAMPLENR ), %4s }%s // v=%.3f\tr=%.3f\tres=%.3f degC/count" % (adc , temp, \
            ',' if temp != temps[-1] else ' ', \
            t.voltage(adc), \
            t.resist( adc), \
            t.resol( adc) \
            )
    print "};"
def usage():
    # The module docstring doubles as the command-line help text.
    print __doc__
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 |
aricoin/Aricoin | qa/rpc-tests/util.py | 1 | 5260 | # Copyright (c) 2014 The Aricoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-aricoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-aricoinrpc"))
from decimal import Decimal
import json
import shutil
import subprocess
import time
from aricoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
# Base ports for regtest nodes: node i listens on BASE+i for P2P / RPC.
START_P2P_PORT=11000
START_RPC_PORT=11100
def check_json_precision():
    """Make sure the json library round-trips BTC amounts without losing precision."""
    amount = Decimal("20000000.00000003")
    roundtripped = json.loads(json.dumps(float(amount)))
    # Converting to satoshis must reproduce the amount exactly.
    if int(roundtripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """Poll every RPC connection until all report the same block count."""
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # zero or one distinct height means everybody agrees
        if len(set(heights)) <= 1:
            return
        time.sleep(1)
def sync_mempools(rpc_connections):
    """Poll until every connection reports an identical raw mempool."""
    while True:
        reference_pool = set(rpc_connections[0].getrawmempool())
        matching = 1  # node 0 trivially matches itself
        for conn in rpc_connections[1:]:
            if set(conn.getrawmempool()) == reference_pool:
                matching += 1
        if matching == len(rpc_connections):
            return
        time.sleep(1)
aricoind_processes = []
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    aricoind and aricoin-cli must be in search path.
    """
    # Build the cache only once; later runs just copy the cached datadirs.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run aricoinds:
        for i in range(4):
            datadir = os.path.join("cache", "node"+str(i))
            os.makedirs(datadir)
            with open(os.path.join(datadir, "aricoin.conf"), 'w') as f:
                f.write("regtest=1\n");
                f.write("rpcuser=rt\n");
                f.write("rpcpassword=rt\n");
                f.write("port="+str(START_P2P_PORT+i)+"\n");
                f.write("rpcport="+str(START_RPC_PORT+i)+"\n");
            args = [ "aricoind", "-keypool=1", "-datadir="+datadir ]
            if i > 0:
                # nodes 1-3 connect to node 0, forming a star topology
                args.append("-connect=127.0.0.1:"+str(START_P2P_PORT))
            aricoind_processes.append(subprocess.Popen(args))
            # -rpcwait blocks until the RPC interface is up
            subprocess.check_call([ "aricoin-cli", "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
                rpcs.append(AuthServiceProxy(url))
            except:
                # NOTE(review): bare except; any connection failure aborts the run
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        for i in range(4):
            rpcs[i].setgenerate(True, 25)
            sync_blocks(rpcs)
        for i in range(4):
            rpcs[i].setgenerate(True, 25)
            sync_blocks(rpcs)
        # Shut them down, and remove debug.logs:
        stop_nodes(rpcs)
        wait_aricoinds()
        for i in range(4):
            os.remove(debug_log("cache", i))
    # Seed the test directory from the (now-populated) cache.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
def start_nodes(num_nodes, dir):
    """Launch num_nodes aricoind daemons from dir and return their RPC proxies."""
    # Start aricoinds, and wait for RPC interface to be up and running:
    devnull = open("/dev/null", "w+")
    for i in range(num_nodes):
        datadir = os.path.join(dir, "node"+str(i))
        args = [ "aricoind", "-datadir="+datadir ]
        aricoind_processes.append(subprocess.Popen(args))
        # -rpcwait makes the CLI block until the daemon answers RPC
        subprocess.check_call([ "aricoin-cli", "-datadir="+datadir,
                                "-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    # Create&return JSON-RPC connections
    rpc_connections = []
    for i in range(num_nodes):
        url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
        rpc_connections.append(AuthServiceProxy(url))
    return rpc_connections
def debug_log(dir, n_node):
    """Return the path of node *n_node*'s regtest debug.log below *dir*."""
    node_dir = "node" + str(n_node)
    return os.path.join(dir, node_dir, "regtest", "debug.log")
def stop_nodes(nodes):
    """Ask every node to shut down, then empty the list in place."""
    for node in nodes:
        node.stop()
    del nodes[:]  # emptying the array closes connections as a side effect
def wait_aricoinds():
    """Block until every spawned aricoind process has exited, then forget them."""
    for proc in aricoind_processes:
        proc.wait()
    del aricoind_processes[:]
def connect_nodes(from_connection, node_num):
    """Ask *from_connection* for a one-shot connection attempt to node *node_num*."""
    target = "127.0.0.1:%d" % (START_P2P_PORT + node_num)
    from_connection.addnode(target, "onetry")
def assert_equal(thing1, thing2):
    """Raise AssertionError (showing both values) unless the arguments compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s" % (str(thing1), str(thing2)))
| mit |
JonWel/CoolProp | Web/scripts/fluid_properties.Consistency.py | 3 | 1267 | from __future__ import print_function
import os.path
import CoolProp
import subprocess
import sys
# Generate one small driver script per CoolProp fluid and run each of them
# to produce consistency plots (PNG + PDF) under Web/fluid_properties.
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
root_dir = os.path.abspath(os.path.join(web_dir, '..'))
fluids_path = os.path.join(web_dir,'fluid_properties','fluids')
plots_path = os.path.join(web_dir,'fluid_properties','fluids','Consistencyplots')

# Template for the per-fluid driver script; {fluid:s} is filled in below.
template = """from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') #Force mpl to use a non-GUI backend
import matplotlib.pyplot as plt
from CoolProp.Plots.ConsistencyPlots import ConsistencyFigure
ff = ConsistencyFigure('{fluid:s}')
ff.savefig('{fluid:s}.png', dpi = 30)
ff.savefig('{fluid:s}.pdf')
plt.close()
del ff
"""

if not os.path.exists(plots_path):
    os.makedirs(plots_path)

# Write and immediately execute one driver script per fluid.  Each is run
# in its own interpreter so matplotlib state cannot leak between fluids.
for fluid in CoolProp.__fluids__:
    print('fluid:', fluid)
    file_string = template.format(fluid = fluid)
    file_path = os.path.join(plots_path, fluid + '.py')
    print('Writing to', file_path)
    with open(file_path, 'w') as fp:
        fp.write(file_string)
    print('calling:', 'python "' + fluid + '.py"', 'in',plots_path)
    subprocess.check_call('python "' + fluid + '.py"', cwd = plots_path, stdout = sys.stdout, stderr = sys.stderr, shell = True)
Galaxy-Nexus-Project/android_omap_tuna | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional filter: a single argument is treated as a pid if it parses as
# an integer, otherwise as a command name (comm).
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Nested auto-vivifying dict: syscalls[comm][pid][syscall_id] -> count.
syscalls = autodict()
def trace_begin():
    # Called by perf once before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the aggregated table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Per-event hook from perf: tally one sys_enter for (comm, pid, id).

    Events are skipped when a comm/pid filter is active and does not match.
    """
    wrong_comm = for_comm and common_comm != for_comm
    wrong_pid = for_pid and common_pid != for_pid
    if wrong_comm or wrong_pid:
        return
    per_pid = syscalls[common_comm][common_pid]
    try:
        per_pid[id] += 1
    except TypeError:
        # first occurrence: the autodict leaf holds no integer yet
        per_pid[id] = 1
def print_syscall_totals():
    # Print the per-process syscall counts, most-frequent first.  The
    # trailing commas suppress Python 2's automatic newline after print.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # sort by (count, id) descending so the hottest syscalls lead
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
anisku11/sublimeku | Packages/pygments/all/pygments/lexers/igor.py | 35 | 16982 | # -*- coding: utf-8 -*-
"""
pygments.lexers.igor
~~~~~~~~~~~~~~~~~~~~
Lexers for Igor Pro.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Keyword, Name, String
__all__ = ['IgorLexer']
class IgorLexer(RegexLexer):
    """
    Pygments Lexer for Igor Pro procedure files (.ipf).
    See http://www.wavemetrics.com/ and http://www.igorexchange.com/.
    .. versionadded:: 2.0
    """

    name = 'Igor'
    aliases = ['igor', 'igorpro']
    filenames = ['*.ipf']
    mimetypes = ['text/ipf']

    # Igor Pro is case-insensitive; rules below match within single lines.
    flags = re.IGNORECASE | re.MULTILINE

    # Flow-control statements.
    flowControl = (
        'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
        'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry',
        'break', 'continue', 'return', 'AbortOnRTE', 'AbortOnValue'
    )
    # Built-in data types.
    types = (
        'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
        'STRUCT', 'dfref', 'funcref', 'char', 'uchar', 'int16', 'uint16', 'int32',
        'uint32', 'float', 'double'
    )
    # Declaration / definition keywords.
    keywords = (
        'override', 'ThreadSafe', 'MultiThread', 'static', 'Proc',
        'Picture', 'Prompt', 'DoPrompt', 'macro', 'window', 'function', 'end',
        'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu'
    )
    # Built-in operations (command-style calls); highlighted as Name.Class.
    operations = (
        'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio',
        'AddMovieFrame', 'APMath', 'Append', 'AppendImage',
        'AppendLayoutObject', 'AppendMatrixContour', 'AppendText',
        'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour',
        'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall',
        'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox',
        'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc',
        'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar',
        'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile',
        'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross',
        'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor',
        'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions',
        'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide',
        'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints',
        'Differentiate', 'dir', 'Display', 'DisplayHelpTopic',
        'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow',
        'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine',
        'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText',
        'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder',
        'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText',
        'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp',
        'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR',
        'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly',
        'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf',
        'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload',
        'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo',
        'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow',
        'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox',
        'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools',
        'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles',
        'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection',
        'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask',
        'ImageHistModification', 'ImageHistogram', 'ImageInterpolate',
        'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration',
        'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave',
        'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold',
        'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort',
        'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath',
        'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder',
        'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings',
        'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout',
        'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData',
        'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess',
        'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime',
        'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter',
        'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve',
        'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD',
        'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve',
        'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText',
        'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList',
        'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout',
        'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder',
        'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
        'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain',
        'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage',
        'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath',
        'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open',
        'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo',
        'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction',
        'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
        'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs',
        'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable',
        'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit',
        'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour',
        'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage',
        'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder',
        'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages',
        'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample',
        'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
        'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook',
        'SavePackagePreferences', 'SavePICT', 'SaveTableCopy',
        'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern',
        'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer',
        'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode',
        'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed',
        'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus',
        'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth',
        'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet',
        'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart',
        'SphericalInterpolate', 'SphericalTriangulate', 'SplitString',
        'sprintf', 'sscanf', 'Stack', 'StackWindows',
        'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest',
        'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
        'StatsCircularCorrelationTest', 'StatsCircularMeans',
        'StatsCircularMoments', 'StatsCircularTwoSampleTest',
        'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest',
        'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
        'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest',
        'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
        'StatsLinearRegression', 'StatsMultiCorrelationTest',
        'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles',
        'StatsRankCorrelationTest', 'StatsResample', 'StatsSample',
        'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest',
        'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest',
        'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest',
        'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String',
        'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile',
        'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid',
        'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv',
        'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform',
        'WindowFunction',
    )
    # Built-in functions (numeric); highlighted as Name.Function.
    functions = (
        'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog',
        'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
        'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi',
        'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch',
        'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs',
        'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev',
        'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos',
        'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi',
        'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual',
        'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian',
        'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave',
        'DDERequestWave', 'DDEStatus', 'DDETerminate', 'defined', 'deltax', 'digamma',
        'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf',
        'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata',
        'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor',
        'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
        'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
        'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize',
        'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise',
        'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1',
        'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion',
        'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D',
        'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre',
        'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln',
        'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint',
        'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max',
        'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey',
        'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect',
        'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x',
        'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar',
        'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec',
        'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ',
        'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD',
        'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF',
        'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
        'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF',
        'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF',
        'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF',
        'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF',
        'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF',
        'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF',
        'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF',
        'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF',
        'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
        'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF',
        'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF',
        'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF',
        'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF',
        'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF',
        'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
        'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
        'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
        'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
        'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF',
        'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF',
        'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF',
        'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF',
        'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF',
        'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute',
        'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
        'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF',
        'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF',
        'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF',
        'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
        'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
        'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise',
        'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF',
        'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch',
        'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists',
        'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease',
        'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks',
        'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists',
        'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem',
        'WinType', 'WNoise', 'x2pnt', 'xcsr', 'zcsr', 'ZernikeR',
    )
    # Additional built-in functions (string/info/reference-returning).
    functions += (
        'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo',
        'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName',
        'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo',
        'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date',
        'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo',
        'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont',
        'GetDimLabel', 'GetErrMessage', 'GetFormula',
        'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR',
        'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
        'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash',
        'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile',
        'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList',
        'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str',
        'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo',
        'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey',
        'RemoveEnding', 'RemoveFromList', 'RemoveListItem',
        'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey',
        'Secs2Date', 'Secs2Time', 'SelectString', 'SortList',
        'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath',
        'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault',
        'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel',
        'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr',
        'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits',
        'WinList', 'WinName', 'WinRecreation', 'XWaveName',
        'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef',
        'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef',
        'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR',
        'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR',
    )

    # State machine: a single 'root' state; earlier rules win, so comments
    # and strings are consumed before any word-list matching.
    tokens = {
        'root': [
            (r'//.*$', Comment.Single),
            (r'"([^"\\]|\\.)*"', String),
            # Flow Control.
            (words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword),
            # Types.
            (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            # Keywords.
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
            # Built-in operations.
            (words(operations, prefix=r'\b', suffix=r'\b'), Name.Class),
            # Built-in functions.
            (words(functions, prefix=r'\b', suffix=r'\b'), Name.Function),
            # Compiler directives.
            (r'^#(include|pragma|define|ifdef|ifndef|endif)',
             Name.Decorator),
            # Fast path: consume a run of characters that cannot start a
            # comment, string, or identifier; otherwise fall through char by char.
            (r'[^a-z"/]+$', Text),
            (r'.', Text),
        ],
    }
| mit |
doug-fish/horizon | openstack_dashboard/api/rest/config.py | 45 | 1598 | # Copyright 2015 IBM Corp.
# Copyright 2015, Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from django.views import generic
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
# settings that we allow to be retrieved via REST API
# these settings are available to the client and are not secured.
# *** THEY SHOULD BE TREATED WITH EXTREME CAUTION ***
settings_required = getattr(settings, 'REST_API_REQUIRED_SETTINGS', [])
settings_additional = getattr(settings, 'REST_API_ADDITIONAL_SETTINGS', [])
# the full whitelist served by the Settings view below
settings_allowed = settings_required + settings_additional
@urls.register
class Settings(generic.View):
    """Read-only REST endpoint exposing whitelisted Django settings.

    Only names present in ``settings_allowed`` are returned (for example
    OPENSTACK_HYPERVISOR_FEATURES); clients may fetch this configuration
    object as needed.
    """
    url_regex = r'settings/$'

    @rest_utils.ajax()
    def get(self, request):
        """Return a mapping of each allowed setting name to its value (or None)."""
        allowed = {}
        for name in settings_allowed:
            allowed[name] = getattr(settings, name, None)
        return allowed
| apache-2.0 |
2015fallproject/2015fallcase1 | static/Brython3.2.0-20150701-214155/Lib/test/unittests/test_opcodes.py | 175 | 2675 | # Python test set -- part 2, opcodes
from test.support import run_unittest
import unittest
class OpcodeTest(unittest.TestCase):
    # These tests deliberately exercise specific interpreter opcode paths
    # (exception setup/teardown, function comparison, operator dispatch);
    # the exact statement shapes matter, so do not "clean them up".

    def test_try_inside_for_loop(self):
        # Exception blocks inside a loop body must not disturb loop state:
        # n accumulates 2*i per iteration, so the total is 2*45 == 90.
        n = 0
        for i in range(10):
            n = n+i
            try: 1/0
            except NameError: pass
            except ZeroDivisionError: pass
            except TypeError: pass
            try: pass
            except: pass
            try: pass
            finally: pass
            n = n+i
        if n != 90:
            self.fail('try inside for')

    def test_raise_class_exceptions(self):
        # Matching rules for class-based exceptions: a handler catches the
        # raised class and its subclasses, but not sibling classes.
        class AClass(Exception): pass
        class BClass(AClass): pass
        class CClass(Exception): pass
        class DClass(AClass):
            def __init__(self, ignore):
                pass

        try: raise AClass()
        except: pass

        try: raise AClass()
        except AClass: pass

        # subclass instance is caught by the base-class handler
        try: raise BClass()
        except AClass: pass

        # unrelated class must fall through to the bare handler
        try: raise BClass()
        except CClass: self.fail()
        except: pass

        a = AClass()
        b = BClass()

        # the caught object is the very instance that was raised
        try:
            raise b
        except AClass as v:
            self.assertEqual(v, b)
        else:
            self.fail("no exception")

        # not enough arguments
        ##try:  raise BClass, a
        ##except TypeError: pass
        ##else: self.fail("no exception")

        try:  raise DClass(a)
        except DClass as v:
            self.assertIsInstance(v, DClass)
        else:
            self.fail("no exception")

    def test_compare_function_objects(self):
        # Distinct function objects never compare equal, even when their
        # source text is identical; eval() guarantees fresh objects.
        f = eval('lambda: None')
        g = eval('lambda: None')
        self.assertNotEqual(f, g)

        f = eval('lambda a: a')
        g = eval('lambda a: a')
        self.assertNotEqual(f, g)

        f = eval('lambda a=1: a')
        g = eval('lambda a=1: a')
        self.assertNotEqual(f, g)

        f = eval('lambda: 0')
        g = eval('lambda: 1')
        self.assertNotEqual(f, g)

        f = eval('lambda: None')
        g = eval('lambda a: None')
        self.assertNotEqual(f, g)

        f = eval('lambda a: None')
        g = eval('lambda b: None')
        self.assertNotEqual(f, g)

        f = eval('lambda a: None')
        g = eval('lambda a=None: None')
        self.assertNotEqual(f, g)

        f = eval('lambda a=0: None')
        g = eval('lambda a=1: None')
        self.assertNotEqual(f, g)

    def test_modulo_of_string_subclasses(self):
        # A str subclass overriding __mod__ must be dispatched to by the
        # BINARY_MODULO opcode instead of the built-in str formatting.
        class MyString(str):
            def __mod__(self, value):
                return 42
        self.assertEqual(MyString() % 3, 42)
def test_main():
    # Entry point used by the regrtest driver to run this module's tests.
    run_unittest(OpcodeTest)
if __name__ == '__main__':
    # Allow running this test module directly.
    test_main()
| agpl-3.0 |
Guidobelix/pyload | module/lib/beaker/crypto/pbkdf2.py | 43 | 11924 | #!/usr/bin/python
# -*- coding: ascii -*-
###########################################################################
# PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation
#
# Copyright (C) 2007 Dwayne C. Litzenberger <dlitz@dlitz.net>
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Country of origin: Canada
#
###########################################################################
# Sample PBKDF2 usage:
# from Crypto.Cipher import AES
# from PBKDF2 import PBKDF2
# import os
#
# salt = os.urandom(8) # 64-bit salt
# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
# iv = os.urandom(16) # 128-bit IV
# cipher = AES.new(key, AES.MODE_CBC, iv)
# ...
#
# Sample crypt() usage:
# from PBKDF2 import crypt
# pwhash = crypt("secret")
# alleged_pw = raw_input("Enter password: ")
# if pwhash == crypt(alleged_pw, pwhash):
# print "Password good"
# else:
# print "Invalid password"
#
###########################################################################
# History:
#
# 2007-07-27 Dwayne C. Litzenberger <dlitz@dlitz.net>
# - Initial Release (v1.0)
#
# 2007-07-31 Dwayne C. Litzenberger <dlitz@dlitz.net>
# - Bugfix release (v1.1)
# - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor
# function in the previous release) silently truncates all keys to 64
# bytes. The way it was used in the previous release, this would only be
# problem if the pseudorandom function that returned values larger than
# 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like
# anything that silently reduces the security margin from what is
# expected.
#
###########################################################################
__version__ = "1.1"
from struct import pack
from binascii import b2a_hex
from random import randint
from base64 import b64encode
from beaker.crypto.util import hmac as HMAC, hmac_sha1 as SHA1
def strxor(a, b):
    """Return the byte-wise XOR of two strings (truncates to the shorter)."""
    xored = []
    for ch_a, ch_b in zip(a, b):
        xored.append(chr(ord(ch_a) ^ ord(ch_b)))
    return "".join(xored)
class PBKDF2(object):
    """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation

    This implementation takes a passphrase and a salt (and optionally an
    iteration count, a digest module, and a MAC module) and provides a
    file-like object from which an arbitrarily-sized key can be read.

    If the passphrase and/or salt are unicode objects, they are encoded as
    UTF-8 before they are processed.

    The idea behind PBKDF2 is to derive a cryptographic key from a
    passphrase and a salt.

    PBKDF2 may also be used as a strong salted password hash.  The
    'crypt' function is provided for that purpose.

    Remember: Keys generated using PBKDF2 are only as strong as the
    passphrases they are derived from.

    NOTE(review): this class uses ``unicode`` and ``xrange``, so as written
    it runs on Python 2 only -- confirm the target interpreter.
    """
    def __init__(self, passphrase, salt, iterations=1000,
                 digestmodule=SHA1, macmodule=HMAC):
        # Accept either a callable MAC constructor (e.g. hmac.new) or a
        # module-style object that exposes a .new() factory.
        if not callable(macmodule):
            macmodule = macmodule.new
        self.__macmodule = macmodule
        self.__digestmodule = digestmodule
        self._setup(passphrase, salt, iterations, self._pseudorandom)
    def _pseudorandom(self, key, msg):
        """Pseudorandom function.  e.g. HMAC-SHA1"""
        # One PRF invocation: MAC(key, msg) with the configured digest.
        return self.__macmodule(key=key, msg=msg,
                                digestmod=self.__digestmodule).digest()
    def read(self, bytes):
        """Read the specified number of key bytes."""
        if self.closed:
            raise ValueError("file-like object is closed")
        size = len(self.__buf)
        blocks = [self.__buf]
        i = self.__blockNum
        # Generate successive blocks F(i) until enough key material is
        # buffered to satisfy the request.
        while size < bytes:
            i += 1
            if i > 0xffffffff:
                # We could return "" here, but
                raise OverflowError("derived key too long")
            block = self.__f(i)
            blocks.append(block)
            size += len(block)
        buf = "".join(blocks)
        retval = buf[:bytes]
        # Keep the unconsumed tail so a later read() continues seamlessly.
        self.__buf = buf[bytes:]
        self.__blockNum = i
        return retval
    def __f(self, i):
        # Block function F(P, S, c, i) from RFC 2898 section 5.2:
        # the XOR of U_1 .. U_c, where U_1 = PRF(P, S || INT(i)) and
        # U_j = PRF(P, U_{j-1}).
        # i must fit within 32 bits
        assert (1 <= i <= 0xffffffff)
        U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
        result = U
        for j in xrange(2, 1+self.__iterations):
            U = self.__prf(self.__passphrase, U)
            result = strxor(result, U)
        return result
    def hexread(self, octets):
        """Read the specified number of octets. Return them as hexadecimal.

        Note that len(obj.hexread(n)) == 2*n.
        """
        return b2a_hex(self.read(octets))
    def _setup(self, passphrase, salt, iterations, prf):
        # Validate and normalise all parameters, then initialise the
        # stream state (block counter and carry-over buffer).
        # Sanity checks:
        # passphrase and salt must be str or unicode (in the latter
        # case, we convert to UTF-8)
        if isinstance(passphrase, unicode):
            passphrase = passphrase.encode("UTF-8")
        if not isinstance(passphrase, str):
            raise TypeError("passphrase must be str or unicode")
        if isinstance(salt, unicode):
            salt = salt.encode("UTF-8")
        if not isinstance(salt, str):
            raise TypeError("salt must be str or unicode")
        # iterations must be an integer >= 1
        if not isinstance(iterations, (int, long)):
            raise TypeError("iterations must be an integer")
        if iterations < 1:
            raise ValueError("iterations must be at least 1")
        # prf must be callable
        if not callable(prf):
            raise TypeError("prf must be callable")
        self.__passphrase = passphrase
        self.__salt = salt
        self.__iterations = iterations
        self.__prf = prf
        self.__blockNum = 0
        self.__buf = ""
        self.closed = False
    def close(self):
        """Close the stream."""
        # Drop all key material so it is no longer reachable from this
        # object; subsequent read() calls raise ValueError.
        if not self.closed:
            del self.__passphrase
            del self.__salt
            del self.__iterations
            del self.__prf
            del self.__blockNum
            del self.__buf
            self.closed = True
def crypt(word, salt=None, iterations=None):
    """PBKDF2-based unix crypt(3) replacement.

    The number of iterations specified in the salt overrides the 'iterations'
    parameter.

    The effective hash length is 192 bits.

    Returns a string of the form ``$p5k2$<iterations-hex>$<salt>$<b64-hash>``
    (the iterations field is empty for the default of 400).
    NOTE(review): uses the Python 2 ``unicode`` builtin -- py2 only as written.
    """
    # Generate a (pseudo-)random salt if the user hasn't provided one.
    if salt is None:
        salt = _makesalt()
    # salt must be a string or the us-ascii subset of unicode
    if isinstance(salt, unicode):
        salt = salt.encode("us-ascii")
    if not isinstance(salt, str):
        raise TypeError("salt must be a string")
    # word must be a string or unicode (in the latter case, we convert to UTF-8)
    if isinstance(word, unicode):
        word = word.encode("UTF-8")
    if not isinstance(word, str):
        raise TypeError("word must be a string or unicode")
    # Try to extract the real salt and iteration count from the salt
    # (supports passing a previous crypt() result back in, as crypt(3) does).
    if salt.startswith("$p5k2$"):
        (iterations, salt, dummy) = salt.split("$")[2:5]
        if iterations == "":
            iterations = 400
        else:
            converted = int(iterations, 16)
            if iterations != "%x" % converted:  # lowercase hex, minimum digits
                raise ValueError("Invalid salt")
            iterations = converted
            if not (iterations >= 1):
                raise ValueError("Invalid salt")
    # Make sure the salt matches the allowed character set
    allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
    for ch in salt:
        if ch not in allowed:
            raise ValueError("Illegal character %r in salt" % (ch,))
    # Re-encode the salt prefix; 400 iterations is the implicit default and
    # is encoded as an empty iterations field.
    if iterations is None or iterations == 400:
        iterations = 400
        salt = "$p5k2$$" + salt
    else:
        salt = "$p5k2$%x$%s" % (iterations, salt)
    # 24 raw bytes -> 192-bit hash, base64-encoded with "./" as altchars.
    rawhash = PBKDF2(word, salt, iterations).read(24)
    return salt + "$" + b64encode(rawhash, "./")
# Expose crypt as a static method of the PBKDF2 class as well, so that
# callers doing "from PBKDF2 import PBKDF2" can still reach it as
# PBKDF2.crypt(...).
PBKDF2.crypt = staticmethod(crypt)
def _makesalt():
    """Return a 48-bit pseudorandom salt for crypt().

    This function is not suitable for generating cryptographic secrets.
    """
    # Three random 16-bit values packed in native byte order, base64-encoded
    # with "./" as altchars so the result stays within crypt()'s salt
    # alphabet.  NOTE(review): passing a str as b64encode's altchars is
    # Python 2 only -- confirm before running under Python 3.
    binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)])
    return b64encode(binarysalt, "./")
def test_pbkdf2():
    """Module self-test (RFC 3962 key-derivation vectors plus crypt() vectors)."""
    from binascii import a2b_hex

    def _check(actual, expected):
        # Fail fast on the first mismatch, exactly like the original checks.
        if actual != expected:
            raise RuntimeError("self-test failed")

    # Test vectors from RFC 3962
    _check(PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16),
           a2b_hex("cdedb5281bb2f801565a1122b2563515"))
    _check(PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32),
           "5c08eb61fdf71e4e4ec3cf6ba1f5512b"
           "a7e52ddbc5e5142f708a31e2e62b1e13")
    _check(PBKDF2("X" * 64, "pass phrase equals block size", 1200).hexread(32),
           "139c30c0966bc32ba55fdbf212530ac9"
           "c5ec59f1a452f5cc9ad940fea0598ed1")
    _check(PBKDF2("X" * 65, "pass phrase exceeds block size", 1200).hexread(32),
           "9ccad6d468770cd51b10e6a68721be61"
           "1a8b4d282601db3b36be9246915ec82a")

    # Other test vectors: chunked reads must concatenate to one large read.
    stream = PBKDF2("kickstart", "workbench", 256)
    chunked = "".join([stream.read(n) for n in (17, 17, 1, 2, 3)])
    _check(chunked, PBKDF2("kickstart", "workbench", 256).read(40))

    # crypt() test vectors
    _check(crypt("cloadm", "exec"),
           '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql')
    _check(crypt("gnu", '$p5k2$c$u9HvcT4d$.....'),
           '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g')
    _check(crypt("dcl", "tUsch7fU", iterations=13),
           "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL")
    # unicode passphrase round-trips through a previous crypt() result
    _check(crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2',
                 '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ'),
           '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ')

if __name__ == '__main__':
    test_pbkdf2()
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 |
daenamkim/ansible | lib/ansible/modules/net_tools/snmp_facts.py | 24 | 12848 | #!/usr/bin/python
# This file is part of Networklore's snmp library for Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible tooling: schema version 1.1,
# lifecycle status "preview", maintained by the community (not core).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: snmp_facts
version_added: "1.9"
author: "Patrick Ogenstad (@ogenstad)"
short_description: Retrieve facts for a device using SNMP.
description:
- Retrieve facts for a device using SNMP, the facts will be
inserted to the ansible_facts key.
requirements:
- pysnmp
options:
host:
description:
- Set to target snmp server (normally {{inventory_hostname}})
required: true
version:
description:
- SNMP Version to use, v2/v2c or v3
choices: [ 'v2', 'v2c', 'v3' ]
required: true
community:
description:
- The SNMP community string, required if version is v2/v2c
required: false
level:
description:
- Authentication level, required if version is v3
choices: [ 'authPriv', 'authNoPriv' ]
required: false
username:
description:
- Username for SNMPv3, required if version is v3
required: false
integrity:
description:
- Hashing algorithm, required if version is v3
choices: [ 'md5', 'sha' ]
required: false
authkey:
description:
- Authentication key, required if version is v3
required: false
privacy:
description:
- Encryption algorithm, required if level is authPriv
choices: [ 'des', 'aes' ]
required: false
privkey:
description:
- Encryption key, required if version is authPriv
required: false
'''
EXAMPLES = '''
# Gather facts with SNMP version 2
- snmp_facts:
host: '{{ inventory_hostname }}'
version: v2c
community: public
delegate_to: local
# Gather facts using SNMP version 3
- snmp_facts:
host: '{{ inventory_hostname }}'
version: v3
level: authPriv
integrity: sha
privacy: aes
username: snmp-user
authkey: abc12345
privkey: def6789
delegate_to: localhost
'''
import binascii
from collections import defaultdict
# pysnmp is an optional dependency; record its availability so main() can
# fail with a clear message instead of an import traceback.
try:
    from pysnmp.entity.rfc3413.oneliner import cmdgen
    has_pysnmp = True
except ImportError:
    # BUG FIX: the original bare `except:` also swallowed unrelated errors
    # (KeyboardInterrupt, SystemExit, syntax errors inside pysnmp), silently
    # reporting them as "pysnmp missing".  Catch only ImportError.
    has_pysnmp = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
class DefineOid(object):
    """Container for the SNMP OIDs this module polls.

    With ``dotprefix=True`` every OID carries a leading dot (the form used
    when issuing requests); with the default ``False`` the OIDs match the
    strings pysnmp returns, so responses can be compared against them.
    """

    def __init__(self, dotprefix=False):
        dp = "." if dotprefix else ""

        # From SNMPv2-MIB
        self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
        self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
        self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
        self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
        self.sysName = dp + "1.3.6.1.2.1.1.5.0"
        self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"

        # From IF-MIB
        self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
        self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
        self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
        self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
        self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
        self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
        self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
        self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"

        # From IP-MIB
        self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
        self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
        self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
def decode_hex(hexstring):
    """Decode a pysnmp "0x..." hex string to text; pass anything else through."""
    # Anything shorter than "0x" plus one digit cannot be a hex payload.
    if len(hexstring) < 3 or not hexstring.startswith("0x"):
        return hexstring
    return to_text(binascii.unhexlify(hexstring[2:]))
def decode_mac(hexstring):
    """Strip the "0x" prefix from a 14-character pysnmp MAC string."""
    # "0x" + 12 hex digits == 14 characters; anything else passes through.
    if len(hexstring) == 14 and hexstring.startswith("0x"):
        return hexstring[2:]
    return hexstring
def lookup_adminstatus(int_adminstatus):
    """Map an IF-MIB ifAdminStatus integer to its name ("" if unknown)."""
    names = {
        1: 'up',
        2: 'down',
        3: 'testing',
    }
    return names.get(int_adminstatus, "")
def lookup_operstatus(int_operstatus):
    """Map an IF-MIB ifOperStatus integer to its name ("" if unknown)."""
    names = {
        1: 'up',
        2: 'down',
        3: 'testing',
        4: 'unknown',
        5: 'dormant',
        6: 'notPresent',
        7: 'lowerLayerDown',
    }
    return names.get(int_operstatus, "")
def main():
    """Poll an SNMP agent and return the collected data as ansible_facts.

    Gathers system-level facts (SNMPv2-MIB) with a GET, then walks the
    interface (IF-MIB) and address (IP-MIB) tables with GETNEXT, and
    finally calls module.exit_json() with the assembled facts.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True),
            version=dict(required=True, choices=['v2', 'v2c', 'v3']),
            community=dict(required=False, default=False),
            username=dict(required=False),
            level=dict(required=False, choices=['authNoPriv', 'authPriv']),
            integrity=dict(required=False, choices=['md5', 'sha']),
            privacy=dict(required=False, choices=['des', 'aes']),
            authkey=dict(required=False),
            privkey=dict(required=False),
            removeplaceholder=dict(required=False)),
        required_together=(['username', 'level', 'integrity', 'authkey'],
                           ['privacy', 'privkey'],),
        supports_check_mode=False)

    m_args = module.params

    if not has_pysnmp:
        module.fail_json(msg='Missing required pysnmp module (check docs)')

    cmdGen = cmdgen.CommandGenerator()

    # Verify that we receive a community when using snmp v2
    if m_args['version'] == "v2" or m_args['version'] == "v2c":
        if m_args['community'] is False:
            module.fail_json(msg='Community not set when using snmp version 2')

    # Validate v3 parameters and resolve the pysnmp protocol constants.
    if m_args['version'] == "v3":
        if m_args['username'] is None:
            module.fail_json(msg='Username not set when using snmp version 3')

        if m_args['level'] == "authPriv" and m_args['privacy'] is None:
            module.fail_json(msg='Privacy algorithm not set when using authPriv')

        if m_args['integrity'] == "sha":
            integrity_proto = cmdgen.usmHMACSHAAuthProtocol
        elif m_args['integrity'] == "md5":
            integrity_proto = cmdgen.usmHMACMD5AuthProtocol

        if m_args['privacy'] == "aes":
            privacy_proto = cmdgen.usmAesCfb128Protocol
        elif m_args['privacy'] == "des":
            privacy_proto = cmdgen.usmDESPrivProtocol

    # Use SNMP Version 2
    if m_args['version'] == "v2" or m_args['version'] == "v2c":
        snmp_auth = cmdgen.CommunityData(m_args['community'])
    # Use SNMP Version 3 with authNoPriv
    elif m_args['level'] == "authNoPriv":
        snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'],
                                       authProtocol=integrity_proto)
    # Use SNMP Version 3 with authPriv
    else:
        snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'],
                                       privKey=m_args['privkey'], authProtocol=integrity_proto,
                                       privProtocol=privacy_proto)

    # p: OIDs with a leading dot (used when polling);
    # v: the same OIDs without it (used to match returned values).
    p = DefineOid(dotprefix=True)
    v = DefineOid(dotprefix=False)

    def Tree():
        # Infinitely-nestable dict: results['a']['b']['c'] auto-creates levels.
        return defaultdict(Tree)

    results = Tree()

    # System-level facts: a single GET for the SNMPv2-MIB scalars.
    errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.sysDescr,),
        cmdgen.MibVariable(p.sysObjectId,),
        cmdgen.MibVariable(p.sysUpTime,),
        cmdgen.MibVariable(p.sysContact,),
        cmdgen.MibVariable(p.sysName,),
        cmdgen.MibVariable(p.sysLocation,),
        lookupMib=False
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    for oid, val in varBinds:
        current_oid = oid.prettyPrint()
        current_val = val.prettyPrint()
        if current_oid == v.sysDescr:
            results['ansible_sysdescr'] = decode_hex(current_val)
        elif current_oid == v.sysObjectId:
            results['ansible_sysobjectid'] = current_val
        elif current_oid == v.sysUpTime:
            results['ansible_sysuptime'] = current_val
        elif current_oid == v.sysContact:
            results['ansible_syscontact'] = current_val
        elif current_oid == v.sysName:
            results['ansible_sysname'] = current_val
        elif current_oid == v.sysLocation:
            results['ansible_syslocation'] = current_val

    # Interface and address tables: one GETNEXT walk over IF-MIB / IP-MIB.
    errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.ifIndex,),
        cmdgen.MibVariable(p.ifDescr,),
        cmdgen.MibVariable(p.ifMtu,),
        cmdgen.MibVariable(p.ifSpeed,),
        cmdgen.MibVariable(p.ifPhysAddress,),
        cmdgen.MibVariable(p.ifAdminStatus,),
        cmdgen.MibVariable(p.ifOperStatus,),
        cmdgen.MibVariable(p.ipAdEntAddr,),
        cmdgen.MibVariable(p.ipAdEntIfIndex,),
        cmdgen.MibVariable(p.ipAdEntNetMask,),
        cmdgen.MibVariable(p.ifAlias,),
        lookupMib=False
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    interface_indexes = []
    all_ipv4_addresses = []
    ipv4_networks = Tree()

    for varBinds in varTable:
        for oid, val in varBinds:
            current_oid = oid.prettyPrint()
            current_val = val.prettyPrint()
            # The table row index (ifIndex or the IP address) is the OID tail.
            if v.ifIndex in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
                interface_indexes.append(ifIndex)
            if v.ifDescr in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['name'] = current_val
            if v.ifMtu in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['mtu'] = current_val
            # BUG FIX: this branch previously tested v.ifMtu a second time,
            # so 'speed' was populated with the interface MTU value.
            if v.ifSpeed in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['speed'] = current_val
            if v.ifPhysAddress in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
            if v.ifAdminStatus in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
            if v.ifOperStatus in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
            if v.ipAdEntAddr in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['address'] = current_val
                all_ipv4_addresses.append(current_val)
            if v.ipAdEntIfIndex in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['interface'] = current_val
            if v.ipAdEntNetMask in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['netmask'] = current_val
            if v.ifAlias in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['description'] = current_val

    # Group the collected networks by owning interface index.
    interface_to_ipv4 = {}
    for ipv4_network in ipv4_networks:
        current_interface = ipv4_networks[ipv4_network]['interface']
        current_network = {
            'address': ipv4_networks[ipv4_network]['address'],
            'netmask': ipv4_networks[ipv4_network]['netmask']
        }
        # setdefault collapses the original duplicated if/else append branches.
        interface_to_ipv4.setdefault(current_interface, []).append(current_network)

    for interface in interface_to_ipv4:
        results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]

    results['ansible_all_ipv4_addresses'] = all_ipv4_addresses

    module.exit_json(ansible_facts=results)

if __name__ == '__main__':
    main()
| gpl-3.0 |
YinongLong/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 21 | 10259 | """Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the iris dataset, keeping only two of its features (columns 1 and 2).
# NOTE(review): despite the original comment, no random permutation is
# actually performed here.
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
    """Invalid constructor arguments must raise with a helpful message."""
    assert_raise_message(
        AttributeError,
        'Invalid `estimators` attribute, `estimators` should be'
        ' a list of (string, estimator) tuples',
        VotingClassifier(estimators=[]).fit, X, y)

    clf = LogisticRegression(random_state=1)

    assert_raise_message(
        ValueError,
        "Voting must be 'soft' or 'hard'; got (voting='error')",
        VotingClassifier(estimators=[('lr', clf)], voting='error').fit, X, y)

    assert_raise_message(
        ValueError,
        'Number of classifiers and weights must be equal'
        '; got 2 weights, 1 estimators',
        VotingClassifier(estimators=[('lr', clf)], weights=[1, 2]).fit, X, y)
def test_predictproba_hardvoting():
    """predict_proba must not be available when voting='hard'."""
    hard_voter = VotingClassifier(
        estimators=[('lr1', LogisticRegression()),
                    ('lr2', LogisticRegression())],
        voting='hard')
    assert_raise_message(AttributeError,
                         "predict_proba is not available when voting='hard'",
                         hard_voter.predict_proba, X)
def test_notfitted():
    """Calling predict_proba before fit must raise NotFittedError."""
    unfitted = VotingClassifier(
        estimators=[('lr1', LogisticRegression()),
                    ('lr2', LogisticRegression())],
        voting='soft')
    expected_msg = ("This VotingClassifier instance is not fitted yet. "
                    "Call 'fit' with appropriate arguments before using "
                    "this method.")
    assert_raise_message(NotFittedError, expected_msg, unfitted.predict_proba, X)
def test_majority_label_iris():
    """Check classification by majority label on dataset iris."""
    ensemble = VotingClassifier(
        estimators=[('lr', LogisticRegression(random_state=123)),
                    ('rf', RandomForestClassifier(random_state=123)),
                    ('gnb', GaussianNB())],
        voting='hard')
    mean_accuracy = cross_val_score(ensemble, X, y, cv=5,
                                    scoring='accuracy').mean()
    assert_almost_equal(mean_accuracy, 0.95, decimal=2)
def test_tie_situation():
    """Check voting classifier selects smaller class label in tie situation."""
    lr = LogisticRegression(random_state=123)
    rf = RandomForestClassifier(random_state=123)
    voter = VotingClassifier(estimators=[('lr', lr), ('rf', rf)],
                             voting='hard')
    # Sample 73 is the disagreement point: lr says 2, rf says 1.
    assert_equal(lr.fit(X, y).predict(X)[73], 2)
    assert_equal(rf.fit(X, y).predict(X)[73], 1)
    # On a tie the ensemble picks the smaller label.
    assert_equal(voter.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
    """Check classification by average probabilities on dataset iris."""
    weighted = VotingClassifier(
        estimators=[('lr', LogisticRegression(random_state=123)),
                    ('rf', RandomForestClassifier(random_state=123)),
                    ('gnb', GaussianNB())],
        voting='soft',
        weights=[1, 2, 10])
    mean_accuracy = cross_val_score(weighted, X, y, cv=5,
                                    scoring='accuracy').mean()
    assert_almost_equal(mean_accuracy, 0.93, decimal=2)
def test_predict_on_toy_problem():
    """Manually check predicted class labels for toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()

    X = np.array([[-1.1, -1.5],
                  [-1.2, -1.4],
                  [-3.4, -2.2],
                  [1.1, 1.2],
                  [2.1, 1.4],
                  [3.1, 2.3]])
    y = np.array([1, 1, 1, 2, 2, 2])
    expected = [1, 1, 1, 2, 2, 2]

    # BUG FIX: the original asserted all(pred) == all(expected), which
    # reduces both sides to the boolean True (every label is non-zero) and
    # therefore could never fail.  Compare the predictions element-wise.
    assert_array_equal(clf1.fit(X, y).predict(X), expected)
    assert_array_equal(clf2.fit(X, y).predict(X), expected)
    assert_array_equal(clf3.fit(X, y).predict(X), expected)

    hard_voter = VotingClassifier(
        estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='hard',
        weights=[1, 1, 1])
    assert_array_equal(hard_voter.fit(X, y).predict(X), expected)

    soft_voter = VotingClassifier(
        estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[1, 1, 1])
    assert_array_equal(soft_voter.fit(X, y).predict(X), expected)
def test_predict_proba_on_toy_problem():
    """Calculate predicted probabilities on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])

    # Reference per-estimator probabilities for the four samples.
    clf1_res = np.array([[0.59790391, 0.40209609],
                         [0.57622162, 0.42377838],
                         [0.50728456, 0.49271544],
                         [0.40241774, 0.59758226]])

    clf2_res = np.array([[0.8, 0.2],
                         [0.8, 0.2],
                         [0.2, 0.8],
                         [0.3, 0.7]])

    clf3_res = np.array([[0.9985082, 0.0014918],
                         [0.99845843, 0.00154157],
                         [0., 1.],
                         [0., 1.]])

    # Soft voting with weights (2, 1, 1) is the weighted mean of the
    # individual probability matrices.
    expected = (2 * clf1_res + clf2_res + clf3_res) / 4

    soft_voter = VotingClassifier(
        estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[2, 1, 1])
    eclf_res = soft_voter.fit(X, y).predict_proba(X)

    assert_almost_equal(expected[0][0], eclf_res[0][0], decimal=1)
    assert_almost_equal(expected[1][1], eclf_res[1][1], decimal=1)
    assert_almost_equal(expected[2][1], eclf_res[2][1], decimal=1)
    assert_almost_equal(expected[3][1], eclf_res[3][1], decimal=1)

    try:
        VotingClassifier(
            estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
            voting='hard').fit(X, y).predict_proba(X)
    except AttributeError:
        pass
    else:
        raise AssertionError('AttributeError for voting == "hard"'
                             ' and with predict_proba not raised')
def test_multilabel():
    """Check if error is raised for multilabel classification."""
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          random_state=123)
    ovr = OneVsRestClassifier(SVC(kernel='linear'))
    ensemble = VotingClassifier(estimators=[('ovr', ovr)], voting='hard')

    try:
        ensemble.fit(X, y)
    except NotImplementedError:
        return
def test_gridsearch():
    """Check GridSearch support."""
    ensemble = VotingClassifier(
        estimators=[('lr', LogisticRegression(random_state=1)),
                    ('rf', RandomForestClassifier(random_state=1)),
                    ('gnb', GaussianNB())],
        voting='soft')

    param_grid = {'lr__C': [1.0, 100.0],
                  'voting': ['soft', 'hard'],
                  'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}

    GridSearchCV(estimator=ensemble, param_grid=param_grid,
                 cv=5).fit(iris.data, iris.target)
def test_parallel_predict():
    """Check parallel backend of VotingClassifier on toy dataset."""
    estimators = [('lr', LogisticRegression(random_state=123)),
                  ('rf', RandomForestClassifier(random_state=123)),
                  ('gnb', GaussianNB())]
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])

    serial = VotingClassifier(estimators=estimators, voting='soft',
                              n_jobs=1).fit(X, y)
    parallel = VotingClassifier(estimators=estimators, voting='soft',
                                n_jobs=2).fit(X, y)

    # Results must be identical regardless of the number of jobs.
    assert_array_equal(serial.predict(X), parallel.predict(X))
    assert_array_equal(serial.predict_proba(X), parallel.predict_proba(X))
def test_sample_weight():
    """Tests sample_weight parameter of VotingClassifier"""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = SVC(probability=True, random_state=123)
    base_estimators = [('lr', clf1), ('rf', clf2), ('svc', clf3)]

    # Uniform weights must be equivalent to passing no weights at all.
    uniform = VotingClassifier(estimators=base_estimators, voting='soft').fit(
        X, y, sample_weight=np.ones((len(y),)))
    unweighted = VotingClassifier(estimators=base_estimators,
                                  voting='soft').fit(X, y)
    assert_array_equal(uniform.predict(X), unweighted.predict(X))
    assert_array_equal(uniform.predict_proba(X), unweighted.predict_proba(X))

    # A single-estimator ensemble must forward the weights unchanged.
    sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
    single = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
    single.fit(X, y, sample_weight)
    clf1.fit(X, y, sample_weight)
    assert_array_equal(single.predict(X), clf1.predict(X))
    assert_array_equal(single.predict_proba(X), clf1.predict_proba(X))

    # Estimators without sample_weight support must be rejected up front.
    clf4 = KNeighborsClassifier()
    unsupported = VotingClassifier(
        estimators=[('lr', clf1), ('svc', clf3), ('knn', clf4)],
        voting='soft')
    assert_raise_message(
        ValueError,
        "Underlying estimator 'knn' does not support sample weights.",
        unsupported.fit, X, y, sample_weight)
| bsd-3-clause |
Jorge-Rodriguez/ansible-modules-core | cloud/docker/docker_login.py | 39 | 9349 | #!/usr/bin/python
#
# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
# Chris Houseknecht, <house@redhat.com>
# James Tanner, <jtanner@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: docker_login
short_description: Log into a Docker registry.
version_added: "2.0"
description:
- Provides functionality similar to the "docker login" command.
- Authenticate with a docker registry and add the credentials to your local Docker config file. Adding the
credentials to the config files allows future connections to the registry using tools such as Ansible's Docker
modules, the Docker CLI and docker-py without needing to provide credentials.
- Running in check mode will perform the authentication without updating the config file.
options:
registry_url:
description:
- The registry URL.
default: "https://index.docker.io/v1/"
aliases:
- registry
- url
username:
description:
- The username for the registry account
required: true
default: null
password:
description:
- The plaintext password for the registry account
required: true
default: null
email:
description:
- "The email address for the registry account. NOTE: private registries may not require this,
but Docker Hub requires it."
default: None
reauthorize:
description:
- Refresh exiting authentication found in the configuration file.
default: false
aliases:
- reauth
config_path:
description:
- Custom path to the Docker CLI configuration file.
default: ~/.docker/config.json
aliases:
- self.config_path
- dockercfg_path
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
authors:
- "Olaf Kilian <olaf.kilian@symanex.com>"
- "Chris Houseknecht (@chouseknecht)"
- "James Tanner (@jctanner)"
'''
EXAMPLES = '''
- name: Log into DockerHub
docker_login:
username: docker
password: rekcod
email: docker@docker.io
- name: Log into private registry and force re-authorization
docker_login:
registry: your.private.registry.io
username: yourself
password: secrets3
reauthorize: yes
- name: Log into DockerHub using a custom config file
docker_login:
username: docker
password: rekcod
email: docker@docker.io
config_path: /tmp/.mydockercfg
'''
RETURN = '''
actions:
description: List of actions taken by the module.
returned: always
type: list
sample: [
"Log into https://index.docker.io/v1/",
"Updated config file /Users/chouseknecht/.docker/config.json with new authorization for https://index.docker.io/v1/"
]
login_results:
description: Results from the login.
returned: always
type: dict
sample: {
"email": "testuer@yahoo.com",
"password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"serveraddress": "localhost:5000",
"username": "testuser"
}
'''
import base64
from ansible.module_utils.docker_common import *
class LoginManager(DockerBaseClass):
def __init__(self, client, results):
super(LoginManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.registry_url = parameters.get('registry_url')
self.username = parameters.get('username')
self.password = parameters.get('password')
self.email = parameters.get('email')
self.reauthorize = parameters.get('reauthorize')
self.config_path = parameters.get('config_path')
self.login()
def fail(self, msg):
self.client.fail(msg)
def login(self):
'''
Log into the registry with provided username/password. On success update the config
file with the new authorization.
:return: None
'''
if self.email and not re.match(EMAIL_REGEX, self.email):
self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
"/%s/" % (EMAIL_REGEX))
self.results['actions'].append("Logged into %s" % (self.registry_url))
self.log("Log into %s with username %s" % (self.registry_url, self.username))
try:
response = self.client.login(
self.username,
password=self.password,
email=self.email,
registry=self.registry_url,
reauth=self.reauthorize,
dockercfg_path=self.config_path
)
except Exception as exc:
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
self.results['login_result'] = response
if not self.check_mode:
self.update_config_file()
def config_file_exists(self, path):
if os.path.exists(path):
self.log("Configuration file %s exists" % (path))
return True
self.log("Configuration file %s not found." % (path))
return False
def create_config_file(self, path):
'''
Create a config file with a JSON blob containing an auths key.
:return: None
'''
self.log("Creating docker config file %s" % (path))
config_path_dir = os.path.dirname(path)
if not os.path.exists(config_path_dir):
try:
os.makedirs(config_path_dir)
except Exception as exc:
self.fail("Error: failed to create %s - %s" % (config_path_dir, str(exc)))
self.write_config(path, dict(auths=dict()))
def write_config(self, path, config):
    '''
    Serialize the config dict to *path* as pretty-printed JSON.

    :param path: destination file path.
    :param config: dict to serialize.
    :return: None
    '''
    try:
        # Use a context manager so the file handle is closed (and the data
        # flushed to disk) even if json.dump raises; the original passed
        # open() inline and leaked the handle.
        with open(path, "w") as fh:
            json.dump(config, fh, indent=5, sort_keys=True)
    except Exception as exc:
        self.fail("Error: failed to write config to %s - %s" % (path, str(exc)))
def update_config_file(self):
    '''
    If the authorization not stored in the config file or reauthorize is True,
    update the config file with the new authorization.

    :return: None
    '''
    path = os.path.expanduser(self.config_path)
    if not self.config_file_exists(path):
        self.create_config_file(path)

    try:
        # read the existing config; tolerate a corrupt/empty file
        with open(path, "r") as fh:
            config = json.load(fh)
    except ValueError:
        self.log("Error reading config from %s" % (path))
        config = dict()

    if not config.get('auths'):
        self.log("Adding auths dict to config.")
        config['auths'] = dict()

    if not config['auths'].get(self.registry_url):
        self.log("Adding registry_url %s to auths." % (self.registry_url))
        config['auths'][self.registry_url] = dict()

    # b64encode takes bytes and returns bytes. The original concatenated
    # str with b':', which raises TypeError on Python 3, and the bytes
    # result was not JSON-serializable anyway. Encode the credentials
    # explicitly and decode the digest back to text so it round-trips
    # through the JSON config and compares equal to a previously stored
    # value.
    auth = base64.b64encode(
        (self.username + ':' + self.password).encode('utf-8')
    ).decode('ascii')
    encoded_credentials = dict(
        auth=auth,
        email=self.email
    )

    if config['auths'][self.registry_url] != encoded_credentials or self.reauthorize:
        # Update the config file with the new authorization
        config['auths'][self.registry_url] = encoded_credentials
        self.log("Updating config file %s with new authorization for %s" % (path, self.registry_url))
        self.results['actions'].append("Updated config file %s with new authorization for %s" % (
            path, self.registry_url))
        self.results['changed'] = True
        self.write_config(path, config)
def main():
    '''
    Module entry point: declare the argument interface, run the login and
    exit with the collected results.
    '''
    # NOTE(review): the 'self.config_path' alias below looks like a leftover
    # from a refactor, but aliases are part of the public module interface,
    # so it is kept verbatim; confirm before removing.
    argument_spec = dict(
        registry_url=dict(type='str', required=False, default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
        username=dict(type='str', required=True),
        password=dict(type='str', required=True, no_log=True),
        email=dict(type='str'),
        reauthorize=dict(type='bool', default=False, aliases=['reauth']),
        config_path=dict(type='str', default='~/.docker/config.json', aliases=['self.config_path',
                                                                              'dockercfg_path']),
    )

    # The default registry requires an e-mail address at login time.
    required_if = [
        ('registry_url', DEFAULT_DOCKER_REGISTRY, ['email'])
    ]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if
    )

    results = {
        'changed': False,
        'actions': [],
        'login_result': {},
    }

    # The constructor performs the login and fills in 'results'.
    LoginManager(client, results)
    client.module.exit_json(**results)
# import module snippets
# (the star import is the historical Ansible boilerplate pulling in the
# module runtime; it must stay at the bottom of the file)
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
N-Bz/bybop | src/Bybop_Discovery.py | 1 | 3630 | # This sample uses https://pypi.python.org/pypi/zeroconf
# as its MDNS implementation
from zeroconf import ServiceBrowser, Zeroconf
import socket
import threading
class DeviceID(object):
    """Parrot ARSDK product identifiers, as used in the '_arsdk-XXXX._udp'
    mdns service types searched by Discovery."""

    # Drones
    BEBOP_DRONE = '0901'
    JUMPING_SUMO = '0902'
    JUMPING_NIGHT = '0905'
    JUMPING_RACE = '0906'
    BEBOP_2 = '090c'
    MAMBO = '090b'
    DISCO = '090e'
    ANAFI = '0914'

    # Remote controllers
    SKYCONTROLLER = '0903'
    SKYCONTROLLER_2 = '090f'
    SKYCONTROLLER_2P = '0915'
    SKYCONTROLLER_3 = '0918'

    # Convenience groupings by product family.
    BEBOP_FAMILY = [
        BEBOP_DRONE,
        BEBOP_2,
        DISCO,
    ]

    ANAFI_FAMILY = [
        ANAFI,
    ]

    JUMPING_FAMILY = [
        JUMPING_SUMO,
        JUMPING_NIGHT,
        JUMPING_RACE,
    ]

    MAMBO_FAMILY = [
        MAMBO,
    ]

    DRONES = BEBOP_FAMILY + ANAFI_FAMILY + JUMPING_FAMILY + MAMBO_FAMILY

    REMOTES = [
        SKYCONTROLLER,
        SKYCONTROLLER_2,
        SKYCONTROLLER_2P,
        SKYCONTROLLER_3
    ]

    ALL = DRONES + REMOTES
class Discovery(object):
    """
    Basic implementation of a MDNS search for ARSDK Devices.

    The protocol here is not covered by the ARSDK but this implementation is
    here to provide a fully working sample code.
    """

    def __init__(self, deviceId):
        """
        Create and start a researcher for devices on network.

        Arguments:
        - deviceId : List of deviceIds (strings) to search.
        """
        self._zeroconf = Zeroconf()
        self._browser = []
        self._services = {}
        # Condition shares the lock so get/wait/signal stay consistent.
        self._lock = threading.RLock()
        self._cond = threading.Condition(self._lock)
        for did in deviceId:
            self._browser.append(ServiceBrowser(self._zeroconf, '_arsdk-' +
                                                str(did) + '._udp.local.',
                                                self))

    def stop(self):
        """
        Stop searching.
        When stopped, this object can not be restarted
        """
        with self._lock:
            # Wake up anyone blocked in wait_for_change() before tearing down.
            self._cond.notify_all()
        self._zeroconf.close()

    def get_devices(self):
        """ Get the current list of devices (a name -> ServiceInfo dict copy) """
        return dict(self._services)

    def wait_for_change(self, timeout=None):
        """
        Wait for a change in the device list
        Keyword arguments:
        - timeout : Timeout in floating point seconds for the operation
        """
        with self._lock:
            self._cond.wait(timeout)

    def _signal_change(self):
        # Internal: wake up wait_for_change() callers.
        with self._lock:
            self._cond.notify_all()

    def remove_service(self, zeroconf, type, name):
        """ Internal function for zeroconf.ServiceBrowser. """
        if name in self._services:
            del self._services[name]
            self._signal_change()

    def add_service(self, zeroconf, type, name):
        """ Internal function for zeroconf.ServiceBrowser. """
        info = zeroconf.get_service_info(type, name)
        if info is not None:
            self._services[name] = info
            self._signal_change()
        else:
            # Fixed typo in the original message ('witout').
            print('Found a service without info : ' + name + '. Stopping !')
            self.stop()

    def update_service(self, zeroconf, type, name):
        """ Internal function for zeroconf.ServiceBrowser.

        Recent python-zeroconf releases require listeners to provide
        update_service(); refresh the cached info for the service when it
        changes.
        """
        info = zeroconf.get_service_info(type, name)
        if info is not None:
            self._services[name] = info
            self._signal_change()
def get_name(device):
    """ Get the display name of a device """
    # The mdns instance name is '<display name>.<service type>'; strip the
    # trailing '.<type>' suffix to recover the display name.
    suffix_len = len(device.type) + 1
    return device.name[:-suffix_len]
def get_ip(device):
    """ Get the IP, as string, of a device """
    # 'address' holds the packed 4-byte IPv4 address advertised over mdns.
    packed = device.address
    return socket.inet_ntoa(packed)
def get_port(device):
    """ Get the port, as string, of a device """
    port_number = device.port
    return str(port_number)
def get_device_id(device):
    """ Get the device_id of a device """
    # The service type looks like '_arsdk-XXXX._udp.local.'; the id is the
    # hex token between the prefix and the suffix.
    prefix = '_arsdk-'
    suffix = '._udp.local.'
    return device.type[len(prefix):-len(suffix)]
| bsd-3-clause |
SilverIce/JContainers | dep/boost/tools/build/src/tools/gcc.py | 5 | 38692 | # Status: being ported by Steven Watanabe
# Base revision: 47077
# TODO: common.jam needs to be ported
# TODO: generators.jam needs to have register_c_compiler.
#
# Copyright 2001 David Abrahams.
# Copyright 2002-2006 Rene Rivera.
# Copyright 2002-2003 Vladimir Prus.
# Copyright (c) 2005 Reece H. Dunn.
# Copyright 2006 Ilya Sokolov.
# Copyright 2007 Roland Schwarz
# Copyright 2007 Boris Gubenko.
# Copyright 2008 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import subprocess
import re
import bjam
from b2.tools import unix, common, rc, pch, builtin
from b2.build import feature, type, toolset, generators, property_set
from b2.build.property import Property
from b2.util.utility import os_name, on_windows
from b2.manager import get_manager
from b2.build.generators import Generator
from b2.build.toolset import flags
from b2.util.utility import to_seq
# Cache for debug(): None until first queried, then a bool.
__debug = None

def debug():
    """Lazily determine (and cache) whether --debug-configuration was
    passed on the bjam command line."""
    global __debug
    if __debug is None:
        argv = bjam.variable("ARGV")
        __debug = "--debug-configuration" in argv
    return __debug
# Register the gcc toolset and inherit the generic unix behaviour.
feature.extend('toolset', ['gcc'])

toolset.inherit_generators('gcc', [], 'unix', ['unix.link', 'unix.link.dll'])
toolset.inherit_flags('gcc', 'unix')
toolset.inherit_rules('gcc', 'unix')

generators.override('gcc.prebuilt', 'builtin.prebuilt')
generators.override('gcc.searched-lib-generator', 'searched-lib-generator')

# Target naming is determined by types/lib.jam and the settings below this
# comment.
#
# On *nix:
#     libxxx.a                  static library
#     libxxx.so                 shared library
#
# On windows (mingw):
#     libxxx.lib                static library
#     xxx.dll                   DLL
#     xxx.lib                   import library
#
# On windows (cygwin) i.e. <target-os>cygwin
#     libxxx.a                  static library
#     xxx.dll                   DLL
#     libxxx.dll.a              import library
#
# Note: user can always override by using the <tag>@rule
#     This settings have been choosen, so that mingw
#     is in line with msvc naming conventions. For
#     cygwin the cygwin naming convention has been choosen.

# Make the "o" suffix used for gcc toolset on all
# platforms
type.set_generated_target_suffix('OBJ', ['<toolset>gcc'], 'o')
type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'a')

type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'dll.a')
type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'lib')

# Parsers for the first token of 'gcc -dumpmachine' / 'gcc -dumpversion'
# output, used by init() below.
__machine_match = re.compile('^([^ ]+)')
__version_match = re.compile('^([0-9.]+)')
def init(version = None, command = None, options = None):
    """
    Initializes the gcc toolset for the given version. If necessary, command may
    be used to specify where the compiler is located. The parameter 'options' is a
    space-delimited list of options, each one specified as
    <option-name>option-value. Valid option names are: cxxflags, linkflags and
    linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
    and the default value will be selected based on the current OS.
    Example:
      using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
    """
    options = to_seq(options)
    command = to_seq(command)

    # Information about the gcc command...
    # The command.
    command = to_seq(common.get_invocation_command('gcc', 'g++', command))
    # The root directory of the tool install.
    root = feature.get_values('<root>', options) ;
    # The bin directory where to find the command to execute.
    bin = None
    # The flavor of compiler.
    flavor = feature.get_values('<flavor>', options)
    # Autodetect the root and bin dir if not given.
    if command:
        if not bin:
            bin = common.get_absolute_tool_path(command[-1])
        if not root:
            root = os.path.dirname(bin)
    # Autodetect the version and flavor if not given.
    if command:
        # Ask the compiler itself for its target machine and version.
        machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
        machine = __machine_match.search(machine_info).group(1)

        version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
        version = __version_match.search(version_info).group(1)
        if not flavor and machine.find('mingw') != -1:
            flavor = 'mingw'

    condition = None
    if flavor:
        condition = common.check_init_parameters('gcc', None,
            ('version', version),
            ('flavor', flavor))
    else:
        condition = common.check_init_parameters('gcc', None,
            ('version', version))

    if command:
        command = command[0]

    common.handle_options('gcc', condition, command, options)

    # Pick the linker flavor: explicit option first, then by host OS.
    linker = feature.get_values('<linker-type>', options)
    if not linker:
        if os_name() == 'OSF':
            linker = 'osf'
        elif os_name() == 'HPUX':
            linker = 'hpux' ;
        else:
            linker = 'gnu'

    init_link_flags('gcc', linker, condition)

    # If gcc is installed in non-standard location, we'd need to add
    # LD_LIBRARY_PATH when running programs created with it (for unit-test/run
    # rules).
    if command:
        # On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
        # and all must be added to LD_LIBRARY_PATH. The linker will pick the
        # right onces. Note that we don't provide a clean way to build 32-bit
        # binary with 64-bit compiler, but user can always pass -m32 manually.
        lib_path = [os.path.join(root, 'bin'),
                    os.path.join(root, 'lib'),
                    os.path.join(root, 'lib32'),
                    os.path.join(root, 'lib64')]
        if debug():
            print 'notice: using gcc libraries ::', condition, '::', lib_path
        toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)

    # If it's not a system gcc install we should adjust the various programs as
    # needed to prefer using the install specific versions. This is essential
    # for correct use of MinGW and for cross-compiling.

    # - The archive builder.
    archiver = common.get_invocation_command('gcc',
        'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
    toolset.flags('gcc.archive', '.AR', condition, [archiver])
    if debug():
        print 'notice: using gcc archiver ::', condition, '::', archiver

    # - Ranlib
    ranlib = common.get_invocation_command('gcc',
        'ranlib', feature.get_values('<ranlib>', options), [bin], path_last=True)
    toolset.flags('gcc.archive', '.RANLIB', condition, [ranlib])
    if debug():
        print 'notice: using gcc archiver ::', condition, '::', ranlib

    # - The resource compiler.
    rc_command = common.get_invocation_command_nodefault('gcc',
        'windres', feature.get_values('<rc>', options), [bin], path_last=True)
    rc_type = feature.get_values('<rc-type>', options)

    if not rc_type:
        rc_type = 'windres'

    if not rc_command:
        # If we can't find an RC compiler we fallback to a null RC compiler that
        # creates empty object files. This allows the same Jamfiles to work
        # across the board. The null RC uses the assembler to create the empty
        # objects, so configure that.
        rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
        rc_type = 'null'
    rc.configure(rc_command, condition, '<rc-type>' + rc_type)
###if [ os.name ] = NT
###{
###    # This causes single-line command invocation to not go through .bat files,
###    # thus avoiding command-line length limitations.
###    JAMSHELL = % ;
###}

#FIXME: when register_c_compiler is moved to
# generators, these should be updated
builtin.register_c_compiler('gcc.compile.c++', ['CPP'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c', ['C'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.asm', ['ASM'], ['OBJ'], ['<toolset>gcc'])

# pch support

# The compiler looks for a precompiled header in each directory just before it
# looks for the include file in that directory. The name searched for is the
# name specified in the #include directive with ".gch" suffix appended. The
# logic in gcc-pch-generator will make sure that BASE_PCH suffix is appended to
# full name of the header.

type.set_generated_target_suffix('PCH', ['<toolset>gcc'], 'gch')
# GCC-specific pch generator.
class GccPchGenerator(pch.PchGenerator):
    """PCH generator for gcc: compiles the header source into a .gch file
    and propagates the pch-file property to consumers."""

    # Inherit the __init__ method

    def run_pch(self, project, name, prop_set, sources):
        # Find the header in sources. Ignore any CPP sources.
        # NOTE(review): if no 'H'-derived source is present, 'header' stays
        # None and header.name() below raises; presumably the generator
        # framework guarantees a header source — confirm.
        header = None
        for s in sources:
            if type.is_derived(s.type(), 'H'):
                header = s

        # Error handling: Base header file name should be the same as the base
        # precompiled header name.
        header_name = header.name()
        header_basename = os.path.basename(header_name).rsplit('.', 1)[0]
        if header_basename != name:
            location = project.project_module
            ###FIXME:
            raise Exception()
            ### errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ;

        pch_file = Generator.run(self, project, name, prop_set, [header])

        # return result of base class and pch-file property as usage-requirements
        # FIXME: what about multiple results from generator.run?
        return (property_set.create([Property('pch-file', pch_file[0]),
                                     Property('cflags', '-Winvalid-pch')]),
                pch_file)

    # Calls the base version specifying source's name as the name of the created
    # target. As result, the PCH will be named whatever.hpp.gch, and not
    # whatever.gch.
    def generated_targets(self, sources, prop_set, project, name = None):
        name = sources[0].name()
        return Generator.generated_targets(self, sources,
            prop_set, project, name)
# Note: the 'H' source type will catch both '.h' header and '.hpp' header. The
# latter have HPP type, but HPP type is derived from H. The type of compilation
# is determined entirely by the destination type.
generators.register(GccPchGenerator('gcc.compile.c.pch', False, ['H'], ['C_PCH'], ['<pch>on', '<toolset>gcc' ]))
generators.register(GccPchGenerator('gcc.compile.c++.pch', False, ['H'], ['CPP_PCH'], ['<pch>on', '<toolset>gcc' ]))

# Override default do-nothing generators.
generators.override('gcc.compile.c.pch', 'pch.default-c-pch-generator')
generators.override('gcc.compile.c++.pch', 'pch.default-cpp-pch-generator')

flags('gcc.compile', 'PCH_FILE', ['<pch>on'], ['<pch-file>'])

# Declare flags and action for compilation
flags('gcc.compile', 'OPTIONS', ['<optimization>off'], ['-O0'])
flags('gcc.compile', 'OPTIONS', ['<optimization>speed'], ['-O3'])
flags('gcc.compile', 'OPTIONS', ['<optimization>space'], ['-Os'])

flags('gcc.compile', 'OPTIONS', ['<inlining>off'], ['-fno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>on'], ['-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>full'], ['-finline-functions', '-Wno-inline'])

flags('gcc.compile', 'OPTIONS', ['<warnings>off'], ['-w'])
flags('gcc.compile', 'OPTIONS', ['<warnings>on'], ['-Wall'])
flags('gcc.compile', 'OPTIONS', ['<warnings>all'], ['-Wall', '-pedantic'])
flags('gcc.compile', 'OPTIONS', ['<warnings-as-errors>on'], ['-Werror'])

flags('gcc.compile', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.compile', 'OPTIONS', ['<profiling>on'], ['-pg'])

flags('gcc.compile.c++', 'OPTIONS', ['<rtti>off'], ['-fno-rtti'])
flags('gcc.compile.c++', 'OPTIONS', ['<exception-handling>off'], ['-fno-exceptions'])

# On cygwin and mingw, gcc generates position independent code by default, and
# warns if -fPIC is specified. This might not be the right way of checking if
# we're using cygwin. For example, it's possible to run cygwin gcc from NT
# shell, or using crosscompiling. But we'll solve that problem when it's time.
# In that case we'll just add another parameter to 'init' and move this login
# inside 'init'.
if not os_name () in ['CYGWIN', 'NT']:
    # This logic will add -fPIC for all compilations:
    #
    # lib a : a.cpp b ;
    # obj b : b.cpp ;
    # exe c : c.cpp a d ;
    # obj d : d.cpp ;
    #
    # This all is fine, except that 'd' will be compiled with -fPIC even though
    # it's not needed, as 'd' is used only in exe. However, it's hard to detect
    # where a target is going to be used. Alternative, we can set -fPIC only
    # when main target type is LIB but than 'b' will be compiled without -fPIC.
    # In x86-64 that will lead to link errors. So, compile everything with
    # -fPIC.
    #
    # Yet another alternative would be to create propagated <sharedable>
    # feature, and set it when building shared libraries, but that's hard to
    # implement and will increase target path length even more.
    flags('gcc.compile', 'OPTIONS', ['<link>shared'], ['-fPIC'])

if os_name() != 'NT' and os_name() != 'OSF' and os_name() != 'HPUX':
    # OSF does have an option called -soname but it doesn't seem to work as
    # expected, therefore it has been disabled.
    HAVE_SONAME   = ''
    SONAME_OPTION = '-h'

flags('gcc.compile', 'USER_OPTIONS', [], ['<cflags>'])
flags('gcc.compile.c++', 'USER_OPTIONS',[], ['<cxxflags>'])
flags('gcc.compile', 'DEFINES', [], ['<define>'])
flags('gcc.compile', 'INCLUDES', [], ['<include>'])

# Shared engine reference used by the action registrations below.
engine = get_manager().engine()

engine.register_action('gcc.compile.c++.pch',
    '"$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')

engine.register_action('gcc.compile.c.pch',
    '"$(CONFIG_COMMAND)" -x c-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
def gcc_compile_cpp(targets, sources, properties):
    """Per-target setup for the C++ compile action: choose the -x language
    flag and wire up the PCH dependency."""
    # Some extensions are compiled as C++ by default. For others, we need to
    # pass -x c++. We could always pass -x c++ but distcc does not work with it.
    cpp_extensions = ['.cc', '.cp', '.cxx', '.cpp', '.c++', '.C']
    extension = os.path.splitext(sources[0])[1]
    if extension not in cpp_extensions:
        lang = '-x c++'
    else:
        lang = ''
    get_manager().engine().set_target_variable(targets, 'LANG', lang)
    engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
def gcc_compile_c(targets, sources, properties):
    """Per-target setup for the C compile action: force the C language flag
    and wire up the PCH dependency."""
    eng = get_manager().engine()
    # If we use the name g++ then default file suffix -> language mapping does
    # not work. So have to pass -x option. Maybe, we can work around this by
    # allowing the user to specify both C and C++ compiler names.
    #if $(>:S) != .c
    #{
    eng.set_target_variable(targets, 'LANG', '-x c')
    #}
    eng.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
# Compile action strings; $(LANG)/$(PCH_FILE) are filled per-target by the
# gcc_compile_* functions above.
engine.register_action(
    'gcc.compile.c++',
    '"$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-128 $(OPTIONS) ' +
    '$(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" ' +
    '-c -o "$(<:W)" "$(>:W)"',
    function=gcc_compile_cpp,
    bound_list=['PCH_FILE'])

engine.register_action(
    'gcc.compile.c',
    '"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) ' +
    '-I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
    function=gcc_compile_c,
    bound_list=['PCH_FILE'])
def gcc_compile_asm(targets, sources, properties):
    """Per-target setup for assembler-with-cpp sources."""
    eng = get_manager().engine()
    eng.set_target_variable(targets, 'LANG', '-x assembler-with-cpp')
# Assembler compile action.
engine.register_action(
    'gcc.compile.asm',
    '"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
    function=gcc_compile_asm)
class GccLinkingGenerator(unix.UnixLinkingGenerator):
    """
    The class which check that we don't try to use the <runtime-link>static
    property while creating or using shared library, since it's not supported by
    gcc/libc.
    """
    def run(self, project, name, ps, sources):
        # TODO: Replace this with the use of a target-os property.

        no_static_link = False
        if bjam.variable('UNIX'):
            no_static_link = True;
        ##FIXME: what does this mean?
##        {
##            switch [ modules.peek : JAMUNAME ]
##            {
##                case * : no-static-link = true ;
##            }
##        }

        reason = None
        if no_static_link and ps.get('runtime-link') == 'static':
            if ps.get('link') == 'shared':
                reason = "On gcc, DLL can't be build with '<runtime-link>static'."
            elif type.is_derived(self.target_types[0], 'EXE'):
                for s in sources:
                    source_type = s.type()
                    if source_type and type.is_derived(source_type, 'SHARED_LIB'):
                        reason = "On gcc, using DLLS together with the " +\
                                 "<runtime-link>static options is not possible "
        if reason:
            # NOTE(review): the warning path returns None (no targets);
            # presumably the generator framework treats a falsy result as
            # "no viable generator" — confirm.
            print 'warning:', reason
            print 'warning:',\
                "It is suggested to use '<runtime-link>static' together",\
                "with '<link>static'." ;
            return
        else:
            generated_targets = unix.UnixLinkingGenerator.run(self, project,
                name, ps, sources)
            return generated_targets
# Register linking generators; on windows, import libraries participate.
if on_windows():
    flags('gcc.link.dll', '.IMPLIB-COMMAND', [], ['-Wl,--out-implib,'])
    generators.register(
        GccLinkingGenerator('gcc.link', True,
            ['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
            [ 'EXE' ],
            [ '<toolset>gcc' ]))
    generators.register(
        GccLinkingGenerator('gcc.link.dll', True,
            ['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
            ['IMPORT_LIB', 'SHARED_LIB'],
            ['<toolset>gcc']))
else:
    generators.register(
        GccLinkingGenerator('gcc.link', True,
            ['LIB', 'OBJ'],
            ['EXE'],
            ['<toolset>gcc']))
    generators.register(
        GccLinkingGenerator('gcc.link.dll', True,
            ['LIB', 'OBJ'],
            ['SHARED_LIB'],
            ['<toolset>gcc']))

# Declare flags for linking.
# First, the common flags.
flags('gcc.link', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.link', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.link', 'USER_OPTIONS', [], ['<linkflags>'])
flags('gcc.link', 'LINKPATH', [], ['<library-path>'])
flags('gcc.link', 'FINDLIBS-ST', [], ['<find-static-library>'])
flags('gcc.link', 'FINDLIBS-SA', [], ['<find-shared-library>'])
flags('gcc.link', 'LIBRARIES', [], ['<library-file>'])

# For <runtime-link>static we made sure there are no dynamic libraries in the
# link. On HP-UX not all system libraries exist as archived libraries (for
# example, there is no libunwind.a), so, on this platform, the -static option
# cannot be specified.
if os_name() != 'HPUX':
    flags('gcc.link', 'OPTIONS', ['<runtime-link>static'], ['-static'])

# Now, the vendor specific flags.
# The parameter linker can be either gnu, darwin, osf, hpux or sun.
def init_link_flags(toolset, linker, condition):
    """
    Now, the vendor specific flags.
    The parameter linker can be either gnu, darwin, osf, hpux or sun.

    Registers linker-flavor specific flags for '<toolset>.link' under the
    given initialization condition; raises ValueError for an unknown
    linker flavor.
    """
    toolset_link = toolset + '.link'
    if linker == 'gnu':
        # Strip the binary when no debugging is needed. We use --strip-all flag
        # as opposed to -s since icc (intel's compiler) is generally
        # option-compatible with and inherits from the gcc toolset, but does not
        # support -s.

        # FIXME: what does unchecked translate to?
        flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all'])  # : unchecked ;
        flags(toolset_link, 'RPATH', condition, ['<dll-path>'])             # : unchecked ;
        flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>'])       # : unchecked ;
        flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])  # : unchecked ;
        flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group'])    # : unchecked ;

        # gnu ld has the ability to change the search behaviour for libraries
        # referenced by -l switch. These modifiers are -Bstatic and -Bdynamic
        # and change search for -l switches that follow them. The following list
        # shows the tried variants.
        # The search stops at the first variant that has a match.
        # *nix: -Bstatic -lxxx
        #     libxxx.a
        #
        # *nix: -Bdynamic -lxxx
        #     libxxx.so
        #     libxxx.a
        #
        # windows (mingw,cygwin) -Bstatic -lxxx
        #     libxxx.a
        #     xxx.lib
        #
        # windows (mingw,cygwin) -Bdynamic -lxxx
        #     libxxx.dll.a
        #     xxx.dll.a
        #     libxxx.a
        #     xxx.lib
        #     cygxxx.dll (*)
        #     libxxx.dll
        #     xxx.dll
        #     libxxx.a
        #
        # (*) This is for cygwin
        # Please note that -Bstatic and -Bdynamic are not a guarantee that a
        # static or dynamic lib indeed gets linked in. The switches only change
        # search patterns!

        # On *nix mixing shared libs with static runtime is not a good idea.
        flags(toolset_link, 'FINDLIBS-ST-PFX',
            map(lambda x: x + '/<runtime-link>shared', condition),
            ['-Wl,-Bstatic'])  # : unchecked ;
        flags(toolset_link, 'FINDLIBS-SA-PFX',
            map(lambda x: x + '/<runtime-link>shared', condition),
            ['-Wl,-Bdynamic'])  # : unchecked ;

        # On windows allow mixing of static and dynamic libs with static
        # runtime.
        flags(toolset_link, 'FINDLIBS-ST-PFX',
            map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
            ['-Wl,-Bstatic'])  # : unchecked ;
        flags(toolset_link, 'FINDLIBS-SA-PFX',
            map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
            ['-Wl,-Bdynamic'])  # : unchecked ;
        flags(toolset_link, 'OPTIONS',
            map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
            ['-Wl,-Bstatic'])  # : unchecked ;

    elif linker == 'darwin':
        # On Darwin, the -s option to ld does not work unless we pass -static,
        # and passing -static unconditionally is a bad idea. So, don't pass -s.
        # at all, darwin.jam will use separate 'strip' invocation.
        flags(toolset_link, 'RPATH', condition, ['<dll-path>'])        # : unchecked ;
        flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>'])  # : unchecked ;

    elif linker == 'osf':
        # No --strip-all, just -s.
        flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
            # : unchecked ;
        flags(toolset_link, 'RPATH', condition, ['<dll-path>'])  # : unchecked ;
        # This does not supports -R.
        flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath'])  # : unchecked ;
        # -rpath-link is not supported at all.

    elif linker == 'sun':
        flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
            # : unchecked ;
        flags(toolset_link, 'RPATH', condition, ['<dll-path>'])  # : unchecked ;
        # Solaris linker does not have a separate -rpath-link, but allows to use
        # -L for the same purpose.
        flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>'])  # : unchecked ;

        # This permits shared libraries with non-PIC code on Solaris.
        # VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the
        # following is not needed. Whether -fPIC should be hardcoded, is a
        # separate question.
        # AH, 2004/10/16: it is still necessary because some tests link against
        # static libraries that were compiled without PIC.
        flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
            # : unchecked ;

    elif linker == 'hpux':
        flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
            ['-Wl,-s'])  # : unchecked ;
        flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
            ['-fPIC'])  # : unchecked ;

    else:
        # The original code called errors.user_error here, but no 'errors'
        # module is imported in this file, so reaching this branch raised a
        # NameError with jam-style, unformatted $() placeholders. Raise a
        # real, formatted exception instead.
        raise ValueError(
            "%s initialization: invalid linker '%s'. "
            "The value '%s' specified for <linker> is not recognized. "
            "Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'"
            % (toolset, linker, linker))
# Declare actions for linking.
def gcc_link(targets, sources, properties):
    """Per-target setup for the gcc.link action."""
    eng = get_manager().engine()
    eng.set_target_variable(targets, 'SPACE', ' ')
    # Serialize execution of the 'link' action, since running N links in
    # parallel is just slower. For now, serialize only gcc links, it might be a
    # good idea to serialize all links.
    eng.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
# Executable link action.
engine.register_action(
    'gcc.link',
    '"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
    '-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
    '-Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" ' +
    '$(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
    '-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
    '$(OPTIONS) $(USER_OPTIONS)',
    function=gcc_link,
    bound_list=['LIBRARIES'])

# Default value. Mostly for the sake of intel-linux that inherits from gcc, but
# does not have the same logic to set the .AR variable. We can put the same
# logic in intel-linux, but that's hardly worth the trouble as on Linux, 'ar' is
# always available.
__AR = 'ar'

flags('gcc.archive', 'AROPTIONS', [], ['<archiveflags>'])
def gcc_archive(targets, sources, properties):
    # Per-target setup for the gcc.archive action: schedule removal of any
    # stale archive before it is (re)built.
    #
    # Always remove archive and start again. Here's rationale from
    #
    # Andre Hentz:
    #
    # I had a file, say a1.c, that was included into liba.a. I moved a1.c to
    # a2.c, updated my Jamfiles and rebuilt. My program was crashing with absurd
    # errors. After some debugging I traced it back to the fact that a1.o was
    # *still* in liba.a
    #
    # Rene Rivera:
    #
    # Originally removing the archive was done by splicing an RM onto the
    # archive action. That makes archives fail to build on NT when they have
    # many files because it will no longer execute the action directly and blow
    # the line length limit. Instead we remove the file in a different action,
    # just before building the archive.
    clean = targets[0] + '(clean)'
    bjam.call('TEMPORARY', clean)
    bjam.call('NOCARE', clean)
    engine = get_manager().engine()
    engine.set_target_variable('LOCATE', clean, bjam.call('get-target-variable', targets, 'LOCATE'))
    engine.add_dependency(clean, sources)
    engine.add_dependency(targets, clean)
    engine.set_update_action('common.RmTemps', clean, targets)
# Declare action for creating static libraries.
# The letter 'r' means to add files to the archive with replacement. Since we
# remove archive, we don't care about replacement, but there's no option "add
# without replacement".
# The letter 'c' suppresses the warning in case the archive does not exists yet.
# That warning is produced only on some platforms, for whatever reasons.
engine.register_action('gcc.archive',
    '''"$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)"
"$(.RANLIB)" "$(<)"
''',
    function=gcc_archive,
    flags=['piecemeal'])
def gcc_link_dll(targets, sources, properties):
    """Per-target setup for the gcc.link.dll action.

    Besides the link-serialization semaphore, exports the soname switch
    variables consumed by the action string.
    """
    eng = get_manager().engine()
    eng.set_target_variable(targets, 'SPACE', ' ')
    eng.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
    # HAVE_SONAME / SONAME_OPTION are only assigned at module scope when
    # os_name() is not NT/OSF/HPUX, so referencing them unconditionally
    # raised a NameError on those platforms. Leave the target variables
    # unset when the globals were never defined: an unset bjam variable
    # expands to nothing in the action, which is the intended "no soname"
    # behaviour.
    have_soname = globals().get('HAVE_SONAME')
    if have_soname is not None:
        eng.set_target_variable(targets, "HAVE_SONAME", have_soname)
    soname_option = globals().get('SONAME_OPTION')
    if soname_option is not None:
        eng.set_target_variable(targets, "SONAME_OPTION", soname_option)
# Shared library link action.
engine.register_action(
    'gcc.link.dll',
    # Differ from 'link' above only by -shared.
    '"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
    '-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
    '"$(.IMPLIB-COMMAND)$(<[1])" -o "$(<[-1])" ' +
    '$(HAVE_SONAME)-Wl,$(SONAME_OPTION)$(SPACE)-Wl,$(<[-1]:D=) ' +
    '-shared $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
    '-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
    '$(OPTIONS) $(USER_OPTIONS)',
    function = gcc_link_dll,
    bound_list=['LIBRARIES'])
# Set up threading support. It's somewhat contrived, so perform it at the end,
# to avoid cluttering other code.

if on_windows():
    flags('gcc', 'OPTIONS', ['<threading>multi'], ['-mthreads'])
elif bjam.variable('UNIX'):
    jamuname = bjam.variable('JAMUNAME')
    host_os_name = jamuname[0]
    if host_os_name.startswith('SunOS'):
        flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthreads'])
        flags('gcc', 'FINDLIBS-SA', [], ['rt'])
    elif host_os_name == 'BeOS':
        # BeOS has no threading options, don't set anything here.
        pass
    elif host_os_name == 'Haiku':
        flags('gcc', 'OPTIONS', ['<threading>multi'], ['-lroot'])
        # there is no -lrt on Haiku, and -pthread is implicit
    elif host_os_name.endswith('BSD'):
        flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
        # there is no -lrt on BSD
    elif host_os_name == 'DragonFly':
        flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
        # there is no -lrt on BSD - DragonFly is a FreeBSD variant,
        # which anoyingly doesn't say it's a *BSD.
    elif host_os_name == 'IRIX':
        # gcc on IRIX does not support multi-threading, don't set anything here.
        pass
    elif host_os_name == 'Darwin':
        # Darwin has no threading options, don't set anything here.
        pass
    else:
        flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
        flags('gcc', 'FINDLIBS-SA', [], ['rt'])
def cpu_flags(toolset, variable, architecture, instruction_set, values, default=None):
    """Register 'values' for 'variable' under the given architecture /
    instruction-set condition.

    #FIXME: the 'default' handling and the bare '<instruction-set>'
    condition present in the jam original are disabled because the feature
    code they relied on is out of date; 'default' is currently unused.
    """
    condition = '<architecture>%s/<instruction-set>%s' % (architecture, instruction_set)
    flags(toolset, variable, [condition], values)
# Set architecture/instruction-set options.
#
# x86 and compatible
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'native', ['-march=native'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i486', ['-march=i486'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i586', ['-march=i586'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i686', ['-march=i686'], default=True)
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium', ['-march=pentium'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-mmx', ['-march=pentium-mmx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentiumpro', ['-march=pentiumpro'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium2', ['-march=pentium2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3', ['-march=pentium3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3m', ['-march=pentium3m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-m', ['-march=pentium-m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4', ['-march=pentium4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4m', ['-march=pentium4m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'prescott', ['-march=prescott'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nocona', ['-march=nocona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'core2', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe-l', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'allendale', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'wolfdale', ['-march=core2', '-msse4.1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'merom', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'merom-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'kentsfield', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'kentsfield-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'yorksfield', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'penryn', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'corei7', ['-march=corei7'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nehalem', ['-march=corei7'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'corei7-avx', ['-march=corei7-avx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'sandy-bridge', ['-march=corei7-avx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'core-avx-i', ['-march=core-avx-i'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'ivy-bridge', ['-march=core-avx-i'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'haswell', ['-march=core-avx-i', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6', ['-march=k6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-2', ['-march=k6-2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-3', ['-march=k6-3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon', ['-march=athlon'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-tbird', ['-march=athlon-tbird'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-4', ['-march=athlon-4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-xp', ['-march=athlon-xp'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-mp', ['-march=athlon-mp'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8', ['-march=k8'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron', ['-march=opteron'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64', ['-march=athlon64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-fx', ['-march=athlon-fx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8-sse3', ['-march=k8-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron-sse3', ['-march=opteron-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64-sse3', ['-march=athlon64-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'amdfam10', ['-march=amdfam10'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'barcelona', ['-march=barcelona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver1', ['-march=bdver1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver2', ['-march=bdver2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver3', ['-march=bdver3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'btver1', ['-march=btver1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'btver2', ['-march=btver2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip-c6', ['-march=winchip-c6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip2', ['-march=winchip2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3', ['-march=c3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3-2', ['-march=c3-2'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'atom', ['-march=atom'])
# Sparc
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v7', ['-mcpu=v7'], default=True)
cpu_flags('gcc', 'OPTIONS', 'sparc', 'cypress', ['-mcpu=cypress'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v8', ['-mcpu=v8'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'supersparc', ['-mcpu=supersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite', ['-mcpu=sparclite'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'hypersparc', ['-mcpu=hypersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite86x', ['-mcpu=sparclite86x'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f930', ['-mcpu=f930'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f934', ['-mcpu=f934'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclet', ['-mcpu=sparclet'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'tsc701', ['-mcpu=tsc701'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v9', ['-mcpu=v9'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc', ['-mcpu=ultrasparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc3', ['-mcpu=ultrasparc3'])
# RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'power', '403', ['-mcpu=403'])
cpu_flags('gcc', 'OPTIONS', 'power', '505', ['-mcpu=505'])
cpu_flags('gcc', 'OPTIONS', 'power', '601', ['-mcpu=601'])
cpu_flags('gcc', 'OPTIONS', 'power', '602', ['-mcpu=602'])
cpu_flags('gcc', 'OPTIONS', 'power', '603', ['-mcpu=603'])
cpu_flags('gcc', 'OPTIONS', 'power', '603e', ['-mcpu=603e'])
cpu_flags('gcc', 'OPTIONS', 'power', '604', ['-mcpu=604'])
cpu_flags('gcc', 'OPTIONS', 'power', '604e', ['-mcpu=604e'])
cpu_flags('gcc', 'OPTIONS', 'power', '620', ['-mcpu=620'])
cpu_flags('gcc', 'OPTIONS', 'power', '630', ['-mcpu=630'])
cpu_flags('gcc', 'OPTIONS', 'power', '740', ['-mcpu=740'])
cpu_flags('gcc', 'OPTIONS', 'power', '7400', ['-mcpu=7400'])
cpu_flags('gcc', 'OPTIONS', 'power', '7450', ['-mcpu=7450'])
cpu_flags('gcc', 'OPTIONS', 'power', '750', ['-mcpu=750'])
cpu_flags('gcc', 'OPTIONS', 'power', '801', ['-mcpu=801'])
cpu_flags('gcc', 'OPTIONS', 'power', '821', ['-mcpu=821'])
cpu_flags('gcc', 'OPTIONS', 'power', '823', ['-mcpu=823'])
cpu_flags('gcc', 'OPTIONS', 'power', '860', ['-mcpu=860'])
cpu_flags('gcc', 'OPTIONS', 'power', '970', ['-mcpu=970'])
cpu_flags('gcc', 'OPTIONS', 'power', '8540', ['-mcpu=8540'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power', ['-mcpu=power'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power2', ['-mcpu=power2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power3', ['-mcpu=power3'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power4', ['-mcpu=power4'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power5', ['-mcpu=power5'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc', ['-mcpu=powerpc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc64', ['-mcpu=powerpc64'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios', ['-mcpu=rios'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios1', ['-mcpu=rios1'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios2', ['-mcpu=rios2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rsc', ['-mcpu=rsc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rs64a', ['-mcpu=rs64'])
# AIX variant of RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32/<target-os>aix'], ['-maix32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-maix64'])
flags('gcc', 'AROPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-X64'])
| mit |
mkieszek/odoo | openerp/addons/base/tests/test_base.py | 4 | 34257 | import unittest2
import openerp.tests.common as common
from openerp.exceptions import ValidationError
class test_base(common.TransactionCase):
def setUp(self):
super(test_base,self).setUp()
self.res_partner = self.registry('res.partner')
self.res_users = self.registry('res.users')
self.res_partner_title = self.registry('res.partner.title')
# samples use effective TLDs from the Mozilla public suffix
# list at http://publicsuffix.org
self.samples = [
('"Raoul Grosbedon" <raoul@chirurgiens-dentistes.fr> ', 'Raoul Grosbedon', 'raoul@chirurgiens-dentistes.fr'),
('ryu+giga-Sushi@aizubange.fukushima.jp', '', 'ryu+giga-Sushi@aizubange.fukushima.jp'),
('Raoul chirurgiens-dentistes.fr', 'Raoul chirurgiens-dentistes.fr', ''),
(" Raoul O'hara <!@historicalsociety.museum>", "Raoul O'hara", '!@historicalsociety.museum')
]
def test_00_res_partner_name_create(self):
cr, uid = self.cr, self.uid
parse = self.res_partner._parse_partner_name
for text, name, mail in self.samples:
self.assertEqual((name,mail), parse(text), 'Partner name parsing failed')
partner_id, dummy = self.res_partner.name_create(cr, uid, text)
partner = self.res_partner.browse(cr, uid, partner_id)
self.assertEqual(name or mail, partner.name, 'Partner name incorrect')
self.assertEqual(mail or False, partner.email, 'Partner email incorrect')
def test_10_res_partner_find_or_create(self):
cr,uid = self.cr, self.uid
email = self.samples[0][0]
partner_id, dummy = self.res_partner.name_create(cr, uid, email)
found_id = self.res_partner.find_or_create(cr, uid, email)
self.assertEqual(partner_id, found_id, 'find_or_create failed')
partner_id2, dummy2 = self.res_partner.name_create(cr, uid, 'sarah.john@connor.com')
found_id2 = self.res_partner.find_or_create(cr, uid, 'john@connor.com')
self.assertNotEqual(partner_id2, found_id2, 'john@connor.com match sarah.john@connor.com')
new_id = self.res_partner.find_or_create(cr, uid, self.samples[1][0])
self.assertTrue(new_id > partner_id, 'find_or_create failed - should have created new one')
new_id2 = self.res_partner.find_or_create(cr, uid, self.samples[2][0])
self.assertTrue(new_id2 > new_id, 'find_or_create failed - should have created new one again')
def test_15_res_partner_name_search(self):
cr,uid = self.cr, self.uid
for name, active in [
('"A Raoul Grosbedon" <raoul@chirurgiens-dentistes.fr>', False),
('B Raoul chirurgiens-dentistes.fr', True),
("C Raoul O'hara <!@historicalsociety.museum>", True),
('ryu+giga-Sushi@aizubange.fukushima.jp', True),
]:
partner_id, dummy = self.res_partner.name_create(cr, uid, name, context={'default_active': active})
partners = self.res_partner.name_search(cr, uid, 'Raoul')
self.assertEqual(len(partners), 2, 'Incorrect search number result for name_search')
partners = self.res_partner.name_search(cr, uid, 'Raoul', limit=1)
self.assertEqual(len(partners), 1, 'Incorrect search number result for name_search with a limit')
self.assertEqual(partners[0][1], 'B Raoul chirurgiens-dentistes.fr', 'Incorrect partner returned, should be the first active')
def test_20_res_partner_address_sync(self):
cr, uid = self.cr, self.uid
ghoststep = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'GhostStep',
'is_company': True,
'street': 'Main Street, 10',
'phone': '123456789',
'email': 'info@ghoststep.com',
'vat': 'BE0477472701',
'type': 'contact'}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Denis Bladesmith <denis.bladesmith@ghoststep.com>')[0])
self.assertEqual(p1.type, 'contact', 'Default type must be "contact"')
p1phone = '123456789#34'
p1.write({'phone': p1phone,
'parent_id': ghoststep.id})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
# turn off sync
p1street = 'Different street, 42'
p1.write({'street': p1street,
'type': 'invoice'})
p1.refresh(), ghoststep.refresh()
self.assertEqual(p1.street, p1street, 'Address fields must not be synced after turning sync off')
self.assertNotEqual(ghoststep.street, p1street, 'Parent address must never be touched')
# turn on sync again
p1.write({'type': 'contact'})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced again')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
# Modify parent, sync to children
ghoststreet = 'South Street, 25'
ghoststep.write({'street': ghoststreet})
p1.refresh()
self.assertEqual(p1.street, ghoststreet, 'Address fields must be synced automatically')
self.assertEqual(p1.phone, p1phone, 'Phone should not be synced')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
p1street = 'My Street, 11'
p1.write({'street': p1street})
ghoststep.refresh()
self.assertEqual(ghoststep.street, ghoststreet, 'Touching contact should never alter parent')
def test_30_res_partner_first_contact_sync(self):
""" Test initial creation of company/contact pair where contact address gets copied to
company """
cr, uid = self.cr, self.uid
ironshield = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'IronShield')[0])
self.assertFalse(ironshield.is_company, 'Partners are not companies by default')
self.assertEqual(ironshield.type, 'contact', 'Default type must be "contact"')
ironshield.write({'type': 'contact'})
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Isen Hardearth',
'street': 'Strongarm Avenue, 12',
'parent_id': ironshield.id}))
self.assertEquals(p1.type, 'contact', 'Default type must be "contact", not the copied parent type')
ironshield.refresh()
self.assertEqual(ironshield.street, p1.street, 'Address fields should be copied to company')
self.assertTrue(ironshield.is_company, 'Company flag should be turned on after first contact creation')
def test_40_res_partner_address_getc(self):
""" Test address_get address resolution mechanism: it should first go down through descendants,
stopping when encountering another is_copmany entity, then go up, stopping again at the first
is_company entity or the root ancestor and if nothing matches, it should use the provided partner
itself """
cr, uid = self.cr, self.uid
elmtree = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Elmtree')[0])
branch1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 1',
'parent_id': elmtree.id,
'is_company': True}))
leaf10 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 10',
'parent_id': branch1.id,
'type': 'invoice'}))
branch11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 11',
'parent_id': branch1.id,
'type': 'other'}))
leaf111 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 111',
'parent_id': branch11.id,
'type': 'delivery'}))
branch11.write({'is_company': False}) # force is_company after creating 1rst child
branch2 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 2',
'parent_id': elmtree.id,
'is_company': True}))
leaf21 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 21',
'parent_id': branch2.id,
'type': 'delivery'}))
leaf22 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 22',
'parent_id': branch2.id}))
leaf23 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 23',
'parent_id': branch2.id,
'type': 'contact'}))
# go up, stop at branch1
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch11.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id}, 'Invalid address resolution')
# go down, stop at at all child companies
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': elmtree.id,
'invoice': elmtree.id,
'contact': elmtree.id,
'other': elmtree.id}, 'Invalid address resolution')
# go down through children
self.assertEqual(self.res_partner.address_get(cr, uid, [branch1.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch2.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf21.id,
'invoice': branch2.id,
'contact': branch2.id,
'other': branch2.id}, 'Invalid address resolution. Company is the first encountered contact, therefore default for unfound addresses.')
# go up then down through siblings
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf21.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf21.id,
'invoice': branch2.id,
'contact': branch2.id,
'other': branch2.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf22.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf21.id,
'invoice': leaf22.id,
'contact': leaf22.id,
'other': leaf22.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf23.id], ['delivery', 'invoice', 'contact', 'other']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': leaf23.id,
'other': leaf23.id}, 'Invalid address resolution, `default` should only override if no partner with specific type exists')
# empty adr_pref means only 'contact'
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], []),
{'contact': elmtree.id}, 'Invalid address resolution, no contact means commercial entity ancestor')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'contact': branch1.id}, 'Invalid address resolution, no contact means finding contact in ancestors')
branch11.write({'type': 'contact'})
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'contact': branch11.id}, 'Invalid address resolution, branch11 should now be contact')
def test_50_res_partner_commercial_sync(self):
cr, uid = self.cr, self.uid
p0 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sigurd Sunknife',
'email': 'ssunknife@gmail.com'}))
sunhelm = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sunhelm',
'is_company': True,
'street': 'Rainbow Street, 13',
'phone': '1122334455',
'email': 'info@sunhelm.com',
'vat': 'BE0477472701',
'child_ids': [(4, p0.id),
(0, 0, {'name': 'Alrik Greenthorn',
'email': 'agr@sunhelm.com'})],
}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Otto Blackwood',
'email': 'otto.blackwood@sunhelm.com',
'parent_id': sunhelm.id}))
p11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Gini Graywool',
'email': 'ggr@sunhelm.com',
'parent_id': p1.id}))
p2 = self.res_partner.browse(cr, uid, self.res_partner.search(cr, uid,
[('email', '=', 'agr@sunhelm.com')])[0])
self.res_partner.write(cr, uid, sunhelm.id, {'child_ids': [(0, 0, {'name': 'Ulrik Greenthorn',
'email': 'ugr@sunhelm.com'})]})
p3 = self.res_partner.browse(cr, uid, self.res_partner.search(cr, uid,
[('email', '=', 'ugr@sunhelm.com')])[0])
for p in (p0, p1, p11, p2, p3):
p.refresh()
self.assertEquals(p.commercial_partner_id, sunhelm, 'Incorrect commercial entity resolution')
self.assertEquals(p.vat, sunhelm.vat, 'Commercial fields must be automatically synced')
sunhelmvat = 'BE0123456789'
sunhelm.write({'vat': sunhelmvat})
for p in (p0, p1, p11, p2, p3):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Commercial fields must be automatically and recursively synced')
p1vat = 'BE0987654321'
p1.write({'vat': p1vat})
for p in (sunhelm, p0, p11, p2, p3):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Sync to children should only work downstream and on commercial entities')
# promote p1 to commercial entity
p1.write({
'parent_id': sunhelm.id,
'is_company': True,
'name': 'Sunhelm Subsidiary'})
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEquals(p1.commercial_partner_id, p1, 'Incorrect commercial entity resolution after setting is_company')
# writing on parent should not touch child commercial entities
sunhelmvat2 = 'BE0112233445'
sunhelm.write({'vat': sunhelmvat2})
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
p0.refresh()
self.assertEquals(p0.vat, sunhelmvat2, 'Commercial fields must be automatically synced')
def test_60_read_group(self):
cr, uid = self.cr, self.uid
title_sir = self.res_partner_title.create(cr, uid, {'name': 'Sir'})
title_lady = self.res_partner_title.create(cr, uid, {'name': 'Lady'})
test_users = [
{'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend', 'date': '2015-03-28', 'title': title_lady},
{'name': 'Alice', 'login': 'alice2', 'color': 0, 'function': 'Friend', 'date': '2015-01-28', 'title': title_lady},
{'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend', 'date': '2015-03-02', 'title': title_sir},
{'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper', 'date': '2015-03-20', 'title': title_lady},
{'name': 'Nab', 'login': 'nab', 'color': -3, 'function': '5$ Wrench', 'date': '2014-09-10', 'title': title_sir},
{'name': 'Nab', 'login': 'nab-she', 'color': 6, 'function': '5$ Wrench', 'date': '2014-01-02', 'title': title_lady},
]
ids = [self.res_users.create(cr, uid, u) for u in test_users]
domain = [('id', 'in', ids)]
# group on local char field without domain and without active_test (-> empty WHERE clause)
groups_data = self.res_users.read_group(cr, uid, [], fields=['login'], groupby=['login'], orderby='login DESC', context={'active_test': False})
self.assertGreater(len(groups_data), 6, "Incorrect number of results when grouping on a field")
# group on local char field with limit
groups_data = self.res_users.read_group(cr, uid, domain, fields=['login'], groupby=['login'], orderby='login DESC', limit=3, offset=3)
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field with limit")
self.assertEqual(['bob', 'alice2', 'alice'], [g['login'] for g in groups_data], 'Result mismatch')
# group on inherited char field, aggregate on int field (second groupby ignored on purpose)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color', 'function'], groupby=['function', 'login'])
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual(['5$ Wrench', 'Eavesdropper', 'Friend'], [g['function'] for g in groups_data], 'incorrect read_group order')
for group_data in groups_data:
self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
# group on inherited char field, reverse order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
# group on int field, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['color'], groupby='color')
self.assertEqual([-3, 0, 1, 2, 3, 6], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# multi group, second level is int field, should still be summed in first level grouping
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby=['name', 'color'], orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 3, 2, 1], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# group on inherited char field, multiple orders with directions
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='color DESC, name')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['Eve', 'Nab', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 2, 1, 2], [g['name_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'])
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['January 2014', 'September 2014', 'January 2015', 'March 2015'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 1, 1, 3], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, custom order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'], orderby='date DESC')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['March 2015', 'January 2015', 'September 2014', 'January 2014'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 1, 1, 1], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited many2one (res_partner.title), default order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'])
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), reversed natural order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), multiple orders with m2o in second position
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="color desc, title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the result')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), ordered by other inherited field (color)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby='color')
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
class test_partner_recursion(common.TransactionCase):
def setUp(self):
super(test_partner_recursion,self).setUp()
self.res_partner = self.registry('res.partner')
cr, uid = self.cr, self.uid
self.p1 = self.res_partner.name_create(cr, uid, 'Elmtree')[0]
self.p2 = self.res_partner.create(cr, uid, {'name': 'Elmtree Child 1', 'parent_id': self.p1})
self.p3 = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.1', 'parent_id': self.p2})
# split 101, 102, 103 tests to force SQL rollback between them
def test_101_res_partner_recursion(self):
cr, uid, p1, p3 = self.cr, self.uid, self.p1, self.p3
self.assertRaises(ValidationError, self.res_partner.write, cr, uid, [p1], {'parent_id': p3})
def test_102_res_partner_recursion(self):
cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
self.assertRaises(ValidationError, self.res_partner.write, cr, uid, [p2], {'parent_id': p3})
def test_103_res_partner_recursion(self):
cr, uid, p3 = self.cr, self.uid, self.p3
self.assertRaises(ValidationError, self.res_partner.write, cr, uid, [p3], {'parent_id': p3})
def test_104_res_partner_recursion_indirect_cycle(self):
""" Indirect hacky write to create cycle in children """
cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
p3b = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.2', 'parent_id': self.p2})
self.assertRaises(ValidationError, self.res_partner.write, cr, uid, [p2],
{'child_ids': [(1, p3, {'parent_id': p3b}), (1, p3b, {'parent_id': p3})]})
def test_110_res_partner_recursion_multi_update(self):
""" multi-write on several partners in same hierarchy must not trigger a false cycle detection """
cr, uid, p1, p2, p3 = self.cr, self.uid, self.p1, self.p2, self.p3
self.assertTrue(self.res_partner.write(cr, uid, [p1,p2,p3], {'phone': '123456'}))
class test_translation(common.TransactionCase):
    """Checks that model-field translations (ir.translation records of type
    'model') are applied on read and propagated correctly by copy()."""

    def setUp(self):
        super(test_translation, self).setUp()
        self.res_category = self.registry('res.partner.category')
        self.ir_translation = self.registry('ir.translation')
        cr, uid = self.cr, self.uid
        # Load the French terms shipped with the 'base' module so that fr_FR
        # is a usable language for this transaction.
        self.registry('ir.translation').load_module_terms(cr, ['base'], ['fr_FR'])
        self.cat_id = self.res_category.create(cr, uid, {'name': 'Customers'})
        # Manually attach a French translation to this category's name field.
        self.ir_translation.create(cr, uid, {'name': 'res.partner.category,name', 'module':'base',
            'value': 'Clients', 'res_id': self.cat_id, 'lang':'fr_FR', 'state':'translated', 'type': 'model'})

    def test_101_create_translated_record(self):
        """Reading with/without a lang context returns the matching value."""
        cr, uid = self.cr, self.uid
        no_context_cat = self.res_category.browse(cr, uid, self.cat_id)
        self.assertEqual(no_context_cat.name, 'Customers', "Error in basic name_get")
        fr_context_cat = self.res_category.browse(cr, uid, self.cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients', "Translation not found")

    def test_102_duplicate_record(self):
        """copy() in a translated context must keep both language values."""
        cr, uid = self.cr, self.uid
        self.new_cat_id = self.res_category.copy(cr, uid, self.cat_id, context={'lang':'fr_FR'})
        no_context_cat = self.res_category.browse(cr, uid, self.new_cat_id)
        self.assertEqual(no_context_cat.name, 'Customers', "Duplication did not set untranslated value")
        fr_context_cat = self.res_category.browse(cr, uid, self.new_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients', "Did not found translation for initial value")

    def test_103_duplicate_record_fr(self):
        """copy() with a translated default only overrides the translated value."""
        cr, uid = self.cr, self.uid
        self.new_fr_cat_id = self.res_category.copy(cr, uid, self.cat_id, default={'name': 'Clients (copie)'}, context={'lang':'fr_FR'})
        no_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id)
        self.assertEqual(no_context_cat.name, 'Customers', "Duplication erased original untranslated value")
        fr_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients (copie)', "Did not used default value for translated value")

    def test_104_orderby_translated_field(self):
        """ Test search ordered by a translated field. """
        # create a category with a French translation
        category = self.env['res.partner.category'].create({'name': 'Padawans'})
        category_fr = category.with_context(lang='fr_FR')
        category_fr.write({'name': 'Apprentis'})
        # search for categories, and sort them by (translated) name
        categories = category_fr.search([('id', 'in', [self.cat_id, category.id])], order='name')
        self.assertEqual(categories.ids, [category.id, self.cat_id],
            "Search ordered by translated name should return Padawans (Apprentis) before Customers (Clients)")
test_state = None
#: Scratch dict shared by the test classes below to communicate across
#: class boundaries; created in setUpModule() and cleared in tearDownModule().
def setUpModule():
    """Reset the module-level shared state to a fresh, empty dictionary."""
    global test_state
    test_state = dict()
def tearDownModule():
    """Drop the shared state dictionary once the module's tests are done."""
    global test_state
    test_state = None
class TestPhaseInstall00(unittest2.TestCase):
    """
    WARNING: Relies on tests being run in alphabetical order
    """
    @classmethod
    def setUpClass(cls):
        # Shared marker mutated by the tests below to record execution order.
        cls.state = None

    def test_00_setup(self):
        type(self).state = 'init'

    @common.at_install(False)
    def test_01_no_install(self):
        # Must be skipped at install time; if it ever runs it poisons `state`
        # so that test_02_check fails.
        type(self).state = 'error'

    def test_02_check(self):
        self.assertEqual(
            self.state, 'init',
            "Testcase state should not have been transitioned from 00")
class TestPhaseInstall01(unittest2.TestCase):
    # Disable the at-install phase for the whole class; individual tests can
    # opt back in with the common.at_install decorator.
    at_install = False

    def test_default_norun(self):
        """An undecorated test must not run when the class disables at_install."""
        # Fixed typo in the failure message: "unmarket" -> "unmarked".
        self.fail("An unmarked test in a non-at-install case should not run")

    @common.at_install(True)
    def test_set_run(self):
        # Record (for TestPhaseInstall02) that the decorator locally
        # re-enabled this test.
        test_state['set_at_install'] = True
class TestPhaseInstall02(unittest2.TestCase):
    """
    Can't put the check for test_set_run in the same class: if
    @common.at_install does not work for test_set_run, it won't work for
    the other one either. Thus move checking of whether test_set_run has
    correctly run indeed to a separate class.

    Warning: relies on *classes* being run in alphabetical order in test
    modules
    """
    def test_check_state(self):
        # test_state['set_at_install'] is written by TestPhaseInstall01.
        self.assertTrue(
            test_state.get('set_at_install'),
            "The flag should be set if local overriding of runstate")
if __name__ == '__main__':
    # Allow running this test module directly with the unittest2 CLI runner.
    unittest2.main()
| agpl-3.0 |
ovnicraft/openerp-restaurant | sale_crm/report/__init__.py | 17 | 1106 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sales_crm_account_invoice_report
import sale_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
emilybache/texttest-runner | src/main/python/lib/capturemock/replayinfo.py | 1 | 11846 |
""" Module to manage the information in the file and return appropriate matches """
import logging, difflib, re, os
try: # Python 2.7, Python 3.x
from collections import OrderedDict
except ImportError: # Python 2.6 and earlier
from ordereddict import OrderedDict
try: # Python 2.x
import config
except ImportError: # Python 3.x
from . import config
class ReplayInfo:
    """Parses a recorded traffic file and serves up replies for new requests.

    The replay file is parsed into ``responseMap``, an ordered mapping from
    incoming traffic descriptions ("requests") to ReplayedResponseHandler
    objects holding the recorded replies, in file order.
    """
    def __init__(self, mode, replayFile, rcHandler):
        self.responseMap = OrderedDict()
        self.diag = logging.getLogger("Replay")
        self.replayItems = set()
        self.replayAll = mode == config.REPLAY
        self.exactMatching = rcHandler.getboolean("use_exact_matching", [ "general" ], False)
        if replayFile:
            trafficList = self.readIntoList(replayFile)
            self.parseTrafficList(trafficList)
            items = self.makeCommandItems(rcHandler.getIntercepts("command line")) + \
                    self.makePythonItems(rcHandler.getIntercepts("python"))
            self.replayItems = self.filterForReplay(items, trafficList)

    @staticmethod
    def filterForReplay(itemInfo, lines):
        """Return the subset of items whose regexp matches any recorded line."""
        newItems = set()
        for line in lines:
            for item, regexp in itemInfo:
                if regexp.search(line):
                    newItems.add(item)
        return newItems

    @staticmethod
    def makeCommandItems(commands):
        # One (command, regexp) pair per intercepted command; the regexp
        # matches a recorded "<-CMD:" line containing that command word.
        return [ (command, re.compile("<-CMD:([^ ]* )*" + command + "( [^ ]*)*")) for command in commands ]

    @staticmethod
    def makePythonItems(pythonAttrs):
        # Matches "<-PYT:" lines for the attribute itself or its import.
        return [ (attr, re.compile("<-PYT:(import )?" + attr)) for attr in pythonAttrs ]

    def isActiveForAll(self):
        return len(self.responseMap) > 0 and self.replayAll

    def isActiveFor(self, traffic):
        if len(self.responseMap) == 0:
            return False
        elif self.replayAll:
            return True
        else:
            return traffic.isMarkedForReplay(self.replayItems, self.responseMap.keys())

    def getTrafficLookupKey(self, trafficStr):
        # If we're matching server communications it means we're 'playing client'
        # In this case we should just send all our stuff in order and not worry about matching things.
        return "<-SRV" if trafficStr.startswith("<-SRV") else trafficStr

    def responseCompleted(self, currResponseHandlers, indentLevel, fromSUT):
        """Decide whether the handler on top of the stack is finished, based on
        the indentation level and direction of the next recorded line."""
        prevIndentLevel = len(currResponseHandlers) - 1
        if indentLevel < prevIndentLevel:
            return True
        elif indentLevel == prevIndentLevel:
            _, prevFromSUT = currResponseHandlers[-1]
            return fromSUT == prevFromSUT
        else:
            return False

    def parseTrafficList(self, trafficList):
        """Build responseMap from the raw traffic entries.

        Maintains a stack of (responseHandler, fromSUT) pairs mirroring the
        nesting (indentation) of the recorded traffic.
        """
        currResponseHandlers = []
        for trafficStr in trafficList:
            prefix = trafficStr.split(":")[0]
            # Two spaces of indentation per nesting level, offset by the
            # fixed-width direction marker.
            indentLevel = int(len(prefix) / 2) - 2
            fromSUT = prefix.startswith("<-")
            while self.responseCompleted(currResponseHandlers, indentLevel, fromSUT):
                currResponseHandlers.pop()
            if currResponseHandlers and (not fromSUT or indentLevel % 2 == 1):
                responseHandler, _ = currResponseHandlers[-1]
                responseHandler.addResponse(trafficStr)
            if fromSUT or indentLevel > len(currResponseHandlers) - 1:
                currTrafficIn = self.getTrafficLookupKey(trafficStr.strip())
                responseHandler = self.responseMap.get(currTrafficIn)
                if responseHandler:
                    # Repeated request: open a new reply group for it.
                    responseHandler.newResponse()
                    if prefix.endswith("PYT") and not "(" in trafficStr:
                        self.registerIntermediateCalls(responseHandler)
                else:
                    responseHandler = ReplayedResponseHandler()
                    self.responseMap[currTrafficIn] = responseHandler
                if indentLevel > len(currResponseHandlers) - 1:
                    currResponseHandlers.append((responseHandler, fromSUT))
                else:
                    currResponseHandlers[-1] = responseHandler, fromSUT
        self.diag.debug("Replay info " + repr(self.responseMap))

    def registerIntermediateCalls(self, currResponseHandler):
        """Record the call handlers seen between two occurrences of the same
        attribute access, so replies are only advanced once they were used."""
        intermediate = []
        for trafficIn in reversed(self.responseMap):
            responseHandler = self.responseMap[trafficIn]
            if responseHandler is currResponseHandler:
                break
            if "(" in trafficIn:
                intermediate.insert(0, responseHandler)
        currResponseHandler.addIntermediate(intermediate)

    def readIntoList(self, replayFile):
        """Split the replay file into one string per traffic entry.

        A new entry starts at each line carrying a short "<-XXX"/"XXX->"
        prefix; other lines are continuations of the current entry.
        """
        trafficList = []
        currTraffic = ""
        # Mode "r" rather than the former "rU": universal newlines are the
        # default in Python 3 and the "U" flag was removed in Python 3.11.
        # The with-statement also closes the handle, which was leaked before.
        with open(replayFile, "r") as f:
            for line in f:
                prefix = line.split(":")[0]
                if len(prefix) < 10 and (prefix.startswith("<-") or prefix[-5:-3] == "->"):
                    if currTraffic:
                        trafficList.append(currTraffic)
                    currTraffic = ""
                currTraffic += line
        if currTraffic:
            trafficList.append(currTraffic)
        return trafficList

    def readReplayResponses(self, traffic, allClasses, exact=False):
        # We return the response matching the traffic in if we can, otherwise
        # the one that is most similar to it
        if not traffic.hasInfo():
            return []
        responseMapKey = self.getResponseMapKey(traffic, exact)
        if responseMapKey:
            return self.responseMap[responseMapKey].makeResponses(allClasses)
        else:
            return []

    def findResponseToTrafficStartingWith(self, prefix):
        """Return the first recorded reply (minus its marker) for any request
        whose text starts with the given prefix, or None."""
        for currDesc, responseHandler in self.responseMap.items():
            _, text = currDesc.split(":", 1)
            if text.startswith(prefix):
                responses, _ = responseHandler.getCurrentStrings()
                if len(responses):
                    return responses[0][6:]

    def getResponseMapKey(self, traffic, exact):
        """Find the responseMap key for the given traffic, exactly if possible,
        otherwise by fuzzy matching (unless exact matching is configured)."""
        desc = self.getTrafficLookupKey(traffic.getDescription())
        self.diag.debug("Trying to match '" + desc + "'")
        if desc in self.responseMap:
            self.diag.debug("Found exact match")
            return desc
        elif not exact:
            if self.exactMatching:
                raise config.CaptureMockReplayError("Could not find any replay request matching '" + desc + "'")
            else:
                return self.findBestMatch(desc)

    def findBestMatch(self, desc):
        """Pick the recorded request of the same type most similar to desc."""
        descWords = self.getWords(desc)
        bestMatch = None
        bestMatchInfo = set(), 100000
        for currDesc, responseHandler in self.responseMap.items():
            if self.sameType(desc, currDesc):
                descToCompare = currDesc
                self.diag.debug("Comparing with '" + descToCompare + "'")
                matchInfo = self.getWords(descToCompare), responseHandler.getUnmatchedResponseCount()
                if self.isBetterMatch(matchInfo, bestMatchInfo, descWords):
                    bestMatchInfo = matchInfo
                    bestMatch = currDesc
        if bestMatch is not None:
            self.diag.debug("Best match chosen as '" + bestMatch + "'")
            return bestMatch

    def sameType(self, desc1, desc2):
        # Compare the 3-character type code, e.g. "CMD" or "PYT".
        return desc1[2:5] == desc2[2:5]

    def getWords(self, desc):
        # Heuristic decisions trying to make the best of inexact matches
        separators = [ "/", "(", ")", "\\", None ] # the last means whitespace...
        return self._getWords(desc, separators)

    def _getWords(self, desc, separators):
        # Recursively split on each separator in turn.
        if len(separators) == 0:
            return [ desc ]
        words = []
        for part in desc.split(separators[0]):
            words += self._getWords(part, separators[1:])
        return words

    def getMatchingBlocks(self, list1, list2):
        matcher = difflib.SequenceMatcher(None, list1, list2)
        return list(matcher.get_matching_blocks())

    def commonElementCount(self, blocks):
        return sum((block.size for block in blocks))

    def nonMatchingSequenceCount(self, blocks):
        # The terminating dummy block is not a real mismatch; neither is a
        # final block that directly continues the previous one.
        if len(blocks) > 1 and self.lastBlockReachesEnd(blocks):
            return len(blocks) - 2
        else:
            return len(blocks) - 1

    def lastBlockReachesEnd(self, blocks):
        return blocks[-2].a + blocks[-2].size == blocks[-1].a and \
               blocks[-2].b + blocks[-2].size == blocks[-1].b

    def isBetterMatch(self, info1, info2, targetWords):
        """Rank candidate 1 against candidate 2 for similarity to targetWords:
        more common words, then fewer gaps, then more unused replies."""
        words1, unmatchedCount1 = info1
        words2, unmatchedCount2 = info2
        blocks1 = self.getMatchingBlocks(words1, targetWords)
        blocks2 = self.getMatchingBlocks(words2, targetWords)
        common1 = self.commonElementCount(blocks1)
        common2 = self.commonElementCount(blocks2)
        self.diag.debug("Words in common " + repr(common1) + " vs " + repr(common2))
        if common1 > common2:
            return True
        elif common1 < common2:
            return False

        nonMatchCount1 = self.nonMatchingSequenceCount(blocks1)
        nonMatchCount2 = self.nonMatchingSequenceCount(blocks2)
        self.diag.debug("Non matching sequences " + repr(nonMatchCount1) + " vs " + repr(nonMatchCount2))
        if nonMatchCount1 < nonMatchCount2:
            return True
        elif nonMatchCount1 > nonMatchCount2:
            return False

        self.diag.debug("Unmatched count difference " + repr(unmatchedCount1) + " vs " + repr(unmatchedCount2))
        return unmatchedCount1 > unmatchedCount2
# Need to handle multiple replies to the same question
# Need to handle multiple replies to the same question
class ReplayedResponseHandler:
    """Stores the recorded replies for one request and hands them out in order.

    ``timesChosen`` counts how many reply groups have been consumed so far;
    ``intermediateHandlers`` optionally gates advancing on other calls having
    been replayed in between.
    """
    def __init__(self):
        self.timesChosen = 0
        self.responses = [[]]
        self.intermediateHandlers = []

    def __repr__(self):
        return repr(self.responses)

    def addIntermediate(self, handlers):
        # Handlers recorded between two occurrences of this request.
        self.intermediateHandlers.append(handlers)

    def newResponse(self):
        # Open a fresh reply group for a repeated request.
        self.responses.append([])

    def addResponse(self, trafficStr):
        self.responses[-1].append(trafficStr)

    def allIntermediatesCalled(self):
        # True once every intermediate handler of the previous round was used.
        previousRound = self.intermediateHandlers[self.timesChosen - 1]
        return all(handler.timesChosen for handler in previousRound)

    def getCurrentStrings(self):
        """Return (reply strings, amount to advance ``timesChosen`` by)."""
        if not self.intermediateHandlers:
            # Simple case: hand groups out in order, repeating the first one
            # once they are exhausted.
            if self.timesChosen < len(self.responses):
                return self.responses[self.timesChosen], 1
            return self.responses[0], 1
        if self.timesChosen == 0:
            return self.responses[0], 1
        if not self.allIntermediatesCalled():
            # Intermediate calls still pending: repeat without advancing.
            return self.responses[self.timesChosen - 1], 0
        hasMore = self.timesChosen < len(self.intermediateHandlers)
        return self.responses[self.timesChosen], int(hasMore)

    def getUnmatchedResponseCount(self):
        return len(self.responses) - self.timesChosen

    def makeResponses(self, allClasses):
        """Convert the current reply strings into (trafficClass, text) pairs
        and advance the consumption counter accordingly."""
        replyStrings, increment = self.getCurrentStrings()
        made = []
        for replyStr in replyStrings:
            marker, text = replyStr.split(":", 1)
            typeId = marker[-3:]
            made.extend((cls, text) for cls in allClasses if cls.typeId == typeId)
        self.timesChosen += increment
        return made
def filterFileForReplay(itemInfo, replayFile):
    """Return the subset of itemInfo items that appear anywhere in replayFile."""
    # Mode "r" rather than the former "rU": universal newlines are the default
    # in Python 3 and the "U" open flag was removed in Python 3.11.
    with open(replayFile, "r") as f:
        return ReplayInfo.filterForReplay(itemInfo, f)
def filterCommands(commands, replayFile):
    """Return those command names that are actually used in the replay file."""
    commandItems = ReplayInfo.makeCommandItems(commands)
    return filterFileForReplay(commandItems, replayFile)
def filterPython(pythonAttrs, replayFile):
    """Return those python attributes that are actually used in the replay file."""
    pythonItems = ReplayInfo.makePythonItems(pythonAttrs)
    return filterFileForReplay(pythonItems, replayFile)
| mit |
manipopopo/tensorflow | tensorflow/python/data/util/sparse_test.py | 32 | 12006 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class SparseTest(test.TestCase):
  """Table-driven tests for tf.data's sparse-tensor structure utilities."""

  def testAnySparse(self):
    # Each case maps a (possibly nested) structure of component classes to
    # whether it contains at least one SparseTensor.
    test_cases = (
        {
            "classes": (),
            "expected": False
        },
        {
            "classes": (ops.Tensor),
            "expected": False
        },
        {
            "classes": (((ops.Tensor))),
            "expected": False
        },
        {
            "classes": (ops.Tensor, ops.Tensor),
            "expected": False
        },
        {
            "classes": (ops.Tensor, sparse_tensor.SparseTensor),
            "expected": True
        },
        {
            "classes": (sparse_tensor.SparseTensor, sparse_tensor.SparseTensor),
            "expected":
                True
        },
        {
            "classes": (sparse_tensor.SparseTensor, ops.Tensor),
            "expected": True
        },
        {
            "classes": (((sparse_tensor.SparseTensor))),
            "expected": True
        },
    )
    for test_case in test_cases:
      self.assertEqual(
          sparse.any_sparse(test_case["classes"]), test_case["expected"])

  def assertShapesEqual(self, a, b):
    """Assert two nested structures of TensorShapes are element-wise equal."""
    for a, b in zip(nest.flatten(a), nest.flatten(b)):
      self.assertEqual(a.ndims, b.ndims)
      if a.ndims is None:
        continue
      for c, d in zip(a.as_list(), b.as_list()):
        self.assertEqual(c, d)

  def testAsDenseShapes(self):
    # SparseTensor components lose their static shape (become unknown);
    # dense components keep theirs.
    test_cases = (
        {
            "types": (),
            "classes": (),
            "expected": ()
        },
        {
            "types": tensor_shape.scalar(),
            "classes": ops.Tensor,
            "expected": tensor_shape.scalar()
        },
        {
            "types": tensor_shape.scalar(),
            "classes": sparse_tensor.SparseTensor,
            "expected": tensor_shape.unknown_shape()
        },
        {
            "types": (tensor_shape.scalar()),
            "classes": (ops.Tensor),
            "expected": (tensor_shape.scalar())
        },
        {
            "types": (tensor_shape.scalar()),
            "classes": (sparse_tensor.SparseTensor),
            "expected": (tensor_shape.unknown_shape())
        },
        {
            "types": (tensor_shape.scalar(), ()),
            "classes": (ops.Tensor, ()),
            "expected": (tensor_shape.scalar(), ())
        },
        {
            "types": ((), tensor_shape.scalar()),
            "classes": ((), ops.Tensor),
            "expected": ((), tensor_shape.scalar())
        },
        {
            "types": (tensor_shape.scalar(), ()),
            "classes": (sparse_tensor.SparseTensor, ()),
            "expected": (tensor_shape.unknown_shape(), ())
        },
        {
            "types": ((), tensor_shape.scalar()),
            "classes": ((), sparse_tensor.SparseTensor),
            "expected": ((), tensor_shape.unknown_shape())
        },
        {
            "types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
            "classes": (ops.Tensor, (), ops.Tensor),
            "expected": (tensor_shape.scalar(), (), tensor_shape.scalar())
        },
        {
            "types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
            "classes": (sparse_tensor.SparseTensor, (),
                        sparse_tensor.SparseTensor),
            "expected": (tensor_shape.unknown_shape(), (),
                         tensor_shape.unknown_shape())
        },
        {
            "types": ((), tensor_shape.scalar(), ()),
            "classes": ((), ops.Tensor, ()),
            "expected": ((), tensor_shape.scalar(), ())
        },
        {
            "types": ((), tensor_shape.scalar(), ()),
            "classes": ((), sparse_tensor.SparseTensor, ()),
            "expected": ((), tensor_shape.unknown_shape(), ())
        },
    )
    for test_case in test_cases:
      self.assertShapesEqual(
          sparse.as_dense_shapes(test_case["types"], test_case["classes"]),
          test_case["expected"])

  def testAsDenseTypes(self):
    # SparseTensor components are represented as dtypes.variant; dense
    # components keep their dtype.
    test_cases = (
        {
            "types": (),
            "classes": (),
            "expected": ()
        },
        {
            "types": dtypes.int32,
            "classes": ops.Tensor,
            "expected": dtypes.int32
        },
        {
            "types": dtypes.int32,
            "classes": sparse_tensor.SparseTensor,
            "expected": dtypes.variant
        },
        {
            "types": (dtypes.int32),
            "classes": (ops.Tensor),
            "expected": (dtypes.int32)
        },
        {
            "types": (dtypes.int32),
            "classes": (sparse_tensor.SparseTensor),
            "expected": (dtypes.variant)
        },
        {
            "types": (dtypes.int32, ()),
            "classes": (ops.Tensor, ()),
            "expected": (dtypes.int32, ())
        },
        {
            "types": ((), dtypes.int32),
            "classes": ((), ops.Tensor),
            "expected": ((), dtypes.int32)
        },
        {
            "types": (dtypes.int32, ()),
            "classes": (sparse_tensor.SparseTensor, ()),
            "expected": (dtypes.variant, ())
        },
        {
            "types": ((), dtypes.int32),
            "classes": ((), sparse_tensor.SparseTensor),
            "expected": ((), dtypes.variant)
        },
        {
            "types": (dtypes.int32, (), dtypes.int32),
            "classes": (ops.Tensor, (), ops.Tensor),
            "expected": (dtypes.int32, (), dtypes.int32)
        },
        {
            "types": (dtypes.int32, (), dtypes.int32),
            "classes": (sparse_tensor.SparseTensor, (),
                        sparse_tensor.SparseTensor),
            "expected": (dtypes.variant, (), dtypes.variant)
        },
        {
            "types": ((), dtypes.int32, ()),
            "classes": ((), ops.Tensor, ()),
            "expected": ((), dtypes.int32, ())
        },
        {
            "types": ((), dtypes.int32, ()),
            "classes": ((), sparse_tensor.SparseTensor, ()),
            "expected": ((), dtypes.variant, ())
        },
    )
    for test_case in test_cases:
      self.assertEqual(
          sparse.as_dense_types(test_case["types"], test_case["classes"]),
          test_case["expected"])

  def testGetClasses(self):
    # get_classes maps concrete values to their component class.
    s = sparse_tensor.SparseTensor(indices=[[0]], values=[1], dense_shape=[1])
    d = ops.Tensor
    t = sparse_tensor.SparseTensor
    test_cases = (
        {
            "classes": (),
            "expected": ()
        },
        {
            "classes": s,
            "expected": t
        },
        {
            "classes": constant_op.constant([1]),
            "expected": d
        },
        {
            "classes": (s),
            "expected": (t)
        },
        {
            "classes": (constant_op.constant([1])),
            "expected": (d)
        },
        {
            "classes": (s, ()),
            "expected": (t, ())
        },
        {
            "classes": ((), s),
            "expected": ((), t)
        },
        {
            "classes": (constant_op.constant([1]), ()),
            "expected": (d, ())
        },
        {
            "classes": ((), constant_op.constant([1])),
            "expected": ((), d)
        },
        {
            "classes": (s, (), constant_op.constant([1])),
            "expected": (t, (), d)
        },
        {
            "classes": ((), s, ()),
            "expected": ((), t, ())
        },
        {
            "classes": ((), constant_op.constant([1]), ()),
            "expected": ((), d, ())
        },
    )
    for test_case in test_cases:
      self.assertEqual(
          sparse.get_classes(test_case["classes"]), test_case["expected"])

  def assertSparseValuesEqual(self, a, b):
    """Assert two values are equal, comparing SparseTensors component-wise."""
    if not isinstance(a, sparse_tensor.SparseTensor):
      self.assertFalse(isinstance(b, sparse_tensor.SparseTensor))
      self.assertEqual(a, b)
      return
    self.assertTrue(isinstance(b, sparse_tensor.SparseTensor))
    with self.test_session():
      self.assertAllEqual(a.eval().indices, b.eval().indices)
      self.assertAllEqual(a.eval().values, b.eval().values)
      self.assertAllEqual(a.eval().dense_shape, b.eval().dense_shape)

  def testSerializeDeserialize(self):
    # Round trip: serialize_sparse_tensors then deserialize_sparse_tensors
    # must reproduce the original structure and values.
    test_cases = (
        (),
        sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
        sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
        sparse_tensor.SparseTensor(
            indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
        (sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
        (sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
        ((),
         sparse_tensor.SparseTensor(
             indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
    )
    for expected in test_cases:
      classes = sparse.get_classes(expected)
      shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
                                  classes)
      types = nest.map_structure(lambda _: dtypes.int32, classes)
      actual = sparse.deserialize_sparse_tensors(
          sparse.serialize_sparse_tensors(expected), types, shapes,
          sparse.get_classes(expected))
      nest.assert_same_structure(expected, actual)
      for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
        self.assertSparseValuesEqual(a, e)

  def testSerializeManyDeserialize(self):
    # Same round trip via the batched (serialize_many) code path.
    test_cases = (
        (),
        sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
        sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
        sparse_tensor.SparseTensor(
            indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
        (sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
        (sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
        ((),
         sparse_tensor.SparseTensor(
             indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
    )
    for expected in test_cases:
      classes = sparse.get_classes(expected)
      shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
                                  classes)
      types = nest.map_structure(lambda _: dtypes.int32, classes)
      actual = sparse.deserialize_sparse_tensors(
          sparse.serialize_many_sparse_tensors(expected), types, shapes,
          sparse.get_classes(expected))
      nest.assert_same_structure(expected, actual)
      for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
        self.assertSparseValuesEqual(a, e)
if __name__ == "__main__":
  # Allow running this test file directly with the TF test runner.
  test.main()
| apache-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.3/django/contrib/gis/geos/tests/test_mutable_list.py | 244 | 14587 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
from django.contrib.gis.geos.mutable_list import ListMixin
from django.utils import unittest
class UserListA(ListMixin):
    """List-like test double whose backing store is an immutable tuple, so the
    ListMixin machinery must replace the whole container on every write."""
    _mytype = tuple

    def __init__(self, i_list, *args, **kwargs):
        self._list = self._mytype(i_list)
        super(UserListA, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._list)

    def __str__(self):
        return str(self._list)

    def __repr__(self):
        return repr(self._list)

    def _set_list(self, length, items):
        # Building self._mytype(items) directly would also work, but filling a
        # scratch list of the requested size first exercises the `length`
        # parameter as well.
        scratch = ['x'] * length
        for idx, val in enumerate(items):
            scratch[idx] = val
        self._list = self._mytype(scratch)

    def _get_single_external(self, index):
        return self._list[index]
class UserListB(UserListA):
    # Variant backed by a mutable list, providing the single-item assignment
    # hook so ListMixin can update in place instead of rebuilding.
    _mytype = list

    def _set_single(self, index, value):
        self._list[index] = value
def nextRange(length):
nextRange.start += 100
return range(nextRange.start, nextRange.start + length)
nextRange.start = 0
class ListMixinTest(unittest.TestCase):
"""
Tests base class ListMixin by comparing a list clone which is
a ListMixin subclass with a real Python list.
"""
limit = 3
listType = UserListA
def lists_of_len(self, length=None):
if length is None: length = self.limit
pl = range(length)
return pl, self.listType(pl)
def limits_plus(self, b):
return range(-self.limit - b, self.limit + b)
def step_range(self):
return range(-1 - self.limit, 0) + range(1, 1 + self.limit)
def test01_getslice(self):
'Slice retrieval'
pl, ul = self.lists_of_len()
for i in self.limits_plus(1):
self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
for j in self.limits_plus(1):
self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i,j))
for k in self.step_range():
self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i,j,k))
for k in self.step_range():
self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i,k))
self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i,k))
for k in self.step_range():
self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))
def test02_setslice(self):
'Slice assignment'
def setfcn(x,i,j,k,L): x[i:j:k] = range(L)
pl, ul = self.lists_of_len()
for slen in range(self.limit + 1):
ssl = nextRange(slen)
ul[:] = ssl
pl[:] = ssl
self.assertEqual(pl, ul[:], 'set slice [:]')
for i in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:] = ssl
pl[i:] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))
ssl = nextRange(slen)
ul[:i] = ssl
pl[:i] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))
for j in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:j] = ssl
pl[i:j] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))
for k in self.step_range():
ssl = nextRange( len(ul[i:j:k]) )
ul[i:j:k] = ssl
pl[i:j:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))
sliceLen = len(ul[i:j:k])
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
if sliceLen > 2:
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)
for k in self.step_range():
ssl = nextRange( len(ul[i::k]) )
ul[i::k] = ssl
pl[i::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))
ssl = nextRange( len(ul[:i:k]) )
ul[:i:k] = ssl
pl[:i:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))
for k in self.step_range():
ssl = nextRange(len(ul[::k]))
ul[::k] = ssl
pl[::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))
def test03_delslice(self):
'Delete slice'
for Len in range(self.limit):
pl, ul = self.lists_of_len(Len)
del pl[:]
del ul[:]
self.assertEqual(pl[:], ul[:], 'del slice [:]')
for i in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:]
del ul[i:]
self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
pl, ul = self.lists_of_len(Len)
del pl[:i]
del ul[:i]
self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
for j in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:j]
del ul[i:j]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i,j))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[i:j:k]
del ul[i:j:k]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i,j,k))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[:i:k]
del ul[:i:k]
self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i,k))
pl, ul = self.lists_of_len(Len)
del pl[i::k]
del ul[i::k]
self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i,k))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[::k]
del ul[::k]
self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))
def test04_get_set_del_single(self):
'Get/set/delete single item'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
pl[i] = 100
ul[i] = 100
self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
del pl[i]
del ul[i]
self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)
def test05_out_of_range_exceptions(self):
'Out of range exceptions'
def setfcn(x, i): x[i] = 20
def getfcn(x, i): return x[i]
def delfcn(x, i): del x[i]
pl, ul = self.lists_of_len()
for i in (-1 - self.limit, self.limit):
self.assertRaises(IndexError, setfcn, ul, i) # 'set index %d' % i)
self.assertRaises(IndexError, getfcn, ul, i) # 'get index %d' % i)
self.assertRaises(IndexError, delfcn, ul, i) # 'del index %d' % i)
def test06_list_methods(self):
'List methods'
pl, ul = self.lists_of_len()
pl.append(40)
ul.append(40)
self.assertEqual(pl[:], ul[:], 'append')
pl.extend(range(50,55))
ul.extend(range(50,55))
self.assertEqual(pl[:], ul[:], 'extend')
pl.reverse()
ul.reverse()
self.assertEqual(pl[:], ul[:], 'reverse')
for i in self.limits_plus(1):
pl, ul = self.lists_of_len()
pl.insert(i,50)
ul.insert(i,50)
self.assertEqual(pl[:], ul[:], 'insert at %d' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(), ul.pop(i), 'popped value')
self.assertEqual(pl[:], ul[:], 'after pop')
pl, ul = self.lists_of_len()
def popfcn(x, i): x.pop(i)
self.assertRaises(IndexError, popfcn, ul, self.limit)
self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)
pl, ul = self.lists_of_len()
for val in range(self.limit):
self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)
for val in self.limits_plus(2):
self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)
for val in range(self.limit):
pl, ul = self.lists_of_len()
pl.remove(val)
ul.remove(val)
self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)
def indexfcn(x, v): return x.index(v)
def removefcn(x, v): return x.remove(v)
self.assertRaises(ValueError, indexfcn, ul, 40)
self.assertRaises(ValueError, removefcn, ul, 40)
def test07_allowed_types(self):
'Type-restricted list'
pl, ul = self.lists_of_len()
ul._allowed = (int, long)
ul[1] = 50
ul[:2] = [60, 70, 80]
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), ('hello','goodbye'))
def test08_min_length(self):
'Length limits'
pl, ul = self.lists_of_len()
ul._minlength = 1
def delfcn(x,i): del x[:i]
def setfcn(x,i): x[:i] = []
for i in range(self.limit - ul._minlength + 1, self.limit + 1):
self.assertRaises(ValueError, delfcn, ul, i)
self.assertRaises(ValueError, setfcn, ul, i)
del ul[:ul._minlength]
ul._maxlength = 4
for i in range(0, ul._maxlength - len(ul)):
ul.append(i)
self.assertRaises(ValueError, ul.append, 10)
def test09_iterable_check(self):
'Error on assigning non-iterable to slice'
pl, ul = self.lists_of_len(self.limit + 1)
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), 2)
def test10_checkindex(self):
'Index check'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
if i < 0:
self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
else:
self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')
for i in (-self.limit - 1, self.limit):
self.assertRaises(IndexError, ul._checkindex, i)
ul._IndexError = TypeError
self.assertRaises(TypeError, ul._checkindex, -self.limit - 1)
def test_11_sorting(self):
'Sorting'
pl, ul = self.lists_of_len()
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort()
ul.sort()
self.assertEqual(pl[:], ul[:], 'sort')
mid = pl[len(pl) / 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort(reverse=True)
ul.sort(reverse=True)
self.assertEqual(pl[:], ul[:], 'sort w/ reverse')
mid = pl[len(pl) / 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
    def test_12_arithmetic(self):
        'Arithmetic'
        pl, ul = self.lists_of_len()
        # NOTE(review): relies on Python 2, where range() returns a list
        # that supports list + list concatenation.
        al = range(10,14)
        # Concatenation works both ways and preserves the custom type.
        self.assertEqual(list(pl + al), list(ul + al), 'add')
        self.assertEqual(type(ul), type(ul + al), 'type of add result')
        self.assertEqual(list(al + pl), list(al + ul), 'radd')
        self.assertEqual(type(al), type(al + ul), 'type of radd result')
        # In-place add must mutate the same object, not rebind a new one.
        objid = id(ul)
        pl += al
        ul += al
        self.assertEqual(pl[:], ul[:], 'in-place add')
        self.assertEqual(objid, id(ul), 'in-place add id')
        # Repetition, including the empty-result (n <= 0) cases.
        for n in (-1,0,1,3):
            pl, ul = self.lists_of_len()
            self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
            self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
            self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
            self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
            objid = id(ul)
            pl *= n
            ul *= n
            self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
            self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)
        # Rich comparisons: equal lists, then longer/shorter variants.
        pl, ul = self.lists_of_len()
        self.assertEqual(pl, ul, 'cmp for equal')
        self.assertTrue(pl >= ul, 'cmp for gte self')
        self.assertTrue(pl <= ul, 'cmp for lte self')
        self.assertTrue(ul >= pl, 'cmp for self gte')
        self.assertTrue(ul <= pl, 'cmp for self lte')
        self.assertTrue(pl + [5] > ul, 'cmp')
        self.assertTrue(pl + [5] >= ul, 'cmp')
        self.assertTrue(pl < ul + [2], 'cmp')
        self.assertTrue(pl <= ul + [2], 'cmp')
        self.assertTrue(ul + [5] > pl, 'cmp')
        self.assertTrue(ul + [5] >= pl, 'cmp')
        self.assertTrue(ul < pl + [2], 'cmp')
        self.assertTrue(ul <= pl + [2], 'cmp')
        # Element-wise ordering once the lists differ.
        pl[1] = 20
        self.assertTrue(pl > ul, 'cmp for gt self')
        self.assertTrue(ul < pl, 'cmp for self lt')
        pl[1] = -20
        self.assertTrue(pl < ul, 'cmp for lt self')
        # NOTE(review): duplicated assertion — the second line was probably
        # meant to be the '<=' variant ('cmp for lte self').
        self.assertTrue(pl < ul, 'cmp for lt self')
class ListMixinTestSingle(ListMixinTest):
    # Re-run the entire ListMixinTest suite against the UserListB variant
    # of the list type; only the class under test changes.
    listType = UserListB
def suite():
    """Build a TestSuite covering both ListMixin test case classes."""
    tests = unittest.TestSuite()
    for case in (ListMixinTest, ListMixinTestSingle):
        tests.addTest(unittest.makeSuite(case))
    return tests
def run(verbosity=2):
    """Run the full suite through a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
# Allow running this test module directly as a script.
if __name__ == '__main__':
    run()
| apache-2.0 |
bbfamily/abu | abupy/UmpBu/ABuUmpEdgeBase.py | 1 | 25925 | # -*- encoding:utf-8 -*-
"""
边裁基础实现模块
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import logging
import os
from abc import abstractmethod
import numpy as np
import sklearn.preprocessing as preprocessing
from enum import Enum
from sklearn.metrics.pairwise import pairwise_distances
from ..CoreBu import ABuEnv
from ..UtilBu import ABuFileUtil
from ..SimilarBu.ABuCorrcoef import ECoreCorrType, corr_xy
from .ABuUmpBase import AbuUmpBase
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import filter
__author__ = '阿布'
__weixin__ = 'abu_quant'
"""在predict中度量输入的x和矩阵中其它矢量的pairwise_distances后,通过if distances_cx.min() > K_DISTANCE_THRESHOLD过滤"""
K_DISTANCE_THRESHOLD = 0.668
"""从第一轮pairwise_distances的结果使用argsort后取K_N_TOP_SEED个做为第二轮相似匹配的种子"""
K_N_TOP_SEED = 100
"""完成第二轮相似度匹配后使用K_SIMILAR_THRESHOLD做为阀值过滤后得到有投票权的向量"""
K_SIMILAR_THRESHOLD = 0.91
"""
K_CG_TOP_RATE做为计算win_top和loss_top
win_top = len(self.fiter.df['profit_cg']) - len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
eg:
len(self.fiter.df['profit_cg']) == 100
-> win_top = 100 - 100 * 0.236
-> win_top = 100 - 23.6
-> win_top = 76.4
loss_top = len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
eg:
len(self.fiter.df['profit_cg']) == 100
-> loss_top = 100 * 0.236
-> loss_top = 23.6
"""
K_CG_TOP_RATE = 0.236
"""在predict中最后的投票结果需要大于一定比例才被认可, 即对有争议的投票需要一方拥有相对优势才认可"""
K_EDGE_JUDGE_RATE = 0.618
class EEdgeType(Enum):
    """Buckets produced by ranking the training trades' profit/loss."""
    """Trades with the largest losses — the bottom tier."""
    E_EEdge_TOP_LOSS = -1
    """Ordinary winning/losing trades; the majority of the training set."""
    E_EEdge_NORMAL = 0
    """Trades with the largest profits — the top tier."""
    E_STORE_TOP_WIN = 1
"""在第二轮的相似度匹配中使用的方法,传递给ABuCorrcoef.corr_xy函数"""
g_similar_type = ECoreCorrType.E_CORE_TYPE_PEARS
class AbuUmpEdgeBase(AbuUmpBase):
"""边裁基类"""
    @classmethod
    def ump_edge_clf_dump(cls, orders_pd_train, show_info=False, market_name=None):
        """
        Class method: build an AbuUmpEdgeBase subclass instance from the
        training trade set, collect its features with fit(), then serialize
        it locally with dump_clf().

        :param orders_pd_train: training trade set, a pd.DataFrame
        :param show_info: whether to print edge.fiter.df.head(), default False
        :param market_name: unique storage name for the judge; default None
                            derives the name from the current market in env
        :return: the AbuUmpEdgeBase subclass instance
        """
        edge = cls(orders_pd_train, market_name=market_name)
        edge.fit()
        edge.dump_clf()
        if show_info:
            print('edge.fiter.df.head():\n', edge.fiter.df.head())
        return edge
    @abstractmethod
    def get_fiter_class(self):
        """Abstract; subclasses must return the class that selects and builds this judge's features."""
        pass
    @abstractmethod
    def get_predict_col(self):
        """Abstract; subclasses must return the feature keys this judge needs for prediction."""
        pass
    @classmethod
    @abstractmethod
    def class_unique_id(cls):
        """
        Unique keyword name of the concrete ump class; class method, abstract.
        Mainly used when external users register custom umps — the user must
        guarantee uniqueness, no internal check is performed.
        See extend_ump_block in ABuUmpManager for usage.
        """
        pass
    def __init__(self, orders_pd=None, predict=False, market_name=None, **kwarg):
        """
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest,
                          ideally already measured by AbuMetricsBase.fit_metrics
        :param predict: whether this judge is built for prediction rather than training
        :param market_name: unique storage name for the judge; default None
                            derives the name from the current market in env
        :param kwarg: passed through to the fiter_cls constructor:
                      self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)
        """
        # Feature-selection class used to build this judge's features
        self.fiter_cls = self.get_fiter_class()
        # Scaler used to standardise the trade features uniformly
        self.scaler = preprocessing.StandardScaler()
        if isinstance(market_name, ABuEnv.EMarketTargetType):
            market_name = market_name.value
        # Both predict and training modes need the judge's unique name;
        # defaults to the current market's string name, e.g. 'us', 'cn'
        self.market_name = ABuEnv.g_market_target.value if market_name is None else market_name
        if not predict:
            # TODO: untangle the predict and training data logic
            if orders_pd is not None and 'profit_cg' not in orders_pd.columns:
                # profit_cg and friends only exist after AbuMetricsBase has run
                logging.info('you do better AbuMetricsBase.fit_metrics in orders_pd!!!!')
                from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
                # Only fit_metrics_order (not fit_metrics): futures/bitcoin have
                # their own metrics classes, fit_metrics_order is the common part
                AbuMetricsBase(orders_pd, None, None, None).fit_metrics_order()
            # Instantiate the feature builder self.fiter
            self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)
            """
            After construction self.fiter.df holds the feature frame, e.g.
            columns: profit, profit_cg, buy_deg_ang42, buy_deg_ang252,
            buy_deg_ang60, buy_deg_ang21 (one row per trade, indexed by date).
            """
            # svm is used by default; TODO: make the estimator configurable
            self.fiter().estimator.svc()
    def fit(self):
        """
        Fit the edge judge on the training set — much simpler than the main
        judge's fit. Trades are ranked by profit_cg and the ranking is cut
        into three buckets stored in a new 'rk' column:
            rk == -1  worst-loss trades (bottom K_CG_TOP_RATE of the ranking)
            rk ==  0  ordinary trades (the majority)
            rk ==  1  best-win trades (top K_CG_TOP_RATE of the ranking)
        A 'p_rk_cg' column with the raw profit_cg rank is added as well.

        The deliberately unbalanced win_top/loss_top cut points repeatedly
        skew the final vote probabilities on purpose: imbalance is a core
        design idea in quantitative trading, since the target itself is
        unbalanced (we want to win more than we lose).
        """
        # Rank the training trades by profit_cg; store the rank in a new column.
        # TODO: profit is currently unused for training; fold it into a combined rank weight
        self.fiter.df['p_rk_cg'] = self.fiter.df['profit_cg'].rank()
        """
        eg: self.fiter.df['p_rk_cg']
            2014-09-24    19.0
            2014-10-24    13.0
            ...
            2016-04-29    24.5
        """
        # K_CG_TOP_RATE=0.236: because the strategy's wins/losses are unbalanced,
        # win_top sits asymmetrically vs loss_top, building in a probability edge
        win_top = len(self.fiter.df['profit_cg']) - len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
        """
        eg: len(self.fiter.df['profit_cg']) == 100
            -> win_top = 100 - 100 * 0.236 = 76.4
        """
        loss_top = len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
        """
        eg: len(self.fiter.df['profit_cg']) == 100
            -> loss_top = 100 * 0.236 = 23.6
        """
        # New column 'rk', initialised to EEdgeType.E_EEdge_NORMAL.value, i.e. 0
        self.fiter.df['rk'] = EEdgeType.E_EEdge_NORMAL.value
        """
        win_top and loss_top split the ranking into three segments,
        rk: -1 (p_rk_cg < loss_top), 0 (middle), 1 (p_rk_cg > win_top).
        """
        # noinspection PyTypeChecker
        self.fiter.df['rk'] = np.where(self.fiter.df['p_rk_cg'] > win_top, EEdgeType.E_STORE_TOP_WIN.value,
                                       self.fiter.df['rk'])
        # noinspection PyTypeChecker
        self.fiter.df['rk'] = np.where(self.fiter.df['p_rk_cg'] < loss_top, EEdgeType.E_EEdge_TOP_LOSS.value,
                                       self.fiter.df['rk'])
    def dump_file_fn(self):
        """
        Local cache path for the edge judge, following the rule:
        ABuEnv.g_project_data_dir + 'ump/ump_edge_' + market_name + class_unique_id()
        """
        # TODO: if an existing judge would be overwritten, keep a backup and notify
        unique_ump_name = 'ump/ump_edge_{}_{}'.format(self.market_name, self.class_unique_id())
        return os.path.join(ABuEnv.g_project_data_dir, unique_ump_name)
    def dump_clf(self):
        """
        Serialising the edge judge is much simpler than the main judge's
        dump_clf: pack self.fiter.df and self.fiter.x into a dict df_x_dict
        and persist it with ABuFileUtil.dump_pickle.
        """
        df_x_dict = {'fiter_df': self.fiter.df, 'fiter_x': self.fiter.x}
        """
        eg: df_x_dict['fiter_x'] is the raw feature matrix, one row per trade:
        array([[ 3.378,   3.458,  3.458,  1.818],
               [ 0.191,   2.889,  2.809, -1.089],
               ...
               [ 1.445,  16.266,  4.615, -1.115]])
        """
        ABuFileUtil.dump_pickle(df_x_dict, self.dump_file_fn(), how='zero')
    def predict(self, **kwargs):
        """
        Edge-judge decision function: fetch the cached df_x_dict from
        CachedUmpManager and decide whether to veto the trade described by
        the keyword-argument features. Outline:

        1. Build the feature vector x for the new trade from kwargs.
        2. Concatenate x with the stored training matrix and standardise
           them together with one scaler.
        3. Measure pairwise_distances between x and every training vector.
        4. Keep the closest K_N_TOP_SEED rows as seeds for a second round
           of similarity matching.
        5. Correlate x with each seed; only rows whose similarity exceeds
           K_SIMILAR_THRESHOLD keep the right to vote.
        6. The remaining most-similar trades vote on the new trade using
           their (deliberately unbalanced) 'rk' labels.
        7. The verdict only counts when one side holds a clear relative
           majority (K_EDGE_JUDGE_RATE) — imbalance again, by design.

        :param kwargs: keyword features matching the subclass's
                       get_predict_col columns, e.g. buy_deg_ang42=3.378,
                       buy_deg_ang60=3.458, buy_deg_ang21=3.191
        :return: whether to intercept the trade described by kwargs:
                 pass: EEdgeType.E_EEdge_NORMAL or EEdgeType.E_STORE_TOP_WIN
                 block: EEdgeType.E_EEdge_TOP_LOSS
        """
        # All judges fetch their cached ump via CachedUmpManager; load_pickle on miss
        df_x_dict = AbuUmpBase.dump_clf_manager.get_ump(self)
        # Select the feature columns from df_x_dict['fiter_df'].columns
        feature_columns = df_x_dict['fiter_df'].columns.drop(['profit', 'profit_cg', 'p_rk_cg', 'rk'])
        """
        eg: Index(['profit', 'profit_cg', 'buy_deg_ang42', 'buy_deg_ang252',
                   'buy_deg_ang60', 'buy_deg_ang21', 'p_rk_cg', 'rk'])
            drop(['profit', 'profit_cg', 'p_rk_cg', 'rk'])
            -> ['buy_deg_ang42', 'buy_deg_ang252', 'buy_deg_ang60', 'buy_deg_ang21']
        """
        # eg, x: array([ 3.378, 3.458, 3.458, 1.818])
        x = np.array([kwargs[col] for col in feature_columns])
        x = x.reshape(1, -1)
        # Concatenate the new x onto the stored training matrix
        con_x = np.concatenate((x, df_x_dict['fiter_x']), axis=0)
        # Standardise the input x together with the original matrix
        con_x = self.scaler.fit_transform(con_x)
        # Compare con_x[0] (the input) against every other row via pairwise_distances
        distances_cx = pairwise_distances(con_x[0].reshape(1, -1), con_x[1:],
                                          metric='euclidean')
        distances_cx = distances_cx[0]
        """
        eg: distances_cx ~ array([0., 0.8432, 1.4371, 2.4178, ..., 1.1175])
        """
        # If even the closest stored vector exceeds the threshold the input is
        # considered out of sample: no verdict. K_DISTANCE_THRESHOLD = 0.668
        if distances_cx.min() > K_DISTANCE_THRESHOLD:
            return EEdgeType.E_EEdge_NORMAL
        distances_sort = distances_cx.argsort()
        """
        eg: distances_sort ~ array([ 0, 19, 28, 27, 10, 24, ..., 22, 23])
        """
        n_top = K_N_TOP_SEED if len(distances_cx) > K_N_TOP_SEED else len(distances_cx)
        # Keep the closest n_top rows as seeds for the second-round matching
        distances_sort = distances_sort[:n_top]
        # Second round: correlate con_x[0] against each seed row via corr_xy
        similar_cx = {arg: corr_xy(con_x[0], con_x[arg + 1], g_similar_type) for arg in distances_sort}
        """
        eg: similar_cx ~ {0: 1.0, 19: 0.9198, 28: 0.5729, 27: 0.5729, ...}
        """
        # Sort similarities from high to low
        similar_sorted = sorted(zip(similar_cx.values(), similar_cx.keys()))[::-1]
        """
        eg: similar_sorted ~ [(1.0, 0), (0.9936, 26), (0.9242, 1), ...]
        """
        # Only entries above K_SIMILAR_THRESHOLD keep the right to vote
        similar_filters = list(filter(lambda sm: sm[0] > K_SIMILAR_THRESHOLD, similar_sorted))
        """
        eg: similar_filters ~ [(1.0, 0), (0.9936, 26), (0.9242, 1), (0.9198, 19)]
        """
        if len(similar_filters) < int(n_top * 0.1):
            # Too few voters: threshold is 10% of the seed count, eg int(100 * 0.1) == 10
            return EEdgeType.E_EEdge_NORMAL
        top_loss_cluster_cnt = 0
        top_win_cluster_cnt = 0
        # Because of fit()'s unbalanced win_top cut, top_win_cluster_cnt tends
        # to exceed top_loss_cluster_cnt
        for similar in similar_filters:
            """
            eg: similar == (0.9936, 26)
                -> order_ind = 26, similar_val = 0.9936
            """
            order_ind = similar[1]
            similar_val = similar[0]
            # rk label of the voting trade, looked up by row position
            rk = df_x_dict['fiter_df'].iloc[order_ind]['rk']
            # Which bucket the most-similar trade falls into decides its vote
            if rk == -1:
                # Weight the vote by similarity, eg: += 1 * 0.9936
                top_loss_cluster_cnt += 1 * similar_val
            elif rk == 1:
                top_win_cluster_cnt += 1 * similar_val
        # The verdict only counts when one side has a clear relative majority
        if int(top_win_cluster_cnt * K_EDGE_JUDGE_RATE) > top_loss_cluster_cnt:
            """
            eg: top_win_cluster_cnt = 100, top_loss_cluster_cnt = 50
                int(100 * 0.618) == 61 > 50 -> EEdgeType.E_STORE_TOP_WIN
            """
            return EEdgeType.E_STORE_TOP_WIN
        elif int(top_loss_cluster_cnt * K_EDGE_JUDGE_RATE) > top_win_cluster_cnt:
            """
            eg: top_loss_cluster_cnt = 100, top_win_cluster_cnt = 50
                int(100 * 0.618) == 61 > 50 -> EEdgeType.E_EEdge_TOP_LOSS
            """
            # top_win > top_loss already has a probability edge; multiplying by
            # K_EDGE_JUDGE_RATE widens that edge further
            return EEdgeType.E_EEdge_TOP_LOSS
        return EEdgeType.E_EEdge_NORMAL
| gpl-3.0 |
RAtechntukan/CouchPotatoServer | libs/html5lib/treebuilders/etree_lxml.py | 1724 | 14031 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Docypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
import warnings
import re
import sys
from . import _base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
    """Minimal doctype node: a name plus public and system identifiers."""

    def __init__(self, name, publicId, systemId):
        self.name, self.publicId, self.systemId = name, publicId, systemId
class Document(object):
    """Document wrapper holding an lxml ElementTree plus its top-level nodes."""

    def __init__(self):
        self._elementTree = None
        self._childNodes = []

    def appendChild(self, element):
        # lxml cannot hang children off the document itself, so new top-level
        # nodes are inserted as siblings directly after the root element.
        root = self._elementTree.getroot()
        root.addnext(element._element)

    @property
    def childNodes(self):
        return self._childNodes
def testSerializer(element):
    # Render *element* (full tree, fragment, text node, comment, or element)
    # in the indented "|<tag>" format used by html5lib's tree-construction
    # test expectations.
    rv = []
    finalText = None
    infosetFilter = ihatexml.InfosetFilter()

    def serializeElement(element, indent=0):
        if not hasattr(element, "tag"):
            if hasattr(element, "getroot"):
                # Full tree case
                rv.append("#document")
                if element.docinfo.internalDTD:
                    if not (element.docinfo.public_id or
                            element.docinfo.system_url):
                        dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                    else:
                        dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
                            element.docinfo.root_name,
                            element.docinfo.public_id,
                            element.docinfo.system_url)
                    rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
                # Walk back to the first sibling of the root (comments may
                # precede it), then serialize every top-level node in order.
                next_element = element.getroot()
                while next_element.getprevious() is not None:
                    next_element = next_element.getprevious()
                while next_element is not None:
                    serializeElement(next_element, indent + 2)
                    next_element = next_element.getnext()
            elif isinstance(element, str) or isinstance(element, bytes):
                # Text in a fragment
                assert isinstance(element, str) or sys.version_info.major == 2
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                # Fragment case
                rv.append("#document-fragment")
                for next_element in element:
                    serializeElement(next_element, indent + 2)
        elif element.tag == comment_type:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
        else:
            assert isinstance(element, etree._Element)
            # Namespaced tags are emitted as "prefix localname".
            nsmatch = etree_builders.tag_regexp.match(element.tag)
            if nsmatch is not None:
                ns = nsmatch.group(1)
                tag = nsmatch.group(2)
                prefix = constants.prefixes[ns]
                rv.append("|%s<%s %s>" % (' ' * indent, prefix,
                                          infosetFilter.fromXmlName(tag)))
            else:
                rv.append("|%s<%s>" % (' ' * indent,
                                       infosetFilter.fromXmlName(element.tag)))
            if hasattr(element, "attrib"):
                attributes = []
                for name, value in element.attrib.items():
                    nsmatch = tag_regexp.match(name)
                    if nsmatch is not None:
                        ns, name = nsmatch.groups()
                        name = infosetFilter.fromXmlName(name)
                        prefix = constants.prefixes[ns]
                        attr_string = "%s %s" % (prefix, name)
                    else:
                        attr_string = infosetFilter.fromXmlName(name)
                    attributes.append((attr_string, value))
                # Attributes are sorted so the test output is stable.
                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
            if element.text:
                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
            indent += 2
            for child in element:
                serializeElement(child, indent)
            if hasattr(element, "tail") and element.tail:
                # indent was bumped for children; indent - 2 restores our level.
                rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
    serializeElement(element, 0)

    if finalText is not None:
        rv.append("|%s\"%s\"" % (' ' * 2, finalText))

    return "\n".join(rv)
def tostring(element):
    """Serialize an element and its child nodes to a string"""
    rv = []
    finalText = None

    def serializeElement(element):
        if not hasattr(element, "tag"):
            # Whole-tree case: emit the doctype (if any), then the root.
            if element.docinfo.internalDTD:
                if element.docinfo.doctype:
                    dtd_str = element.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                rv.append(dtd_str)
            serializeElement(element.getroot())
        elif element.tag == comment_type:
            rv.append("<!--%s-->" % (element.text,))
        else:
            # This is assumed to be an ordinary element
            if not element.attrib:
                rv.append("<%s>" % (element.tag,))
            else:
                # NOTE(review): attribute values are not escaped here, so a
                # value containing '"' yields malformed markup — confirm this
                # helper is only used for debugging output.
                attr = " ".join(["%s=\"%s\"" % (name, value)
                                 for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (element.tag, attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (element.tag,))

        if hasattr(element, "tail") and element.tail:
            rv.append(element.tail)

    serializeElement(element)

    if finalText is not None:
        # Bug fix: the original format string had one %s placeholder but two
        # arguments ("%s\"" % (' ' * 2, finalText)), which raised TypeError
        # whenever finalText was set; mirror the testSerializer() form.
        rv.append("%s\"%s\"" % (' ' * 2, finalText))

    return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
implementation = etree
def __init__(self, namespaceHTMLElements, fullTree=False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
infosetFilter = self.infosetFilter = ihatexml.InfosetFilter()
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
def __init__(self, element, value={}):
self._element = element
dict.__init__(self, value)
for key, value in self.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = infosetFilter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = infosetFilter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return infosetFilter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = infosetFilter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = infosetFilter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
self.commentClass = builder.Comment
# self.fragmentClass = builder.DocumentFragment
_base.TreeBuilder.__init__(self, namespaceHTMLElements)
    def reset(self):
        """Reset builder state; comments seen before the root are buffered."""
        _base.TreeBuilder.reset(self)
        # Until insertRoot runs, comments go through insertCommentInitial,
        # which queues them in initial_comments for later attachment.
        self.insertComment = self.insertCommentInitial
        self.initial_comments = []
        self.doctype = None
    def testSerializer(self, element):
        # Delegate to the module-level testSerializer helper.
        return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
    def getFragment(self):
        """Flatten the fragment container into a plain list:
        [leading text?, child elements..., trailing text?]."""
        fragment = []
        element = self.openElements[0]._element
        if element.text:
            fragment.append(element.text)
        fragment.extend(list(element))
        if element.tail:
            fragment.append(element.tail)
        return fragment
    def insertDoctype(self, token):
        """Record the doctype token; lxml cannot represent every doctype, so
        unrepresentable cases emit DataLossWarning instead."""
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        if not name:
            warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
            self.doctype = None
        else:
            # Coerce the name to something lxml accepts; warn if that changed it.
            coercedName = self.infosetFilter.coerceElement(name)
            if coercedName != name:
                warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)

            doctype = self.doctypeClass(coercedName, publicId, systemId)
            self.doctype = doctype
    def insertCommentInitial(self, data, parent=None):
        # Before the root element exists, comments are only buffered;
        # insertRoot attaches them in front of the root later.
        self.initial_comments.append(data)
    def insertCommentMain(self, data, parent=None):
        # lxml stores post-root comments as siblings of the root; two adjacent
        # ones there cannot be represented, hence the DataLossWarning.
        if (parent == self.document and
                self.document._elementTree.getroot()[-1].tag == comment_type):
            warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
        super(TreeBuilder, self).insertComment(data, parent)
    def insertRoot(self, token):
        """Create the document root"""
        # Because of the way libxml2 works, it doesn't seem to be possible to
        # alter information like the doctype after the tree has been parsed.
        # Therefore we need to use the built-in parser to create our initial
        # tree, after which we can add elements like normal
        docStr = ""
        if self.doctype:
            assert self.doctype.name
            docStr += "<!DOCTYPE %s" % self.doctype.name
            if (self.doctype.publicId is not None or
                    self.doctype.systemId is not None):
                docStr += (' PUBLIC "%s" ' %
                           (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
                if self.doctype.systemId:
                    sysid = self.doctype.systemId
                    # A system id containing both quote styles cannot be quoted;
                    # single quotes are replaced with a placeholder first.
                    # NOTE(review): 'U00027' looks like it was meant to be an
                    # escape for U+0027 — confirm the intended placeholder.
                    if sysid.find("'") >= 0 and sysid.find('"') >= 0:
                        warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
                        sysid = sysid.replace("'", 'U00027')
                    if sysid.find("'") >= 0:
                        docStr += '"%s"' % sysid
                    else:
                        docStr += "'%s'" % sysid
                else:
                    docStr += "''"
            docStr += ">"
            if self.doctype.name != token["name"]:
                warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
        # Placeholder root; it is renamed to the real root tag below.
        docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
        root = etree.fromstring(docStr)

        # Append the initial comments:
        for comment_token in self.initial_comments:
            root.addprevious(etree.Comment(comment_token["data"]))

        # Create the root document and add the ElementTree to it
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()

        # Give the root element the right name
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        if namespace is None:
            etree_tag = name
        else:
            etree_tag = "{%s}%s" % (namespace, name)
        root.tag = etree_tag

        # Add the root element to the internal child/open data structures
        root_element = self.elementClass(name, namespace)
        root_element._element = root
        self.document._childNodes.append(root_element)
        self.openElements.append(root_element)

        # Reset to the default insert comment function
        self.insertComment = self.insertCommentMain
| gpl-3.0 |
dahlia/pghstore | tests/loads.py | 1 | 7351 | # -*- coding: utf-8 -*-
import unittest
import pghstore
class LoadsTests(unittest.TestCase):
def test_empty(self):
self.assertEqual(pghstore.loads(''), {})
def test_simple(self):
self.assertEqual(pghstore.loads('"key" => "value"'), {"key": "value"})
self.assertEqual(
pghstore.loads('"key" => "value", "key2" => "value2"'),
{"key": "value", "key2": "value2"});
    def test_escaped_double_quote(self):
        # NOTE(review): the expected key/value keep the backslash before the
        # escaped quote (k\"ey), i.e. loads() is asserted NOT to unescape
        # \" inside quoted strings — confirm this is the intended contract.
        self.assertEqual(
            pghstore.loads('"k\\"ey" => "va\\"lue"'),
            {"k\\\"ey": "va\\\"lue"});
def test_null(self):
self.assertEqual(pghstore.loads('"key" => null'), {"key": None})
self.assertEqual(pghstore.loads('"key" => NULL'), {"key": None})
self.assertEqual(pghstore.loads(
'"key" => NULL, "key2": "value2"'),
{"key": None,
"key2": "value2"});
self.assertEqual(pghstore.loads(
'"key0" => "value0", "key" => NULL, "key2": "value2"'),
{"key0": "value0",
"key": None,
"key2": "value2"});
def test_utf8(self):
self.maxDiff = None
#self.assertEqual(pghstore.loads('"åäö" => "åäö"'), {"åäö": "åäö"})
s = '"name"=>"Noorwe\xc3\xab", "name2"=>"öäå"'
self.assertEqual(pghstore.loads(s),
{"name": "Noorwe\xc3\xab",
"name2": "öäå"})
names = '"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab", "name:ar"=>"\xd8\xa7\xd9\x84\xd9\x86\xd8\xb1\xd9\x88\xd9\x8a\xd8\xac", "name:be"=>"\xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:br"=>"Norvegia", "name:ca"=>"Noruega", "name:cs"=>"Norsko", "name:cy"=>"Norwy", "name:da"=>"Norge", "name:de"=>"Norwegen", "name:el"=>"\xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1", "name:en"=>"Norway", "name:eo"=>"Norvegio", "name:es"=>"Noruega", "name:et"=>"Norra", "name:fa"=>"\xd9\x86\xd8\xb1\xd9\x88\xda\x98", "name:fi"=>"Norja", "name:fo"=>"Noregur", "name:fr"=>"Norv\xc3\xa8ge", "name:fy"=>"Noarwegen", "name:ga"=>"An Iorua", "name:gd"=>"Nirribhidh", "name:he"=>"\xd7\xa0\xd7\x95\xd7\xa8\xd7\x95\xd7\x95\xd7\x92\xd7\x99\xd7\x94", "name:hr"=>"Norve\xc5\xa1ka", "name:hu"=>"Norv\xc3\xa9gia", "name:hy"=>"\xd5\x86\xd5\xb8\xd6\x80\xd5\xbe\xd5\xa5\xd5\xa3\xd5\xab\xd5\xa1", "name:id"=>"Norwegia", "name:is"=>"Noregur", "name:it"=>"Norvegia", "name:ja"=>"\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc", "name:la"=>"Norvegia", "name:lb"=>"Norwegen", "name:li"=>"Noorwege", "name:lt"=>"Norvegija", "name:lv"=>"Norv\xc4\x93\xc4\xa3ija", "name:mn"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8", "name:nb"=>"Norge", "name:nl"=>"Noorwegen", "name:nn"=>"Noreg", "name:no"=>"Norge", "name:pl"=>"Norwegia", "name:ru"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8\xd1\x8f", "name:sk"=>"N\xc3\xb3rsko", "name:sl"=>"Norve\xc5\xa1ka", "name:sv"=>"Norge", "name:th"=>"\xe0\xb8\x9b\xe0\xb8\xa3\xe0\xb8\xb0\xe0\xb9\x80\xe0\xb8\x97\xe0\xb8\xa8\xe0\xb8\x99\xe0\xb8\xad\xe0\xb8\xa3\xe0\xb9\x8c\xe0\xb9\x80\xe0\xb8\xa7\xe0\xb8\xa2\xe0\xb9\x8c", "name:tr"=>"Norve\xc3\xa7", "name:uk"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:vi"=>"Na Uy", "name:zh"=>"\xe6\x8c\xaa\xe5\xa8\x81", "name:haw"=>"Nolewai", "name:zh_py"=>"Nuowei", "name:zh_pyt"=>"Nu\xc3\xb3w\xc4\x93i", "official_name"=>"Kongeriket Norge", 
"official_name:be"=>"\xd0\x9a\xd0\xb0\xd1\x80\xd0\xb0\xd0\xbb\xd0\xb5\xd1\x9e\xd1\x81\xd1\x82\xd0\xb2\xd0\xb0 \xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "official_name:el"=>"\xce\x92\xce\xb1\xcf\x83\xce\xaf\xce\xbb\xce\xb5\xce\xb9\xce\xbf \xcf\x84\xce\xb7\xcf\x82 \xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1\xcf\x82", "official_name:en"=>"Kingdom of Norway", "official_name:id"=>"Kerajaan Norwegia", "official_name:it"=>"Regno di Norvegia", "official_name:ja"=>"\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc\xe7\x8e\x8b\xe5\x9b\xbd", "official_name:lb"=>"Kinneksr\xc3\xa4ich Norwegen", "official_name:lt"=>"Norvegijos Karalyst\xc4\x97", "official_name:sk"=>"N\xc3\xb3rske kr\xc3\xa1\xc4\xbeovstvo", "official_name:sv"=>"Konungariket Norge", "official_name:vi"=>"V\xc6\xb0\xc6\xa1ng qu\xe1\xbb\x91c Na Uy"'
r = pghstore.loads(names)
self.assertEqual(
pghstore.loads(names),
{"name": "Norge/Noreg", "name:af": "Noorwe\xc3\xab", "name:ar": "\xd8\xa7\xd9\x84\xd9\x86\xd8\xb1\xd9\x88\xd9\x8a\xd8\xac", "name:be": "\xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:br": "Norvegia", "name:ca": "Noruega", "name:cs": "Norsko", "name:cy": "Norwy", "name:da": "Norge", "name:de": "Norwegen", "name:el": "\xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1", "name:en": "Norway", "name:eo": "Norvegio", "name:es": "Noruega", "name:et": "Norra", "name:fa": "\xd9\x86\xd8\xb1\xd9\x88\xda\x98", "name:fi": "Norja", "name:fo": "Noregur", "name:fr": "Norv\xc3\xa8ge", "name:fy": "Noarwegen", "name:ga": "An Iorua", "name:gd": "Nirribhidh", "name:he": "\xd7\xa0\xd7\x95\xd7\xa8\xd7\x95\xd7\x95\xd7\x92\xd7\x99\xd7\x94", "name:hr": "Norve\xc5\xa1ka", "name:hu": "Norv\xc3\xa9gia", "name:hy": "\xd5\x86\xd5\xb8\xd6\x80\xd5\xbe\xd5\xa5\xd5\xa3\xd5\xab\xd5\xa1", "name:id": "Norwegia", "name:is": "Noregur", "name:it": "Norvegia", "name:ja": "\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc", "name:la": "Norvegia", "name:lb": "Norwegen", "name:li": "Noorwege", "name:lt": "Norvegija", "name:lv": "Norv\xc4\x93\xc4\xa3ija", "name:mn": "\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8", "name:nb": "Norge", "name:nl": "Noorwegen", "name:nn": "Noreg", "name:no": "Norge", "name:pl": "Norwegia", "name:ru": "\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8\xd1\x8f", "name:sk": "N\xc3\xb3rsko", "name:sl": "Norve\xc5\xa1ka", "name:sv": "Norge", "name:th": "\xe0\xb8\x9b\xe0\xb8\xa3\xe0\xb8\xb0\xe0\xb9\x80\xe0\xb8\x97\xe0\xb8\xa8\xe0\xb8\x99\xe0\xb8\xad\xe0\xb8\xa3\xe0\xb9\x8c\xe0\xb9\x80\xe0\xb8\xa7\xe0\xb8\xa2\xe0\xb9\x8c", "name:tr": "Norve\xc3\xa7", "name:uk": "\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:vi": "Na Uy", "name:zh": "\xe6\x8c\xaa\xe5\xa8\x81", "name:haw": "Nolewai", "name:zh_py": "Nuowei", "name:zh_pyt": "Nu\xc3\xb3w\xc4\x93i", "official_name": "Kongeriket Norge", 
"official_name:be": "\xd0\x9a\xd0\xb0\xd1\x80\xd0\xb0\xd0\xbb\xd0\xb5\xd1\x9e\xd1\x81\xd1\x82\xd0\xb2\xd0\xb0 \xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "official_name:el": "\xce\x92\xce\xb1\xcf\x83\xce\xaf\xce\xbb\xce\xb5\xce\xb9\xce\xbf \xcf\x84\xce\xb7\xcf\x82 \xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1\xcf\x82", "official_name:en": "Kingdom of Norway", "official_name:id": "Kerajaan Norwegia", "official_name:it": "Regno di Norvegia", "official_name:ja": "\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc\xe7\x8e\x8b\xe5\x9b\xbd", "official_name:lb": "Kinneksr\xc3\xa4ich Norwegen", "official_name:lt": "Norvegijos Karalyst\xc4\x97", "official_name:sk": "N\xc3\xb3rske kr\xc3\xa1\xc4\xbeovstvo", "official_name:sv": "Konungariket Norge", "official_name:vi": "V\xc6\xb0\xc6\xa1ng qu\xe1\xbb\x91c Na Uy"})
| mit |
adkerr/tempest | tempest/openstack/common/local.py | 57 | 1722 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
    """Thread-local storage that holds its values via weak references.

    Setting an attribute stores ``weakref.ref(value)`` instead of the
    value itself; reading it dereferences the weak reference, so a value
    with no remaining strong references elsewhere is garbage collected
    and subsequently reads back as ``None``.
    """

    def __getattribute__(self, attr):
        ref = super(WeakLocal, self).__getattribute__(attr)
        if not ref:
            return ref
        # What is stored is a weak reference, not the value itself, so
        # dereference it here to recover the real value (or None once
        # the referent has been collected).
        return ref()

    def __setattr__(self, attr, value):
        # Only a weak reference to the value is kept in the thread-local
        # storage; the caller retains ownership of the value's lifetime.
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
# NOTE(mikal): the name "store" should be deprecated in the future
# ("store" is a weak store kept under its historical name for backwards
# compatibility; prefer weak_store or strong_store below).
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
# Plain threading.local: keeps a strong reference per thread.
strong_store = threading.local()
| apache-2.0 |
lcamacho/airmozilla | airmozilla/manage/tests/views/test_channels.py | 1 | 4246 | from nose.tools import eq_, ok_
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Channel
from .base import ManageTestCase
class TestChannels(ManageTestCase):
    """Management-view tests for listing, creating, editing and deleting
    Channel objects."""

    def setUp(self):
        super(TestChannels, self).setUp()
        # A known channel that every test can rely on.
        Channel.objects.create(
            name='Testing',
            slug='testing',
            description='Anything'
        )

    def test_channels(self):
        """ Channels listing responds OK. """
        response = self.client.get(reverse('manage:channels'))
        eq_(response.status_code, 200)

    def test_channel_new(self):
        """ Channel form adds new channels. """
        # render the form
        response = self.client.get(reverse('manage:channel_new'))
        eq_(response.status_code, 200)
        # Valid submission should redirect back to the listing.
        response_ok = self.client.post(
            reverse('manage:channel_new'),
            {
                'name': ' Web Dev ',
                'slug': 'web-dev',
                'description': '<h1>Stuff</h1>',
                'image_is_banner': True,
                'feed_size': 10,
            }
        )
        self.assertRedirects(response_ok, reverse('manage:channels'))
        # NOTE(review): the posted name has surrounding whitespace but the
        # lookup below uses the stripped 'Web Dev' -- presumably the form
        # strips it; confirm in the form definition.
        ok_(Channel.objects.get(name='Web Dev'))
        ok_(Channel.objects.get(name='Web Dev').image_is_banner)
        # An empty submission re-renders the form with errors (no redirect).
        response_fail = self.client.post(reverse('manage:channel_new'))
        eq_(response_fail.status_code, 200)

    def test_channel_edit(self):
        """Editing an existing channel updates its fields."""
        channel = Channel.objects.get(slug='testing')
        response = self.client.get(
            reverse('manage:channel_edit', args=(channel.pk,)),
        )
        eq_(response.status_code, 200)
        ok_('value="testing"' in response.content)
        response = self.client.post(
            reverse('manage:channel_edit', args=(channel.pk,)),
            {
                'name': 'Different',
                'slug': 'different',
                'description': '<p>Other things</p>',
                'feed_size': 10,
            }
        )
        eq_(response.status_code, 302)
        # Raises Channel.DoesNotExist (failing the test) if the slug
        # was not actually updated.
        channel = Channel.objects.get(slug='different')

    def test_channel_edit_visibility_clash(self):
        """never_show and always_show are mutually exclusive, so posting
        both re-renders the form with errors (HTTP 200, no redirect)."""
        channel = Channel.objects.get(slug='testing')
        response = self.client.get(
            reverse('manage:channel_edit', args=(channel.pk,)),
        )
        response = self.client.post(
            reverse('manage:channel_edit', args=(channel.pk,)),
            {
                'name': 'Different',
                'slug': 'different',
                'description': '<p>Other things</p>',
                'never_show': True,
                'always_show': True,
                'feed_size': 10,
            }
        )
        eq_(response.status_code, 200)

    def test_channel_edit_child(self):
        """A channel can be re-parented, but not to itself."""
        channel = Channel.objects.get(slug='testing')
        response = self.client.get(
            reverse('manage:channel_edit', args=(channel.pk,)),
        )
        eq_(response.status_code, 200)
        # Extract the options rendered for the 'parent' <select>.
        choices = (
            response.content
            .split('name="parent"')[1]
            .split('</select>')[0]
        )
        ok_('Main' in choices)
        # you should not be able to self-reference
        ok_('Testing' not in choices)
        main = Channel.objects.get(slug='main')
        response = self.client.post(
            reverse('manage:channel_edit', args=(channel.pk,)),
            {
                'name': 'Different',
                'slug': 'different',
                'description': '<p>Other things</p>',
                'parent': main.pk,
                'feed_size': 10,
            }
        )
        eq_(response.status_code, 302)
        channel = Channel.objects.get(slug='different')
        eq_(channel.parent, main)
        # now expect two links to "Main" on the channels page
        response = self.client.get(reverse('manage:channels'))
        eq_(response.status_code, 200)
        view_url = reverse('main:home_channels', args=(main.slug,))
        eq_(response.content.count(view_url), 2)

    def test_channel_delete(self):
        # Shared delete-view helper from ManageTestCase.
        channel = Channel.objects.create(
            name='How Tos',
            slug='how-tos',
        )
        self._delete_test(channel, 'manage:channel_remove',
                          'manage:channels')
| bsd-3-clause |
googleads/google-ads-python | google/ads/googleads/v6/enums/types/device.py | 1 | 1201 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Register this module's generated messages/enums with the proto-plus
# marshalling machinery for the google.ads.googleads.v6 API surface.
__protobuf__ = proto.module(
    package="google.ads.googleads.v6.enums",
    marshal="google.ads.googleads.v6",
    manifest={"DeviceEnum",},
)


class DeviceEnum(proto.Message):
    r"""Container for enumeration of Google Ads devices available for
    targeting.
    """

    class Device(proto.Enum):
        r"""Enumerates Google Ads devices available for targeting."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this API version (return-only).
        MOBILE = 2
        TABLET = 3
        DESKTOP = 4
        # NOTE: declared out of numeric order; values mirror the upstream
        # device.proto definition and must not be renumbered.
        CONNECTED_TV = 6
        OTHER = 5


# Public names exported by this generated module.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
4rado/RepositoryForProject | Lib/site-packages/numpy/distutils/cpuinfo.py | 51 | 22808 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
__all__ = ['cpu']
import sys, re, types
import os
if sys.version_info[0] < 3:
from commands import getstatusoutput
else:
from subprocess import getstatusoutput
import warnings
import platform
from numpy.distutils.compat import get_exception
def getoutput(cmd, successful_status=(0,), stacklevel=1):
    """Run *cmd* in a shell and return ``(success, output)``.

    *success* is True only when the command exited normally with a status
    listed in *successful_status*.  *output* is the command's combined
    stdout/stderr text ('' if the command could not be run at all).  OS
    errors are reported as a UserWarning rather than raised.
    """
    try:
        status, output = getstatusoutput(cmd)
    except EnvironmentError:
        e = get_exception()
        warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
        # BUG FIX: `output` was returned here, but it is never assigned
        # when getstatusoutput() raises, which produced an
        # UnboundLocalError instead of a graceful failure.
        return False, ''
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
        return True, output
    return False, output
def command_info(successful_status=(0,), stacklevel=1, **kw):
    """Run each keyword-named shell command and collect its output.

    Returns a dict mapping each keyword name to the stripped output of
    the corresponding command; commands that fail are simply omitted.
    """
    results = {}
    for name, cmd in kw.items():
        succeeded, text = getoutput(cmd,
                                    successful_status=successful_status,
                                    stacklevel=stacklevel + 1)
        if succeeded:
            results[name] = text.strip()
    return results
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
    """Yield the stripped output lines of *cmd*.

    Yields nothing at all when the command fails.
    """
    succeeded, text = getoutput(cmd, successful_status=successful_status,
                                stacklevel=stacklevel + 1)
    if succeeded:
        for raw_line in text.splitlines():
            yield raw_line.strip()
def key_value_from_command(cmd, sep, successful_status=(0,),
                           stacklevel=1):
    """Parse *cmd*'s output into a dict of ``key sep value`` pairs.

    Each output line is split on the first occurrence of *sep*; lines
    without a separator are ignored.  Keys and values are whitespace
    stripped.
    """
    pairs = {}
    for line in command_by_line(cmd, successful_status=successful_status,
                                stacklevel=stacklevel + 1):
        fields = [part.strip() for part in line.split(sep, 1)]
        if len(fields) == 2:
            pairs[fields[0]] = fields[1]
    return pairs
class CPUInfoBase(object):
    """Holds CPU information and provides methods for requiring
    the availability of various CPU features.

    Subclasses implement probes named ``_is_*`` / ``_has_*`` /
    ``_getNCPUs``; callers use the underscore-less names (for example
    ``cpu.is_Intel()``), which are synthesized by ``__getattr__`` and
    swallow any probe failure by returning None.
    """

    def _try_call(self, probe):
        # Run a probe; any failure (missing key, parse error, ...) is
        # treated as "unknown" and yields None.
        try:
            return probe()
        except:
            pass

    def __getattr__(self, name):
        if not name.startswith('_'):
            if hasattr(self, '_' + name):
                impl = getattr(self, '_' + name)
                if type(impl) is types.MethodType:
                    # Bind the probe and the error-swallowing wrapper at
                    # definition time via default arguments.
                    return lambda call=self._try_call, probe=impl: call(probe)
                return lambda: None
        raise AttributeError(name)

    def _getNCPUs(self):
        # Conservative default when a platform subclass cannot tell.
        return 1

    def __get_nbits(self):
        # '32' or '64', extracted from e.g. platform.architecture()[0]
        # == '64bit'.
        arch_bits = platform.architecture()[0]
        return re.compile(r'(\d+)bit').search(arch_bits).group(1)

    def _is_32bit(self):
        return self.__get_nbits() == '32'

    def _is_64bit(self):
        return self.__get_nbits() == '64'
class LinuxCPUInfo(CPUInfoBase):
    """CPU information for Linux, parsed from ``uname -m`` and
    ``/proc/cpuinfo``.

    ``self.info`` is a list with one dict of key/value pairs per
    processor entry; it is cached on the class so the sources are only
    read once per process.
    """

    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = [ {} ]
        ok, output = getoutput('uname -m')
        if ok:
            info[0]['uname_m'] = output.strip()
        try:
            fo = open('/proc/cpuinfo')
        except EnvironmentError:
            e = get_exception()
            warnings.warn(str(e), UserWarning)
        else:
            for line in fo:
                name_value = [s.strip() for s in line.split(':', 1)]
                if len(name_value) != 2:
                    continue
                name, value = name_value
                # A key repeating means a new processor section begins.
                if not info or name in info[-1]: # next processor
                    info.append({})
                info[-1][name] = value
            fo.close()
        self.__class__.info = info

    def _not_impl(self): pass

    # Athlon

    def _is_AMD(self):
        return self.info[0]['vendor_id']=='AuthenticAMD'

    def _is_AthlonK6_2(self):
        return self._is_AMD() and self.info[0]['model'] == '2'

    def _is_AthlonK6_3(self):
        return self._is_AMD() and self.info[0]['model'] == '3'

    def _is_AthlonK6(self):
        return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None

    def _is_AthlonK7(self):
        return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None

    def _is_AthlonMP(self):
        return re.match(r'.*?Athlon\(tm\) MP\b',
                        self.info[0]['model name']) is not None

    def _is_AMD64(self):
        # BUG FIX: x86 /proc/cpuinfo labels this field 'cpu family' (the
        # bare 'family' key exists only on ia64, see _is_Itanium).  The
        # old self.info[0]['family'] lookup always raised KeyError, so
        # is_AMD64() silently returned None.
        return self.is_AMD() and self.info[0]['cpu family'] == '15'

    def _is_Athlon64(self):
        return re.match(r'.*?Athlon\(tm\) 64\b',
                        self.info[0]['model name']) is not None

    def _is_AthlonHX(self):
        return re.match(r'.*?Athlon HX\b',
                        self.info[0]['model name']) is not None

    def _is_Opteron(self):
        return re.match(r'.*?Opteron\b',
                        self.info[0]['model name']) is not None

    def _is_Hammer(self):
        return re.match(r'.*?Hammer\b',
                        self.info[0]['model name']) is not None

    # Alpha

    def _is_Alpha(self):
        return self.info[0]['cpu']=='Alpha'

    def _is_EV4(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'

    def _is_EV5(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'

    def _is_EV56(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'

    def _is_PCA56(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'

    # Intel

    #XXX
    _is_i386 = _not_impl

    def _is_Intel(self):
        return self.info[0]['vendor_id']=='GenuineIntel'

    def _is_i486(self):
        return self.info[0]['cpu']=='i486'

    def _is_i586(self):
        return self.is_Intel() and self.info[0]['cpu family'] == '5'

    def _is_i686(self):
        return self.is_Intel() and self.info[0]['cpu family'] == '6'

    def _is_Celeron(self):
        return re.match(r'.*?Celeron',
                        self.info[0]['model name']) is not None

    def _is_Pentium(self):
        return re.match(r'.*?Pentium',
                        self.info[0]['model name']) is not None

    def _is_PentiumII(self):
        return re.match(r'.*?Pentium.*?II\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumPro(self):
        return re.match(r'.*?PentiumPro\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumMMX(self):
        return re.match(r'.*?Pentium.*?MMX\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumIII(self):
        return re.match(r'.*?Pentium.*?III\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumIV(self):
        return re.match(r'.*?Pentium.*?(IV|4)\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumM(self):
        return re.match(r'.*?Pentium.*?M\b',
                        self.info[0]['model name']) is not None

    def _is_Prescott(self):
        return self.is_PentiumIV() and self.has_sse3()

    def _is_Nocona(self):
        # 64-bit-capable (lm flag) P4-era Xeon without SSSE3.
        return self.is_Intel() \
               and (self.info[0]['cpu family'] == '6' \
                    or self.info[0]['cpu family'] == '15' ) \
               and (self.has_sse3() and not self.has_ssse3())\
               and re.match(r'.*?\blm\b',self.info[0]['flags']) is not None

    def _is_Core2(self):
        return self.is_64bit() and self.is_Intel() and \
               re.match(r'.*?Core\(TM\)2\b', \
                        self.info[0]['model name']) is not None

    def _is_Itanium(self):
        # ia64 /proc/cpuinfo really does use a bare 'family' key.
        return re.match(r'.*?Itanium\b',
                        self.info[0]['family']) is not None

    def _is_XEON(self):
        return re.match(r'.*?XEON\b',
                        self.info[0]['model name'],re.IGNORECASE) is not None

    _is_Xeon = _is_XEON

    # Varia

    def _is_singleCPU(self):
        return len(self.info) == 1

    def _getNCPUs(self):
        return len(self.info)

    def _has_fdiv_bug(self):
        return self.info[0]['fdiv_bug']=='yes'

    def _has_f00f_bug(self):
        return self.info[0]['f00f_bug']=='yes'

    def _has_mmx(self):
        return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None

    def _has_sse(self):
        return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None

    def _has_sse2(self):
        return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None

    def _has_sse3(self):
        # SSE3 is reported as 'pni' (Prescott New Instructions) on Linux.
        return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None

    def _has_ssse3(self):
        return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None

    def _has_3dnow(self):
        return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None

    def _has_3dnowext(self):
        return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None
class IRIXCPUInfo(CPUInfoBase):
    """CPU information for IRIX, gathered from the ``sysconf`` command.

    NOTE(review): ``sysconf`` is accepted with exit status 0 or 1 --
    presumably it can exit non-zero while still printing usable output;
    confirm on a live IRIX host.
    """
    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = key_value_from_command('sysconf', sep=' ',
                                      successful_status=(0,1))
        # Cache on the class so sysconf is only run once per process.
        self.__class__.info = info

    def _not_impl(self): pass

    def _is_singleCPU(self):
        return self.info.get('NUM_PROCESSORS') == '1'

    def _getNCPUs(self):
        return int(self.info.get('NUM_PROCESSORS', 1))

    def __cputype(self,n):
        # True when the first PROCESSORS token is e.g. 'r10000' for n=10000.
        return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)

    def _is_r2000(self): return self.__cputype(2000)

    def _is_r3000(self): return self.__cputype(3000)

    def _is_r3900(self): return self.__cputype(3900)

    def _is_r4000(self): return self.__cputype(4000)

    def _is_r4100(self): return self.__cputype(4100)

    def _is_r4300(self): return self.__cputype(4300)

    def _is_r4400(self): return self.__cputype(4400)

    def _is_r4600(self): return self.__cputype(4600)

    def _is_r4650(self): return self.__cputype(4650)

    def _is_r5000(self): return self.__cputype(5000)

    def _is_r6000(self): return self.__cputype(6000)

    def _is_r8000(self): return self.__cputype(8000)

    def _is_r10000(self): return self.__cputype(10000)

    def _is_r12000(self): return self.__cputype(12000)

    def _is_rorion(self): return self.__cputype('orion')

    def get_ip(self):
        # Best-effort lookup of the SGI machine type (e.g. 'IP27');
        # returns None when unavailable.
        try: return self.info.get('MACHINE')
        except: pass

    def __machine(self,n):
        return self.info.get('MACHINE').lower() == 'ip%s' % (n)

    def _is_IP19(self): return self.__machine(19)

    def _is_IP20(self): return self.__machine(20)

    def _is_IP21(self): return self.__machine(21)

    def _is_IP22(self): return self.__machine(22)

    def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()

    def _is_IP22_5k(self): return self.__machine(22)  and self._is_r5000()

    def _is_IP24(self): return self.__machine(24)

    def _is_IP25(self): return self.__machine(25)

    def _is_IP26(self): return self.__machine(26)

    def _is_IP27(self): return self.__machine(27)

    def _is_IP28(self): return self.__machine(28)

    def _is_IP30(self): return self.__machine(30)

    def _is_IP32(self): return self.__machine(32)

    def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()

    def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
class DarwinCPUInfo(CPUInfoBase):
    """CPU information for macOS / Darwin, gathered from the ``arch``,
    ``machine`` and ``sysctl hw`` commands."""
    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = command_info(arch='arch',
                            machine='machine')
        # 'sysctl hw' output is a set of 'key = value' lines.
        info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
        # Cache on the class so the commands are only run once per process.
        self.__class__.info = info

    def _not_impl(self): pass

    def _getNCPUs(self):
        return int(self.info['sysctl_hw'].get('hw.ncpu', 1))

    def _is_Power_Macintosh(self):
        return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'

    def _is_i386(self):
        return self.info['arch']=='i386'

    def _is_ppc(self):
        return self.info['arch']=='ppc'

    def __machine(self,n):
        # True when `machine` reports the given PowerPC model, e.g. 'ppc7450'.
        return self.info['machine'] == 'ppc%s'%n

    def _is_ppc601(self): return self.__machine(601)

    def _is_ppc602(self): return self.__machine(602)

    def _is_ppc603(self): return self.__machine(603)

    def _is_ppc603e(self): return self.__machine('603e')

    def _is_ppc604(self): return self.__machine(604)

    def _is_ppc604e(self): return self.__machine('604e')

    def _is_ppc620(self): return self.__machine(620)

    def _is_ppc630(self): return self.__machine(630)

    def _is_ppc740(self): return self.__machine(740)

    def _is_ppc7400(self): return self.__machine(7400)

    def _is_ppc7450(self): return self.__machine(7450)

    def _is_ppc750(self): return self.__machine(750)

    def _is_ppc403(self): return self.__machine(403)

    def _is_ppc505(self): return self.__machine(505)

    def _is_ppc801(self): return self.__machine(801)

    def _is_ppc821(self): return self.__machine(821)

    def _is_ppc823(self): return self.__machine(823)

    def _is_ppc860(self): return self.__machine(860)
class SunOSCPUInfo(CPUInfoBase):
    """CPU information for SunOS/Solaris, gathered from ``arch``,
    ``mach``, ``uname``, ``isainfo`` and ``psrinfo``."""

    info = None

    def __init__(self):
        if self.info is not None:
            return
        # BUG FIX: the uname_i entry previously ran the non-existent
        # command 'uname_i' instead of 'uname -i', so the 'uname_i' key
        # was never populated and every platform probe that relies on it
        # (is_SUNW, is_ultra*, is_sunfire, ...) silently returned None.
        info = command_info(arch='arch',
                            mach='mach',
                            uname_i='uname -i',
                            isainfo_b='isainfo -b',
                            isainfo_n='isainfo -n',
                            )
        info['uname_X'] = key_value_from_command('uname -X', sep='=')
        # Extract the processor type (e.g. 'sparcv9') from psrinfo.
        for line in command_by_line('psrinfo -v 0'):
            m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
            if m:
                info['processor'] = m.group('p')
                break
        # Cache on the class so the commands are only run once per process.
        self.__class__.info = info

    def _not_impl(self): pass

    def _is_i386(self):
        return self.info['isainfo_n']=='i386'

    def _is_sparc(self):
        return self.info['isainfo_n']=='sparc'

    def _is_sparcv9(self):
        return self.info['isainfo_n']=='sparcv9'

    def _getNCPUs(self):
        return int(self.info['uname_X'].get('NumCPU', 1))

    def _is_sun4(self):
        return self.info['arch']=='sun4'

    def _is_SUNW(self):
        return re.match(r'SUNW',self.info['uname_i']) is not None

    def _is_sparcstation5(self):
        return re.match(r'.*SPARCstation-5',self.info['uname_i']) is not None

    def _is_ultra1(self):
        return re.match(r'.*Ultra-1',self.info['uname_i']) is not None

    def _is_ultra250(self):
        return re.match(r'.*Ultra-250',self.info['uname_i']) is not None

    def _is_ultra2(self):
        return re.match(r'.*Ultra-2',self.info['uname_i']) is not None

    def _is_ultra30(self):
        return re.match(r'.*Ultra-30',self.info['uname_i']) is not None

    def _is_ultra4(self):
        return re.match(r'.*Ultra-4',self.info['uname_i']) is not None

    def _is_ultra5_10(self):
        return re.match(r'.*Ultra-5_10',self.info['uname_i']) is not None

    def _is_ultra5(self):
        return re.match(r'.*Ultra-5',self.info['uname_i']) is not None

    def _is_ultra60(self):
        return re.match(r'.*Ultra-60',self.info['uname_i']) is not None

    def _is_ultra80(self):
        return re.match(r'.*Ultra-80',self.info['uname_i']) is not None

    # NOTE: 'enterprice' is a historical typo kept for backwards
    # compatibility -- these names are part of the public probe API
    # (cpu.is_ultraenterprice()).
    def _is_ultraenterprice(self):
        return re.match(r'.*Ultra-Enterprise',self.info['uname_i']) is not None

    def _is_ultraenterprice10k(self):
        return re.match(r'.*Ultra-Enterprise-10000',self.info['uname_i']) is not None

    def _is_sunfire(self):
        return re.match(r'.*Sun-Fire',self.info['uname_i']) is not None

    def _is_ultra(self):
        return re.match(r'.*Ultra',self.info['uname_i']) is not None

    def _is_cpusparcv7(self):
        return self.info['processor']=='sparcv7'

    def _is_cpusparcv8(self):
        return self.info['processor']=='sparcv8'

    def _is_cpusparcv9(self):
        return self.info['processor']=='sparcv9'
class Win32CPUInfo(CPUInfoBase):
    """CPU information for Windows, read from the registry under
    HKLM\\HARDWARE\\DESCRIPTION\\System\\CentralProcessor.

    ``self.info`` holds one dict per logical processor, cached on the
    class so the registry is only read once per process.
    """

    info = None
    pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
    # XXX: what does the value of
    # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
    # mean?

    def __init__(self):
        if self.info is not None:
            return
        info = []
        try:
            #XXX: Bad style to use so long `try:...except:...`. Fix it!
            # BUG FIX: the registry module is `winreg` on Python 3 and
            # `_winreg` on Python 2; the old unconditional
            # `import _winreg` failed on Python 3.
            try:
                import winreg
            except ImportError:
                import _winreg as winreg
            # Parse family/model/stepping out of the "Identifier" value,
            # e.g. "x86 Family 6 Model 8 Stepping 1".
            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
            chnd = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
            pnum = 0
            while True:
                try:
                    proc = winreg.EnumKey(chnd, pnum)
                except winreg.error:
                    break
                else:
                    pnum += 1
                    info.append({"Processor": proc})
                    phnd = winreg.OpenKey(chnd, proc)
                    pidx = 0
                    while True:
                        try:
                            name, value, vtpe = winreg.EnumValue(phnd, pidx)
                        except winreg.error:
                            break
                        else:
                            pidx = pidx + 1
                            info[-1][name] = value
                            if name == "Identifier":
                                srch = prgx.search(value)
                                if srch:
                                    info[-1]["Family"] = int(srch.group("FML"))
                                    info[-1]["Model"] = int(srch.group("MDL"))
                                    info[-1]["Stepping"] = int(srch.group("STP"))
        except Exception:
            # BUG FIX: the old handler printed sys.exc_value, which only
            # exists on Python 2; on Python 3 the handler itself raised
            # AttributeError instead of ignoring the failure.
            print(sys.exc_info()[1], '(ignoring)')
        self.__class__.info = info

    def _not_impl(self): pass

    # Athlon

    def _is_AMD(self):
        return self.info[0]['VendorIdentifier']=='AuthenticAMD'

    def _is_Am486(self):
        return self.is_AMD() and self.info[0]['Family']==4

    def _is_Am5x86(self):
        return self.is_AMD() and self.info[0]['Family']==4

    def _is_AMDK5(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model'] in [0,1,2,3]

    def _is_AMDK6(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model'] in [6,7]

    def _is_AMDK6_2(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model']==8

    def _is_AMDK6_3(self):
        return self.is_AMD() and self.info[0]['Family']==5 \
               and self.info[0]['Model']==9

    def _is_AMDK7(self):
        return self.is_AMD() and self.info[0]['Family'] == 6

    # To reliably distinguish between the different types of AMD64 chips
    # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
    # require looking at the 'brand' from cpuid

    def _is_AMD64(self):
        return self.is_AMD() and self.info[0]['Family'] == 15

    # Intel

    def _is_Intel(self):
        return self.info[0]['VendorIdentifier']=='GenuineIntel'

    def _is_i386(self):
        return self.info[0]['Family']==3

    def _is_i486(self):
        return self.info[0]['Family']==4

    def _is_i586(self):
        return self.is_Intel() and self.info[0]['Family']==5

    def _is_i686(self):
        return self.is_Intel() and self.info[0]['Family']==6

    def _is_Pentium(self):
        return self.is_Intel() and self.info[0]['Family']==5

    def _is_PentiumMMX(self):
        return self.is_Intel() and self.info[0]['Family']==5 \
               and self.info[0]['Model']==4

    def _is_PentiumPro(self):
        return self.is_Intel() and self.info[0]['Family']==6 \
               and self.info[0]['Model']==1

    def _is_PentiumII(self):
        return self.is_Intel() and self.info[0]['Family']==6 \
               and self.info[0]['Model'] in [3,5,6]

    def _is_PentiumIII(self):
        return self.is_Intel() and self.info[0]['Family']==6 \
               and self.info[0]['Model'] in [7,8,9,10,11]

    def _is_PentiumIV(self):
        return self.is_Intel() and self.info[0]['Family']==15

    def _is_PentiumM(self):
        return self.is_Intel() and self.info[0]['Family'] == 6 \
               and self.info[0]['Model'] in [9, 13, 14]

    def _is_Core2(self):
        return self.is_Intel() and self.info[0]['Family'] == 6 \
               and self.info[0]['Model'] in [15, 16, 17]

    # Varia

    def _is_singleCPU(self):
        return len(self.info) == 1

    def _getNCPUs(self):
        return len(self.info)

    def _has_mmx(self):
        if self.is_Intel():
            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
                   or (self.info[0]['Family'] in [6,15])
        elif self.is_AMD():
            return self.info[0]['Family'] in [5,6,15]
        else:
            return False

    def _has_sse(self):
        if self.is_Intel():
            return (self.info[0]['Family']==6 and \
                    self.info[0]['Model'] in [7,8,9,10,11]) \
                   or self.info[0]['Family']==15
        elif self.is_AMD():
            return (self.info[0]['Family']==6 and \
                    self.info[0]['Model'] in [6,7,8,10]) \
                   or self.info[0]['Family']==15
        else:
            return False

    def _has_sse2(self):
        if self.is_Intel():
            # BUG FIX: there is no _is_Pentium4 probe, so the old
            # self.is_Pentium4() call raised AttributeError (swallowed
            # by _try_call) and has_sse2() always returned None on
            # Intel.  The probe is named _is_PentiumIV.
            return self.is_PentiumIV() or self.is_PentiumM() \
                   or self.is_Core2()
        elif self.is_AMD():
            return self.is_AMD64()
        else:
            return False

    def _has_3dnow(self):
        return self.is_AMD() and self.info[0]['Family'] in [5,6,15]

    def _has_3dnowext(self):
        return self.is_AMD() and self.info[0]['Family'] in [6,15]
# Pick the CPUInfo implementation matching the running platform; fall
# back to the feature-less base class on unrecognized systems.
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
    cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
    cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
    cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
    cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
    cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
    # Cygwin exposes a Linux-style /proc/cpuinfo.
    cpuinfo = LinuxCPUInfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
    cpuinfo = CPUInfoBase

# Module-level singleton; queried via synthesized probes such as
# cpu.is_Intel() or cpu.has_sse2() (see CPUInfoBase.__getattr__).
cpu = cpuinfo()

#if __name__ == "__main__":
#
#    cpu.is_blaa()
#    cpu.is_Intel()
#    cpu.is_Alpha()
#
#    print 'CPU information:',
#    for name in dir(cpuinfo):
#        if name[0]=='_' and name[1]!='_':
#            r = getattr(cpu,name[1:])()
#            if r:
#                if r!=1:
#                    print '%s=%s' %(name[1:],r),
#                else:
#                    print name[1:],
#    print
| gpl-3.0 |
DougFirErickson/qgisSpaceSyntaxToolkit | esstoolkit/external/pyqtgraph/graphicsItems/LegendItem.py | 34 | 6755 | from .GraphicsWidget import GraphicsWidget
from .LabelItem import LabelItem
from ..Qt import QtGui, QtCore
from .. import functions as fn
from ..Point import Point
from .ScatterPlotItem import ScatterPlotItem, drawSymbol
from .PlotDataItem import PlotDataItem
from .GraphicsWidgetAnchor import GraphicsWidgetAnchor
__all__ = ['LegendItem']
class LegendItem(GraphicsWidget, GraphicsWidgetAnchor):
    """
    Displays a legend used for describing the contents of a plot.

    LegendItems are most commonly created by calling PlotItem.addLegend().

    Note that this item should not be added directly to a PlotItem. Instead,
    make it a direct descendant of the PlotItem::

        legend.setParentItem(plotItem)

    """
    def __init__(self, size=None, offset=None):
        """
        ============== ===============================================================
        **Arguments:**
        size           Specifies the fixed size (width, height) of the legend. If
                       this argument is omitted, the legend will automatically resize
                       to fit its contents.
        offset         Specifies the offset position relative to the legend's parent.
                       Positive values offset from the left or top; negative values
                       offset from the right or bottom. If offset is None, the
                       legend must be anchored manually by calling anchor() or
                       positioned by calling setPos().
        ============== ===============================================================
        """
        GraphicsWidget.__init__(self)
        GraphicsWidgetAnchor.__init__(self)
        self.setFlag(self.ItemIgnoresTransformations)
        self.layout = QtGui.QGraphicsGridLayout()
        self.setLayout(self.layout)
        self.items = []        # list of (ItemSample, LabelItem) pairs, one per entry
        self.size = size       # fixed (w, h), or None to auto-size in updateSize()
        self.offset = offset   # anchor offset applied in setParentItem()
        if size is not None:
            self.setGeometry(QtCore.QRectF(0, 0, self.size[0], self.size[1]))

    def setParentItem(self, p):
        """Reparent the legend and, if an offset was given, anchor it to the parent."""
        ret = GraphicsWidget.setParentItem(self, p)
        if self.offset is not None:
            offset = Point(self.offset)
            # A non-positive offset anchors to the right/bottom edge instead
            # of the left/top edge.
            anchorx = 1 if offset[0] <= 0 else 0
            anchory = 1 if offset[1] <= 0 else 0
            anchor = (anchorx, anchory)
            self.anchor(itemPos=anchor, parentPos=anchor, offset=offset)
        return ret

    def addItem(self, item, name):
        """
        Add a new entry to the legend.

        ============== ========================================================
        **Arguments:**
        item           A PlotDataItem from which the line and point style
                       of the item will be determined or an instance of
                       ItemSample (or a subclass), allowing the item display
                       to be customized.
        name           The name to display for this item. Simple HTML allowed.
        ============== ========================================================
        """
        label = LabelItem(name)
        if isinstance(item, ItemSample):
            sample = item
        else:
            sample = ItemSample(item)
        row = self.layout.rowCount()
        self.items.append((sample, label))
        self.layout.addItem(sample, row, 0)
        self.layout.addItem(label, row, 1)
        self.updateSize()

    def removeItem(self, name):
        """
        Remove the first legend entry whose label text matches *name*.

        ============== ========================================================
        **Arguments:**
        name           The name displayed for the item to remove.
        ============== ========================================================
        """
        # BUGFIX: the previous code removed from self.items while iterating
        # it directly, which skips the element following the removed one.
        # Iterate over a snapshot and stop after the first match.
        for sample, label in list(self.items):
            if label.text == name:                    # hit
                self.items.remove((sample, label))    # remove from item list
                self.layout.removeItem(sample)        # remove from layout
                sample.close()                        # remove from drawing
                self.layout.removeItem(label)
                label.close()
                self.updateSize()                     # redraw box
                return

    def updateSize(self):
        """Recompute the legend geometry from its entries (no-op for fixed size)."""
        if self.size is not None:
            return
        height = 0
        width = 0
        for sample, label in self.items:
            height += max(sample.height(), label.height()) + 3
            width = max(width, sample.width() + label.width())
        # Extra horizontal padding so the text is not clipped at the border.
        self.setGeometry(0, 0, width + 25, height)

    def boundingRect(self):
        return QtCore.QRectF(0, 0, self.width(), self.height())

    def paint(self, p, *args):
        # Translucent border and background behind the entries.
        p.setPen(fn.mkPen(255, 255, 255, 100))
        p.setBrush(fn.mkBrush(100, 100, 100, 50))
        p.drawRect(self.boundingRect())

    def hoverEvent(self, ev):
        ev.acceptDrags(QtCore.Qt.LeftButton)

    def mouseDragEvent(self, ev):
        # Allow the user to reposition the legend by dragging it.
        if ev.button() == QtCore.Qt.LeftButton:
            dpos = ev.pos() - ev.lastPos()
            self.autoAnchor(self.pos() + dpos)
class ItemSample(GraphicsWidget):
    """ Class responsible for drawing a single item in a LegendItem (sans label).

    This may be subclassed to draw custom graphics in a Legend.
    """
    ## Todo: make this more generic; let each item decide how it should be represented.
    def __init__(self, item):
        # `item` is the plot item (e.g. PlotDataItem / ScatterPlotItem) whose
        # visual style this sample reproduces in miniature.
        GraphicsWidget.__init__(self)
        self.item = item

    def boundingRect(self):
        # Fixed 20x20 px thumbnail area; paint() draws within this rect.
        return QtCore.QRectF(0, 0, 20, 20)

    def paint(self, p, *args):
        """Draw a miniature preview of self.item: fill area, line and symbol."""
        #p.setRenderHint(p.Antialiasing)  # only if the data is antialiased.
        opts = self.item.opts

        # Fill preview: a small triangle painted with the item's fill brush.
        # Both fillLevel and fillBrush must be set for a fill to be shown.
        if opts.get('fillLevel',None) is not None and opts.get('fillBrush',None) is not None:
            p.setBrush(fn.mkBrush(opts['fillBrush']))
            p.setPen(fn.mkPen(None))
            p.drawPolygon(QtGui.QPolygonF([QtCore.QPointF(2,18), QtCore.QPointF(18,2), QtCore.QPointF(18,18)]))

        # Line preview: a diagonal stroke in the item's pen, skipped for pure
        # scatter items which have no connecting line.
        if not isinstance(self.item, ScatterPlotItem):
            p.setPen(fn.mkPen(opts['pen']))
            p.drawLine(2, 18, 18, 2)

        # Symbol preview: draw the item's marker centered in the thumbnail.
        symbol = opts.get('symbol', None)
        if symbol is not None:
            if isinstance(self.item, PlotDataItem):
                # For PlotDataItem the symbol style lives on its scatter child.
                opts = self.item.scatter.opts

            pen = fn.mkPen(opts['pen'])
            brush = fn.mkBrush(opts['brush'])
            size = opts['size']

            # Move the painter origin to the thumbnail center before drawing.
            p.translate(10,10)
            path = drawSymbol(p, symbol, size, pen, brush)
| gpl-3.0 |
SymbiFlow/python-symbiflow-v2x | v2x/vlog_to_model.py | 1 | 8475 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
"""
Convert a Verilog simulation model to a VPR `model.xml`
The following Verilog attributes are considered on ports:
- `(* CLOCK *)` or `(* CLOCK=1 *)` : force a given port to be a clock
- `(* CLOCK=0 *)` : force a given port not to be a clock
- `(* ASSOC_CLOCK="RDCLK" *)` : force a port's associated
clock to a given value
- `(* COMB_INCLUDE_CLOCKS *)` : When specified on a clock input port
allows it to have combinational relations
with other ports.
- `(* NO_COMB *)` : Forces removal of all combinational relations of an
input port.
- `(* NO_SEQ *)` : Forces removal of all sequential relations of an input
port.
The following Verilog attributes are considered on modules:
- `(* MODEL_NAME="model" *)` : override the name used for
<model> and for ".subckt name" in the BLIF model. Mostly
intended for use with w.py, when several different pb_types
implement the same model.
- `(* CLASS="lut|routing|mux|flipflop|mem" *)` : specify the
class of an given instance. A model will not be generated for
the `lut`, `routing` or `flipflop` class.
"""
import os
import re
import sys
import lxml.etree as ET
from .yosys import run
from .yosys.json import YosysJSON
from .yosys import utils as utils
from .xmlinc import xmlinc
def is_clock_assoc(infiles, module, clk, port, direction):
    """Return True when *port* is associated with the clock *clk*.

    For an input port the association is checked through its related output
    ports; for an output port it is checked directly.  Bidirectional ports
    are not supported.

    Returns
    -------
    bool
    """
    assoc_signals = run.get_clock_assoc_signals(infiles, module, clk)
    if direction == "input":
        # An input is clock-associated when any output it influences is.
        related = run.get_related_output_for_input(infiles, module, port)
        return any(out in assoc_signals for out in related)
    if direction == "output":
        return port in assoc_signals
    assert False, "Bidirectional ports are not supported yet"
    return False
def is_registered_path(tmod, pin, pout):
    """Check whether the path from input *pin* to output *pout* is sequential.

    The path counts as registered when some ``$dff`` cell in *tmod* has its
    D input driven by *pin* and its Q output driving *pout*.  A registered
    path needs no combinational_sink_port entry in the model.

    Returns
    -------
    bool
    """
    # Hoist the loop-invariant port connection lookups out of the cell scan.
    pin_conns = tmod.port_conns(pin)
    pout_conns = tmod.port_conns(pout)
    for cell, ctype in tmod.all_cells:
        if ctype != "$dff":
            continue
        if pin_conns == tmod.cell_conn_list(cell, "D") and \
                pout_conns == tmod.cell_conn_list(cell, "Q"):
            return True
    return False
def vlog_to_model(infiles, includes, top, outfile=None):
    """Convert Verilog simulation model(s) into a VPR ``model.xml`` document.

    Parameters
    ----------
    infiles : list of str
        Verilog source files; the first one provides the default top-level
        name and is scanned for ```include`` directives.
    includes : str or None
        Comma-separated list of include directories passed to Yosys.
    top : str or None
        Name of the top-level module; auto-detected when None.
    outfile : str, optional
        Output path used to resolve xi:include hrefs (default "model.xml").

    Returns
    -------
    str
        Pretty-printed XML as a unicode string.
    """
    # Check Yosys version; v2x requires the "=" select prefix.
    pfx = run.determine_select_prefix()
    if pfx != "=":
        print("ERROR The version of Yosys found is outdated and not supported"
              " by V2X")
        sys.exit(-1)

    iname = os.path.basename(infiles[0])

    if outfile is None:
        outfile = "model.xml"

    if includes:
        for include in includes.split(','):
            run.add_include(include)

    aig_json = run.vlog_to_json(infiles, flatten=True, aig=True)

    # Determine the top-level module: explicit argument first, then Yosys
    # detection, then the %.sim.v file-name convention.
    if top is not None:
        top = top.upper()
    else:
        yj = YosysJSON(aig_json)
        if yj.top is not None:
            top = yj.top
        else:
            wm = re.match(r"([A-Za-z0-9_]+)\.sim\.v", iname)
            if wm:
                top = wm.group(1).upper()
            else:
                # BUGFIX: format the message *before* printing; previously
                # .format() was called on print()'s return value (None),
                # raising AttributeError instead of showing the error.
                print(
                    """\
ERROR file name not of format %.sim.v ({}), cannot detect top level.
Manually specify the top level module using --top""".format(iname)
                )
                sys.exit(1)

    # All detection paths above either set `top` or exit, so the previous
    # dead "if top is None" re-check after this assert has been removed.
    assert top is not None
    yj = YosysJSON(aig_json, top)

    tmod = yj.top_module

    models_xml = ET.Element("models", nsmap={'xi': xmlinc.xi_url})

    inc_re = re.compile(r'^\s*`include\s+"([^"]+)"')
    deps_files = set()
    # XML dependencies need to correspond 1:1 with Verilog includes, so we
    # have to do this manually rather than using Yosys.
    with open(infiles[0], 'r') as f:
        for line in f:
            im = inc_re.match(line)
            if not im:
                continue
            deps_files.add(im.group(1))

    if len(deps_files) > 0:
        # Has dependencies, not a leaf model: emit one xi:include per dep.
        for df in sorted(deps_files):
            abs_base = os.path.dirname(os.path.abspath(infiles[0]))
            abs_dep = os.path.normpath(os.path.join(abs_base, df))
            module_path = os.path.dirname(abs_dep)
            module_basename = os.path.basename(abs_dep)
            wm = re.match(r"([A-Za-z0-9_]+)\.sim\.v", module_basename)
            if wm:
                model_path = "{}/{}.model.xml".format(
                    module_path,
                    wm.group(1).lower()
                )
            else:
                assert False, \
                    "included Verilog file name {} does not follow " \
                    "pattern %%.sim.v".format(module_basename)
            xmlinc.include_xml(
                parent=models_xml,
                href=model_path,
                outfile=outfile,
                xptr="xpointer(models/child::node())"
            )
    else:
        # Is a leaf model.
        topname = tmod.attr("MODEL_NAME", top)
        assert topname == topname.upper(
        ), "Leaf model names should be all uppercase!"
        modclass = tmod.attr("CLASS", "")

        # No <model> is generated for these classes (see module docstring).
        if modclass not in ("input", "output", "lut", "routing", "flipflop"):
            model_xml = ET.SubElement(models_xml, "model", {'name': topname})
            ports = tmod.ports

            inports_xml = ET.SubElement(model_xml, "input_ports")
            outports_xml = ET.SubElement(model_xml, "output_ports")

            clocks = run.list_clocks(infiles, top)

            for name, width, bits, iodir in ports:
                port_attrs = tmod.port_attrs(name)
                # A port is a clock if Yosys says so or its name looks like
                # one; an explicit (* CLOCK *) attribute overrides both.
                is_clock = name in clocks or utils.is_clock_name(name)
                if "CLOCK" in port_attrs:
                    is_clock = int(port_attrs["CLOCK"]) != 0
                attrs = dict(name=name)

                sinks = run.get_combinational_sinks(infiles, top, name)
                # BUGFIX: filter into a new list. The old code removed items
                # from `sinks` while iterating it, which skips the element
                # directly after each removal.
                # Drop comb sinks when the in->out path goes through a dff.
                sinks = [
                    s for s in sinks
                    if not is_registered_path(tmod, name, s)
                ]

                if is_clock:
                    attrs["is_clock"] = "1"
                    # Keep only comb sinks explicitly annotated with the
                    # COMB_INCLUDE_CLOCKS attribute (same mutate-while-
                    # iterating bug fixed here as above).
                    sinks = [
                        s for s in sinks
                        if int(tmod.port_attrs(s).get(
                            "COMB_INCLUDE_CLOCKS", 0)) != 0
                    ]
                else:
                    clks = [
                        clk for clk in clocks
                        if is_clock_assoc(infiles, top, clk, name, iodir)
                    ]
                    if clks and int(port_attrs.get("NO_SEQ", 0)) == 0:
                        attrs["clock"] = " ".join(clks)

                if len(sinks) > 0 and iodir == "input" and \
                        int(port_attrs.get("NO_COMB", 0)) == 0:
                    attrs["combinational_sink_ports"] = " ".join(sinks)

                if iodir == "input":
                    ET.SubElement(inports_xml, "port", attrs)
                elif iodir == "output":
                    ET.SubElement(outports_xml, "port", attrs)
                else:
                    assert False, \
                        "bidirectional ports not permitted in VPR models"

    if len(models_xml) == 0:
        models_xml.insert(0,
                          ET.Comment("this file is intentionally left blank"))

    return ET.tostring(models_xml, pretty_print=True).decode('utf-8')
| isc |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/win32/Demos/CreateFileTransacted_MiniVersion.py | 17 | 2759 | """
This demonstrates the creation of miniversions of a file during a transaction.
The FSCTL_TXFS_CREATE_MINIVERSION control code saves any changes to a new
miniversion (effectively a savepoint within a transaction).
"""
import win32file, win32api, win32transaction
import win32con, winioctlcon
import struct
import os
from pywin32_testutil import str2bytes # py3k-friendly helper

"""
Definition of buffer used with FSCTL_TXFS_CREATE_MINIVERSION:
typedef struct _TXFS_CREATE_MINIVERSION_INFO{
    USHORT StructureVersion;
    USHORT StructureLength;
    ULONG BaseVersion;
    USHORT MiniVersion;}
"""
buf_fmt='HHLH0L' ## buffer size must include struct padding
buf_size=struct.calcsize(buf_fmt)

# Create a scratch file in the user's temp dir to experiment on.
tempdir=win32api.GetTempPath()
tempfile=win32api.GetTempFileName(tempdir,'cft')[0]
print "Demonstrating transactions on tempfile", tempfile
f=open(tempfile,'w')
f.write('This is original file.\n')
f.close()

# Open the file *inside* a kernel transaction (TxF); all writes below are
# transacted until CommitTransaction/RollbackTransaction is called.
trans=win32transaction.CreateTransaction(Description='Test creating miniversions of a file')
hfile=win32file.CreateFileW(tempfile, win32con.GENERIC_READ|win32con.GENERIC_WRITE,
    win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
    None, win32con.OPEN_EXISTING, 0 , None, Transaction=trans)

# Write some data, then snapshot it as miniversion #1 (a savepoint within
# the transaction). DeviceIoControl returns the struct described above.
win32file.WriteFile(hfile, str2bytes('This is first miniversion.\n'))
buf=win32file.DeviceIoControl(hfile, winioctlcon.FSCTL_TXFS_CREATE_MINIVERSION,None,buf_size,None)
struct_ver, struct_len, base_ver, ver_1=struct.unpack(buf_fmt, buf)

# Rewind, overwrite, and snapshot again as miniversion #2.
win32file.SetFilePointer(hfile, 0, win32con.FILE_BEGIN)
win32file.WriteFile(hfile, str2bytes('This is second miniversion!\n'))
buf=win32file.DeviceIoControl(hfile, winioctlcon.FSCTL_TXFS_CREATE_MINIVERSION,None,buf_size,None)
struct_ver, struct_len, base_ver, ver_2=struct.unpack(buf_fmt, buf)
hfile.Close()

## miniversions can't be opened with write access
# Re-open each saved miniversion read-only (still inside the transaction)
# and show its contents: base version, then versions 1 and 2.
hfile_0=win32file.CreateFileW(tempfile, win32con.GENERIC_READ,
    win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
    None, win32con.OPEN_EXISTING, 0 , None, Transaction=trans, MiniVersion=base_ver)
print 'version:',base_ver,win32file.ReadFile(hfile_0, 100)
hfile_0.Close()

hfile_1=win32file.CreateFileW(tempfile, win32con.GENERIC_READ,
    win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
    None, win32con.OPEN_EXISTING, 0 , None, Transaction=trans, MiniVersion=ver_1)
print 'version:',ver_1,win32file.ReadFile(hfile_1, 100)
hfile_1.Close()

hfile_2=win32file.CreateFileW(tempfile, win32con.GENERIC_READ,
    win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
    None, win32con.OPEN_EXISTING, 0 , None, Transaction=trans, MiniVersion=ver_2)
print 'version:',ver_2,win32file.ReadFile(hfile_2, 100)
hfile_2.Close()

## MiniVersions are destroyed when transaction is committed or rolled back
win32transaction.CommitTransaction(trans)

os.unlink(tempfile)
| gpl-3.0 |
hrasdt/cult-panel | Taskbar.py | 1 | 7577 | from gi.repository import Clutter, Wnck
import arrow
from PagerModel import PagerModel
from PagerModel import is_skipped_tasklist, is_mini, is_urgent, is_active, is_normal
def new_pixbuf_texture(ww, hh, pb):
    """Build a Clutter texture of size (ww, hh) from the pixbuf *pb*."""
    texture = Clutter.Texture.new()
    # Fix the on-screen dimensions first.
    texture.set_width(ww)
    texture.set_height(hh)
    # Then copy the pixel data across; the bytes-per-pixel value depends on
    # whether the pixbuf carries an alpha channel.
    has_alpha = pb.props.has_alpha
    bpp = 4 if has_alpha else 3
    texture.set_from_rgb_data(
        pb.get_pixels(),
        has_alpha,
        pb.get_width(), pb.get_height(),
        pb.get_rowstride(),
        bpp, 0)
    return texture
class TaskbarItem(Clutter.Box):
    """One window entry in the Taskbar: an optional icon plus a text label.

    Wraps a WnckWindow and keeps the displayed icon, name and colour in
    sync with the window's state via libwnck signals.
    """

    def __init__(self, window, conf, height = 16):
        Clutter.Box.__init__(self)
        self.conf = conf
        # The window is a WnckWindow.
        self.wnck_win = window

        self.lm = Clutter.BoxLayout.new()
        self.lm.set_spacing(height / 4)
        self.set_layout_manager(self.lm)

        # Get the icon (only when enabled in the config; otherwise None).
        if conf.getboolean("Taskbar", "show-icons"):
            self.icon = new_pixbuf_texture(height, height,
                                           window.get_icon())
        else:
            self.icon = None

        # And the text label, styled from the config.
        fontspec = conf.get("Taskbar", "font-name") + " " + conf.get("Taskbar", "font-size")
        self.label = Clutter.Text.new_full(fontspec,
                                           window.get_name(),
                                           conf.getcolour("Taskbar",
                                                          "font-colour"))

        # In fixed-width mode the label gets whatever width is left after
        # the icon and spacing.
        if conf.getboolean("Taskbar", "fixed-width"):
            width = conf.getint("Taskbar", "width")
            if self.icon:
                width -= self.icon.get_width() + self.lm.get_spacing()
            if self.label:
                self.label.set_size(width, -1)

        # Add the parts.
        if conf.getboolean("Taskbar", "show-icons"):
            self.add_actor(self.icon)
        if conf.getboolean("Taskbar", "show-labels"):
            self.add_actor(self.label)

        # Hook up input.
        self.set_reactive(True)
        self.connect("button-press-event",
                     self.press_event)

        # And track window changes.
        self.wnck_win.connect("icon-changed", self.icon_changed)
        self.wnck_win.connect("name-changed", self.name_changed)
        self.wnck_win.connect("workspace-changed", self.workspace_changed)
        self.wnck_win.connect("state-changed", self.state_changed)

    def belongs_to(self, window):
        """Return True when this item represents *window* (identity check)."""
        return window is self.wnck_win

    def get_window(self):
        """Return the wrapped WnckWindow."""
        return self.wnck_win

    def press_event(self, actor, event):
        """Mouse handler: left raises, middle closes, right minimises."""
        if event.button == 3:
            # Minimise.
            self.wnck_win.minimize()
        elif event.button == 2:
            # Close.
            self.wnck_win.close(arrow.now().timestamp)
        elif event.button == 1:
            # Or raise.
            self.wnck_win.activate(arrow.now().timestamp)

    def name_changed(self, win):
        """Keep the label text in sync with the window title."""
        self.label.set_text(win.get_name())

    def icon_changed(self, win):
        """ Update the icon to reflect a change. """
        # BUGFIX: this handler is connected even when "show-icons" is
        # disabled, in which case self.icon is None and the get_width()
        # call below would crash. Bail out instead.
        if self.icon is None:
            return
        # NOTE(review): the replacement texture is not re-added to the box,
        # matching the original behaviour — confirm this is intended.
        self.icon = new_pixbuf_texture(self.icon.get_width(),
                                       self.icon.get_height(),
                                       win.get_icon())

    def state_changed(self, win, change_mask, new_state):
        self.update_colour()

    def update_colour(self):
        """Recolour the item based on the window state (urgent/mini/active)."""
        col = None
        if is_urgent(self.wnck_win):
            col = self.conf.getcolour("Taskbar", "urgent")
        elif is_mini(self.wnck_win):
            col = self.conf.getcolour("Taskbar", "minimised")
        elif is_active(self.wnck_win):
            col = self.conf.getcolour("Taskbar", "active")
        elif is_normal(self.wnck_win):
            col = self.conf.getcolour("Taskbar", "normal")
        self.set_color(col)

    def workspace_changed(self, win):
        """ Update the workspace that this window is on. """
        # The parent must be a Taskbar.
        self.get_parent().window_workspace_changed(win, self)
class Taskbar(Clutter.Box):
    """A panel widget listing TaskbarItems for the windows on the screen.

    Listens to libwnck screen signals to add/remove items and to keep
    their visibility and colour in sync with workspace/window changes.
    """

    def __init__(self, conf, size):
        Clutter.Box.__init__(self)
        self.screen = conf.getscreen()
        active_ws = self.screen.get_active_workspace()
        self.conf = conf
        self.panel_size = size   # (w, h) of the panel; min() is the item height

        self.lm = Clutter.BoxLayout()
        self.lm.set_spacing(2)
        self.set_layout_manager(self.lm)

        # Work out the orientation/size.
        if self.conf.is_vertical():
            self.lm.set_vertical(True)

        # Populate with one item per existing window.
        for w in conf.getpagermodel().get_tasklist(None, True):
            item = TaskbarItem(w, self.conf, height = min(*size))
            self.set_visibility(item, [None, active_ws])
            item.update_colour()
            self.add_actor(item)

        # Make sure the right windows are visible.
        self.refresh()

        # And connect the signals.
        self.screen.connect("active-workspace-changed",
                            self.active_workspace_changed)
        self.screen.connect("window-opened", self.add_window)
        self.screen.connect("window-closed", self.remove_window)
        self.screen.connect("active-window-changed", self.update_active_window)

    def set_visibility(self, task_item, dat):
        """ Set the correct visibility for a TaskbarItem.

        dat is a [previous_workspace, current_workspace] pair; the elif
        chain below is order-sensitive (skip > all-workspaces > sticky >
        previous > current).
        """
        prev, cur = dat
        window = task_item.wnck_win
        ws = window.get_workspace()
        # Never show these.
        if is_skipped_tasklist(window):
            task_item.hide()
        # Always show in this case.
        elif self.conf.getboolean("Taskbar", "all-workspaces"):
            task_item.show()
        # Sticky window, but not skip-tasklist.
        elif ws is None:
            task_item.show()
        # It was on the last one; hide it!
        elif ws is prev:
            task_item.hide()
        # Nope; we need to show this now.
        elif ws is cur:
            task_item.show()
        else:
            task_item.hide()

    def refresh(self):
        # Re-run visibility for every item against the current workspace.
        self.active_workspace_changed(None, None)

    def active_workspace_changed(self, screen, prev):
        if screen != self.screen: return # Ignore other screens' events.
        cur = self.screen.get_active_workspace()
        self.foreach(self.set_visibility, [prev, cur])

    def add_window(self, screen, window):
        # A new window appeared: create, show-if-appropriate, and add it.
        item = TaskbarItem(window, self.conf, height = min(*self.panel_size))
        self.set_visibility(item, [None, screen.get_active_workspace()])
        self.add_actor(item)

    def remove_window(self, screen, window):
        """ Remove an item from the taskbar completely.

        This will be called when a window is closed.
        """
        def do_remove(task, dat = None):
            if task.belongs_to(window):
                self.remove_actor(task)
        self.foreach(do_remove, None)

    def update_active_window(self, screen, prev):
        # Recolour only the previously-active and newly-active items; at
        # most two items can match, so stop early once both were handled.
        cur = self.screen.get_active_window()
        did_change = 0
        for i in self.get_children():
            if did_change >= 2: return
            if i.belongs_to(prev) or i.belongs_to(cur):
                did_change += 1
                i.update_colour()

    def window_workspace_changed(self, window, taskbar_child):
        # Called by a TaskbarItem when its window moved workspace.
        if window.get_workspace() != self.screen.get_active_workspace():
            taskbar_child.hide()
        else:
            taskbar_child.show()
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.