| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (990 classes) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (15 classes) |
|---|---|---|---|---|---|
ericwjr/wandboard_kernel | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
    # Called by perf once, after the last event has been delivered; dump
    # the table of event types that were seen but not handled.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    # Handler for the irq:softirq_entry tracepoint.  Prints the common
    # event header, the non-common trace fields, then the softirq vector
    # number decoded to a symbolic name via symbol_str() (from Util.py).
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # trailing comma suppresses the automatic newline (python2 print)
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for the kmem:kmalloc tracepoint.  Prints the common event
    # header, the non-common trace fields, then the allocation details,
    # with the GFP flag bits decoded to symbolic form via flag_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # trailing comma suppresses the automatic newline (python2 print)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
         flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events for which no dedicated handler exists.  'unhandled' is
    # an autodict: a missing key auto-creates an empty sub-dict, so
    # '+= 1' on a fresh entry raises TypeError (dict + int), which marks
    # the first occurrence.  KeyError is also caught so the counter keeps
    # working should 'unhandled' ever be replaced by a plain dict.
    try:
        unhandled[event_name] += 1
    except (TypeError, KeyError):
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Print the common per-event prefix: event name, cpu, timestamp
    # (secs.nsecs), pid and comm.  The trailing comma keeps the cursor on
    # the same line so the handler can append its own fields.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These values are fetched back from perf via the context object
    # (common_pc / common_flags / common_lock_depth callbacks) rather
    # than being passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    # Report a two-column table of event types that were seen but had no
    # handler, with a count for each.  Prints nothing if every event was
    # handled.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
UstadMobile/exelearning-extjs5-mirror | twisted/pb/pb.py | 14 | 19829 | # -*- test-case-name: twisted.pb.test.test_pb -*-
# This is the primary entry point for newpb
from twisted.python import failure, log, urlpath
from twisted.python.components import registerAdapter
from twisted.internet import defer, protocol
from twisted.application import service, strports
import urlparse
# teach urlparse that pb: URLs carry a host:port netloc component
urlparse.uses_netloc.append("pb")
from twisted.pb import ipb, broker, base32, negotiate, tokens, referenceable
from twisted.pb.tokens import PBError
# SSL/certificate support is optional: fall back to unencrypted-only
# operation when the crypto module is missing or reports itself unavailable
try:
    from twisted.pb import crypto
except ImportError:
    crypto = None
if crypto and not crypto.available:
    crypto = None
try:
    # we want to use the random-number generation code from PyCrypto
    from Crypto.Util import randpool
except ImportError:
    randpool = None
    # fall back to the stdlib 'random' module if we can't get something that
    # uses /dev/random. This form is seeded from the current system time,
    # which is much less satisfying.
    log.msg("Warning: PyCrypto not available, so secure URLs will be "
            "less random than we'd really prefer")
    import random
# names we import so that others can reach them as pb.foo
from twisted.pb.remoteinterface import RemoteInterface
from twisted.pb.referenceable import Referenceable, SturdyRef
from twisted.pb.copyable import Copyable, RemoteCopy, registerRemoteCopy
from twisted.pb.ipb import DeadReferenceError
from twisted.pb.tokens import BananaError
# module-level registry of every active Listener, so that multiple Tubs
# can discover and share an already-open listening port
Listeners = []
class Listener(protocol.ServerFactory):
    """I am responsible for a single listening port, which may connect to
    multiple Tubs. I have a strports-based Service, which I will attach as a
    child of one of my Tubs. If that Tub disconnects, I will reparent the
    Service to a remaining one.
    Unencrypted Tubs use a TubID of 'None'. There may be at most one such Tub
    attached to any given Listener."""
    # this also serves as the ServerFactory
    def __init__(self, port, options={}):
        """
        @type port: string
        @param port: a L{twisted.application.strports} -style description.
        @param options: dictionary of negotiation options shared by every
                        connection accepted on this port
        """
        name, args, kw = strports.parse(port, None)
        assert name in ("TCP", "UNIX") # TODO: IPv6
        self.port = port
        self.options = options
        self.parentTub = None   # the Tub whose Service tree holds self.s
        self.tubs = {}          # maps tubID (or None) to Tub
        self.redirects = {}     # maps tubID to a redirect location string
        self.s = strports.service(port, self)
        Listeners.append(self)
    def getPortnum(self):
        """When this Listener was created with a strport string of '0' or
        'tcp:0' (meaning 'please allocate me something'), and if the Listener
        is active (it is attached to a Tub which is in the 'running' state),
        this method will return the port number that was allocated. This is
        useful for the following pattern:
            t = PBService()
            l = t.listenOn('tcp:0')
            t.setLocation('localhost:%d' % l.getPortnum())
        """
        assert self.s.running
        name, args, kw = strports.parse(self.port, None)
        assert name in ("TCP",)
        return self.s._port.getHost().port
    def __repr__(self):
        if self.tubs:
            return "<Listener at 0x%x on %s with tubs %s>" % (
                abs(id(self)),
                self.port,
                ",".join([str(k) for k in self.tubs.keys()]))
        return "<Listener at 0x%x on %s with no tubs>" % (abs(id(self)),
                                                          self.port)
    def addTub(self, tub):
        """Attach a Tub to this port; the first Tub added becomes the
        service parent.  Raises RuntimeError on a tubID conflict."""
        if tub.tubID in self.tubs:
            if tub.tubID is None:
                raise RuntimeError("This Listener (on %s) already has an "
                                   "unencrypted Tub, you cannot add a second "
                                   "one" % self.port)
            # BUG FIX: was '(self,port, tub.tubID)' -- a comma instead of a
            # dot -- which raised NameError('port') instead of reporting
            # the duplicate-TubID conflict to the caller.
            raise RuntimeError("This Listener (on %s) is already connected "
                               "to TubID '%s'" % (self.port, tub.tubID))
        self.tubs[tub.tubID] = tub
        if self.parentTub is None:
            self.parentTub = tub
            self.s.setServiceParent(self.parentTub)
    def removeTub(self, tub):
        # this returns a Deferred, since the removal might cause the Listener
        # to shut down
        del self.tubs[tub.tubID]
        if self.parentTub is tub:
            # we need to switch to a new one
            tubs = self.tubs.values()
            if tubs:
                self.parentTub = tubs[0]
                # TODO: I want to do this without first doing
                # disownServiceParent, so the port remains listening. Can we
                # do this? It looks like setServiceParent does
                # disownServiceParent first, so it may glitch.
                self.s.setServiceParent(self.parentTub)
            else:
                # no more tubs, this Listener will go away now
                d = self.s.disownServiceParent()
                Listeners.remove(self)
                return d
        return defer.succeed(None)
    def getService(self):
        """Return the underlying strports Service object."""
        return self.s
    def addRedirect(self, tubID, location):
        """Redirect connections intended for tubID to another location."""
        assert tubID is not None # unencrypted Tubs don't get redirects
        self.redirects[tubID] = location
    def removeRedirect(self, tubID):
        del self.redirects[tubID]
    def buildProtocol(self, addr):
        """Return a Broker attached to me (as the service provider).
        """
        proto = negotiate.Negotiation()
        proto.initServer(self)
        proto.factory = self
        return proto
    def lookupTubID(self, tubID):
        # returns a (tub-or-None, redirect-or-None) pair for negotiation
        return self.tubs.get(tubID), self.redirects.get(tubID)
class PBService(service.MultiService):
    """I am a presence in the PB universe, also known as a Tub.
    This is the primary entry point for all PB-using applications, both
    clients and servers.
    I am known to the outside world by a base URL, which may include
    authentication information (a yURL). This is my 'TubID'.
    I contain Referenceables, and manage RemoteReferences to Referenceables
    that live in other Tubs.
    @param encrypted: True if this Tub should provide secure references.
                      'True' is the default if crypto is available.
    @param certData: if provided, use it as a certificate rather than
                     generating a new one. This is a PEM-encoded
                     private/public keypair, as returned by Tub.getCertData()
    @param options: a dictionary of options that can influence connection
                    connection negotiation. Currently defined keys are:
                    - debug_slow: if True, wait half a second between
                      each negotiation response
    @itype tubID: string
    @ivar tubID: a global identifier for this Tub, possibly including
                 authentication information, hash of SSL certificate
    @ivar brokers: maps TubIDs to L{Broker} instances
    @itype listeners: maps strport to TCPServer service
    @ivar referenceToName: maps Referenceable to a name
    @ivar nameToReference: maps name to Referenceable
    """
    unsafeTracebacks = True # TODO: better way to enable this
    debugBanana = False
    NAMEBITS = 160 # length of swissnumber for each reference
    TUBIDBITS = 16 # length of non-crypto tubID
    # class-level entropy source, shared by all Tubs: a PyCrypto RandomPool
    # when available, otherwise None (generateSwissnumber then falls back
    # to the stdlib 'random' module -- see the module-level warning)
    if randpool:
        randpool = randpool.RandomPool()
    else:
        randpool = None
    def __init__(self, encrypted=None, certData=None, options={}):
        service.MultiService.__init__(self)
        # default to encryption whenever optional crypto support loaded
        if encrypted is None:
            if crypto:
                encrypted = True
            else:
                encrypted = False
        assert encrypted in (True, False)
        self.options = options
        self.listeners = []
        self.locationHints = []
        self.encrypted = encrypted
        if encrypted and not crypto:
            raise RuntimeError("crypto for PB is not available, "
                               "try importing twisted.pb.crypto and see "
                               "what happens")
        if encrypted:
            # the TubID is derived from the SSL certificate's SHA1 digest
            if certData:
                cert = crypto.sslverify.PrivateCertificate.loadPEM(certData)
            else:
                cert = self.createCertificate()
            self.myCertificate = cert
            self.tubID = crypto.digest32(cert.digest("sha1"))
        else:
            self.myCertificate = None
            self.tubID = None
        # local Referenceables
        self.nameToReference = {}
        self.referenceToName = {}
        # remote stuff. Most of these use a TubRef (or NoAuthTubRef) as a
        # dictionary key
        self.tubConnectors = {} # maps TubRef to a TubConnector
        self.waitingForBrokers = {} # maps TubRef to list of Deferreds
        self.brokers = {} # maps TubRef to a Broker that connects to them
        self.unencryptedBrokers = [] # inbound Brokers without TubRefs
    def createCertificate(self):
        # this is copied from test_sslverify.py
        dn = crypto.DistinguishedName(commonName="newpb_thingy")
        keypair = crypto.KeyPair.generate()
        req = keypair.certificateRequest(dn)
        certData = keypair.signCertificateRequest(dn, req,
                                                  lambda dn: True,
                                                  132)
        cert = keypair.newCertificate(certData)
        #opts = cert.options()
        # 'opts' can be given to reactor.listenSSL, or to transport.startTLS
        return cert
    def getCertData(self):
        # the string returned by this method can be used as the certData=
        # argument to create a new PBService with the same identity. TODO:
        # actually test this, I don't know if dump/keypair.newCertificate is
        # the right pair of methods.
        return self.myCertificate.dumpPEM()
    def setLocation(self, *hints):
        """Tell this service what its location is: a host:port description of
        how to reach it from the outside world. You need to use this because
        the Tub can't do it without help. If you do a
        C{s.listenOn('tcp:1234')}, and the host is known as
        C{foo.example.com}, then it would be appropriate to do:
            s.setLocation('foo.example.com:1234')
        You must set the location before you can register any references.
        Encrypted Tubs can have multiple location hints, just provide
        multiple arguments. Unencrypted Tubs can only have one location."""
        if not self.encrypted and len(hints) > 1:
            raise PBError("Unencrypted tubs may only have one location hint")
        self.locationHints = hints
    def listenOn(self, what, options={}):
        """Start listening for connections.
        @type what: string or Listener instance
        @param what: a L{twisted.application.strports} -style description,
                     or a L{Listener} instance returned by a previous call to
                     listenOn.
        @param options: a dictionary of options that can influence connection
                        negotiation before the target Tub has been determined
        @return: The Listener object that was created. This can be used to
        stop listening later on, to have another Tub listen on the same port,
        and to figure out which port was allocated when you used a strports
        specification of'tcp:0'. """
        if type(what) is str:
            l = Listener(what, options)
        else:
            # reusing an existing Listener: its options were fixed when it
            # was created, so no new ones may be supplied here
            assert not options
            l = what
        assert l not in self.listeners
        l.addTub(self)
        self.listeners.append(l)
        return l
    def stopListeningOn(self, l):
        # returns a Deferred; removing the last Tub from a Listener causes
        # the listening port to shut down
        self.listeners.remove(l)
        d = l.removeTub(self)
        return d
    def getListeners(self):
        """Return the set of Listener objects that allow the outside world to
        connect to this Tub."""
        return self.listeners[:]
    def clone(self):
        """Return a new Tub, listening on the same ports as this one. """
        t = PBService(encrypted=self.encrypted)
        for l in self.listeners:
            t.listenOn(l)
        return t
    def stopService(self):
        # shut down every Listener and drop every open connection,
        # encrypted and unencrypted alike; returns a DeferredList that
        # fires when all of them have finished
        dl = []
        for l in self.listeners:
            # TODO: rethink this, what I want is for stopService to cause all
            # Listeners to shut down, but I'm not sure this is the right way
            # to do it.
            dl.append(l.removeTub(self))
        dl.append(service.MultiService.stopService(self))
        for b in self.brokers.values():
            d = defer.maybeDeferred(b.transport.loseConnection)
            dl.append(d)
        for b in self.unencryptedBrokers:
            d = defer.maybeDeferred(b.transport.loseConnection)
            dl.append(d)
        return defer.DeferredList(dl)
    def generateSwissnumber(self, bits):
        # prefer the PyCrypto entropy pool; otherwise fall back to the
        # time-seeded stdlib 'random' module (see module-level warning)
        if self.randpool:
            bytes = self.randpool.get_bytes(bits/8)
        else:
            bytes = "".join([chr(random.randint(0,255))
                             for n in range(bits/8)])
        return base32.encode(bytes)
    def buildURL(self, name):
        # 'pb://tubID@hint1,hint2/name' for encrypted Tubs,
        # 'pbu://hint/name' for unencrypted ones
        if self.encrypted:
            # TODO: IPv6 dotted-quad addresses have colons, but need to have
            # host:port
            hints = ",".join(self.locationHints)
            return "pb://" + self.tubID + "@" + hints + "/" + name
        return "pbu://" + self.locationHints[0] + "/" + name
    def registerReference(self, ref, name=None):
        """Make a Referenceable available to the outside world. A URL is
        returned which can be used to access this object. This registration
        will remain in effect until explicitly unregistered.
        @type name: string (optional)
        @param name: if provided, the object will be registered with this
                     name. If not, a random (unguessable) string will be
                     used.
        @rtype: string
        @return: the URL which points to this object. This URL can be passed
        to PBService.getReference() in any PBService on any host which can
        reach this one.
        """
        if not self.locationHints:
            raise RuntimeError("you must setLocation() before "
                               "you can registerReference()")
        # re-registering an already-registered object returns its URL
        if self.referenceToName.has_key(ref):
            return self.buildURL(self.referenceToName[ref])
        if name is None:
            name = self.generateSwissnumber(self.NAMEBITS)
        self.referenceToName[ref] = name
        self.nameToReference[name] = ref
        return self.buildURL(name)
    def getReferenceForName(self, name):
        # local lookup only; raises KeyError for unknown names
        return self.nameToReference[name]
    def getReferenceForURL(self, url):
        # TODO: who should this be used by?
        sturdy = SturdyRef(url)
        assert sturdy.tubID == self.tubID
        return self.getReferenceForName(sturdy.name)
    def getURLForReference(self, ref):
        """Return the global URL for the reference, if there is one, or None
        if there is not."""
        name = self.referenceToName.get(ref)
        if name:
            return self.buildURL(name)
        return None
    def revokeReference(self, ref):
        # TODO
        pass
    def unregisterURL(self, url):
        # remove the registration made for this URL's swissnumber name
        sturdy = SturdyRef(url)
        name = sturdy.name
        ref = self.nameToReference[name]
        del self.nameToReference[name]
        del self.referenceToName[ref]
        self.revokeReference(ref)
    def unregisterReference(self, ref):
        # remove both directions of the name<->reference mapping
        name = self.referenceToName[ref]
        url = self.buildURL(name)
        sturdy = SturdyRef(url)
        name = sturdy.name
        del self.nameToReference[name]
        del self.referenceToName[ref]
        self.revokeReference(ref)
    def getReference(self, sturdyOrURL):
        """Acquire a RemoteReference for the given SturdyRef/URL.
        @return: a Deferred that fires with the RemoteReference
        """
        if isinstance(sturdyOrURL, SturdyRef):
            sturdy = sturdyOrURL
        else:
            sturdy = SturdyRef(sturdyOrURL)
        # pb->pb: ok, requires crypto
        # pbu->pb: ok, requires crypto
        # pbu->pbu: ok
        # pb->pbu: ok, requires crypto
        if sturdy.encrypted and not crypto:
            e = BananaError("crypto for PB is not available, "
                            "we cannot handle encrypted PB-URLs like %s"
                            % sturdy.getURL())
            return defer.fail(e)
        name = sturdy.name
        d = self.getBrokerForTubRef(sturdy.getTubRef())
        d.addCallback(lambda b: b.getYourReferenceByName(name))
        return d
    def getBrokerForTubRef(self, tubref):
        # Return a Deferred that fires with a (shared) Broker connected to
        # the given TubRef, starting an outbound connection if necessary.
        if tubref in self.brokers:
            return defer.succeed(self.brokers[tubref])
        d = defer.Deferred()
        if tubref not in self.waitingForBrokers:
            self.waitingForBrokers[tubref] = []
        self.waitingForBrokers[tubref].append(d)
        if tubref not in self.tubConnectors:
            # the TubConnector will call our brokerAttached when it finishes
            # negotiation, which will fire waitingForBrokers[tubref].
            c = negotiate.TubConnector(self, tubref)
            self.tubConnectors[tubref] = c
            c.connect()
        return d
    def connectionFailed(self, tubref, why):
        # we previously initiated an outbound TubConnector to this tubref, but
        # it was unable to establish a connection. 'why' is the most useful
        # Failure that occurred (i.e. it is a NegotiationError if we made it
        # that far, otherwise it's a ConnectionFailed).
        if tubref in self.tubConnectors:
            del self.tubConnectors[tubref]
        if tubref in self.brokers:
            # oh, but fortunately an inbound connection must have succeeded.
            # Nevermind.
            return
        # inform hopeful Broker-waiters that they aren't getting one
        if tubref in self.waitingForBrokers:
            waiting = self.waitingForBrokers[tubref]
            del self.waitingForBrokers[tubref]
            for d in waiting:
                d.errback(why)
    def brokerAttached(self, tubref, broker, isClient):
        # called by negotiation when a connection (inbound or outbound)
        # has completed; fires any Deferreds waiting on this tubref
        if not tubref:
            # this is an inbound connection from an unencrypted Tub
            assert not isClient
            # we just track it so we can disconnect it later
            self.unencryptedBrokers.append(broker)
            return
        if tubref in self.tubConnectors:
            # we initiated an outbound connection to this tubref
            if not isClient:
                # however, the connection we got was from an inbound
                # connection. The completed (inbound) connection wins, so
                # abandon the outbound TubConnector
                self.tubConnectors[tubref].shutdown()
            # we don't need the TubConnector any more
            del self.tubConnectors[tubref]
        if tubref in self.brokers:
            # oops, this shouldn't happen but it isn't fatal. Raise
            # BananaError so the Negotiation will drop the connection
            raise BananaError("unexpected duplicate connection")
        self.brokers[tubref] = broker
        # now inform everyone who's been waiting on it
        if tubref in self.waitingForBrokers:
            waiting = self.waitingForBrokers[tubref]
            del self.waitingForBrokers[tubref]
            for d in waiting:
                d.callback(broker)
    def brokerDetached(self, broker, why):
        # the Broker will have already severed all active references
        for tubref in self.brokers.keys():
            if self.brokers[tubref] is broker:
                del self.brokers[tubref]
        if broker in self.unencryptedBrokers:
            self.unencryptedBrokers.remove(broker)
def getRemoteURL_TCP(host, port, pathname, *interfaces):
    """Convenience helper: build a pb URL from host/port/pathname, create
    a throwaway PBService, and return a Deferred that fires with a
    RemoteReference to the named object.
    NOTE: 'interfaces' is accepted for signature compatibility but is
    currently unused -- PBService.getReference() takes no interface list.
    """
    url = "pb://%s:%d/%s" % (host, port, pathname)
    s = PBService()
    # BUG FIX: previously called s.getReference(url, interfaces), but
    # getReference() accepts a single SturdyRef/URL argument, so every
    # call raised TypeError.
    d = s.getReference(url)
    return d
| gpl-2.0 |
cherusk/ansible | test/units/plugins/connection/test_connection.py | 52 | 6390 | # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
#from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
#from ansible.plugins.connection.chroot import Connection as ChrootConnection
#from ansible.plugins.connection.funcd import Connection as FuncdConnection
#from ansible.plugins.connection.jail import Connection as JailConnection
#from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
from ansible.plugins.connection.lxc import Connection as LxcConnection
from ansible.plugins.connection.local import Connection as LocalConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.docker import Connection as DockerConnection
#from ansible.plugins.connection.winrm import Connection as WinRmConnection
from ansible.plugins.connection.network_cli import Connection as NetworkCliConnection
class TestConnectionBaseClass(unittest.TestCase):
    """Tests for ConnectionBase's abstract interface and for instantiating
    the concrete connection plugins with a PlayContext and an input stream."""
    def setUp(self):
        # every connection plugin takes (play_context, new_stdin)
        self.play_context = PlayContext()
        self.in_stream = StringIO()
    def tearDown(self):
        pass
    def test_subclass_error(self):
        # a ConnectionBase subclass that implements none of the abstract
        # methods must not be instantiable ...
        class ConnectionModule1(ConnectionBase):
            pass
        with self.assertRaises(TypeError):
            ConnectionModule1()
        # ... nor one that only adds an unrelated method
        class ConnectionModule2(ConnectionBase):
            def get(self, key):
                super(ConnectionModule2, self).get(key)
        with self.assertRaises(TypeError):
            ConnectionModule2()
    def test_subclass_success(self):
        # implementing the full abstract interface makes the subclass
        # instantiable
        class ConnectionModule3(ConnectionBase):
            @property
            def transport(self):
                pass
            def _connect(self):
                pass
            def exec_command(self):
                pass
            def put_file(self):
                pass
            def fetch_file(self):
                pass
            def close(self):
                pass
        self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)
    # def test_accelerate_connection_module(self):
    #     self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
    #
    # def test_chroot_connection_module(self):
    #     self.assertIsInstance(ChrootConnection(), ChrootConnection)
    #
    # def test_funcd_connection_module(self):
    #     self.assertIsInstance(FuncdConnection(), FuncdConnection)
    #
    # def test_jail_connection_module(self):
    #     self.assertIsInstance(JailConnection(), JailConnection)
    #
    # def test_libvirt_lxc_connection_module(self):
    #     self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)
    def test_lxc_connection_module(self):
        self.assertIsInstance(LxcConnection(self.play_context, self.in_stream), LxcConnection)
    def test_local_connection_module(self):
        self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)
    def test_paramiko_connection_module(self):
        self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)
    def test_ssh_connection_module(self):
        self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)
    # the docker plugin probes the docker CLI version at construction time;
    # both probe helpers are patched so no docker binary is required
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.2.3', '', 0))
    def test_docker_connection_module_too_old(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.3.4', '', 0))
    def test_docker_connection_module(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
                              DockerConnection)
    # old version and new version fail
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('false', 'garbage', '', 1))
    def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
        self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
    # def test_winrm_connection_module(self):
    #     self.assertIsInstance(WinRmConnection(), WinRmConnection)
    def test_network_cli_connection_module(self):
        # network_cli subclasses the paramiko connection, so an instance
        # must satisfy both isinstance checks
        self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), NetworkCliConnection)
        self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), ParamikoConnection)
| gpl-3.0 |
MRtrix3/mrtrix3 | lib/mrtrix3/_5ttgen/freesurfer.py | 1 | 4010 | # Copyright (c) 2008-2021 the MRtrix3 contributors.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Covered Software is provided under this License on an "as is"
# basis, without warranty of any kind, either expressed, implied, or
# statutory, including, without limitation, warranties that the
# Covered Software is free of defects, merchantable, fit for a
# particular purpose or non-infringing.
# See the Mozilla Public License v. 2.0 for more details.
#
# For more details, see http://www.mrtrix.org/.
import os.path, shutil
from mrtrix3 import MRtrixError
from mrtrix3 import app, path, run
def usage(base_parser, subparsers): #pylint: disable=unused-variable
  # Register the 'freesurfer' sub-command of 5ttgen: authorship / synopsis
  # metadata plus its positional and optional command-line arguments.
  parser = subparsers.add_parser('freesurfer', parents=[base_parser])
  parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)')
  parser.set_synopsis('Generate the 5TT image based on a FreeSurfer parcellation image')
  parser.add_argument('input', help='The input FreeSurfer parcellation image (any image containing \'aseg\' in its name)')
  parser.add_argument('output', help='The output 5TT image')
  options = parser.add_argument_group('Options specific to the \'freesurfer\' algorithm')
  options.add_argument('-lut', help='Manually provide path to the lookup table on which the input parcellation image is based (e.g. FreeSurferColorLUT.txt)')
def check_output_paths(): #pylint: disable=unused-variable
  # Fail early if the requested output image path cannot be written
  # (honours the -force option via the app framework).
  app.check_output_path(app.ARGS.output)
def get_inputs(): #pylint: disable=unused-variable
  # Import the parcellation image (and the user-supplied lookup table, if
  # any) into the scratch directory before processing begins.
  run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif'))
  if app.ARGS.lut:
    run.function(shutil.copyfile, path.from_user(app.ARGS.lut, False), path.to_scratch('LUT.txt', False))
def execute(): #pylint: disable=unused-variable
  # Locate the FreeSurfer colour lookup table: either the copy placed in
  # the scratch directory by get_inputs() (-lut option), or the default
  # table from the FreeSurfer installation ($FREESURFER_HOME).
  lut_input_path = 'LUT.txt'
  if not os.path.exists('LUT.txt'):
    freesurfer_home = os.environ.get('FREESURFER_HOME', '')
    if not freesurfer_home:
      raise MRtrixError('Environment variable FREESURFER_HOME is not set; please run appropriate FreeSurfer configuration script, set this variable manually, or provide script with path to file FreeSurferColorLUT.txt using -lut option')
    lut_input_path = os.path.join(freesurfer_home, 'FreeSurferColorLUT.txt')
    if not os.path.isfile(lut_input_path):
      raise MRtrixError('Could not find FreeSurfer lookup table file (expected location: ' + lut_input_path + '), and none provided using -lut')
  # Choose the FreeSurfer -> tissue conversion table; a variant exists
  # that assigns amygdalae / hippocampi to sub-cortical grey matter
  if app.ARGS.sgm_amyg_hipp:
    lut_output_file_name = 'FreeSurfer2ACT_sgm_amyg_hipp.txt'
  else:
    lut_output_file_name = 'FreeSurfer2ACT.txt'
  lut_output_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), lut_output_file_name)
  if not os.path.isfile(lut_output_path):
    raise MRtrixError('Could not find lookup table file for converting FreeSurfer parcellation output to tissues (expected location: ' + lut_output_path + ')')
  # Initial conversion from FreeSurfer parcellation to five principal tissue types
  run.command('labelconvert input.mif ' + lut_input_path + ' ' + lut_output_path + ' indices.mif')
  # Crop to reduce file size
  if app.ARGS.nocrop:
    image = 'indices.mif'
  else:
    image = 'indices_cropped.mif'
    run.command('mrthreshold indices.mif - -abs 0.5 | mrgrid indices.mif crop ' + image + ' -mask -')
  # Convert into the 5TT format for ACT
  # one binary volume per tissue index (per the conversion table above),
  # concatenated along axis 3 and stored as float32
  run.command('mrcalc ' + image + ' 1 -eq cgm.mif')
  run.command('mrcalc ' + image + ' 2 -eq sgm.mif')
  run.command('mrcalc ' + image + ' 3 -eq wm.mif')
  run.command('mrcalc ' + image + ' 4 -eq csf.mif')
  run.command('mrcalc ' + image + ' 5 -eq path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - result.mif -datatype float32')
  run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE)
| mpl-2.0 |
kennethgillen/ansible | test/units/modules/network/eos/test_eos_config.py | 41 | 4971 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_config
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosConfigModule(TestEosModule):
module = eos_config
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.eos.eos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_config.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.get_config.return_value = load_fixture('eos_config_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_config_no_change(self):
args = dict(lines=['hostname localhost'])
set_module_args(args)
result = self.execute_module()
def test_eos_config_src(self):
args = dict(src=load_fixture('eos_config_candidate.cfg'))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01', 'interface Ethernet1',
'description test interface', 'no shutdown', 'ip routing']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_eos_config_lines(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_eos_config_before(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
before=['before command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['before command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('before command', result['commands'][0])
def test_eos_config_after(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
after=['after command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['after command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('after command', result['commands'][-1])
def test_eos_config_parents(self):
args = dict(lines=['ip address 1.2.3.4/5', 'no shutdown'], parents=['interface Ethernet10'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
self.assertEqual(config, result['commands'], result['commands'])
def test_eos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
result = self.execute_module(failed=True)
def test_eos_config_match_exact_requires_lines(self):
    """``match=exact`` without ``lines`` must fail validation.

    Fix: drop the unused ``result`` local.
    """
    set_module_args(dict(match='exact'))
    self.execute_module(failed=True)
def test_eos_config_match_strict_requires_lines(self):
    """``match=strict`` without ``lines`` must fail validation.

    Fix: drop the unused ``result`` local.
    """
    set_module_args(dict(match='strict'))
    self.execute_module(failed=True)
def test_eos_config_replace_block_requires_lines(self):
    """``replace=block`` without ``lines`` must fail validation.

    Fix: drop the unused ``result`` local.
    """
    set_module_args(dict(replace='block'))
    self.execute_module(failed=True)
def test_eos_config_replace_config_requires_src(self):
    """``replace=config`` without ``src`` must fail validation.

    Fix: drop the unused ``result`` local.
    """
    set_module_args(dict(replace='config'))
    self.execute_module(failed=True)
def test_eos_config_backup_returns__backup__(self):
    """Requesting a backup must expose the ``__backup__`` result key."""
    set_module_args(dict(backup=True))
    result = self.execute_module()
    self.assertIn('__backup__', result)
| gpl-3.0 |
bogdan-kulynych/cloudlectures | werkzeug/_internal.py | 89 | 14082 | # -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import inspect
from weakref import WeakKeyDictionary
from cStringIO import StringIO
from Cookie import SimpleCookie, Morsel, CookieError
from time import gmtime
from datetime import datetime, date
# Internal module state shared across werkzeug.
_logger = None  # lazily created 'werkzeug' logger (see _log below)
_empty_stream = StringIO('')  # shared, reusable empty input stream
_signature_cache = WeakKeyDictionary()  # func -> parse callable (see _parse_signature)
_epoch_ord = date(1970, 1, 1).toordinal()  # proleptic ordinal of the Unix epoch

# Canonical reason phrases for HTTP status codes.
HTTP_STATUS_CODES = {
    100: 'Continue',
    101: 'Switching Protocols',
    102: 'Processing',
    200: 'OK',
    201: 'Created',
    202: 'Accepted',
    203: 'Non Authoritative Information',
    204: 'No Content',
    205: 'Reset Content',
    206: 'Partial Content',
    207: 'Multi Status',
    226: 'IM Used',              # see RFC 3229
    300: 'Multiple Choices',
    301: 'Moved Permanently',
    302: 'Found',
    303: 'See Other',
    304: 'Not Modified',
    305: 'Use Proxy',
    307: 'Temporary Redirect',
    400: 'Bad Request',
    401: 'Unauthorized',
    402: 'Payment Required',     # unused
    403: 'Forbidden',
    404: 'Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    409: 'Conflict',
    410: 'Gone',
    411: 'Length Required',
    412: 'Precondition Failed',
    413: 'Request Entity Too Large',
    414: 'Request URI Too Long',
    415: 'Unsupported Media Type',
    416: 'Requested Range Not Satisfiable',
    417: 'Expectation Failed',
    418: 'I\'m a teapot',        # see RFC 2324
    422: 'Unprocessable Entity',
    423: 'Locked',
    424: 'Failed Dependency',
    426: 'Upgrade Required',
    449: 'Retry With',           # proprietary MS extension
    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    505: 'HTTP Version Not Supported',
    507: 'Insufficient Storage',
    510: 'Not Extended'
}
class _Missing(object):
    """Sentinel type whose singleton instance marks "no value supplied"."""

    def __repr__(self):
        return 'no value'

    def __reduce__(self):
        # Pickle as a reference to the module-level singleton below.
        return '_missing'


# The canonical sentinel instance used throughout werkzeug.
_missing = _Missing()
def _proxy_repr(cls):
    """Build a ``__repr__`` for proxy classes that wraps *cls*'s repr.

    The generated repr looks like ``ProxyClassName(<repr of wrapped>)``.
    """
    def proxy_repr(self):
        wrapped = cls.__repr__(self)
        return '%s(%s)' % (self.__class__.__name__, wrapped)
    return proxy_repr
def _get_environ(obj):
    """Return the WSGI environ for *obj*.

    *obj* may be the environ dict itself or any object carrying an
    ``environ`` attribute (e.g. a request object).
    """
    environ = getattr(obj, 'environ', obj)
    assert isinstance(environ, dict), \
        '%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
    return environ
def _log(type, message, *args, **kwargs):
    """Log into the internal werkzeug logger."""
    global _logger
    if _logger is None:
        import logging
        _logger = logging.getLogger('werkzeug')
        # Attach a default handler only when the end-user application has
        # not configured logging itself.
        if not logging.root.handlers and _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)
            _logger.addHandler(logging.StreamHandler())
    # `type` is the logger method name, e.g. 'info' or 'warning'.
    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
def _parse_signature(func):
    """Return a signature object for the function."""
    # Unwrap bound/unbound methods to the underlying function (Python 2).
    if hasattr(func, 'im_func'):
        func = func.im_func
    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse
    # inspect the function signature and collect all the information
    positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
    defaults = defaults or ()
    arg_count = len(positional)
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError('cannot parse functions that unpack tuples '
                            'in the function signature')
        try:
            # Negative index lines defaults up with the trailing arguments.
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)

    def parse(args, kwargs):
        # Returns (new_args, kwargs, missing, extra, extra_positional,
        #          arguments, vararg_var, kwarg_var) for a given call.
        new_args = []
        missing = []
        extra = {}
        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                # Positional slot filled; a duplicate keyword is "extra".
                if name in kwargs:
                    extra[name] = kwargs.pop(name)
        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        # NOTE(review): the double negation reads as "kwarg_var is None",
        # i.e. leftover keywords only count as extra when the function has
        # no **kwargs catch-all.
        if kwargs and not kwarg_var is not None:
            extra.update(kwargs)
            kwargs = {}
        return new_args, kwargs, missing, extra, extra_positional, \
            arguments, vararg_var, kwarg_var
    _signature_cache[func] = parse
    return parse
def _patch_wrapper(old, new):
    """Forward the metadata of *old* onto the decorated *new* and return it.

    Copies name, module, docstring and the attribute dict; failures (e.g.
    for builtins with read-only attributes) are silently ignored.
    """
    try:
        new.__name__ = old.__name__
        new.__module__ = old.__module__
        new.__doc__ = old.__doc__
        new.__dict__ = old.__dict__
    except Exception:
        # Best effort only -- leave *new* as-is when attributes are
        # read-only.
        pass
    return new
def _decode_unicode(value, charset, errors):
    """Like the regular decode function but this one raises an
    `HTTPUnicodeError` if errors is `strict`."""
    # errors of the form "fallback:<charset>" mean: decode strictly first,
    # and on failure re-decode with the fallback charset using replacement
    # characters instead of raising.
    fallback = None
    if errors.startswith('fallback:'):
        fallback = errors[9:]
        errors = 'strict'
    try:
        return value.decode(charset, errors)
    except UnicodeError, e:
        if fallback is not None:
            return value.decode(fallback, 'replace')
        # Imported lazily to avoid a circular import at module load time.
        from werkzeug.exceptions import HTTPUnicodeError
        raise HTTPUnicodeError(str(e))
def _iter_modules(path):
    """Iterate over all modules in a package."""
    import os
    import pkgutil
    # Preferred path: pkgutil.iter_modules yields (importer, name, ispkg).
    if hasattr(pkgutil, 'iter_modules'):
        for importer, modname, ispkg in pkgutil.iter_modules(path):
            yield modname, ispkg
        return
    # Fallback for very old Python versions without pkgutil.iter_modules.
    from inspect import getmodulename
    from pydoc import ispackage
    found = set()
    # NOTE(review): the loop variable shadows the `path` argument; it works
    # because the outer list is only iterated once, but it is fragile.
    for path in path:
        for filename in os.listdir(path):
            p = os.path.join(path, filename)
            modname = getmodulename(filename)
            if modname and modname != '__init__':
                if modname not in found:
                    found.add(modname)
                    # NOTE(review): pydoc.ispackage expects a filesystem
                    # path; passing the bare module name here looks wrong --
                    # probably should be `ispackage(p)`. Verify upstream.
                    yield modname, ispackage(modname)
def _dump_date(d, delim):
    """Used for `http_date` and `cookie_date`."""
    # Normalize the input to a UTC struct_time: None -> now, datetime ->
    # its UTC timetuple, numbers -> seconds since the epoch.
    if d is None:
        d = gmtime()
    elif isinstance(d, datetime):
        d = d.utctimetuple()
    elif isinstance(d, (int, long, float)):
        d = gmtime(d)
    # RFC-style GMT date string; `delim` separates day, month and year
    # ('-' for cookies, ' ' for HTTP dates).
    return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
        ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
        d.tm_mday, delim,
        ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
         'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
        delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
    )
def _date_to_unix(arg):
    """Converts a timetuple, integer or datetime object into the seconds from
    epoch in utc.
    """
    if isinstance(arg, datetime):
        arg = arg.utctimetuple()
    elif isinstance(arg, (int, long, float)):
        # Already seconds since the epoch.
        return int(arg)
    year, month, day, hour, minute, second = arg[:6]
    # Days since the epoch via the proleptic Gregorian ordinal
    # (_epoch_ord is the ordinal of 1970-01-01).
    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
    hours = days * 24 + hour
    minutes = hours * 60 + minute
    seconds = minutes * 60 + second
    return seconds
class _ExtendedMorsel(Morsel):
    # Recognize "httponly" in addition to the standard reserved cookie keys.
    _reserved = {'httponly': 'HttpOnly'}
    _reserved.update(Morsel._reserved)

    def __init__(self, name=None, value=None):
        Morsel.__init__(self)
        if name is not None:
            # coded value == real value; any encoding happens elsewhere
            self.set(name, value, value)

    def OutputString(self, attrs=None):
        """Render the morsel, appending HttpOnly manually because the
        base class does not know how to serialize it."""
        httponly = self.pop('httponly', False)
        result = Morsel.OutputString(self, attrs).rstrip('\t ;')
        if httponly:
            result += '; HttpOnly'
        return result
class _ExtendedCookie(SimpleCookie):
    """Form of the base cookie that doesn't raise a `CookieError` for
    malformed keys. This has the advantage that broken cookies submitted
    by nonstandard browsers don't cause the cookie to be empty.
    """

    def _BaseCookie__set(self, key, real_value, coded_value):
        # Name-mangled override of BaseCookie.__set: store whatever morsel
        # we can, silently dropping values the stdlib rejects as malformed.
        morsel = self.get(key, _ExtendedMorsel())
        try:
            morsel.set(key, real_value, coded_value)
        except CookieError:
            pass
        dict.__setitem__(self, key, morsel)
class _DictAccessorProperty(object):
    """Baseclass for `environ_property` and `header_property`.

    Subclasses must implement :meth:`lookup`, returning the mapping that
    backs the property for a given instance.  Values can optionally be
    converted on read (*load_func*) and on write (*dump_func*).
    """
    read_only = False

    def __init__(self, name, default=None, load_func=None, dump_func=None,
                 read_only=None, doc=None):
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        # Keep the class-level flag unless explicitly overridden.
        if read_only is not None:
            self.read_only = read_only
        self.__doc__ = doc

    def __get__(self, obj, type=None):
        # Accessed on the class itself -> return the descriptor.
        if obj is None:
            return self
        storage = self.lookup(obj)
        if self.name not in storage:
            return self.default
        value = storage[self.name]
        if self.load_func is None:
            return value
        try:
            return self.load_func(value)
        except (ValueError, TypeError):
            # Unparseable stored value degrades to the default.
            return self.default

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError('read only property')
        if self.dump_func is not None:
            value = self.dump_func(value)
        self.lookup(obj)[self.name] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError('read only property')
        # Missing keys are ignored on delete.
        self.lookup(obj).pop(self.name, None)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.name)
def _easteregg(app):
    """Like the name says. But who knows how it works?"""
    # The ASCII art is stored base64+zlib compressed; each decoded line is
    # padded out to 77 columns before being joined back together.
    gyver = '\n'.join([x + (77 - len(x)) * ' ' for x in '''
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t'''.decode('base64').decode('zlib').splitlines()])

    def easteregged(environ, start_response):
        # WSGI wrapper: tags every response and intercepts the magic query.
        def injecting_start_response(status, headers, exc_info=None):
            headers.append(('X-Powered-By', 'Werkzeug'))
            return start_response(status, headers, exc_info)
        if environ.get('QUERY_STRING') != 'macgybarchakku':
            # Normal request: delegate to the wrapped application.
            return app(environ, injecting_start_response)
        # The magic query string was given -> serve the easter egg page.
        injecting_start_response('200 OK', [('Content-Type', 'text/html')])
        return ['''
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>''' % gyver]
    return easteregged
| mit |
wiki2014/Learning-Summary | alps/cts/apps/CameraITS/tools/run_all_tests.py | 1 | 11812 | # Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import tempfile
import subprocess
import time
import sys
import textwrap
import its.device
from its.device import ItsSession
def main():
    """Run all the automated tests, saving intermediate files, and producing
    a summary/report of the results.
    Script should be run from the top-level CameraITS directory.
    Command line Arguments:
        camera: the camera(s) to be tested. Use comma to separate multiple
            camera Ids. Ex: "camera=0,1" or "camera=1"
        scenes: the test scene(s) to be executed. Use comma to separate multiple
            scenes. Ex: "scenes=scene0,scene1" or "scenes=0,1,sensor_fusion"
            (sceneX can be abbreviated by X where X is a integer)
        chart: [Experimental] another android device served as test chart
            display. When this argument presents, change of test scene will
            be handled automatically. Note that this argument requires
            special physical/hardware setup to work and may not work on
            all android devices.
    """
    # Exit code that individual test scripts use to signal "skipped".
    SKIP_RET_CODE = 101
    # Not yet mandated tests
    NOT_YET_MANDATED = {
        "scene0": [
            "test_jitter"
        ],
        "scene1": [
            "test_ae_precapture_trigger",
            "test_crop_region_raw",
            "test_ev_compensation_advanced",
            "test_ev_compensation_basic",
            "test_yuv_plus_jpeg"
        ],
        "scene2": [],
        "scene3": [],
        "scene4": [],
        "scene5": [],
        "sensor_fusion": []
    }
    all_scenes = ["scene0", "scene1", "scene2", "scene3", "scene4", "scene5"]
    # Scenes that can run unattended when an automated chart display is used.
    auto_scenes = ["scene0", "scene1", "scene2", "scene3", "scene4"]
    # Human-readable physical setup requirement per scene (None = no setup).
    scene_req = {
        "scene0": None,
        "scene1": "A grey card covering at least the middle 30% of the scene",
        "scene2": "A picture containing human faces",
        "scene3": "A chart containing sharp edges like ISO 12233",
        "scene4": "A specific test page of a circle covering at least the "
                  "middle 50% of the scene. See CameraITS.pdf section 2.3.4 "
                  "for more details",
        "scene5": "Capture images with a diffuser attached to the camera. See "
                  "CameraITS.pdf section 2.3.4 for more details",
        "sensor_fusion": "Rotating checkboard pattern. See "
                         "sensor_fusion/SensorFusion.pdf for detailed "
                         "instructions. Note that this test will be skipped "
                         "on devices not supporting REALTIME camera timestamp."
                         "If that is the case, no scene setup is required and "
                         "you can just answer Y when being asked if the scene "
                         "is okay"
    }
    # Extra command-line arguments forwarded to the per-scene helper scripts.
    scene_extra_args = {
        "scene5": ["doAF=False"]
    }
    # Parse key=value command-line arguments.
    camera_ids = []
    scenes = []
    chart_host_id = None
    for s in sys.argv[1:]:
        if s[:7] == "camera=" and len(s) > 7:
            camera_ids = s[7:].split(',')
        elif s[:7] == "scenes=" and len(s) > 7:
            scenes = s[7:].split(',')
        elif s[:6] == 'chart=' and len(s) > 6:
            chart_host_id = s[6:]
    # A chart device implies automatic scene switching.
    auto_scene_switch = chart_host_id is not None
    # Run through all scenes if user does not supply one
    possible_scenes = auto_scenes if auto_scene_switch else all_scenes
    if not scenes:
        scenes = possible_scenes
    else:
        # Validate user input scene names
        valid_scenes = True
        temp_scenes = []
        for s in scenes:
            if s in possible_scenes:
                temp_scenes.append(s)
            else:
                try:
                    # Try replace "X" to "sceneX"
                    scene_num = int(s)
                    scene_str = "scene" + s
                    if scene_str not in possible_scenes:
                        valid_scenes = False
                        break
                    temp_scenes.append(scene_str)
                except ValueError:
                    valid_scenes = False
                    break
        if not valid_scenes:
            print "Unknown scene specifiied:", s
            assert(False)
        scenes = temp_scenes
    # Initialize test results
    results = {}
    result_key = ItsSession.RESULT_KEY
    for s in all_scenes:
        results[s] = {result_key: ItsSession.RESULT_NOT_EXECUTED}
    # Make output directories to hold the generated files.
    topdir = tempfile.mkdtemp()
    print "Saving output files to:", topdir, "\n"
    device_id = its.device.get_device_id()
    device_id_arg = "device=" + device_id
    print "Testing device " + device_id
    # user doesn't specify camera id, run through all cameras
    if not camera_ids:
        camera_ids_path = os.path.join(topdir, "camera_ids.txt")
        out_arg = "out=" + camera_ids_path
        cmd = ['python',
               os.path.join(os.getcwd(), "tools/get_camera_ids.py"), out_arg,
               device_id_arg]
        retcode = subprocess.call(cmd, cwd=topdir)
        assert(retcode == 0)
        with open(camera_ids_path, "r") as f:
            for line in f:
                camera_ids.append(line.replace('\n', ''))
    print "Running ITS on camera: %s, scene %s" % (camera_ids, scenes)
    if auto_scene_switch:
        # Keep the chart device's screen on for the duration of the run.
        print 'Waking up chart screen: ', chart_host_id
        screen_id_arg = ('screen=%s' % chart_host_id)
        cmd = ['python', os.path.join(os.environ['CAMERA_ITS_TOP'], 'tools',
                                      'wake_up_screen.py'), screen_id_arg]
        retcode = subprocess.call(cmd)
        assert retcode == 0
    for camera_id in camera_ids:
        # Loop capturing images until user confirm test scene is correct
        camera_id_arg = "camera=" + camera_id
        print "Preparing to run ITS on camera", camera_id
        # One output sub-directory per camera and per scene.
        os.mkdir(os.path.join(topdir, camera_id))
        for d in scenes:
            os.mkdir(os.path.join(topdir, camera_id, d))
        for scene in scenes:
            # Collect the test_*.py scripts for this scene.
            tests = [(s[:-3], os.path.join("tests", scene, s))
                     for s in os.listdir(os.path.join("tests", scene))
                     if s[-3:] == ".py" and s[:4] == "test"]
            tests.sort()
            summary = "Cam" + camera_id + " " + scene + "\n"
            numpass = 0
            numskip = 0
            num_not_mandated_fail = 0
            numfail = 0
            if scene_req[scene] != None:
                # Scene needs physical setup: either load it on the chart
                # device or ask the user to validate it interactively.
                out_path = os.path.join(topdir, camera_id, scene + ".jpg")
                out_arg = "out=" + out_path
                if auto_scene_switch:
                    scene_arg = "scene=" + scene
                    cmd = ['python',
                           os.path.join(os.getcwd(), 'tools/load_scene.py'),
                           scene_arg, screen_id_arg]
                else:
                    scene_arg = "scene=" + scene_req[scene]
                    extra_args = scene_extra_args.get(scene, [])
                    cmd = ['python',
                           os.path.join(os.getcwd(), "tools/validate_scene.py"),
                           camera_id_arg, out_arg,
                           scene_arg, device_id_arg] + extra_args
                retcode = subprocess.call(cmd, cwd=topdir)
                assert(retcode == 0)
            print "Start running ITS on camera %s, %s" % (camera_id, scene)
            # Run each test, capturing stdout and stderr.
            for (testname, testpath) in tests:
                if auto_scene_switch:
                    # Send an input event to keep the screen not dimmed.
                    # Since we are not using camera of chart screen, FOCUS event
                    # should does nothing but keep the screen from dimming.
                    # The "sleep after x minutes of inactivity" display setting
                    # determines how long this command can keep screen bright.
                    # Setting it to something like 30 minutes should be enough.
                    cmd = ('adb -s %s shell input keyevent FOCUS'
                           % chart_host_id)
                    subprocess.call(cmd.split())
                cmd = ['python', os.path.join(os.getcwd(), testpath)] + \
                      sys.argv[1:] + [camera_id_arg]
                outdir = os.path.join(topdir, camera_id, scene)
                outpath = os.path.join(outdir, testname + "_stdout.txt")
                errpath = os.path.join(outdir, testname + "_stderr.txt")
                t0 = time.time()
                with open(outpath, "w") as fout, open(errpath, "w") as ferr:
                    retcode = subprocess.call(
                        cmd, stderr=ferr, stdout=fout, cwd=outdir)
                t1 = time.time()
                # Classify the exit code: pass / skip / not-yet-mandated
                # failure (marked FAIL*) / real failure.
                test_failed = False
                if retcode == 0:
                    retstr = "PASS "
                    numpass += 1
                elif retcode == SKIP_RET_CODE:
                    retstr = "SKIP "
                    numskip += 1
                elif retcode != 0 and testname in NOT_YET_MANDATED[scene]:
                    retstr = "FAIL*"
                    num_not_mandated_fail += 1
                else:
                    retstr = "FAIL "
                    numfail += 1
                    test_failed = True
                msg = "%s %s/%s [%.1fs]" % (retstr, scene, testname, t1 - t0)
                print msg
                msg_short = "%s %s [%.1fs]" % (retstr, testname, t1 - t0)
                if test_failed:
                    summary += msg_short + "\n"
            if numskip > 0:
                skipstr = ", %d test%s skipped" % (
                    numskip, "s" if numskip > 1 else "")
            else:
                skipstr = ""
            # Pass rate excludes skipped tests; FAIL* counts as passing.
            test_result = "\n%d / %d tests passed (%.1f%%)%s" % (
                numpass + num_not_mandated_fail, len(tests) - numskip,
                100.0 * float(numpass + num_not_mandated_fail) /
                (len(tests) - numskip)
                if len(tests) != numskip else 100.0,
                skipstr)
            print test_result
            if num_not_mandated_fail > 0:
                msg = "(*) tests are not yet mandated"
                print msg
            # Persist the per-scene summary and record the overall verdict.
            summary_path = os.path.join(topdir, camera_id, scene, "summary.txt")
            with open(summary_path, "w") as f:
                f.write(summary)
            passed = numfail == 0
            results[scene][result_key] = (ItsSession.RESULT_PASS if passed
                                          else ItsSession.RESULT_FAIL)
            results[scene][ItsSession.SUMMARY_KEY] = summary_path
        print "Reporting ITS result to CtsVerifier"
        its.device.report_result(device_id, camera_id, results)
    if auto_scene_switch:
        # Restore the chart device's screen state.
        print 'Shutting down chart screen: ', chart_host_id
        screen_id_arg = ('screen=%s' % chart_host_id)
        cmd = ['python', os.path.join(os.environ['CAMERA_ITS_TOP'], 'tools',
                                      'turn_off_screen.py'), screen_id_arg]
        retcode = subprocess.call(cmd)
        assert retcode == 0
    print "ITS tests finished. Please go back to CtsVerifier and proceed"
# Allow running this script directly from the command line.
if __name__ == '__main__':
    main()
| gpl-3.0 |
0x0d/typhon-vx | kraken/TableGeneration/MultiFileWriter.py | 6 | 2537 | import sys
import string
import struct
import os
class MultiFileWriter:
    """Write fixed-size (endpoint, index) chain records into 256 ``.tbl``
    files -- one per high endpoint byte -- buffering and batching writes.

    Each on-disk record is 11 bytes: 6 bytes of the endpoint followed by
    5 low-order bytes of the index (see Write/FindHighIndex).
    """

    def __init__(self, path):
        # Base directory under which one sub-directory per table is created.
        self.path = path
        self.files = [None]*256
        # NOTE(review): [[]]*256 aliases ONE list 256 times; this is only
        # harmless because Open() rebinds every slot to a fresh list
        # before any Write() happens.
        self.buffers = [[]]*256
        self.open = False

    def Open(self, name):
        """Open the 256 chain files of table *name* in append mode,
        truncating any partially written trailing record."""
        if self.open:
            self.Close()
        outdir = self.path+"/"+name
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        for i in range(256):
            fname = outdir +"/"+("%02x"%(i,))+".tbl"
            f = open( fname, "a" )
            if f == None:
                print "Could not open file ", fname
                sys.exit(-1)
            size = f.tell()
            # Records are 11 bytes; a non-multiple means a torn write.
            if size%11:
                # Recover from partially written chain
                size = size - (size%11)
                f.truncate(size)
            self.files[i] = f
        # Reset all write buffers (also fixes the aliasing from __init__).
        for i in range(256):
            self.buffers[i] = []
        self.open = True

    def Close(self):
        """Flush every per-prefix buffer and close all files."""
        if not self.open:
            return
        for i in range(256):
            outdata = string.join(self.buffers[i],"")
            self.files[i].write(outdata)
            self.files[i].close()
            self.files[i]=0
        self.open = False

    def Write(self, endpoint, index):
        """Queue one chain record; the top endpoint byte selects the file."""
        fi = endpoint >> 56
        # 11-byte record: endpoint bytes 1..6 (little endian; the top byte
        # is encoded in the file name, the lowest byte is dropped --
        # presumably implied by the table format, verify) + 5 low-order
        # index bytes.
        dat = struct.pack("<Q", endpoint )[1:-1] + \
              struct.pack("<Q", index )[:-3]
        self.buffers[fi].append(dat)
        # print self.buffers[fi]
        # Flush in batches to avoid one tiny write per record.
        if len(self.buffers[fi])>256:
            #Flush write buffer
            outdata = string.join(self.buffers[fi],"")
            self.files[fi].write(outdata)
            self.buffers[fi] = []

    def FindHighIndex(self, name):
        """Scan the tail of each chain file of table *name* and return the
        highest chain index seen (0 if empty, -1 while the writer is open)."""
        if self.open:
            return -1
        high = 0
        for i in range(256):
            fname = self.path+"/"+name+"/"+("%02x"%(i,))+".tbl"
            f = open( fname, "r" )
            if f == None:
                print "Could not open file ", fname
                sys.exit(-1)
            f.seek(0, os.SEEK_END)
            size = f.tell()
            # Recover from partially written chain
            size = size - (size%11)
            # Only the last 1000 records of each file are inspected.
            pos = size - 1000*11
            if pos<0:
                pos = 0
            f.seek(pos, os.SEEK_SET )
            for j in range((size-pos)/11):
                dat1 = f.read(11)
                # Last 5 bytes of the record are the index; zero-pad to
                # 8 bytes so it can be unpacked as a little-endian u64.
                dat = dat1[6:]+"\000\000\000"
                val = struct.unpack("<Q", dat)[0]
                if val > high:
                    high = val
            f.close()
        return high
| gpl-3.0 |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/pyqtgraph/frozenSupport.py | 52 | 1830 | ## Definitions helpful in frozen environments (eg py2exe)
import os, sys, zipfile
def listdir(path):
    """Replacement for os.listdir that works in frozen environments."""
    # Outside a frozen app the regular implementation is fine.
    if not hasattr(sys, 'frozen'):
        return os.listdir(path)
    zipPath, archivePath = splitZip(path)
    if archivePath is None:
        return os.listdir(path)
    with zipfile.ZipFile(zipPath, "r") as zipobj:
        contents = zipobj.namelist()
    names = set()
    for entry in contents:
        # components in zip archive paths are always separated by forward slash
        if entry.startswith(archivePath) and len(entry) > len(archivePath):
            names.add(entry[len(archivePath):].split('/')[0])
    return list(names)
def isdir(path):
    """Replacement for os.path.isdir that works in frozen environments."""
    # Outside a frozen app the regular implementation is fine.
    if not hasattr(sys, 'frozen'):
        return os.path.isdir(path)
    zipPath, archivePath = splitZip(path)
    if archivePath is None:
        return os.path.isdir(path)
    with zipfile.ZipFile(zipPath, "r") as zipobj:
        contents = zipobj.namelist()
    # make sure there's exactly one '/' at the end
    prefix = archivePath.rstrip('/') + '/'
    return any(name.startswith(prefix) for name in contents)
def splitZip(path):
    """Split a path containing a zip file into ``(zipfile, subpath)``.

    If no path component ends in ``.zip`` the result is ``(path, None)``.
    The subpath always uses '/' separators with a trailing '/'.
    """
    parts = os.path.normpath(path).split(os.sep)
    for index, part in enumerate(parts):
        if not part.endswith('.zip'):
            continue
        zipPath = os.sep.join(parts[:index + 1])
        archivePath = ''.join(p + '/' for p in parts[index + 1:])
        return (zipPath, archivePath)
    return (path, None)
| gpl-3.0 |
jensengroup/propka-3.1 | propka/version.py | 1 | 12070 | """
Version-based configuration
===========================
Contains version-specific methods and parameters.
TODO - this module unnecessarily confuses the code. Can we eliminate it?
"""
from propka.lib import info
from propka.hydrogens import setup_bonding_and_protonation, setup_bonding
from propka.hydrogens import setup_bonding_and_protonation_30_style
from propka.energy import radial_volume_desolvation, calculate_pair_weight
from propka.energy import hydrogen_bond_energy, hydrogen_bond_interaction
from propka.energy import electrostatic_interaction, check_coulomb_pair
from propka.energy import coulomb_energy, check_exceptions
from propka.energy import backbone_reorganization
class Version:
    """Store version-specific methods and parameters.

    Subclasses install the real calculation routines by replacing the
    ``*_model`` / ``*_method`` attributes; until then every calculation
    raises :class:`NotImplementedError` via :meth:`empty_function`.
    """

    def __init__(self, parameters):
        """Initialize with a parameter container; all hooks start empty."""
        self.parameters = parameters
        self.desolvation_model = self.empty_function
        self.weight_pair_method = self.empty_function
        self.hydrogen_bond_interaction_model = self.empty_function
        self.sidechain_interaction_model = self.empty_function
        self.electrostatic_interaction_model = self.empty_function
        self.coulomb_interaction_model = self.empty_function
        self.check_coulomb_pair_method = self.empty_function
        self.backbone_reorganisation_method = self.empty_function
        self.exception_check_method = self.empty_function
        self.molecular_preparation_method = self.empty_function
        self.prepare_bonds = self.empty_function

    @staticmethod
    def empty_function(*args):
        """Placeholder function so we don't use uninitialized variables.

        Args:
            args: whatever arguments would have been passed to the function
        Raises:
            NotImplementedError
        """
        # BUG FIX: the previous "{0:s}".format(args) raised TypeError
        # (tuples reject the 's' format spec), masking the intended
        # NotImplementedError.  Use !r to embed the argument tuple safely.
        err = "Called an empty Version function with args {0!r}".format(args)
        raise NotImplementedError(err)

    def calculate_desolvation(self, group):
        """Calculate desolvation energy using assigned model."""
        return self.desolvation_model(self.parameters, group)

    def calculate_pair_weight(self, num_volume1, num_volume2):
        """Calculate pair weight using assigned model."""
        return self.weight_pair_method(
            self.parameters, num_volume1, num_volume2)

    def hydrogen_bond_interaction(self, group1, group2):
        """Calculate H-bond energy using assigned model."""
        return self.hydrogen_bond_interaction_model(group1, group2, self)

    def calculate_side_chain_energy(self, distance, dpka_max, cutoff, _,
                                    f_angle):
        """Calculate sidechain energy using assigned model.

        Note: the fourth positional argument is accepted but unused.
        """
        return self.sidechain_interaction_model(
            distance, dpka_max, cutoff, f_angle)

    def electrostatic_interaction(self, group1, group2, distance):
        """Calculate electrostatic energy using assigned model."""
        return self.electrostatic_interaction_model(
            group1, group2, distance, self)

    def calculate_coulomb_energy(self, distance, weight):
        """Calculate Coulomb energy using assigned model."""
        return self.coulomb_interaction_model(
            distance, weight, self.parameters)

    def check_coulomb_pair(self, group1, group2, distance):
        """Check Coulomb pair using assigned model."""
        return self.check_coulomb_pair_method(
            self.parameters, group1, group2, distance)

    def calculate_backbone_reorganization(self, conformation):
        """Calculate backbone reorganization using assigned model."""
        return self.backbone_reorganisation_method(
            self.parameters, conformation)

    def check_exceptions(self, group1, group2):
        """Calculate exceptions using assigned model."""
        return self.exception_check_method(self, group1, group2)

    def setup_bonding_and_protonation(self, molecular_container):
        """Setup bonding and protonation using assigned model."""
        return self.molecular_preparation_method(molecular_container)

    def setup_bonding(self, molecular_container):
        """Setup bonding using assigned model."""
        return self.prepare_bonds(self.parameters, molecular_container)
class VersionA(Version):
    """Standard PROPKA version wiring in the default energy and bonding
    routines from :mod:`propka.hydrogens` and :mod:`propka.energy`."""

    def __init__(self, parameters):
        """Install the calculation routines used by this version."""
        super().__init__(parameters)
        self.molecular_preparation_method = setup_bonding_and_protonation
        self.prepare_bonds = setup_bonding
        self.desolvation_model = radial_volume_desolvation
        self.weight_pair_method = calculate_pair_weight
        self.sidechain_interaction_model = hydrogen_bond_energy
        self.hydrogen_bond_interaction_model = hydrogen_bond_interaction
        self.electrostatic_interaction_model = electrostatic_interaction
        self.check_coulomb_pair_method = check_coulomb_pair
        self.coulomb_interaction_model = coulomb_energy
        self.backbone_interaction_model = hydrogen_bond_energy
        self.backbone_reorganisation_method = backbone_reorganization
        self.exception_check_method = check_exceptions

    def get_hydrogen_bond_parameters(self, atom1, atom2):
        """Return ``[dpka_max, cutoff]`` for a sidechain hydrogen bond,
        with the cutoff looked up by the two atoms' group types."""
        max_shift = self.parameters.sidechain_interaction
        distance_cutoff = self.parameters.sidechain_cutoffs.get_value(
            atom1.group_type, atom2.group_type)
        return [max_shift, distance_cutoff]

    def get_backbone_hydrogen_bond_parameters(self, backbone_atom, atom):
        """Return ``[value, [cutoff1, cutoff2]]`` for a hydrogen bond
        between a backbone C=O ('BBC') or N-H ('BBN') atom and *atom*,
        or ``None`` when no parameters are tabulated."""
        if backbone_atom.group_type == 'BBC':
            table = self.parameters.backbone_CO_hydrogen_bond
        elif backbone_atom.group_type == 'BBN':
            table = self.parameters.backbone_NH_hydrogen_bond
        else:
            return None
        if atom.group_type in table:
            value, cutoff1, cutoff2 = table[atom.group_type]
            return [value, [cutoff1, cutoff2]]
        return None
class SimpleHB(VersionA):
    """Version variant that looks up every hydrogen-bond parameter purely
    by the elements of the two atoms involved."""

    def __init__(self, parameters):
        """Install the standard VersionA routines and announce the model."""
        super().__init__(parameters)
        info('Using simple hb model')

    def get_hydrogen_bond_parameters(self, atom1, atom2):
        """Return ``[dpka_max, cutoff]`` keyed on the two atoms' elements."""
        elements = (atom1.element, atom2.element)
        return self.parameters.hydrogen_bonds.get_value(*elements)

    def get_backbone_hydrogen_bond_parameters(self, backbone_atom, atom):
        """Return backbone hydrogen-bond parameters keyed on the elements
        of the backbone atom and *atom*."""
        elements = (backbone_atom.element, atom.element)
        return self.parameters.hydrogen_bonds.get_value(*elements)
class ElementBasedLigandInteractions(VersionA):
    """Interaction model that handles ligand atoms by element.

    Protein-protein pairs use the group-type based side-chain tables;
    any pair involving a ligand ('hetatm') atom falls back to
    element-based hydrogen bond parameters.
    """

    def __init__(self, parameters):
        """Initialize object with parameters."""
        # set the calculation routines used in this version
        super().__init__(parameters)
        info('Using detailed SC model!')
        return

    def get_hydrogen_bond_parameters(self, atom1, atom2):
        """Get hydrogen bond parameters for two atoms.

        Args:
            atom1: first atom
            atom2: second atom
        Returns:
            [dpka_max, cutoff]
        """
        if 'hetatm' not in [atom1.type, atom2.type]:
            # this is a protein-protein interaction
            dpka_max = self.parameters.sidechain_interaction.get_value(
                atom1.group_type, atom2.group_type)
            cutoff = self.parameters.sidechain_cutoffs.get_value(
                atom1.group_type, atom2.group_type)
            return [dpka_max, cutoff]
        # at least one ligand atom is involved in this interaction
        # make sure that we are using the heavy atoms for finding parameters
        elements = []
        for atom in [atom1, atom2]:
            if atom.element == 'H':
                elements.append(atom.bonded_atoms[0].element)
            else:
                elements.append(atom.element)
        return self.parameters.hydrogen_bonds.get_value(
            elements[0], elements[1])

    def get_backbone_hydrogen_bond_parameters(self, backbone_atom, atom):
        """Get hydrogen bond parameters between backbone atom and other atom.

        Args:
            backbone_atom: backbone atom
            atom: other atom
        Returns:
            [v, [c1, c2]] for backbone-protein interactions,
            element-based parameters for backbone-ligand interactions,
            or None when no parameters could be determined.
        """
        if atom.type == 'atom':
            # this is a backbone-protein interaction
            if (backbone_atom.group_type == 'BBC'
                    and atom.group_type
                    in self.parameters.backbone_CO_hydrogen_bond.keys()):
                [v, c1, c2] = self.parameters.backbone_CO_hydrogen_bond[
                    atom.group_type]
                return [v, [c1, c2]]
            if (backbone_atom.group_type == 'BBN'
                    and atom.group_type
                    in self.parameters.backbone_NH_hydrogen_bond.keys()):
                [v, c1, c2] = self.parameters.backbone_NH_hydrogen_bond[
                    atom.group_type]
                return [v, [c1, c2]]
        else:
            # this is a backbone-ligand interaction
            # make sure that we are using the heavy atoms for finding
            # parameters
            elements = []
            for partner in [backbone_atom, atom]:
                if partner.element == 'H':
                    elements.append(partner.bonded_atoms[0].element)
                else:
                    elements.append(partner.element)
            res = self.parameters.hydrogen_bonds.get_value(
                elements[0], elements[1])
            if not res:
                info(
                    'Could not determine backbone interaction parameters for:',
                    backbone_atom, atom)
                return None
            # BUG FIX: previously the looked-up parameters were discarded
            # (control fell through to the trailing 'return None'), making
            # the whole backbone-ligand lookup dead code.
            return res
        return None
class Propka30(Version):
    """Version class for PROPKA 3.0."""

    def __init__(self, parameters):
        """Initialize object with parameters."""
        super().__init__(parameters)
        # Wire up the PROPKA 3.0 calculation routines used by this version.
        self.molecular_preparation_method = (
            setup_bonding_and_protonation_30_style)
        self.desolvation_model = radial_volume_desolvation
        self.weight_pair_method = calculate_pair_weight
        self.sidechain_interaction_model = hydrogen_bond_energy
        self.check_coulomb_pair_method = check_coulomb_pair
        self.coulomb_interaction_model = coulomb_energy
        self.backbone_reorganisation_method = backbone_reorganization
        self.exception_check_method = check_exceptions

    def get_hydrogen_bond_parameters(self, atom1, atom2):
        """Get hydrogen bond parameters for two atoms.

        Args:
            atom1: first atom
            atom2: second atom
        Returns:
            [dpka_max, cutoff]
        """
        pair = (atom1.group_type, atom2.group_type)
        dpka_max = self.parameters.sidechain_interaction.get_value(*pair)
        cutoff = self.parameters.sidechain_cutoffs.get_value(*pair)
        return [dpka_max, cutoff]
| lgpl-2.1 |
jessekl/flixr | venv/lib/python2.7/site-packages/alembic/ddl/mysql.py | 2 | 10606 | from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types as sqltypes
from sqlalchemy import schema
from ..compat import string_types
from .. import util
from .impl import DefaultImpl
from .base import ColumnNullable, ColumnName, ColumnDefault, \
ColumnType, AlterColumn, format_column_name, \
format_server_default
from .base import alter_table
from ..autogenerate import compare
class MySQLImpl(DefaultImpl):
    """Alembic DDL implementation for the MySQL dialect."""

    __dialect__ = 'mysql'

    # MySQL DDL implicitly commits; it cannot run inside a transaction.
    transactional_ddl = False

    def alter_column(self, table_name, column_name,
                     nullable=None,
                     server_default=False,
                     name=None,
                     type_=None,
                     schema=None,
                     autoincrement=None,
                     existing_type=None,
                     existing_server_default=None,
                     existing_nullable=None,
                     existing_autoincrement=None
                     ):
        """Emit an ALTER statement for a column.

        MySQL has no single-attribute ALTER COLUMN forms other than
        SET/DROP DEFAULT (see ``_mysql_doesnt_support_individual``), so a
        rename or a nullability/type/autoincrement change must restate
        the full column definition via CHANGE or MODIFY; the
        ``existing_*`` values fill in whatever the caller did not supply.
        """
        if name is not None:
            # Renaming requires CHANGE, which restates the whole column.
            self._exec(
                MySQLChangeColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        elif nullable is not None or \
                type_ is not None or \
                autoincrement is not None:
            # No rename: MODIFY restates the definition in place.
            self._exec(
                MySQLModifyColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name if name is not None else column_name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        elif server_default is not False:
            # Only the default changed; ALTER ... SET/DROP DEFAULT suffices.
            self._exec(
                MySQLAlterDefault(
                    table_name, column_name, server_default,
                    schema=schema,
                )
            )

    def compare_server_default(self, inspector_column,
                               metadata_column,
                               rendered_metadata_default,
                               rendered_inspector_default):
        """Return True when the reflected default differs from metadata."""
        # partially a workaround for SQLAlchemy issue #3023; if the
        # column were created without "NOT NULL", MySQL may have added
        # an implicit default of '0' which we need to skip
        if metadata_column.type._type_affinity is sqltypes.Integer and \
                inspector_column.primary_key and \
                not inspector_column.autoincrement and \
                not rendered_metadata_default and \
                rendered_inspector_default == "'0'":
            return False
        else:
            return rendered_inspector_default != rendered_metadata_default

    def correct_for_autogen_constraints(self, conn_unique_constraints,
                                        conn_indexes,
                                        metadata_unique_constraints,
                                        metadata_indexes):
        """Filter MySQL's implicit/duplicate indexes out of autogenerate.

        Mutates the four collections in place so that implicit FK indexes
        and unique-index/unique-constraint duplicates do not show up as
        spurious adds or drops.
        """
        # TODO: if SQLA 1.0, make use of "duplicates_index"
        # metadata
        removed = set()
        for idx in list(conn_indexes):
            if idx.unique:
                continue
            # MySQL puts implicit indexes on FK columns, even if
            # composite and even if MyISAM, so can't check this too easily.
            # the name of the index may be the column name or it may
            # be the name of the FK constraint.
            for col in idx.columns:
                if idx.name == col.name:
                    conn_indexes.remove(idx)
                    removed.add(idx.name)
                    break
                for fk in col.foreign_keys:
                    if fk.name == idx.name:
                        conn_indexes.remove(idx)
                        removed.add(idx.name)
                        break
                if idx.name in removed:
                    break
        # then remove indexes from the "metadata_indexes"
        # that we've removed from reflected, otherwise they come out
        # as adds (see #202)
        for idx in list(metadata_indexes):
            if idx.name in removed:
                metadata_indexes.remove(idx)
        # then dedupe unique indexes vs. constraints, since MySQL
        # doesn't really have unique constraints as a separate construct.
        # but look in the metadata and try to maintain constructs
        # that already seem to be defined one way or the other
        # on that side. See #276
        metadata_uq_names = set([
            cons.name for cons in metadata_unique_constraints
            if cons.name is not None])
        unnamed_metadata_uqs = set([
            compare._uq_constraint_sig(cons).sig
            for cons in metadata_unique_constraints
            if cons.name is None
        ])
        metadata_ix_names = set([
            cons.name for cons in metadata_indexes if cons.unique])
        conn_uq_names = dict(
            (cons.name, cons) for cons in conn_unique_constraints
        )
        conn_ix_names = dict(
            (cons.name, cons) for cons in conn_indexes if cons.unique
        )
        for overlap in set(conn_uq_names).intersection(conn_ix_names):
            if overlap not in metadata_uq_names:
                if compare._uq_constraint_sig(conn_uq_names[overlap]).sig \
                        not in unnamed_metadata_uqs:
                    conn_unique_constraints.discard(conn_uq_names[overlap])
            elif overlap not in metadata_ix_names:
                conn_indexes.discard(conn_ix_names[overlap])
class MySQLAlterDefault(AlterColumn):
    """ALTER COLUMN ... SET/DROP DEFAULT construct for MySQL."""

    def __init__(self, name, column_name, default, schema=None):
        # Deliberately bypass AlterColumn.__init__ and run its parent's
        # instead (super(AlterColumn, self) starts the MRO after it).
        super(AlterColumn, self).__init__(name, schema=schema)
        self.default = default
        self.column_name = column_name
class MySQLChangeColumn(AlterColumn):
    """CHANGE COLUMN construct: restates the full column definition."""

    def __init__(self, name, column_name, schema=None,
                 newname=None,
                 type_=None,
                 nullable=None,
                 default=False,
                 autoincrement=None):
        # Deliberately bypass AlterColumn.__init__ and run its parent's
        # instead (super(AlterColumn, self) starts the MRO after it).
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.newname = newname
        self.nullable = nullable
        self.autoincrement = autoincrement
        self.default = default
        if type_ is None:
            raise util.CommandError(
                "All MySQL CHANGE/MODIFY COLUMN operations "
                "require the existing type."
            )
        self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
    # Carries the same payload as MySQLChangeColumn but compiles to
    # "ALTER TABLE ... MODIFY" (no rename) -- see _mysql_modify_column.
    pass
@compiles(ColumnNullable, 'mysql')
@compiles(ColumnName, 'mysql')
@compiles(ColumnDefault, 'mysql')
@compiles(ColumnType, 'mysql')
def _mysql_doesnt_support_individual(element, compiler, **kw):
    # MySQL can only restate the whole column definition (CHANGE/MODIFY),
    # so the generic single-attribute ALTER constructs are rejected here.
    raise NotImplementedError(
        "Individual alter column constructs not supported by MySQL"
    )
@compiles(MySQLAlterDefault, "mysql")
def _mysql_alter_default(element, compiler, **kw):
    """Render "ALTER TABLE ... ALTER COLUMN ... SET/DROP DEFAULT"."""
    if element.default is not None:
        default_clause = "SET DEFAULT %s" % format_server_default(
            compiler, element.default)
    else:
        default_clause = "DROP DEFAULT"
    return "%s ALTER COLUMN %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        default_clause,
    )
@compiles(MySQLModifyColumn, "mysql")
def _mysql_modify_column(element, compiler, **kw):
    """Render "ALTER TABLE ... MODIFY <column> <colspec>"."""
    table_clause = alter_table(compiler, element.table_name, element.schema)
    column_clause = format_column_name(compiler, element.column_name)
    colspec = _mysql_colspec(
        compiler,
        nullable=element.nullable,
        server_default=element.default,
        type_=element.type_,
        autoincrement=element.autoincrement,
    )
    return "%s MODIFY %s %s" % (table_clause, column_clause, colspec)
@compiles(MySQLChangeColumn, "mysql")
def _mysql_change_column(element, compiler, **kw):
    """Render "ALTER TABLE ... CHANGE <old> <new> <colspec>"."""
    table_clause = alter_table(compiler, element.table_name, element.schema)
    colspec = _mysql_colspec(
        compiler,
        nullable=element.nullable,
        server_default=element.default,
        type_=element.type_,
        autoincrement=element.autoincrement,
    )
    return "%s CHANGE %s %s %s" % (
        table_clause,
        format_column_name(compiler, element.column_name),
        format_column_name(compiler, element.newname),
        colspec,
    )
def _render_value(compiler, expr):
    """Render a server default: quote plain strings, compile SQL constructs."""
    if not isinstance(expr, string_types):
        return compiler.sql_compiler.process(expr)
    return "'%s'" % expr
def _mysql_colspec(compiler, nullable, server_default, type_,
autoincrement):
spec = "%s %s" % (
compiler.dialect.type_compiler.process(type_),
"NULL" if nullable else "NOT NULL"
)
if autoincrement:
spec += " AUTO_INCREMENT"
if server_default is not False and server_default is not None:
spec += " DEFAULT %s" % _render_value(compiler, server_default)
return spec
@compiles(schema.DropConstraint, "mysql")
def _mysql_drop_constraint(element, compiler, **kw):
    """Redefine SQLAlchemy's drop constraint to
    raise errors for invalid constraint type."""
    constraint = element.element
    droppable = (schema.ForeignKeyConstraint,
                 schema.PrimaryKeyConstraint,
                 schema.UniqueConstraint)
    if isinstance(constraint, droppable):
        return compiler.visit_drop_constraint(element, **kw)
    if isinstance(constraint, schema.CheckConstraint):
        raise NotImplementedError(
            "MySQL does not support CHECK constraints.")
    raise NotImplementedError(
        "No generic 'DROP CONSTRAINT' in MySQL - "
        "please specify constraint type")
| mit |
gsnedders/presto-testo | wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/lib/python/autobahn/case/case9_4_4.py | 14 | 1255 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_4_1 import Case9_4_1
class Case9_4_4(Case9_4_1):
   # Fragmented binary echo test: 4 MiB payload sent in 4 KiB fragments.
   DESCRIPTION = """Send fragmented binary message message with message payload of length 4 * 2**20 (4M). Sent out in fragments of 4k."""
   EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""
   def init(self):
      # Sizing knobs; presumably consumed by the Case9_4_1 base
      # machinery -- confirm against case9_4_1.py.
      self.DATALEN = 4 * 2**20
      self.FRAGSIZE = 4 * 2**10
      self.PAYLOAD = "*" * self.DATALEN
      self.WAITSECS = 100
      self.reportTime = True
| bsd-3-clause |
Newman101/scipy | scipy/weave/examples/dict_sort.py | 100 | 3235 | # Borrowed from Alex Martelli's sort from Python cookbook using inlines
# 2x over fastest Python version -- again, maybe not worth the effort...
# Then again, 2x is 2x...
#
# C:\home\eric\wrk\scipy\weave\examples>python dict_sort.py
# Dict sort of 1000 items for 300 iterations:
# speed in python: 0.250999927521
# [0, 1, 2, 3, 4]
# speed in c: 0.110000014305
# speed up: 2.28
# [0, 1, 2, 3, 4]
# speed in c (scxx): 0.200000047684
# speed up: 1.25
# [0, 1, 2, 3, 4]
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import inline_tools
def c_sort(adict):
    """Return the values of *adict* ordered by key, via weave-inlined C.

    The C++ snippet below is compiled and executed by weave.inline;
    ``adict`` is exposed to it and ``return_val`` is handed back to
    Python.  Do not alter the string -- it is runtime data.
    """
    assert(type(adict) is dict)
    code = """
    #line 24 "dict_sort.py"
    py::list keys = adict.keys();
    py::list items(keys.length());
    keys.sort();
    PyObject* item = NULL;
    int N = keys.length();
    for(int i = 0; i < N;i++)
    {
        item = PyList_GetItem(keys,i);
        item = PyDict_GetItem(adict,item);
        Py_XINCREF(item);
        PyList_SetItem(items,i,item);
    }
    return_val = items;
    """
    return inline_tools.inline(code,['adict'])
def c_sort2(adict):
    """Same as c_sort but written purely with weave's scxx py:: wrappers.

    Do not alter the embedded C++ string -- it is runtime data.
    """
    assert(type(adict) is dict)
    code = """
    #line 44 "dict_sort.py"
    py::list keys = adict.keys();
    py::list items(keys.len());
    keys.sort();
    int N = keys.length();
    for(int i = 0; i < N;i++)
    {
        items[i] = adict[int( keys[i] )];
    }
    return_val = items;
    """
    return inline_tools.inline(code,['adict'],verbose=1)
# (IMHO) the simplest approach:
def sortedDictValues1(adict):
    """Return the values of *adict* ordered by sorted key.

    Uses sorted() instead of the old ``items = adict.items();
    items.sort()`` pattern, which fails on Python 3 where .items()
    returns a view with no .sort() method.  Works on Python 2 as well.
    """
    items = sorted(adict.items())
    return [value for key, value in items]
# an alternative implementation, which
# happens to run a bit faster for large
# dictionaries on my machine:
def sortedDictValues2(adict):
    """Return the values of *adict* ordered by sorted key.

    Uses sorted() instead of ``keys = adict.keys(); keys.sort()``,
    which fails on Python 3 where .keys() returns a view with no
    .sort() method.  Works on Python 2 as well.
    """
    keys = sorted(adict.keys())
    return [adict[key] for key in keys]
# a further slight speed-up on my box
# is to map a bound-method:
def sortedDictValues3(adict):
    """Return the values of *adict* ordered by sorted key via map().

    sorted() replaces the Python-2-only ``keys.sort()`` call, and the
    map result is materialized with list() so callers can slice it
    (map() is lazy on Python 3).  Works on Python 2 as well.
    """
    keys = sorted(adict.keys())
    return list(map(adict.get, keys))
import time
def sort_compare(a,n):
    """Benchmark the pure-Python sort against the two weave versions.

    Args:
        a: dict to sort (values returned ordered by key)
        n: number of timing iterations per variant
    """
    print('Dict sort of %d items for %d iterations:' % (len(a),n))
    t1 = time.time()
    for i in range(n):
        b = sortedDictValues3(a)
    t2 = time.time()
    py = (t2-t1)
    print(' speed in python:', (t2 - t1))
    print(b[:5])
    # First call is outside the timer (weave compiles on first use).
    b = c_sort(a)
    t1 = time.time()
    for i in range(n):
        b = c_sort(a)
    t2 = time.time()
    print(' speed in c (Python API):',(t2 - t1))
    print(' speed up: %3.2f' % (py/(t2-t1)))
    print(b[:5])
    # First call is outside the timer (weave compiles on first use).
    b = c_sort2(a)
    t1 = time.time()
    for i in range(n):
        b = c_sort2(a)
    t2 = time.time()
    print(' speed in c (scxx):',(t2 - t1))
    print(' speed up: %3.2f' % (py/(t2-t1)))
    print(b[:5])
def setup_dict(m):
    """Build a dict mapping each key in range(m) to itself, inserted
    in random order.

    (" does insertion order matter?" -- the random order exercises the
    dict regardless of insertion sequence.)

    Fixes the Python-3 incompatibility of the original (``range(m)``
    has no ``.remove()``) and replaces the accidental O(m**2)
    choice-and-remove loop with a single O(m) shuffle.
    """
    import random
    keys = list(range(m))
    random.shuffle(keys)
    d = {}
    for key in keys:
        d[key] = key
    return d
if __name__ == "__main__":
    # Benchmark all variants on a 1000-key dict for 3000 iterations.
    m = 1000
    a = setup_dict(m)
    n = 3000
    sort_compare(a,n)
| bsd-3-clause |
timokoola/finnkinotxt | botocore/translate.py | 13 | 21988 | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Translate the raw json files into python specific descriptions."""
import os
import re
from copy import deepcopy
import jmespath
from botocore.compat import OrderedDict, json
from botocore.utils import merge_dicts
from botocore import xform_name
class ModelFiles(object):
    """Bundle of the parsed json files describing one service.

    Holds:

    * ``model`` -- the json service description.
    * ``retry`` -- the parsed ``_retry.json`` file.
    * ``enhancements`` -- the ``<service>.extra.json`` enhancements file.
    * ``name`` -- the name of the service.

    """
    def __init__(self, model, retry, enhancements, name=''):
        self.name = name
        self.model = model
        self.retry = retry
        self.enhancements = enhancements
def load_model_files(args):
    """Load the model, retry, and enhancements files into a ModelFiles.

    Args:
        args: object with ``modelfile``, ``retry_file`` and
            ``enhancements_file`` attributes (paths).
    Returns:
        A populated ModelFiles; the service name is derived from the
        model file's basename.

    Files are opened with context managers so the handles are always
    closed (the original leaked three open file objects).
    """
    with open(args.modelfile) as f:
        model = json.load(f, object_pairs_hook=OrderedDict)
    with open(args.retry_file) as f:
        retry = json.load(f, object_pairs_hook=OrderedDict)
    enhancements = _load_enhancements_file(args.enhancements_file)
    service_name = os.path.splitext(os.path.basename(args.modelfile))[0]
    return ModelFiles(model, retry, enhancements, name=service_name)
def _load_enhancements_file(file_path):
if not os.path.isfile(file_path):
return {}
else:
return json.load(open(file_path),
object_pairs_hook=OrderedDict)
def translate(model):
    """Apply all enhancements/transformations to the raw service model.

    Args:
        model: a ModelFiles instance.
    Returns:
        A new dict: a deep copy of ``model.model`` with renames,
        deprecation removals, documentation filters, and pagination,
        waiter and retry configs applied/attached.
    """
    new_model = deepcopy(model.model)
    new_model.update(model.enhancements.get('extra', {}))
    try:
        # Any pagination section in the raw model is dropped; it is
        # re-added from the enhancements by add_pagination_configs below.
        del new_model['pagination']
    except KeyError:
        pass
    handle_op_renames(new_model, model.enhancements)
    handle_remove_deprecated_params(new_model, model.enhancements)
    handle_remove_deprecated_operations(new_model, model.enhancements)
    handle_filter_documentation(new_model, model.enhancements)
    handle_rename_params(new_model, model.enhancements)
    add_pagination_configs(
        new_model,
        model.enhancements.get('pagination', {}))
    add_waiter_configs(
        new_model,
        model.enhancements.get('waiters', {}))
    # Merge in any per operation overrides defined in the .extras.json file.
    merge_dicts(new_model['operations'],
                model.enhancements.get('operations', {}))
    add_retry_configs(
        new_model, model.retry.get('retry', {}),
        definitions=model.retry.get('definitions', {}))
    return new_model
def handle_op_renames(new_model, enhancements):
    """Rename operations by stripping a configured regex from each name.

    The only implemented transformation is removing part of the
    operation name (that's all that's currently needed).  No-op when
    no ``operation-name.remove`` pattern is configured.
    """
    remove = enhancements.get('transformations', {}).get(
        'operation-name', {}).get('remove')
    if remove is None:
        return
    pattern = re.compile(remove)
    # Rebuild the OrderedDict so the original operation order is kept.
    new_model['operations'] = OrderedDict(
        (pattern.sub('', key), value)
        for key, value in new_model['operations'].items()
    )
def handle_remove_deprecated_operations(new_model, enhancements):
    """Drop operations whose documentation contains the configured
    deprecation keyword.  No-op when no keyword is configured."""
    keyword = enhancements.get('transformations', {}).get(
        'remove-deprecated-operations', {}).get('deprecated_keyword')
    if keyword is None:
        return
    operations = new_model['operations']
    doomed = [
        op_name for op_name, operation in operations.items()
        if operation and operation['documentation']
        and keyword in operation['documentation']
    ]
    for op_name in doomed:
        del operations[op_name]
def handle_remove_deprecated_params(new_model, enhancements):
    """Strip input parameters whose documentation contains the
    configured deprecation keyword.  No-op when no keyword is set."""
    keyword = enhancements.get('transformations', {}).get(
        'remove-deprecated-params', {}).get('deprecated_keyword')
    if keyword is None:
        return
    for operation in new_model['operations'].values():
        params = operation.get('input', {}).get('members')
        if not params:
            continue
        # Rebuild as an OrderedDict to keep the parameter order.
        operation['input']['members'] = OrderedDict(
            (param_name, param)
            for param_name, param in params.items()
            if not (param['documentation']
                    and keyword in param['documentation'])
        )
def _filter_param_doc(param, replacement, regex):
# Recurse into complex parameters looking for documentation.
doc = param.get('documentation')
if doc:
param['documentation'] = regex.sub(replacement, doc)
if param['type'] == 'structure':
for member_name in param['members']:
member = param['members'][member_name]
_filter_param_doc(member, replacement, regex)
if param['type'] == 'map':
_filter_param_doc(param['keys'], replacement, regex)
_filter_param_doc(param['members'], replacement, regex)
elif param['type'] == 'list':
_filter_param_doc(param['members'], replacement, regex)
def handle_filter_documentation(new_model, enhancements):
    """Filter undesirable content (e.g. CDATA) out of documentation
    strings for every operation and its input parameters."""
    doc_filter = enhancements.get('transformations', {}).get(
        'filter-documentation', {}).get('filter')
    if doc_filter is None:
        return
    filter_regex = re.compile(doc_filter.get('regex', ''), re.DOTALL)
    replacement = doc_filter.get('replacement')
    for operation in new_model['operations'].values():
        doc = operation.get('documentation')
        if doc:
            operation['documentation'] = filter_regex.sub(replacement, doc)
        params = operation.get('input', {}).get('members')
        if params:
            for param in params.values():
                _filter_param_doc(param, replacement, filter_regex)
def handle_rename_params(new_model, enhancements):
    """Rename operation parameters addressed by jmespath expressions.

    This is *extremely* specific to botocore's translations: only a
    restricted set of renames is supported, where each key is a
    jmespath expression whose last dotted component is the parameter
    to rename.
    """
    renames = enhancements.get('transformations', {}).get('renames', {})
    if not renames:
        return
    for expression, new_value in renames.items():
        # Everything up to the last dot locates the parent container.
        parent_expression, key = expression.rsplit('.', 1)
        matched = jmespath.search(parent_expression, new_model['operations'])
        matched[new_value] = matched.pop(key)
def resembles_jmespath_exp(value):
    """Naive check: treat anything containing '.' or '[' as a
    jmespath expression."""
    return '.' in value or '[' in value
def add_pagination_configs(new_model, pagination):
    """Validate each pagination config and attach it to its operation.

    Raises ValueError when a config contains unknown keys, is missing
    ``result_key``, references a non-existent operation, or names
    input/output members the operation does not define.
    """
    # Adding in pagination configs means copying the config to a top level
    # 'pagination' key in the new model, and it also means adding the
    # pagination config to each individual operation.
    # Also, the input_token needs to be transformed to the python specific
    # name, so we're adding a py_input_token (e.g. NextToken -> next_token).
    if pagination:
        new_model['pagination'] = pagination
    for name in pagination:
        config = pagination[name]
        _check_known_pagination_keys(config)
        if 'py_input_token' not in config:
            _add_py_input_token(config)
        _validate_result_key_exists(config)
        _validate_referenced_operation_exists(new_model, name)
        operation = new_model['operations'][name]
        _validate_operation_has_output(operation, name)
        _check_input_keys_match(config, operation)
        _check_output_keys_match(config, operation,
                                 new_model.get('endpoint_prefix', ''))
        # Shallow copy keeps the per-operation entry distinct from the
        # top-level pagination dict.
        operation['pagination'] = config.copy()
def _validate_operation_has_output(operation, name):
if not operation['output']:
raise ValueError("Trying to add pagination config for an "
"operation with no output members: %s" % name)
def _validate_referenced_operation_exists(new_model, name):
if name not in new_model['operations']:
raise ValueError("Trying to add pagination config for non "
"existent operation: %s" % name)
def _validate_result_key_exists(config):
# result_key must be defined.
if 'result_key' not in config:
raise ValueError("Required key 'result_key' is missing from "
"from pagination config: %s" % config)
def _add_py_input_token(config):
    """Derive 'py_input_token' -- the snake_case form of 'input_token'
    (e.g. NextToken -> next_token), preserving list vs. scalar shape."""
    input_token = config['input_token']
    if isinstance(input_token, list):
        config['py_input_token'] = [xform_name(token)
                                    for token in input_token]
    else:
        config['py_input_token'] = xform_name(input_token)
def add_waiter_configs(new_model, waiters):
    """Denormalize waiter configs and attach them to the model.

    Raises:
        ValueError: if a waiter references an operation that does not
            exist in the model.  (The original raised a bare
            ``ValueError()`` with no message, which made failures
            impossible to diagnose.)
    """
    if not waiters:
        return
    denormalized = denormalize_waiters(waiters)
    # Before adding it to the new model, we need to verify the
    # final denormalized model.
    for name, value in denormalized.items():
        if value['operation'] not in new_model['operations']:
            raise ValueError(
                "Waiter '%s' references non existent operation: %s"
                % (name, value['operation']))
    new_model['waiters'] = denormalized
def denormalize_waiters(waiters):
    """Denormalize the waiter configuration so each entry is
    self-contained.

    The source config avoids duplication via '__default__' and
    'extends' inheritance; this resolves all of that up front so the
    consuming implementation stays simple.
    """
    default = waiters.get('__default__', {})
    # Keys starting with '__' are abstract/internal: they exist only to
    # be inherited from, and all lookups are resolved here, so they are
    # dropped from the translated output.
    return {
        key: denormalize_single_waiter(value, default, waiters)
        for key, value in waiters.items()
        if not key.startswith('__')
    }
def denormalize_single_waiter(value, default, waiters):
    """Denormalize a single waiter config.

    :param value: The dictionary of a single waiter config, e.g.
        the ``InstanceRunning`` or ``TableExists`` config.  This
        is the config we're going to denormalize.
    :param default: The ``__default__`` (if any) configuration.
        This is needed to resolve the lookup process.
    :param waiters: The full configuration of the waiters.
        This is needed if we need to look up at parent class that the
        current config extends.
    :return: The denormalized config.
    :rtype: dict
    """
    # First we need to resolve all the keys based on the inheritance
    # hierarchy. The lookup process is:
    # The most bottom/leaf class is ``value``. From there we need
    # to look up anything it inherits from (denoted via the ``extends``
    # key). We need to perform this process recursively until we hit
    # a config that has no ``extends`` key.
    # And finally if we haven't found our value yet, we check in the
    # ``__default__`` key.
    # So the first thing we need to do is build the lookup chain that
    # starts with ``value`` and ends with ``__default__``.
    lookup_chain = [value]
    current = value
    while True:
        if 'extends' not in current:
            break
        current = waiters[current.get('extends')]
        lookup_chain.append(current)
    lookup_chain.append(default)
    new_waiter = {}
    # Now that we have this lookup chain we can build the entire set
    # of values by starting at the most parent class and walking down
    # to the children. At each step the child is merged onto the parent's
    # config items. This is the desired behavior as a child's values
    # overrides its parents. This is what the ``reversed(...)`` call
    # is for.
    for element in reversed(lookup_chain):
        new_waiter.update(element)
    # We don't care about 'extends' so we can safely remove that key.
    new_waiter.pop('extends', {})
    # Now we need to resolve the success/failure values. We
    # want to completely remove the acceptor types.
    # The logic here is that if there is no success/failure_* variable
    # defined, it inherits this value from the matching acceptor_* variable.
    new_waiter['success_type'] = new_waiter.get(
        'success_type', new_waiter.get('acceptor_type'))
    new_waiter['success_path'] = new_waiter.get(
        'success_path', new_waiter.get('acceptor_path'))
    new_waiter['success_value'] = new_waiter.get(
        'success_value', new_waiter.get('acceptor_value'))
    new_waiter['failure_type'] = new_waiter.get(
        'failure_type', new_waiter.get('acceptor_type'))
    new_waiter['failure_path'] = new_waiter.get(
        'failure_path', new_waiter.get('acceptor_path'))
    new_waiter['failure_value'] = new_waiter.get(
        'failure_value', new_waiter.get('acceptor_value'))
    # We can remove acceptor_* vars because they're only used for lookups
    # and we've already performed this step in the lines above.
    new_waiter.pop('acceptor_type', '')
    new_waiter.pop('acceptor_path', '')
    new_waiter.pop('acceptor_value', '')
    # Remove any keys with a None value.
    # (Unset acceptor fallbacks above produce None entries.)
    for key in list(new_waiter.keys()):
        if new_waiter[key] is None:
            del new_waiter[key]
    # Check required keys.
    for required in ['operation', 'success_type']:
        if required not in new_waiter:
            raise ValueError('Missing required waiter configuration '
                             'value "%s": %s' % (required, new_waiter))
        if new_waiter.get(required) is None:
            raise ValueError('Required waiter configuration '
                             'value cannot be None "%s": %s' %
                             (required, new_waiter))
    # Finally, success/failure values can be a scalar or a list. We're going
    # to just always make them a list.
    if 'success_value' in new_waiter and not \
            isinstance(new_waiter['success_value'], list):
        new_waiter['success_value'] = [new_waiter['success_value']]
    if 'failure_value' in new_waiter and not \
            isinstance(new_waiter['failure_value'], list):
        new_waiter['failure_value'] = [new_waiter['failure_value']]
    # Fold the flat success_*/failure_* keys into nested dicts.
    _transform_waiter(new_waiter)
    return new_waiter
def _transform_waiter(new_waiter):
# This transforms the waiters into a format that's slightly
# easier to consume.
if 'success_type' in new_waiter:
success = {'type': new_waiter.pop('success_type')}
if 'success_path' in new_waiter:
success['path'] = new_waiter.pop('success_path')
if 'success_value' in new_waiter:
success['value'] = new_waiter.pop('success_value')
new_waiter['success'] = success
if 'failure_type' in new_waiter:
failure = {'type': new_waiter.pop('failure_type')}
if 'failure_path' in new_waiter:
failure['path'] = new_waiter.pop('failure_path')
if 'failure_value' in new_waiter:
failure['value'] = new_waiter.pop('failure_value')
new_waiter['failure'] = failure
def _check_known_pagination_keys(config):
# Verify that the pagination config only has keys we expect to see.
expected = set(['input_token', 'py_input_token', 'output_token',
'result_key', 'limit_key', 'more_results',
'non_aggregate_keys'])
for key in config:
if key not in expected:
raise ValueError("Unknown key in pagination config: %s" % key)
def _check_output_keys_match(config, operation, service_name):
    """Verify every output-side key named by the pagination config is an
    actual output member of the operation.

    Keys that look like jmespath expressions are skipped (not
    validated).  Raises ValueError for an unknown key, or -- when no
    jmespath expressions were seen -- for output members the config
    leaves unaccounted for.
    """
    output_members = list(operation['output']['members'])
    jmespath_seen = False
    for output_key in _get_all_page_output_keys(config):
        if resembles_jmespath_exp(output_key):
            # We don't validate jmespath expressions for now.
            jmespath_seen = True
            continue
        if output_key not in output_members:
            raise ValueError("Key %r is not an output member: %s" %
                             (output_key,
                              output_members))
        output_members.remove(output_key)
    # Some services echo the input parameters in the response
    # output. We should not trigger a validation error
    # if those params are still not accounted for.
    for input_name in operation['input']['members']:
        if input_name in output_members:
            output_members.remove(input_name)
    if not jmespath_seen and output_members:
        # Because we can't validate jmespath expressions yet,
        # we can't say for user if output_members actually has
        # remaining keys or not.
        if service_name == 's3' and output_members == ['Name']:
            # The S3 model uses 'Name' for the output key, which
            # actually maps to the 'Bucket' input param so we don't
            # need to validate this output member. This is the only
            # model that has this, so we can just special case this
            # for now.
            return
        raise ValueError("Output members still exist for operation %s: %s" % (
            operation['name'], output_members))
def _get_all_page_output_keys(config):
if not isinstance(config['result_key'], list):
yield config['result_key']
else:
for result_key in config['result_key']:
yield result_key
if not isinstance(config['output_token'], list):
yield config['output_token']
else:
for result_key in config['output_token']:
yield result_key
if 'more_results' in config:
yield config['more_results']
for key in config.get('non_aggregate_keys', []):
yield key
def _check_input_keys_match(config, operation):
input_tokens = config['input_token']
if not isinstance(input_tokens, list):
input_tokens = [input_tokens]
valid_input_names = operation['input']['members']
for token in input_tokens:
if token not in valid_input_names:
raise ValueError("input_token refers to a non existent "
"input name for operation %s: %s. "
"Must be one of: %s" % (operation['name'], token,
list(valid_input_names)))
if 'limit_key' in config and config['limit_key'] not in valid_input_names:
raise ValueError("limit_key refers to a non existent input name for "
"operation %s: %s. Must be one of: %s" % (
operation['name'], config['limit_key'],
list(valid_input_names)))
def add_retry_configs(new_model, retry_model, definitions):
    """Attach a resolved ``retry`` section to ``new_model`` in place.

    With a falsy ``retry_model`` an empty retry config is stored.
    Otherwise the service-specific retry config is looked up by the
    model's endpoint prefix and merged with the global defaults.
    """
    if not retry_model:
        new_model['retry'] = {}
        return
    # Service-specific retry settings are keyed off of the endpoint
    # prefix as defined in the JSON model.
    prefix = new_model.get('endpoint_prefix', '')
    new_model['retry'] = build_retry_config(prefix, retry_model, definitions)
def build_retry_config(endpoint_prefix, retry_model, definitions):
    """Build the final retry config for one service.

    Starts from the global ``__default__`` section, resolves any
    ``$ref`` entries (in both the base and the service-specific
    section), then overlays the service-specific values so they take
    precedence on conflicts.
    """
    per_service = retry_model.get(endpoint_prefix, {})
    resolve_references(per_service, definitions)
    # Use the global defaults as the base so that the service-specific
    # config, merged on top, wins.
    merged = {'__default__': retry_model.get('__default__', {})}
    resolve_references(merged, definitions)
    merge_dicts(merged, per_service)
    return merged
def resolve_references(config, definitions):
    """Recursively replace ``{"$ref": name}`` dicts, in place.

    To cut down on duplication, common definitions can be declared once
    and passed in via ``definitions``; any nested dict of the exact
    single-key form ``{"$ref": "name"}`` is swapped for
    ``definitions["name"]``.  Other nested dicts are recursed into.
    """
    for key, value in config.items():
        if not isinstance(value, dict):
            continue
        if len(value) == 1 and '$ref' in value:
            # Swap the reference marker for the shared definition.
            config[key] = definitions[list(value.values())[0]]
        else:
            resolve_references(value, definitions)
| apache-2.0 |
sadanandb/pmt | src/client/test/sample3d_test.py | 7 | 5044 | #!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Sample3dTest']
import tacticenv
from tactic_client_lib import TacticServerStub
import unittest
import xmlrpclib, sys, os, shutil
class Sample3dTest(unittest.TestCase):
    """Integration test exercising a TACTIC server against the
    'sample3d' project over XML-RPC.  All sub-tests run inside one
    transaction started in test_all and aborted at the end, so nothing
    is committed to the server.  (Python 2 code.)
    """

    def setUp(my):
        # All setup happens in test_all so the transaction wraps it.
        pass

    def test_all(my):
        print "Running Sample3d Test"
        from pyasm.security import Batch
        from pyasm.biz import Project
        Batch()
        Project.set_project("sample3d")
        #my.server = TacticServerStub(protocol="local")
        my.server = TacticServerStub(protocol="xmlrpc")
        project_code = "sample3d"
        my.server.set_project(project_code)
        # Open a transaction; aborted below so the test leaves no data.
        my.server.start("Sample3d Test")
        try:
            my._test_create_search_type()
            my._test_create_submission()
            my._test_get_submission()
            my._test_shot_sequence_hierarchy()
            my._test_query_snapshots()
            #my._test_performance()
        except Exception:
            my.server.abort()
            raise
        my.server.abort()
        #try:
        #    my.server.query("prod/asset")
        #except Exception:
        #    my.server.abort()
        #    raise
        #my.server.abort()

    def _test_query_snapshots(my):
        # Query all 'model'-context snapshots of sample3d assets and
        # time how long it takes to walk the results.
        filters = []
        filters.append( ['context', 'model'] )
        filters.append( ['search_type', 'prod/asset?project=sample3d'] )
        snapshots = my.server.query_snapshots(filters=filters, include_paths=True)
        import time
        start = time.time()
        for snapshot in snapshots:
            print snapshot.get('__search_key__')
            print snapshot.get('__paths__')
            print "parent: ", snapshot.get('__parent__')
        print time.time() - start

    def _test_create_search_type(my):
        # Smoke-test creating a new search type on the server.
        search_type = 'test'
        search_type_obj = my.server.create_search_type(search_type)
        print search_type_obj

    def _test_performance(my):
        # Disabled by default (see test_all); walks all assets and
        # fetches their snapshot children.
        for i in range(0,1):
            assets = my.server.query("prod/asset")
            for asset in assets:
                asset_key = asset.get("__search_key__")
                snapshots = my.server.get_all_children(asset_key,'sthpw/snapshot')
                #snapshot = my.server.get_snapshot(asset_key,context='model', include_paths=True)
                #print snapshot.get('__paths__')

    def _test_get_submission(my):
        server = TacticServerStub()
        server.set_project("sample3d")
        # choose some arbitrary bin
        bin_id = 4
        filters = []
        filters.append( ['bin_id', bin_id] )
        connectors = server.query("prod/submission_in_bin", filters)
        # get all of the submissions from the bin
        submission_ids = [x.get('submission_id') for x in connectors]
        filters = [ ['id', submission_ids] ]
        submissions = server.query("prod/submission", filters)
        # get all of the snapshots from the submissions
        for submission in submissions:
            search_key = submission.get('__search_key__')
            print "-"*20
            snapshot = server.get_snapshot(search_key, include_paths=True)
            paths = snapshot.get('__paths__')
            for path in paths:
                print path

    def _test_create_submission(my):
        server = TacticServerStub()
        server.set_project("sample3d")
        # choose some arbitrary bin
        bin_id = 4
        filters = []
        # asset to parent the submission under
        parent_type = "prod/asset"
        parent_code = "chr001"
        parent_key = server.build_search_key(parent_type, parent_code)
        parent = server.get_by_search_key(parent_key)
        parent_id = parent.get('id')
        # create a submission
        data = {
            'description': 'A test submission',
            'artist': 'joe',
            'context': 'model'
        }
        submission = server.insert("prod/submission", data, parent_key=parent_key)
        submission_key = submission.get('__search_key__')
        submission_id = submission.get('id')
        # check a local file in to the submission (assumes the jpg
        # exists in the working directory — TODO confirm fixture)
        file_path = './miso_ramen.jpg'
        context = "publish"
        snapshot = server.simple_checkin(submission_key, context, file_path, mode="upload")
        # now connect to the bin with a connector
        data = {
            "bin_id": bin_id,
            'submission_id': submission_id
        }
        server.insert("prod/submission_in_bin", data)

    def _test_shot_sequence_hierarchy(my):
        # A shot's parent should be its sequence (RC_001_001 -> RC_001).
        shot_key = "prod/shot?project=sample3d&code=RC_001_001"
        shot = my.server.get_by_search_key(shot_key)
        parent = my.server.get_parent(shot_key)
        my.assertEquals("RC_001", parent.get("code") )
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| epl-1.0 |
tmhm/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
# Fixed seed so the 'random'-init estimator is reproducible.
np.random.seed(5)

# NOTE(review): `centers` is not used anywhere below — appears to be leftover.
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three fits: the "good" 3-cluster run, an 8-cluster run, and a
# deliberately bad single random initialization.
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    plt.cla()
    est.fit(X)
    labels = est.labels_
    # Plot petal width vs sepal length vs petal length, colored by cluster.
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
# Label each species at the mean position of its samples.
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
fintech-circle/edx-platform | lms/djangoapps/certificates/tests/test_signals.py | 29 | 1324 | """ Unit tests for enabling self-generated certificates by default
for self-paced courses.
"""
from certificates import api as certs_api
from certificates.models import CertificateGenerationConfiguration
from certificates.signals import _listen_for_course_publish
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class SelfGeneratedCertsSignalTest(ModuleStoreTestCase):
    """Tests for enabling self-generated certificates by default
    for self-paced courses.
    """

    def setUp(self):
        super(SelfGeneratedCertsSignalTest, self).setUp()
        # Self-paced courses must be globally enabled for the signal to apply.
        SelfPacedConfiguration(enabled=True).save()
        self.course = CourseFactory.create(self_paced=True)
        # Enable the certificate-generation feature itself.
        CertificateGenerationConfiguration.objects.create(enabled=True)

    def test_cert_generation_enabled_for_self_paced(self):
        """Verify the publish signal enables self-generated certificates
        by default for self-paced courses.
        """
        # Disabled before the course-publish signal fires ...
        self.assertFalse(certs_api.cert_generation_enabled(self.course.id))
        _listen_for_course_publish('store', self.course.id)
        # ... and enabled afterwards.
        self.assertTrue(certs_api.cert_generation_enabled(self.course.id))
| agpl-3.0 |
fabioz/Pydev | plugins/org.python.pydev.jython/Lib/opcode.py | 264 | 5474 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
           "haslocal", "hascompare", "hasfree", "opname", "opmap",
           "HAVE_ARGUMENT", "EXTENDED_ARG"]

# Comparison operator names, indexed by the argument of COMPARE_OP.
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
          'is not', 'exception match', 'BAD')

# Buckets of opcodes grouped by the kind of argument they take.
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []

# name -> opcode, and opcode -> name (list indexed by opcode value).
opmap = {}
opname = [''] * 256
# Pre-fill every slot with a '<N>' placeholder for undefined opcodes.
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
    """Register opcode `op` under `name` in opname and opmap."""
    opname[op] = name
    opmap[name] = op

def name_op(name, op):
    """Register an opcode whose argument indexes the name list."""
    def_op(name, op)
    hasname.append(op)

def jrel_op(name, op):
    """Register an opcode whose argument is a relative jump offset."""
    def_op(name, op)
    hasjrel.append(op)

def jabs_op(name, op):
    """Register an opcode whose argument is an absolute jump target."""
    def_op(name, op)
    hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('STOP_CODE', 0)
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('ROT_FOUR', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_CONVERT', 13)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_DIVIDE', 21)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('SLICE+0', 30)
def_op('SLICE+1', 31)
def_op('SLICE+2', 32)
def_op('SLICE+3', 33)
def_op('STORE_SLICE+0', 40)
def_op('STORE_SLICE+1', 41)
def_op('STORE_SLICE+2', 42)
def_op('STORE_SLICE+3', 43)
def_op('DELETE_SLICE+0', 50)
def_op('DELETE_SLICE+1', 51)
def_op('DELETE_SLICE+2', 52)
def_op('DELETE_SLICE+3', 53)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_DIVIDE', 58)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('PRINT_ITEM', 71)
def_op('PRINT_NEWLINE', 72)
def_op('PRINT_ITEM_TO', 73)
def_op('PRINT_NEWLINE_TO', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('LOAD_LOCALS', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('EXEC_STMT', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('BUILD_CLASS', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('LIST_APPEND', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('DUP_TOPX', 99) # number of items to duplicate
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
jrel_op('SETUP_WITH', 143)
def_op('EXTENDED_ARG', 145)
EXTENDED_ARG = 145
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
del def_op, name_op, jrel_op, jabs_op
| epl-1.0 |
vabs22/zulip | zerver/lib/logging_util.py | 3 | 3051 | from __future__ import absolute_import
from django.utils.timezone import now as timezone_now
from django.utils.timezone import utc as timezone_utc
import hashlib
import logging
import traceback
from datetime import datetime, timedelta
from django.conf import settings
from zerver.lib.str_utils import force_bytes
# Adapted http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)
class _RateLimitFilter(object):
    """Logging filter that suppresses duplicate errors within a window.

    The window length (seconds) is read from the Django setting
    ``<SUBCLASSNAME>_LIMIT`` (default 600).  Duplicates are detected via
    the cache when it is available, keyed on a hash of the traceback;
    otherwise a coarse per-class timestamp is used.
    """

    # Fallback state when the cache is unusable; shared per subclass.
    last_error = datetime.min.replace(tzinfo=timezone_utc)

    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        from django.conf import settings
        from django.core.cache import cache

        # Track duplicate errors
        duplicate = False
        rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
                       600)  # seconds
        if rate > 0:
            # Test if the cache works
            try:
                cache.set('RLF_TEST_KEY', 1, 1)
                use_cache = cache.get('RLF_TEST_KEY') == 1
            except Exception:
                use_cache = False

            if use_cache:
                # Key on a hash of the traceback (or the record itself
                # when there is no exc_info) so distinct errors don't
                # suppress each other.
                if record.exc_info is not None:
                    tb = force_bytes('\n'.join(traceback.format_exception(*record.exc_info)))
                else:
                    tb = force_bytes(u'%s' % (record,))
                key = self.__class__.__name__.upper() + hashlib.sha1(tb).hexdigest()
                duplicate = cache.get(key) == 1
                if not duplicate:
                    cache.set(key, 1, rate)
            else:
                # Cache unavailable: rate-limit everything from this
                # subclass using the shared last_error timestamp.
                min_date = timezone_now() - timedelta(seconds=rate)
                duplicate = (self.last_error >= min_date)
                if not duplicate:
                    self.last_error = timezone_now()

        return not duplicate
class ZulipLimiter(_RateLimitFilter):
    # Window controlled by the ZULIPLIMITER_LIMIT setting (see base class).
    pass

class EmailLimiter(_RateLimitFilter):
    # Window controlled by the EMAILLIMITER_LIMIT setting (see base class).
    pass

class ReturnTrue(logging.Filter):
    """Filter that accepts every record."""
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        return True

class ReturnEnabled(logging.Filter):
    """Accept records only while logging is not globally disabled."""
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        return settings.LOGGING_NOT_DISABLED

class RequireReallyDeployed(logging.Filter):
    """Accept records only in production deployments."""
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        from django.conf import settings
        return settings.PRODUCTION
def skip_200_and_304(record):
    # type: (logging.LogRecord) -> bool
    """Filter out request log records whose status code is 200 or 304."""
    # `status_code` is attached to the record by Django rather than
    # being a native LogRecord attribute; mypy rejects direct attribute
    # access, hence getattr.
    return getattr(record, 'status_code') not in [200, 304]
def skip_site_packages_logs(record):
    # type: (logging.LogRecord) -> bool
    """Drop log records emitted from libraries under site-packages.

    Workaround for https://code.djangoproject.com/ticket/26886
    """
    return 'site-packages' not in record.pathname
| apache-2.0 |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/json/tests/test_pass1.py | 108 | 1841 | from json.tests import PyTest, CTest
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E66,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"0123456789": "digit",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066,
1e1,
0.1e1,
1e-1,
1e00,2e+00,2e-00
,"rosebud"]
'''
class TestPass1(object):
    """Round-trip the canonical JSON_checker ``pass1`` document.

    Mixin: ``loads``/``dumps`` come from PyTest or CTest, selecting the
    pure-Python or C implementation of the json module.
    """
    def test_parse(self):
        # test in/out equivalence and parsing
        res = self.loads(JSON)
        out = self.dumps(res)
        self.assertEqual(res, self.loads(out))

# Run the same test against both json implementations.
class TestPyPass1(TestPass1, PyTest): pass
class TestCPass1(TestPass1, CTest): pass
| mit |
AlbianWarp/warpserver | warpserver/models.py | 1 | 3426 | import datetime
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from werkzeug.security import (generate_password_hash,
check_password_hash)
Base = declarative_base()
class User(Base):
    """A registered user; only a salted password hash is stored."""
    __tablename__ = 'users'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    created = sqlalchemy.Column(sqlalchemy.DateTime,
                                default=datetime.datetime.utcnow)
    username = sqlalchemy.Column(sqlalchemy.String(30), unique=True)
    password_hash = sqlalchemy.Column(sqlalchemy.String())

    def __init__(self, username, password):
        self.username = username
        # The plaintext password never touches the database.
        self.password_hash = self.hash_password(password)

    @staticmethod
    def hash_password(password):
        """Return a salted werkzeug hash of `password`."""
        return generate_password_hash(password)

    def check_password(self, password):
        """Return True iff `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def to_dict(self):
        """Return a dictionary representation of this
        user, whose keys are in jsonStyle.
        """
        return {'username': self.username,
                'id': self.id,
                'created': self.created.isoformat("T") + 'Z'}
class Message(Base):
    """A submission to the site, just some text,
    a timestamp, an associated user, and an ID.
    """
    __tablename__ = 'posts'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    user_id = sqlalchemy.Column(sqlalchemy.Integer,
                                sqlalchemy.ForeignKey(User.id))
    created = sqlalchemy.Column(sqlalchemy.DateTime,
                                default=datetime.datetime.utcnow)
    text = sqlalchemy.Column(sqlalchemy.String())
    # Eagerly loaded so to_dict can embed the author without a lazy query.
    user = sqlalchemy.orm.relationship('User', foreign_keys='Message.user_id',
                                       lazy='subquery')

    def __init__(self, user_id, text):
        self.user_id = user_id
        self.text = text

    def __repr__(self):
        return '<Message #%s>' % self.id

    def to_dict(self):
        """Return a dictionary representation of this
        message, whose keys are in jsonStyle.
        """
        return {'id': self.id,
                'text': self.text,
                'user': self.user.to_dict(),
                'created': self.created.isoformat("T") + 'Z'}
class Creature(Base):
    """A creature upload, owned by a user."""
    __tablename__ = 'creatures'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    user_id = sqlalchemy.Column(sqlalchemy.Integer,
                                sqlalchemy.ForeignKey(User.id))
    uploaded = sqlalchemy.Column(sqlalchemy.DateTime,
                                 default=datetime.datetime.utcnow)
    creaturename = sqlalchemy.Column(sqlalchemy.String(30), unique=True)
    species = sqlalchemy.Column(sqlalchemy.String())
    user = sqlalchemy.orm.relationship('User',
                                       foreign_keys='Creature.user_id',
                                       lazy='subquery')

    def __init__(self, creaturename, species):
        # NOTE(review): user_id is not set here — presumably assigned by
        # the caller after construction; confirm against usage.
        self.creaturename = creaturename
        self.species = species

    def to_dict(self):
        """Return a dictionary representation of this
        creature, whose keys are in jsonStyle.
        """
        return {'creaturename': self.creaturename,
                'species': self.species,
                'id': self.id,
                'uploaded': self.uploaded.isoformat("T") + 'Z'}
| mit |
kived/kivy-designer | designer/project_loader.py | 1 | 46342 | import re
import os
import sys
import inspect
import time
import functools
import shutil
import imp
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.base import runTouchApp
from kivy.factory import Factory, FactoryException
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.sandbox import Sandbox
from kivy.clock import Clock
from designer.helper_functions import get_indentation, get_indent_str,\
get_line_start_pos, get_kivy_designer_dir
from designer.proj_watcher import ProjectWatcher
PROJ_DESIGNER = '.designer'
KV_PROJ_FILE_NAME = os.path.join(PROJ_DESIGNER, 'kvproj')
PROJ_FILE_CONFIG = os.path.join(PROJ_DESIGNER, 'file_config.ini')
class Comment(object):
    """Value object for a comment found in a kv file: its text, its
    position/path, and the kv file it came from.
    """
    def __init__(self, string, path, _file):
        super(Comment, self).__init__()
        self.string = string
        self.path = path
        self.kv_file = _file
class WidgetRule(object):
    '''WidgetRule is an Abstract class for representing a rule of Widget.
    '''
    def __init__(self, widget, parent):
        super(WidgetRule, self).__init__()
        self.name = widget
        self.parent = parent
        # Filled in later by the loader once the owning files are known.
        self.file = None
        self.kv_file = None
        self.module = None
class ClassRule(WidgetRule):
    '''ClassRule is a class for representing a class rule in kv
    (a ``<Name>:`` rule; it has no parent).
    '''
    def __init__(self, class_name):
        super(ClassRule, self).__init__(class_name, None)
class CustomWidgetRule(ClassRule):
    '''CustomWidgetRule is a class for representing a custom widgets rule
    in kv, tracking both the kv file and the python file it lives in.
    '''
    def __init__(self, class_name, kv_file, py_file):
        # Bug fix: this previously called super(ClassRule, self), i.e. it
        # named the parent class instead of this one (a copy-paste from
        # ClassRule), silently skipping ClassRule.__init__.  Naming our
        # own class runs the normal MRO chain; the resulting attribute
        # state is the same.
        super(CustomWidgetRule, self).__init__(class_name)
        self.class_name = class_name
        self.kv_file = kv_file
        self.py_file = py_file
class RootRule(ClassRule):
    '''RootRule is a class for representing root rule in kv.
    Additionally holds the instantiated root widget (may be None).
    '''
    def __init__(self, class_name, widget):
        super(RootRule, self).__init__(class_name)
        self.widget = widget
class ProjectLoaderException(Exception):
    """Raised when a project or one of its files cannot be loaded."""
    pass
class ProjectLoader(object):
'''ProjectLoader class, used to load Project
'''
    def __init__(self, proj_watcher):
        super(ProjectLoader, self).__init__()
        # Directories already added to sys.path by _get_file_list.
        self._dir_list = []
        # ProjectWatcher used to monitor the project dir for changes.
        self.proj_watcher = proj_watcher
        self.class_rules = []
        self.root_rule = None
        self.new_project = None
        # Maps a file type (extension/category) to its target directory.
        self.dict_file_type_and_path = {}
        self.kv_file_list = []
        self.kv_code_input = None
        self.tab_pannel = None
        self._root_rule = None
        # All .py/.spec files discovered in the project directory.
        self.file_list = []
        self.proj_dir = ""
        self._is_root_already_in_factory = False
    def _get_file_list(self, path):
        '''Recursively collect all .py and .spec files under ``path``,
        skipping the .designer directory.  Side effect: every visited
        directory is prepended to sys.path (so project modules import)
        and recorded in self._dir_list.  Files in the project root sort
        before files in subdirectories.
        '''
        file_list = []
        if '.designer' in path:
            return []
        sys.path.insert(0, path)
        self._dir_list.append(path)
        for _file in os.listdir(path):
            file_path = os.path.join(path, _file)
            if os.path.isdir(file_path):
                file_list += self._get_file_list(file_path)
            else:
                # Consider only kv, py and buildozer(spec) files
                if file_path[file_path.rfind('.'):] in [".py", ".spec"]:
                    if os.path.dirname(file_path) == self.proj_dir:
                        file_list.insert(0, file_path)
                    else:
                        file_list.append(file_path)
        return file_list
    def add_custom_widget(self, py_path):
        '''This function is used to add a custom widget given path to its
        py file.  The py file is expected to reference its kv file via
        Builder.load_file('path/to/kv'); both are parsed, the kv rules
        are loaded, and a CustomWidgetRule is appended to
        self.custom_widgets for each class found in the py file.
        '''
        f = open(py_path, 'r')
        py_string = f.read()
        f.close()
        # Find path to kv. py file will have Builder.load_file('path/to/kv')
        _r = re.findall(r'Builder\.load_file\s*\(\s*.+\s*\)', py_string)
        if _r == []:
            raise ProjectLoaderException('Cannot find widget\'s kv file.')
        # Strip the load_file call and path from the source so loading it
        # below doesn't trigger a second Builder load.
        py_string = py_string.replace(_r[0], '')
        kv_path = _r[0][_r[0].find('(') + 1: _r[0].find(')')]
        py_string = py_string.replace(kv_path, '')
        kv_path = kv_path.replace("'", '').replace('"', '')
        f = open(kv_path, 'r')
        kv_string = f.read()
        f.close()
        # Comment out all the lines referencing 'app', since no App is
        # running while designing.
        for app_str in re.findall(r'.+app+.+', kv_string):
            kv_string = kv_string.replace(
                app_str,
                app_str[:get_indentation(app_str)] + '#' + app_str.lstrip())
        Builder.load_string(kv_string)
        sys.path.insert(0, os.path.dirname(kv_path))
        _to_check = []
        # For every <ClassName> rule in the kv, look for a matching class
        # in the py source and register it as a custom widget.
        for class_str in re.findall(r'<+([\w_]+)>', kv_string):
            if re.search(r'\bclass\s+%s+.+:' % class_str, py_string):
                module = imp.new_module('CustomWidget')
                exec py_string in module.__dict__
                sys.modules['AppModule'] = module
                class_rule = CustomWidgetRule(class_str, kv_path, py_path)
                class_rule.file = py_path
                class_rule.module = module
                self.custom_widgets.append(class_rule)
    def get_root_str(self, kv_str=''):
        '''Return the text of the root widget's rule, taken from kv_str
        or, when kv_str is empty, from the root rule's kv file.  The rule
        ends where indentation returns to column 0 or the file ends.
        '''
        if kv_str == '':
            f = open(self.root_rule.kv_file, 'r')
            kv_str = f.read()
            f.close()

        # Find the start position of root_rule
        start_pos = kv_str.find(self.root_rule.name)
        if start_pos == -1:
            raise ProjectLoaderException(
                'Cannot find root rule in its file')

        # Get line for start_pos
        _line = 0
        _line_pos = 0
        _line_pos = kv_str.find('\n', _line_pos + 1)
        while _line_pos != -1 and _line_pos < start_pos:
            _line_pos = kv_str.find('\n', _line_pos + 1)
            _line += 1

        # Find the end position of root_rule, where indentation becomes 0
        # or file ends
        _line += 1
        lines = kv_str.splitlines()
        _total_lines = len(lines)
        while _line < _total_lines and (lines[_line].strip() == '' or
                                        get_indentation(lines[_line]) != 0):
            _line_pos = kv_str.find('\n', _line_pos + 1)
            _line += 1

        end_pos = _line_pos
        root_old_str = kv_str[start_pos: end_pos]

        # If the root is also declared as a class rule, re-add the '<'
        # that the name search skipped over.
        for _rule in self.class_rules:
            if _rule.name == self.root_rule.name:
                root_old_str = "<" + root_old_str

        return root_old_str
def get_full_str(self):
'''This function will give the full string of all detected kv files.
'''
text = ''
for _file in self.kv_file_list:
f = open(_file, 'r')
text += f.read() + '\n'
f.close()
return text
    def load_new_project(self, kv_path):
        '''To load a new (freshly created) project given by kv_path.
        Unlike load_project, the project directory is not watched.
        '''
        self.new_project = True
        self._load_project(kv_path)
    def load_project(self, kv_path):
        '''To load an existing project given by kv_path and start
        watching its directory for external modifications.
        '''
        ret = self._load_project(kv_path)
        self.new_project = False
        # Add project_dir to watch
        self.proj_watcher.start_watching(self.proj_dir)
        return ret
    def _load_project(self, kv_path):
        '''Private function to load any project given by kv_path (either
        a kv file or the project directory).  Parses every kv file in
        the directory, builds self.class_rules / self.root_rule, and —
        when a cached .designer/kvproj file is up to date — reloads the
        app and class module information recorded there.
        '''
        if os.path.isdir(kv_path):
            self.proj_dir = kv_path
        else:
            self.proj_dir = os.path.dirname(kv_path)

        parent_proj_dir = os.path.dirname(self.proj_dir)
        sys.path.insert(0, parent_proj_dir)

        self.class_rules = []
        all_files_loaded = True
        _file = None

        for _file in os.listdir(self.proj_dir):
            # Load each kv file in the directory
            _file = os.path.join(self.proj_dir, _file)
            if _file[_file.rfind('.'):] != '.kv':
                continue

            self.kv_file_list.append(_file)

            f = open(_file, 'r')
            kv_string = f.read()
            f.close()

            # Remove all the 'app' lines (no App instance exists while
            # designing, so they would fail to evaluate).
            for app_str in re.findall(r'.+app+.+', kv_string):
                kv_string = kv_string.replace(
                    app_str,
                    app_str[:get_indentation(app_str)] +
                    '#' + app_str.lstrip())

            # Get all the class_rules
            for class_str in re.findall(r'<+([\w_]+)>', kv_string):
                class_rule = ClassRule(class_str)
                class_rule.kv_file = _file
                self.class_rules.append(class_rule)

            try:
                root_name = re.findall(r'^([\w\d_]+)\:', kv_string,
                                       re.MULTILINE)
                if root_name != []:
                    # It will occur when there is a root rule and it can't
                    # be loaded by Builder because its file
                    # has been imported
                    root_name = root_name[0]
                    if not hasattr(Factory, root_name):
                        # Convert the root rule into a class rule so
                        # Builder can load it without instantiating it.
                        match = re.search(r'^([\w\d_]+)\:', kv_string,
                                          re.MULTILINE)
                        kv_string = kv_string[:match.start()] + \
                            '<' + root_name + '>:' + kv_string[match.end():]
                        self.root_rule = RootRule(root_name, None)
                        self.root_rule.kv_file = _file
                        self._root_rule = self.root_rule
                        self._is_root_already_in_factory = False
                    else:
                        self._is_root_already_in_factory = True
                else:
                    self._is_root_already_in_factory = False
                    # Strip on_* handlers before loading; they reference
                    # code that isn't available while designing.
                    root_rule = Builder.load_string(re.sub(r'\s+on_\w+:\w+',
                                                           '', kv_string))
                    if root_rule:
                        self.root_rule = RootRule(root_rule.__class__.__name__,
                                                  root_rule)
                        self.root_rule.kv_file = _file
                        self._root_rule = self.root_rule

            except Exception as e:
                all_files_loaded = False

        if not all_files_loaded:
            raise ProjectLoaderException('Cannot load file "%s"' % (_file))

        if os.path.exists(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME)):
            projdir_mtime = os.path.getmtime(self.proj_dir)

            f = open(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME), 'r')
            proj_str = f.read()
            f.close()

            _file_is_valid = True
            # Checking if the file is valid
            if proj_str == '' or\
                    proj_str.count('<files>') != proj_str.count('</files>') or\
                    proj_str.count('<file>') != proj_str.count('</file>') or\
                    proj_str.count('<class>') != proj_str.count('</class>'):
                _file_is_valid = False

            if _file_is_valid:
                projdir_time = proj_str[
                    proj_str.find('<time>') + len('<time>'):
                    proj_str.find('</time>')]
                projdir_time = float(projdir_time.strip())

            if _file_is_valid and projdir_mtime <= projdir_time:
                # Project Directory folder hasn't been modified,
                # file list will remain same
                self.file_list = []
                un_modified_files = []
                start_pos = proj_str.find('<files>')
                end_pos = proj_str.find('</files>')
                if start_pos != -1 and end_pos != -1:
                    start_pos = proj_str.find('<file>', start_pos)
                    end_pos1 = proj_str.find('</file>', start_pos)
                    while start_pos < end_pos and start_pos != -1:
                        _file = proj_str[
                            start_pos + len('<file>'):end_pos1].strip()
                        self.file_list.append(_file)
                        if os.path.getmtime(_file) <= projdir_time:
                            un_modified_files.append(_file)

                        start_pos = proj_str.find('<file>', end_pos1)
                        end_pos1 = proj_str.find('</file>', start_pos)

                    # Make every file's directory importable.
                    for _file in self.file_list:
                        _dir = os.path.dirname(_file)
                        if _dir not in sys.path:
                            sys.path.insert(0, _dir)

                # Reload information for app
                start_pos = proj_str.find('<app>')
                end_pos = proj_str.find('</app>')
                if start_pos != -1 and end_pos != -1:
                    self._app_class = proj_str[
                        proj_str.find('<class>', start_pos) + len('<class>'):
                        proj_str.find('</class>', start_pos)].strip()
                    self._app_file = proj_str[
                        proj_str.find('<file>', start_pos) + len('<file>'):
                        proj_str.find('</file>', start_pos)].strip()
                    f = open(self._app_file, 'r')
                    self._app_module = self._import_module(f.read(),
                                                           self._app_file)
                    f.close()

                # Reload information for the files which haven't been modified
                start_pos = proj_str.find('<classes>')
                end_pos = proj_str.find('</classes>')

                if start_pos != -1 and end_pos != -1:
                    while start_pos < end_pos and start_pos != -1:
                        start_pos = proj_str.find('<class>', start_pos) +\
                            len('<class>')
                        end_pos1 = proj_str.find('</class>', start_pos)
                        _file = proj_str[
                            proj_str.find('<file>', start_pos) + len('<file>'):
                            proj_str.find('</file>', start_pos)].strip()

                        if _file in un_modified_files:
                            # If _file is un modified then assign it to
                            # class rule with _name
                            _name = proj_str[
                                proj_str.find('<name>', start_pos) +
                                len('<name>'):
                                proj_str.find('</name>', start_pos)].strip()

                            for _rule in self.class_rules:
                                if _name == _rule.name:
                                    _rule.file = _file
                                    f = open(_file, 'r')
                                    _rule.module = self._import_module(
                                        f.read(), _file, _fromlist=[_name])
                                    f.close()

                        start_pos = proj_str.find('<class>', start_pos)
                        end_pos1 = proj_str.find('</class>', start_pos)

        if self.file_list == []:
            self.file_list = self._get_file_list(self.proj_dir)

        # Get all files corresponding to each class
        self._get_class_files()

        # If root widget is not created but root class is known
        # then create widget
        if self.root_rule and not self.root_rule.widget and \
                self.root_rule.name:
            self.root_rule.widget = self.get_widget_of_class(
                self.root_rule.name)

        self.load_proj_config()
def load_proj_config(self):
    '''To load project's config file. Project's config file is stored in
    .designer directory in project's directory.

    Populates ``self.dict_file_type_and_path`` from the
    ``<file_type_and_dirs>`` section; a missing config file is ignored.
    '''
    try:
        with open(os.path.join(self.proj_dir, PROJ_FILE_CONFIG), 'r') as f:
            config_str = f.read()
    except IOError:
        # No config file yet: nothing to load.
        return
    section_start = config_str.find('<file_type_and_dirs>\n')
    section_end = config_str.find('</file_type_and_dirs>\n')
    if section_start == -1 or section_end == -1:
        return
    for match in re.finditer(r'<file_type=.+', config_str):
        if match.start() < section_start:
            continue
        if match.start() > section_end:
            break
        entry = match.group(0)
        # file_type sits between the first pair of double quotes.
        file_type = entry[entry.find('"') + 1:
                          entry.find('"', entry.find('"') + 1)]
        # folder sits between the quote after 'dir=' and the last quote.
        folder = entry[
            entry.find('"', entry.find('dir=') + 1) + 1:
            entry.rfind('"')]
        self.dict_file_type_and_path[file_type] = folder
def save_proj_config(self):
    '''To save project's config file.
    '''
    # Assemble the document first, then write it in one shot.
    parts = ['<file_type_and_dirs>\n']
    for file_type, folder in self.dict_file_type_and_path.items():
        parts.append(' <file_type="' + file_type + '"' + ' dir="' +
                     folder + '">\n')
    parts.append('</file_type_and_dirs>\n')
    with open(os.path.join(self.proj_dir, PROJ_CONFIG), 'w') as f:
        f.write(''.join(parts))
def add_dir_for_file_type(self, file_type, folder):
    '''To add directory for specified file_type. More information in
    add_file.py
    '''
    # Record the mapping, then persist it immediately.
    mapping = self.dict_file_type_and_path
    mapping[file_type] = folder
    self.save_proj_config()
def perform_auto_save(self, *args):
    '''To perform auto save. Auto Save is done after every 5 min.

    Mirrors the project directory (except ``.designer``) into
    ``<proj_dir>/.designer/auto_save`` and patches the copied root-rule
    kv file with the current editor text.
    '''
    # Nothing worth saving until a root rule exists.
    if not self.root_rule:
        return

    auto_save_dir = os.path.join(self.proj_dir, '.designer')
    auto_save_dir = os.path.join(auto_save_dir, 'auto_save')

    # Start from a clean snapshot directory every time.
    if not os.path.exists(auto_save_dir):
        os.makedirs(auto_save_dir)
    else:
        shutil.rmtree(auto_save_dir)
        os.mkdir(auto_save_dir)

    # Copy everything except the .designer metadata directory itself.
    for _file in os.listdir(self.proj_dir):
        if '.designer' in _file:
            continue
        old_file = os.path.join(self.proj_dir, _file)
        new_file = os.path.join(auto_save_dir, _file)

        if os.path.isdir(old_file):
            shutil.copytree(old_file, new_file)
        else:
            shutil.copy(old_file, new_file)

    # Replace the on-disk root rule string in the snapshot with the
    # (possibly unsaved) text currently in the kv editor.
    root_rule_file = os.path.join(auto_save_dir,
                                  os.path.basename(self.root_rule.kv_file))

    f = open(root_rule_file, 'r')
    _file_str = f.read()
    f.close()

    text = self.kv_code_input.text

    root_str = self.get_root_str()

    f = open(root_rule_file, 'w')
    _file_str = _file_str.replace(root_str, text)
    f.write(_file_str)
    f.close()

    # For custom widgets copy py and kv file
    for widget in self.custom_widgets:
        custom_kv = os.path.join(auto_save_dir,
                                 os.path.basename(widget.kv_file))
        if not os.path.exists(custom_kv):
            shutil.copy(widget.kv_file, custom_kv)

        custom_py = os.path.join(auto_save_dir,
                                 os.path.basename(widget.py_file))
        if not os.path.exists(custom_py):
            shutil.copy(widget.py_file, custom_py)
def save_project(self, proj_dir=''):
    '''To save project to proj_dir. If proj_dir is not empty string then
       project is saved to a new directory other than its
       current directory and otherwise it is saved to the
       current directory.

    Stages: (1) copy/relocate files for a new project or a save-as,
    (2) mirror custom-widget files, (3) flush and re-import every open
    python editor, (4) write all class rules back to their kv files,
    (5) rewrite the root rule and the App's build()/runTouchApp call if
    the root widget changed, (6) re-enable the project watcher.
    '''
    # To stop ProjectWatcher from emitting event when project is saved
    self.proj_watcher.allow_event_dispatch = False
    proj_dir_changed = False

    if self.new_project:
        # Create dir and copy new_proj.kv and new_proj.py to new directory
        if not os.path.exists(proj_dir):
            os.mkdir(proj_dir)

        kivy_designer_dir = get_kivy_designer_dir()
        kivy_designer_new_proj_dir = os.path.join(kivy_designer_dir,
                                                  "new_proj")
        for _file in os.listdir(kivy_designer_new_proj_dir):
            old_file = os.path.join(kivy_designer_new_proj_dir, _file)
            new_file = os.path.join(proj_dir, _file)
            if os.path.isdir(old_file):
                shutil.copytree(old_file, new_file)
            else:
                shutil.copy(old_file, new_file)

        self.file_list = self._get_file_list(proj_dir)

        new_kv_file = os.path.join(proj_dir, "main.kv")
        new_py_file = os.path.join(proj_dir, "main.py")

        self.proj_dir = proj_dir
        # A fresh template always uses main.kv / main.py.
        if self.root_rule:
            self.root_rule.kv_file = new_kv_file
            self.root_rule.py_file = new_py_file

        if self.class_rules:
            self.class_rules[0].py_file = new_py_file
            self.class_rules[0].kv_file = new_kv_file

        self.new_project = False

    else:
        if proj_dir != '' and proj_dir != self.proj_dir:
            proj_dir_changed = True

            # Remove previous project directories from sys.path
            for _dir in self._dir_list:
                try:
                    sys.path.remove(_dir)
                except:
                    pass

            # if proj_dir and self.proj_dir differs then user wants to save
            # an already opened project to somewhere else
            # Copy all the files
            if not os.path.exists(proj_dir):
                os.mkdir(proj_dir)

            for _file in os.listdir(self.proj_dir):
                old_file = os.path.join(self.proj_dir, _file)
                new_file = os.path.join(proj_dir, _file)
                if os.path.isdir(old_file):
                    shutil.copytree(old_file, new_file)
                else:
                    shutil.copy(old_file, new_file)

            self.file_list = self._get_file_list(proj_dir)

            # Change the path of all files in the class rules,
            # root rule and app
            relative_path = self._app_file[
                self._app_file.find(self.proj_dir):]
            self._app_file = os.path.join(proj_dir, relative_path)

            f = open(self._app_file, 'r')
            s = f.read()
            f.close()

            self._import_module(s, self._app_file,
                                _fromlist=[self._app_class])

            for _rule in self.class_rules:
                relative_path = _rule.kv_file[
                    _rule.kv_file.find(self.proj_dir):]
                _rule.kv_file = os.path.join(proj_dir, relative_path)

                relative_path = _rule.file[_rule.file.find(self.proj_dir):]
                _rule.file = os.path.join(proj_dir, relative_path)

                f = open(_rule.file, 'r')
                s = f.read()
                f.close()

                self._import_module(s, _rule.file, _fromlist=[_rule.name])

            relative_path = self.root_rule.kv_file[
                self.root_rule.kv_file.find(self.proj_dir):]
            self.root_rule.kv_file = os.path.join(proj_dir, relative_path)

            relative_path = self.root_rule.file[
                self.root_rule.file.find(self.proj_dir):]
            self.root_rule.file = os.path.join(proj_dir, relative_path)

            self.proj_dir = proj_dir

    # For custom widgets copy py and kv file to project directory
    for widget in self.custom_widgets:
        custom_kv = os.path.join(self.proj_dir,
                                 os.path.basename(widget.kv_file))
        if not os.path.exists(custom_kv):
            shutil.copy(widget.kv_file, custom_kv)

        custom_py = os.path.join(self.proj_dir,
                                 os.path.basename(widget.py_file))
        if not os.path.exists(custom_py):
            shutil.copy(widget.py_file, custom_py)

    # Saving all opened py files and also reimport them
    for _code_input in self.tab_pannel.list_py_code_inputs:
        path = os.path.join(self.proj_dir, _code_input.rel_file_path)
        f = open(path, 'w')
        f.write(_code_input.text)
        f.close()
        # Collect the names to pass as fromlist when re-importing.
        _from_list = []
        for rule in self.class_rules:
            if rule.file == path:
                _from_list.append(rule.file)
        if not self.is_root_a_class_rule():
            if self.root_rule.file == path:
                _from_list.append(self.root_rule.name)

        # Ignore all types that are not .py
        if path.endswith(".py"):
            self._import_module(_code_input.text, path,
                                _fromlist=_from_list)

    # Save all class rules
    text = self.kv_code_input.text
    for _rule in self.class_rules:
        # Get the kv text from KVLangArea and write it to class rule's file
        f = open(_rule.kv_file, 'r')
        _file_str = f.read()
        f.close()

        old_str = self.get_class_str_from_text(_rule.name, _file_str)
        new_str = self.get_class_str_from_text(_rule.name, text)

        f = open(_rule.kv_file, 'w')
        _file_str = _file_str.replace(old_str, new_str)
        f.write(_file_str)
        f.close()

    # If root widget is not changed
    if self._root_rule.name == self.root_rule.name:
        # Save root widget's rule
        is_root_class = False
        for _rule in self.class_rules:
            if _rule.name == self.root_rule.name:
                is_root_class = True
                break

        if not is_root_class:
            # Root is a plain root rule (not one of the class rules
            # saved above), so write it out separately.
            f = open(self.root_rule.kv_file, 'r')
            _file_str = f.read()
            f.close()

            old_str = self.get_class_str_from_text(self.root_rule.name,
                                                   _file_str,
                                                   is_class=False)
            new_str = self.get_class_str_from_text(self.root_rule.name,
                                                   text, is_class=False)

            f = open(self.root_rule.kv_file, 'w')
            _file_str = _file_str.replace(old_str, new_str)
            f.write(_file_str)
            f.close()

    else:
        # If root widget is changed
        # Root Widget changes, there can be these cases:
        root_name = self.root_rule.name
        f = open(self._app_file, 'r')
        file_str = f.read()
        f.close()
        self._root_rule = self.root_rule

        if self.is_root_a_class_rule() and self._app_file:
            # Root Widget's class rule is a custom class
            # and its rule is class rule. So, it already have been saved
            # the string of App's build() function will be changed to
            # return new root widget's class
            if self._app_class != 'runTouchApp':
                s = re.search(r'class\s+%s.+:' % self._app_class, file_str)
                if s:
                    # Find the build() method that belongs to the App
                    # class (first one after the class header).
                    build_searchiter = None
                    for searchiter in re.finditer(
                            r'[ \ \t]+def\s+build\s*\(\s*self.+\s*:',
                            file_str):
                        if searchiter.start() > s.start():
                            build_searchiter = searchiter
                            break

                    if build_searchiter:
                        indent = get_indentation(build_searchiter.group(0))
                        file_str = file_str[:build_searchiter.end()] +\
                            '\n' + get_indent_str(2 * indent) + "return " +\
                            root_name + "()\n" + \
                            file_str[build_searchiter.end():]
                    else:
                        # No build(): insert one right after the class
                        # header.
                        file_str = file_str[:s.end()] + \
                            "\n def build(self):\n return " + \
                            root_name + '()\n' + file_str[s.end():]

            else:
                file_str = re.sub(r'runTouchApp\s*\(.+\)',
                                  'runTouchApp(' + root_name + '())',
                                  file_str)

            f = open(self._app_file, 'w')
            f.write(file_str)
            f.close()

        else:
            # Root Widget's rule is not a custom class
            # and its rule is root rule
            # Its kv_file should be of App's class name
            # and App's build() function should be cleared
            if not self.root_rule.kv_file:
                # Pick the kv file whose basename starts with the
                # lower-cased App name (Kivy's auto-load convention).
                s = self._app_class.replace('App', '').lower()
                root_file = None
                for _file in self.kv_file_list:
                    if os.path.basename(_file).find(s) == 0:
                        self.root_rule.kv_file = _file
                        break

            f = open(self.root_rule.kv_file, 'r')
            _file_str = f.read()
            f.close()

            new_str = self.get_class_str_from_text(self.root_rule.name,
                                                   text, False)

            f = open(self.root_rule.kv_file, 'a')
            f.write(new_str)
            f.close()

            if self._app_class != 'runTouchApp':
                s = re.search(r'class\s+%s.+:' % self._app_class, file_str)
                if s:
                    build_searchiter = None
                    for searchiter in re.finditer(
                            r'[ \ \t]+def\s+build\s*\(\s*self.+\s*:',
                            file_str):
                        if searchiter.start() > s.start():
                            build_searchiter = searchiter
                            break

                    if build_searchiter:
                        # Replace the whole build() body (every line
                        # indented deeper than the def) with 'pass'.
                        lines = file_str.splitlines()
                        total_lines = len(lines)
                        indent = get_indentation(build_searchiter.group(0))
                        _line = 0
                        _line_pos = -1
                        _line_pos = file_str.find('\n', _line_pos + 1)
                        while _line_pos <= build_searchiter.start():
                            _line_pos = file_str.find('\n', _line_pos + 1)
                            _line += 1

                        _line += 1
                        while _line < total_lines:
                            if lines[_line].strip() != '' and\
                                    get_indentation(lines[_line]) <= \
                                    indent:
                                break
                            _line += 1

                        _line -= 1
                        end = get_line_start_pos(file_str, _line)
                        start = build_searchiter.start()
                        file_str = file_str.replace(file_str[start:end],
                                                    ' pass')

                        f = open(self._app_file, 'w')
                        f.write(file_str)
                        f.close()

    # Allow Project Watcher to emit events
    Clock.schedule_once(self._allow_proj_watcher_dispatch, 1)
def get_class_str_from_text(self, class_name, _file_str, is_class=True):
    '''To return the full class rule of class_name from _file_str.

    When ``is_class`` is True the rule is located as ``<class_name>:``;
    otherwise class_name is matched as a standalone word (root rule).
    The returned slice runs from the rule header up to the next line at
    indentation 0 (or end of text).
    '''
    _file_str += '\n'
    start_pos = -1
    # Find the start position of class_name
    if is_class:
        start_pos = _file_str.find('<' + class_name + '>:')
    else:
        while True:
            start_pos = _file_str.find(class_name, start_pos + 1)
            # Accept the hit only when not preceded by an alphanumeric
            # character, i.e. not embedded in a longer identifier.
            # NOTE(review): the ``!= ''`` comparison is always True for a
            # single character -- looks redundant; confirm intent.
            if start_pos == 0 or not (_file_str[start_pos - 1].isalnum() and
                                      _file_str[start_pos - 1] != ''):
                break

    # Count the newline offsets up to start_pos to get its line number.
    _line = 0
    _line_pos = 0
    _line_pos = _file_str.find('\n', _line_pos + 1)
    while _line_pos != -1 and _line_pos < start_pos:
        _line_pos = _file_str.find('\n', _line_pos + 1)
        _line += 1

    # Find the end position of class_name, where indentation becomes 0
    # or file ends
    _line += 1
    lines = _file_str.splitlines()
    _total_lines = len(lines)
    # Skip comment-only lines directly below the rule header, keeping
    # _line_pos in sync with the skipped characters.
    hash_pos = 0
    while hash_pos == 0 and _line < _total_lines:
        hash_pos = lines[_line].find('#')
        if hash_pos == 0:
            _line_pos += 1 + len(lines[_line])
            _line += 1

    while _line < _total_lines and (lines[_line].strip() == '' or
                                    get_indentation(lines[_line]) != 0):
        _line_pos = _file_str.find('\n', _line_pos + 1)
        _line += 1

        hash_pos = 0
        # NOTE(review): this inner comment-skip advances _line without
        # advancing _line_pos like the loop above does -- verify against
        # kv files containing trailing comments.
        while hash_pos == 0 and _line < _total_lines:
            hash_pos = lines[_line].find('#')
            if hash_pos == 0:
                _line += 1

    end_pos = _line_pos

    old_str = _file_str[start_pos: end_pos]
    return old_str
def _allow_proj_watcher_dispatch(self, *args):
'''To start project_watcher to start watching self.proj_dir
'''
self.proj_watcher.allow_event_dispatch = True
# self.proj_watcher.start_watching(self.proj_dir)
def _app_in_string(self, s):
'''To determine if there is an App class or runTouchApp
defined/used in string s.
'''
if 'runTouchApp' in s:
self._app_class = 'runTouchApp'
return True
elif 'kivy.app' in s:
for _class in re.findall(r'\bclass\b.+:', s):
b_index1 = _class.find('(')
b_index2 = _class.find(')')
if _class[b_index1 + 1:b_index2].strip() == 'App':
self._app_class = _class[_class.find(' '):b_index1].strip()
return True
return False
def _get_class_files(self):
    '''To search through all detected class rules and find
       their python files and to search for app.

    Resolution order for the App: main.py, then files with "app" in
    their name, then a brute-force scan of every project file. Raises
    ProjectLoaderException if any class rule's file cannot be found.
    '''
    if self._app_file is None:
        # Search for main.py
        for _file in self.file_list:
            if _file[_file.rfind('/') + 1:] == 'main.py':
                f = open(_file, 'r')
                s = f.read()
                f.close()
                if self._app_in_string(s):
                    self._app_module = self._import_module(s, _file)
                    self._app_file = _file

        # Search for a file with app in its name
        if not self._app_class:
            for _file in self.file_list:
                if 'app' in _file[_file.rfind('/'):]:
                    f = open(_file, 'r')
                    s = f.read()
                    f.close()
                    if self._app_in_string(s):
                        self._app_module = self._import_module(s, _file)
                        self._app_file = _file

    # Class rules (and the root rule) that still need a file.
    to_find = []
    for _rule in self.class_rules:
        if _rule.file is None:
            to_find.append(_rule)

    if self.root_rule:
        to_find.append(self.root_rule)

    # If cannot find due to above methods, search every file
    for _file in self.file_list:
        f = open(_file, 'r')
        s = f.read()
        f.close()
        if not self._app_file and self._app_in_string(s):
            self._app_module = self._import_module(s, _file)
            self._app_file = _file

        # Iterate a copy since matched rules are removed in-place.
        for _rule in to_find[:]:
            if _rule.file:
                continue
            if re.search(r'\bclass\s*%s+.+:' % (_rule.name), s):
                mod = self._import_module(s, _file, _fromlist=[_rule.name])
                if hasattr(mod, _rule.name):
                    _rule.file = _file
                    to_find.remove(_rule)
                    _rule.module = mod

    # Cannot Find App, So, use default runTouchApp
    if not self._app_file:
        self._app_class = 'runTouchApp'

    # Root Widget may be in Factory not in file
    if self.root_rule:
        if not self.root_rule.file and\
                hasattr(Factory, self.root_rule.name):
            to_find.remove(self.root_rule)

    # to_find should be empty, if not some class's files are not detected
    if to_find != []:
        raise ProjectLoaderException(
            'Cannot find class files for all classes')
def _import_module(self, s, _file, _fromlist=[]):
    '''Import the source string *s* belonging to *_file* and return the
    resulting module.

    If the source runs an app (``X().run()`` at top level) or loads a kv
    file through Builder, those statements are stripped and the source is
    exec'd into a synthetic 'AppModule' so importing does not start the
    app. Otherwise the file is imported normally by name.

    :param s: source text of the module.
    :param _file: path of the file the source came from.
    :param _fromlist: names to pass as ``fromlist`` to ``__import__``.
        (Kept as a mutable default for interface compatibility; it is
        never mutated here.)
    '''
    # Strip any Builder.load_file(...) call -- the designer manages kv
    # loading itself.
    _r = re.findall(r'Builder\.load_file\s*\(\s*.+\s*\)', s)
    if _r:
        s = s.replace(_r[0], '')

    run_pos = s.rfind('().run()')

    if run_pos != -1:
        # Walk back to the start of the '<AppClass>().run()' expression.
        run_pos -= 1
        while not s[run_pos].isspace():
            run_pos -= 1

        # Check whether the call sits at the start of its line
        # (i.e. a top-level run, not indented inside a guard).
        i = run_pos - 1
        while s[i] == ' ':
            i -= 1

        if i == run_pos - 1 or _r != []:
            if i == run_pos - 1:
                s = s.replace('%s().run()' % self._app_class, '')

            if 'AppModule' in sys.modules:
                del sys.modules['AppModule']

            module = imp.new_module('AppModule')
            # BUG FIX: was the Python-2-only statement form
            # ``exec s in module.__dict__``; the call/tuple form below is
            # valid in both Python 2 and Python 3.
            exec(s, module.__dict__)
            sys.modules['AppModule'] = module
            return module

    # Plain import by module name, forcing a fresh import.
    module_name = _file[_file.rfind(os.sep) + 1:].replace('.py', '')
    if module_name in sys.modules:
        del sys.modules[module_name]

    module = __import__(module_name, fromlist=_fromlist)
    return module
def cleanup(self, stop_watcher=True):
    '''To cleanup everything loaded by previous project.

    Unregisters the previous project's Builder rules and Factory entry
    (if this loader registered it), drops its directories from sys.path
    and resets all per-project state.
    '''
    if stop_watcher:
        self.proj_watcher.stop()

    # Remove all class rules and root rules of previous project
    rules = []

    try:
        # Builder.match raises when there is no usable root widget; in
        # that case there is simply nothing to unregister.
        rules = Builder.match(self.root_rule.widget)
        for _rule in rules:
            for _tuple in Builder.rules[:]:
                if _tuple[1] == _rule:
                    Builder.rules.remove(_tuple)
    except:
        pass

    # Iterate over a copy because entries are removed in-place.
    for _tuple in Builder.rules[:]:
        for _rule in self.class_rules:
            if "<" + _rule.name + ">" == _tuple[1].name:
                Builder.rules.remove(_tuple)

    # Only unregister from Factory what this loader registered itself.
    if self.root_rule and not self._is_root_already_in_factory and\
            hasattr(Factory, self.root_rule.name):
        Factory.unregister(self.root_rule.name)

    self._app_file = None
    self._app_class = None
    self._app_module = None
    self._app = None

    # Remove previous project directories
    for _dir in self._dir_list:
        try:
            sys.path.remove(_dir)
        except:
            pass

    # Reset all collections so the next project starts clean.
    self.kv_file_list = []
    self.file_list = []
    self._dir_list = []
    self.class_rules = []
    self.list_comments = []
    self.custom_widgets = []
    self.dict_file_type_and_path = {}

    self.root_rule = None
    self._root_rule = None
def get_app(self, reload_app=False):
    '''To get the applications app class instance.

    Returns None when the app file/class/module have not been resolved
    yet. A cached instance is reused unless *reload_app* is True.
    '''
    if not self._app_file or not self._app_class or not self._app_module:
        return None

    if self._app and not reload_app:
        return self._app

    for name, member in inspect.getmembers(self._app_module):
        if name == self._app_class and inspect.isclass(member):
            self._app = member()
            return self._app

    # if still couldn't get app, although that shouldn't happen
    return None
def reload_from_str(self, root_str):
    '''To reload the root widget from the kv string *root_str*.

    Previously-registered root and class rules are removed from the
    Builder first so the new string wins; returns the new root widget.
    '''
    rules = []
    # Cleaning root rules
    try:
        rules = Builder.match(self.root_rule.widget)
        for _rule in rules:
            for _tuple in Builder.rules[:]:
                if _tuple[1] == _rule:
                    Builder.rules.remove(_tuple)
    except:
        pass

    # Cleaning class rules
    for _rule in self.class_rules:
        for rule in Builder.rules[:]:
            if rule[1].name == '<' + _rule.name + '>':
                Builder.rules.remove(rule)
                break

    root_widget = None
    # Remove all the 'app' lines
    root_str = re.sub(r'.+app+.+', '', root_str)

    root_widget = Builder.load_string(root_str)

    if not root_widget:
        root_widget = self.get_widget_of_class(self.root_rule.name)
        self.root_rule.widget = root_widget

    if not root_widget:
        # Fall back to the rule name on the first line of the kv string.
        # BUG FIX: the original chained .replace() on ``root_widget``
        # (None at this point) instead of ``root_name``, which raised
        # AttributeError and threw away the stripped name.
        root_name = root_str[:root_str.find('\n')]
        root_name = root_name.replace(':', '').replace('<', '')
        root_name = root_name.replace('>', '')
        root_widget = self.set_root_widget(root_name)

    return root_widget
def is_root_a_class_rule(self):
    '''Returns True if root rule is a class rule.
    '''
    root_name = self.root_rule.name
    return any(rule.name == root_name for rule in self.class_rules)
def set_root_widget(self, root_name, widget=None):
    '''To set root_name as the root rule.

    Uses *widget* when supplied, otherwise instantiates one for
    *root_name*. File locations are inherited from a matching class
    rule when one exists.
    '''
    root_widget = widget if widget else self.get_widget_of_class(root_name)
    self.root_rule = RootRule(root_name, root_widget)
    for _rule in self.class_rules:
        if _rule.name == root_name:
            # Inherit file locations from the matching class rule.
            self.root_rule.kv_file = _rule.kv_file
            self.root_rule.py_file = _rule.file
            break

    if not self._root_rule:
        self._root_rule = self.root_rule

    return root_widget
def get_root_widget(self, new_root=False):
    '''To get the root widget of the current project.

    Instantiates the project's App (without running it) and uses its
    build() result -- or app.root -- as the root widget; the previously
    running app is restored afterwards. Raises ProjectLoaderException
    when no kv file can be associated with the root rule.
    '''
    if not new_root and self.root_rule and self.root_rule.name != '':
        return self.root_rule.widget

    if self._app_file is None:
        return None

    # NOTE(review): ``s`` is read here but never used -- verify whether
    # this read can be dropped.
    f = open(self._app_file, 'r')
    s = f.read()
    f.close()

    # Remember the running app; instantiating the project's app below
    # replaces App._running_app as a side effect.
    current_app = App.get_running_app()
    app = self.get_app(reload_app=True)
    root_widget = None
    if app is not None:
        root_widget = app.build()
        if not root_widget:
            root_widget = app.root

    App._running_app = current_app

    if root_widget:
        self.root_rule = RootRule(root_widget.__class__.__name__,
                                  root_widget)
        for _rule in self.class_rules:
            if _rule.name == self.root_rule.name:
                self.root_rule.kv_file = _rule.kv_file
                self.root_rule.file = _rule.file
                break

        if not self._root_rule:
            self._root_rule = self.root_rule

    if not self.root_rule.kv_file:
        raise ProjectLoaderException("Cannot find root widget's kv file")

    return root_widget
def get_widget_of_class(self, class_name):
    '''To get instance of the class_name.
    '''
    # Factory resolves the class registered under class_name.
    widget_cls = getattr(Factory, class_name)
    self.root = widget_cls()
    return self.root
def is_widget_custom(self, widget):
    '''Return True when *widget*'s class name matches one of the
    project's class rules (i.e. it is a project-defined widget).
    '''
    widget_cls_name = type(widget).__name__
    return any(rule.name == widget_cls_name for rule in self.class_rules)
def record(self):
    '''To record all the findings in ./designer/kvproj. These will
       be loaded again if project hasn't been modified
       outside Kivy Designer.

    Fix over the original: the project file was opened twice in write
    mode (the first handle served no purpose); it is now opened once and
    closed via ``with`` even on error.
    '''
    # Make sure the .designer directory exists before writing into it.
    if not os.path.exists(os.path.join(
            self.proj_dir, os.path.dirname(KV_PROJ_FILE_NAME))):
        os.mkdir(os.path.join(self.proj_dir, ".designer"))

    # Build the whole document in memory, then write it in one shot.
    proj_file_str = '<time>\n' + ' ' + str(time.time()) + '\n</time>\n'
    proj_file_str += '<files>\n'
    for _file in self.file_list:
        proj_file_str += ' <file>\n'
        proj_file_str += ' ' + _file
        proj_file_str += '\n </file>\n'

    proj_file_str += '</files>\n'

    proj_file_str += '<classes>\n'
    for _rule in self.class_rules:
        proj_file_str += ' <class>\n'
        proj_file_str += ' <name>\n'
        proj_file_str += ' ' + _rule.name
        proj_file_str += '\n </name>\n'

        proj_file_str += ' <file>\n'
        proj_file_str += ' ' + _rule.file
        proj_file_str += '\n </file>\n'
        proj_file_str += '\n </class>\n'

    proj_file_str += '</classes>\n'

    if self._app_class and self._app_file:
        proj_file_str += '<app>\n'
        proj_file_str += ' <class>\n'
        proj_file_str += ' ' + self._app_class
        proj_file_str += '\n </class>\n'

        proj_file_str += ' <file>\n'
        proj_file_str += ' ' + self._app_file
        proj_file_str += '\n </file>\n'
        proj_file_str += '</app>\n'

    with open(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME), 'w') as f:
        f.write(proj_file_str)
| mit |
brianr747/SFC_models | sfc_models/examples/scripts/build_run_all_scripts.py | 1 | 1705 | """
build_run_all_scripts.py
Build the file 'run_all_scripts.py'. Used to validate that all scripts run without crashing.
Currently, no attempt to validate output; that's the job of unit tests.
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def main():
    """Generate ``run_all_scripts.py`` from the script names listed, one
    per line, in ``script_list.txt``.

    Raises:
        ValueError: if an entry in script_list.txt does not end in '.py'.

    Fix over the original: both files are managed with ``with`` so they
    are closed even when a bad entry raises mid-way.
    """
    with open('script_list.txt', 'r') as f_in, \
            open('run_all_scripts.py', 'w') as f_out:
        # Header of the generated module.
        f_out.write('"""\n')
        f_out.write('Machine-generated file used to validate that all scripts run without crashing.\n')
        f_out.write('Generated by build_run_all_scripts.py\n')
        f_out.write('Have a nice day!\n')
        f_out.write('"""\n\n\n')
        f_out.write('from __future__ import print_function\n')
        # Disable plotting so scripts run headless.
        f_out.write('import sfc_models.examples.Quick2DPlot as extras\n')
        f_out.write('extras.plt = None # Prang the plots\n\n\n')
        for fname in f_in:
            fname = fname.strip()
            if not fname.endswith('.py'):
                raise ValueError('Bad file in script_list.txt')
            fname = fname[:-3]
            # Import each script, then call its main() when it has one.
            f_out.write('import {0}\n'.format(fname))
            f_out.write('if "main" in dir({0}):\n'.format(fname))
            f_out.write(' {0}.main()\n'.format(fname))
            f_out.write('\n\n')
if __name__=='__main__':
main() | apache-2.0 |
glmcdona/meddle | examples/base/Lib/lib2to3/fixes/fix_types.py | 304 | 1806 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work for only the known names in the types module. The forms above
can include types. or not. ie, It is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
# Mapping from names in the Python 2 ``types`` module to the Python 3
# expression that replaces them. Values that are not plain names (e.g.
# 'type(None)') are emitted verbatim.
_TYPE_MAPPING = {
    'BooleanType' : 'bool',
    'BufferType' : 'memoryview',
    'ClassType' : 'type',
    'ComplexType' : 'complex',
    'DictType': 'dict',
    'DictionaryType' : 'dict',
    'EllipsisType' : 'type(Ellipsis)',
    #'FileType' : 'io.IOBase',
    'FloatType': 'float',
    'IntType': 'int',
    'ListType': 'list',
    'LongType': 'int',
    'ObjectType' : 'object',
    'NoneType': 'type(None)',
    'NotImplementedType' : 'type(NotImplemented)',
    'SliceType' : 'slice',
    'StringType': 'bytes', # XXX ?
    'StringTypes' : 'str', # XXX ?
    'TupleType': 'tuple',
    'TypeType' : 'type',
    'UnicodeType': 'str',
    'XRangeType' : 'range',
    }

# One lib2to3 pattern per mapped name, matching ``types.<name>``.
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
    """Replace ``types.XxxType`` accesses with their builtin equivalents
    according to ``_TYPE_MAPPING``.
    """
    BM_compatible = True
    PATTERN = '|'.join(_pats)

    def transform(self, node, results):
        # ``unicode`` marks this as Python 2 source (lib2to3 fixers run
        # under Python 2 here). NOTE(review): if ``.get`` ever missed,
        # unicode(None) == u'None' would be truthy -- safe only because
        # PATTERN restricts matches to mapped names.
        new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
        if new_value:
            return Name(new_value, prefix=node.prefix)
        return None
| mit |
lgp171188/fjord | vendor/packages/urllib3/dummyserver/server.py | 8 | 6066 | #!/usr/bin/env python
"""
Dummy server used for unit testing.
"""
from __future__ import print_function
import errno
import logging
import os
import random
import string
import sys
import threading
import socket
from tornado.platform.auto import set_close_exec
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
import tornado.web
# Module-level logger for the dummy test server.
log = logging.getLogger(__name__)


# Self-signed certificates shipped next to this module, used to exercise
# the HTTPS code paths in tests.
CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')
DEFAULT_CERTS = {
    'certfile': os.path.join(CERTS_PATH, 'server.crt'),
    'keyfile': os.path.join(CERTS_PATH, 'server.key'),
}
DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')
DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')
# Different types of servers we have:
class SocketServerThread(threading.Thread):
    """
    :param socket_handler: Callable which receives a socket argument for one
        request.
    :param ready_event: Event which gets set when the socket handler is
        ready to receive requests.
    """
    def __init__(self, socket_handler, host='localhost', port=8081,
                 ready_event=None):
        # NOTE(review): the ``port`` argument is accepted but never used;
        # the server binds to port 0 and publishes the OS-chosen port via
        # ``self.port``.
        threading.Thread.__init__(self)

        self.socket_handler = socket_handler
        self.host = host
        self.ready_event = ready_event

    def _start_server(self):
        sock = socket.socket(socket.AF_INET6)
        if sys.platform != 'win32':
            # SO_REUSEADDR has different (unsafe) semantics on Windows.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((self.host, 0))
        self.port = sock.getsockname()[1]

        # Once listen() returns, the server socket is ready
        sock.listen(1)

        if self.ready_event:
            self.ready_event.set()

        # Hand the listening socket to the test's handler, then clean up.
        self.socket_handler(sock)
        sock.close()

    def run(self):
        # _start_server returns None; the attribute is kept for parity
        # with the tornado-based servers in this module.
        self.server = self._start_server()
# FIXME: there is a pull request patching bind_sockets in Tornado directly.
# If it gets merged and released we can drop this and use
# `tornado.netutil.bind_sockets` again.
# https://github.com/facebook/tornado/pull/977

def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128,
                 flags=None):
    """Creates listening sockets bound to the given port and address.

    Returns a list of socket objects (multiple sockets are returned if
    the given address maps to multiple IP addresses, which is most common
    for mixed IPv4 and IPv6 use).

    Address may be either an IP address or hostname.  If it's a hostname,
    the server will listen on all IP addresses associated with the
    name.  Address may be an empty string or None to listen on all
    available interfaces.  Family may be set to either `socket.AF_INET`
    or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
    both will be used if available.

    The ``backlog`` argument has the same meaning as for
    `socket.listen() <socket.socket.listen>`.

    ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.

    Unlike the upstream tornado version, when ``port`` is None the port
    chosen for the first address family is reused for the others, so
    IPv4 and IPv6 listen on the same number.
    """
    sockets = []
    if address == "":
        address = None
    if not socket.has_ipv6 and family == socket.AF_UNSPEC:
        # Python can be compiled with --disable-ipv6, which causes
        # operations on AF_INET6 sockets to fail, but does not
        # automatically exclude those results from getaddrinfo
        # results.
        # http://bugs.python.org/issue16208
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    binded_port = None
    for res in set(socket.getaddrinfo(address, port, family,
                                      socket.SOCK_STREAM, 0, flags)):
        af, socktype, proto, canonname, sockaddr = res
        try:
            sock = socket.socket(af, socktype, proto)
        except socket.error as e:
            # Skip families the kernel does not support.
            if e.args[0] == errno.EAFNOSUPPORT:
                continue
            raise
        set_close_exec(sock.fileno())
        if os.name != 'nt':
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if af == socket.AF_INET6:
            # On linux, ipv6 sockets accept ipv4 too by default,
            # but this makes it impossible to bind to both
            # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
            # separate sockets *must* be used to listen for both ipv4
            # and ipv6.  For consistency, always disable ipv4 on our
            # ipv6 sockets and use a separate ipv4 socket when needed.
            #
            # Python 2.x on windows doesn't have IPPROTO_IPV6.
            if hasattr(socket, "IPPROTO_IPV6"):
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

        # automatic port allocation with port=None
        # should bind on the same port on IPv4 and IPv6
        host, requested_port = sockaddr[:2]
        if requested_port == 0 and binded_port is not None:
            sockaddr = tuple([host, binded_port] + list(sockaddr[2:]))

        sock.setblocking(0)
        sock.bind(sockaddr)
        binded_port = sock.getsockname()[1]
        sock.listen(backlog)
        sockets.append(sock)
    return sockets
def run_tornado_app(app, io_loop, certs, scheme, host):
    """Start a tornado HTTPServer for *app* on an OS-assigned port bound
    to *host*, with SSL when *scheme* is 'https'.

    Returns a ``(server, port)`` tuple.
    """
    if scheme == 'https':
        server = tornado.httpserver.HTTPServer(app, ssl_options=certs,
                                               io_loop=io_loop)
    else:
        server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)

    listeners = bind_sockets(None, address=host)
    bound_port = listeners[0].getsockname()[1]
    server.add_sockets(listeners)
    return server, bound_port
def run_loop_in_thread(io_loop):
    """Start *io_loop* on a new background thread and return the thread."""
    loop_thread = threading.Thread(target=io_loop.start)
    loop_thread.start()
    return loop_thread
def get_unreachable_address():
    """Return a (host, port) pair that is overwhelmingly unlikely to be
    reachable: a random 60-letter hostname on port 54321. Each candidate
    is probed once in case it accidentally resolves to a live server.
    """
    while True:
        candidate = ''.join(random.choice(string.ascii_lowercase)
                            for _ in range(60))
        sockaddr = (candidate, 54321)
        # check if we are really "lucky" and hit an actual server
        try:
            probe = socket.create_connection(sockaddr)
        except socket.error:
            return sockaddr
        else:
            probe.close()
| bsd-3-clause |
liberorbis/libernext | env/lib/python2.7/site-packages/IPython/kernel/tests/test_connect.py | 7 | 4358 | """Tests for kernel connection utilities
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
import os
import nose.tools as nt
from IPython.config import Config
from IPython.consoleapp import IPythonConsoleApp
from IPython.core.application import BaseIPythonApplication
from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
from IPython.utils.py3compat import str_to_bytes
from IPython.kernel import connect
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DummyConsoleApp(BaseIPythonApplication, IPythonConsoleApp):
    """Minimal console-app fixture: initializes just enough state to
    resolve a connection file, without starting a kernel."""
    def initialize(self, argv=[]):
        BaseIPythonApplication.initialize(self, argv=argv)
        self.init_connection_file()
# Canonical connection-info dict shared by every test in this module.
sample_info = dict(ip='1.2.3.4', transport='ipc',
    shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5,
    key=b'abc123', signature_scheme='hmac-md5',
)
def test_write_connection_file():
    """write_connection_file round-trips sample_info through JSON."""
    with TemporaryDirectory() as d:
        cf = os.path.join(d, 'kernel.json')
        connect.write_connection_file(cf, **sample_info)
        nt.assert_true(os.path.exists(cf))
        with open(cf, 'r') as f:
            info = json.load(f)
        # JSON stores the key as text; convert back to bytes to compare.
        info['key'] = str_to_bytes(info['key'])
        nt.assert_equal(info, sample_info)
def test_app_load_connection_file():
    """test `ipython console --existing` loads a connection file"""
    with TemporaryDirectory() as d:
        cf = os.path.join(d, 'kernel.json')
        connect.write_connection_file(cf, **sample_info)
        app = DummyConsoleApp(connection_file=cf)
        app.initialize(argv=[])

        for attr, expected in sample_info.items():
            # key and signature_scheme are not mirrored as app attributes.
            if attr in ('key', 'signature_scheme'):
                continue
            value = getattr(app, attr)
            nt.assert_equal(value, expected, "app.%s = %s != %s" % (attr, value, expected))
def test_get_connection_file():
    """get_connection_file resolves a relative name into the profile's
    security directory."""
    cfg = Config()
    with TemporaryWorkingDirectory() as d:
        cfg.ProfileDir.location = d
        cf = 'kernel.json'
        app = DummyConsoleApp(config=cfg, connection_file=cf)
        app.initialize(argv=[])

        profile_cf = os.path.join(app.profile_dir.location, 'security', cf)
        nt.assert_equal(profile_cf, app.connection_file)
        with open(profile_cf, 'w') as f:
            f.write("{}")
        nt.assert_true(os.path.exists(profile_cf))
        nt.assert_equal(connect.get_connection_file(app), profile_cf)

        # Even after resetting to the relative name, resolution should
        # land on the same absolute path.
        app.connection_file = cf
        nt.assert_equal(connect.get_connection_file(app), profile_cf)
def test_find_connection_file():
    """find_connection_file resolves exact names and glob patterns.

    Fix: the reset of BaseIPythonApplication._instance is now in a finally
    block.  Previously a failing assertion left the singleton installed,
    polluting every test that ran afterwards.
    """
    cfg = Config()
    with TemporaryDirectory() as d:
        cfg.ProfileDir.location = d
        cf = 'kernel.json'
        app = DummyConsoleApp(config=cfg, connection_file=cf)
        app.initialize(argv=[])
        # Install the app as the singleton so find_connection_file can locate
        # its profile directory.
        BaseIPythonApplication._instance = app
        try:
            profile_cf = os.path.join(app.profile_dir.location, 'security', cf)
            with open(profile_cf, 'w') as f:
                f.write("{}")
            for query in (
                'kernel.json',
                'kern*',
                '*ernel*',
                'k*',
                ):
                nt.assert_equal(connect.find_connection_file(query), profile_cf)
        finally:
            BaseIPythonApplication._instance = None
def test_get_connection_info():
    """get_connection_info returns JSON text, or a dict when unpack=True."""
    with TemporaryDirectory() as tmpdir:
        cf = os.path.join(tmpdir, 'kernel.json')
        connect.write_connection_file(cf, **sample_info)
        json_info = connect.get_connection_info(cf)
        info = connect.get_connection_info(cf, unpack=True)
    nt.assert_equal(type(json_info), type(""))
    nt.assert_equal(info, sample_info)
    # The JSON form parses back to the same data once the key is re-encoded.
    info2 = json.loads(json_info)
    info2['key'] = str_to_bytes(info2['key'])
    nt.assert_equal(info2, sample_info)
| gpl-2.0 |
ibis-project/ibis-bigquery | ibis_bigquery/datatypes.py | 1 | 3584 | import ibis.expr.datatypes as dt
from multipledispatch import Dispatcher
class TypeTranslationContext:
    """A tag class to allow alteration of the way a particular type is
    translated.
    Notes
    -----
    This is used to raise an exception when INT64 types are encountered to
    avoid surprising results due to BigQuery's handling of INT64 types in
    JavaScript UDFs.
    """
    __slots__ = ()
class UDFContext(TypeTranslationContext):
    """Tag subclass selecting the stricter translation rules used inside UDFs."""
    __slots__ = ()
# Single entry point: dispatches on (ibis datatype, translation context).
ibis_type_to_bigquery_type = Dispatcher('ibis_type_to_bigquery_type')
@ibis_type_to_bigquery_type.register(str)
def trans_string_default(datatype):
    # A bare string is parsed into an ibis dtype, then re-dispatched.
    return ibis_type_to_bigquery_type(dt.dtype(datatype))
@ibis_type_to_bigquery_type.register(dt.DataType)
def trans_default(t):
    # No explicit context: use the default (non-UDF) translation rules.
    return ibis_type_to_bigquery_type(t, TypeTranslationContext())
@ibis_type_to_bigquery_type.register(str, TypeTranslationContext)
def trans_string_context(datatype, context):
    # String plus explicit context: parse, then re-dispatch with that context.
    return ibis_type_to_bigquery_type(dt.dtype(datatype), context)
@ibis_type_to_bigquery_type.register(dt.Floating, TypeTranslationContext)
def trans_float64(t, context):
    # All ibis floating widths map to BigQuery's single FLOAT64 type.
    return 'FLOAT64'
@ibis_type_to_bigquery_type.register(dt.Integer, TypeTranslationContext)
def trans_integer(t, context):
    # All signed ibis integer widths map to INT64 (see trans_integer_udf
    # below for the UDF context, where INT64 is rejected).
    return 'INT64'
@ibis_type_to_bigquery_type.register(dt.Binary, TypeTranslationContext)
def trans_binary(t, context):
    return 'BYTES'
@ibis_type_to_bigquery_type.register(
    dt.UInt64, (TypeTranslationContext, UDFContext)
)
def trans_lossy_integer(t, context):
    # uint64 cannot be represented losslessly in BigQuery's signed INT64,
    # so reject it in every context.
    raise TypeError(
        'Conversion from uint64 to BigQuery integer type (int64) is lossy'
    )
@ibis_type_to_bigquery_type.register(dt.Array, TypeTranslationContext)
def trans_array(t, context):
    # Element type is translated recursively under the same context.
    return 'ARRAY<{}>'.format(
        ibis_type_to_bigquery_type(t.value_type, context)
    )
@ibis_type_to_bigquery_type.register(dt.Struct, TypeTranslationContext)
def trans_struct(t, context):
    """Translate an ibis Struct to a BigQuery 'STRUCT<name type, ...>' string.

    Each field type is translated recursively under the same context.
    """
    # Renamed the loop variable from 'type' to 'field_type' to stop shadowing
    # the builtin.
    return 'STRUCT<{}>'.format(
        ', '.join(
            '{} {}'.format(
                name, ibis_type_to_bigquery_type(dt.dtype(field_type), context)
            )
            for name, field_type in zip(t.names, t.types)
        )
    )
@ibis_type_to_bigquery_type.register(dt.Date, TypeTranslationContext)
def trans_date(t, context):
    return 'DATE'
@ibis_type_to_bigquery_type.register(dt.Timestamp, TypeTranslationContext)
def trans_timestamp(t, context):
    # BigQuery TIMESTAMP has no timezone attribute, so reject tz-aware types.
    if t.timezone is not None:
        raise TypeError('BigQuery does not support timestamps with timezones')
    return 'TIMESTAMP'
@ibis_type_to_bigquery_type.register(dt.DataType, TypeTranslationContext)
def trans_type(t, context):
    # Fallback for any type without a more specific rule: uppercase its
    # ibis string representation.
    return str(t).upper()
@ibis_type_to_bigquery_type.register(dt.Integer, UDFContext)
def trans_integer_udf(t, context):
    # Overrides the generic Integer -> INT64 rule when translating in a UDF
    # context.
    # JavaScript does not have integers, only a Number class. BigQuery doesn't
    # behave as expected with INT64 inputs or outputs
    raise TypeError(
        'BigQuery does not support INT64 as an argument type or a return type '
        'for UDFs. Replace INT64 with FLOAT64 in your UDF signature and '
        'cast all INT64 inputs to FLOAT64.'
    )
@ibis_type_to_bigquery_type.register(dt.Decimal, TypeTranslationContext)
def trans_numeric(t, context):
    # BigQuery NUMERIC is a fixed-parameter decimal: precision 38, scale 9.
    if (t.precision, t.scale) != (38, 9):
        raise TypeError(
            'BigQuery only supports decimal types with precision of 38 and '
            'scale of 9'
        )
    return 'NUMERIC'
@ibis_type_to_bigquery_type.register(dt.Decimal, UDFContext)
def trans_numeric_udf(t, context):
    """Reject Decimal types inside UDFs.

    Bug fix: this was registered for (dt.Decimal, TypeTranslationContext),
    which duplicated the trans_numeric registration above and silently
    overwrote it, making *every* NUMERIC translation raise.  Registering for
    UDFContext restores NUMERIC support in ordinary contexts while still
    rejecting decimals in UDFs.
    """
    raise TypeError('Decimal types are not supported in BigQuery UDFs')
| apache-2.0 |
makerbot/ReplicatorG | skein_engines/skeinforge-47/fabmetheus_utilities/archive.py | 10 | 14441 | """
Boolean geometry utilities.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
import os
import sys
import traceback
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
# Default location of the skeinforge settings directory (~/.skeinforge);
# read via getSettingsPath below.
globalTemporarySettingsPath = os.path.join(os.path.expanduser('~'), '.skeinforge')
def addToNamePathDictionary(directoryPath, namePathDictionary):
    """Register each plugin in directoryPath, keyed by its name with underscores removed."""
    for pluginFileName in getPluginFileNamesFromDirectoryPath(directoryPath):
        key = pluginFileName.replace('_', '')
        namePathDictionary[key] = os.path.join(directoryPath, pluginFileName)
def getAbsoluteFolderPath(filePath, folderName=''):
    """Return the absolute directory of filePath, optionally joined with folderName."""
    folderPath = os.path.dirname(os.path.abspath(filePath))
    if folderName != '':
        folderPath = os.path.join(folderPath, folderName)
    return folderPath
def getAbsoluteFrozenFolderPath(filePath, folderName=''):
    """Return the absolute folder path, adjusted for frozen (bundled) executables."""
    if hasattr(sys, 'frozen'):
        # In a frozen build, strip a trailing module file name (keeping the
        # final backslash) and point into the bundled skeinforge_application.
        if '.py' in filePath:
            filePath = ''.join(filePath.rpartition('\\')[: 2])
        filePath = os.path.join(filePath, 'skeinforge_application')
    return getAbsoluteFolderPath(filePath, folderName)
def getAnalyzePluginsDirectoryPath(subName=''):
    """Return the analyze_plugins directory, optionally joined with subName."""
    analyzePluginsPath = getSkeinforgePluginsPath('analyze_plugins')
    return getJoinedPath(analyzePluginsPath, subName)
def getCraftPluginsDirectoryPath(subName=''):
    """Return the craft_plugins directory, optionally joined with subName."""
    craftPluginsPath = getSkeinforgePluginsPath('craft_plugins')
    return getJoinedPath(craftPluginsPath, subName)
def getDocumentationPath(subName=''):
    """Return the documentation directory, optionally joined with subName."""
    documentationPath = getFabmetheusPath('documentation')
    return getJoinedPath(documentationPath, subName)
def getElementsPath(subName=''):
    """Return the evaluate_elements directory, optionally joined with subName."""
    elementsPath = getGeometryUtilitiesPath('evaluate_elements')
    return getJoinedPath(elementsPath, subName)
def getEndsWithList(word, wordEndings):
    """Return True if word ends with any of the given endings."""
    return any(word.endswith(wordEnding) for wordEnding in wordEndings)
def getFabmetheusPath(subName=''):
    """Return the fabmetheus root directory, optionally joined with subName."""
    if hasattr(sys, 'frozen'):
        # Frozen executable: the root is the folder holding the executable.
        fabmetheusFile = unicode(sys.executable, sys.getfilesystemencoding())
    else:
        fabmetheusFile = os.path.dirname(os.path.abspath(__file__))
    return getJoinedPath(os.path.dirname(fabmetheusFile), subName)
def getFabmetheusToolsPath(subName=''):
    """Return the fabmetheus_tools directory, optionally joined with subName."""
    toolsPath = getFabmetheusUtilitiesPath('fabmetheus_tools')
    return getJoinedPath(toolsPath, subName)
def getFabmetheusUtilitiesPath(subName=''):
    """Return the fabmetheus_utilities directory, optionally joined with subName."""
    utilitiesPath = getFabmetheusPath('fabmetheus_utilities')
    return getJoinedPath(utilitiesPath, subName)
def getFileNamesByFilePaths(pluginFilePaths):
    """Return the extension-stripped base names of the given file paths."""
    return [getUntilDot(os.path.basename(filePath)) for filePath in pluginFilePaths]
def getFilePaths(fileInDirectory=''):
    """Return the paths in the directory containing fileInDirectory (cwd when empty)."""
    if fileInDirectory == '':
        return getFilePathsByDirectory(os.getcwd())
    return getFilePathsByDirectory(os.path.dirname(fileInDirectory))
def getFilePathsByDirectory(directoryName):
    """Return the absolute paths of all entries directly inside directoryName."""
    absoluteDirectoryPath = os.path.abspath(directoryName)
    return [os.path.join(absoluteDirectoryPath, entry) for entry in os.listdir(directoryName)]
def getFilePathsRecursively(fileInDirectory=''):
    """Return paths in fileInDirectory's folder and, recursively, in all subfolders."""
    topLevelPaths = getFilePaths(fileInDirectory)
    allPaths = topLevelPaths[:]
    for path in topLevelPaths:
        if os.path.isdir(path):
            entries = os.listdir(path)
            if entries:
                # Recurse by handing over any one entry inside the
                # subdirectory; getFilePaths only uses its dirname.
                allPaths += getFilePathsRecursively(os.path.join(path, entries[0]))
    return allPaths
def getFilePathWithUnderscoredBasename(fileName, suffix):
    """Return fileName with its extension replaced by suffix and basename spaces underscored."""
    suffixFileName = getUntilDot(fileName) + suffix
    underscoredBasename = os.path.basename(suffixFileName).replace(' ', '_')
    return os.path.join(os.path.dirname(suffixFileName), underscoredBasename)
def getFilesWithFileTypesWithoutWords(fileTypes, words=[], fileInDirectory=''):
    """Return sorted paths matching any of fileTypes and containing none of words."""
    # Note: like the original loop, a path matching several fileTypes appears
    # once per match.
    matches = [
        filePath
        for filePath in getFilePaths(fileInDirectory)
        for fileType in fileTypes
        if isFileWithFileTypeWithoutWords(fileType, filePath, words)
    ]
    matches.sort()
    return matches
def getFilesWithFileTypesWithoutWordsRecursively(fileTypes, words=[], fileInDirectory=''):
    """Return sorted paths, searched recursively, matching any of fileTypes and containing none of words."""
    matches = [
        filePath
        for filePath in getFilePathsRecursively(fileInDirectory)
        for fileType in fileTypes
        if isFileWithFileTypeWithoutWords(fileType, filePath, words)
    ]
    matches.sort()
    return matches
def getFilesWithFileTypeWithoutWords(fileType, words=[], fileInDirectory=''):
    """Return sorted paths of the given fileType containing none of words."""
    matches = [
        filePath
        for filePath in getFilePaths(fileInDirectory)
        if isFileWithFileTypeWithoutWords(fileType, filePath, words)
    ]
    matches.sort()
    return matches
def getFileText(fileName, printWarning=True, readMode='r'):
    """Return the entire text of a file, or '' (optionally warning) when unreadable.

    Fix: uses a with statement so the handle is closed even if read() raises;
    the original only closed the file on the fully successful path.
    """
    try:
        with open(fileName, readMode) as openedFile:
            return openedFile.read()
    except IOError:
        if printWarning:
            print('The file ' + fileName + ' does not exist.')
        return ''
def getFileTextInFileDirectory(fileInDirectory, fileName, readMode='r'):
    """Return the text of fileName located beside fileInDirectory."""
    directoryName = os.path.dirname(fileInDirectory)
    return getFileText(os.path.join(directoryName, fileName), True, readMode)
def getFundamentalsPath(subName=''):
    """Return the evaluate_fundamentals directory, optionally joined with subName."""
    fundamentalsPath = getGeometryUtilitiesPath('evaluate_fundamentals')
    return getJoinedPath(fundamentalsPath, subName)
def getGeometryDictionary(folderName):
    """Return a name -> path dictionary of the geometry plugins in folderName."""
    geometryDictionary = {}
    # Plugins may live under the core geometry tree or under geometry_plugins.
    addToNamePathDictionary(os.path.join(getGeometryPath(), folderName), geometryDictionary)
    addToNamePathDictionary(os.path.join(getFabmetheusUtilitiesPath('geometry_plugins'), folderName), geometryDictionary)
    return geometryDictionary
def getGeometryPath(subName=''):
    """Return the geometry directory, optionally joined with subName."""
    return getJoinedPath(getFabmetheusUtilitiesPath('geometry'), subName)
def getGeometryToolsPath(subName=''):
    """Return the geometry_tools directory, optionally joined with subName."""
    return getJoinedPath(getGeometryPath('geometry_tools'), subName)
def getGeometryUtilitiesPath(subName=''):
    """Return the geometry_utilities directory, optionally joined with subName."""
    return getJoinedPath(getGeometryPath('geometry_utilities'), subName)
def getInterpretPluginsPath(subName=''):
    """Return the interpret_plugins directory, optionally joined with subName."""
    return getJoinedPath(getFabmetheusToolsPath('interpret_plugins'), subName)
def getJoinedPath(path, subName=''):
    """Return path joined with subName; return path unchanged when subName is empty."""
    return path if subName == '' else os.path.join(path, subName)
def getModuleWithDirectoryPath(directoryPath, fileName):
    """Import and return the module fileName found in directoryPath, or None.

    Fixes: sys.path is now restored in a finally block (the original restored
    it separately on each branch), and the bare except is narrowed to
    Exception so KeyboardInterrupt/SystemExit propagate.
    """
    if fileName == '':
        print('The file name in getModule in archive was empty.')
        return None
    originalSystemPath = sys.path[:]
    try:
        # Temporarily put the plugin directory first on the import path.
        sys.path.insert(0, directoryPath)
        return __import__(fileName)
    except Exception:
        print('')
        print('Exception traceback in getModuleWithDirectoryPath in archive:')
        traceback.print_exc(file=sys.stdout)
        print('')
        print('That error means; could not import a module with the fileName ' + fileName)
        print('and an absolute directory name of ' + directoryPath)
        print('')
        return None
    finally:
        sys.path = originalSystemPath
def getModuleWithPath(path):
    """Import and return the module at the given path."""
    return getModuleWithDirectoryPath(os.path.dirname(path), os.path.basename(path))
def getPluginFileNamesFromDirectoryPath(directoryPath):
    """Return the names of the python plugins in directoryPath, excluding __init__."""
    initPath = os.path.join(directoryPath, '__init__.py')
    return getFileNamesByFilePaths(getPythonFileNamesExceptInit(initPath))
def getProfilesPath(subName=''):
    """Return the profiles directory (settings dir + 'profiles'), optionally joined with subName."""
    return getJoinedPath(getSettingsPath('profiles'), subName)
def getPythonDirectoryNames(directoryName):
    """Return the subdirectories of directoryName that are python packages (contain __init__.py)."""
    subdirectoryNames = (os.path.join(directoryName, entry) for entry in os.listdir(directoryName))
    return [
        subdirectoryName
        for subdirectoryName in subdirectoryNames
        if os.path.isdir(subdirectoryName) and os.path.isfile(os.path.join(subdirectoryName, '__init__.py'))
    ]
def getPythonDirectoryNamesRecursively(directoryName=''):
    """Return directoryName and every nested python package directory below it.

    Returns [] when directoryName (cwd if empty) is not itself a package.
    """
    if directoryName == '':
        directoryName = os.getcwd()
    if not os.path.isfile(os.path.join(directoryName, '__init__.py')):
        return []
    collectedNames = [directoryName]
    for pythonDirectoryName in getPythonDirectoryNames(directoryName):
        collectedNames += getPythonDirectoryNamesRecursively(pythonDirectoryName)
    return collectedNames
def getPythonFileNamesExceptInit(fileInDirectory=''):
    """Return sorted python file paths beside fileInDirectory, excluding __init__.py."""
    pythonFileNames = getFilesWithFileTypeWithoutWords('py', ['__init__.py'], fileInDirectory)
    pythonFileNames.sort()
    return pythonFileNames
def getPythonFileNamesExceptInitRecursively(directoryName=''):
    """Return sorted python file paths under directoryName, excluding __init__.py files."""
    allFileNames = []
    for pythonDirectoryName in getPythonDirectoryNamesRecursively(directoryName):
        initPath = os.path.join(pythonDirectoryName, '__init__.py')
        allFileNames += getPythonFileNamesExceptInit(initPath)
    allFileNames.sort()
    return allFileNames
def getSettingsPath(subName=''):
    """Return the settings directory (globalTemporarySettingsPath), optionally joined with subName."""
    global globalTemporarySettingsPath
    return getJoinedPath(globalTemporarySettingsPath, subName)
def getSkeinforgePath(subName=''):
    """Return the skeinforge_application directory, optionally joined with subName."""
    return getJoinedPath(getFabmetheusPath('skeinforge_application'), subName)
def getSkeinforgePluginsPath(subName=''):
    """Return the skeinforge_plugins directory, optionally joined with subName."""
    return getJoinedPath(getSkeinforgePath('skeinforge_plugins'), subName)
def getSummarizedFileName(fileName):
    """Return the basename when fileName is in the current working directory, else fileName."""
    if os.path.dirname(fileName) == os.getcwd():
        return os.path.basename(fileName)
    return fileName
def getTemplatesPath(subName=''):
    """Return the templates directory, optionally joined with subName."""
    return getJoinedPath(getFabmetheusUtilitiesPath('templates'), subName)
def getTextIfEmpty(fileName, text):
    """Return text unchanged when non-empty, otherwise read it from fileName."""
    return text if text != '' else getFileText(fileName)
def getTextLines(text):
    """Split text into lines, normalizing Mac/Windows line endings first."""
    if '\r' in text:
        # Convert \r and \r\n endings to single \n before splitting.
        text = text.replace('\r', '\n').replace('\n\n', '\n')
    textLines = text.split('\n')
    # An empty text yields [] rather than [''].
    if textLines == ['']:
        return []
    return textLines
def getUntilDot(text):
    """Return text truncated at its last dot; text unchanged when there is no dot."""
    head, separator, _ = text.rpartition('.')
    return head if separator else text
def getVersionFileName():
    'Get the file name of the version date.'
    return getFabmetheusUtilitiesPath('version.txt')
def isFileWithFileTypeWithoutWords(fileType, fileName, words):
    """Return True if fileName's basename ends in '.fileType' and contains none of words."""
    baseName = os.path.basename(fileName)
    if not baseName.endswith('.' + fileType):
        return False
    return not any(word in baseName for word in words)
def makeDirectory(directoryPath):
    """Make a directory (and parents) if it does not already exist.

    Fix: the success message is now printed only after os.makedirs succeeds;
    the original announced 'The following directory was made' before even
    attempting creation, so a failure printed both the success and the error
    message.
    """
    if os.path.isdir(directoryPath):
        return
    try:
        os.makedirs(directoryPath)
        print('The following directory was made:')
        print(os.path.abspath(directoryPath))
    except OSError:
        print('Skeinforge can not make the directory %s so give it read/write permission for that directory and the containing directory.' % directoryPath)
def removeBackupFilesByType(fileType):
    """Delete backup files (extension fileType + '~') found recursively from the cwd."""
    for backupFilePath in getFilesWithFileTypesWithoutWordsRecursively([fileType + '~']):
        os.remove(backupFilePath)
def removeBackupFilesByTypes(fileTypes):
    """Delete backup files for each of the given file types."""
    for fileType in fileTypes:
        removeBackupFilesByType(fileType)
def writeFileMessageEnd(end, fileName, fileText, message):
    """Write fileText to fileName with its extension replaced by end, then print message."""
    suffixFileName = getUntilDot(fileName) + end
    writeFileText(suffixFileName, fileText)
    print(message + getSummarizedFileName(suffixFileName))
def writeFileText(fileName, fileText, writeMode='w+'):
    """Write a text to a file.

    Fix: uses a with statement so the handle is closed (and buffers flushed)
    even if write() raises; the original only closed on full success.
    """
    try:
        with open(fileName, writeMode) as openedFile:
            openedFile.write(fileText)
    except IOError:
        print('The file ' + fileName + ' can not be written to.')
| gpl-2.0 |
Argon-Zhou/django | tests/generic_inline_admin/tests.py | 154 | 22749 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber
class TestDataMixin(object):
    # Creates the superuser shared by all test classes below.
    @classmethod
    def setUpTestData(cls):
        # password = "secret"
        # Pre-hashed SHA1 value, matching the SHA1PasswordHasher configured in
        # the @override_settings of these test classes, so login with
        # 'super'/'secret' works.
        User.objects.create(
            pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
            is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(
DEBUG=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls",
)
class GenericAdminViewTest(TestDataMixin, TestCase):
    def setUp(self):
        """Log in and create one Episode with two generically attached Media rows."""
        self.client.login(username='super', password='secret')
        # Can't load content via a fixture (since the GenericForeignKey
        # relies on content type IDs, which will vary depending on what
        # other tests have been run), thus we do it here.
        e = Episode.objects.create(name='This Week in Django')
        self.episode_pk = e.pk
        m = Media(content_object=e, url='http://example.com/podcast.mp3')
        m.save()
        self.mp3_media_pk = m.pk
        m = Media(content_object=e, url='http://example.com/logo.png')
        m.save()
        self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
    def test_basic_edit_POST(self):
        """
        A smoke test to ensure POST on edit_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data: two existing media rows plus one empty extra form.
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
            "generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
            "generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
            "generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
            "generic_inline_admin-media-content_type-object_id-2-id": "",
            "generic_inline_admin-media-content_type-object_id-2-url": "",
        }
        url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere
    def test_generic_inline_formset(self):
        """generic_inlineformset_factory honors instance, queryset ordering and filtering."""
        EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
        e = Episode.objects.get(name='This Week in Django')
        # Works with no queryset
        formset = EpisodeMediaFormSet(instance=e)
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
        # A queryset can be used to alter display ordering
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
        # Works with a queryset that omits items
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
        self.assertEqual(len(formset.forms), 4)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
    def test_generic_inline_formset_factory(self):
        """Factory accepts exclude, and the formset queryset is ordered."""
        # Regression test for #10522.
        inline_formset = generic_inlineformset_factory(Media,
            exclude=('url',))
        # Regression test for #12340.
        e = Episode.objects.get(name='This Week in Django')
        formset = inline_formset(instance=e)
        self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
    def setUp(self):
        # Authenticate as the TestDataMixin superuser; RequestFactory lets
        # tests below drive ModelAdmin views directly.
        self.client.login(username='super', password='secret')
        self.factory = RequestFactory()
    def _create_object(self, model):
        """
        Create a model with an attached Media object via GFK. We can't
        load content via a fixture (since the GenericForeignKey relies on
        content type IDs, which will vary depending on what other tests
        have been run), thus we do it here.
        """
        e = model.objects.create(name='This Week in Django')
        Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
        return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def testMaxNumParam(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
    """Adding/deleting via the admin works when the generic inline's model
    participates in a uniqueness constraint."""
    def setUp(self):
        self.client.login(username='super', password='secret')
    def test_add(self):
        category_id = Category.objects.create(name='male').pk
        prefix = "generic_inline_admin-phonenumber-content_type-object_id"
        post_data = {"name": "John Doe"}
        # Management form plus one inline row for the generic formset.
        post_data.update({
            "%s-TOTAL_FORMS" % prefix: "1",
            "%s-INITIAL_FORMS" % prefix: "0",
            "%s-MAX_NUM_FORMS" % prefix: "0",
            "%s-0-id" % prefix: "",
            "%s-0-phone_number" % prefix: "555-555-5555",
            "%s-0-category" % prefix: "%s" % category_id,
        })
        add_url = reverse('admin:generic_inline_admin_contact_add')
        response = self.client.get(add_url)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(add_url, post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere
    def test_delete(self):
        from .models import Contact
        contact = Contact.objects.create(name='foo')
        PhoneNumber.objects.create(
            object_id=contact.id,
            content_type=ContentType.objects.get_for_model(Contact),
            phone_number="555-555-5555",
        )
        delete_url = reverse('admin:generic_inline_admin_contact_delete', args=[contact.pk])
        response = self.client.post(delete_url)
        self.assertContains(response, 'Are you sure you want to delete')
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(SimpleTestCase):
    """An inline whose formset forbids deletion exposes can_delete=False."""
    def test_no_deletion(self):
        permanent_inline = MediaPermanentInline(EpisodePermanent, admin_site)
        # get_formset() only needs a request-shaped placeholder here.
        formset_class = permanent_inline.get_formset(object())
        self.assertFalse(formset_class.can_delete)
class MockRequest(object):
    """Minimal stand-in for an HttpRequest; attributes are attached as needed."""
    pass
class MockSuperUser(object):
    """Fake user whose permission checks always succeed."""
    def has_perm(self, perm):
        # Grant every permission, mimicking a superuser.
        return True
# Module-level request shared by the tests below; the admin code under test
# only inspects request.user for permission checks.
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(SimpleTestCase):
    """Unit tests for GenericInlineModelAdmin form/formset construction:
    keyword handling in get_formset(), interaction of Meta.exclude with
    admin-level exclude/readonly_fields, and get_formsets_with_inlines()."""
    def setUp(self):
        self.site = AdminSite()
    def test_get_formset_kwargs(self):
        media_inline = MediaInline(Media, AdminSite())
        # Create a formset with default arguments
        formset = media_inline.get_formset(request)
        self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
        self.assertEqual(formset.can_order, False)
        # Create a formset with custom keyword arguments
        formset = media_inline.get_formset(request, max_num=100, can_order=True)
        self.assertEqual(formset.max_num, 100)
        self.assertEqual(formset.can_order, True)
    def test_custom_form_meta_exclude_with_readonly(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected when
        used in conjunction with `GenericInlineModelAdmin.readonly_fields`
        and when no `ModelAdmin.exclude` is defined.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            readonly_fields = ['description']
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # 'url' is excluded by the form, 'description' is readonly, so only
        # 'keywords' (plus bookkeeping fields) remains editable.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['keywords', 'id', 'DELETE'])
    def test_custom_form_meta_exclude(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected by
        `GenericInlineModelAdmin.get_formset`, and overridden if
        `ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
        Refs #15907.
        """
        # First with `GenericInlineModelAdmin` -----------------
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            exclude = ['description']
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # The inline's exclude wins over the form's Meta.exclude.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['url', 'keywords', 'id', 'DELETE'])
        # Then, only with `ModelForm` -----------------
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # With no admin-level exclude, the form's Meta.exclude applies.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['description', 'keywords', 'id', 'DELETE'])
    def test_get_fieldsets(self):
        # Test that get_fieldsets is called when figuring out form fields.
        # Refs #18681.
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                fields = '__all__'
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
            can_delete = False
            def get_fieldsets(self, request, obj=None):
                return [(None, {'fields': ['url', 'description']})]
        ma = MediaInline(Media, self.site)
        form = ma.get_formset(None).form
        self.assertEqual(form._meta.fields, ['url', 'description'])
    def test_get_formsets_with_inlines_returns_tuples(self):
        """
        Ensure that get_formsets_with_inlines() returns the correct tuples.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class AlternateInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                AlternateInline, MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        inlines = ma.get_inline_instances(request)
        # Each yielded formset must pair with its corresponding inline, in
        # the order the inlines were declared.
        for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
            self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
| bsd-3-clause |
ClearCorp/management-system | mgmtsystem_survey/__openerp__.py | 3 | 1560 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: metadata, dependencies and data files for the
# "Management System - Survey" module.
{
    "name": "Management System - Survey",
    "version": "8.0.1.0.0",
    "author": "Savoir-faire Linux,Odoo Community Association (OCA)",
    "website": "http://www.savoirfairelinux.com",
    "license": "AGPL-3",
    "category": "Management System",
    "description": """\
This module enables you to manage your satisfaction surveys and its answers.
""",
    # Modules that must be installed before this one.
    "depends": [
        'mgmtsystem',
        'survey'
    ],
    # XML files loaded on install/update.
    "data": [
        'data/survey_stage.xml',
        'views/survey_survey.xml',
    ],
    "demo": [
    ],
    'installable': True,
}
| agpl-3.0 |
ddboline/pylearn2 | pylearn2/models/tests/test_s3c_misc.py | 44 | 15100 | from __future__ import print_function
from theano.sandbox.linalg.ops import alloc_diag
from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import SufficientStatistics
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.utils import as_floatX
from theano import function
import numpy as np
import theano.tensor as T
from theano import config
class TestS3C_Misc:
    """Miscellaneous S3C checks: hand-derived analytical gradients of the
    expected energy, truncated KL and entropy terms are compared against
    Theano's automatic differentiation, and the scalar vs. per-example
    log-probability code paths are checked for agreement.
    NOTE: the tests currently require float64, so config.floatX is swapped
    in setUp/__init__ and restored in tearDown.
    """
    def setUp(self):
        # Temporarily change config.floatX to float64, as s3c these
        # tests currently fail with float32.
        self.prev_floatX = config.floatX
        config.floatX = 'float64'
    def tearDown(self):
        # Restore previous value of floatX
        config.floatX = self.prev_floatX
    def __init__(self):
        """ gets a small batch of data
        sets up an S3C model and learns on the data
        creates an expression for the log likelihood of the data
        """
        # We also have to change the value of config.floatX in __init__.
        self.prev_floatX = config.floatX
        config.floatX = 'float64'
        try:
            self.tol = 1e-5
            # Use a much smaller problem under DebugMode to keep runtime sane.
            if config.mode in ["DebugMode", "DEBUG_MODE"]:
                X = np.random.RandomState([1, 2, 3]).randn(30, 108)
                m, D = X.shape
                N = 10
            else:
                X = np.random.RandomState([1, 2, 3]).randn(1000, 108)
                m, D = X.shape
                N = 300
            self.model = S3C(nvis = D,
                             nhid = N,
                             irange = .5,
                             init_bias_hid = -.1,
                             init_B = 1.,
                             min_B = 1e-8,
                             max_B = 1e8,
                             tied_B = 1,
                             e_step = E_Step_Scan(
                                 h_new_coeff_schedule = [ .01 ]
                             ),
                             init_alpha = 1.,
                             min_alpha = 1e-8, max_alpha = 1e8,
                             init_mu = 1.,
                             m_step = Grad_M_Step( learning_rate = 1.0 ),
                             )
            self.orig_params = self.model.get_param_values()
            model = self.model
            # Mean-field inference results and sufficient statistics are
            # shared by all of the gradient tests below.
            self.mf_obs = model.e_step.infer(X)
            self.stats = SufficientStatistics.from_observations(needed_stats =
                    model.m_step.needed_stats(), V =X,
                    ** self.mf_obs)
            self.prob = self.model.expected_log_prob_vhs( self.stats , H_hat = self.mf_obs['H_hat'], S_hat = self.mf_obs['S_hat'])
            self.X = X
            self.m = m
            self.D = D
            self.N = N
        finally:
            config.floatX = self.prev_floatX
    def test_expected_log_prob_vhs_batch_match(self):
        """ verifies that expected_log_prob_vhs = mean(expected_log_prob_vhs_batch)
        expected_log_prob_vhs_batch is implemented in terms of expected_energy_vhs
        so this verifies that as well """
        scalar = self.model.expected_log_prob_vhs( stats = self.stats, H_hat = self.mf_obs['H_hat'], S_hat = self.mf_obs['S_hat'])
        batch = self.model.expected_log_prob_vhs_batch( V = self.X, H_hat = self.mf_obs['H_hat'], S_hat = self.mf_obs['S_hat'], var_s0_hat = self.mf_obs['var_s0_hat'], var_s1_hat = self.mf_obs['var_s1_hat'])
        f = function([], [scalar, batch] )
        res1, res2 = f()
        # Average per-example values in float64 before comparing.
        res2 = res2.mean(dtype='float64')
        print(res1, res2)
        assert np.allclose(res1, res2)
    def test_grad_alpha(self):
        """tests that the gradient of the log probability with respect to alpha
        matches my analytical derivation """
        #self.model.set_param_values(self.new_params)
        g = T.grad(self.prob, self.model.alpha, consider_constant = self.mf_obs.values())
        mu = self.model.mu
        alpha = self.model.alpha
        half = as_floatX(.5)
        mean_sq_s = self.stats.d['mean_sq_s']
        mean_hs = self.stats.d['mean_hs']
        mean_h = self.stats.d['mean_h']
        # Hand-derived d(log prob)/d(alpha), term by term.
        term1 = - half * mean_sq_s
        term2 = mu * mean_hs
        term3 = - half * T.sqr(mu) * mean_h
        term4 = half / alpha
        analytical = term1 + term2 + term3 + term4
        f = function([],(g,analytical))
        gv, av = f()
        assert gv.shape == av.shape
        max_diff = np.abs(gv-av).max()
        if max_diff > self.tol:
            print("gv")
            print(gv)
            print("av")
            print(av)
            raise Exception("analytical gradient on alpha deviates from theano gradient on alpha by up to "+str(max_diff))
    def test_grad_W(self):
        """tests that the gradient of the log probability with respect to W
        matches my analytical derivation """
        #self.model.set_param_values(self.new_params)
        g = T.grad(self.prob, self.model.W, consider_constant = self.mf_obs.values())
        B = self.model.B
        W = self.model.W
        mean_hsv = self.stats.d['mean_hsv']
        mean_sq_hs = self.stats.d['mean_sq_hs']
        mean_HS = self.mf_obs['H_hat'] * self.mf_obs['S_hat']
        m = mean_HS.shape[0]
        # E[(hs)(hs)^T] with the diagonal replaced by the exact second moment.
        outer_prod = T.dot(mean_HS.T,mean_HS)
        outer_prod.name = 'outer_prod<from_observations>'
        outer = outer_prod/m
        mask = T.identity_like(outer)
        second_hs = (1.-mask) * outer + alloc_diag(mean_sq_hs)
        term1 = (B * mean_hsv).T
        term2 = - B.dimshuffle(0,'x') * T.dot(W, second_hs)
        analytical = term1 + term2
        f = function([],(g,analytical))
        gv, av = f()
        assert gv.shape == av.shape
        max_diff = np.abs(gv-av).max()
        if max_diff > self.tol:
            print("gv")
            print(gv)
            print("av")
            print(av)
            raise Exception("analytical gradient on W deviates from theano gradient on W by up to "+str(max_diff))
    def test_d_kl_d_h(self):
        "tests that the gradient of the kl with respect to h matches my analytical version of it "
        model = self.model
        ip = self.model.e_step
        X = self.X
        assert X.shape[0] == self.m
        # Random mean-field parameters: H in (0, 1), S unconstrained.
        H = np.cast[config.floatX](self.model.rng.uniform(0.001,.999,(self.m, self.N)))
        S = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,(self.m, self.N)))
        H_var = T.matrix(name='H_var')
        H_var.tag.test_value = H
        S_var = T.matrix(name='S_var')
        S_var.tag.test_value = S
        sigma0 = ip.infer_var_s0_hat()
        Sigma1 = ip.infer_var_s1_hat()
        mu0 = T.zeros_like(model.mu)
        trunc_kl = ip.truncated_KL( V = X, obs = { 'H_hat' : H_var,
            'S_hat' : S_var,
            'var_s0_hat' : sigma0,
            'var_s1_hat' : Sigma1 } ).sum()
        assert len(trunc_kl.type.broadcastable) == 0
        grad_H = T.grad(trunc_kl, H_var)
        grad_func = function([H_var, S_var], grad_H)
        grad_theano = grad_func(H,S)
        half = as_floatX(0.5)
        one = as_floatX(1.)
        two = as_floatX(2.)
        pi = as_floatX(np.pi)
        e = as_floatX(np.e)
        mu = self.model.mu
        alpha = self.model.alpha
        W = self.model.W
        B = self.model.B
        w = self.model.w
        # Analytical d(KL)/d(H): negative-entropy terms (1-4) plus
        # expected-energy terms (5-12).
        term1 = T.log(H_var)
        term2 = -T.log(one - H_var)
        term3 = - half * T.log( Sigma1 * two * pi * e )
        term4 = half * T.log(sigma0 * two * pi * e )
        term5 = - self.model.bias_hid
        term6 = half * ( - sigma0 + Sigma1 + T.sqr(S_var) )
        term7 = - mu * alpha * S_var
        term8 = half * T.sqr(mu) * alpha
        term9 = - T.dot(X * self.model.B, self.model.W) * S_var
        term10 = S_var * T.dot(T.dot(H_var * S_var, W.T * B),W)
        term11 = - w * T.sqr(S_var) * H_var
        term12 = half * (Sigma1 + T.sqr(S_var)) * T.dot(B,T.sqr(W))
        analytical = term1 + term2 + term3 + term4 + term5 + term6 + term7 + term8 + term9 + term10 + term11 + term12
        grad_analytical = function([H_var, S_var], analytical)(H,S)
        if not np.allclose(grad_theano, grad_analytical):
            print('grad theano: ',(grad_theano.min(), grad_theano.mean(), grad_theano.max()))
            print('grad analytical: ',(grad_analytical.min(), grad_analytical.mean(), grad_analytical.max()))
            ad = np.abs(grad_theano-grad_analytical)
            print('abs diff: ',(ad.min(),ad.mean(),ad.max()))
            assert False
    def test_d_negent_d_h(self):
        "tests that the gradient of the negative entropy with respect to h matches my analytical version of it "
        model = self.model
        ip = self.model.e_step
        X = self.X
        assert X.shape[0] == self.m
        H = np.cast[config.floatX](self.model.rng.uniform(0.001,.999,(self.m, self.N)))
        S = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,(self.m, self.N)))
        H_var = T.matrix(name='H_var')
        H_var.tag.test_value = H
        S_var = T.matrix(name='S_var')
        S_var.tag.test_value = S
        sigma0 = ip.infer_var_s0_hat()
        Sigma1 = ip.infer_var_s1_hat()
        mu0 = T.zeros_like(model.mu)
        negent = - self.model.entropy_hs( H_hat = H_var,
                var_s0_hat = sigma0,
                var_s1_hat = Sigma1 ).sum()
        assert len(negent.type.broadcastable) == 0
        grad_H = T.grad(negent, H_var)
        grad_func = function([H_var, S_var], grad_H, on_unused_input = 'ignore' )
        grad_theano = grad_func(H,S)
        half = as_floatX(0.5)
        one = as_floatX(1.)
        two = as_floatX(2.)
        pi = as_floatX(np.pi)
        e = as_floatX(np.e)
        mu = self.model.mu
        alpha = self.model.alpha
        W = self.model.W
        B = self.model.B
        w = self.model.w
        # Analytical d(-entropy_hs)/d(H): bernoulli part plus the gaussian
        # differential-entropy difference between the s|h=1 and s|h=0 cases.
        term1 = T.log(H_var)
        term2 = -T.log(one - H_var)
        term3 = - half * T.log( Sigma1 * two * pi * e )
        term4 = half * T.log( sigma0 * two * pi * e )
        analytical = term1 + term2 + term3 + term4
        grad_analytical = function([H_var, S_var], analytical, on_unused_input = 'ignore')(H,S)
        if not np.allclose(grad_theano, grad_analytical):
            print('grad theano: ',(grad_theano.min(), grad_theano.mean(), grad_theano.max()))
            print('grad analytical: ',(grad_analytical.min(), grad_analytical.mean(), grad_analytical.max()))
            ad = np.abs(grad_theano-grad_analytical)
            print('abs diff: ',(ad.min(),ad.mean(),ad.max()))
            assert False
    def test_d_negent_h_d_h(self):
        "tests that the gradient of the negative entropy of h with respect to \hat{h} matches my analytical version of it "
        model = self.model
        ip = self.model.e_step
        X = self.X
        assert X.shape[0] == self.m
        H = np.cast[config.floatX](self.model.rng.uniform(0.001,.999,(self.m, self.N)))
        S = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,(self.m, self.N)))
        H_var = T.matrix(name='H_var')
        H_var.tag.test_value = H
        S_var = T.matrix(name='S_var')
        S_var.tag.test_value = S
        sigma0 = ip.infer_var_s0_hat()
        Sigma1 = ip.infer_var_s1_hat()
        mu0 = T.zeros_like(model.mu)
        negent = - self.model.entropy_h( H_hat = H_var ).sum()
        assert len(negent.type.broadcastable) == 0
        grad_H = T.grad(negent, H_var)
        grad_func = function([H_var, S_var], grad_H, on_unused_input = 'ignore')
        grad_theano = grad_func(H,S)
        half = as_floatX(0.5)
        one = as_floatX(1.)
        two = as_floatX(2.)
        pi = as_floatX(np.pi)
        e = as_floatX(np.e)
        mu = self.model.mu
        alpha = self.model.alpha
        W = self.model.W
        B = self.model.B
        w = self.model.w
        # Analytical d(-entropy_h)/d(H) for a bernoulli variable: logit(H).
        term1 = T.log(H_var)
        term2 = -T.log(one - H_var)
        analytical = term1 + term2
        grad_analytical = function([H_var, S_var], analytical, on_unused_input = 'ignore')(H,S)
        if not np.allclose(grad_theano, grad_analytical):
            print('grad theano: ',(grad_theano.min(), grad_theano.mean(), grad_theano.max()))
            print('grad analytical: ',(grad_analytical.min(), grad_analytical.mean(), grad_analytical.max()))
            ad = np.abs(grad_theano-grad_analytical)
            print('abs diff: ',(ad.min(),ad.mean(),ad.max()))
            assert False
    def test_d_ee_d_h(self):
        "tests that the gradient of the expected energy with respect to h matches my analytical version of it "
        model = self.model
        ip = self.model.e_step
        X = self.X
        assert X.shape[0] == self.m
        H = np.cast[config.floatX](self.model.rng.uniform(0.001,.999,(self.m, self.N)))
        S = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,(self.m, self.N)))
        H_var = T.matrix(name='H_var')
        H_var.tag.test_value = H
        S_var = T.matrix(name='S_var')
        S_var.tag.test_value = S
        sigma0 = ip.infer_var_s0_hat()
        Sigma1 = ip.infer_var_s1_hat()
        mu0 = T.zeros_like(model.mu)
        ee = self.model.expected_energy_vhs( V = X, H_hat = H_var,
                S_hat = S_var,
                var_s0_hat = sigma0,
                var_s1_hat = Sigma1 ).sum()
        assert len(ee.type.broadcastable) == 0
        grad_H = T.grad(ee, H_var)
        grad_func = function([H_var, S_var], grad_H)
        grad_theano = grad_func(H,S)
        half = as_floatX(0.5)
        one = as_floatX(1.)
        two = as_floatX(2.)
        pi = as_floatX(np.pi)
        e = as_floatX(np.e)
        mu = self.model.mu
        alpha = self.model.alpha
        W = self.model.W
        B = self.model.B
        w = self.model.w
        # Analytical d(expected energy)/d(H), term by term; these are the
        # energy terms of test_d_kl_d_h without the entropy terms.
        term1 = - self.model.bias_hid
        term2 = half * ( - sigma0 + Sigma1 + T.sqr(S_var) )
        term3 = - mu * alpha * S_var
        term4 = half * T.sqr(mu) * alpha
        term5 = - T.dot(X * self.model.B, self.model.W) * S_var
        term6 = S_var * T.dot(T.dot(H_var * S_var, W.T * B),W)
        term7 = - w * T.sqr(S_var) * H_var
        term8 = half * (Sigma1 + T.sqr(S_var)) * T.dot(B,T.sqr(W))
        analytical = term1 + term2 + term3 + term4 + term5 + term6 + term7 + term8
        grad_analytical = function([H_var, S_var], analytical, on_unused_input = 'ignore')(H,S)
        if not np.allclose(grad_theano, grad_analytical):
            print('grad theano: ',(grad_theano.min(), grad_theano.mean(), grad_theano.max()))
            print('grad analytical: ',(grad_analytical.min(), grad_analytical.mean(), grad_analytical.max()))
            ad = np.abs(grad_theano-grad_analytical)
            print('abs diff: ',(ad.min(),ad.mean(),ad.max()))
            assert False
if __name__ == '__main__':
    # Smoke-run one representative gradient check when invoked directly.
    TestS3C_Misc().test_d_ee_d_h()
| bsd-3-clause |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/tpu/models/official/mobilenet/mobilenet_model.py | 5 | 20505 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# This file is a copy of the model available via the tensorflow/models repo:
#
# https://raw.githubusercontent.com/tensorflow/models/master/research/slim/nets/mobilenet_v1.py
"""MobileNet v1.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
100% Mobilenet V1 (base) with input size 224x224:
See mobilenet_v1()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016
MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672
MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112
MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336
MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112
MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672
MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224
MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168
MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112
MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336
MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224
MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584
MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112
MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792
MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112
MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584
MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224
--------------------------------------------------------------------------------
Total: 3,185,088 567,716,352
75% Mobilenet V1 (base) with input size 128x128:
See mobilenet_v1_075()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208
MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736
MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592
MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368
MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592
MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736
MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184
MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184
MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592
MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368
MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184
MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592
MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592
MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296
MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592
MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592
MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184
--------------------------------------------------------------------------------
Total: 1,800,144 106,002,432
"""
# Tensorflow mandates these.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
# Standard Imports
import tensorflow as tf
# Shorthand for the TF-Slim layers API used throughout this file.
slim = tf.contrib.slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
# stride is the stride of the convolution
# depth is the number of channels or filters in a layer
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# _CONV_DEFS specifies the MobileNet body.
# Five stride-2 layers (indices 0, 2, 4, 6, 12) give an overall output
# stride of 32 when no atrous adjustment is requested.
_CONV_DEFS = [
    Conv(kernel=[3, 3], stride=2, depth=32),
    DepthSepConv(kernel=[3, 3], stride=1, depth=64),
    DepthSepConv(kernel=[3, 3], stride=2, depth=128),
    DepthSepConv(kernel=[3, 3], stride=1, depth=128),
    DepthSepConv(kernel=[3, 3], stride=2, depth=256),
    DepthSepConv(kernel=[3, 3], stride=1, depth=256),
    DepthSepConv(kernel=[3, 3], stride=2, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
    DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def mobilenet_v1_base(inputs,
                      final_endpoint='Conv2d_13_pointwise',
                      min_depth=8,
                      depth_multiplier=1.0,
                      conv_defs=None,
                      output_stride=None,
                      scope=None):
  """Mobilenet v1.
  Constructs a Mobilenet v1 network from inputs to the given final endpoint.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
      'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,
      'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
      'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
      'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    output_stride: An integer that specifies the requested ratio of input to
      output spatial resolution. If not None, then we invoke atrous convolution
      if necessary to prevent the network from reducing the spatial resolution
      of the activation maps. Allowed values are 8 (accurate fully convolutional
      mode), 16 (fast fully convolutional mode), 32 (classification mode).
    scope: Optional variable_scope.
  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
                or depth_multiplier <= 0, or the target output_stride is not
                allowed, or a conv_def is neither Conv nor DepthSepConv.
  """
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  end_points = {}
  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  if conv_defs is None:
    conv_defs = _CONV_DEFS
  if output_stride is not None and output_stride not in [8, 16, 32]:
    raise ValueError('Only allowed output_stride values are 8, 16, 32.')
  with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME'):
      # The current_stride variable keeps track of the output stride of the
      # activations, i.e., the running product of convolution strides up to the
      # current network layer. This allows us to invoke atrous convolution
      # whenever applying the next convolution would result in the activations
      # having output stride larger than the target output_stride.
      current_stride = 1
      # The atrous convolution rate parameter.
      rate = 1
      net = inputs
      for i, conv_def in enumerate(conv_defs):
        end_point_base = 'Conv2d_%d' % i
        if output_stride is not None and current_stride == output_stride:
          # If we have reached the target output_stride, then we need to employ
          # atrous convolution with stride=1 and multiply the atrous rate by the
          # current unit's stride for use in subsequent layers.
          layer_stride = 1
          layer_rate = rate
          rate *= conv_def.stride
        else:
          layer_stride = conv_def.stride
          layer_rate = 1
          current_stride *= conv_def.stride
        if isinstance(conv_def, Conv):
          end_point = end_point_base
          net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
                            stride=conv_def.stride,
                            normalizer_fn=slim.batch_norm,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        elif isinstance(conv_def, DepthSepConv):
          end_point = end_point_base + '_depthwise'
          # By passing filters=None
          # separable_conv2d produces only a depthwise convolution layer
          net = slim.separable_conv2d(net, None, conv_def.kernel,
                                      depth_multiplier=1,
                                      stride=layer_stride,
                                      rate=layer_rate,
                                      normalizer_fn=slim.batch_norm,
                                      scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
          end_point = end_point_base + '_pointwise'
          net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
                            stride=1,
                            normalizer_fn=slim.batch_norm,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        else:
          # BUGFIX: Conv/DepthSepConv namedtuples have no `ltype` field, so
          # the original `conv_def.ltype` raised AttributeError here and
          # masked the intended ValueError. Report the actual type instead.
          raise ValueError('Unknown convolution type %s for layer %d'
                           % (type(conv_def).__name__, i))
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
def mobilenet_v1(inputs,
                 num_classes=1000,
                 dropout_keep_prob=0.999,
                 is_training=True,
                 min_depth=8,
                 depth_multiplier=1.0,
                 conv_defs=None,
                 prediction_fn=tf.contrib.layers.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='MobilenetV1',
                 global_pool=False):
  """Mobilenet v1 model for classification.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    dropout_keep_prob: the percentage of activation values that are retained.
    is_training: whether is training or not.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    global_pool: Optional boolean flag to control the avgpooling before the
      logits layer. If false or unset, pooling is done with a fixed window
      that reduces default-sized inputs to 1x1, while larger inputs lead to
      larger outputs. If true, any input size is pooled down to 1x1.
  Returns:
    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
      if num_classes is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: Input rank is invalid.
  """
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
                     len(input_shape))
  # The arg_scope below pins is_training for every batch_norm/dropout layer
  # created inside this scope, including those inside mobilenet_v1_base.
  with tf.variable_scope(scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = mobilenet_v1_base(inputs, scope=scope,
                                          min_depth=min_depth,
                                          depth_multiplier=depth_multiplier,
                                          conv_defs=conv_defs)
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
          # NOTE: keep_dims is the TF1.x spelling; keeps [B, 1, 1, C] shape.
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size.
          # The kernel is clamped to the feature-map size, so default-sized
          # inputs reduce to 1x1 while larger inputs yield larger maps.
          kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a')
          end_points['AvgPool_1a'] = net
        if not num_classes:
          # num_classes of 0/None: return pre-logits features (pre-dropout).
          return net, end_points
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          # Drop the two singleton spatial dims: [B, 1, 1, C] -> [B, C].
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      if prediction_fn:
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Input resolution (pixels, square) expected by the canonical checkpoints.
mobilenet_v1.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
  """Like functools.partial, but carrying the wrapped function's metadata.
  functools.update_wrapper copies __name__, __doc__, etc. from func onto the
  partial, so the returned callable is introspection-friendly.
  """
  bound = functools.partial(func, *args, **kwargs)
  functools.update_wrapper(bound, func)
  return bound
# Width-multiplier variants of MobileNet v1 (0.75x / 0.50x / 0.25x channels).
# wrapped_partial keeps each alias' metadata pointing at mobilenet_v1.
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.
  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.
  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
  Returns:
    a tensor with the kernel size.
  """
  static_shape = input_tensor.get_shape().as_list()
  height, width = static_shape[1], static_shape[2]
  if height is None or width is None:
    # Spatial dims unknown at graph-build time: assume the input is large.
    return kernel_size
  return [min(height, kernel_size[0]), min(width, kernel_size[1])]
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
  """Defines the default MobilenetV1 arg scope.
  Args:
    is_training: Whether or not we're training the model.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the trunctated normal weight initializer.
    regularize_depthwise: Whether or not apply regularization on depthwise.
  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
  bn_params = {
      'is_training': is_training,
      'center': True,
      'scale': True,
      'decay': 0.9997,
      'epsilon': 0.001,
  }
  # Truncated-normal init for all conv weights; L2 decay is applied to the
  # pointwise convs always, and to depthwise convs only when requested.
  initializer = tf.truncated_normal_initializer(stddev=stddev)
  l2 = tf.contrib.layers.l2_regularizer(weight_decay)
  depthwise_reg = l2 if regularize_depthwise else None
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      weights_initializer=initializer,
                      activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], **bn_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=l2):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_reg) as sc:
          return sc
| apache-2.0 |
joh12041/quac | experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/tab_summary.py | 3 | 1630 | #!/usr/bin/env python
"""
Generate LaTeX table summaries of our results for the paper.
"""
# Copyright (c) Los Alamos National Security, LLC and others.
import sys
import xlrd
# Static LaTeX preamble: tabular column layout plus a two-row header
# (r^2 at four forecast horizons, then the best-forecast day count and r^2).
print r'''
\begin{tabular}{|ll|l|rrrr|rr|}
\hline
&
&
& \multicolumn{4}{c|}{\textbf{$\boldsymbol{r^2}$ at forecast}}
& \multicolumn{2}{c|}{\textbf{Best forec.}}
\\
\multicolumn{1}{|c}{\textbf{Disease}}
& \multicolumn{1}{c}{\textbf{Location}}
& \multicolumn{1}{|c|}{\textbf{Result}}
& \multicolumn{1}{c}{\textbf{0}}
& \multicolumn{1}{c}{\textbf{7}}
& \multicolumn{1}{c}{\textbf{14}}
& \multicolumn{1}{c|}{\textbf{28}}
& \multicolumn{1}{c}{\textbf{Days}}
& \multicolumn{1}{c|}{$\boldsymbol{r^2}$}
\\
'''
book = xlrd.open_workbook(sys.argv[1])
sh = book.sheet_by_index(0)
last_disease = None
for ri in xrange(2, sh.nrows):
v = sh.row_values(ri)
if (v[0]): # ppr?
disease = v[1]
if (disease != last_disease):
print r'\hline'
disease_pr = disease
else:
disease_pr = ''
print r' & '.join((disease_pr,
v[3], # location
v[8], # result
'%.2f' % v[15], # r^2 at nowcast
'%.2f' % v[14], # r^2 at 7-day forecast
'%.2f' % v[13], # r^2 at 14-day forecast
'%.2f' % v[11], # r^2 at 28-day forecast
'%d' % -v[10], # best offset
'%.2f' % v[9], # max(r^2)
)),
print r'\\'
last_disease = disease
print r'''
\hline
\end{tabular}
'''
| apache-2.0 |
wxgeo/geophar | wxgeometrie/sympy/strategies/tests/test_tree.py | 40 | 2593 | from sympy.strategies.tree import treeapply, greedy, allresults, brute
from sympy.core.compatibility import reduce
from functools import partial
def test_treeapply():
    """treeapply folds a nested list/tuple structure with per-type reducers."""
    nested = ([3, 3], [4, 1], 2)
    assert treeapply(nested, {list: min, tuple: max}) == 3

    def total(*args):
        return sum(args)

    def product(*args):
        return reduce(lambda acc, item: acc * item, args, 1)

    assert treeapply(nested, {list: total, tuple: product}) == 60
def test_treeapply_leaf():
    """The leaf callback transforms every leaf before the reducers run."""
    assert treeapply(3, {}, leaf=lambda v: v**2) == 9

    nested = ([3, 3], [4, 1], 2)
    bumped = ([4, 4], [5, 2], 3)
    assert treeapply(nested, {list: min, tuple: max}, leaf=lambda v: v + 1) == \
        treeapply(bumped, {list: min, tuple: max})
def test_treeapply_strategies():
    """Lists are joined with chain(); tuples become minimize() choices."""
    from sympy.strategies import chain, minimize
    join = {list: chain, tuple: minimize}

    def inc(x):
        return x + 1

    def dec(x):
        return x - 1

    def double(x):
        return 2*x

    # A bare callable is returned unchanged.
    assert treeapply(inc, join) == inc
    assert treeapply((inc, dec), join)(5) == minimize(inc, dec)(5)
    assert treeapply([inc, dec], join)(5) == chain(inc, dec)(5)

    tree = (inc, [dec, double])  # either inc or dec-then-double
    assert treeapply(tree, join)(5) == 6
    assert treeapply(tree, join)(1) == 0

    # Flipping the objective turns minimize into maximize.
    maximize = partial(minimize, objective=lambda x: -x)
    join = {list: chain, tuple: maximize}
    fn = treeapply(tree, join)
    assert fn(4) == 6  # highest value comes from the dec then double
    assert fn(1) == 2  # highest value comes from the inc
def test_greedy():
    """greedy() picks one branch per tuple according to the objective."""
    def inc(x):
        return x + 1

    def dec(x):
        return x - 1

    def double(x):
        return 2*x

    branch = [inc, (dec, double)]  # either inc or dec-then-double
    best = greedy(branch, objective=lambda x: -x)
    assert best(4) == 6  # highest value comes from the dec then double
    assert best(1) == 2  # highest value comes from the inc

    deep = [inc, dec, [inc, dec, [(inc, inc), (dec, dec)]]]
    assert greedy(deep)(10) == 8
    assert greedy(deep, objective=lambda x: -x)(10) == 12
def test_allresults():
    """allresults() enumerates every outcome: lists branch, tuples chain."""
    inc = lambda x: x+1
    dec = lambda x: x-1
    double = lambda x: x*2
    # NOTE(review): removed an unused `square` lambda that was never exercised.
    assert set(allresults(inc)(3)) == {inc(3)}
    assert set(allresults([inc, dec])(3)) == {2, 4}
    assert set(allresults((inc, dec))(3)) == {3}
    assert set(allresults([inc, (dec, double)])(4)) == {5, 6}
def test_brute():
    """brute() tries every expansion and keeps the best per the objective."""
    def inc(x):
        return x + 1

    def dec(x):
        return x - 1

    def square(x):
        return x**2

    strategy = ([inc, dec], square)
    best = brute(strategy, lambda x: -x)
    assert best(2) == (2 + 1)**2
    assert best(-2) == (-2 - 1)**2

    # Without an objective, brute on a single rule just applies it.
    assert brute(inc)(1) == 2
| gpl-2.0 |
aleju/ImageAugmenter | checks/check_noise.py | 2 | 2565 | from __future__ import print_function, division
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
def main():
    """Interactive check: display grids of raw noise samples, then quokka
    images augmented via noise-driven alpha blending."""
    grid_rows = 8
    grid_cols = 8
    height, width = 128, 128
    sample_size = 128

    # One generator per row group: simplex noise, frequency noise across a
    # range of exponents, and iterative / sigmoid-shaped aggregations.
    generators = [
        iap.SimplexNoise(),
        iap.FrequencyNoise(exponent=-4, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=-2, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=0, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=2, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=4, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size),
                           upscale_method=["nearest", "linear", "cubic"]),
        iap.IterativeNoiseAggregator(
            other_param=iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size),
                                           upscale_method=["nearest", "linear", "cubic"]),
            iterations=(1, 3),
            aggregation_method=["max", "avg"]
        ),
        iap.IterativeNoiseAggregator(
            other_param=iap.Sigmoid(
                iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size),
                                   upscale_method=["nearest", "linear", "cubic"]),
                threshold=(-10, 10),
                activated=0.33,
                mul=20,
                add=-10
            ),
            iterations=(1, 3),
            aggregation_method=["max", "avg"]
        )
    ]

    # Draw one (h, w) sample per generator per grid cell, keeping the same
    # cell-major draw order as before so RNG consumption is unchanged.
    samples = [[] for _ in generators]
    for _ in range(grid_rows * grid_cols):
        for idx, gen in enumerate(generators):
            samples[idx].append(gen.draw_samples((height, width)))
    grid = np.vstack([np.hstack(per_gen) for per_gen in samples])
    # Samples are floats in [0, 1]; scale up for display.
    ia.imshow((grid*255).astype(np.uint8))

    # Second check: noise-masked alpha blending of edge-detected quokkas.
    quokkas = [ia.quokka_square(size=(128, 128)) for _ in range(16)]
    augmenters = [
        iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0)),
        iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True),
        iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0)),
        iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True)
    ]
    rows = [np.hstack(aug.augment_images(quokkas)) for aug in augmenters]
    ia.imshow(np.vstack(rows))

if __name__ == "__main__":
    main()
| mit |
NickRuiz/wikitrans-pootle | local_apps/pootle_profile/urls.py | 5 | 1747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django.conf.urls.defaults import *
from django.conf import settings
# Profile-management views provided by this app.
urlpatterns = patterns('pootle_profile.views',
    (r'^login/?$', 'login'),
    (r'^logout/?$', 'logout'),
    (r'^edit/?$', 'profile_edit'),
    (r'^personal/edit/?$', 'edit_personal_info'),
)

# Password change/reset views come straight from django.contrib.auth.
urlpatterns += patterns('django.contrib.auth.views',
    (r'^password/change/$', 'password_change'),
    (r'^password/change/done/$', 'password_change_done'),
    (r'^password/reset/$', 'password_reset'),
    (r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'password_reset_confirm'),
    (r'^password/reset/complete/$', 'password_reset_complete'),
    (r'^password/reset/done/$', 'password_reset_done'),
)

# Only include registration urls if registration is enabled.
if settings.CAN_REGISTER:
    urlpatterns += patterns('', (r'', include('registration.urls')))

urlpatterns += patterns('',
    (r'', include('profiles.urls')),
)
| gpl-2.0 |
jjdmol/LOFAR | CEP/PyBDSM/src/python/mylogger.py | 1 | 4368 | """ WARNING, ERROR, and CRITICAL are always output to screen and to log file.
INFO and USERINFO always go to the log file. DEBUG goes to log file if debug is
True. USERINFO goes to screen only if quiet is False.
Use as follows:
mylog = mylogger.logging.getLogger("name")
mylog.info('info') --> print to logfile, but not to screen
mylog.userinfo(mylog, 'info') --> print to screen (if quiet==False)
and to logfile
"""
import logging
from socket import gethostname
import commands
import time
import copy
def init_logger(logfilename, quiet=False, debug=False):
    """Configure the shared "PyBDSM" logger.
    Registers the custom USERINFO level (INFO + 1) and attaches three
    handlers: a color-stripping file handler (DEBUG or INFO threshold
    depending on `debug`), a console handler for WARNING and above, and a
    console handler showing only USERINFO records (silenced when `quiet`).
    """
    logging.USERINFO = logging.INFO + 1
    logging.addLevelName(logging.USERINFO, 'USERINFO')
    logger = logging.getLogger("PyBDSM")
    logger.setLevel(logging.DEBUG)
    # First remove any existing handlers (in case PyBDSM has been run
    # before in this session but the quiet or debug options have changed
    while len(logger.handlers) > 0:
        logger.removeHandler(logger.handlers[0])
    # File handlers
    fh = ColorStripperHandler(logfilename)
    if debug:
        # For log file and debug on, print name and levelname
        fh.setLevel(logging.DEBUG)
        fmt1 = MultiLineFormatter('%(asctime)s %(name)-20s:: %(levelname)-8s: '\
                                  '%(message)s',
                                  datefmt='%a %d-%m-%Y %H:%M:%S')
    else:
        # For log file and debug off, don't print name and levelname as
        # they have no meaning to the user.
        fh.setLevel(logging.INFO)
        fmt1 = MultiLineFormatter('%(asctime)s:: %(levelname)-8s: %(message)s',
                                  datefmt='%a %d-%m-%Y %H:%M:%S')
    fh.setFormatter(fmt1)
    logger.addHandler(fh)
    # Console handler for warning, error, and critical: format includes levelname
    # ANSI colors are used
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    fmt2 = logging.Formatter('\033[31;1m%(levelname)s\033[0m: %(message)s')
    ch.setFormatter(fmt2)
    logger.addHandler(ch)
    # Console handler for USERINFO only: format does not include levelname
    # (the user does not need to see the levelname, as it has no meaning to them)
    # ANSI colors are allowed
    chi = logging.StreamHandler()
    chi.addFilter(InfoFilter())
    if quiet:
        # prints nothing, since filter lets only USERINFO through
        chi.setLevel(logging.WARNING)
    else:
        # prints only USERINFO
        chi.setLevel(logging.USERINFO)
    fmt3 = logging.Formatter('%(message)s')
    chi.setFormatter(fmt3)
    logger.addHandler(chi)
class InfoFilter(logging.Filter):
    # Lets only USERINFO through
    # NOTE(review): relies on logging.USERINFO being set by init_logger();
    # filtering a record before init_logger() runs would raise AttributeError.
    def filter(self, rec):
        return rec.levelno == logging.USERINFO
class MultiLineFormatter(logging.Formatter):
    """Formatter that indents continuation lines to align with the header.
    The header width is measured after stripping ANSI color codes so that
    the visible text of multi-line messages lines up.
    """
    def format(self, record):
        # Avoid shadowing the builtin `str` (the previous version bound the
        # formatted string to the name `str`).
        formatted = logging.Formatter.format(self, record)
        # partition() instead of split(): a message that happens to occur
        # more than once in the formatted output no longer raises ValueError
        # while logging; everything before the first occurrence is the header.
        header, _, _footer = formatted.partition(record.message)
        nocolor_header = strip_color(header)
        return formatted.replace('\n', '\n' + ' '*len(nocolor_header))
def userinfo(mylog, desc_str, val_str=''):
    """Writes a nicely formatted string to the log file and console
    mylog = logger
    desc_str = description string / message
    val_str = value string
    Message is constructed as:
    'desc_str .... : val_str'
    """
    bc = '\033[1;34m' # Blue
    nc = '\033[0m' # Normal text color
    if val_str == '':
        # Message-only form: render as a blue '--> message' arrow line.
        sep = ''
        if desc_str[:1] == '\n':
            # Keep a leading newline outside the colored region.
            bc += '\n'
            desc_str = desc_str[1:]
        desc_str = bc + '--> ' + desc_str + nc
    else:
        # Description/value form: pad the description to column 41 so the
        # ' : value' parts line up. Short descriptions get dot leaders;
        # descriptions already near the column get plain spaces.
        sep = ' : '
        if len(desc_str) < 40:
            desc_str += ' '
        if len(desc_str) < 40:
            while len(desc_str) < 41:
                desc_str += '.'
        else:
            while len(desc_str) < 41:
                desc_str += ' '
    # NOTE(review): assumes init_logger() has run so logging.USERINFO exists.
    mylog.log(logging.USERINFO, desc_str+sep+val_str)
class ColorStripperHandler(logging.FileHandler):
    # FileHandler variant that removes ANSI color codes before writing,
    # so the log file stays plain text while console output keeps color.
    def emit(self, record):
        """Strips ANSI color codes from file stream"""
        # Work on a copy so console handlers still see the colored message.
        myrecord = copy.copy(record)
        nocolor_msg = strip_color(myrecord.msg)
        myrecord.msg = nocolor_msg
        logging.FileHandler.emit(self, myrecord)
def strip_color(msg):
    """Strips specific ANSI color codes from an input string
    The color codes are hard-coded to those used above
    in userinfo() and in WARNING, ERROR, and CRITICAL.
    """
    # Split the string on each of the three codes in turn and keep only the
    # text between them; joining the pieces drops every occurrence.
    pieces = []
    for blue_chunk in msg.split('\033[1;34m'):
        for plain_chunk in blue_chunk.split('\033[0m'):
            pieces.extend(plain_chunk.split('\033[31;1m'))
    return ''.join(pieces)
| gpl-3.0 |
2014c2g2/2014c2 | exts/w2/static/Brython2.0.0-20140209-164925/Lib/posixpath.py | 722 | 14212 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
# Public API re-exported when this module is accessed as os.path.
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime","islink","exists","lexists","isdir","isfile",
           "ismount", "expanduser","expandvars","normpath","abspath",
           "samefile","sameopenfile","samestat",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None # POSIX has no alternative path separator
devnull = '/dev/null'
def _get_sep(path):
    # Return the path separator in the same type (bytes vs str) as `path`.
    return b'/' if isinstance(path, bytes) else '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix"""
    # TODO: on Mac OS X, this should really return s.lower().
    if isinstance(s, (bytes, str)):
        return s
    raise TypeError("normcase() argument must be str or bytes, "
                    "not '{}'".format(s.__class__.__name__))
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute"""
    # Absolute means it starts with the separator of the matching type.
    return s.startswith(b'/' if isinstance(s, bytes) else '/')
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded.  An empty last part will result in a path that
    ends with a separator."""
    sep = b'/' if isinstance(a, bytes) else '/'
    path = a
    try:
        for part in p:
            if part.startswith(sep):
                # Absolute component: restart from here.
                path = part
            elif not path or path.endswith(sep):
                path += part
            else:
                path += sep + part
    except TypeError:
        if all(isinstance(s, (str, bytes, bytearray)) for s in (a,) + p):
            # Must have a mixture of text and binary data
            raise TypeError("Can't mix strings and bytes in path "
                            "components.") from None
        raise
    return path
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    sep = b'/' if isinstance(p, bytes) else '/'
    cut = p.rfind(sep) + 1
    head, tail = p[:cut], p[cut:]
    # Strip trailing slashes from head unless it is all slashes (the root,
    # or the POSIX-special '//').
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Pick separators matching the argument's type (bytes vs str); the heavy
    # lifting is shared with the other path modules via genericpath._splitext.
    if isinstance(p, bytes):
        sep = b'/'
        extsep = b'.'
    else:
        sep = '/'
        extsep = '.'
    return genericpath._splitext(p, sep, None, extsep)
# Reuse the shared helper's docstring (POSIX has no altsep, hence None above).
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    drive = p[:0]  # '' or b'', matching the input type
    return drive, p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
    """Returns the final component of a pathname"""
    sep = b'/' if isinstance(p, bytes) else '/'
    # Everything after the last separator (rfind returns -1 when absent,
    # so +1 yields the whole string in that case).
    return p[p.rfind(sep) + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
    """Returns the directory component of a pathname"""
    sep = b'/' if isinstance(p, bytes) else '/'
    head = p[:p.rfind(sep) + 1]
    # Strip trailing slashes unless head is all slashes ('/' or '//').
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        # Nonexistent path, or a platform whose os lacks lstat().
        return False
    return stat.S_ISLNK(mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    # lstat (unlike stat) does not follow symlinks, so a dangling link
    # still counts as existing.
    try:
        os.lstat(path)
        return True
    except os.error:
        return False
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    st1 = os.stat(f1)
    st2 = os.stat(f2)
    # Inlined samestat(): identical inode on the identical device.
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    st1 = os.fstat(fp1)
    st2 = os.fstat(fp2)
    # Inlined samestat(): identical inode on the identical device.
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    # Same inode number on the same device means the same file.
    return (s1.st_ino, s1.st_dev) == (s2.st_ino, s2.st_dev)
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point"""
    if islink(path):
        # A symlink can never be a mount point
        return False
    try:
        s1 = os.lstat(path)
        if isinstance(path, bytes):
            parent = join(path, b'..')
        else:
            parent = join(path, '..')
        s2 = os.lstat(parent)
    except os.error:
        return False # It doesn't exist -- so not a mount point :-)
    # A mount point lives on a different device than its parent, or is its
    # own parent (the root of a filesystem).
    dev1 = s1.st_dev
    dev2 = s2.st_dev
    if dev1 != dev2:
        return True     # path/.. on a different device as path
    ino1 = s1.st_ino
    ino2 = s2.st_ino
    if ino1 == ino2:
        return True     # path/.. is the same i-node as path
    return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if isinstance(path, bytes):
        tilde = b'~'
    else:
        tilde = '~'
    if not path.startswith(tilde):
        return path
    sep = _get_sep(path)
    # i marks the end of the '~' or '~user' prefix.
    i = path.find(sep, 1)
    if i < 0:
        i = len(path)
    if i == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' not in os.environ:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
        else:
            userhome = os.environ['HOME']
    else:
        # '~user': look the named user up in the password database.
        import pwd
        name = path[1:i]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        try:
            pwent = pwd.getpwnam(name)
        except KeyError:
            # Unknown user: return the path unchanged.
            return path
        userhome = pwent.pw_dir
    if isinstance(path, bytes):
        userhome = os.fsencode(userhome)
        root = b'/'
    else:
        root = '/'
    # Avoid a double slash when the home dir ends with '/'; if everything
    # cancels out, fall back to the root.
    userhome = userhome.rstrip(root)
    return (userhome + path[i:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
# Lazily-compiled regexes matching $var and ${var}, one each for str and
# bytes input; cached at module level across calls.
_varprog = None
_varprogb = None
def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog, _varprogb
    if isinstance(path, bytes):
        if b'$' not in path:
            return path
        if not _varprogb:
            import re
            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprogb.search
        start = b'{'
        end = b'}'
    else:
        if '$' not in path:
            return path
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprog.search
        start = '{'
        end = '}'
    # Substitute matches one at a time; scanning resumes after each inserted
    # value so substituted text is not itself re-expanded.
    i = 0
    while True:
        m = search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        # Strip the braces of the ${var} form.
        if name.startswith(start) and name.endswith(end):
            name = name[1:-1]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        if name in os.environ:
            tail = path[j:]
            value = os.environ[name]
            if isinstance(path, bytes):
                value = value.encode('ASCII')
            path = path[:i] + value
            i = len(path)
            path += tail
        else:
            # Unknown variable: leave it in place and continue after it.
            i = j
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    if isinstance(path, bytes):
        sep, empty, dot, dotdot = b'/', b'', b'.', b'..'
    else:
        sep, empty, dot, dotdot = '/', '', '.', '..'
    if path == empty:
        return dot
    # POSIX allows one or two initial slashes, but treats three or more
    # as a single slash.
    lead = 1 if path.startswith(sep) else 0
    if lead and path.startswith(sep * 2) and not path.startswith(sep * 3):
        lead = 2
    kept = []
    for part in path.split(sep):
        if part in (empty, dot):
            continue
        if part != dotdot:
            kept.append(part)
        elif (not lead and not kept) or (kept and kept[-1] == dotdot):
            # A leading '..' on a relative path (or one stacked on another
            # '..') cannot be collapsed and must be preserved.
            kept.append(part)
        elif kept:
            kept.pop()
        # else: '..' directly under the root is dropped.
    result = sep * lead + sep.join(kept)
    return result or dot
def abspath(path):
    """Return an absolute path."""
    if not isabs(path):
        # Anchor relative paths at the current directory, matching the
        # argument's type (bytes vs str).
        cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # filename[:0] yields '' or b'' so resolution matches the input type.
    resolved, _ok = _joinrealpath(filename[:0], filename, {})
    return abspath(resolved)
# Join two paths, normalizing ang eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
    # Resolve `rest` relative to the already-resolved `path`, expanding any
    # symlinks encountered.  `seen` maps symlink paths to their resolution
    # (None while in progress) and is used to detect loops.  Returns
    # (resolved_path, ok); ok is False when a symlink loop was found.
    if isinstance(path, bytes):
        sep = b'/'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '/'
        curdir = '.'
        pardir = '..'
    if isabs(rest):
        rest = rest[1:]
        path = sep
    while rest:
        name, _, rest = rest.partition(sep)
        if not name or name == curdir:
            # current dir
            continue
        if name == pardir:
            # parent dir
            if path:
                path, name = split(path)
                if name == pardir:
                    # Already at a trail of '..': keep stacking them.
                    path = join(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = join(path, name)
        if not islink(newpath):
            path = newpath
            continue
        # Resolve the symbolic link
        if newpath in seen:
            # Already seen this path
            path = seen[newpath]
            if path is not None:
                # use cached value
                continue
            # The symlink is not resolved, so we must have a symlink loop.
            # Return already resolved part + rest of the path unchanged.
            return join(newpath, rest), False
        seen[newpath] = None # not resolved symlink
        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
        if not ok:
            return join(path, rest), False
        seen[newpath] = path # resolved symlink
    return path, True
# Historically true only on Darwin, whose filesystem stores names as Unicode.
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=None):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    if isinstance(path, bytes):
        curdir, sep, pardir = b'.', b'/', b'..'
    else:
        curdir, sep, pardir = '.', '/', '..'
    if start is None:
        start = curdir
    start_parts = [part for part in abspath(start).split(sep) if part]
    path_parts = [part for part in abspath(path).split(sep) if part]
    # How many leading components the two absolute paths share.
    common = len(commonprefix([start_parts, path_parts]))
    # Climb out of the non-shared part of `start`, then descend into `path`.
    hops = [pardir] * (len(start_parts) - common) + path_parts[common:]
    if not hops:
        return curdir
    return join(*hops)
| gpl-2.0 |
byterom/android_external_chromium_org | tools/metrics/histograms/PRESUBMIT.py | 109 | 1422 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
  """Checks that histograms.xml is pretty-printed and well-formatted."""
  for affected_file in input_api.AffectedTextFiles():
    path = affected_file.AbsoluteLocalPath()
    # Only act on the histograms.xml living next to this PRESUBMIT script.
    if (input_api.basename(path) != 'histograms.xml'
        or input_api.os_path.dirname(path) != input_api.PresubmitLocalPath()):
      continue
    cwd = input_api.os_path.dirname(path)
    pretty_exit = input_api.subprocess.call(
        ['python', 'pretty_print.py', '--presubmit'], cwd=cwd)
    if pretty_exit != 0:
      return [output_api.PresubmitError(
          'histograms.xml is not formatted correctly; run pretty_print.py '
          'to fix')]
    validate_exit = input_api.subprocess.call(
        ['python', 'validate_format.py'], cwd=cwd)
    if validate_exit != 0:
      return [output_api.PresubmitError(
          'histograms.xml is not well formatted; run validate_format.py '
          'and fix the reported errors')]
  return []
return []
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point for uploads; delegates to the shared check.
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point for commits; delegates to the shared check.
  return CheckChange(input_api, output_api)
| bsd-3-clause |
jonparrott/storage-getting-started-python | main.py | 2 | 5511 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line sample for Google Cloud Storage.
Usage:
python main.py [--logging_level=<log-level>]
"""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import logging
import os
import sys
import gflags
import httplib2
import oauth2client.client as oauthclient
import oauth2client.file as oauthfile
import oauth2client.tools as oauthtools
import gcs.gcs_commands as gcs_commands
from gcs.gcs_xml import GcsXml as Gcs
FLAGS = gflags.FLAGS
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
gflags.DEFINE_enum(
'logging_level', 'INFO', LOG_LEVELS, 'Set the level of logging detail.')
CLIENT_SECRETS = 'client_secrets.json'
CREDENTIALS_FILE = 'gcs_credentials.dat'
PROJECT_FILE = 'project_info'
SCOPE = 'https://www.googleapis.com/auth/devstorage.full_control'
def init_client(auth_http, project_id):
  """Build the Cloud Storage client used by the command objects.

  Clients are available per module. To switch the client implementation,
  update the Gcs import at the top of this file.

  Args:
    auth_http: An authorized httplib2.Http instance.
    project_id: A string Cloud Storage project id, ex: '123456'.

  Returns:
    An instance of gcs.Gcs.
  """
  return Gcs(auth_http, project_id)
def get_project_id():
  """Retrieves Cloud Storage project id from user or file.

  The id is cached in PROJECT_FILE the first time the user is prompted, so
  later runs read it back without asking again.

  Returns:
    The string project id.
  """
  try:
    # Fast path: a previous run already cached the project id. The 'with'
    # statement guarantees the handle is closed even if read() raises
    # (the original could leak the handle on error).
    with open(PROJECT_FILE, 'r') as project_file:
      return project_file.read()
  except IOError:
    # No cache yet: prompt the user and persist the answer. The file is only
    # created after a successful prompt, so an interrupted run does not leave
    # an empty cache behind.
    project_id = raw_input(
        'Enter your Cloud Storage project id (found in the API console): ')
    with open(PROJECT_FILE, 'w') as project_file:
      project_file.write(project_id)
    return project_id
def get_auth_http():
  """Runs the OAuth 2.0 installed application flow.

  Loads the OAuth client configuration from CLIENT_SECRETS, reuses cached
  credentials from CREDENTIALS_FILE when present and still valid, and
  otherwise runs the interactive installed-app flow and stores the newly
  obtained credentials for later runs.

  Returns:
    An authorized httplib2.Http instance.
  """
  message = ('Please configure OAuth 2.0 by populating the client_secrets.json '
             'file found at: %s' % (os.path.join(os.path.dirname(__file__),
                                                 CLIENT_SECRETS)))
  flow = oauthclient.flow_from_clientsecrets(
      CLIENT_SECRETS, scope=SCOPE, message=message)
  storage = oauthfile.Storage(CREDENTIALS_FILE)
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    # No usable cached credentials: run the interactive flow (may open a
    # browser) and persist the result via `storage`.
    credentials = oauthtools.run(flow, storage)
  http = httplib2.Http()
  auth_http = credentials.authorize(http)
  return auth_http
def main(argv):
"""Main application control."""
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
logging.error('%s\\nUsage: %s ARGS\\n%s', e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
numeric_level = getattr(logging, FLAGS.logging_level.upper())
if not isinstance(numeric_level, int):
logging.error('Invalid log level: %s', FLAGS.logging_level)
logging.basicConfig(level=numeric_level)
if FLAGS.logging_level == 'DEBUG': httplib2.debuglevel = 1
auth_http = get_auth_http()
project_id = get_project_id()
gcs_client = init_client(auth_http, project_id)
commands = [
gcs_commands.GetBucketsCommand('Get all buckets', gcs_client),
gcs_commands.GetBucketCommand('Get a bucket', gcs_client),
gcs_commands.GetBucketCorsCommand('Get bucket CORS', gcs_client),
gcs_commands.GetBucketLocationCommand('Get bucket location', gcs_client),
gcs_commands.InsertBucketCommand('Create a bucket', gcs_client),
gcs_commands.SetBucketCorsCommand('Set bucket CORS', gcs_client),
gcs_commands.DeleteBucketCommand('Delete a bucket', gcs_client),
gcs_commands.GetObjectCommand('Download an object', gcs_client),
gcs_commands.GetObjectAclsCommand('Get object ACLs', gcs_client),
gcs_commands.GetObjectMetadataCommand('Get object metadata', gcs_client),
gcs_commands.InsertObjectCommand('Upload an object', gcs_client),
gcs_commands.CopyObjectCommand('Copy an object', gcs_client),
gcs_commands.DeleteObjectCommand('Delete an object', gcs_client),
]
while True:
print 'What would you like to do? Enter the number.'
for i in range(len(commands)):
print '%d: %s' % (i, commands[i].description)
print '%d: Quit' % len(commands)
selection = raw_input('Enter your selection: ')
try:
selection = int(selection)
except ValueError, e:
logging.error('Enter a number.')
continue
if selection > len(commands) or selection < 0:
logging.error('Selection not recognized.')
continue
if selection == len(commands): break
try:
commands[selection].run_command()
except Exception, e:
logging.error('Error running command. Please try again.')
logging.error(e)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
oliver-sanders/cylc | cylc/flow/batch_sys_handlers/at.py | 1 | 4271 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Logic to submit jobs to the "at" batch system."""
import errno
import os
import re
from subprocess import PIPE
class AtCommandHandler():
    """Logic to submit jobs to the "at" batch system.

    Task job scripts are handed to the simple 'at' scheduler. Note that
    (1) the 'atd' daemon service must be running; (2) the atq command
    does not report whether a job is running or not.

    How to make tasks stay in the queue until tea time:
    [runtime]
        [[MyTask]]
            [[[job]]]
                batch system = at
                batch submit command template = at teatime
    """

    # Known stderr strings that indicate the atd daemon is not running.
    ERR_NO_ATD_STRS = [
        "Can't open /var/run/atd.pid to signal atd. No atd running?",
        "Warning: at daemon not running",
    ]
    SHOULD_KILL_PROC_GROUP = True
    SHOULD_POLL_PROC_GROUP = True
    KILL_CMD_TMPL = "atrm '%(job_id)s'"
    POLL_CMD = "atq"
    # Routine stderr noise from 'at' that should be suppressed.
    REC_ERR_FILTERS = [
        re.compile("warning: commands will be executed using /bin/sh")]
    REC_ID_FROM_SUBMIT_ERR = re.compile(r"\Ajob\s(?P<id>\S+)\sat")
    # The SUBMIT_CMD_STDIN_TMPL below requires an "sh" compatible shell, so
    # the safest way is to force "at" to run under "/bin/sh" by exporting
    # SHELL=/bin/sh for the command.
    SUBMIT_CMD_ENV = {"SHELL": "/bin/sh"}
    SUBMIT_CMD_TMPL = "at now"
    # The perl wrapper puts the job script in its own process group, which
    # allows the script and all of its child processes to be killed together.
    SUBMIT_CMD_STDIN_TMPL = (
        r"exec perl -e 'setpgrp(0,0);exec(@ARGV)'" +
        r" '%(job)s' 1>'%(job)s.out' 2>'%(job)s.err'")
    SUBMIT_CMD_STDIN_TMPL_2 = (
        r"exec perl -e 'setpgrp(0,0);exec(@ARGV)'" +
        r" timeout --signal=XCPU %(execution_time_limit)d" +
        r" '%(job)s' 1>'%(job)s.out' 2>'%(job)s.err'")

    # atq properties:
    # * stdout is "job-num date hour queue username", e.g.:
    #   1762 Wed May 15 00:20:00 2013 = hilary
    # * queue is '=' if running
    #
    def filter_submit_output(self, out, err):
        """Suppress at's routine output to stderr.

        Moves the standard job ID line from stderr to stdout, drops the
        "commands will be executed using /bin/sh" warning (which refers to
        the command line that runs the job script), and raises
        OSError(ESRCH) when stderr shows that atd is not running. Any other
        stderr content is preserved.

        Call get_id() first, to extract the job ID.
        """
        kept_err_lines = []
        for line in (err or "").splitlines(True):
            if self.REC_ID_FROM_SUBMIT_ERR.match(line):
                # Standard job ID message belongs on stdout.
                out += line
            elif any(rec.match(line) for rec in self.REC_ERR_FILTERS):
                # Routine warning: suppress.
                continue
            elif line.strip() in self.ERR_NO_ATD_STRS:
                raise OSError(
                    errno.ESRCH, os.strerror(errno.ESRCH), line)
            else:
                kept_err_lines.append(line)
        return out, "".join(kept_err_lines)

    @classmethod
    def get_submit_stdin(cls, job_file_path, submit_opts):
        """Return proc_stdin_arg, proc_stdin_value."""
        if "execution_time_limit" in submit_opts:
            # Wrap the job in 'timeout' when a wallclock limit is given.
            stdin_value = cls.SUBMIT_CMD_STDIN_TMPL_2 % {
                "job": job_file_path,
                "execution_time_limit": submit_opts["execution_time_limit"]}
        else:
            stdin_value = cls.SUBMIT_CMD_STDIN_TMPL % {"job": job_file_path}
        return (PIPE, stdin_value)


BATCH_SYS_HANDLER = AtCommandHandler()
| gpl-3.0 |
dzamie/weasyl | weasyl/image.py | 1 | 6891 | # favorite.py
import logging
import os
from sanpera.exception import SanperaError
from sanpera.image import Image
from sanpera import geometry
import web
from error import WeasylError
import files
COVER_SIZE = 1024, 3000
def read(filename):
    """Decode the image at `filename` and return a sanpera Image.

    Raises:
        WeasylError('imageDecodeError'): if the file cannot be decoded; the
            underlying exception is logged at DEBUG level first.
    """
    try:
        return Image.read(filename)
    except SanperaError:
        web.ctx.log_exc(level=logging.DEBUG)
        raise WeasylError('imageDecodeError')
def from_string(filedata):
    """Decode in-memory image bytes and return a sanpera Image.

    Raises:
        WeasylError('imageDecodeError'): if the data cannot be decoded; the
            underlying exception is logged at DEBUG level first.
    """
    try:
        return Image.from_buffer(filedata)
    except SanperaError:
        web.ctx.log_exc(level=logging.DEBUG)
        raise WeasylError('imageDecodeError')
# Return a dictionary containing the image format, dimensions, and file size.
# If a particular element of the result dictionary cannot be determined, it
# will be assigned to None; if the filename does not appear to refer to a valid
# image file, a ValueError will be raised if `exception` is True else None will
# be returned.
def get_info(filename, exception=False, printable=False):
    """Return the format, dimensions, and file size of an image file.

    Args:
        filename: Path to the image file.
        exception: If True, raise ValueError for a falsy filename instead of
            returning None.
        printable: Deprecated; must be False.

    Returns:
        A dict with keys 'format' (file extension), 'setting' (one-letter
        type flag), 'dimensions' ((width, height)), and 'filesize' (bytes),
        or None when `filename` is falsy and `exception` is False.
    """
    # NOTE(review): assert statements are stripped under `python -O`, so this
    # guard on the deprecated parameter would then silently pass.
    assert not printable  # deprecated parameter
    if not filename:
        if exception:
            raise ValueError
        else:
            return
    im = read(filename)
    filesize = os.path.getsize(filename)
    return {
        # File extension
        "format": image_extension(im),
        # File type flag
        "setting": image_setting(im),
        # Dimensions list
        "dimensions": (im.size.width, im.size.height),
        # File size
        "filesize": filesize,
    }
def image_extension(im):
    """Return the canonical file extension for `im`, or None if unknown."""
    return {
        'JPG': '.jpg',
        'JPEG': '.jpg',
        'PNG': '.png',
        'GIF': '.gif',
    }.get(im.original_format)
def image_setting(im):
    """Return the one-letter file type flag for `im`, or None if unknown."""
    return {
        'JPG': 'J',
        'JPEG': 'J',
        'PNG': 'P',
        'GIF': 'G',
    }.get(im.original_format)
def image_file_type(im):
    """Return the bare file-type name ('jpg', 'png', 'gif'), or None."""
    extension = image_extension(im)
    return None if extension is None else extension.lstrip('.')
def get_frames(filename):
    """
    Return the number of frames in the image file.
    """
    return len(read(filename))
def unanimate(im):
    """Return an image holding only the first frame of `im`.

    Single-frame images are returned unchanged.
    """
    if len(im) > 1:
        single = Image()
        single.append(im[0])
        return single
    return im
def get_dimensions(filename, inline=False):
    """
    Return the dimensions of the image file as (width, height); a tuple when
    `inline` is True, otherwise a list.
    """
    size = read(filename).size
    dimensions = (size.width, size.height)
    return dimensions if inline else list(dimensions)
def check_crop(dim, x1, y1, x2, y2):
    """
    Return True if the crop rectangle (x1, y1)-(x2, y2) is non-empty and lies
    entirely within an image of dimensions `dim` (width, height), else False.
    """
    # Chained comparisons encode exactly the original conjunction:
    # all coordinates non-negative, strictly ordered, and within bounds.
    return 0 <= x1 < x2 <= dim[0] and 0 <= y1 < y2 <= dim[1]
def check_type(filename, secure=True):
    """
    Return a truthy value if `filename` refers to a JPEG/PNG/GIF image.

    With secure=True the file content is actually decoded; otherwise only
    the filename extension is examined.
    """
    if not secure:
        # Preserves the historical quirk of returning the (falsy) filename
        # itself when it is empty, rather than False.
        return filename and filename[-4:] in [".jpg", ".png", ".gif"]
    try:
        im = Image.read(filename)
    except SanperaError:
        return False
    return im.original_format in ['JPEG', 'PNG', 'GIF']
def _resize(im, width, height):
    """
    Resizes the image to fit within the specified height and width; aspect ratio
    is preserved. Images always preserve animation and might even result in a
    better-optimized animated gif.
    """
    # Resize only when the image exceeds the bounding box. NOTE(review): a
    # previous comment claimed "return None if we don't" resize, but the code
    # always returns an image (the input unchanged when it already fits), so
    # callers that branch on a None result never take that branch.
    if im.size.width > width or im.size.height > height:
        im = im.resized(im.size.fit_inside((width, height)))
    return im
def resize(filename, width, height, destination=None, animate=False):
    """Resize the image in `filename` to fit within width x height.

    If `destination` is None the file is resized in place via a temporary
    '<filename>.new' file renamed over the original; otherwise the result is
    written to `destination` and the original is left untouched.

    NOTE(review): the `animate` parameter is accepted but never used here.

    Raises:
        WeasylError("FileType"): if the file is not a recognized image type.
    """
    in_place = False
    if not destination:
        destination = filename + '.new'
        in_place = True
    im = read(filename)
    if not image_extension(im):
        raise WeasylError("FileType")
    files.ensure_file_directory(filename)
    im = correct_image_and_call(_resize, im, width, height)
    if im is not None:
        im.write(destination)
        if in_place:
            os.rename(destination, filename)
    # if there's no need to resize, in-place resize is a no-op. otherwise copy
    # the source to the destination. NOTE(review): since _resize currently
    # never returns None, this branch appears unreachable — confirm before
    # relying on it.
    elif not in_place:
        files.copy(filename, destination)
def resize_image(im, width, height):
    """Like `resize`, but operate on an Image object and return the result."""
    return correct_image_and_call(_resize, im, width, height) or im
def make_popup(filename, destination=None):
    """
    Create a popup image file (max 300x300); if `destination` is passed, a new
    file will be created and the original left unaltered, else the original
    file will be altered.
    """
    resize(filename, 300, 300, destination=destination)
def make_cover(filename, destination=None):
    """
    Create a cover image file (bounded by COVER_SIZE); if `destination` is
    passed, a new file will be created and the original left unaltered, else
    the original file will be altered.
    """
    resize(filename, *COVER_SIZE, destination=destination)
def make_cover_image(im):
    """Return `im` resized to fit within COVER_SIZE (in-memory variant)."""
    return resize_image(im, *COVER_SIZE)
def correct_image_and_call(f, im, *a, **kw):
    """
    Call a function, passing in an image where the canvas size of each frame is
    the same.
    The function can return an image to post-process or None.
    """
    animated = len(im) > 1
    # either of these operations make the image satisfy the contraint
    # `all(im.size == frame.size for frame in im)`
    if animated:
        im = im.coalesced()
    else:
        im = im.cropped(im[0].canvas)
    # returns a new image to post-process or None
    im = f(im, *a, **kw)
    if animated and im is not None:
        # Coalescing expanded every frame to full canvas size; re-optimize
        # the frames before the result is written out as an animated gif.
        im = im.optimized_for_animated_gif()
    return im
def _shrinkcrop(im, size, bounds=None):
    # With explicit bounds: crop to `bounds` (when it differs from the whole
    # canvas), then scale the crop to the requested size if necessary.
    if bounds is not None:
        ret = im
        if bounds.position != geometry.origin or bounds.size != ret.size:
            ret = ret.cropped(bounds)
        if ret.size != size:
            ret = ret.resized(size)
        return ret
    elif im.size == size:
        # Already the target size; nothing to do.
        return im
    # Otherwise: scale the image down so it just covers `size`, then crop the
    # excess symmetrically (centered crop).
    shrunk_size = im.size.fit_around(size)
    shrunk = im
    if shrunk.size != shrunk_size:
        shrunk = shrunk.resized(shrunk_size)
    x1 = (shrunk.size.width - size.width) // 2
    y1 = (shrunk.size.height - size.height) // 2
    bounds = geometry.Rectangle(x1, y1, x1 + size.width, y1 + size.height)
    return shrunk.cropped(bounds)
def shrinkcrop(im, size, bounds=None):
    """Scale-and-center-crop `im` to exactly `size`, verifying the result.

    Raises WeasylError('thumbnailingMessedUp') if the produced image does not
    have the requested dimensions.
    """
    ret = correct_image_and_call(_shrinkcrop, im, size, bounds)
    if ret.size != size or (len(ret) == 1 and ret[0].size != size):
        ignored_sizes = ret.size, ret[0].size  # to log these locals
        ignored_sizes  # to shut pyflakes up
        raise WeasylError('thumbnailingMessedUp')
    return ret
| apache-2.0 |
ivano666/tensorflow | tensorflow/python/kernel_tests/spacetodepth_op_test.py | 10 | 8179 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SpaceToDepthTest(tf.test.TestCase):
  """Functional tests for the space_to_depth op (NHWC inputs).

  Each test builds a small input, applies tf.space_to_depth with a given
  block_size, and compares against a hand-computed expected output.
  """

  def _testOne(self, inputs, block_size, outputs):
    # Evaluate on both CPU and GPU and compare with the expected output.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        x_tf = tf.space_to_depth(tf.to_float(inputs), block_size)
        self.assertAllEqual(x_tf.eval(), outputs)

  def testBasic(self):
    # Smallest case: a 2x2x1 image collapses into one 4-channel pixel.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered spatially.
  def testLargerInput2x2(self):
    x_np = [[[[1], [2], [5], [6]],
             [[3], [4], [7], [8]],
             [[9], [10], [13], [14]],
             [[11], [12], [15], [16]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4],
               [5, 6, 7, 8]],
              [[9, 10, 11, 12],
               [13, 14, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered in depth. Here, larger block size.
  def testLargerInput4x4(self):
    x_np = [[[[1], [2], [5], [6]],
             [[3], [4], [7], [8]],
             [[9], [10], [13], [14]],
             [[11], [12], [15], [16]]]]
    block_size = 4
    x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths.
  # To make sure elements are properly interleaved in depth.
  def testDepthInterleaved(self):
    x_np = [[[[1, 10], [2, 20]],
             [[3, 30], [4, 40]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths. Here an odd depth.
  # To make sure elements are properly interleaved in depth.
  def testDepthInterleavedDepth3(self):
    x_np = [[[[1, 2, 3], [4, 5, 6]],
             [[7, 8, 9], [10, 11, 12]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions AND for larger input depths.
  # To make sure elements are properly interleaved in depth and ordered
  # spatially.
  def testDepthInterleavedLarge(self):
    x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
             [[3, 30], [4, 40], [7, 70], [8, 80]],
             [[9, 90], [10, 100], [13, 130], [14, 140]],
             [[11, 110], [12, 120], [15, 150], [16, 160]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40],
               [5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120],
               [13, 130, 14, 140, 15, 150, 16, 160]]]]
    self._testOne(x_np, block_size, x_out)

  def testBlockSize2Batch10(self):
    # Checks that every element of a batch is transformed independently.
    block_size = 2

    def batch_input_elt(i):
      return [[[1 * i], [2 * i], [5 * i], [6 * i]],
              [[3 * i], [4 * i], [7 * i], [8 * i]],
              [[9 * i], [10 * i], [13 * i], [14 * i]],
              [[11 * i], [12 * i], [15 * i], [16 * i]]]

    def batch_output_elt(i):
      return [[[1 * i, 2 * i, 3 * i, 4 * i],
               [5 * i, 6 * i, 7 * i, 8 * i]],
              [[9 * i, 10 * i, 11 * i, 12 * i],
               [13 * i, 14 * i, 15 * i, 16 * i]]]

    batch_size = 10
    x_np = [batch_input_elt(i) for i in range(batch_size)]
    x_out = [batch_output_elt(i) for i in range(batch_size)]
    self._testOne(x_np, block_size, x_out)

  # Tests for different width and height.
  def testNonSquare(self):
    x_np = [[[[1, 10], [2, 20]],
             [[3, 30], [4, 40]],
             [[5, 50], [6, 60]],
             [[7, 70], [8, 80]],
             [[9, 90], [10, 100]],
             [[11, 110], [12, 120]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
              [[5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120]]]]
    self._testOne(x_np, block_size, x_out)

  # Error handling:

  def testInputWrongDimMissingDepth(self):
    # The input is missing the last dimension ("depth")
    x_np = [[[1, 2],
             [3, 4]]]
    block_size = 2
    with self.assertRaises(ValueError):
      out_tf = tf.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testInputWrongDimMissingBatch(self):
    # The input is missing the first dimension ("batch")
    x_np = [[[1], [2]],
            [[3], [4]]]
    block_size = 2
    with self.assertRaises(ValueError):
      _ = tf.space_to_depth(x_np, block_size)

  def testBlockSize0(self):
    # The block size is 0.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = tf.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeOne(self):
    # The block size is 1. The block size needs to be > 1.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = tf.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeLarger(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 10
    with self.assertRaises(IndexError):
      out_tf = tf.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeNotDivisibleWidth(self):
    # The block size divides width but not height.
    x_np = [[[[1], [2], [3]],
             [[3], [4], [7]]]]
    block_size = 3
    with self.assertRaises(IndexError):
      _ = tf.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleHeight(self):
    # The block size divides height but not width.
    x_np = [[[[1], [2]],
             [[3], [4]],
             [[5], [6]]]]
    block_size = 3
    with self.assertRaises(IndexError):
      _ = tf.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleBoth(self):
    # The block size does not divide neither width or height.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 3
    with self.assertRaises(IndexError):
      _ = tf.space_to_depth(x_np, block_size)
class SpaceToDepthGradientTest(tf.test.TestCase):
  """Gradient checks for the space_to_depth op."""

  # Check the gradients.
  def _checkGrad(self, x, block_size):
    assert 4 == x.ndim
    with self.test_session():
      tf_x = tf.convert_to_tensor(x)
      tf_y = tf.space_to_depth(tf_x, block_size)
      epsilon = 1e-2
      ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      # Theoretical and numerical Jacobians must agree to ~1e-2.
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

  # Tests a gradient for space_to_depth of x which is a four dimensional
  # tensor of shape [b, h * block_size, w * block_size, d].
  def _compare(self, b, h, w, d, block_size):
    block_size_sq = block_size * block_size
    x = np.random.normal(
        0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
            [b, h * block_size, w * block_size, d])
    self._checkGrad(x, block_size)

  # Don't use very large numbers as dimensions here as the result is tensor
  # with cartesian product of the dimensions.
  def testSmall(self):
    block_size = 2
    self._compare(1, 2, 3, 5, block_size)

  def testSmall2(self):
    block_size = 2
    self._compare(2, 4, 3, 2, block_size)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
eclipse-ease-addons/engines | jython/org.jython/Lib/distutils/tests/test_config.py | 95 | 3184 | """Tests for distutils.pypirc.pypirc."""
import sys
import os
import unittest
import tempfile
import shutil
from distutils.core import PyPIRCCommand
from distutils.core import Distribution
from distutils.log import set_threshold
from distutils.log import WARN
from distutils.tests import support
from test.test_support import run_unittest
PYPIRC = """\
[distutils]
index-servers =
server1
server2
[server1]
username:me
password:secret
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
"""
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""
WANTED = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:xxx
"""
class PyPIRCCommandTestCase(support.TempdirManager,
                            support.LoggingSilencer,
                            support.EnvironGuard,
                            unittest.TestCase):
    """Tests for reading and writing the .pypirc configuration file."""

    def setUp(self):
        """Patches the environment."""
        super(PyPIRCCommandTestCase, self).setUp()
        # Point HOME at a temp dir so the user's real ~/.pypirc is untouched.
        self.tmp_dir = self.mkdtemp()
        os.environ['HOME'] = self.tmp_dir
        self.rc = os.path.join(self.tmp_dir, '.pypirc')
        self.dist = Distribution()

        # Minimal concrete PyPIRCCommand: the option hooks are abstract on
        # the base class, so stub them out.
        class command(PyPIRCCommand):
            def __init__(self, dist):
                PyPIRCCommand.__init__(self, dist)
            def initialize_options(self):
                pass
            finalize_options = initialize_options

        self._cmd = command
        self.old_threshold = set_threshold(WARN)

    def tearDown(self):
        """Removes the patch."""
        set_threshold(self.old_threshold)
        super(PyPIRCCommandTestCase, self).tearDown()

    def test_server_registration(self):
        # This test makes sure PyPIRCCommand knows how to:
        # 1. handle several sections in .pypirc
        # 2. handle the old format

        # new format
        self.write_file(self.rc, PYPIRC)
        cmd = self._cmd(self.dist)
        config = cmd._read_pypirc()

        config = config.items()
        config.sort()
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'http://pypi.python.org/pypi'),
                  ('server', 'server1'), ('username', 'me')]
        self.assertEqual(config, waited)

        # old format
        self.write_file(self.rc, PYPIRC_OLD)
        config = cmd._read_pypirc()
        config = config.items()
        config.sort()
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'http://pypi.python.org/pypi'),
                  ('server', 'server-login'), ('username', 'tarek')]
        self.assertEqual(config, waited)

    def test_server_empty_registration(self):
        # _store_pypirc should create the file with the default
        # single-server layout when no .pypirc exists yet.
        cmd = self._cmd(self.dist)
        rc = cmd._get_rc_file()
        self.assertTrue(not os.path.exists(rc))
        cmd._store_pypirc('tarek', 'xxx')
        self.assertTrue(os.path.exists(rc))
        f = open(rc)
        try:
            content = f.read()
            self.assertEqual(content, WANTED)
        finally:
            f.close()
def test_suite():
    """Return a test suite containing all tests from this module."""
    return unittest.makeSuite(PyPIRCCommandTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| epl-1.0 |
arista-eosplus/ansible | lib/ansible/modules/packaging/language/pip.py | 14 | 22412 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: pip
short_description: Manages Python library dependencies.
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
or C(requirements)."
version_added: "0.7"
options:
name:
description:
- The name of a Python library to install or the url of the remote package.
- As of 2.2 you can supply a list of names.
required: false
default: null
version:
description:
- The version number to install of the Python library specified in the I(name) parameter
required: false
default: null
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the 'executable' parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional virtualenv_site_packages, virtualenv_command,
and virtualenv_python options affect the creation of the virtualenv.
required: false
default: null
virtualenv_site_packages:
version_added: "1.0"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command or a pathname to the command to create the virtual
environment with. For example C(pyvenv), C(virtualenv),
C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
required: false
default: virtualenv
virtualenv_python:
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
For example C(python3.5), C(python2.7). When not specified, the
Python version used to run the ansible module is used. This parameter
should not be used when C(virtualenv_command) is using C(pyvenv) or
the C(-m venv) module.
required: false
default: null
state:
description:
- The state of module
- The 'forcereinstall' option is only available in Ansible 2.1 and above.
required: false
default: present
choices: [ "present", "absent", "latest", "forcereinstall" ]
extra_args:
description:
- Extra arguments passed to pip.
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable flag for versioning URLs.
required: false
default: yes
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
version_added: "1.3"
required: false
default: null
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run pip for a specific version of Python installed in the system. For
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
By default, it will take the appropriate version for the python interpreter
use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., 0077) and you want to pip install
packages which are to be used by all users. Note that this requires you
to specify desired umask mode in octal, with a leading 0 (e.g., 0077).
version_added: "2.1"
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- By default, this module will use the appropriate version of pip for the
interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
- pip:
name: bottle
# Install (Bottle) python package on version 0.11.
- pip:
name: bottle
version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.
- pip:
name: git+http://myrepo/app/MyApp
editable: false
# Install (MyApp) from local tarball
- pip:
name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
# Install specified python requirements.
- pip:
requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
- pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
- pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.
- pip:
name: bottle
executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
- pip:
name: bottle
state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
- pip:
name: bottle
umask: 0022
become: True
'''
import tempfile
import re
import os
import sys
from ansible.module_utils.basic import AnsibleModule, is_executable
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
def _get_cmd_options(module, cmd):
thiscmd = cmd + " --help"
rc, stdout, stderr = module.run_command(thiscmd)
if rc != 0:
module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
words = stdout.strip().split()
cmd_options = [x for x in words if x.startswith('--')]
return cmd_options
def _get_full_name(name, version=None):
if version is None:
resp = name
else:
resp = name + '==' + version
return resp
def _get_packages(module, pip, chdir):
'''Return results of pip command to get packages.'''
# Try 'pip list' command first.
command = '%s list' % pip
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (pip version too old) then use 'pip freeze'.
if rc != 0:
command = '%s freeze' % pip
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
return (command, out, err)
def _is_present(name, version, installed_pkgs, pkg_command):
'''Return whether or not package is installed.'''
for pkg in installed_pkgs:
# Package listing will be different depending on which pip
# command was used ('pip list' vs. 'pip freeze').
if 'list' in pkg_command:
pkg = pkg.replace('(', '').replace(')', '')
if ',' in pkg:
pkg_name, pkg_version, _ = pkg.replace(',', '').split(' ')
else:
pkg_name, pkg_version = pkg.split(' ')
elif 'freeze' in pkg_command:
if '==' in pkg:
pkg_name, pkg_version = pkg.split('==')
else:
continue
else:
continue
if pkg_name == name and (version is None or version == pkg_version):
return True
return False
def _get_pip(module, env=None, executable=None):
    """Locate the pip executable to use.

    Resolution order: an absolute ``executable`` path wins outright; a bare
    ``executable`` name becomes the only lookup candidate; otherwise
    version-appropriate defaults are tried ('pip2'/'pip' on Python 2,
    'pip3' on Python 3).  When ``env`` (a virtualenv root) is given,
    candidates are only looked up inside ``env/bin``.  Fails the module
    run if nothing suitable is found.
    """
    # Older pip only installed under the "/usr/bin/pip" name. Many Linux
    # distros install it there.
    # By default, we try to use pip required for the current python
    # interpreter, so people can use pip to install modules dependencies
    candidate_pip_basenames = ('pip2', 'pip')
    if PY3:
        # pip under python3 installs the "/usr/bin/pip3" name
        candidate_pip_basenames = ('pip3',)
    pip = None
    if executable is not None:
        if os.path.isabs(executable):
            pip = executable
        else:
            # If you define your own executable that executable should be the only candidate.
            # As noted in the docs, executable doesn't work with virtualenvs.
            candidate_pip_basenames = (executable,)
    if pip is None:
        if env is None:
            opt_dirs = []
            for basename in candidate_pip_basenames:
                pip = module.get_bin_path(basename, False, opt_dirs)
                if pip is not None:
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find any of %s to use. pip'
                                     ' needs to be installed.' % ', '.join(candidate_pip_basenames))
        else:
            # If we're using a virtualenv we must use the pip from the
            # virtualenv
            venv_dir = os.path.join(env, 'bin')
            candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
            for basename in candidate_pip_basenames:
                candidate = os.path.join(venv_dir, basename)
                if os.path.exists(candidate) and is_executable(candidate):
                    pip = candidate
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find pip in the virtualenv,'
                                     ' %s, under any of these names: %s. Make sure pip is'
                                     ' present in the virtualenv.' % (env,
                                                                      ', '.join(candidate_pip_basenames)))
    return pip
def _fail(module, cmd, out, err):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, env=None):
"""This is only needed for special packages which do not show up in pip freeze
pip and setuptools fall into this category.
:returns: a string containing the version number if the package is
installed. None if the package is not installed.
"""
if env:
opt_dirs = ['%s/bin' % env]
else:
opt_dirs = []
python_bin = module.get_bin_path('python', False, opt_dirs)
if python_bin is None:
formatted_dep = None
else:
rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
if rc:
formatted_dep = None
else:
formatted_dep = '%s==%s' % (package, out.strip())
return formatted_dep
def main():
    """Ansible entry point: install or remove Python packages with pip.

    Resolves the pip executable (creating a virtualenv first if requested
    and missing), builds the pip command line from the module parameters,
    runs it, and reports whether anything changed.  The whole run happens
    under the requested umask, restored in the ``finally`` block.
    """
    # Map the module's 'state' values to the pip sub-command implementing them.
    state_map = dict(
        present='install',
        absent='uninstall -y',
        latest='install -U',
        forcereinstall='install -U --force-reinstall',
    )

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=state_map.keys()),
            name=dict(type='list'),
            version=dict(type='str'),
            requirements=dict(),
            virtualenv=dict(type='path'),
            virtualenv_site_packages=dict(default=False, type='bool'),
            virtualenv_command=dict(default='virtualenv', type='path'),
            virtualenv_python=dict(type='str'),
            use_mirrors=dict(default=True, type='bool'),
            extra_args=dict(),
            editable=dict(default=True, type='bool'),
            chdir=dict(type='path'),
            executable=dict(type='path'),
            umask=dict(),
        ),
        required_one_of=[['name', 'requirements']],
        mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
        supports_check_mode=True
    )

    state = module.params['state']
    name = module.params['name']
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']
    umask = module.params['umask']

    # umask arrives as a string (e.g. "0022") and must parse as octal.
    if umask and not isinstance(umask, int):
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg="umask must be an octal integer",
                             details=to_native(sys.exc_info()[1]))

    old_umask = None
    if umask is not None:
        old_umask = os.umask(umask)
    try:
        if state == 'latest' and version is not None:
            module.fail_json(msg='version is incompatible with state=latest')

        if chdir is None:
            # this is done to avoid permissions issues with privilege escalation and virtualenvs
            chdir = tempfile.gettempdir()

        err = ''
        out = ''

        env = module.params['virtualenv']

        if env:
            if not os.path.exists(os.path.join(env, 'bin', 'activate')):
                # The virtualenv does not exist yet -- create it.
                if module.check_mode:
                    module.exit_json(changed=True)

                cmd = module.params['virtualenv_command']
                if os.path.basename(cmd) == cmd:
                    cmd = module.get_bin_path(cmd, True)

                if module.params['virtualenv_site_packages']:
                    cmd += ' --system-site-packages'
                else:
                    cmd_opts = _get_cmd_options(module, cmd)
                    if '--no-site-packages' in cmd_opts:
                        cmd += ' --no-site-packages'

                # -p is a virtualenv option, not compatible with pyenv or venv
                # this if validates if the command being used is not any of them
                if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
                    if virtualenv_python:
                        cmd += ' -p%s' % virtualenv_python
                    elif PY3:
                        # Ubuntu currently has a patch making virtualenv always
                        # try to use python2. Since Ubuntu16 works without
                        # python2 installed, this is a problem. This code mimics
                        # the upstream behaviour of using the python which invoked
                        # virtualenv to determine which python is used inside of
                        # the virtualenv (when none are specified).
                        cmd += ' -p%s' % sys.executable

                # if venv or pyvenv are used and virtualenv_python is defined, then
                # virtualenv_python is ignored, this has to be acknowledged
                elif module.params['virtualenv_python']:
                    module.fail_json(
                        msg='virtualenv_python should not be used when'
                            ' using the venv module or pyvenv as virtualenv_command'
                    )

                cmd = "%s %s" % (cmd, env)
                rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
                out += out_venv
                err += err_venv
                if rc != 0:
                    _fail(module, cmd, out, err)

        pip = _get_pip(module, env, module.params['executable'])

        cmd = '%s %s' % (pip, state_map[state])

        # If there's a virtualenv we want things we install to be able to use other
        # installations that exist as binaries within this virtualenv. Example: we
        # install cython and then gevent -- gevent needs to use the cython binary,
        # not just a python package that will be found by calling the right python.
        # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
        # in run_command by setting path_prefix here.
        path_prefix = None
        if env:
            path_prefix = "/".join(pip.split('/')[:-1])

        # Automatically apply -e option to extra_args when source is a VCS url. VCS
        # includes those beginning with svn+, git+, hg+ or bzr+
        has_vcs = False
        if name:
            for pkg in name:
                if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)):
                    has_vcs = True
                    break

        if has_vcs and module.params['editable']:
            args_list = []  # used if extra_args is not used at all
            if extra_args:
                args_list = extra_args.split(' ')
            if '-e' not in args_list:
                args_list.append('-e')
                # Ok, we will reconstruct the option string
                extra_args = ' '.join(args_list)

        if extra_args:
            cmd += ' %s' % extra_args

        if name:
            for pkg in name:
                cmd += ' %s' % _get_full_name(pkg, version)
        else:
            if requirements:
                cmd += ' -r %s' % requirements

        if module.check_mode:
            # Anything that cannot be checked cheaply is reported as changed.
            if extra_args or requirements or state == 'latest' or not name:
                module.exit_json(changed=True)

            elif has_vcs:
                module.exit_json(changed=True)

            pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)

            out += out_pip
            err += err_pip

            changed = False
            if name:
                # Drop pip's advisory chatter and empty lines from the listing.
                pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]

                if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
                    # Older versions of pip (pre-1.3) do not have pip list.
                    # pip freeze does not list setuptools or pip in its output
                    # So we need to get those via a specialcase
                    for pkg in ('setuptools', 'pip'):
                        if pkg in name:
                            formatted_dep = _get_package_info(module, pkg, env)
                            if formatted_dep is not None:
                                pkg_list.append(formatted_dep)
                                out += '%s\n' % formatted_dep

                for pkg in name:
                    is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
                    if (state == 'present' and not is_present) or (state == 'absent' and is_present):
                        changed = True
                        break
            module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)

        # Snapshot the package listing beforehand when pip's own output is not
        # a reliable change indicator (requirements files, VCS urls).
        if requirements or has_vcs:
            _, out_freeze_before, _ = _get_packages(module, pip, chdir)
        else:
            out_freeze_before = None

        rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
        out += out_pip
        err += err_pip
        if rc == 1 and state == 'absent' and \
                ('not installed' in out_pip or 'not installed' in err_pip):
            pass  # rc is 1 when attempting to uninstall non-installed package
        elif rc != 0:
            _fail(module, cmd, out, err)

        if state == 'absent':
            changed = 'Successfully uninstalled' in out_pip
        else:
            # BUG FIX: the original nested a second, unreachable
            # 'if out_freeze_before is None' check inside this else branch;
            # the single check below is the intended logic.
            if out_freeze_before is None:
                changed = 'Successfully installed' in out_pip
            else:
                _, out_freeze_after, _ = _get_packages(module, pip, chdir)
                changed = out_freeze_before != out_freeze_after

        module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
                         state=state, requirements=requirements, virtualenv=env,
                         stdout=out, stderr=err)
    finally:
        if old_umask is not None:
            os.umask(old_umask)
if __name__ == '__main__':
main()
| gpl-3.0 |
SheffieldML/GPy | GPy/models/gp_var_gauss.py | 1 | 1092 | # Copyright (c) 2014, James Hensman, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import GP
from ..core.parameterization.param import Param
from ..inference.latent_function_inference import VarGauss
log_2_pi = np.log(2*np.pi)
class GPVariationalGaussianApproximation(GP):
    """
    The Variational Gaussian Approximation revisited

    .. rubric:: References

    .. [opper_archambeau_2009] Opper, M.; Archambeau, C.; The Variational Gaussian Approximation Revisited. Neural Comput. 2009, pages 786-792.
    """
    def __init__(self, X, Y, kernel, likelihood, Y_metadata=None):
        # One variational site (alpha, beta) per data point; a single
        # latent function is assumed for now.
        n = Y.shape[0]
        self.alpha = Param('alpha', np.zeros((n, 1)))  # only one latent fn for now.
        self.beta = Param('beta', np.ones(n))
        inference = VarGauss(self.alpha, self.beta)
        super(GPVariationalGaussianApproximation, self).__init__(
            X, Y, kernel, likelihood,
            name='VarGP', inference_method=inference, Y_metadata=Y_metadata)
        # Register the variational parameters with the optimiser
        # (order matters: alpha first, then beta, as in the original).
        self.link_parameter(self.alpha)
        self.link_parameter(self.beta)
| bsd-3-clause |
SyamGadde/cython | Cython/Compiler/Tests/TestUtilityLoad.py | 129 | 3351 | import unittest
from Cython.Compiler import Code, UtilityCode
def strip_2tup(tup):
    """Strip whitespace from both elements of a 2-tuple, passing falsy
    elements ('' or None) through unchanged."""
    first, second = tup
    return (first and first.strip(), second and second.strip())
class TestUtilityLoader(unittest.TestCase):
    """
    Test loading UtilityCodes
    """

    expected = "test {{loader}} prototype", "test {{loader}} impl"

    required = "req {{loader}} proto", "req {{loader}} impl"

    context = dict(loader='Loader')

    name = "TestUtilityLoader"
    filename = "TestUtilityLoader.c"

    cls = Code.UtilityCode

    def test_load_as_string(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (deprecated since Python 3.2, removed in 3.12).
        got = strip_2tup(self.cls.load_as_string(self.name))
        self.assertEqual(got, self.expected)

        got = strip_2tup(self.cls.load_as_string(self.name, self.filename))
        self.assertEqual(got, self.expected)

    def test_load(self):
        utility = self.cls.load(self.name)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected)

        # The loaded utility declares exactly one dependency.
        required, = utility.requires
        got = strip_2tup((required.proto, required.impl))
        self.assertEqual(got, self.required)

        utility = self.cls.load(self.name, from_file=self.filename)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected)

        utility = self.cls.load_cached(self.name, from_file=self.filename)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected)
class TestTempitaUtilityLoader(TestUtilityLoader):
    """
    Test loading UtilityCodes with Tempita substitution
    """

    # Same expectations as the base class, but with the Tempita
    # placeholder already substituted.
    expected_tempita = (TestUtilityLoader.expected[0].replace('{{loader}}', 'Loader'),
                        TestUtilityLoader.expected[1].replace('{{loader}}', 'Loader'))

    required_tempita = (TestUtilityLoader.required[0].replace('{{loader}}', 'Loader'),
                        TestUtilityLoader.required[1].replace('{{loader}}', 'Loader'))

    cls = Code.TempitaUtilityCode

    def test_load_as_string(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (deprecated since Python 3.2, removed in 3.12).
        got = strip_2tup(self.cls.load_as_string(self.name, context=self.context))
        self.assertEqual(got, self.expected_tempita)

    def test_load(self):
        utility = self.cls.load(self.name, context=self.context)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected_tempita)

        required, = utility.requires
        got = strip_2tup((required.proto, required.impl))
        self.assertEqual(got, self.required_tempita)

        utility = self.cls.load(self.name, from_file=self.filename, context=self.context)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected_tempita)
class TestCythonUtilityLoader(TestTempitaUtilityLoader):
    """
    Test loading CythonUtilityCodes
    """

    # Just change the attributes and run the same tests
    # (CythonUtilityCode has no C prototype, hence the None protos).
    expected = None, "test {{cy_loader}} impl"
    expected_tempita = None, "test CyLoader impl"

    required = None, "req {{cy_loader}} impl"
    required_tempita = None, "req CyLoader impl"

    context = dict(cy_loader='CyLoader')

    name = "TestCyUtilityLoader"
    filename = "TestCyUtilityLoader.pyx"

    cls = UtilityCode.CythonUtilityCode

    # Small hack to pass our tests above
    # NOTE(review): this sets the attribute on CythonUtilityCode itself
    # (a class-level mutation visible outside this test class) --
    # presumably intentional; confirm.
    cls.proto = None

    # Re-run the inherited test bodies under this class's attributes.
    test_load = TestUtilityLoader.test_load
    test_load_tempita = TestTempitaUtilityLoader.test_load
| apache-2.0 |
drjeep/django | django/contrib/sessions/backends/cached_db.py | 109 | 2892 | """
Cached, database-backed sessions.
"""
import logging
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.cache import caches
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
KEY_PREFIX = "django.contrib.sessions.cached_db"
class SessionStore(DBStore):
    """
    Implements cached, database backed sessions.

    Reads consult the cache first and fall back to the database; writes
    and deletes update both stores to keep them in sync.
    """

    def __init__(self, session_key=None):
        # Resolve the cache backend by alias at instantiation time so the
        # configured SESSION_CACHE_ALIAS is honoured.
        self._cache = caches[settings.SESSION_CACHE_ALIAS]
        super(SessionStore, self).__init__(session_key)

    @property
    def cache_key(self):
        # Namespaced key so session entries do not clash with other users
        # of the same cache backend.
        return KEY_PREFIX + self._get_or_create_session_key()

    def load(self):
        """Return the session data dict, preferring the cache over the DB."""
        try:
            data = self._cache.get(self.cache_key)
        except Exception:
            # Some backends (e.g. memcache) raise an exception on invalid
            # cache keys. If this happens, reset the session. See #17810.
            data = None
        if data is None:
            # Duplicate DBStore.load, because we need to keep track
            # of the expiry date to set it properly in the cache.
            try:
                s = Session.objects.get(
                    session_key=self.session_key,
                    expire_date__gt=timezone.now()
                )
                data = self.decode(s.session_data)
                # Repopulate the cache; expire it in step with the DB row.
                self._cache.set(self.cache_key, data,
                                self.get_expiry_age(expiry=s.expire_date))
            except (Session.DoesNotExist, SuspiciousOperation) as e:
                if isinstance(e, SuspiciousOperation):
                    logger = logging.getLogger('django.security.%s' %
                                               e.__class__.__name__)
                    logger.warning(force_text(e))
                self._session_key = None
                data = {}
        return data

    def exists(self, session_key):
        # A cache hit is authoritative; on a miss fall back to the DB check.
        if session_key and (KEY_PREFIX + session_key) in self._cache:
            return True
        return super(SessionStore, self).exists(session_key)

    def save(self, must_create=False):
        # Write-through: persist to the database first, then mirror into
        # the cache with a matching expiry.
        super(SessionStore, self).save(must_create)
        self._cache.set(self.cache_key, self._session, self.get_expiry_age())

    def delete(self, session_key=None):
        super(SessionStore, self).delete(session_key)
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        self._cache.delete(KEY_PREFIX + session_key)

    def flush(self):
        """
        Removes the current session data from the database and regenerates the
        key.
        """
        self.clear()
        self.delete(self.session_key)
        self._session_key = None
# At bottom to avoid circular import
from django.contrib.sessions.models import Session # isort:skip
| bsd-3-clause |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
try:
_big_number = sys.maxsize
except NameError:
_big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available."""
    # Lazily resolve the real implementation on first call, then rebind the
    # module-level name so subsequent calls go straight to it.
    global parse_version
    try:
        from pkg_resources import parse_version
    except ImportError:
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
class BadWheelFile(ValueError):
    """Raised for a malformed wheel: bad filename, failed content-hash
    check, or a signature that does not match RECORD."""
    pass
class WheelFile(object):
    """Parse wheel-specific attributes from a wheel (.whl) file and offer
    basic installation and verification support.

    WheelFile can be used to simply parse a wheel filename by avoiding the
    methods that require the actual file contents."""

    WHEEL_INFO = "WHEEL"
    RECORD = "RECORD"

    def __init__(self,
                 filename,
                 fp=None,
                 append=False,
                 context=get_supported):
        """
        :param fp: A seekable file-like object or None to open(filename).
        :param append: Open archive in append mode.
        :param context: Function returning list of supported tags. Wheels
            must have the same context to be sortable.
        """
        self.filename = filename
        self.fp = fp
        self.append = append
        self.context = context
        basename = os.path.basename(filename)
        self.parsed_filename = WHEEL_INFO_RE(basename)
        if not basename.endswith('.whl') or self.parsed_filename is None:
            raise BadWheelFile("Bad filename '%s'" % filename)

    def __repr__(self):
        return self.filename

    @property
    def distinfo_name(self):
        return "%s.dist-info" % self.parsed_filename.group('namever')

    @property
    def datadir_name(self):
        return "%s.data" % self.parsed_filename.group('namever')

    @property
    def record_name(self):
        return "%s/%s" % (self.distinfo_name, self.RECORD)

    @property
    def wheelinfo_name(self):
        return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)

    @property
    def tags(self):
        """A wheel file is compatible with the Cartesian product of the
        period-delimited tags in its filename.

        To choose a wheel file among several candidates having the same
        distribution version 'ver', an installer ranks each triple of
        (pyver, abi, plat) that its Python installation can run, sorting
        the wheels by the best-ranked tag it supports and then by their
        arity which is just len(list(compatibility_tags)).
        """
        tags = self.parsed_filename.groupdict()
        for pyver in tags['pyver'].split('.'):
            for abi in tags['abi'].split('.'):
                for plat in tags['plat'].split('.'):
                    yield (pyver, abi, plat)

    # Historical alias for the tags property.
    compatibility_tags = tags

    @property
    def arity(self):
        """The number of compatibility tags the wheel declares."""
        return len(list(self.compatibility_tags))

    @property
    def rank(self):
        """
        Lowest index of any of this wheel's tags in self.context(), and the
        arity e.g. (0, 1)
        """
        return self.compatibility_rank(self.context())

    @property
    def compatible(self):
        return self.rank[0] != _big_number  # bad API!

    # deprecated:
    def compatibility_rank(self, supported):
        """Rank the wheel against the supported tags. Smaller ranks are more
        compatible!

        :param supported: A list of compatibility tags that the current
            Python implemenation can run.
        """
        preferences = []
        for tag in self.compatibility_tags:
            try:
                preferences.append(supported.index(tag))
            # Tag not present
            except ValueError:
                pass
        if len(preferences):
            return (min(preferences), self.arity)
        return (_big_number, 0)

    # deprecated
    def supports_current_python(self, x):
        assert self.context == x, 'context mismatch'
        return self.compatible

    # Comparability.
    # Wheels are equal if they refer to the same file.
    # If two wheels are not equal, compare based on (in this order):
    #   1. Name
    #   2. Version
    #   3. Compatibility rank
    #   4. Filename (as a tiebreaker)
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 -- confirm this is intended.
    @property
    def _sort_key(self):
        return (self.parsed_filename.group('name'),
                parse_version(self.parsed_filename.group('ver')),
                tuple(-x for x in self.rank),
                self.filename)

    def __eq__(self, other):
        return self.filename == other.filename

    def __ne__(self, other):
        return self.filename != other.filename

    def __lt__(self, other):
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))
        return self._sort_key < other._sort_key
        # (The original carried an unreachable hand-rolled comparison after
        # this return, marked "# XXX prune"; it has been removed.)

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return self == other or self < other

    def __ge__(self, other):
        return self == other or other < self

    #
    # Methods using the file's contents:
    #

    @reify
    def zipfile(self):
        """Open (and on first read-only open, verify) the wheel archive."""
        mode = "r"
        if self.append:
            mode = "a"
        vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
        if not self.append:
            self.verify(vzf)
        return vzf

    @reify
    def parsed_wheel_info(self):
        """Parse wheel metadata (the .data/WHEEL file)"""
        return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))

    def check_version(self):
        """Raise ValueError if the wheel's spec version is newer than this tool."""
        version = self.parsed_wheel_info['Wheel-Version']
        if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
            raise ValueError("Wheel version is too high")

    @reify
    def install_paths(self):
        """
        Consult distutils to get the install paths for our dist. A dict with
        ('purelib', 'platlib', 'headers', 'scripts', 'data').

        We use the name from our filename as the dist name, which means headers
        could be installed in the wrong place if the filesystem-escaped name
        is different than the Name. Who cares?
        """
        name = self.parsed_filename.group('name')
        return get_install_paths(name)

    def install(self, force=False, overrides={}):
        """
        Install the wheel into site-packages.

        (The mutable default for ``overrides`` is safe here: it is only
        ever read, never mutated.)
        """
        # Utility to get the target directory for a particular key
        def get_path(key):
            return overrides.get(key) or self.install_paths[key]

        # The base target location is either purelib or platlib
        if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
            root = get_path('purelib')
        else:
            root = get_path('platlib')

        # Parse all the names in the archive
        name_trans = {}
        for info in self.zipfile.infolist():
            name = info.filename
            # Zip files can contain entries representing directories.
            # These end in a '/'.
            # We ignore these, as we create directories on demand.
            if name.endswith('/'):
                continue

            # Pathnames in a zipfile namelist are always /-separated.
            # In theory, paths could start with ./ or have other oddities
            # but this won't happen in practical cases of well-formed wheels.
            # We'll cover the simple case of an initial './' as it's both easy
            # to do and more common than most other oddities.
            if name.startswith('./'):
                name = name[2:]

            # Split off the base directory to identify files that are to be
            # installed in non-root locations
            basedir, sep, filename = name.partition('/')
            if sep and basedir == self.datadir_name:
                # Data file. Target destination is elsewhere
                key, sep, filename = filename.partition('/')
                if not sep:
                    raise ValueError("Invalid filename in wheel: {0}".format(name))
                target = get_path(key)
            else:
                # Normal file. Target destination is root
                key = ''
                target = root
                filename = name

            # Map the actual filename from the zipfile to its intended target
            # directory and the pathname relative to that directory.
            dest = os.path.normpath(os.path.join(target, filename))
            name_trans[info] = (key, target, filename, dest)

        # We're now ready to start processing the actual install. The process
        # is as follows:
        #   1. Prechecks - is the wheel valid, is its declared architecture
        #      OK, etc. [[Responsibility of the caller]]
        #   2. Overwrite check - do any of the files to be installed already
        #      exist?
        #   3. Actual install - put the files in their target locations.
        #   4. Update RECORD - write a suitably modified RECORD file to
        #      reflect the actual installed paths.

        if not force:
            for info, v in name_trans.items():
                k = info.filename
                key, target, filename, dest = v
                if os.path.exists(dest):
                    raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))

        # Get the name of our executable, for use when replacing script
        # wrapper hashbang lines.
        # We encode it using getfilesystemencoding, as that is "the name of
        # the encoding used to convert Unicode filenames into system file
        # names".
        exename = sys.executable.encode(sys.getfilesystemencoding())
        record_data = []
        record_name = self.distinfo_name + '/RECORD'
        for info, (key, target, filename, dest) in name_trans.items():
            name = info.filename
            source = self.zipfile.open(info)
            # Skip the RECORD file
            if name == record_name:
                continue
            ddir = os.path.dirname(dest)
            if not os.path.isdir(ddir):
                os.makedirs(ddir)

            destination = HashingFile(open(dest, 'wb'))
            if key == 'scripts':
                hashbang = source.readline()
                if hashbang.startswith(b'#!python'):
                    hashbang = b'#!' + exename + binary(os.linesep)
                destination.write(hashbang)

            shutil.copyfileobj(source, destination)
            reldest = os.path.relpath(dest, root)
            # BUG FIX: str.replace returns a new string; the original code
            # discarded the result, so RECORD contained os.sep-separated
            # paths on Windows instead of the '/'-separated paths the wheel
            # spec (PEP 427) requires.
            reldest = reldest.replace(os.sep, '/')
            record_data.append((reldest, destination.digest(), destination.length))
            destination.close()
            source.close()
            # preserve attributes (especially +x bit for scripts)
            attrs = info.external_attr >> 16
            if attrs:  # tends to be 0 if Windows.
                os.chmod(dest, info.external_attr >> 16)

        record_name = os.path.join(root, self.record_name)
        writer = csv.writer(open_for_csv(record_name, 'w+'))
        for reldest, digest, length in sorted(record_data):
            writer.writerow((reldest, digest, length))
        writer.writerow((self.record_name, '', ''))

    def verify(self, zipfile=None):
        """Configure the VerifyingZipFile `zipfile` by verifying its signature
        and setting expected hashes for every hash in RECORD.

        Caller must complete the verification process by completely reading
        every file in the archive (e.g. with extractall)."""
        sig = None
        if zipfile is None:
            zipfile = self.zipfile
        zipfile.strict = True

        record_name = '/'.join((self.distinfo_name, 'RECORD'))
        sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
        # tolerate s/mime signatures:
        smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
        zipfile.set_expected_hash(record_name, None)
        zipfile.set_expected_hash(sig_name, None)
        zipfile.set_expected_hash(smime_sig_name, None)
        record = zipfile.read(record_name)

        record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
        try:
            sig = from_json(native(zipfile.read(sig_name)))
        except KeyError:  # no signature
            pass
        if sig:
            headers, payload = signatures.verify(sig)
            if payload['hash'] != "sha256=" + native(record_digest):
                msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
                raise BadWheelFile(msg.format(payload['hash'],
                                              native(record_digest)))

        reader = csv.reader((native(r) for r in record.splitlines()))

        for row in reader:
            filename = row[0]
            hash = row[1]
            if not hash:
                if filename not in (record_name, sig_name):
                    sys.stderr.write("%s has no hash!\n" % filename)
                continue
            algo, data = row[1].split('=', 1)
            assert algo == "sha256", "Unsupported hash algorithm"
            zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
class VerifyingZipFile(zipfile.ZipFile):
    """ZipFile that can assert that each of its extracted contents matches
    an expected sha256 hash. Note that each file must be completly read in
    order for its hash to be checked."""

    def __init__(self, file, mode="r",
                 compression=zipfile.ZIP_STORED,
                 allowZip64=False):
        zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)

        # In strict mode, opening a member with no registered hash fails.
        self.strict = False
        self._expected_hashes = {}
        self._hash_algorithm = hashlib.sha256

    def set_expected_hash(self, name, hash):
        """
        :param name: name of zip entry
        :param hash: bytes of hash (or None for "don't care")
        """
        self._expected_hashes[name] = hash

    def open(self, name_or_info, mode="r", pwd=None):
        """Return file-like object for 'name'."""
        # A non-monkey-patched version would contain most of zipfile.py
        ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
        if isinstance(name_or_info, zipfile.ZipInfo):
            name = name_or_info.filename
        else:
            name = name_or_info
        if (name in self._expected_hashes
                and self._expected_hashes[name] is not None):  # idiom fix: was '!= None'
            expected_hash = self._expected_hashes[name]
            try:
                _update_crc_orig = ef._update_crc
            except AttributeError:
                warnings.warn('Need ZipExtFile._update_crc to implement '
                              'file hash verification (in Python >= 2.7)')
                return ef
            # Piggy-back on the CRC bookkeeping hook: hash each chunk as it
            # is read and compare against the expectation once EOF is seen.
            running_hash = self._hash_algorithm()
            if hasattr(ef, '_eof'):  # py33
                def _update_crc(data):
                    _update_crc_orig(data)
                    running_hash.update(data)
                    if ef._eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            else:
                def _update_crc(data, eof=None):
                    _update_crc_orig(data, eof=eof)
                    running_hash.update(data)
                    if eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            ef._update_crc = _update_crc
        elif self.strict and name not in self._expected_hashes:
            raise BadWheelFile("No expected hash for file %r" % ef.name)
        return ef

    def pop(self):
        """Truncate the last file off this zipfile.

        Assumes infolist() is in the same order as the files (true for
        ordinary zip files created by Python)"""
        if not self.fp:
            raise RuntimeError(
                "Attempt to pop from ZIP archive that was already closed")
        last = self.infolist().pop()
        del self.NameToInfo[last.filename]
        self.fp.seek(last.header_offset, os.SEEK_SET)
        self.fp.truncate()
        self._didModify = True
| mit |
kgaillot/libqb | .tito/custom.py | 8 | 2351 | # -*- coding: UTF-8 -*-
"""Tito ad-hoc module for custom git repo -> spec + archive metamorphosis"""
from __future__ import (print_function, unicode_literals, absolute_import,
division)
__author__ = "Jan Pokorný <jpokorny@redhat.com>"
__copyright__ = "Copyright 2016 Red Hat, Inc."
__license__ = "LGPLv2.1+ WITHOUT ANY WARRANTY"
from os.path import basename, join
from shutil import copyfile
from tito.builder.main import BuilderBase
from tito.builder.fetch import FetchBuilder
from tito.common import error_out, run_command, get_spec_version_and_release
class NativeFetchBuilder(FetchBuilder):
    """
    A specialized FetchBuilder to just setup the specfile + archive
    using package-native scripts, which currently boils down to a sequence
    that needs to be configured via fetch_prep_command option in builder
    section (e.g., "./autogen.sh && ./configure && make dist foo.spec").

    Uses code of src/tito/builder/fetch.py from the tito project as a template.
    """
    REQUIRED_ARGS = []

    def __init__(self, name=None, tag=None, build_dir=None,
                 config=None, user_config=None,
                 args=None, **kwargs):
        # Calls BuilderBase.__init__ directly, bypassing FetchBuilder's own
        # initializer -- presumably to skip its source-strategy setup, since
        # sources here come from fetch_prep_command instead; confirm.
        BuilderBase.__init__(self, name=name, build_dir=build_dir,
                             config=config,
                             user_config=user_config, args=args, **kwargs)
        if tag:
            error_out("FetchBuilder does not support building specific tags.")

        if not config.has_option('builder', 'fetch_prep_command'):
            error_out("NativeFetchBuilder requires fetch_prep_command.")

        # Build tag derives from the spec's Version/Release, not a git tag.
        self.build_tag = '%s-%s' % (
            self.project_name,
            get_spec_version_and_release(self.start_dir,
                                         '%s.spec' % self.project_name)
        )

    def tgz(self):
        """Run fetch_prep_command, then register the newest tarball in the
        working directory and the generated specfile as build sources."""
        self.ran_tgz = True
        self._create_build_dirs()

        print("Fetching sources...")
        run_command(self.config.get('builder', 'fetch_prep_command'))
        # The most recently modified tarball is taken to be the one the
        # prep command just produced.
        manual_sources = [run_command("ls -1t *.tar.* | head -n1")]
        self.spec_file = self.project_name + '.spec'
        for s in manual_sources:
            base_name = basename(s)
            dest_filepath = join(self.rpmbuild_sourcedir, base_name)
            copyfile(s, dest_filepath)
            self.sources.append(dest_filepath)
| lgpl-2.1 |
masashi-y/myccg | src/py/ja_lstm_parser.py | 1 | 20024 |
from __future__ import print_function
import sys
import numpy as np
import json
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import cuda
from chainer import training, Variable
from chainer.training import extensions
from chainer.optimizer import WeightDecay, GradientClipping
from py.japanese_ccg import JaCCGReader
from collections import defaultdict, OrderedDict
from py.py_utils import read_pretrained_embeddings, read_model_defs
from py.tree import Leaf, Tree, get_leaves
from py.biaffine import Biaffine
##############################################################
################# DEPRECATED NOT MAINTAINED ##################
##############################################################
# Special vocabulary tokens shared by the word, character and category
# dictionaries built below.
UNK = "*UNKNOWN*"   # out-of-vocabulary placeholder
START = "*START*"   # sentence-initial padding token
END = "*END*"       # sentence-final padding token
# Label id that loss/accuracy computations treat as "skip this position".
IGNORE = -1
def log(args, out):
    """Dump every attribute of the parsed *args* namespace to *out*,
    one ``key: value`` line per attribute, in attribute order."""
    out.writelines(
        "{}: {}\n".format(key, value) for key, value in vars(args).items())
class TrainingDataCreator(object):
    """
    create train & validation data

    Walks Japanese CCGbank derivations, counting words/characters/categories
    and the binary/unary rules they use, then dumps frequency-filtered
    vocabularies plus (sentence -> (categories, dependencies)) samples.

    NOTE(review): this class is Python 2 only (bytes written to text-mode
    files, ``len(filter(...))``); kept as-is per the DEPRECATED banner above.
    """
    def __init__(self, filepath, word_freq_cut, char_freq_cut, cat_freq_cut):
        # Path to the CCGbank-format input file.
        self.filepath = filepath
        # those categories whose frequency < freq_cut are discarded.
        self.word_freq_cut = word_freq_cut
        self.char_freq_cut = char_freq_cut
        self.cat_freq_cut = cat_freq_cut
        self.seen_rules = defaultdict(int) # seen binary rules
        self.unary_rules = defaultdict(int) # seen unary rules
        self.cats = defaultdict(int) # all cats
        self.words = defaultdict(int)
        self.chars = defaultdict(int)
        # sentence -> (categories, dependencies); ordered for stable output.
        self.samples = OrderedDict()
        self.sents = []

        # Seed the special tokens with a huge count so the frequency cuts
        # below can never drop them from the vocabularies.
        self.words[UNK] = 10000
        self.words[START] = 10000
        self.words[END] = 10000
        self.chars[UNK] = 10000
        self.chars[START] = 10000
        self.chars[END] = 10000
        self.cats[START] = 10000
        self.cats[END] = 10000

    def _traverse(self, tree):
        # Recursively count categories, words, characters and rule
        # applications observed in one derivation tree.
        if isinstance(tree, Leaf):
            self.cats[tree.cat.without_semantics] += 1
            w = tree.word
            self.words[w] += 1
            for c in w:
                self.chars[c] += 1
        else:
            children = tree.children
            if len(children) == 1:
                # Unary rule: "parent child".
                rule = tree.cat.without_semantics + \
                    " " + children[0].cat.without_semantics
                self.unary_rules[rule] += 1
                self._traverse(children[0])
            else:
                # Binary rule: "left right".
                rule = children[0].cat.without_semantics + \
                    " " + children[1].cat.without_semantics
                self.seen_rules[rule] += 1
                self._traverse(children[0])
                self._traverse(children[1])

    @staticmethod
    def _write(dct, out, comment_out_value=False):
        # Write "key value" lines; with comment_out_value the value is
        # prefixed with "# " so downstream readers treat it as a comment.
        print("writing to", out.name, file=sys.stderr)
        for key, value in dct.items():
            out.write(key.encode("utf-8") + " ")
            if comment_out_value:
                out.write("# ")
            out.write(str(value) + "\n")

    def _get_dependencies(self, tree, sent_len):
        """Extract one head index per token (1-indexed, 0 = root) from the
        head-annotated derivation tree."""
        def rec(subtree):
            if isinstance(subtree, Tree):
                children = subtree.children
                if len(children) == 2:
                    # The non-head child depends on the head child.
                    head = rec(children[0 if subtree.left_is_head else 1])
                    dep = rec(children[1 if subtree.left_is_head else 0])
                    res[dep] = head
                else:
                    head = rec(children[0])
                return head
            else:
                # Leaf: its position is the head index it contributes upward.
                return subtree.pos

        res = [-1 for _ in range(sent_len)]
        rec(tree)
        # Shift to 1-indexed heads; the root keeps head 0.
        res = [i + 1 for i in res]
        # Exactly one root must exist.  NOTE(review): Python 2 only --
        # in Python 3 filter() has no len().
        assert len(filter(lambda i:i == 0, res)) == 1
        return res

    def _to_conll(self, out):
        # Emit "index word category head" rows, blank line between sentences.
        for sent, (cats, deps) in self.samples.items():
            for i, (w, c, d) in enumerate(zip(sent.split(" "), cats, deps), 1):
                out.write("{}\t{}\t{}\t{}\n".format(i, w.encode("utf-8"), c, d))
            out.write("\n")

    def _create_samples(self, trees):
        # Build the (sentence -> (categories, dependencies)) map.
        for tree in trees:
            tokens = get_leaves(tree)
            words = [token.word for token in tokens]
            cats = [token.cat.without_semantics for token in tokens]
            deps = self._get_dependencies(tree, len(tokens))
            sent = " ".join(words)
            self.sents.append(sent)
            self.samples[sent] = cats, deps

    @staticmethod
    def create_traindata(args):
        """Count statistics over the training trees, apply frequency cuts,
        and write vocabularies, rules, samples and CoNLL files to args.out."""
        self = TrainingDataCreator(args.path,
                args.word_freq_cut, args.char_freq_cut, args.cat_freq_cut)
        with open(args.out + "/log_create_traindata", "w") as f:
            log(args, f)
        trees = JaCCGReader(self.filepath).readall()
        for tree in trees:
            self._traverse(tree)
        self._create_samples(trees)
        # Apply the frequency cut-offs (special tokens survive via seeding).
        self.cats = {k: v for (k, v) in self.cats.items() \
                        if v >= self.cat_freq_cut}
        self.words = {k: v for (k, v) in self.words.items() \
                        if v >= self.word_freq_cut}
        self.chars = {k: v for (k, v) in self.chars.items() \
                        if v >= self.char_freq_cut}
        with open(args.out + "/unary_rules.txt", "w") as f:
            self._write(self.unary_rules, f, comment_out_value=True)
        with open(args.out + "/seen_rules.txt", "w") as f:
            self._write(self.seen_rules, f, comment_out_value=True)
        with open(args.out + "/target.txt", "w") as f:
            self._write(self.cats, f, comment_out_value=False)
        with open(args.out + "/words.txt", "w") as f:
            self._write(self.words, f, comment_out_value=False)
        with open(args.out + "/chars.txt", "w") as f:
            self._write(self.chars, f, comment_out_value=False)
        with open(args.out + "/traindata.json", "w") as f:
            json.dump(self.samples, f)
        with open(args.out + "/trainsents.txt", "w") as f:
            for sent in self.sents:
                f.write(sent.encode("utf-8") + "\n")
        with open(args.out + "/trainsents.conll", "w") as f:
            self._to_conll(f)

    @staticmethod
    def create_testdata(args):
        """Write test samples and CoNLL/plain-text sentence files; no
        vocabulary counting or frequency cuts are performed here."""
        self = TrainingDataCreator(args.path,
                args.word_freq_cut, args.char_freq_cut, args.cat_freq_cut)
        with open(args.out + "/log_create_testdata", "w") as f:
            log(args, f)
        trees = JaCCGReader(self.filepath).readall()
        self._create_samples(trees)
        with open(args.out + "/testdata.json", "w") as f:
            json.dump(self.samples, f)
        with open(args.out + "/testsents.conll", "w") as f:
            self._to_conll(f)
        with open(args.out + "/testsents.txt", "w") as f:
            for sent in self.sents:
                f.write(sent.encode("utf-8") + "\n")
class FeatureExtractor(object):
    """Turns tokenized sentences into the integer word/character id arrays
    the network consumes, padded with *START*/*END* markers on both ends."""

    def __init__(self, model_path):
        """Load the word and character vocabularies from *model_path* and
        cache the ids of the special tokens."""
        self.model_path = model_path
        self.words = read_model_defs(model_path + "/words.txt")
        self.chars = read_model_defs(model_path + "/chars.txt")
        self.unk_word, self.unk_char = self.words[UNK], self.chars[UNK]
        self.start_word, self.start_char = self.words[START], self.chars[START]
        self.end_word, self.end_char = self.words[END], self.chars[END]

    def process(self, words):
        """
        words: list of unicode tokens

        Returns (word ids, char id matrix, [max word length]); the char
        matrix has one row per token (plus START/END rows) and is padded
        with -1 beyond each word's length.
        """
        word_ids = [self.words.get(token, self.unk_word) for token in words]
        w = np.array([self.start_word] + word_ids + [self.end_word], 'i')
        max_len = max(len(token) for token in words)
        c = np.full((len(words) + 2, max_len), -1, 'i')
        # START/END rows carry a single marker character each.
        c[0, 0] = self.start_char
        c[-1, 0] = self.end_char
        for row, token in enumerate(words, 1):
            for col, ch in enumerate(token):
                c[row, col] = self.chars.get(ch, self.unk_char)
        return w, c, np.array([max_len], 'i')
class LSTMParserDataset(chainer.dataset.DatasetMixin):
    """Chainer dataset adapter: yields one
    (word ids, char ids, length, category ids, dependency heads) tuple
    per training sentence loaded from a JSON samples file."""

    def __init__(self, model_path, samples_path):
        self.model_path = model_path
        self.extractor = FeatureExtractor(model_path)
        self.targets = read_model_defs(model_path + "/target.txt")
        with open(samples_path) as f:
            self.samples = json.load(f).items()

    def __len__(self):
        return len(self.samples)

    def get_example(self, i):
        """Encode the i-th (sentence, (categories, heads)) sample."""
        sent, (cats, deps) = self.samples[i]
        tokens = sent.split(" ")
        w, c, l = self.extractor.process(tokens)
        # Pad category/dependency sequences to line up with START/END tokens;
        # unknown categories map to IGNORE so the loss skips them.
        cat_ids = np.array([self.targets.get(cat, IGNORE)
                            for cat in [START] + cats + [END]], 'i')
        dep_ids = np.array([-1] + deps + [-1], 'i')
        return w, c, l, cat_ids, dep_ids
class JaLSTMParser(chainer.Chain):
    """Joint Japanese CCG supertagger + dependency parser: a char-CNN plus
    word embeddings feed two stacked unidirectional NStepLSTMs (forward and
    reversed), whose concatenated states drive an MLP category classifier
    and a biaffine head scorer."""

    def __init__(self, model_path, word_dim=None, char_dim=None, nlayers=2,
            hidden_dim=128, relu_dim=64, dep_dim=100, dropout_ratio=0.5):
        """With word_dim=None, hyperparameters are read back from
        tagger_defs.txt (inference mode); otherwise they are taken from the
        arguments and written to tagger_defs.txt (training mode)."""
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            # use as supertagger
            with open(defs_file) as f:
                defs = json.load(f)
            self.dep_dim = defs["dep_dim"]
            self.word_dim = defs["word_dim"]
            self.char_dim = defs["char_dim"]
            self.hidden_dim = defs["hidden_dim"]
            self.relu_dim = defs["relu_dim"]
            self.nlayers = defs["nlayers"]
            self.train = False
            self.extractor = FeatureExtractor(model_path)
        else:
            # training
            self.dep_dim = dep_dim
            self.word_dim = word_dim
            self.char_dim = char_dim
            self.hidden_dim = hidden_dim
            self.relu_dim = relu_dim
            self.nlayers = nlayers
            self.train = True
            # Persist the hyperparameters so inference can rebuild the model.
            with open(defs_file, "w") as f:
                json.dump({"model": self.__class__.__name__,
                           "word_dim": self.word_dim, "char_dim": self.char_dim,
                           "hidden_dim": hidden_dim, "relu_dim": relu_dim,
                           "nlayers": nlayers, "dep_dim": dep_dim}, f)

        self.targets = read_model_defs(model_path + "/target.txt")
        self.words = read_model_defs(model_path + "/words.txt")
        self.chars = read_model_defs(model_path + "/chars.txt")
        self.in_dim = self.word_dim + self.char_dim
        self.dropout_ratio = dropout_ratio
        super(JaLSTMParser, self).__init__(
                emb_word=L.EmbedID(len(self.words), self.word_dim),
                # Char embeddings are fixed at width 50; IGNORE marks padding.
                emb_char=L.EmbedID(len(self.chars), 50, ignore_label=IGNORE),
                conv_char=L.Convolution2D(1, self.char_dim,
                    (3, 50), stride=1, pad=(1, 0)),
                lstm_f=L.NStepLSTM(nlayers, self.in_dim,
                    self.hidden_dim, self.dropout_ratio),
                lstm_b=L.NStepLSTM(nlayers, self.in_dim,
                    self.hidden_dim, self.dropout_ratio),
                linear_cat1=L.Linear(2 * self.hidden_dim, self.relu_dim),
                linear_cat2=L.Linear(self.relu_dim, len(self.targets)),
                linear_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
                linear_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
                biaffine=Biaffine(self.dep_dim)
                )

    def load_pretrained_embeddings(self, path):
        # Overwrite the word embedding matrix with pretrained vectors.
        self.emb_word.W.data = read_pretrained_embeddings(path)

    def forward(self, ws, cs, ls):
        """
        xs [(w,s,p,y), ..., ]
        w: word, c: char, l: length, y: label

        Returns per-sentence category logits and biaffine head scores.
        """
        batchsize = len(ws)
        # cs: [(sentence length, max word length)]
        ws = map(self.emb_word, ws)
        # ls: [(sentence length, char dim)]
        # before conv: (sent len, 1, max word len, char_size)
        # after conv: (sent len, char_size, max word len, 1)
        # after max_pool: (sent len, char_size, 1, 1)
        cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(
                    F.expand_dims(
                        self.emb_char(c), 1)), (l, 1)))
            for c, l in zip(cs, ls)]
        # [(sentence length, (word_dim + char_dim))]
        xs_f = [F.dropout(F.concat([w, c]),
            self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
        # The "backward" LSTM simply consumes each sentence reversed.
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        # Undo the reversal so both directions align per token.
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        cat_ys = [self.linear_cat2(F.relu(self.linear_cat1(h))) for h in hs]
        # Head scores from a biaffine product of dependent/head projections.
        # NOTE(review): dropout here is hard-coded at 0.32, not
        # self.dropout_ratio -- confirm this asymmetry is intentional.
        dep_ys = [self.biaffine(
            F.relu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.relu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys

    def __call__(self, xs):
        """Compute the summed tagging + parsing softmax losses for a batch
        and report per-batch accuracies via chainer.report."""
        batchsize = len(xs)
        ws, cs, ls, cat_ts, dep_ts = zip(*xs)

        cat_ys, dep_ys = self.forward(ws, cs, ls)

        # NOTE(review): Python 2 builtin reduce; needs functools in Python 3.
        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss

    def predict(self, xs):
        """
        batch: list of splitted sentences

        Returns per-sentence (category scores, log-softmax head scores),
        with the START/END positions stripped.
        """
        xs = [self.extractor.process(x) for x in xs]
        batchsize = len(xs)  # NOTE(review): unused
        ws, cs, ls = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, cs, ls)
        return zip([y.data[1:-1] for y in cat_ys], [F.log_softmax(y[1:-1, :-1]).data for y in dep_ys])

    def predict_doc(self, doc, batchsize=16):
        """
        doc list of splitted sentences

        Batched wrapper around predict(); yields (sentence index, 0, scores).
        """
        res = []
        for i in range(0, len(doc), batchsize):
            res.extend([(i + j, 0, y)
                for j, y in enumerate(self.predict(doc[i:i + batchsize]))])
        return res

    def _init_state(self, batchsize):
        # Zero-initialized LSTM states, float32.
        res = [Variable(np.zeros(( # forward cx, hx, backward cx, hx
            self.nlayers, batchsize, self.hidden_dim), 'f')) for _ in range(4)]
        return res

    @property
    def cats(self):
        # Category names ordered by their target id.
        # NOTE(review): Python 2 only -- zip() is not indexable in Python 3.
        return zip(*sorted(self.targets.items(), key=lambda x: x[1]))[0]
def converter(x, device):
    """Move a batch to the requested device: ``None`` leaves it untouched,
    a negative id copies it to host memory, and a non-negative id copies
    it to that GPU."""
    if device is None:
        return x
    if device < 0:
        return cuda.to_cpu(x)
    return cuda.to_gpu(x, device, cuda.Stream.null)
def train(args):
    """Train the joint supertagging/dependency model.

    Builds a JaLSTMParser from the command-line hyperparameters, wires up
    chainer iterators, an Adam optimizer and a Trainer, then runs the loop
    with periodic evaluation, snapshotting and console reporting.
    """
    model = JaLSTMParser(args.model, args.word_emb_size, args.char_emb_size,
            args.nlayers, args.hidden_dim, args.relu_dim, args.dep_dim,
            args.dropout_ratio)
    # Record the exact hyperparameters used for this run.
    with open(args.model + "/params", "w") as f:
        log(args, f)

    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    # Evaluate on a copy so dropout can be disabled without touching the
    # training instance.
    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    # Fix: the original column list repeated 'main/tagging_loss' twice;
    # each metric is now reported exactly once.
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_loss', 'main/tagging_accuracy',
        'main/parsing_loss', 'main/parsing_accuracy',
        'validation/main/tagging_loss', 'validation/main/tagging_accuracy',
        'validation/main/parsing_loss', 'validation/main/parsing_accuracy'
        ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
if __name__ == "__main__":
    # Command-line front end: "create" builds vocabulary/training files,
    # "train" runs the training loop; each subcommand dispatches via
    # set_defaults(func=...).
    import argparse
    parser = argparse.ArgumentParser(
                "CCG parser's LSTM supertag tagger")
    subparsers = parser.add_subparsers()

    # Creating training data
    parser_c = subparsers.add_parser(
            "create", help="create tagger input data")
    parser_c.add_argument("path",
            help="path to ccgbank data file")
    parser_c.add_argument("out",
            help="output directory path")
    parser_c.add_argument("--cat-freq-cut",
            type=int, default=10,
            help="only allow categories which appear >= freq-cut")
    parser_c.add_argument("--word-freq-cut",
            type=int, default=5,
            help="only allow words which appear >= freq-cut")
    parser_c.add_argument("--char-freq-cut",
            type=int, default=5,
            help="only allow characters which appear >= freq-cut")
    parser_c.add_argument("--mode",
            choices=["train", "test"],
            default="train")
    # --mode selects between the two static factory methods.
    parser_c.set_defaults(func=
            (lambda args:
                TrainingDataCreator.create_traindata(args)
                    if args.mode == "train"
                else  TrainingDataCreator.create_testdata(args)))

    #TODO updater
    # Do training using training data created through `create`
    parser_t = subparsers.add_parser(
            "train", help="train supertagger model")
    parser_t.add_argument("model",
            help="path to model directory")
    parser_t.add_argument("train",
            help="training data file path")
    parser_t.add_argument("val",
            help="validation data file path")
    parser_t.add_argument("--batchsize",
            type=int, default=16, help="batch size")
    parser_t.add_argument("--epoch",
            type=int, default=20, help="epoch")
    parser_t.add_argument("--word-emb-size",
            type=int, default=50,
            help="word embedding size")
    parser_t.add_argument("--char-emb-size",
            type=int, default=32,
            help="character embedding size")
    parser_t.add_argument("--nlayers",
            type=int, default=1,
            help="number of layers for each LSTM")
    parser_t.add_argument("--hidden-dim",
            type=int, default=128,
            help="dimensionality of hidden layer")
    parser_t.add_argument("--relu-dim",
            type=int, default=64,
            help="dimensionality of relu layer")
    parser_t.add_argument("--dep-dim",
            type=int, default=100,
            help="dim")
    parser_t.add_argument("--dropout-ratio",
            type=float, default=0.5,
            help="dropout ratio")
    parser_t.add_argument("--initmodel",
            help="initialize model with `initmodel`")
    parser_t.add_argument("--pretrained",
            help="pretrained word embeddings")
    parser_t.set_defaults(func=train)

    args = parser.parse_args()
    args.func(args)
| mit |
Lilywei123/tempest | tempest/api/compute/security_groups/test_security_group_rules_negative.py | 3 | 6949 | # Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute.security_groups import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
def not_existing_id():
    """Return a random id that (with overwhelming probability) matches no
    existing security group: a UUID when neutron backs the network service,
    otherwise a large random integer."""
    if CONF.service_available.neutron:
        return data_utils.rand_uuid()
    return data_utils.rand_int_id(start=999)
class SecurityGroupRulesNegativeTestJSON(base.BaseSecurityGroupsTest):
    """Negative tests for the security-group-rules compute API: every case
    sends an invalid create/delete request and asserts the expected
    NotFound/BadRequest error."""

    @classmethod
    def resource_setup(cls):
        super(SecurityGroupRulesNegativeTestJSON, cls).resource_setup()
        # Shorthand used by every test below.
        cls.client = cls.security_groups_client

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_non_existent_id(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with non existent Parent group id
        # Adding rules to the non existent Security Group id
        parent_group_id = not_existing_id()
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        self.assertRaises(exceptions.NotFound,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_id(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with Parent group id which is not integer
        # Adding rules to the non int Security Group id
        parent_group_id = data_utils.rand_name('non_int_id')
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_duplicate(self):
        # Negative test: Create Security Group rule duplicate should fail
        # Creating a Security Group to add rule to it
        resp, sg = self.create_security_group()
        # Adding rules to the created Security Group
        parent_group_id = sg['id']
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22

        resp, rule = \
            self.client.create_security_group_rule(parent_group_id,
                                                   ip_protocol,
                                                   from_port,
                                                   to_port)
        # Ensure the successfully-created rule is cleaned up after the test.
        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
        self.assertEqual(200, resp.status)
        # Add the same rule to the group should fail
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_ip_protocol(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid ip_protocol
        # Creating a Security Group to add rule to it
        resp, sg = self.create_security_group()
        # Adding rules to the created Security Group
        parent_group_id = sg['id']
        ip_protocol = data_utils.rand_name('999')
        from_port = 22
        to_port = 22

        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_from_port(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid from_port
        # Creating a Security Group to add rule to it
        resp, sg = self.create_security_group()
        # Adding rules to the created Security Group
        parent_group_id = sg['id']
        ip_protocol = 'tcp'
        # 65536 is just past the largest valid TCP port number.
        from_port = data_utils.rand_int_id(start=65536)
        to_port = 22
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_to_port(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid to_port
        # Creating a Security Group to add rule to it
        resp, sg = self.create_security_group()
        # Adding rules to the created Security Group
        parent_group_id = sg['id']
        ip_protocol = 'tcp'
        from_port = 22
        to_port = data_utils.rand_int_id(start=65536)
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          parent_group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_port_range(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with invalid port range.
        # Creating a Security Group to add rule to it.
        resp, sg = self.create_security_group()
        # Adding a rule to the created Security Group
        secgroup_id = sg['id']
        ip_protocol = 'tcp'
        # from_port > to_port makes the range invalid.
        from_port = 22
        to_port = 21
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_security_group_rule,
                          secgroup_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_delete_security_group_rule_with_non_existent_id(self):
        # Negative test: Deletion of Security Group rule should be FAIL
        # with non existent id
        non_existent_rule_id = not_existing_id()
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_security_group_rule,
                          non_existent_rule_id)
| apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/layers/bayes_test.py | 3 | 12179 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common Bayes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensor2tensor.layers import bayes
import tensorflow as tf
class BayesTest(parameterized.TestCase, tf.test.TestCase):
@tf.contrib.eager.run_test_in_graph_and_eager_modes
def testTrainableNormalStddevConstraint(self):
layer = bayes.DenseReparameterization(
100, kernel_initializer=bayes.TrainableNormal())
inputs = tf.random_normal([1, 1])
out = layer(inputs)
stddev = layer.kernel.distribution.scale
self.evaluate(tf.global_variables_initializer())
res, _ = self.evaluate([stddev, out])
self.assertAllGreater(res, 0.)
@parameterized.named_parameters(
{"testcase_name": "_no_uncertainty", "kernel_initializer": "zeros",
"bias_initializer": "zeros", "all_close": True},
{"testcase_name": "_kernel_uncertainty", "kernel_initializer": None,
"bias_initializer": "zeros", "all_close": False},
{"testcase_name": "_bias_uncertainty", "kernel_initializer": "zeros",
"bias_initializer": None, "all_close": False},
)
@tf.contrib.eager.run_test_in_graph_and_eager_modes
def testDenseReparameterizationKernel(
self, kernel_initializer, bias_initializer, all_close):
inputs = tf.to_float(np.random.rand(5, 3, 12))
layer = bayes.DenseReparameterization(
4, kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer, activation=tf.nn.relu)
outputs1 = layer(inputs)
outputs2 = layer(inputs)
self.evaluate(tf.global_variables_initializer())
res1, res2 = self.evaluate([outputs1, outputs2])
self.assertEqual(res1.shape, (5, 3, 4))
self.assertAllGreaterEqual(res1, 0.)
if all_close:
self.assertAllClose(res1, res2)
else:
self.assertNotAllClose(res1, res2)
layer.get_config()
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testDenseReparameterizationKL(self):
inputs = tf.to_float(np.random.rand(5, 12))
layer = bayes.DenseReparameterization(10)
# Imagine this is the 1st epoch.
with tf.GradientTape() as tape:
layer(inputs) # first call forces a build, here inside this tape
layer(inputs) # ensure robustness after multiple calls
loss = tf.reduce_sum([tf.reduce_sum(l) for l in layer.losses])
variables = [layer.kernel_initializer.mean, layer.kernel_initializer.stddev]
for v in variables:
self.assertIn(v, layer.variables)
# This will be fine, since the layer was built inside this tape, and thus
# the distribution init ops were inside this tape.
grads = tape.gradient(loss, variables)
for grad in grads:
self.assertIsNotNone(grad)
# Imagine this is the 2nd epoch.
with tf.GradientTape() as tape:
layer(inputs) # build won't be called again
loss = tf.reduce_sum([tf.reduce_sum(l) for l in layer.losses])
variables = [layer.kernel_initializer.mean, layer.kernel_initializer.stddev]
for v in variables:
self.assertIn(v, layer.variables)
# This would fail, since the layer was built inside the tape from the 1st
# epoch, and thus the distribution init ops were inside that tape instead of
# this tape. By using a callable for the variable, this will no longer fail.
grads = tape.gradient(loss, variables)
for grad in grads:
self.assertIsNotNone(grad)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testDenseReparameterizationModel(self):
inputs = tf.to_float(np.random.rand(3, 4, 4, 1))
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(3,
kernel_size=2,
padding="SAME",
activation=tf.nn.relu),
tf.keras.layers.Flatten(),
bayes.DenseReparameterization(2, activation=None),
])
outputs = model(inputs)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(outputs)
self.assertEqual(res.shape, (3, 2))
self.assertLen(model.losses, 1)
@parameterized.named_parameters(
{"testcase_name": "_no_uncertainty", "kernel_initializer": "zeros",
"recurrent_initializer": "orthogonal", "bias_initializer": "zeros",
"all_close": True},
{"testcase_name": "_kernel_uncertainty", "kernel_initializer": None,
"recurrent_initializer": "orthogonal", "bias_initializer": "zeros",
"all_close": False},
{"testcase_name": "_recurrent_uncertainty", "kernel_initializer": "zeros",
"recurrent_initializer": None, "bias_initializer": "zeros",
"all_close": False},
{"testcase_name": "_bias_uncertainty", "kernel_initializer": "zeros",
"recurrent_initializer": "orthogonal", "bias_initializer": None,
"all_close": False},
)
@tf.contrib.eager.run_test_in_graph_and_eager_modes
def testLSTMCellReparameterization(
self, kernel_initializer, recurrent_initializer, bias_initializer,
all_close):
batch_size, timesteps, dim = 5, 3, 12
hidden_size = 10
inputs = tf.to_float(np.random.rand(batch_size, timesteps, dim))
cell = bayes.LSTMCellReparameterization(
hidden_size, kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer)
noise = tf.to_float(np.random.rand(1, hidden_size))
h0, c0 = cell.get_initial_state(inputs)
state = (h0 + noise, c0)
outputs1, _ = cell(inputs[:, 0, :], state)
outputs2, _ = cell(inputs[:, 0, :], state)
cell.sample_weights()
outputs3, _ = cell(inputs[:, 0, :], state)
self.evaluate(tf.global_variables_initializer())
res1, res2, res3 = self.evaluate([outputs1, outputs2, outputs3])
self.assertEqual(res1.shape, (batch_size, hidden_size))
self.assertAllClose(res1, res2)
if all_close:
self.assertAllClose(res1, res3)
else:
self.assertNotAllClose(res1, res3)
cell.get_config()
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testLSTMCellReparameterizationKL(self):
inputs = tf.to_float(np.random.rand(5, 1, 12))
cell = bayes.LSTMCellReparameterization(10)
state = (tf.zeros([1, 10]), tf.zeros([1, 10]))
# Imagine this is the 1st epoch.
with tf.GradientTape() as tape:
cell(inputs[:, 0, :], state) # first call forces a build, inside the tape
cell(inputs[:, 0, :], state) # ensure robustness after multiple calls
cell.get_initial_state(inputs[:, 0, :])
cell(inputs[:, 0, :], state) # ensure robustness after multiple calls
loss = tf.reduce_sum([tf.reduce_sum(l) for l in cell.losses])
variables = [
cell.kernel_initializer.mean, cell.kernel_initializer.stddev,
cell.recurrent_initializer.mean, cell.recurrent_initializer.stddev,
]
for v in variables:
self.assertIn(v, cell.variables)
# This will be fine, since the layer was built inside this tape, and thus
# the distribution init ops were inside this tape.
grads = tape.gradient(loss, variables)
for grad in grads:
self.assertIsNotNone(grad)
# Imagine this is the 2nd epoch.
with tf.GradientTape() as tape:
cell(inputs[:, 0, :], state) # build won't be called again
loss = tf.reduce_sum([tf.reduce_sum(l) for l in cell.losses])
variables = [
cell.kernel_initializer.mean, cell.kernel_initializer.stddev,
cell.recurrent_initializer.mean, cell.recurrent_initializer.stddev,
]
for v in variables:
self.assertIn(v, cell.variables)
# This would fail, since the layer was built inside the tape from the 1st
# epoch, and thus the distribution init ops were inside that tape instead of
# this tape. By using a callable for the variable, this will no longer fail.
grads = tape.gradient(loss, variables)
for grad in grads:
self.assertIsNotNone(grad)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testLSTMCellReparameterizationModel(self):
batch_size, timesteps, dim = 5, 3, 12
hidden_size = 10
inputs = tf.to_float(np.random.rand(batch_size, timesteps, dim))
cell = bayes.LSTMCellReparameterization(hidden_size)
model = tf.keras.Sequential([
tf.keras.layers.RNN(cell, return_sequences=True)
])
outputs1 = model(inputs)
outputs2 = model(inputs)
state = (tf.zeros([1, hidden_size]), tf.zeros([1, hidden_size]))
outputs3 = []
for t in range(timesteps):
out, state = cell(inputs[:, t, :], state)
outputs3.append(out)
outputs3 = tf.stack(outputs3, axis=1)
self.evaluate(tf.global_variables_initializer())
res1, res2, res3 = self.evaluate([outputs1, outputs2, outputs3])
self.assertEqual(res1.shape, (batch_size, timesteps, hidden_size))
self.assertEqual(res3.shape, (batch_size, timesteps, hidden_size))
# NOTE: `cell.sample_weights` should have been called at the beginning of
# each call, so these should be different.
self.assertNotAllClose(res1, res2)
# NOTE: We didn't call `cell.sample_weights` again before computing
# `outputs3`, so the cell should have had the same weights as it did during
# computation of `outputs2`, and thus yielded the same output tensor.
self.assertAllClose(res2, res3)
self.assertLen(model.losses, 2)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testBayesianLinearModel(self):
"""Tests that model makes reasonable predictions."""
np.random.seed(42)
train_batch_size = 5
test_batch_size = 2
num_features = 3
noise_variance = 0.01
coeffs = tf.range(num_features, dtype=tf.float32)
features = tf.to_float(np.random.randn(train_batch_size, num_features))
labels = (tf.tensordot(features, coeffs, [[-1], [0]])
+ noise_variance * tf.to_float(np.random.randn(train_batch_size)))
model = bayes.BayesianLinearModel(noise_variance=noise_variance)
model.fit(features, labels)
test_features = tf.to_float(np.random.randn(test_batch_size, num_features))
test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])
outputs = model(test_features)
test_predictions = outputs.distribution.mean()
test_predictions_variance = outputs.distribution.variance()
[
test_labels_val, test_predictions_val, test_predictions_variance_val,
] = self.evaluate(
[test_labels, test_predictions, test_predictions_variance])
self.assertEqual(test_predictions_val.shape, (test_batch_size,))
self.assertEqual(test_predictions_variance_val.shape, (test_batch_size,))
self.assertAllClose(test_predictions_val, test_labels_val, atol=0.1)
self.assertAllLessEqual(test_predictions_variance_val, noise_variance)
  @tf.contrib.eager.run_test_in_graph_and_eager_modes()
  def testMixtureLogistic(self):
    """Tests output shapes and log-likelihood of the MixtureLogistic layer."""
    batch_size = 3
    features = tf.to_float(np.random.rand(batch_size, 4))
    labels = tf.to_float(np.random.rand(batch_size))
    # A Dense layer feeds a 5-component MixtureLogistic output layer.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(2, activation=None),
        bayes.MixtureLogistic(5),
    ])
    outputs = model(features)
    log_likelihood = tf.reduce_sum(outputs.distribution.log_prob(labels))
    self.evaluate(tf.global_variables_initializer())
    log_likelihood_val, outputs_val = self.evaluate([log_likelihood, outputs])
    # reduce_sum collapses to a scalar.
    self.assertEqual(log_likelihood_val.shape, ())
    # NOTE(review): assumes the summed log-prob is non-positive for these
    # inputs -- confirm against the MixtureLogistic density.
    self.assertLessEqual(log_likelihood_val, 0.)
    self.assertEqual(outputs_val.shape, (batch_size,))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
naousse/odoo | openerp/netsvc.py | 220 | 9506 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import logging.handlers
import os
import platform
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)

def log(logger, level, prefix, msg, depth=None):
    """Log a pretty-printed rendering of ``msg``, one logger call per line.

    The first emitted line carries ``prefix``; continuation lines are padded
    with the same number of spaces so multi-line output stays aligned.
    """
    continuation = ' ' * len(prefix)
    pad = ''
    for chunk in (prefix + pprint.pformat(msg, depth=depth)).split('\n'):
        logger.log(level, pad + chunk)
        pad = continuation
def LocalService(name):
    """
    The openerp.netsvc.LocalService() function is deprecated. It still works
    in two cases: workflows and reports. For workflows, instead of using
    LocalService('workflow'), openerp.workflow should be used (better yet,
    methods on openerp.osv.orm.Model should be used). For reports,
    openerp.report.render_report() should be used (methods on the Model should
    be provided too in the future).
    """
    assert openerp.conf.deprecation.allow_local_service
    _logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)

    if name == 'workflow':
        return openerp.workflow

    if name.startswith('report.'):
        # First try the legacy in-memory report registry...
        report = openerp.report.interface.report_int._reports.get(name)
        if report:
            return report
        else:
            # ...otherwise look the report up in the database bound to the
            # current thread, when one is known.
            dbname = getattr(threading.currentThread(), 'dbname', None)
            if dbname:
                registry = openerp.modules.registry.RegistryManager.get(dbname)
                with registry.cursor() as cr:
                    return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
# Server source root; used to log paths relative to the installation.
path_prefix = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))

class PostgreSQLHandler(logging.Handler):
    """ PostgreSQL Logging Handler will store logs in the database, by default
    the current database, can be set using --log-db=DBNAME
    """
    def emit(self, record):
        # Resolve the target database: --log-db wins unless it is the '%d'
        # placeholder, in which case the current thread's database is used.
        ct = threading.current_thread()
        ct_db = getattr(ct, 'dbname', None)
        dbname = tools.config['log_db'] if tools.config['log_db'] and tools.config['log_db'] != '%d' else ct_db
        if not dbname:
            return
        # Best effort: swallow any error and mute sql_db's own logging so a
        # failing INSERT cannot recursively generate more log records.
        with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
            cr.autocommit(True)
            msg = tools.ustr(record.msg)
            if record.args:
                msg = msg % record.args
            traceback = getattr(record, 'exc_text', '')
            if traceback:
                msg = "%s\n%s" % (msg, traceback)
            # we do not use record.levelname because it may have been changed by ColoredFormatter.
            levelname = logging.getLevelName(record.levelno)
            val = ('server', ct_db, record.name, levelname, msg, record.pathname[len(path_prefix)+1:], record.lineno, record.funcName)
            cr.execute("""
                INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
                VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
            """, val)
# ANSI color indices (offsets added below select foreground/background).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
# The background is set with 40 plus the number of the color, and the
# foreground with 30.
# These are the escape sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# "<fg seq><bg seq>%s<reset>"; the remaining %s is filled by ColoredFormatter.
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
# Mapping: logging level -> (foreground, background) color indices.
LEVEL_COLOR_MAPPING = {
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.WARNING: (YELLOW, DEFAULT),
    logging.ERROR: (RED, DEFAULT),
    logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
    """logging.Formatter that injects ``pid`` and ``dbname`` record fields.

    ``dbname`` is read from the handling thread's ``dbname`` attribute (set
    elsewhere by the server), falling back to '?' when absent, so format
    strings may always reference %(pid)s and %(dbname)s.
    """
    def format(self, record):
        record.pid = os.getpid()
        # Consistency fix: use threading.current_thread(), the spelling
        # already used by PostgreSQLHandler.emit(); currentThread() is a
        # deprecated alias.
        record.dbname = getattr(threading.current_thread(), 'dbname', '?')
        return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
    """DBFormatter that wraps the record's levelname in ANSI color codes."""
    def format(self, record):
        foreground, background = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
        colored = COLOR_PATTERN % (30 + foreground, 40 + background, record.levelname)
        record.levelname = colored
        return DBFormatter.format(self, record)
# Guard so init_logger() configures the root logger only once.
_logger_init = False

def init_logger():
    """Configure root logging: handler choice, formatting, colors, levels.

    Driven by tools.config (syslog / logfile / log_db / log_level /
    log_handler). Idempotent: subsequent calls return immediately.
    """
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    # Level 25 (between INFO=20 and WARNING=30) is reported under "INFO".
    logging.addLevelName(25, "INFO")

    from tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'

    if tools.config['syslog']:
        # SysLog Handler (platform-specific transport)
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        # Syslog supplies its own timestamp, so use a compact format.
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                # Daily rotation, keeping 30 days of history.
                handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                # Reopens the file if it is moved/removed (e.g. by logrotate).
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
            handler = logging.StreamHandler(sys.stdout)
    else:
        # Normal Handler on standard output
        handler = logging.StreamHandler(sys.stdout)

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    # Colorize output only when writing to a real terminal.
    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)

    logging.getLogger().addHandler(handler)

    if tools.config['log_db']:
        # Also mirror log records into the ir_logging database table.
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        # log_db_level may be a symbolic name or a raw numeric level.
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    # Later entries override earlier ones ("logger.name:LEVEL" items).
    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
# Baseline per-logger levels, applied before --log-level pseudo-configs and
# explicit --log-handler entries (format: "logger.name:LEVEL").
DEFAULT_LOG_CONFIGURATION = [
    'openerp.workflow.workitem:WARNING',
    'openerp.http.rpc.request:INFO',
    'openerp.http.rpc.response:INFO',
    'openerp.addons.web.http:INFO',
    'openerp.sql_db:INFO',
    ':INFO',
]
# Expansion of the --log-level shortcut values into per-logger settings.
PSEUDOCONFIG_MAPPER = {
    'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
    'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
    'debug': ['openerp:DEBUG'],
    'debug_sql': ['openerp.sql_db:DEBUG'],
    'info': [],
    'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
    'error': ['openerp:ERROR', 'werkzeug:ERROR'],
    'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chongtianfeiyu/kbengine | kbe/src/lib/python/Lib/test/test_pkgimport.py | 84 | 2787 | import os
import sys
import shutil
import string
import random
import tempfile
import unittest
from importlib.util import cache_from_source
from test.support import run_unittest, create_empty_file
class TestImport(unittest.TestCase):
    """Exercise package-import error semantics using a throwaway package."""

    def __init__(self, *args, **kw):
        # Build a package name guaranteed not to collide with anything
        # already imported in this process.
        self.package_name = 'PACKAGE_'
        while self.package_name in sys.modules:
            # BUGFIX: random.choose() does not exist; random.choice() is the
            # correct API. The broken spelling only mattered on the rare
            # collision path, where it raised AttributeError.
            self.package_name += random.choice(string.ascii_letters)
        self.module_name = self.package_name + '.foo'
        unittest.TestCase.__init__(self, *args, **kw)

    def remove_modules(self):
        # Drop the generated package/module so each test starts clean.
        for module_name in (self.package_name, self.module_name):
            if module_name in sys.modules:
                del sys.modules[module_name]

    def setUp(self):
        # Create an importable on-disk package under a temp directory.
        self.test_dir = tempfile.mkdtemp()
        sys.path.append(self.test_dir)
        self.package_dir = os.path.join(self.test_dir,
                                        self.package_name)
        os.mkdir(self.package_dir)
        create_empty_file(os.path.join(self.package_dir, '__init__.py'))
        self.module_path = os.path.join(self.package_dir, 'foo.py')

    def tearDown(self):
        shutil.rmtree(self.test_dir)
        self.assertNotEqual(sys.path.count(self.test_dir), 0)
        sys.path.remove(self.test_dir)
        self.remove_modules()

    def rewrite_file(self, contents):
        # Remove stale cached bytecode so the new source is recompiled.
        compiled_path = cache_from_source(self.module_path)
        if os.path.exists(compiled_path):
            os.remove(compiled_path)
        with open(self.module_path, 'w') as f:
            f.write(contents)

    def test_package_import__semantics(self):
        # Generate a couple of broken modules to try importing.

        # ...try loading the module when there's a SyntaxError
        self.rewrite_file('for')
        try: __import__(self.module_name)
        except SyntaxError: pass
        else: raise RuntimeError('Failed to induce SyntaxError') # self.fail()?
        self.assertNotIn(self.module_name, sys.modules)
        self.assertFalse(hasattr(sys.modules[self.package_name], 'foo'))

        # ...make up a variable name that isn't bound in __builtins__
        var = 'a'
        while var in dir(__builtins__):
            # BUGFIX: random.choice(), not the nonexistent random.choose().
            var += random.choice(string.ascii_letters)

        # ...make a module that just contains that
        self.rewrite_file(var)
        try: __import__(self.module_name)
        except NameError: pass
        else: raise RuntimeError('Failed to induce NameError.')

        # ...now change the module so that the NameError doesn't
        # happen
        self.rewrite_file('%s = 1' % var)
        module = __import__(self.module_name).foo
        self.assertEqual(getattr(module, var), 1)
def test_main():
    # Delegate to test.support's runner for regrtest integration.
    run_unittest(TestImport)

if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
zero-db/pystunnel | pystunnel/pystunnel.py | 1 | 6468 | """Start, stop, and check a stunnel process"""
from __future__ import print_function
import os
import sys
import re
import subprocess
import getopt
from six.moves import input
class StunnelConfig:
    """Extract the pid-file path from a stunnel configuration file.

    Only the ``pid = ...`` setting is read; all other configuration lines
    are ignored. ``pid_file`` stays None when the file is missing or has
    no such setting.
    """

    _pid_file_re = re.compile(r"pid\s*=\s*(.*)")

    def __init__(self, config_file):
        self.config_file = config_file
        self.pid_file = None
        self._read_config()

    def _read_config(self):
        """Scan the configuration for the first ``pid = ...`` line."""
        if not (self.config_file and os.path.isfile(self.config_file)):
            return
        with open(self.config_file, "rt") as fp:
            content = fp.readlines()
        for raw_line in content:
            match = self._pid_file_re.match(raw_line.strip())
            if match:
                self.pid_file = match.group(1)
                return
class Stunnel(StunnelConfig):
    """Start and stop a stunnel instance given a configuration file.

    The config file must contain a ``pid = /path/to/pid-file`` line so the
    running process can be located again.

    Example:

        from pystunnel import Stunnel
        stunnel = Stunnel("/path/to/config-file")
        rc = stunnel.start()
        print("stunnel started with rc", rc)
        if stunnel.check() == 0:
            print("stunnel is running with pid", stunnel.getpid())
        else:
            print("stunnel is not running")
        rc = stunnel.stop()
        print("stunnel stopped with rc", rc)

    Return codes: 0 means OK, 1 or higher means error.
    """

    def __init__(self, config_file):
        StunnelConfig.__init__(self, config_file)

    def start(self):
        """Launch stunnel unless an instance is already running."""
        if self.check() != 0:
            try:
                quoted = '"%s"' % self.config_file if self.config_file else ""
                return subprocess.call("stunnel %s" % quoted, shell=True)
            except KeyboardInterrupt:
                pass
        return 1

    def stop(self):
        """Terminate the running instance by signalling its recorded PID."""
        if self.check() == 0:
            try:
                return subprocess.call("kill %d" % self.getpid(), shell=True)
            except KeyboardInterrupt:
                pass
        return 1

    def check(self):
        """Return 0 when a PID can be read from the pid file, else 1."""
        return 0 if self.getpid() >= 0 else 1

    def getpid(self):
        """Return the PID recorded in the pid file, or -1 if unavailable."""
        result = -1
        if self.pid_file and os.path.isfile(self.pid_file):
            with open(self.pid_file, "rt") as fp:
                raw = fp.read(16)
            if raw:
                try:
                    result = int(raw, 10)
                except ValueError:
                    pass
        return result
class PyStunnel(Stunnel):
    """Usage: pystunnel [options] [command]

    Start and stop a stunnel instance from the command line

    Options:
      -c config-file, --stunnel-config=config-file
                        Use config-file to drive the stunnel instance.
                        The stunnel configuration must specify a PID file.
      -h, --help        Print this help message and exit.
      -v, --version     Print the version string and exit.

    Commands:
      start             Start the stunnel instance.
      stop              Stop the stunnel instance.
      check             Check if stunnel is running.
      getpid            Return PID of running stunnel instance.

    If the command is omitted, pystunnel enters an interactive shell.
    """
    # NOTE: this class docstring doubles as the --help text printed by
    # parse_args(), so its wording is user-facing.

    def __init__(self, args=None):
        # Start without a config file; parse_args() may re-initialize the
        # base class once -c/--stunnel-config is seen.
        Stunnel.__init__(self, None)
        self.args = args

    def parse_args(self, args):
        """Process command line options; return the remaining arguments."""
        try:
            options, args = getopt.gnu_getopt(args, "c:hv", ("stunnel-config=", "help", "version"))
        except getopt.GetoptError as e:
            print(e.msg)
            sys.exit(1)
        for name, value in options:
            if name in ("-c", "--stunnel-config"):
                # Re-run base initialization so the pid file is re-read
                # from the given configuration file.
                Stunnel.__init__(self, value)
            elif name in ("-h", "--help"):
                print(self.__doc__)
                sys.exit(0)
            elif name in ("-v", "--version"):
                print("pystunnel", get_version() or "(unknown version)")
                sys.exit(0)
        return args

    def perform(self, command):
        """Execute one known command, print the outcome; return 0 on success."""
        rc = 0
        if command == "start":
            rc = self.start()
            if rc == 0:
                print("started")
            elif self.check() == 0:
                print("already started")
            else:
                print("not started!")
        elif command == "stop":
            rc = self.stop()
            if rc == 0:
                print("stopped")
            elif self.check() == 1:
                print("already stopped")
            else:
                print("not stopped!")
        elif command == "check":
            rc = self.check()
            if rc == 0:
                print("running")
            else:
                print("not running")
        elif command == "getpid":
            pid = self.getpid()
            print(pid)
            rc = 0 if pid >= 0 else 1
        return rc

    def single(self, command):
        """Run a single command given on the command line."""
        rc = 0
        if command in ("start", "stop", "check", "getpid"):
            rc = self.perform(command)
        else:
            print("valid commands: start, stop, check, getpid")
            rc = 1
        return rc

    def loop(self):
        """Interactive shell: read commands until EOF, Ctrl-C, or quit."""
        rc = 0
        enable_readline()
        while True:
            try:
                command = input("pystunnel> ")
                command = command.strip()
            except (EOFError, KeyboardInterrupt):
                print()
                break;
            if command in ("start", "stop", "check", "getpid"):
                rc = self.perform(command)
            elif command in ("q", "quit"):
                break;
            elif command == "":
                pass
            else:
                print("valid commands: start, stop, check, getpid, quit")
                rc = 1
        return rc

    def run(self):
        """Entry point: dispatch to single-command or interactive mode."""
        args = self.parse_args(self.args)
        if args:
            return self.single(args[0])
        else:
            return self.loop()
def get_version():
    """Return the installed pystunnel distribution version, or "".

    Robustness fix: pkg_resources.get_distribution() raises
    DistributionNotFound when the package is not installed; previously only
    a missing pkg_resources module was handled, so the version query could
    crash -v/--version. Any lookup failure now yields the empty string.
    """
    try:
        import pkg_resources
        return pkg_resources.get_distribution("pystunnel").version
    except Exception:
        return ""
def enable_readline():
    """Best-effort activation of readline line editing; no-op if absent."""
    try:
        __import__("readline")
    except ImportError:
        pass
def main(args=None):
    """Run the pystunnel command line tool and return its exit status."""
    argv = sys.argv[1:] if args is None else args
    try:
        return PyStunnel(argv).run()
    except SystemExit as exc:
        # parse_args() exits via sys.exit(); surface its code as a return.
        return exc.code

if __name__ == "__main__":
    sys.exit(main())
| agpl-3.0 |
dsfsdgsbngfggb/odoo | addons/hr_timesheet/wizard/__init__.py | 381 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sign_in_out
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
its-dirg/CMservice | tests/cmservice/service/test_views.py | 1 | 1442 | from unittest.mock import patch
from cmservice.service.views import find_requester_name, render_consent
class TestFindRequesterName(object):
    """Tests for find_requester_name's language selection and fallbacks."""

    def test_should_find_exact_match(self):
        names = [{'lang': 'sv', 'text': 'å ä ö'}, {'lang': 'en', 'text': 'aa ae oo'}]
        assert find_requester_name(names, 'sv') == names[0]['text']

    def test_should_fallback_to_english_if_available(self):
        names = [{'lang': 'sv', 'text': 'å ä ö'}, {'lang': 'en', 'text': 'aa ae oo'}]
        assert find_requester_name(names, 'unknown') == names[1]['text']

    def test_should_fallback_to_first_entry_if_english_is_not_available(self):
        names = [{'lang': 'sv', 'text': 'å ä ö'}, {'lang': 'no', 'text': 'Æ Ø Å'}]
        assert find_requester_name(names, 'unknown') == names[0]['text']
class TestRenderConsent(object):
    """Tests for render_consent's split of locked vs. released claims."""

    def test_locked_attr_not_contained_in_released_claims(self):
        expected_locked = {'bar': 'test'}
        expected_released = {'abc': 'xyz'}
        with patch('cmservice.service.views.render_template') as render_mock:
            render_consent('en', 'test_requester', ['foo', 'bar'],
                           {'bar': 'test', 'abc': 'xyz'}, 'test_state',
                           [3, 6], True)
            call_kwargs = render_mock.call_args[1]
            assert call_kwargs['locked_claims'] == expected_locked
            assert call_kwargs['released_claims'] == expected_released
| apache-2.0 |
arantebillywilson/python-snippets | microblog/flask/lib/python3.5/site-packages/wtforms/ext/sqlalchemy/orm.py | 75 | 10671 | """
Tools for generating forms based on SQLAlchemy models.
"""
from __future__ import unicode_literals
import inspect
from wtforms import fields as f
from wtforms import validators
from wtforms.form import Form
from .fields import QuerySelectField, QuerySelectMultipleField
__all__ = (
'model_fields', 'model_form',
)
def converts(*args):
    """Decorator tagging a converter method with the type names it handles.

    The names are stored on the function as ``_converter_for`` and later
    collected by ModelConverterBase.__init__.
    """
    def mark(func):
        func._converter_for = frozenset(args)
        return func
    return mark
class ModelConversionError(Exception):
    """Raised when a model property cannot be converted to a form field."""
    def __init__(self, message):
        super(ModelConversionError, self).__init__(message)
class ModelConverterBase(object):
    """Base class for SQLAlchemy-property-to-WTForms-field converters.

    Converter methods are discovered via the ``_converter_for`` marker set
    by the @converts decorator and registered by type name.
    """
    def __init__(self, converters, use_mro=True):
        self.use_mro = use_mro
        if not converters:
            converters = {}
        # Collect every method tagged with @converts into the registry,
        # keyed by the type names it declares.
        for name in dir(self):
            obj = getattr(self, name)
            if hasattr(obj, '_converter_for'):
                for classname in obj._converter_for:
                    converters[classname] = obj
        self.converters = converters

    def convert(self, model, mapper, prop, field_args, db_session=None):
        """Convert one mapped property to a field, or None if unsupported.

        Column properties are matched by (module-qualified) type name;
        relationship properties are matched by direction name and need a
        db_session to build their query factory.
        """
        if not hasattr(prop, 'columns') and not hasattr(prop, 'direction'):
            # Neither a column nor a relationship: nothing to convert.
            return
        elif not hasattr(prop, 'direction') and len(prop.columns) != 1:
            raise TypeError(
                'Do not know how to convert multiple-column properties currently'
            )

        kwargs = {
            'validators': [],
            'filters': [],
            'default': None,
        }

        converter = None
        column = None
        types = None

        if not hasattr(prop, 'direction'):
            column = prop.columns[0]
            # Support sqlalchemy.schema.ColumnDefault, so users can benefit
            # from setting defaults for fields, e.g.:
            #   field = Column(DateTimeField, default=datetime.utcnow)
            default = getattr(column, 'default', None)

            if default is not None:
                # Only actually change default if it has an attribute named
                # 'arg' that's callable.
                callable_default = getattr(default, 'arg', None)

                if callable_default is not None:
                    # ColumnDefault(val).arg can be also a plain value
                    default = callable_default(None) if callable(callable_default) else callable_default

                kwargs['default'] = default

            if column.nullable:
                kwargs['validators'].append(validators.Optional())
            else:
                kwargs['validators'].append(validators.Required())

            # With use_mro, fall back through the column type's base classes
            # so subclassed types still find a converter.
            if self.use_mro:
                types = inspect.getmro(type(column.type))
            else:
                types = [type(column.type)]

            # First pass: match on the module-qualified type name (with the
            # leading "sqlalchemy." prefix stripped).
            for col_type in types:
                type_string = '%s.%s' % (col_type.__module__, col_type.__name__)
                if type_string.startswith('sqlalchemy'):
                    type_string = type_string[11:]

                if type_string in self.converters:
                    converter = self.converters[type_string]
                    break
            else:
                # Second pass: match on the bare class name.
                for col_type in types:
                    if col_type.__name__ in self.converters:
                        converter = self.converters[col_type.__name__]
                        break
                else:
                    raise ModelConversionError('Could not find field converter for %s (%r).' % (prop.key, types[0]))
        else:
            # We have a property with a direction (a relationship).
            if not db_session:
                raise ModelConversionError("Cannot convert field %s, need DB session." % prop.key)

            foreign_model = prop.mapper.class_

            # The relationship is only optional if every local column of the
            # join is nullable.
            nullable = True
            for pair in prop.local_remote_pairs:
                if not pair[0].nullable:
                    nullable = False

            kwargs.update({
                'allow_blank': nullable,
                'query_factory': lambda: db_session.query(foreign_model).all()
            })

            converter = self.converters[prop.direction.name]

        # Caller-supplied field_args override the computed defaults.
        if field_args:
            kwargs.update(field_args)

        return converter(
            model=model,
            mapper=mapper,
            prop=prop,
            column=column,
            field_args=kwargs
        )
class ModelConverter(ModelConverterBase):
    """Default converter mapping SQLAlchemy column types and relationship
    directions to concrete WTForms fields.
    """
    def __init__(self, extra_converters=None, use_mro=True):
        super(ModelConverter, self).__init__(extra_converters, use_mro=use_mro)

    @classmethod
    def _string_common(cls, column, field_args, **extra):
        # Shared handling for string-like columns: enforce declared length.
        if column.type.length:
            field_args['validators'].append(validators.Length(max=column.type.length))

    @converts('String', 'Unicode')
    def conv_String(self, field_args, **extra):
        self._string_common(field_args=field_args, **extra)
        return f.TextField(**field_args)

    @converts('types.Text', 'UnicodeText', 'types.LargeBinary', 'types.Binary', 'sql.sqltypes.Text')
    def conv_Text(self, field_args, **extra):
        self._string_common(field_args=field_args, **extra)
        return f.TextAreaField(**field_args)

    @converts('Boolean')
    def conv_Boolean(self, field_args, **extra):
        return f.BooleanField(**field_args)

    @converts('Date')
    def conv_Date(self, field_args, **extra):
        return f.DateField(**field_args)

    @converts('DateTime')
    def conv_DateTime(self, field_args, **extra):
        return f.DateTimeField(**field_args)

    @converts('Enum')
    def conv_Enum(self, column, field_args, **extra):
        # Offer each enum value verbatim unless the caller supplied choices.
        if 'choices' not in field_args:
            field_args['choices'] = [(e, e) for e in column.type.enums]
        return f.SelectField(**field_args)

    @converts('Integer', 'SmallInteger')
    def handle_integer_types(self, column, field_args, **extra):
        # Unsigned columns get a non-negative range check.
        unsigned = getattr(column.type, 'unsigned', False)
        if unsigned:
            field_args['validators'].append(validators.NumberRange(min=0))
        return f.IntegerField(**field_args)

    @converts('Numeric', 'Float')
    def handle_decimal_types(self, column, field_args, **extra):
        # Use the column's declared scale for the number of decimal places.
        places = getattr(column.type, 'scale', 2)
        if places is not None:
            field_args['places'] = places
        return f.DecimalField(**field_args)

    @converts('databases.mysql.MSYear', 'dialects.mysql.base.YEAR')
    def conv_MSYear(self, field_args, **extra):
        # MySQL YEAR columns accept 1901..2155.
        field_args['validators'].append(validators.NumberRange(min=1901, max=2155))
        return f.TextField(**field_args)

    @converts('databases.postgres.PGInet', 'dialects.postgresql.base.INET')
    def conv_PGInet(self, field_args, **extra):
        field_args.setdefault('label', 'IP Address')
        field_args['validators'].append(validators.IPAddress())
        return f.TextField(**field_args)

    @converts('dialects.postgresql.base.MACADDR')
    def conv_PGMacaddr(self, field_args, **extra):
        field_args.setdefault('label', 'MAC Address')
        field_args['validators'].append(validators.MacAddress())
        return f.TextField(**field_args)

    @converts('dialects.postgresql.base.UUID')
    def conv_PGUuid(self, field_args, **extra):
        field_args.setdefault('label', 'UUID')
        field_args['validators'].append(validators.UUID())
        return f.TextField(**field_args)

    @converts('MANYTOONE')
    def conv_ManyToOne(self, field_args, **extra):
        return QuerySelectField(**field_args)

    @converts('MANYTOMANY', 'ONETOMANY')
    def conv_ManyToMany(self, field_args, **extra):
        return QuerySelectMultipleField(**field_args)
def model_fields(model, db_session=None, only=None, exclude=None,
                 field_args=None, converter=None, exclude_pk=False,
                 exclude_fk=False):
    """
    Generate a dictionary of fields for a given SQLAlchemy model.

    See `model_form` docstring for description of parameters.
    """
    mapper = model._sa_class_manager.mapper
    converter = converter or ModelConverter()
    field_args = field_args or {}
    properties = []

    # Collect mapped properties, honoring the PK/FK exclusion flags for
    # plain column properties.
    for prop in mapper.iterate_properties:
        if getattr(prop, 'columns', None):
            if exclude_fk and prop.columns[0].foreign_keys:
                continue
            elif exclude_pk and prop.columns[0].primary_key:
                continue
        properties.append((prop.key, prop))
    # ((p.key, p) for p in mapper.iterate_properties)

    # Narrow the property list: 'only' wins when both filters are given.
    if only:
        properties = (x for x in properties if x[0] in only)
    elif exclude:
        properties = (x for x in properties if x[0] not in exclude)

    field_dict = {}
    for name, prop in properties:
        field = converter.convert(
            model, mapper, prop,
            field_args.get(name), db_session
        )
        # Unconvertible properties are silently skipped.
        if field is not None:
            field_dict[name] = field

    return field_dict
def model_form(model, db_session=None, base_class=Form, only=None,
               exclude=None, field_args=None, converter=None, exclude_pk=True,
               exclude_fk=True, type_name=None):
    """
    Create a wtforms Form for a given SQLAlchemy model class::

        from wtforms.ext.sqlalchemy.orm import model_form
        from myapp.models import User
        UserForm = model_form(User)

    :param model:
        A SQLAlchemy mapped model class.
    :param db_session:
        An optional SQLAlchemy Session.
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param only:
        An optional iterable with the property names that should be included in
        the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments used
        to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties. If
        not set, ``ModelConverter`` is used.
    :param exclude_pk:
        An optional boolean to force primary key exclusion.
    :param exclude_fk:
        An optional boolean to force foreign keys exclusion.
    :param type_name:
        An optional string to set returned type name.
    """
    if not hasattr(model, '_sa_class_manager'):
        raise TypeError('model must be a sqlalchemy mapped model')

    # Default the generated class name to e.g. "UserForm".
    type_name = type_name or str(model.__name__ + 'Form')
    field_dict = model_fields(
        model, db_session, only, exclude, field_args, converter,
        exclude_pk=exclude_pk, exclude_fk=exclude_fk
    )
    # Build the Form subclass dynamically with the generated fields.
    return type(type_name, (base_class, ), field_dict)
| mit |
CountZer0/PipelineConstructionSet | python/maya/site-packages/pymel-1.0.5/pymel/util/external/ply/yacc.py | 319 | 128492 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility shim: Python 2 exposes a function's code object as
# ``func.func_code``; Python 3 renamed it to ``func.__code__``.
if sys.version_info[0] >= 3:
    def func_code(f):
        return f.__code__
else:
    def func_code(f):
        return f.func_code
# Largest native integer: sys.maxint on Python 2, sys.maxsize on Python 3.
MAXINT = getattr(sys, "maxint", sys.maxsize)
# Python 2.x/3.0 compatibility: the lex module lives at a different import
# path depending on the major version.
def load_ply_lex():
    """Import and return the PLY lex module for the running Python."""
    if sys.version_info[0] >= 3:
        import ply.lex as lex
    else:
        import lex
    return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal stand-in for a logging.Logger.

    PLY uses this by default to emit things such as the parser.out
    file.  Messages are %-formatted with *args and written, one per
    line, to the supplied file-like object.  info() aliases debug(),
    and critical() also aliases debug() (no prefix is added).
    """
    def __init__(self, f):
        self.f = f
    def debug(self, msg, *args, **kwargs):
        self.f.write("%s\n" % (msg % args))
    info = debug
    def warning(self, msg, *args, **kwargs):
        self.f.write("WARNING: %s\n" % (msg % args))
    def error(self, msg, *args, **kwargs):
        self.f.write("ERROR: %s\n" % (msg % args))
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger used when no output should be generated.

    Every attribute access returns the logger itself (via the
    __getattribute__ override), and calling it is also a no-op that
    returns itself, so arbitrary chained logger usage is swallowed.
    """
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Base exception for all yacc-related errors."""
    pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Format a parse result for debug-mode output.

    Produces "<TypeName @ 0xADDR> (repr)" where the repr is flattened
    to one line (re-repr'ed if it contains newlines) and truncated to
    the module-level resultlimit.
    """
    text = repr(r)
    if "\n" in text:
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Format one parser-stack entry for debug-mode output.

    Short values (flattened repr under 16 chars) are shown literally;
    anything longer collapses to "<TypeName @ 0xADDR>".
    """
    text = repr(r)
    if "\n" in text:
        text = repr(text)
    if len(text) < 16:
        return text
    return "<%s @ 0x%x>" % (type(r).__name__, id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Grammar symbol held on the parsing stack.

    Attributes such as .type, .value, .lineno/.endlineno and
    .lexpos/.endlexpos are attached dynamically by the parser; both
    str() and repr() display the symbol's type name.
    """
    def __str__(self):
        return self.type
    def __repr__(self):
        return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols passed to grammar rule functions.

    Index lookup and assignment operate on the .value attribute of the
    underlying YaccSymbol objects; negative indices reach into the
    parser's symbol stack.  lineno()/linespan() and lexpos()/lexspan()
    expose the positional information recorded on the symbols.
    """
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None
    def __getitem__(self, n):
        # Negative indices refer to the parser's symbol stack
        return self.slice[n].value if n >= 0 else self.stack[n].value
    def __setitem__(self, n, v):
        self.slice[n].value = v
    def __getslice__(self, i, j):
        # Python 2 slicing protocol
        return [sym.value for sym in self.slice[i:j]]
    def __len__(self):
        return len(self.slice)
    def lineno(self, n):
        """Starting line number of item n (0 if not recorded)."""
        return getattr(self.slice[n], "lineno", 0)
    def set_lineno(self, n, lineno):
        """Record a starting line number on item n."""
        self.slice[n].lineno = lineno
    def linespan(self, n):
        """Return (startline, endline) for item n."""
        first = getattr(self.slice[n], "lineno", 0)
        last = getattr(self.slice[n], "endlineno", first)
        return first, last
    def lexpos(self, n):
        """Starting lexing position of item n (0 if not recorded)."""
        return getattr(self.slice[n], "lexpos", 0)
    def lexspan(self, n):
        """Return (lexpos, endlexpos) for item n."""
        first = getattr(self.slice[n], "lexpos", 0)
        last = getattr(self.slice[n], "endlexpos", first)
        return first, last
    def error(self):
        """Signal a syntax error from within a grammar rule."""
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The LR parsing engine.

    Drives the symbol and state stacks against the action and goto
    tables built during table construction.  The parsing loop exists
    in three hand-duplicated copies: parsedebug() (with debug
    logging), parseopt() (optimized, with position tracking) and
    parseopt_notrack() (optimized, without tracking).  Only
    parsedebug() should be edited directly; changes are then copied
    to the other two.
    """
    def __init__(self,lrtab,errorf):
        # lrtab supplies the productions and the LR action/goto tables;
        # errorf is the user's p_error() function (or None).
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
    def errok(self):
        """Signal that error recovery is complete (exposed to p_error())."""
        self.errorok = 1
    def restart(self):
        """Reset the parser stacks to the initial state (exposed to p_error())."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Dispatch to the appropriate copy of the parsing loop.

        Debug mode (or yaccdevel) selects parsedebug(); otherwise the
        tracking flag chooses between parseopt() and parseopt_notrack().
        """
        if debug or yaccdevel:
            if isinstance(debug,int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
        elif tracking:
            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
        else:
            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse(). All changes made to the
    # parsing engine should be made here. For the non-debugging version,
    # copy this code to a method parseopt() and delete all of the sections
    # enclosed in:
    #
    # #--! DEBUG
    # statements
    # #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
        """Debugging version of the parsing loop.

        All engine changes are made here first and then copied to
        parseopt()/parseopt_notrack().
        """
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # --! DEBUG
        debug.info("PLY: PARSE DEBUG START")
        # --! DEBUG
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = "$end"
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            # --! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            # --! DEBUG
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = "$end"
            # --! DEBUG
            debug.debug('Stack : %s',
                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # --! DEBUG
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    # --! DEBUG
                    debug.debug("Action : Shift and goto state %s", t)
                    # --! DEBUG
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    # --! DEBUG
                    if plen:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                    else:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                    # --! DEBUG
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n,"value",None)
                    # --! DEBUG
                    debug.info("Done : Returning %s", format_result(result))
                    debug.info("PLY: PARSE DEBUG END")
                    # --! DEBUG
                    return result
            if t == None:
                # --! DEBUG
                debug.error('Error : %s',
                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # --! DEBUG
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == "$end":
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != "$end":
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == "$end":
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1] # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
    # Edit the debug version above, then copy any modifications to the method
    # below while removing #--! DEBUG sections.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized parsing loop with position tracking.

        DO NOT EDIT DIRECTLY -- edit parsedebug() and copy the changes
        here, dropping the #--! DEBUG sections.
        """
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1] # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
    # code in the #--! TRACKING sections
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized parsing loop without position tracking.

        DO NOT EDIT DIRECTLY -- copy parseopt() here and remove the
        #--! TRACKING sections.
        """
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                            continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1] # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# regex matching identifiers (note: deliberately permissive -- it also
# accepts leading digits and '-', unlike Python identifiers)
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single grammar rule, e.g. 'expr : expr PLUS term'.

    Attributes:
      name     - left-hand-side nonterminal name (e.g. 'expr')
      prod     - tuple of right-hand-side symbols
      prec     - precedence tuple such as ('right', 0)
      number   - production number
      func     - name of the reduction function (resolved by bind())
      file     - file where the reduction function is defined
      line     - line number of the reduction function
      len      - number of right-hand-side symbols
      usyms    - unique right-hand-side symbols, in first-appearance order
    """
    reduced = 0
    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence
        # Cached length of the right-hand side, used during table construction
        self.len = len(self.prod)
        # Unique right-hand-side symbols, preserving first-appearance order
        unique = []
        for symbol in self.prod:
            if symbol not in unique:
                unique.append(symbol)
        self.usyms = unique
        # All LR items for this production (filled in later)
        self.lr_items = []
        self.lr_next = None
        # Human-readable form, e.g. "expr -> expr PLUS term"
        self.str = ("%s -> %s" % (self.name, " ".join(self.prod))
                    if self.prod else "%s -> <empty>" % self.name)
    def __str__(self):
        return self.str
    def __repr__(self):
        return "Production(%s)" % self
    def __len__(self):
        return len(self.prod)
    def __nonzero__(self):
        # Python 2 truth protocol: a production is always truthy
        return 1
    def __getitem__(self, index):
        return self.prod[index]
    def lr_item(self, n):
        """Return the LRItem with the dot at position n, or None past the end."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Precompute the productions immediately following the dot.
        # Hack: relies on the global Prodnames mapping set up elsewhere.
        try:
            item.lr_after = Prodnames[item.prod[n+1]]
        except (IndexError, KeyError):
            item.lr_after = []
        try:
            item.lr_before = item.prod[n-1]
        except IndexError:
            item.lr_before = None
        return item
    def bind(self, pdict):
        """Resolve self.func (a name) to a callable found in pdict."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Lightweight stand-in for Production when tables are read from files.

    Carries only the fields the LR parsing engine actually uses, plus a
    little debugging information.
    """
    def __init__(self, str, name, len, func, file, line):
        self.str = str
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
    def __str__(self):
        return self.str
    def __repr__(self):
        return "MiniProduction(%s)" % self.str
    def bind(self, pdict):
        """Resolve self.func (a name) to a callable found in pdict."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A production with a parse-position marker, e.g. 'expr -> expr . PLUS term'.

    The "." in prod marks how much of the rule has been recognized.
    lr_index is the position of the dot; lookaheads holds LALR
    lookahead symbols; len counts the prod entries including the dot.
    """
    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        # Build the right-hand side with the "." inserted at position n
        rhs = list(p.prod)
        rhs.insert(n, ".")
        self.prod = tuple(rhs)
        self.len = len(self.prod)
        self.usyms = p.usyms
    def __str__(self):
        if self.prod:
            return "%s -> %s" % (self.name, " ".join(self.prod))
        return "%s -> <empty>" % self.name
    def __repr__(self):
        return "LRItem(" + str(self) + ")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost terminal in symbols, or None if there is none.

    Used by add_production() when resolving default precedence.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
    """Raised when a problem is detected in the grammar specification."""
    pass
class Grammar(object):
    """Contents of a grammar specification.

    Stores the productions, terminal/nonterminal usage maps, and precedence
    rules, together with computed properties (FIRST sets, FOLLOW sets, LR
    items) used by the LR table generation process.
    """
    def __init__(self,terminals):
        """Create an empty grammar over the given iterable of terminal names."""
        self.Productions = [None] # A list of all of the productions. The first
                                  # entry is always reserved for the purpose of
                                  # building an augmented grammar

        self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
                             # productions of that nonterminal.

        self.Prodmap = { } # A dictionary that is only used to detect duplicate
                           # productions.

        self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
                             # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        # 'error' is always a valid terminal (used for error recovery rules).
        self.Terminals['error'] = []

        self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
                                # of rule numbers where they are used.

        self.First = { } # A dictionary of precomputed FIRST(x) symbols

        self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
                              # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = { } # Precedence rules that were actually used by the grammar.
                                  # This is only used to provide error checking and to generate
                                  # a warning about unused precedence rules.

        self.Start = None # Starting symbol for the grammar

    def __len__(self):
        """Number of productions, including the reserved slot 0."""
        return len(self.Productions)

    def __getitem__(self,index):
        """Return production *index* (slot 0 is the augmented S' rule or None)."""
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'. level is a numeric level.
    #
    # -----------------------------------------------------------------------------
    def set_precedence(self,term,assoc,level):
        """Record (assoc, level) precedence for terminal *term*.

        Must be called before any add_production(); raises GrammarError on
        duplicate terms or an invalid associativity string.
        """
        assert self.Productions == [None],"Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left','right','nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc,level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols. For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------
    def add_production(self,prodname,syms,func=None,file='',line=0):
        """Add a production *prodname* -> *syms* to the grammar.

        func is the action function for the rule; file/line identify where
        the rule was written (used in error messages).  NOTE: *syms* is
        modified in place (literals replaced, %prec stripped).  Raises
        GrammarError for invalid names, bad %prec usage, or duplicates.
        """
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

        # Look for literal tokens
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # eval turns the quoted literal (e.g. "'+'") into the
                    # bare character, which is then registered as a terminal.
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                    if not c in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:] # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))

        # See if the rule is already in the rulemap
        # NOTE(review): 'map' shadows the builtin map() within this method.
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))

        # From this point on, everything is valid. Create a new Production instance
        pnumber = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar. Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------
    def set_start(self,start=None):
        """Set the start symbol and install the augmented rule S' -> start.

        Defaults to the first defined rule's name when *start* is omitted.
        Raises GrammarError if the symbol is not a known nonterminal.
        """
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol. Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------
    def find_unreachable(self):
        """Return a list of nonterminals not reachable from the start symbol."""

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s,[]):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0

        # Productions[0] is S' -> start, so prod[0] is the start symbol.
        mark_reachable_from( self.Productions[0].prod[0] )

        return [s for s in list(self.Nonterminals)
                if not reachable[s]]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------
    def infinite_cycles(self):
        """Return symbols that can never derive a string of only terminals."""
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = 1

        terminates['$end'] = 1

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0

        # Then propagate termination until no change:
        while 1:
            some_change = 0
            for (n,pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1

                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s,term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        """Return (symbol, production) pairs for symbols used but never defined."""
        result = []
        for p in self.Productions:
            if not p: continue

            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s,p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar. Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        """Return the list of terminals never referenced by any rule."""
        unused_tok = []
        for s,v in self.Terminals.items():
            # 'error' is implicitly defined; never report it as unused.
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined, but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        """Return one representative Production for each unused nonterminal."""
        unused_prod = []
        for s,v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar. term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        """Return (term, assoc) pairs for precedence rules that were never used."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname,self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self,beta):
        """Return FIRST(beta) for a sequence of symbols, using self.First."""

        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        """Compute and cache FIRST sets for every grammar symbol."""
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol. The
    # follow set is the set of all symbols that might follow a given
    # non-terminal. See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self,start=None):
        """Compute and cache FOLLOW sets for every nonterminal."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = [ '$end' ]

        while 1:
            didadd = 0
            for p in self.Productions[1:]:
                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '<empty>':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items. The LR items are stored in two ways: First, they are uniquely
    # numbered and placed in the list _lritems. Second, a linked list of LR items
    # is built for each production. For example:
    #
    # E -> E PLUS E
    #
    # Creates the list
    #
    # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------
    def build_lritems(self):
        """Build the chained LRItem objects for every production.

        Each production gains a .lr_items list and a .lr_next chain ending
        in None; each item gets .lr_after (productions following the dot)
        and .lr_before (symbol before the dot) precomputed.
        """
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p,i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError,KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError): pass
class LRTable(object):
    """Base container for LR parsing tables (action, goto, productions).

    Table construction lives in the derived LRGeneratedTable class; this
    class only knows how to load previously generated tables from a module
    or a pickle file and to bind production actions to callables.
    """
    def __init__(self):
        self.lr_action = None        # Action table
        self.lr_goto = None          # Goto table
        self.lr_productions = None   # List of MiniProduction objects
        self.lr_method = None        # 'SLR' or 'LALR'

    def read_table(self,module):
        """Load tables from a parsetab module (or importable module name).

        Returns the table signature string.  Raises VersionError when the
        module was generated by an incompatible version of this tool.
        """
        if isinstance(module,types.ModuleType):
            parsetab = module
        else:
            # Import by name; the exec form differs between Python 2 and 3.
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']

        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self,filename):
        """Load tables from a pickle file.  Returns the table signature.

        Raises VersionError on a version mismatch.
        """
        try:
            import cPickle as pickle    # Python 2: faster C implementation
        except ImportError:
            import pickle

        in_f = open(filename,"rb")

        # Fields are unpickled in the same order write_pickle() emitted them.
        tabversion = pickle.load(in_f)
        if tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")
        self.lr_method = pickle.load(in_f)
        signature = pickle.load(in_f)
        self.lr_action = pickle.load(in_f)
        self.lr_goto = pickle.load(in_f)
        productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self,pdict):
        """Resolve each production's action-function name against pdict."""
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
    """Solve the set-valued equation F(x) = FP(x) U U{F(y) | x R y}.

    X is the input set, R a relation function, FP the base set-valued
    function.  The depth-first work is delegated to traverse(); returns
    the computed mapping F as a dictionary.
    """
    N = dict.fromkeys(X, 0)
    stack = []
    F = {}
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F
def traverse(x,N,stack,F,X,R,FP):
    """Depth-first worker for digraph().

    Implements the SCC-collapsing traversal of the DeRemer/Pennello digraph
    algorithm: N tracks visit depth, stack the current path, and F the
    accumulating result sets.  Members of a strongly connected component
    all receive the same F value and are marked with the module-level
    sentinel MAXINT so they are never revisited.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x) # F(X) <- F'(x)

    rel = R(x) # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of an SCC: pop the component, sharing F[x].
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    def __init__(self,grammar,method='LALR',log=None):
        """Build LR parsing tables for *grammar*.

        method is 'SLR' or 'LALR' (anything else raises LALRError); log is
        an optional logger for debugging output, replaced by a NullLogger
        when omitted.  Table construction runs immediately.
        """
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {} # Action table
        self.lr_goto = {} # Goto table
        self.lr_productions = grammar.Productions # Copy of grammar Production array
        self.lr_goto_cache = {} # Cache of computed gotos
        self.lr0_cidhash = {} # Cache of closures

        self._add_count = 0 # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = [] # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self,I):
        """Return the LR(0) closure of the item set I as a new list.

        Uses a per-call generation counter (_add_count) stamped onto items
        (lr0_added) to avoid adding the same item twice.
        """
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            # NOTE: J is deliberately extended while being iterated; newly
            # appended items are picked up by this same for loop.
            for j in J:
                for x in j.lr_after:
                    if getattr(x,"lr0_added",0) == self._add_count: continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1
        return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
    def lr0_goto(self,I,x):
        """Compute goto(I,x) for LR(0) item set I and grammar symbol x.

        Results are memoized in lr_goto_cache so the same goto set is
        always returned as the *same* Python object, allowing later code
        to compare sets cheaply by id().  The per-symbol cache is a trie
        keyed by the id() of each advanced item, terminated by '$end'.
        """
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g: return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s

        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                # Descend one level of the trie per matching item.
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I,x)
if not g: continue
if id(g) in self.lr0_cidhash: continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
    def compute_lookback_includes(self,C,trans,nullable):
        """Determine the LOOKBACK and INCLUDES relations.

        C is the list of LR(0) item sets, trans the nonterminal transitions,
        nullable the set of nullable nonterminals.  Returns a pair
        (lookdict, includedict) as described in the banner comment above.
        """
        lookdict = {} # Dictionary of lookback relations
        includedict = {} # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state,N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N: continue

                # Okay, we have a name match. We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]

                    # Check to see if this symbol and state are a non-terminal transition
                    if (j,t) in dtrans:
                        # Yes. Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals: break # No forget it
                            if not p.prod[li] in nullable: break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j,t))

                    g = self.lr0_goto(C[j],t) # Go to next set
                    j = self.lr0_cidhash.get(id(g),-1) # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name: continue
                    if r.len != p.len: continue
                    i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]: break
                        i = i + 1
                    else:
                        lookb.append((j,r))
            for i in includes:
                if not i in includedict: includedict[i] = []
                includedict[i].append((state,N))
            lookdict[(state,N)] = lookb
        return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
    def add_lalr_lookaheads(self,C):
        """Run the full DeRemer/Pennello LALR(1) lookahead computation.

        C is the collection of LR(0) item sets.  Lookaheads end up attached
        directly to the productions (via add_lookaheads()).
        """
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C,trans,nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C,trans,nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans,readsets,included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Construct the SLR or LALR action/goto parse tables from the grammar.

    Fills in self.lr_action and self.lr_goto state by state, resolving
    shift/reduce conflicts by precedence (default: shift) and
    reduce/reduce conflicts in favor of the earlier-defined rule, while
    recording every conflict in self.sr_conflicts / self.rr_conflicts.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto # Goto array
    action = self.lr_action # Action array
    log = self.log # Logger for output
    actionp = { } # Action production array (temporary)
    log.info("Parsing method: %s", self.lr_method)
    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)
    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ] # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        log.info("")
        log.info("state %d", st)
        log.info("")
        for p in I:
            log.info(" (%d) %s", p.number, str(p))
        log.info("")
        for p in I:
            if p.len == p.lr_index + 1:
                # Dot at the end of the production
                if p.name == "S'":
                    # Start symbol. Accept!  (action code 0)
                    st_action["$end"] = 0
                    st_actionp["$end"] = p
                else:
                    # We are at the end of a production. Reduce!
                    # LALR uses the per-state lookaheads; SLR falls back to FOLLOW
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                sprec,slevel = Productions[st_actionp[a].number].prec
                                rprec,rlevel = Precedence.get(a,('right',0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # nonassoc at equal precedence: treat as an error entry
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp,rejectp = pp,oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp,rejectp = oldp,pp
                                self.rr_conflicts.append((st,chosenp,rejectp))
                                log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No conflict: plain reduce entry (negative rule number)
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1] # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I,a)
                    j = self.lr0_cidhash.get(id(g),-1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a,p,"shift and go to state %d" % j))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError("Shift/shift conflict in state %d" % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                rprec,rlevel = Productions[st_actionp[a].number].prec
                                sprec,slevel = Precedence.get(a,('right',0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No conflict: plain shift entry (positive state number)
                            st_action[a] = j
                            st_actionp[a] = p
        # Print the actions associated with each terminal
        _actprint = { }
        for a,p,m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(" %-15s %s",a,m)
                    _actprint[(a,m)] = 1
        log.info("")
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a,p,m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a,m) in _actprint:
                        log.debug(" ! %-15s [ %s ]",a,m)
                        not_used = 1
                        _actprint[(a,m)] = 1
        if not_used:
            log.debug("")
        # Construct the goto table for this state
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I,n)
            j = self.lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                log.info(" %-30s shift and go to state %d",n,j)
        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
    """Write the LR parsing tables out as an importable Python module.

    modulename -- module name for the table file (dotted names allowed;
                  only the last component is used for the filename)
    outputdir  -- directory in which to place the file
    signature  -- grammar signature embedded for cache validation

    On IOError the error is reported on stderr and the method returns.
    """
    basemodulename = modulename.split(".")[-1]
    filename = os.path.join(outputdir,basemodulename) + ".py"
    try:
        # 'with' guarantees the handle is closed even if a write fails
        # (the original leaked the handle on mid-write errors).
        with open(filename,"w") as f:
            f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
            # Change smaller to 0 to go back to original tables
            smaller = 1
            # Factor out names to try and make smaller
            if smaller:
                items = { }
                for s,nd in self.lr_action.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)
                f.write("\n_lr_action_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")
                # BUGFIX: the loop bodies in this generated code must be
                # indented, otherwise the emitted table file is not valid
                # Python and cannot be imported.
                f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_action: _lr_action[_x] = { }
        _lr_action[_x][_k] = _y
del _lr_action_items
""")
            else:
                f.write("\n_lr_action = { ")
                for k,v in self.lr_action.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n")
            if smaller:
                # Factor out names to try and make smaller
                items = { }
                for s,nd in self.lr_goto.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)
                f.write("\n_lr_goto_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")
                # Same indentation fix as for the action table above.
                f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = { }
        _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
            else:
                f.write("\n_lr_goto = { ")
                for k,v in self.lr_goto.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n")
            # Write production table
            f.write("_lr_productions = [\n")
            for p in self.lr_productions:
                if p.func:
                    f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
                else:
                    f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
            f.write("]\n")
    except IOError:
        e = sys.exc_info()[1]
        sys.stderr.write("Unable to create '%s'\n" % filename)
        sys.stderr.write(str(e)+"\n")
        return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
    """Pickle the LR parsing tables to *filename*.

    Dumps, in order: table version, parsing method, grammar signature,
    action table, goto table, and the production list (callable rules
    serialized by name/file/line; others with None placeholders).
    """
    try:
        import cPickle as pickle   # faster pickler on Python 2
    except ImportError:
        import pickle
    # BUGFIX: use a context manager so the file handle is closed even
    # when one of the dumps raises (the original leaked it on error).
    with open(filename, "wb") as outf:
        pickle.dump(__tabversion__, outf, pickle_protocol)
        pickle.dump(self.lr_method, outf, pickle_protocol)
        pickle.dump(signature, outf, pickle_protocol)
        pickle.dump(self.lr_action, outf, pickle_protocol)
        pickle.dump(self.lr_goto, outf, pickle_protocol)
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str, p.name, p.len, p.func, p.file, p.line))
            else:
                outp.append((str(p), p.name, p.len, None, None, None))
        pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return a dict of the symbols visible in a caller's frame.

    Walks *levels* frames up the call stack and merges that frame's
    globals with its locals (locals win).  Used to recover the
    environment of the yacc() caller when no module is given.
    """
    # Raise and catch an exception purely to obtain a traceback object,
    # whose tb_frame is this function's own frame.
    try:
        raise RuntimeError
    except RuntimeError:
        exc_type, exc_value, tb = sys.exc_info()
        frame = tb.tb_frame
    while levels > 0:
        frame = frame.f_back
        levels -= 1
    symbols = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar rule docstring into production tuples.

    doc  -- docstring containing rules like ``expr : expr PLUS term``
            with ``|`` continuation lines
    file -- filename reported in error messages
    line -- line number of the docstring's defining function

    Returns a list of (file, lineno, prodname, symbols) tuples.
    Raises SyntaxError on a misplaced '|' or a malformed rule.
    """
    grammar = []
    current_prod = None          # name of the most recent production (for '|')
    lineno = line
    for raw_line in doc.splitlines():
        lineno += 1
        tokens = raw_line.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # This is a continuation of a previous rule
                if not current_prod:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, lineno))
                prodname = current_prod
                syms = tokens[1:]
            else:
                prodname = tokens[0]
                current_prod = prodname
                syms = tokens[2:]
                assign = tokens[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, lineno))
            grammar.append((file, lineno, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # e.g. IndexError on a one-token line becomes a syntax error
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file, lineno, raw_line.strip()))
    return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collects and validates the parser specification extracted from a
    namespace dictionary: start symbol, p_error handler, token list,
    precedence table and the p_* grammar rule functions."""
    def __init__(self,pdict,log=None):
        self.pdict = pdict            # namespace dict to introspect
        self.start = None             # start symbol ('start' entry)
        self.error_func = None        # user-supplied p_error() handler
        self.tokens = None            # declared token list
        self.files = {}               # source files containing rules
        self.grammar = []             # (funcname, production-tuple) pairs
        self.error = 0                # set to 1 on any validation failure
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Extract every piece of parser information from the namespace."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run all validation steps; returns nonzero if any failed."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an MD5 digest of the grammar spec; used to decide
        whether previously generated parse tables are still valid."""
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5   # pre-hashlib Python fallback
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    # f[3] is the rule function's docstring
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            pass
        return sig.digest()

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file. Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work). Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_files(self):
        """Warn about p_* functions defined more than once in a source file."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
        for filename in self.files.keys():
            base,ext = os.path.splitext(filename)
            if ext != '.py': return 1 # No idea. Assume it's okay.
            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                continue    # source not readable; nothing to check
            counthash = { }
            for linen,l in enumerate(lines):
                linen += 1   # report 1-based line numbers
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start,str):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return
            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1
            # p_error must take exactly one argument (plus self for methods)
            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1

    # Get the tokens map
    def get_tokens(self):
        tokens = self.pdict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return
        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get("precedence",None)

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Flatten the user precedence table into (term, assoc, level)
        triples stored in self.preclist."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec,(list,tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level,p in enumerate(self.prec):
                if not isinstance(p,(list,tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return
                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc,str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term,str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    # level+1: precedence levels are 1-based (0 means unset)
                    preclist.append((term,assoc,level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))
        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check the arity and docstring of each p_* rule function and
        parse the rule docstrings into self.grammar."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return
        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2   # self plus the production argument
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1
                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1
        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    # A docstring whose second word is ':' looks like a rule
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass
        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build and return an LRParser from grammar rules found in *module*
    (or, by default, the caller's namespace).

    Tries to reuse previously generated tables (module or pickle) when
    their signature matches the current grammar; otherwise validates the
    grammar, generates fresh tables with LRGeneratedTable, optionally
    writes them back out, and binds the rule callables.

    Raises YaccError if the parser specification is invalid.
    """
    global parse # Reference to the parsing method of the last built parser
    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        # 2 levels up: past yacc() and get_caller_module_dict() itself
        pdict = get_caller_module_dict(2)
    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()
    if pinfo.error:
        raise YaccError("Unable to build parser")
    # Check signature against table files (if any)
    signature = pinfo.signature()
    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Reuse the cached tables when optimizing or unchanged grammar
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any failure to load cached tables falls through to regeneration
        pass
    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()
    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
    errors = 0
    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")
    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")
    # Create a grammar object
    grammar = Grammar(pinfo.tokens)
    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))
    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1
    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1
    if errors:
        raise YaccError("Unable to build parser")
    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1
    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)
    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)
    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))
    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))
    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")
    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)
        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1
    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1
    if errors:
        raise YaccError("Unable to build parser")
    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)
    lr = LRGeneratedTable(grammar,method,debuglog)
    if debug:
        num_sr = len(lr.sr_conflicts)
        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)
        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)
    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")
        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            # Report each (state, rule, rejected) combination only once
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1
        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)
    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)
    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)
    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)
    parse = parser.parse
    return parser
| bsd-3-clause |
redhat-openstack/nova | nova/cells/weights/__init__.py | 141 | 1284 | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell Scheduler weights
"""
from nova import weights
class WeightedCell(weights.WeighedObject):
    """A cell paired with the weight the weighers assigned to it
    (attributes ``obj`` and ``weight`` come from weights.WeighedObject)."""
    def __repr__(self):
        return "WeightedCell [cell: %s, weight: %s]" % (
            self.obj.name, self.weight)
class BaseCellWeigher(weights.BaseWeigher):
    """Base class for cell weights."""
    # Concrete cell weighers subclass this; all behavior is inherited
    # from weights.BaseWeigher.
    pass
class CellWeightHandler(weights.BaseWeightHandler):
    """Weight handler that wraps cells into WeightedCell objects."""
    object_class = WeightedCell   # type produced for each weighed cell

    def __init__(self):
        # Only BaseCellWeigher subclasses are accepted as weighers.
        super(CellWeightHandler, self).__init__(BaseCellWeigher)
def all_weighers():
    """Return a list of weight plugin classes found in this directory."""
    handler = CellWeightHandler()
    return handler.get_all_classes()
| apache-2.0 |
bhmm/bhmm-force-spectroscopy-manuscript | examples/discrete_1d_2min/run.py | 3 | 1630 | import numpy as np
import pyemma.msm.io as msmio
import pyemma.msm.analysis as msmana
import bhmm
bhmm.config.verbose=True
# load observations
o = np.loadtxt('2well_traj_100K.dat', dtype=int)
# hidden states
nstates = 2
# multiple lags
lags = [1,5,10,50,100,500,1000]
#lags = [1,2,5,10,20,50,100,200,300,400,500,600,700,800,900,1000]
its = np.zeros((len(lags)))
its_mean = np.zeros((len(lags)))
its_std = np.zeros((len(lags)))
likelihoods = np.zeros((len(lags)))
for (i,lag) in enumerate(lags):
print ("\n========================================================================")
print ("LAG = ",lag)
# prepare shifted lagged data
observations = []
for shift in range(0, lag):
observations.append(o[shift:][::lag])
# initial HMM
hmm = bhmm.estimate_hmm(observations, nstates, type='discrete')
its[i] = lag*hmm.timescales
likelihoods[i] = hmm.likelihood
# Sample models.
print "BHMM for lag ",lag
nsample = 10
sampled_hmms = bhmm.bayesian_hmm(observations, hmm, nsample=nsample, store_hidden=False)
print 'sampled timescales: ',sampled_hmms.timescales_samples
# store sampled timescale moments
its_mean[i] = lag*sampled_hmms.timescales_mean
its_std[i] = lag*sampled_hmms.timescales_std
print ("========================================================================")
print 'Reference:'
P = msmio.read_matrix('2well_P.dat', mode='sparse').toarray()
itsref = msmana.timescales(P, tau=0.1)[1]
print itsref
print 'Resulting timescales:'
for i in range(len(lags)):
print lags[i], likelihoods[i], its[i], its_mean[i], '+-', its_std[i]
| lgpl-3.0 |
timlau/FedoraReview | plugins/generic_autotools.py | 1 | 7655 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# (C) 2013 - Pavel Raiskup <praiskup@redhat.com>
''' Autotools SHOULD checks, default Generic group. '''
import textwrap
from subprocess import Popen, PIPE
from FedoraReview import CheckBase, RegistryBase, ReviewDirs
#######################################
# STILL COMMONLY USED OBSOLETE MACROS #
# #
# Any suggestion for newer check here #
# would be appreciated. #
#######################################
# Obsolete automake macros still seen in the wild.
_OBS_M4S_AUTOMAKE = [
    'AM_CONFIG_HEADER',
    'AM_PROG_CC_STDC',
]
# Obsolete libtool macros still seen in the wild.
_OBS_M4S_LIBTOOL = [
    'AC_PROG_LIBTOOL',
    'AM_PROG_LIBTOOL',
]
# Map each autotool to its obsolete macro list; consumed by
# CheckAutotoolsObsoletedMacros.get_trace_command().
_OBSOLETE_CHECKS = {
    'automake': _OBS_M4S_AUTOMAKE,
    'libtool': _OBS_M4S_LIBTOOL,
}
def _prepend_indent(text):
''' add the paragraph indentation '''
lines = text.splitlines()
return '\n'.join(map(lambda x: " " + x if x != "" else "", lines))
class Registry(RegistryBase):
    ''' Module registration, register all checks. '''
    group = 'Generic.autotools'   # check group this plugin belongs to

    def is_applicable(self):
        ''' Autotools checks are considered for every reviewed package. '''
        return True
class AutotoolsCheckBase(CheckBase):
    ''' Base class for all Autotool-related tests. '''

    # Lazily computed list of autotools used by the package; stays None
    # until find_used_tools() runs on an instance.
    used_tools = None

    def __init__(self, checks):
        CheckBase.__init__(self, checks, __file__)
        # construct text wrapper used by text_wrap() for report output
        self.wrapper = textwrap.TextWrapper(break_long_words=False,
                                            drop_whitespace=True,
                                            replace_whitespace=True,
                                            fix_sentence_endings=True)

    def text_wrap(self, text, width=76):
        ''' wrap the text on the specified character '''
        self.wrapper.width = width
        return self.wrapper.fill(text)

    def find_used_tools(self):
        ''' get the list of autotools relevant for this package '''
        def check_for(tool, packages):
            ''' helper - try all known package names for the tool '''
            for name in packages:
                if name in brequires:
                    self.used_tools.append(tool)
                    return
        # BuildRequires from the spec file drive the tool detection
        brequires = self.spec.build_requires
        if self.used_tools is not None:
            return   # already computed; nothing to do
        self.used_tools = []
        am_pkgs = ['automake', 'automake14', 'automake15', 'automake16',
                   'automake17']
        check_for('automake', am_pkgs)
        # Re-enable once some autoconf-related checkers are added
        # check_for('autoconf', ['autoconf', 'autoconf213'])
        check_for('libtool', ['libtool'])
        self.log.debug("autotools used: " + ' '.join(self.used_tools))
# CHECKERS #
class CheckAutotoolsObsoletedMacros(AutotoolsCheckBase):
    ''' obsolete macros (shorthly m4s) checker '''

    # NOTE(review): class-level mutable dict is shared by all instances;
    # results accumulate across runs if multiple checkers are created.
    warn_items = {}

    def __init__(self, base):
        AutotoolsCheckBase.__init__(self, base)
        # basic settings
        self.text = 'Package should not use obsolete m4 macros'
        self.automatic = True
        self.type = 'EXTRA'
        self.url = 'https://fedorahosted.org/FedoraReview/wiki/AutoTools'

    def get_trace_command(self):
        ''' construct the basic grep command '''
        # -E extended regex, -n line numbers, -o print only the match
        trace_cmd = ["grep", "-E", "-n", "-o"]
        for tool in self.used_tools:
            if tool not in _OBSOLETE_CHECKS:
                # shouldn't be neccessary
                continue
            checks = _OBSOLETE_CHECKS[tool]
            for obs_m4 in checks:
                # match the macro either alone on the line or followed by '('
                trace_cmd.append("-e")
                trace_cmd.append(obs_m4 + "[[:space:]]*$")
                trace_cmd.append("-e")
                trace_cmd.append(obs_m4 + r"\(")
        return trace_cmd

    def trace(self):
        ''' trace for obsoleted macros '''
        def shorter_configure(configure):
            ''' remove the workdir prefix from configure file '''
            prefix = ReviewDirs.root + "/BUILD"
            simple = configure
            if configure.startswith(prefix):
                simple = configure[len(prefix) + 1:]
            return simple
        # find traced files: prefer the unpacked build sources
        if self.checks.buildsrc.is_available:
            src = self.checks.buildsrc
        else:
            src = self.checks.sources
        trace_files = src.find_all('*configure.ac') \
            + src.find_all('*configure.in')
        # get the base tracing command (grep)
        trace_cmd = self.get_trace_command()
        # ---------------------------
        # search for obsoleted macros
        # ---------------------------
        for configure_ac in trace_files:
            cmd = trace_cmd + [configure_ac]
            try:
                self.log.debug("running: " + ' '.join(cmd))
                p = Popen(cmd, stdout=PIPE, stderr=PIPE)
                stdout, stderr = p.communicate()
            except IOError:
                self.set_passed(self.PENDING,
                                "error while tracing autoconf.ac")
                return
            # grep exits 0 on match, 1 on no match; anything else is a failure
            if p.returncode not in [0, 1]:
                msg = "grep returns bad exit value %d: " % p.returncode \
                    + stderr
                self.set_passed(self.PENDING, msg)
                return
            # NOTE(review): stdout/stderr are py2 str here; under py3
            # Popen returns bytes — confirm interpreter version.
            m4_lines = stdout.splitlines(False)
            m4_lines = map(lambda x: x.strip('('), m4_lines)
            for m4_line in m4_lines:
                # grep -n -o output has the form "<line>:<macro>"
                line, m4 = m4_line.split(':')
                if m4 not in self.warn_items:
                    self.warn_items[m4] = []
                self.warn_items[m4].append({
                    'file': shorter_configure(configure_ac),
                    'line': int(line),
                })

    def generate_pretty_output(self):
        '''
        take the results from self.warn_items and generate at pretty looking
        message
        '''
        output = ""
        for item in self.warn_items.keys():
            positions = self.warn_items[item]
            hit = item + " found in: "
            first = True
            for pos in positions:
                if first:
                    first = False
                else:
                    hit = hit + ', '
                hit = hit + pos['file'] + ':' + str(pos['line'])
            output = output + _prepend_indent(self.text_wrap(hit))
            output = output + "\n"
        return output

    def run(self):
        ''' standard entry point for each check '''
        self.set_passed(self.NA)
        self.find_used_tools()
        if not self.used_tools:
            return   # package does not use autotools; nothing to check
        # trace for warnings
        self.trace()
        if not len(self.warn_items):
            self.set_passed(self.PASS)
            return
        msg = "Some obsoleted macros found, see the attachment."
        output = self.generate_pretty_output()
        attachment = self.Attachment("AutoTools: Obsoleted m4s found", output)
        self.set_passed(self.FAIL, msg, [attachment])
        return
# vim: set expandtab ts=4 sw=4 tw=79:
| gpl-2.0 |
Harmon758/discord.py | discord/user.py | 1 | 11442 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import Optional, TYPE_CHECKING
import discord.abc
from .flags import PublicUserFlags
from .utils import snowflake_time, _bytes_to_base64_data
from .enums import DefaultAvatar
from .colour import Colour
from .asset import Asset
__all__ = (
'User',
'ClientUser',
)
_BaseUser = discord.abc.User
class BaseUser(_BaseUser):
    """Common state and behaviour shared by :class:`User` and :class:`ClientUser`.

    Wraps the raw user payload received from Discord (username, id,
    discriminator, avatar hash, bot/system flags) together with the
    connection state used to build assets and mentions.
    """

    __slots__ = ('name', 'id', 'discriminator', '_avatar', 'bot', 'system', '_public_flags', '_state')

    if TYPE_CHECKING:
        name: str
        id: int
        discriminator: str
        bot: bool
        system: bool

    def __init__(self, *, state, data):
        self._state = state
        self._update(data)

    def __repr__(self):
        return (
            f"<BaseUser id={self.id} name={self.name!r} discriminator={self.discriminator!r}"
            f" bot={self.bot} system={self.system}>"
        )

    def __str__(self):
        # classic "name#discriminator" rendering
        return f'{self.name}#{self.discriminator}'

    def __eq__(self, other):
        # two users are the same iff they share a snowflake ID
        return isinstance(other, _BaseUser) and other.id == self.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # The upper bits of a snowflake carry its creation timestamp;
        # shifting out the low 22 bits gives a compact, well-spread hash.
        return self.id >> 22

    def _update(self, data):
        """Populate/refresh attributes from a raw user payload dict."""
        self.name = data['username']
        self.id = int(data['id'])
        self.discriminator = data['discriminator']
        self._avatar = data['avatar']
        self._public_flags = data.get('public_flags', 0)
        self.bot = data.get('bot', False)
        self.system = data.get('system', False)

    @classmethod
    def _copy(cls, user):
        """Return a shallow copy of *user* without invoking ``__init__``."""
        self = cls.__new__(cls)  # bypass __init__

        self.name = user.name
        self.id = user.id
        self.discriminator = user.discriminator
        self._avatar = user._avatar
        self.bot = user.bot
        # BUGFIX: ``system`` was previously not copied. Because of
        # __slots__ the attribute was simply missing on copies, so e.g.
        # repr(copy) raised AttributeError. Copy it like the other fields.
        self.system = user.system
        self._state = user._state
        self._public_flags = user._public_flags

        return self

    def _to_minimal_user_json(self):
        """Serialise back to the minimal user payload shape."""
        return {
            'username': self.name,
            'id': self.id,
            'avatar': self._avatar,
            'discriminator': self.discriminator,
            'bot': self.bot,
        }

    @property
    def public_flags(self):
        """:class:`PublicUserFlags`: The publicly available flags the user has."""
        return PublicUserFlags._from_value(self._public_flags)

    @property
    def avatar(self):
        """:class:`Asset`: Returns an :class:`Asset` for the avatar the user has.

        If the user does not have a traditional avatar, an asset for
        the default avatar is returned instead.
        """
        if self._avatar is None:
            return Asset._from_default_avatar(self._state, int(self.discriminator) % len(DefaultAvatar))
        else:
            return Asset._from_avatar(self._state, self.id, self._avatar)

    @property
    def default_avatar(self):
        """:class:`Asset`: Returns the default avatar for a given user. This is calculated by the user's discriminator."""
        return Asset._from_default_avatar(self._state, int(self.discriminator) % len(DefaultAvatar))

    @property
    def colour(self):
        """:class:`Colour`: A property that returns a colour denoting the rendered colour
        for the user. This always returns :meth:`Colour.default`.

        There is an alias for this named :attr:`color`.
        """
        return Colour.default()

    @property
    def color(self):
        """:class:`Colour`: A property that returns a color denoting the rendered color
        for the user. This always returns :meth:`Colour.default`.

        There is an alias for this named :attr:`colour`.
        """
        return self.colour

    @property
    def mention(self):
        """:class:`str`: Returns a string that allows you to mention the given user."""
        return f'<@{self.id}>'

    @property
    def created_at(self):
        """:class:`datetime.datetime`: Returns the user's creation time in UTC.

        This is when the user's Discord account was created.
        """
        return snowflake_time(self.id)

    @property
    def display_name(self):
        """:class:`str`: Returns the user's display name.

        For regular users this is just their username, but
        if they have a guild specific nickname then that
        is returned instead.
        """
        return self.name

    def mentioned_in(self, message):
        """Checks if the user is mentioned in the specified message.

        Parameters
        -----------
        message: :class:`Message`
            The message to check if you're mentioned in.

        Returns
        -------
        :class:`bool`
            Indicates if the user is mentioned in the message.
        """
        if message.mention_everyone:
            return True

        return any(user.id == self.id for user in message.mentions)
class ClientUser(BaseUser):
    """Represents your Discord user.
    .. container:: operations
        .. describe:: x == y
            Checks if two users are equal.
        .. describe:: x != y
            Checks if two users are not equal.
        .. describe:: hash(x)
            Return the user's hash.
        .. describe:: str(x)
            Returns the user's name with discriminator.
    Attributes
    -----------
    name: :class:`str`
        The user's username.
    id: :class:`int`
        The user's unique ID.
    discriminator: :class:`str`
        The user's discriminator. This is given when the username has conflicts.
    bot: :class:`bool`
        Specifies if the user is a bot account.
    system: :class:`bool`
        Specifies if the user is a system user (i.e. represents Discord officially).
        .. versionadded:: 1.3
    verified: :class:`bool`
        Specifies if the user's email is verified.
    locale: Optional[:class:`str`]
        The IETF language tag used to identify the language the user is using.
    mfa_enabled: :class:`bool`
        Specifies if the user has MFA turned on and working.
    """
    # '__weakref__' must be listed because BaseUser defines __slots__ and the
    # state caches may hold weak references to this object.
    __slots__ = BaseUser.__slots__ + ('locale', '_flags', 'verified', 'mfa_enabled', '__weakref__')
    def __init__(self, *, state, data):
        super().__init__(state=state, data=data)
    def __repr__(self):
        return (
            f'<ClientUser id={self.id} name={self.name!r} discriminator={self.discriminator!r}'
            f' bot={self.bot} verified={self.verified} mfa_enabled={self.mfa_enabled}>'
        )
    def _update(self, data):
        # Extend the base payload parsing with client-only fields.
        super()._update(data)
        # There's actually an Optional[str] phone field as well but I won't use it
        self.verified = data.get('verified', False)
        self.locale = data.get('locale')
        self._flags = data.get('flags', 0)
        self.mfa_enabled = data.get('mfa_enabled', False)
    async def edit(self, *, username: Optional[str] = None, avatar: Optional[bytes] = None) -> None:
        """|coro|
        Edits the current profile of the client.
        .. note::
            To upload an avatar, a :term:`py:bytes-like object` must be passed in that
            represents the image being uploaded. If this is done through a file
            then the file must be opened via ``open('some_filename', 'rb')`` and
            the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
            The only image formats supported for uploading is JPEG and PNG.
        Parameters
        -----------
        username: :class:`str`
            The new username you wish to change to.
        avatar: :class:`bytes`
            A :term:`py:bytes-like object` representing the image to upload.
            Could be ``None`` to denote no avatar.
        Raises
        ------
        HTTPException
            Editing your profile failed.
        InvalidArgument
            Wrong image format passed for ``avatar``.
        """
        if avatar is not None:
            avatar = _bytes_to_base64_data(avatar)
        data = await self._state.http.edit_profile(username=username, avatar=avatar)
        # re-parse the response so local state mirrors the server
        self._update(data)
class User(BaseUser, discord.abc.Messageable):
    """Represents a Discord user.
    .. container:: operations
        .. describe:: x == y
            Checks if two users are equal.
        .. describe:: x != y
            Checks if two users are not equal.
        .. describe:: hash(x)
            Return the user's hash.
        .. describe:: str(x)
            Returns the user's name with discriminator.
    Attributes
    -----------
    name: :class:`str`
        The user's username.
    id: :class:`int`
        The user's unique ID.
    discriminator: :class:`str`
        The user's discriminator. This is given when the username has conflicts.
    bot: :class:`bool`
        Specifies if the user is a bot account.
    system: :class:`bool`
        Specifies if the user is a system user (i.e. represents Discord officially).
    """
    __slots__ = BaseUser.__slots__ + ('__weakref__',)
    def __repr__(self):
        return f'<User id={self.id} name={self.name!r} discriminator={self.discriminator!r} bot={self.bot}>'
    async def _get_channel(self):
        # Messageable hook: messages to a user go through their DM channel,
        # creating it on first use.
        ch = await self.create_dm()
        return ch
    @property
    def dm_channel(self):
        """Optional[:class:`DMChannel`]: Returns the channel associated with this user if it exists.
        If this returns ``None``, you can create a DM channel by calling the
        :meth:`create_dm` coroutine function.
        """
        return self._state._get_private_channel_by_user(self.id)
    @property
    def mutual_guilds(self):
        """List[:class:`Guild`]: The guilds that the user shares with the client.
        .. note::
            This will only return mutual guilds within the client's internal cache.
        .. versionadded:: 1.7
        """
        return [guild for guild in self._state._guilds.values() if guild.get_member(self.id)]
    async def create_dm(self):
        """|coro|
        Creates a :class:`DMChannel` with this user.
        This should be rarely called, as this is done transparently for most
        people.
        Returns
        -------
        :class:`.DMChannel`
            The channel that was created.
        """
        # reuse the cached private channel when one already exists
        found = self.dm_channel
        if found is not None:
            return found
        state = self._state
        data = await state.http.start_private_message(self.id)
        return state.add_dm_channel(data)
| mit |
pylixm/sae-django-demo | django1.7-sae/site-packages/django/db/backends/postgresql_psycopg2/creation.py | 57 | 3837 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.utils import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
    """PostgreSQL-specific table/index creation SQL generation."""
    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'serial',
        'BinaryField': 'bytea',
        'BooleanField': 'boolean',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'timestamp with time zone',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'inet',
        'GenericIPAddressField': 'inet',
        'NullBooleanField': 'boolean',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer',
        'PositiveSmallIntegerField': 'smallint',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
    }
    # CHECK constraints for fields whose Python semantics are narrower than
    # the signed column type they are stored in.
    data_type_check_constraints = {
        'PositiveIntegerField': '"%(column)s" >= 0',
        'PositiveSmallIntegerField': '"%(column)s" >= 0',
    }
    def sql_table_creation_suffix(self):
        """Return the CREATE DATABASE suffix for the test database.
        Honours TEST['CHARSET']; PostgreSQL cannot set a collation at
        database creation time, hence the assertion on TEST['COLLATION'].
        """
        test_settings = self.connection.settings_dict['TEST']
        assert test_settings['COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
        if test_settings['CHARSET']:
            return "WITH ENCODING '%s'" % test_settings['CHARSET']
        return ''
    def sql_indexes_for_field(self, model, f, style):
        """Return CREATE INDEX statements for field ``f`` of ``model``.
        Unique fields get their index implicitly from the constraint; for
        indexed varchar/text columns an additional ``*_like`` index with a
        pattern-ops operator class is emitted (see the comment below).
        """
        output = []
        db_type = f.db_type(connection=self.connection)
        if db_type is not None and (f.db_index or f.unique):
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
                if tablespace_sql:
                    tablespace_sql = ' ' + tablespace_sql
            else:
                tablespace_sql = ''
            def get_index_sql(index_name, opclass=''):
                # helper: render a single CREATE INDEX statement, truncating
                # the index name to the backend's maximum identifier length
                return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                        style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + ' ' +
                        style.SQL_KEYWORD('ON') + ' ' +
                        style.SQL_TABLE(qn(db_table)) + ' ' +
                        "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
                        "%s;" % tablespace_sql)
            if not f.unique:
                output = [get_index_sql('%s_%s' % (db_table, f.column))]
            # Fields with database column types of `varchar` and `text` need
            # a second index that specifies their operator class, which is
            # needed when performing correct LIKE queries outside the
            # C locale. See #12234.
            if db_type.startswith('varchar'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' varchar_pattern_ops'))
            elif db_type.startswith('text'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' text_pattern_ops'))
        return output
| apache-2.0 |
gitaarik/django | tests/model_fields/test_datetimefield.py | 87 | 3393 | import datetime
from django.db import models
from django.test import (
SimpleTestCase, TestCase, override_settings, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import DateTimeModel
class DateTimeFieldTests(TestCase):
    """Parsing, storage and __date lookup behaviour of date/time fields."""
    def test_datetimefield_to_python_microseconds(self):
        """DateTimeField.to_python() supports microseconds."""
        f = models.DateTimeField()
        self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'), datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
        self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'), datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
    def test_timefield_to_python_microseconds(self):
        """TimeField.to_python() supports microseconds."""
        f = models.TimeField()
        self.assertEqual(f.to_python('01:02:03.000004'), datetime.time(1, 2, 3, 4))
        self.assertEqual(f.to_python('01:02:03.999999'), datetime.time(1, 2, 3, 999999))
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_datetimes_save_completely(self):
        """Date, datetime and time values round-trip without losing precision."""
        dat = datetime.date(2014, 3, 12)
        datetim = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        tim = datetime.time(21, 22, 23, 240000)
        DateTimeModel.objects.create(d=dat, dt=datetim, t=tim)
        obj = DateTimeModel.objects.first()
        self.assertTrue(obj)
        self.assertEqual(obj.d, dat)
        self.assertEqual(obj.dt, datetim)
        self.assertEqual(obj.t, tim)
    @override_settings(USE_TZ=False)
    def test_lookup_date_without_use_tz(self):
        """The dt__date lookup matches by calendar date with naive datetimes."""
        d = datetime.date(2014, 3, 12)
        dt1 = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        dt2 = datetime.datetime(2014, 3, 11, 21, 22, 23, 240000)
        t = datetime.time(21, 22, 23, 240000)
        m = DateTimeModel.objects.create(d=d, dt=dt1, t=t)
        # Other model with different datetime.
        DateTimeModel.objects.create(d=d, dt=dt2, t=t)
        self.assertEqual(m, DateTimeModel.objects.get(dt__date=d))
    @requires_tz_support
    @skipUnlessDBFeature('has_zoneinfo_database')
    @override_settings(USE_TZ=True, TIME_ZONE='America/Vancouver')
    def test_lookup_date_with_use_tz(self):
        """The dt__date lookup respects the active time zone, not UTC."""
        d = datetime.date(2014, 3, 12)
        # The following is equivalent to UTC 2014-03-12 18:34:23.24000.
        dt1 = datetime.datetime(2014, 3, 12, 10, 22, 23, 240000, tzinfo=timezone.get_current_timezone())
        # The following is equivalent to UTC 2014-03-13 05:34:23.24000.
        dt2 = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000, tzinfo=timezone.get_current_timezone())
        t = datetime.time(21, 22, 23, 240000)
        m1 = DateTimeModel.objects.create(d=d, dt=dt1, t=t)
        m2 = DateTimeModel.objects.create(d=d, dt=dt2, t=t)
        # In Vancouver, we expect both results.
        self.assertQuerysetEqual(
            DateTimeModel.objects.filter(dt__date=d),
            [repr(m1), repr(m2)],
            ordered=False
        )
        with self.settings(TIME_ZONE='UTC'):
            # But in UTC, the __date only matches one of them.
            self.assertQuerysetEqual(DateTimeModel.objects.filter(dt__date=d), [repr(m1)])
class ValidationTest(SimpleTestCase):
    """Validation behaviour of date-based model fields."""

    def test_datefield_cleans_date(self):
        """DateField.clean() parses an ISO date string into datetime.date."""
        field = models.DateField()
        cleaned = field.clean('2008-10-10', None)
        self.assertEqual(datetime.date(2008, 10, 10), cleaned)
| bsd-3-clause |
GeoCat/QGIS | tests/src/python/test_qgslayoutmanager.py | 4 | 11291 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutManager.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 by Nyall Dawson'
__date__ = '15/03/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsComposition,
QgsLayoutManager,
QgsProject)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtTest import QSignalSpy
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutManager(unittest.TestCase):
    """Unit tests for QgsLayoutManager: add/remove/lookup/XML/signals."""
    def setUp(self):
        """Run before each test."""
        self.manager = None
        self.aboutFired = False
    def tearDown(self):
        """Run after each test."""
        pass
    def testAddComposition(self):
        """Adding compositions fires signals and rejects duplicates."""
        project = QgsProject()
        composition = QgsComposition(project)
        composition.setName('test composition')
        manager = QgsLayoutManager(project)
        composition_about_to_be_added_spy = QSignalSpy(manager.compositionAboutToBeAdded)
        composition_added_spy = QSignalSpy(manager.compositionAdded)
        self.assertTrue(manager.addComposition(composition))
        self.assertEqual(len(composition_about_to_be_added_spy), 1)
        self.assertEqual(composition_about_to_be_added_spy[0][0], 'test composition')
        self.assertEqual(len(composition_added_spy), 1)
        self.assertEqual(composition_added_spy[0][0], 'test composition')
        # adding it again should fail
        self.assertFalse(manager.addComposition(composition))
        # try adding a second composition
        composition2 = QgsComposition(project)
        composition2.setName('test composition2')
        self.assertTrue(manager.addComposition(composition2))
        self.assertEqual(len(composition_added_spy), 2)
        self.assertEqual(composition_about_to_be_added_spy[1][0], 'test composition2')
        self.assertEqual(len(composition_about_to_be_added_spy), 2)
        self.assertEqual(composition_added_spy[1][0], 'test composition2')
        # adding a composition with duplicate name should fail
        composition3 = QgsComposition(project)
        composition3.setName('test composition2')
        self.assertFalse(manager.addComposition(composition3))
    def testCompositions(self):
        """compositions() returns everything added so far."""
        project = QgsProject()
        manager = QgsLayoutManager(project)
        composition = QgsComposition(project)
        composition.setName('test composition')
        composition2 = QgsComposition(project)
        composition2.setName('test composition2')
        composition3 = QgsComposition(project)
        composition3.setName('test composition3')
        manager.addComposition(composition)
        self.assertEqual(manager.compositions(), [composition])
        manager.addComposition(composition2)
        self.assertEqual(set(manager.compositions()), {composition, composition2})
        manager.addComposition(composition3)
        self.assertEqual(set(manager.compositions()), {composition, composition2, composition3})
    def aboutToBeRemoved(self, name):
        # helper slot for testRemoveComposition, connected to
        # compositionAboutToBeRemoved
        # composition should still exist at this time
        self.assertEqual(name, 'test composition')
        self.assertTrue(self.manager.compositionByName('test composition'))
        self.aboutFired = True
    def testRemoveComposition(self):
        """Removal fires both signals; item still exists during 'aboutTo'."""
        project = QgsProject()
        composition = QgsComposition(project)
        composition.setName('test composition')
        self.manager = QgsLayoutManager(project)
        composition_removed_spy = QSignalSpy(self.manager.compositionRemoved)
        composition_about_to_be_removed_spy = QSignalSpy(self.manager.compositionAboutToBeRemoved)
        # tests that composition still exists when compositionAboutToBeRemoved is fired
        self.manager.compositionAboutToBeRemoved.connect(self.aboutToBeRemoved)
        # not added, should fail
        self.assertFalse(self.manager.removeComposition(composition))
        self.assertEqual(len(composition_removed_spy), 0)
        self.assertEqual(len(composition_about_to_be_removed_spy), 0)
        self.assertTrue(self.manager.addComposition(composition))
        self.assertEqual(self.manager.compositions(), [composition])
        self.assertTrue(self.manager.removeComposition(composition))
        self.assertEqual(len(self.manager.compositions()), 0)
        self.assertEqual(len(composition_removed_spy), 1)
        self.assertEqual(composition_removed_spy[0][0], 'test composition')
        self.assertEqual(len(composition_about_to_be_removed_spy), 1)
        self.assertEqual(composition_about_to_be_removed_spy[0][0], 'test composition')
        self.assertTrue(self.aboutFired)
        self.manager = None
    def testClear(self):
        """clear() removes all compositions and fires per-item signals."""
        project = QgsProject()
        manager = QgsLayoutManager(project)
        # add a bunch of compositions
        composition = QgsComposition(project)
        composition.setName('test composition')
        composition2 = QgsComposition(project)
        composition2.setName('test composition2')
        composition3 = QgsComposition(project)
        composition3.setName('test composition3')
        manager.addComposition(composition)
        manager.addComposition(composition2)
        manager.addComposition(composition3)
        composition_removed_spy = QSignalSpy(manager.compositionRemoved)
        composition_about_to_be_removed_spy = QSignalSpy(manager.compositionAboutToBeRemoved)
        manager.clear()
        self.assertEqual(len(manager.compositions()), 0)
        self.assertEqual(len(composition_removed_spy), 3)
        self.assertEqual(len(composition_about_to_be_removed_spy), 3)
    def testCompositionByName(self):
        """compositionByName() matches exact names; falsy for unknown names."""
        project = QgsProject()
        manager = QgsLayoutManager(project)
        # add a bunch of compositions
        composition = QgsComposition(project)
        composition.setName('test composition')
        composition2 = QgsComposition(project)
        composition2.setName('test composition2')
        composition3 = QgsComposition(project)
        composition3.setName('test composition3')
        manager.addComposition(composition)
        manager.addComposition(composition2)
        manager.addComposition(composition3)
        self.assertFalse(manager.compositionByName('asdf'))
        self.assertEqual(manager.compositionByName('test composition'), composition)
        self.assertEqual(manager.compositionByName('test composition2'), composition2)
        self.assertEqual(manager.compositionByName('test composition3'), composition3)
    def testReadWriteXml(self):
        """
        Test reading and writing layout manager state to XML
        """
        project = QgsProject()
        manager = QgsLayoutManager(project)
        # add a bunch of compositions
        composition = QgsComposition(project)
        composition.setName('test composition')
        composition2 = QgsComposition(project)
        composition2.setName('test composition2')
        composition3 = QgsComposition(project)
        composition3.setName('test composition3')
        manager.addComposition(composition)
        manager.addComposition(composition2)
        manager.addComposition(composition3)
        # save to xml
        doc = QDomDocument("testdoc")
        elem = manager.writeXml(doc)
        doc.appendChild(elem)
        # restore from xml
        project2 = QgsProject()
        manager2 = QgsLayoutManager(project2)
        self.assertTrue(manager2.readXml(elem, doc))
        self.assertEqual(len(manager2.compositions()), 3)
        names = [c.name() for c in manager2.compositions()]
        self.assertEqual(set(names), {'test composition', 'test composition2', 'test composition3'})
    def testSaveAsTemplate(self):
        """
        Test saving composition as template
        """
        project = QgsProject()
        manager = QgsLayoutManager(project)
        doc = QDomDocument("testdoc")
        self.assertFalse(manager.saveAsTemplate('not in manager', doc))
        composition = QgsComposition(project)
        composition.setName('test composition')
        manager.addComposition(composition)
        self.assertTrue(manager.saveAsTemplate('test composition', doc))
    def testDuplicateComposition(self):
        """
        Test duplicating compositions
        """
        project = QgsProject()
        manager = QgsLayoutManager(project)
        doc = QDomDocument("testdoc")
        self.assertFalse(manager.duplicateComposition('not in manager', 'dest'))
        composition = QgsComposition(project)
        composition.setName('test composition')
        composition.setPaperSize(100, 200)
        manager.addComposition(composition)
        # duplicate name
        self.assertFalse(manager.duplicateComposition('test composition', 'test composition'))
        result = manager.duplicateComposition('test composition', 'dupe composition')
        self.assertTrue(result)
        # make sure result in stored in manager
        self.assertEqual(result, manager.compositionByName('dupe composition'))
        self.assertEqual(result.name(), 'dupe composition')
        self.assertEqual(result.paperHeight(), 200)
        self.assertEqual(result.paperWidth(), 100)
    def testGenerateUniqueTitle(self):
        """Titles increment as 'Composer N' and reset after clear()."""
        project = QgsProject()
        manager = QgsLayoutManager(project)
        self.assertEqual(manager.generateUniqueTitle(), 'Composer 1')
        composition = QgsComposition(project)
        composition.setName(manager.generateUniqueTitle())
        manager.addComposition(composition)
        self.assertEqual(manager.generateUniqueTitle(), 'Composer 2')
        composition2 = QgsComposition(project)
        composition2.setName(manager.generateUniqueTitle())
        manager.addComposition(composition2)
        self.assertEqual(manager.generateUniqueTitle(), 'Composer 3')
        manager.clear()
        self.assertEqual(manager.generateUniqueTitle(), 'Composer 1')
    def testRenameSignal(self):
        """Renaming a managed composition emits compositionRenamed."""
        project = QgsProject()
        manager = QgsLayoutManager(project)
        composition = QgsComposition(project)
        composition.setName('c1')
        manager.addComposition(composition)
        composition2 = QgsComposition(project)
        composition2.setName('c2')
        manager.addComposition(composition2)
        composition_renamed_spy = QSignalSpy(manager.compositionRenamed)
        composition.setName('d1')
        self.assertEqual(len(composition_renamed_spy), 1)
        self.assertEqual(composition_renamed_spy[0][0], composition)
        self.assertEqual(composition_renamed_spy[0][1], 'd1')
        composition2.setName('d2')
        self.assertEqual(len(composition_renamed_spy), 2)
        self.assertEqual(composition_renamed_spy[1][0], composition2)
        self.assertEqual(composition_renamed_spy[1][1], 'd2')
# Allow running this test module directly: python test_qgslayoutmanager.py
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
AlanZatarain/raft | thirdparty/pyamf/pyamf/adapters/_elixir.py | 39 | 1706 | # Copyright (c) The PyAMF Project.
# See LICENSE for details.
"""
Elixir adapter module. Elixir adds a number of properties to the mapped instances.
@see: U{Elixir homepage<http://elixir.ematia.de>}
@since: 0.6
"""
import elixir.entity
import pyamf
from pyamf import adapters
# Elixir entities are SQLAlchemy mapped classes under the hood, so reuse the
# SQLAlchemy ORM adapter and register the Elixir entity check with it.
adapter = adapters.get_adapter('sqlalchemy.orm')
adapter.class_checkers.append(elixir.entity.is_entity)
class ElixirAdapter(adapter.SaMappedClassAlias):
    """Class alias for Elixir entities.
    Extends the SQLAlchemy mapped-class alias so the extra attributes and
    implementation columns Elixir adds to mapped instances are excluded
    from AMF encoding.
    """
    EXCLUDED_ATTRS = adapter.SaMappedClassAlias.EXCLUDED_ATTRS + [
        '_global_session']
    def getCustomProperties(self):
        adapter.SaMappedClassAlias.getCustomProperties(self)
        self.descriptor = self.klass._descriptor
        self.parent_descriptor = None
        if self.descriptor.parent:
            self.parent_descriptor = self.descriptor.parent._descriptor
        # Collect foreign-key constraint column names so they can be
        # excluded from encoding below.
        foreign_constraints = []
        for constraint in self.descriptor.constraints:
            for col in constraint.columns:
                col = str(col)
                # NOTE(review): adapter.__version__ appears to be the
                # SQLAlchemy version -- 0.6 reports bare column names while
                # later versions prefix them with the table name. Confirm.
                if adapter.__version__.startswith('0.6'):
                    foreign_constraints.append(col)
                else:
                    if col.startswith(self.descriptor.tablename + '.'):
                        foreign_constraints.append(col[len(self.descriptor.tablename) + 1:])
        if self.descriptor.polymorphic:
            # the polymorphic discriminator column is an implementation detail
            self.exclude_attrs.update([self.descriptor.polymorphic])
        self.exclude_attrs.update(foreign_constraints)
    def _compile_base_class(self, klass):
        # Elixir's own base classes carry no user attributes; skip them.
        if klass is elixir.EntityBase or klass is elixir.Entity:
            return
        pyamf.ClassAlias._compile_base_class(self, klass)
pyamf.register_alias_type(ElixirAdapter, elixir.entity.is_entity) | gpl-3.0 |
TD22057/T-Home | python/tHome/eagle/config.py | 1 | 1136 | #===========================================================================
#
# Config file
#
#===========================================================================
__doc__ = """Config file parsing.
"""
from .. import util
from ..util import config as C
#===========================================================================
# Config file section name and defaults.
configEntries = [
    # ( name, converter function, default value )
    # NOTE(review): entries listed without a default value appear to be
    # mandatory in the config file -- confirm against util.config.
    C.Entry( "httpPort", int, 22042 ),
    C.Entry( "mqttEnergy", str ),
    C.Entry( "mqttPower", str ),
    C.Entry( "logFile", util.path.expand ),
    C.Entry( "logLevel", int, 20 ), # INFO
    ]
#===========================================================================
def parse(configDir, configFile='eagle.py'):
    """Read and validate the eagle config file found in configDir."""
    return C.readAndCheck(configDir, configFile, configEntries)
#===========================================================================
def log(config, logFile=None):
    """Create the "eagle" logger, defaulting the file to config.logFile."""
    target = logFile if logFile else config.logFile
    return util.log.get("eagle", config.logLevel, target)
#===========================================================================
| bsd-2-clause |
JioCloud/glance | glance/tests/functional/test_glance_manage.py | 6 | 2729 | # Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test cases for glance-manage"""
import os
import sys
from glance.common import utils
from glance.tests import functional
from glance.tests.utils import depends_on_exe
from glance.tests.utils import execute
from glance.tests.utils import skip_if_disabled
class TestGlanceManage(functional.FunctionalTest):
    """Functional tests for glance-manage"""

    def setUp(self):
        super(TestGlanceManage, self).setUp()
        conf_dir = os.path.join(self.test_dir, 'etc')
        utils.safe_mkdirs(conf_dir)
        self.conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')
        self.db_filepath = os.path.join(self.test_dir, 'tests.sqlite')
        self.connection = ('sql_connection = sqlite:///%s' %
                           self.db_filepath)

    def _sync_db(self):
        """Write a minimal config file and run 'glance-manage db sync'."""
        with open(self.conf_filepath, 'wb') as conf_file:
            conf_file.write('[DEFAULT]\n')
            conf_file.write(self.connection)
            conf_file.flush()

        cmd = ('%s -m glance.cmd.manage --config-file %s db sync' %
               (sys.executable, self.conf_filepath))
        execute(cmd, raise_error=True)

    def _assert_tables(self):
        """Check that db sync created the expected tables."""
        cmd = "sqlite3 %s '.schema'" % self.db_filepath
        exitcode, out, err = execute(cmd, raise_error=True)

        # assertIn reports both operands on failure, unlike
        # assertTrue(x in y) which only says "False is not true".
        self.assertIn('CREATE TABLE images', out)
        self.assertIn('CREATE TABLE image_tags', out)
        self.assertIn('CREATE TABLE image_locations', out)

        # NOTE(bcwaldon): For some reason we need double-quotes around
        # these two table names
        # NOTE(vsergeyev): There are some cases when we have no double-quotes
        self.assertTrue(
            'CREATE TABLE "image_members"' in out or
            'CREATE TABLE image_members' in out)
        self.assertTrue(
            'CREATE TABLE "image_properties"' in out or
            'CREATE TABLE image_properties' in out)

    @depends_on_exe('sqlite3')
    @skip_if_disabled
    def test_db_creation(self):
        """Test DB creation by db_sync on a fresh DB"""
        self._sync_db()
        self._assert_tables()
| apache-2.0 |
pwil3058/darning | darning/cli/subcmd_select.py | 1 | 1953 | ### Copyright (C) 2010 Peter Williams <peter_ono@users.sourceforge.net>
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 of the License only.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Select/display which patch guards are in force."""
import sys
from . import cli_args
from . import db_utils
from . import msg
# Argument parser for the "select" sub-command.
PARSER = cli_args.SUB_CMD_PARSER.add_parser(
    "select",
    description=_("Display/select which patch guards are in force."),
    epilog=_("""When invoked with no arguments the currently selected guards are listed."""),
)

# --none and --set are mutually exclusive ways of choosing guards.
GROUP = PARSER.add_mutually_exclusive_group()

GROUP.add_argument(
    "-n", "--none",
    help=_("Disable all guards."),
    dest="opt_none",
    action="store_true",
)

GROUP.add_argument(
    "-s", "--set",
    help=_("the list of guards to be enabled/selected."),
    dest="guards",
    metavar="guard",
    action="append",
)
def run_select(args):
    """Execute the "select" sub command using the supplied args."""
    patch_manager = db_utils.get_pm_db()
    db_utils.set_report_context(verbose=True)
    if args.opt_none:
        # Explicit request to disable every guard.
        return patch_manager.do_select_guards(None)
    if args.guards:
        return patch_manager.do_select_guards(args.guards)
    # No arguments given: list the currently selected guards, sorted.
    for guard in sorted(patch_manager.get_selected_guards()):
        sys.stdout.write(guard + "\n")
    return 0

PARSER.set_defaults(run_cmd=run_select)
| gpl-2.0 |
onestarshang/flask_super_config | venv/lib/python2.7/site-packages/pip/__init__.py | 50 | 10414 | #!/usr/bin/env python
from __future__ import absolute_import
import logging
import os
import optparse
import warnings
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "7.1.0"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).

    Enable by sourcing one of the completion shell scripts (bash or zsh).
    Completion candidates are printed to stdout for the shell to consume,
    and the process always exits via sys.exit(1).
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # COMP_WORDS/COMP_CWORD come from the shell's completion machinery.
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands_dict[subcommand_name]()
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        if current.startswith('-') or current.startswith('--'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)
            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def create_main_parser():
    """Build the top-level option parser used by every pip invocation."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()

    package_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, package_dir, sys.version[:3])

    # add the general options
    general_group = cmdoptions.make_option_group(
        cmdoptions.general_group, parser)
    parser.add_option_group(general_group)

    parser.main = True  # so the help formatter knows

    # One padded "name  summary" line per sub-command in the description.
    summaries = get_summaries()
    parser.description = '\n'.join(
        [''] + ['%-27s %s' % (name, summary) for name, summary in summaries])

    return parser
def parseopts(args):
    """Split *args* into the subcommand name and the remaining arguments."""
    parser = create_main_parser()

    # Note: parser calls disable_interspersed_args(), so the result of this
    # call is to split the initial args into the general options before the
    # subcommand and everything else.
    # For example:
    #  args: ['--timeout=5', 'install', '--user', 'INITools']
    #  general_options: ['--timeout==5']
    #  args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()

    # pip || pip help -> print_help()
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    # the subcommand name
    cmd_name = args_else[0]

    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)

        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)

        raise CommandError(' - '.join(msg))

    # all the args without the subcommand
    cmd_args = args[:]
    # NOTE(review): remove() drops the *first* occurrence of cmd_name; if an
    # earlier option value happens to equal the command name, the wrong
    # element is removed -- confirm whether this edge case matters.
    cmd_args.remove(cmd_name)

    return cmd_name, cmd_args
def check_isolated(args):
    """Return True when the ``--isolated`` flag appears in *args*.

    :param args: list of command-line argument strings
    :return: bool
    """
    # Idiomatic membership test replaces the manual flag-and-assign dance.
    return "--isolated" in args
def main(args=None):
    """pip's console entry point; returns the sub-command's exit status."""
    if args is None:
        args = sys.argv[1:]

    # Enable our Deprecation Warnings
    for deprecation_warning in deprecation.DEPRECATIONS:
        warnings.simplefilter("default", deprecation_warning)

    # Configure our deprecation warnings to be sent through loggers
    deprecation.install_warning_logger()

    # May print completions and exit when PIP_AUTO_COMPLETE is set.
    autocomplete()

    try:
        cmd_name, cmd_args = parseopts(args)
    except PipError as exc:
        sys.stderr.write("ERROR: %s" % exc)
        sys.stderr.write(os.linesep)
        sys.exit(1)

    # Instantiate the chosen command and delegate the rest of the work.
    command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
    return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
    """One requirement line as it appears in ``pip freeze`` output."""

    def __init__(self, name, req, editable, comments=()):
        # name: project name; req: requirement string/object;
        # editable: True when the dist came from a VCS checkout (-e).
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments

    # Version suffixes produced by setuptools for svn checkouts:
    # "-r<revision>" and "-<YYYYMMDD>" date stamps.
    _rev_re = re.compile(r'-r(\d+)$')
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')

    @classmethod
    def from_dist(cls, dist, dependency_links, find_tags=False):
        """Build a FrozenRequirement describing an installed distribution."""
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        from pip.vcs import vcs, get_src_requirement
        if vcs.get_backend_name(location):
            # Installed from a VCS checkout: try to freeze it as editable.
            editable = True
            try:
                req = get_src_requirement(dist, location, find_tags)
            except InstallationError as exc:
                logger.warning(
                    "Error when trying to get requirement for VCS system %s, "
                    "falling back to uneditable format", exc
                )
                req = None
            if req is None:
                logger.warning(
                    'Could not determine repository location of %s', location
                )
                comments.append(
                    '## !! Could not determine repository location'
                )
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
                'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
                (specs, dist)
            version = specs[0][1]
            # Detect setuptools-generated svn revision/date suffixes so svn
            # installs can still be frozen as editable URLs.
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            if ver_match or date_match:
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend().get_location(
                        dist,
                        dependency_links,
                    )
                if not svn_location:
                    logger.warning(
                        'Warning: cannot find svn location for %s', req)
                    comments.append(
                        '## FIXME: could not find svn URL in dependency_links '
                        'for this package:'
                    )
                else:
                    comments.append(
                        '# Installing as editable to satisfy requirement %s:' %
                        req
                    )
                    if ver_match:
                        rev = ver_match.group(1)
                    else:
                        # Date stamps freeze as svn's {YYYYMMDD} revision form.
                        rev = '{%s}' % date_match.group(1)
                    editable = True
                    req = '%s@%s#egg=%s' % (
                        svn_location,
                        rev,
                        cls.egg_name(dist)
                    )
        return cls(dist.project_name, req, editable, comments)

    @staticmethod
    def egg_name(dist):
        # Strip the trailing "-pyX.Y" tag from the egg name, if present.
        name = dist.egg_name()
        match = re.search(r'-py\d\.\d$', name)
        if match:
            name = name[:match.start()]
        return name

    def __str__(self):
        # Render as "[comments]\n[-e ]requirement\n" for the freeze file.
        req = self.req
        if self.editable:
            req = '-e %s' % req
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
    # Support "python -m pip"-style direct execution of this module.
    sys.exit(main())
| gpl-2.0 |
xkollar/spacewalk | client/rhel/spacewalk-oscap/scap.py | 9 | 6258 |
import sys
import os
import subprocess
import xml.sax
import tempfile
import shutil
from base64 import encodestring
sys.path.append("/usr/share/rhn/")
from up2date_client import up2dateLog
from up2date_client import rhnserver
from up2date_client import up2dateAuth
from up2date_client import up2dateErrors
__rhnexport__ = [ 'xccdf_eval' ]
log = up2dateLog.initLog()
def xccdf_eval(args, cache_only=None):
    """Run an OpenSCAP XCCDF evaluation requested by the server.

    args is a dict with 'path', 'params' and optionally 'id'/'file_size'
    (which enable upload of detailed results).  Returns a
    (status, message, data) tuple in the rhn action format.
    """
    if cache_only:
        return (0, 'no-ops for caching', {})

    # Detailed results were requested: work inside a scratch directory so
    # oscap's extra artifacts (oval results, HTML report) land there.
    results_dir = None
    if ('id' in args) and ('file_size' in args) and args['file_size'] > 0:
        results_dir = tempfile.mkdtemp()
        pwd = os.getcwd()
        os.chdir(results_dir)

    results_file = tempfile.NamedTemporaryFile(dir=results_dir)
    params, oscap_err = _process_params(args['params'], results_file.name, results_dir)

    oscap_err += _run_oscap(['xccdf', 'eval'] + params + [args['path']])
    if results_dir:
        os.chdir(pwd)

    if not _assert_xml(results_file.file):
        del(results_file)
        _cleanup_temp(results_dir)
        return (1, 'oscap tool did not produce valid xml.\n' + oscap_err, {})

    # Reduce the full results to a short "resume" via XSLT.
    ret, resume, xslt_err = _xccdf_resume(results_file.name, temp_dir=results_dir)
    if ret != 0 or resume == '':
        del(results_file)
        _cleanup_temp(results_dir)
        return (1, 'Problems with extracting resume:\n' + xslt_err, {})

    try:
        up_err = _upload_results(results_file, results_dir, args)
    except:
        # An error during the upload must not prevent scan completion
        log.log_exception(*sys.exc_info())
        up_err = "Upload of detailed results failed. Fatal error in Python code occurred"

    del(results_file)
    _cleanup_temp(results_dir)
    return (0, 'openscap scan completed', {
        'resume': encodestring(resume),
        'errors': encodestring(oscap_err + xslt_err + up_err)
    })
def _run_oscap(arguments):
    """Run /usr/bin/oscap with *arguments*, discarding its stdout.

    Returns the tool's stderr text, with an extra note appended when the
    exit status is non-zero.
    """
    sink = open('/dev/null')
    process = _popen(['/usr/bin/oscap'] + arguments, stdout=sink.fileno())
    exit_status = process.wait()
    sink.close()
    errors = process.stderr.read()
    if exit_status != 0:
        errors += 'xccdf_eval: oscap tool returned %i\n' % exit_status
    log.log_debug('The oscap tool completed\n%s' % errors)
    return errors
def _xccdf_resume(results_file, temp_dir=None):
    """Extract the short 'resume' summary from full XCCDF results via XSLT.

    Returns a (exitcode, resume_text, errors) tuple.
    """
    stylesheet = '/usr/share/openscap/xsl/xccdf-resume.xslt'
    sink = open('/dev/null')
    output_file = tempfile.NamedTemporaryFile(dir=temp_dir)
    process = _popen(['/usr/bin/xsltproc', '--output', output_file.name,
                      stylesheet, results_file], stdout=sink.fileno())
    exit_status = process.wait()
    sink.close()
    errors = process.stderr.read()
    if exit_status != 0:
        errors += 'xccdf_eval: xsltproc tool returned %i\n' % exit_status
    log.log_debug('The xsltproc tool completed:\n%s' % errors)
    resume = output_file.read()
    del(output_file)
    return exit_status, resume, errors
def _popen(args, stdout=subprocess.PIPE):
    """Spawn *args* (a list) with piped stdin/stderr; *stdout* may be a
    file descriptor or subprocess.PIPE.  shell=False keeps args literal."""
    # Log the exact command line to help diagnose failed scans.
    log.log_debug('Running: ' + str(args))
    return subprocess.Popen(args, bufsize=-1, stdin=subprocess.PIPE,
                            stdout=stdout, stderr=subprocess.PIPE, shell=False)
def _process_params(args, filename, results_dir=None):
params = ['--results', filename]
if results_dir:
params += ['--oval-results', '--report', 'xccdf-report.html']
errors = ''
if args:
allowed_args = {
'--profile': 1,
'--skip-valid': 0,
'--cpe': 1,
'--fetch-remote-resources': 0,
'--datastream-id': 1,
'--xccdf-id': 1,
'--tailoring-id': 1,
'--tailoring-file': 1,
}
args = args.split(' ')
i = 0
while i < len(args):
if args[i] in allowed_args:
j = i + allowed_args[args[i]]
params += args[i:j+1]
i = j
elif not errors:
errors = 'xccdf_eval: Following arguments forbidden: ' + args[i]
else:
errors += ' ' + args[i]
i += 1
if errors:
errors += '\n'
return params, errors
def _upload_results(xccdf_result, results_dir, args):
    """Upload every artifact from results_dir to the Spacewalk server.

    Returns a string of accumulated error messages ('' on full success).
    """
    errors = ''
    if results_dir:
        server = rhnserver.RhnServer()
        # No need to check capabilities. The server supports detailed results
        # if the 'file_size' and 'id' keys were supplied in the argument list.
        systemid = up2dateAuth.getSystemId()
        for filename in os.listdir(results_dir):
            path = os.path.join(results_dir, filename)
            if path == xccdf_result.name:
                # The main results tempfile gets a canonical upload name.
                f = xccdf_result.file
                filename = "xccdf-results.xml"
            else:
                f = open(path, 'r')
            errors += _upload_file(server, systemid, args, path, filename, f)
            # Keep the main results file open; the caller still owns it.
            if path != xccdf_result.name:
                f.close()
    return errors
def _upload_file(server, systemid, args, path, filename, f):
    """Upload one artifact; return '' on success or an error description.

    Only well-formed XML files (plus the HTML report) are uploaded, and
    only when smaller than the server-imposed args['file_size'] limit.
    """
    if filename != 'xccdf-report.html' and not _assert_xml(f):
        log.log_debug('Excluding "%s" file from upload. Not an XML.' % path)
        return '\nxccdf_eval: File "%s" not uploaded. Not an XML file format.' % filename
    stat = os.fstat(f.fileno())
    if stat.st_size < args['file_size']:
        try:
            # Content is base64-encoded for the XML-RPC transport.
            ret = server.scap.upload_result(systemid, args['id'],
                                            {'filename': filename,
                                             'filecontent': encodestring(f.read()),
                                             'content-encoding': 'base64',
                                             })
            if ret and ret['result']:
                log.log_debug('The file %s uploaded successfully.' % filename)
                return ''
            # NOTE(review): when the server answers without raising but with a
            # falsy result, control falls off the end and returns None, which
            # would break the caller's string concatenation -- confirm.
        except up2dateErrors.Error, e:
            log.log_exception(*sys.exc_info())
            return '\nxccdf_eval: File "%s" not uploaded. %s' % (filename, e)
    else:
        return '\nxccdf_eval: File "%s" not uploaded. File size (%d B) exceeds the limit.' \
               % (filename, stat.st_size)
def _cleanup_temp(results_dir):
if results_dir:
shutil.rmtree(results_dir)
def _assert_xml(f):
    """Return True when file object *f* parses as well-formed XML.

    The file position is always rewound to the start afterwards so the
    caller can re-read the content.
    """
    try:
        try:
            xml.sax.parse(f, xml.sax.ContentHandler())
            return True
        except Exception, e:
            # Any parse problem (malformed XML, IO error) means "not XML".
            log.log_exception(*sys.exc_info())
            return False
    finally:
        f.seek(0)
| gpl-2.0 |
robotbill/VIctor | victor/path.py | 1 | 1202 | import pyglet;
from victor.vector import *;
__all__ = [ 'Path' ];
def _pairwise(seq):
from itertools import tee, izip;
a, b = tee(iter(seq));
next(b);
return izip(a, b);
class Path(object):
    """A polyline of timestamped points, renderable as a GL line strip.

    Each entry of ``points`` is a ``(time, position)`` pair; the path starts
    at time 0.0 and each append() advances the time by 1.0.
    """

    def __init__(self, pos, color=(0, 0, 0, 255)):
        """Start the path at *pos* with RGBA *color* (0-255 components)."""
        self.points = [(0., pos)]
        self.color = color
        self.batch = None  # lazily (re)built pyglet batch used by draw()

    def append(self, p):
        """Add point *p* one time unit after the current last point."""
        self.points.append((self.points[-1][0] + 1., p))
        self.reset_batch()

    def evaluate(self, t):
        """Linearly interpolate the position at time *t*.

        Returns None when *t* is outside [first point time, last point time).
        """
        for (t0, p0), (t1, p1) in zip(self.points, self.points[1:]):
            if t0 <= t < t1:
                # Bug fix: the original returned (1-t)*p0 + t*p0, blending the
                # segment start with itself (and using the global time as the
                # weight).  Interpolate between p0 and p1 with the fraction of
                # the way through this segment instead.
                frac = (t - t0) / (t1 - t0)
                return (1. - frac) * p0 + frac * p1

    def approximate(self):
        """Return the control points as integer vectors."""
        return [vec2i(*p) for t, p in self.points]

    def reset_batch(self):
        """Rebuild the pyglet vertex batch from the current points."""
        from itertools import islice, cycle, chain
        batch = pyglet.graphics.Batch()
        points = self.approximate()
        batch.add(
            len(points), pyglet.gl.GL_LINE_STRIP, None,
            ('v2i', tuple(chain(*points))),
            ('c4B', tuple(islice(cycle(self.color), 0,
                                 len(self.color) * len(points))))
        )
        self.batch = batch

    def draw(self, batch=None):
        """Draw the cached batch, if one has been built."""
        if self.batch:
            self.batch.draw()
| mit |
jmschrei/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
y = np.array(iris.target, dtype=int)

h = .02  # step size in the mesh

# Isotropic kernel: a single length-scale shared by both features.
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# Anisotropic kernel: one length-scale per feature dimension.
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
    # Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(1, 2, i + 1)

    Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot (one RGB channel per class).
    Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
    plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title("%s, LML: %.3f" %
              (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))

plt.tight_layout()
plt.show()
| bsd-3-clause |
cyclecomputing/boto | boto/ec2/autoscale/activity.py | 57 | 3059 | # Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
class Activity(object):
    """One Auto Scaling activity, populated from boto's SAX event parser."""

    def __init__(self, connection=None):
        self.connection = connection
        self.start_time = None      # datetime when the activity began
        self.end_time = None        # datetime when the activity finished
        self.activity_id = None
        self.progress = None        # progress percentage string
        self.status_code = None
        self.cause = None
        self.description = None
        self.status_message = None
        self.group_name = None

    def __repr__(self):
        # Bug fix: the "progress" slot previously printed status_message.
        return 'Activity<%s>: For group:%s, progress:%s, cause:%s' % (
            self.activity_id,
            self.group_name,
            self.progress,
            self.cause)

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        """Store the text *value* of a closed XML element on this object."""
        if name == 'ActivityId':
            self.activity_id = value
        elif name == 'AutoScalingGroupName':
            self.group_name = value
        elif name == 'StartTime':
            # Timestamps may or may not carry fractional seconds.
            try:
                self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError:
                self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
        elif name == 'EndTime':
            try:
                self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError:
                self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
        elif name == 'Progress':
            self.progress = value
        elif name == 'Cause':
            self.cause = value
        elif name == 'Description':
            self.description = value
        elif name == 'StatusMessage':
            self.status_message = value
        elif name == 'StatusCode':
            self.status_code = value
        else:
            # Unknown elements become plain attributes.
            setattr(self, name, value)
| mit |
vipul-sharma20/oh-mainline | vendor/packages/twisted/twisted/internet/inotify.py | 18 | 13944 | # -*- test-case-name: twisted.internet.test.test_inotify -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides Twisted support for the Linux inotify API.
In order to use this support, simply do the following (and start a reactor
at some point)::
from twisted.internet import inotify
from twisted.python import filepath
def notify(ignored, filepath, mask):
\"""
For historical reasons, an opaque handle is passed as first
parameter. This object should never be used.
@param filepath: FilePath on which the event happened.
@param mask: inotify event as hexadecimal masks
\"""
print "event %s on %s" % (
', '.join(inotify.humanReadableMask(mask)), filepath)
notifier = inotify.INotify()
notifier.startReading()
notifier.watch(filepath.FilePath("/some/directory"), callbacks=[notify])
@since: 10.1
"""
import os
import struct
from twisted.internet import fdesc
from twisted.internet.abstract import FileDescriptor
from twisted.python import log, _inotify
# from /usr/src/linux/include/linux/inotify.h
IN_ACCESS = 0x00000001L         # File was accessed
IN_MODIFY = 0x00000002L         # File was modified
IN_ATTRIB = 0x00000004L         # Metadata changed
IN_CLOSE_WRITE = 0x00000008L    # Writeable file was closed
IN_CLOSE_NOWRITE = 0x00000010L  # Unwriteable file closed
IN_OPEN = 0x00000020L           # File was opened
IN_MOVED_FROM = 0x00000040L     # File was moved from X
IN_MOVED_TO = 0x00000080L       # File was moved to Y
IN_CREATE = 0x00000100L         # Subfile was created
IN_DELETE = 0x00000200L         # Subfile was deleted
IN_DELETE_SELF = 0x00000400L    # Self was deleted
IN_MOVE_SELF = 0x00000800L      # Self was moved
IN_UNMOUNT = 0x00002000L        # Backing fs was unmounted
IN_Q_OVERFLOW = 0x00004000L     # Event queue overflowed
IN_IGNORED = 0x00008000L        # File was ignored

IN_ONLYDIR = 0x01000000         # only watch the path if it is a directory
IN_DONT_FOLLOW = 0x02000000     # don't follow a sym link
IN_MASK_ADD = 0x20000000        # add to the mask of an already existing watch
IN_ISDIR = 0x40000000           # event occurred against dir
IN_ONESHOT = 0x80000000         # only send event once

# Convenience aggregates of the kernel flags above.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE  # closes
IN_MOVED = IN_MOVED_FROM | IN_MOVED_TO        # moves
IN_CHANGED = IN_MODIFY | IN_ATTRIB            # changes

# Default event mask used by INotify.watch().
IN_WATCH_MASK = (IN_MODIFY | IN_ATTRIB |
                 IN_CREATE | IN_DELETE |
                 IN_DELETE_SELF | IN_MOVE_SELF |
                 IN_UNMOUNT | IN_MOVED_FROM | IN_MOVED_TO)

# (flag, human readable name) pairs consumed by humanReadableMask().
_FLAG_TO_HUMAN = [
    (IN_ACCESS, 'access'),
    (IN_MODIFY, 'modify'),
    (IN_ATTRIB, 'attrib'),
    (IN_CLOSE_WRITE, 'close_write'),
    (IN_CLOSE_NOWRITE, 'close_nowrite'),
    (IN_OPEN, 'open'),
    (IN_MOVED_FROM, 'moved_from'),
    (IN_MOVED_TO, 'moved_to'),
    (IN_CREATE, 'create'),
    (IN_DELETE, 'delete'),
    (IN_DELETE_SELF, 'delete_self'),
    (IN_MOVE_SELF, 'move_self'),
    (IN_UNMOUNT, 'unmount'),
    (IN_Q_OVERFLOW, 'queue_overflow'),
    (IN_IGNORED, 'ignored'),
    (IN_ONLYDIR, 'only_dir'),
    (IN_DONT_FOLLOW, 'dont_follow'),
    (IN_MASK_ADD, 'mask_add'),
    (IN_ISDIR, 'is_dir'),
    (IN_ONESHOT, 'one_shot')
]
def humanReadableMask(mask):
    """
    Auxiliary function that converts an hexadecimal mask into a list
    of human readable flag names.
    """
    return [name for flag, name in _FLAG_TO_HUMAN if flag & mask]
class _Watch(object):
    """
    A single inotify watch point in the filesystem.  Instances are created
    by L{INotify}; user code should not build them directly.

    @ivar path: The path this watch point is monitoring
    @ivar mask: The inotify events monitored by this watch point
    @ivar autoAdd: Whether newly created subdirectories are watched
        automatically
    @ivar callbacks: C{list} of callables invoked for each event on this
        watch.
    """
    def __init__(self, path, mask=IN_WATCH_MASK, autoAdd=False,
                 callbacks=None):
        self.path = path
        self.mask = mask
        self.autoAdd = autoAdd
        # Never share a mutable default between watch points.
        self.callbacks = [] if callbacks is None else callbacks

    def _notify(self, filepath, events):
        """
        Dispatch *events* on *filepath* to every registered callback.
        """
        for callback in self.callbacks:
            callback(self, filepath, events)
class INotify(FileDescriptor, object):
    """
    The INotify file descriptor, it basically does everything related
    to INotify, from reading to notifying watch points.

    @ivar _buffer: a C{str} containing the data read from the inotify fd.

    @ivar _watchpoints: a C{dict} that maps from inotify watch ids to
        watchpoints objects

    @ivar _watchpaths: a C{dict} that maps from watched paths to the
        inotify watch ids
    """
    _inotify = _inotify

    def __init__(self, reactor=None):
        FileDescriptor.__init__(self, reactor=reactor)

        # Smart way to allow parametrization of libc so I can override
        # it and test for the system errors.
        self._fd = self._inotify.init()

        fdesc.setNonBlocking(self._fd)
        fdesc._setCloseOnExec(self._fd)

        # The next 2 lines are needed to have self.loseConnection()
        # to call connectionLost() on us. Since we already created the
        # fd that talks to inotify we want to be notified even if we
        # haven't yet started reading.
        self.connected = 1
        self._writeDisconnected = True

        self._buffer = ''
        self._watchpoints = {}
        self._watchpaths = {}

    def _addWatch(self, path, mask, autoAdd, callbacks):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API and checks for any errors after the
        call. If there's an error L{INotify._addWatch} can raise an
        INotifyError. If there's no error it proceeds creating a watchpoint and
        adding a watchpath for inverse lookup of the file descriptor from the
        path.
        """
        wd = self._inotify.add(self._fd, path.path, mask)

        iwp = _Watch(path, mask, autoAdd, callbacks)

        self._watchpoints[wd] = iwp
        self._watchpaths[path] = wd

        return wd

    def _rmWatch(self, wd):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API to remove an fd from inotify then
        removes the corresponding watchpoint from the internal mapping together
        with the file descriptor from the watchpath.
        """
        self._inotify.remove(self._fd, wd)
        iwp = self._watchpoints.pop(wd)
        self._watchpaths.pop(iwp.path)

    def connectionLost(self, reason):
        """
        Release the inotify file descriptor and do the necessary cleanup
        """
        FileDescriptor.connectionLost(self, reason)
        if self._fd >= 0:
            try:
                os.close(self._fd)
            except OSError, e:
                log.err(e, "Couldn't close INotify file descriptor.")

    def fileno(self):
        """
        Get the underlying file descriptor from this inotify observer.
        Required by L{abstract.FileDescriptor} subclasses.
        """
        return self._fd

    def doRead(self):
        """
        Read some data from the observed file descriptors
        """
        fdesc.readFromFD(self._fd, self._doRead)

    def _doRead(self, in_):
        """
        Work on the data just read from the file descriptor.
        """
        self._buffer += in_
        # Each event record is a 16-byte header of four unsigned longs
        # (wd, mask, cookie, name length) followed by the NUL-padded name.
        while len(self._buffer) >= 16:

            wd, mask, cookie, size = struct.unpack("=LLLL", self._buffer[0:16])

            if size:
                name = self._buffer[16:16 + size].rstrip('\0')
            else:
                name = None

            self._buffer = self._buffer[16 + size:]

            try:
                iwp = self._watchpoints[wd]
            except KeyError:
                # Event for a watch we no longer track; drop it.
                continue

            path = iwp.path
            if name:
                path = path.child(name)
            iwp._notify(path, mask)

            if (iwp.autoAdd and mask & IN_ISDIR and mask & IN_CREATE):
                # mask & IN_ISDIR already guarantees that the path is a
                # directory. There's no way you can get here without a
                # directory anyway, so no point in checking for that again.
                new_wd = self.watch(
                    path, mask=iwp.mask, autoAdd=True,
                    callbacks=iwp.callbacks
                )
                # This is very very very hacky and I'd rather not do this but
                # we have no other alternative that is less hacky other than
                # surrender. We use callLater because we don't want to have
                # too many events waiting while we process these subdirs, we
                # must always answer events as fast as possible or the overflow
                # might come.
                self.reactor.callLater(0,
                    self._addChildren, self._watchpoints[new_wd])
            if mask & IN_DELETE_SELF:
                self._rmWatch(wd)

    def _addChildren(self, iwp):
        """
        This is a very private method, please don't even think about using it.

        Note that this is a fricking hack... it's because we cannot be fast
        enough in adding a watch to a directory and so we basically end up
        getting here too late if some operations have already been going on in
        the subdir, we basically need to catchup.  This eventually ends up
        meaning that we generate double events, your app must be resistant.
        """
        try:
            listdir = iwp.path.children()
        except OSError:
            # Somebody or something (like a test) removed this directory while
            # we were in the callLater(0...) waiting. It doesn't make sense to
            # process it anymore
            return

        # note that it's true that listdir will only see the subdirs inside
        # path at the moment of the call but path is monitored already so if
        # something is created we will receive an event.
        for f in listdir:
            # It's a directory, watch it and then add its children
            if f.isdir():
                wd = self.watch(
                    f, mask=iwp.mask, autoAdd=True,
                    callbacks=iwp.callbacks
                )
                iwp._notify(f, IN_ISDIR|IN_CREATE)
                # now f is watched, we can add its children the callLater is to
                # avoid recursion
                self.reactor.callLater(0,
                    self._addChildren, self._watchpoints[wd])

            # It's a file and we notify it.
            if f.isfile():
                iwp._notify(f, IN_CREATE|IN_CLOSE_WRITE)

    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False,
              callbacks=None, recursive=False):
        """
        Watch the 'mask' events in given path. Can raise C{INotifyError} when
        there's a problem while adding a directory.

        @param path: The path needing monitoring
        @type path: L{FilePath}

        @param mask: The events that should be watched
        @type mask: C{int}

        @param autoAdd: if True automatically add newly created
                        subdirectories
        @type autoAdd: C{boolean}

        @param callbacks: A list of callbacks that should be called
                          when an event happens in the given path.
                          The callback should accept 3 arguments:
                          (ignored, filepath, mask)
        @type callbacks: C{list} of callables

        @param recursive: Also add all the subdirectories in this path
        @type recursive: C{boolean}
        """
        if recursive:
            # This behavior is needed to be compatible with the windows
            # interface for filesystem changes:
            # http://msdn.microsoft.com/en-us/library/aa365465(VS.85).aspx
            # ReadDirectoryChangesW can do bWatchSubtree so it doesn't
            # make sense to implement this at an higher abstraction
            # level when other platforms support it already
            for child in path.walk():
                if child.isdir():
                    self.watch(child, mask, autoAdd, callbacks,
                               recursive=False)
        else:
            # Reuse an existing watch descriptor for an already-watched path.
            wd = self._isWatched(path)
            if wd:
                return wd

            mask = mask | IN_DELETE_SELF  # need this to remove the watch
            return self._addWatch(path, mask, autoAdd, callbacks)

    def ignore(self, path):
        """
        Remove the watch point monitoring the given path

        @param path: The path that should be ignored
        @type path: L{FilePath}
        """
        wd = self._isWatched(path)
        if wd is not False:
            self._rmWatch(wd)

    def _isWatched(self, path):
        """
        Helper function that checks if the path is already monitored
        and returns its watch descriptor if so or None otherwise.

        @param path: The path that should be checked
        @type path: L{FilePath}
        """
        return self._watchpaths.get(path, None)
INotifyError = _inotify.INotifyError
__all__ = ["INotify", "humanReadableMask", "IN_WATCH_MASK", "IN_ACCESS",
"IN_MODIFY", "IN_ATTRIB", "IN_CLOSE_NOWRITE", "IN_CLOSE_WRITE",
"IN_OPEN", "IN_MOVED_FROM", "IN_MOVED_TO", "IN_CREATE",
"IN_DELETE", "IN_DELETE_SELF", "IN_MOVE_SELF", "IN_UNMOUNT",
"IN_Q_OVERFLOW", "IN_IGNORED", "IN_ONLYDIR", "IN_DONT_FOLLOW",
"IN_MASK_ADD", "IN_ISDIR", "IN_ONESHOT", "IN_CLOSE",
"IN_MOVED", "IN_CHANGED"]
| agpl-3.0 |
Hanaasagi/sorator | orator/orm/factory.py | 1 | 7489 | # -*- coding: utf-8 -*-
import os
import inflection
from faker import Faker
from functools import wraps
from .factory_builder import FactoryBuilder
class Factory:
def __init__(self, faker=None, resolver=None):
    """
    :param faker: A faker generator instance; a fresh one is created
                  when omitted
    :type faker: faker.Generator
    """
    self._faker = faker if faker is not None else Faker()
    self._definitions = {}
    self._resolver = resolver
@classmethod
def construct(cls, faker, path_to_factories=None):
    """
    Create a new factory container.

    :param faker: A faker generator instance
    :type faker: faker.Generator

    :param path_to_factories: The path to factories
    :type path_to_factories: str

    :rtype: Factory
    """
    # Bug fix: build a Factory around the faker instance.  The previous
    # code called faker.__class__(), which produced a bare faker generator
    # rather than the Factory the docstring promises.
    factory = cls(faker)

    if path_to_factories is not None and os.path.isdir(path_to_factories):
        for filename in os.listdir(path_to_factories):
            # Bug fix: os.listdir() yields bare names, so isfile() must be
            # checked against the joined path, not the CWD-relative name.
            if os.path.isfile(os.path.join(path_to_factories, filename)):
                cls._resolve(path_to_factories, filename)

    return factory
def define_as(self, klass, name):
"""
Define a class with the given short name.
:param klass: The class
:type klass: class
:param name: The short name
:type name: str
"""
return self.define(klass, name)
def define(self, klass, name='default'):
"""
Define a class with a given set of attributes.
:param klass: The class
:type klass: class
:param name: The short name
:type name: str
"""
def decorate(func):
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
self.register(klass, func, name=name)
return wrapped
return decorate
def register(self, klass, callback, name='default'):
"""
Register a class with a function.
:param klass: The class
:type klass: class
:param callback: The callable
:type callback: callable
:param name: The short name
:type name: str
"""
if klass not in self._definitions:
self._definitions[klass] = {}
self._definitions[klass][name] = callback
def register_as(self, klass, name, callback):
"""
Register a class with a function.
:param klass: The class
:type klass: class
:param callback: The callable
:type callback: callable
:param name: The short name
:type name: str
"""
return self.register(klass, callback, name)
def create(self, klass, **attributes):
"""
Create an instance of the given model and persist it to the database.
:param klass: The class
:type klass: class
:param attributes: The instance attributes
:type attributes: dict
:return: mixed
"""
return self.of(klass).create(**attributes)
def create_as(self, klass, name, **attributes):
"""
Create an instance of the given model and type and persist it
to the database.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param attributes: The instance attributes
:type attributes: dict
:return: mixed
"""
return self.of(klass, name).create(**attributes)
def make(self, klass, **attributes):
"""
Create an instance of the given model.
:param klass: The class
:type klass: class
:param attributes: The instance attributes
:type attributes: dict
:return: mixed
"""
return self.of(klass).make(**attributes)
def make_as(self, klass, name, **attributes):
"""
Create an instance of the given model and type.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param attributes: The instance attributes
:type attributes: dict
:return: mixed
"""
return self.of(klass, name).make(**attributes)
def raw_of(self, klass, name, **attributes):
"""
Get the raw attribute dict for a given named model.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param attributes: The instance attributes
:type attributes: dict
:return: dict
"""
return self.raw(klass, _name=name, **attributes)
def raw(self, klass, _name='default', **attributes):
"""
Get the raw attribute dict for a given named model.
:param klass: The class
:type klass: class
:param _name: The type
:type _name: str
:param attributes: The instance attributes
:type attributes: dict
:return: dict
"""
raw = self._definitions[klass][_name](self._faker)
raw.update(attributes)
return raw
def of(self, klass, name='default'):
"""
Create a builder for the given model.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:return: orator.orm.factory_builder.FactoryBuilder
"""
return FactoryBuilder(klass, name, self._definitions,
self._faker, self._resolver)
def build(self, klass, name='default', amount=None):
"""
Makes a factory builder with a specified amount.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param amount: The number of models to create
:type amount: int
:return: mixed
"""
if amount is None:
if isinstance(name, int):
amount = name
name = 'default'
else:
amount = 1
return self.of(klass, name).times(amount)
@classmethod
def _resolve(cls, path, factory_file):
"""
Resolve a migration instance from a file.
:param path: The path to factories directory
:type path: str
:param factory_file: The migration file
:type factory_file: str
:rtype: Factory
"""
variables = {}
name = factory_file
factory_file = os.path.join(path, factory_file)
with open(factory_file) as fh:
exec(fh.read(), {}, variables)
klass = variables[inflection.camelize(name)]
instance = klass()
return instance
def set_connection_resolver(self, resolver):
self._resolver = resolver
def __getitem__(self, item):
return self.make(item)
def __setitem__(self, key, value):
return self.define(key, value)
def __contains__(self, item):
return item in self._definitions
def __call__(self, klass, name='default', amount=None):
"""
Makes a factory builder with a specified amount.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param amount: The number of models to create
:type amount: int
:return: mixed
"""
return self.build(klass, name, amount)
| mit |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/python/layers/core.py | 6 | 16103 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
class Dense(base.Layer):
  """Densely-connected layer class.

  This layer implements the operation:
  `outputs = activation(inputs.kernel + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the kernel matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the kernel matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel_constraint: Constraint function for the kernel matrix.
    bias_constraint: Constraint function for the bias.
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name,
                                activity_regularizer=activity_regularizer,
                                **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.kernel_constraint = kernel_constraint
    self.bias_constraint = bias_constraint
    # Accept any input of rank >= 2; the last axis is pinned in build()
    # once its size is known.
    self.input_spec = base.InputSpec(min_ndim=2)

  def build(self, input_shape):
    # Create the kernel (and optional bias) variables once the static
    # input shape is known. The last input dimension must be defined
    # because it determines the kernel's first dimension.
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    # From now on, reject inputs whose last axis differs from this one.
    self.input_spec = base.InputSpec(min_ndim=2,
                                     axes={-1: input_shape[-1].value})
    self.kernel = self.add_variable('kernel',
                                    shape=[input_shape[-1].value, self.units],
                                    initializer=self.kernel_initializer,
                                    regularizer=self.kernel_regularizer,
                                    constraint=self.kernel_constraint,
                                    dtype=self.dtype,
                                    trainable=True)
    if self.use_bias:
      self.bias = self.add_variable('bias',
                                    shape=[self.units,],
                                    initializer=self.bias_initializer,
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint,
                                    dtype=self.dtype,
                                    trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    if len(shape) > 2:
      # Rank > 2: contract the last axis of `inputs` with the first axis
      # of the kernel instead of flattening explicitly.
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                             [0]])
      # Reshape the output back to the original ndim of the input.
      if context.in_graph_mode():
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      # Rank 2: a plain matrix multiply suffices.
      outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def _compute_output_shape(self, input_shape):
    # Output shape is the input shape with the last axis replaced by
    # `units`; the last input axis must be statically known.
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=init_ops.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  Computes `activation(inputs.kernel + bias)` by constructing a `Dense`
  layer and applying it to `inputs`. Inputs with rank greater than 2 are
  flattened prior to the initial matrix multiply by `kernel`.

  Arguments:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable), or None for a linear
      activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: Optional projection function applied to the kernel
      after each `Optimizer` update (e.g. for norm or value constraints).
      Takes the unprojected variable, returns the projected one (same
      shape). Not safe with asynchronous distributed training.
    bias_constraint: Optional projection function applied to the bias
      after each `Optimizer` update.
    trainable: Boolean, if `True` also add variables to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.
  """
  # Collect every constructor argument once, then delegate to the Dense
  # layer class which owns the actual implementation.
  layer_kwargs = dict(
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse,
  )
  return Dense(units, **layer_kwargs).apply(inputs)
class Dropout(base.Layer):
  """Applies Dropout to the input.

  During training, a fraction `rate` of input units is randomly set to 0
  at each update, which helps prevent overfitting. Units that are kept
  are scaled by `1 / (1 - rate)` so that their expected sum is the same
  at training time and inference time.

  Arguments:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}.
      for behavior.
    name: The name of the layer (string).
  """

  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    super(Dropout, self).__init__(name=name, **kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed

  def _get_noise_shape(self, _):
    # Subclasses may override this to derive a custom noise shape from
    # the (possibly dynamically sized) inputs; the base implementation
    # simply returns the configured value.
    return self.noise_shape

  def call(self, inputs, training=False):
    def apply_dropout():
      # nn.dropout takes a keep probability, hence 1 - rate.
      return nn.dropout(inputs, 1 - self.rate,
                        noise_shape=self._get_noise_shape(inputs),
                        seed=self.seed)

    def pass_through():
      return array_ops.identity(inputs)

    # Select the branch at graph-construction or run time depending on
    # whether `training` is a Python bool or a tensor.
    return utils.smart_cond(training, apply_dropout, pass_through)
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
  """Functional interface for the Dropout layer.

  Randomly sets a fraction `rate` of input units to 0 during training and
  scales the kept units by `1 / (1 - rate)`; at inference the input is
  returned untouched.

  Arguments:
    inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}
      for behavior.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (apply dropout) or in inference mode (return the input untouched).
    name: The name of the layer (string).

  Returns:
    Output tensor.
  """
  # Thin wrapper: instantiate the layer class and apply it.
  return Dropout(rate, noise_shape=noise_shape, seed=seed,
                 name=name).apply(inputs, training=training)
class Flatten(base.Layer):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Examples:

  ```
    x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
    y = Flatten()(x)
    # now `y` has shape `(None, 16)`

    x = tf.placeholder(shape=(None, 3, None), dtype='float32')
    y = Flatten()(x)
    # now `y` has shape `(None, None)`
  ```
  """

  def __init__(self, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.input_spec = base.InputSpec(min_ndim=2)

  def call(self, inputs):
    batch_size = array_ops.shape(inputs)[0]
    flattened = array_ops.reshape(inputs, (batch_size, -1))
    if context.in_graph_mode():
      # The dynamic reshape above loses static shape information;
      # recompute and reattach it.
      flattened.set_shape(self._compute_output_shape(inputs.get_shape()))
    return flattened

  def _compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    # The flattened size is only statically known when every non-batch
    # dimension is known (and non-zero).
    if all(dims[1:]):
      trailing = np.prod(dims[1:])
    else:
      trailing = None
    return tensor_shape.TensorShape([dims[0], trailing])
def flatten(inputs, name=None):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Arguments:
    inputs: Tensor input.
    name: The name of the layer (string).

  Returns:
    Reshaped tensor.

  Examples:

  ```
    x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
    y = flatten(x)
    # now `y` has shape `(None, 16)`

    x = tf.placeholder(shape=(None, 3, None), dtype='float32')
    y = flatten(x)
    # now `y` has shape `(None, None)`
  ```
  """
  # Thin wrapper: instantiate the layer class and apply it.
  return Flatten(name=name).apply(inputs)
# Aliases kept for the older "fully connected" naming of the same layer.
FullyConnected = Dense
fully_connected = dense
| mit |
ihmeuw/vivarium | src/vivarium/examples/boids/location.py | 1 | 1167 | import numpy as np
import pandas as pd
class Location:
    """Component giving each simulant a position and velocity on a 2D field."""

    configuration_defaults = {
        'location': {
            'width': 1000,  # Width of our field
            'height': 1000,  # Height of our field
        }
    }

    def __init__(self):
        self.name = 'location'

    def setup(self, builder):
        """Read the field size from configuration and register the columns."""
        location_config = builder.configuration.location
        self.width = location_config.width
        self.height = location_config.height

        created_columns = ['x', 'vx', 'y', 'vy']
        builder.population.initializes_simulants(self.on_create_simulants,
                                                 created_columns)
        self.population_view = builder.population.get_view(created_columns)

    def on_create_simulants(self, pop_data):
        """Place new simulants near the field's center with small random velocities."""
        count = len(pop_data.index)
        # Draw in the same order as the frame columns below so the random
        # stream is consumed deterministically (x, y, vx, vy).
        xs = self.width * (0.4 + 0.2 * np.random.random(count))
        ys = self.height * (0.4 + 0.2 * np.random.random(count))
        vxs = -0.5 + np.random.random(count)
        vys = -0.5 + np.random.random(count)
        initial_state = pd.DataFrame(
            {'x': xs, 'y': ys, 'vx': vxs, 'vy': vys},
            index=pop_data.index,
        )
        self.population_view.update(initial_state)
| gpl-3.0 |
quru/wagtail | wagtail/wagtailembeds/blocks.py | 4 | 2150 | from __future__ import absolute_import, unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailcore import blocks
from wagtail.wagtailembeds.format import embed_to_frontend_html
@python_2_unicode_compatible
class EmbedValue(object):
    """
    Native value of an EmbedBlock. Should, at minimum, have a 'url' property
    and render as the embed HTML when rendered in a template.

    NB We deliberately avoid wagtailembeds.model.Embed here so that
    {{ value.url|embed:max_width=500 }} does not trigger a redundant fetch
    of the embed at the default width.
    """
    def __init__(self, url):
        self.url = url

    def __str__(self):
        # Rendering the value in a template yields the embed's frontend HTML.
        html = embed_to_frontend_html(self.url)
        return html
class EmbedBlock(blocks.URLBlock):
    """URLBlock whose native value is an EmbedValue rather than a string."""

    def get_default(self):
        # The default may be configured as an EmbedValue, a URL string,
        # or left unset (None / empty).
        default = self.meta.default
        if not default:
            return None
        if isinstance(default, EmbedValue):
            return default
        # Assume the default was given as a plain URL string.
        return EmbedValue(default)

    def to_python(self, value):
        # The JSON representation is a URL string; wrap non-empty values.
        return EmbedValue(value) if value else None

    def get_prep_value(self, value):
        # The serialisable value is a plain URL string.
        return '' if value is None else value.url

    def value_for_form(self, value):
        # The URLField works with a plain URL string (or the empty string).
        return '' if value is None else value.url

    def value_from_form(self, value):
        # Convert the form's URL string back into an EmbedValue (or None).
        return EmbedValue(value) if value else None

    class Meta:
        icon = "media"
| bsd-3-clause |
sklam/numba | numba/tests/test_random.py | 3 | 54944 | import collections
import functools
import math
import multiprocessing
import os
import random
import subprocess
import sys
import threading
import numpy as np
import unittest
from numba import jit, _helperlib
from numba.core import types
from numba.core.compiler import compile_isolated
from numba.tests.support import TestCase, compile_function, tag
# State size of the Mersenne Twister
N = 624


def get_py_state_ptr():
    # Pointer to the PRNG state backing Numba's `random` module emulation.
    return _helperlib.rnd_get_py_state_ptr()

def get_np_state_ptr():
    # Pointer to the PRNG state backing Numba's `np.random` emulation.
    return _helperlib.rnd_get_np_state_ptr()


# Trivial wrappers around the random-number APIs under test; the test
# cases below compile them with @jit and compare against the pure-Python
# generators.

def numpy_randint1(a):
    return np.random.randint(a)

def numpy_randint2(a, b):
    return np.random.randint(a, b)

def random_randint(a, b):
    return random.randint(a, b)

def random_randrange1(a):
    return random.randrange(a)

def random_randrange2(a, b):
    return random.randrange(a, b)

def random_randrange3(a, b, c):
    return random.randrange(a, b, c)

def numpy_choice1(a):
    return np.random.choice(a)

def numpy_choice2(a, size):
    return np.random.choice(a, size=size)

def numpy_choice3(a, size, replace):
    return np.random.choice(a, size=size, replace=replace)

def numpy_multinomial2(n, pvals):
    return np.random.multinomial(n, pvals)

def numpy_multinomial3(n, pvals, size):
    return np.random.multinomial(n, pvals=pvals, size=size)

def numpy_check_rand(seed, a, b):
    # rand(a, b) must match standard_normal/random with an (a, b) shape
    # when started from the same seed.
    np.random.seed(seed)
    expected = np.random.random((a, b))
    np.random.seed(seed)
    got = np.random.rand(a, b)
    return expected, got

def numpy_check_randn(seed, a, b):
    np.random.seed(seed)
    expected = np.random.standard_normal((a, b))
    np.random.seed(seed)
    got = np.random.randn(a, b)
    return expected, got
def jit_with_args(name, argstring):
    """
    Compile (in nopython mode) and return a function forwarding the given
    argument list to the expression *name* (e.g. "random.gauss").
    """
    code = """def func(%(argstring)s):
    return %(name)s(%(argstring)s)
""" % locals()
    pyfunc = compile_function("func", code, globals())
    return jit(nopython=True)(pyfunc)

# Convenience wrappers for 0-, 1-, 2- and 3-argument callables.

def jit_nullary(name):
    return jit_with_args(name, "")

def jit_unary(name):
    return jit_with_args(name, "a")

def jit_binary(name):
    return jit_with_args(name, "a, b")

def jit_ternary(name):
    return jit_with_args(name, "a, b, c")
# Pre-compiled helpers shared across the test cases below.
random_gauss = jit_binary("random.gauss")
random_random = jit_nullary("random.random")
random_seed = jit_unary("random.seed")

numpy_normal = jit_binary("np.random.normal")
numpy_random = jit_nullary("np.random.random")
numpy_seed = jit_unary("np.random.seed")
def _copy_py_state(r, ptr):
    """
    Copy state of Python random *r* to Numba state *ptr*.
    """
    # getstate()[1] is the 624-word Mersenne Twister key followed by the
    # current index into it.
    mt = r.getstate()[1]
    ints, index = mt[:-1], mt[-1]
    _helperlib.rnd_set_state(ptr, (index, list(ints)))
    return ints, index

def _copy_np_state(r, ptr):
    """
    Copy state of Numpy random *r* to Numba state *ptr*.
    """
    # get_state() -> ('MT19937', key array, pos, has_gauss, cached_gaussian);
    # only the key and position are needed here.
    ints, index = r.get_state()[1:3]
    _helperlib.rnd_set_state(ptr, (index, [int(x) for x in ints]))
    return ints, index
def sync_to_numpy(r):
    """Copy the state of Python Random *r* into the global Numpy generator."""
    _version, mt_state, gauss_next = r.getstate()
    # The Python state is the 624-word MT key followed by the position.
    key, pos = mt_state[:-1], mt_state[-1]
    assert len(key) == 624
    # Numpy additionally tracks whether a cached Gaussian draw is pending.
    if gauss_next is None:
        has_gauss, cached_gauss = 0, 0.0
    else:
        has_gauss, cached_gauss = 1, gauss_next
    np.random.set_state(
        ('MT19937', np.array(key, dtype='uint32'), pos,
         has_gauss, cached_gauss))
# Pure Python equivalents of some of the Numpy distributions, using
# Python's basic generators.
def py_chisquare(r, df):
    # Chi-square(df) equals Gamma(df/2, scale=2): draw Gamma(df/2, 1)
    # from the Python generator and scale by 2.
    return r.gammavariate(df / 2.0, 1.0) * 2.0

def py_f(r, num, denom):
    # F(num, denom) as the ratio of two scaled chi-square draws; the
    # numerator draw is taken first, matching the Numpy consumption order.
    numerator = py_chisquare(r, num) * denom
    denominator = py_chisquare(r, denom) * num
    return numerator / denominator
class BaseTest(TestCase):
    """Base class providing reference generators synced to Numba's state."""

    def _follow_cpython(self, ptr, seed=2):
        # Seed a CPython generator and copy its state into the Numba state
        # at *ptr*, so both produce the same stream from here on.
        r = random.Random(seed)
        _copy_py_state(r, ptr)
        return r

    def _follow_numpy(self, ptr, seed=2):
        # Same idea, but mirroring a Numpy RandomState generator.
        r = np.random.RandomState(seed)
        _copy_np_state(r, ptr)
        return r
class TestInternals(BaseTest):
    """
    Test low-level internals of the implementation.
    """

    def _check_get_set_state(self, ptr):
        # A state is (index, 624-integer Mersenne Twister key).
        state = _helperlib.rnd_get_state(ptr)
        i, ints = state
        self.assertIsInstance(i, int)
        self.assertIsInstance(ints, list)
        self.assertEqual(len(ints), N)
        j = (i * 100007) % N
        ints = [i * 3 for i in range(N)]
        # Roundtrip
        _helperlib.rnd_set_state(ptr, (j, ints))
        self.assertEqual(_helperlib.rnd_get_state(ptr), (j, ints))

    def _check_shuffle(self, ptr):
        # We test shuffling against CPython
        r = random.Random()
        ints, index = _copy_py_state(r, ptr)
        # Force shuffling in CPython generator
        for i in range(index, N + 1, 2):
            r.random()
        _helperlib.rnd_shuffle(ptr)
        # Check new integer keys
        mt = r.getstate()[1]
        ints, index = mt[:-1], mt[-1]
        self.assertEqual(_helperlib.rnd_get_state(ptr)[1], list(ints))

    def _check_init(self, ptr):
        # We use the same integer seeding as Numpy
        # (CPython is different: it treats the integer as a byte array)
        r = np.random.RandomState()
        for i in [0, 1, 125, 2**32 - 5]:
            # Need to cast to a C-sized int (for Numpy <= 1.7)
            r.seed(np.uint32(i))
            st = r.get_state()
            ints = list(st[1])
            index = st[2]
            assert index == N  # sanity check
            _helperlib.rnd_seed(ptr, i)
            self.assertEqual(_helperlib.rnd_get_state(ptr), (index, ints))

    def _check_perturb(self, ptr):
        # Seeding with entropy bytes must perturb a known state differently
        # every time.
        states = []
        for i in range(10):
            # Initialize with known state
            _helperlib.rnd_seed(ptr, 0)
            # Perturb with entropy
            _helperlib.rnd_seed(ptr, os.urandom(512))
            states.append(tuple(_helperlib.rnd_get_state(ptr)[1]))
        # No two identical states
        self.assertEqual(len(set(states)), len(states))

    def test_get_set_state(self):
        self._check_get_set_state(get_py_state_ptr())

    def test_shuffle(self):
        self._check_shuffle(get_py_state_ptr())

    def test_init(self):
        self._check_init(get_py_state_ptr())

    def test_perturb(self):
        self._check_perturb(get_py_state_ptr())
class TestRandom(BaseTest):
# NOTE: there may be cascading imprecision issues (e.g. between x87-using
# C code and SSE-using LLVM code), which is especially brutal for some
# iterative algorithms with sensitive exit conditions.
# Therefore we stick to hardcoded integers for seed values.
def _check_random_seed(self, seedfunc, randomfunc):
"""
Check seed()- and random()-like functions.
"""
# Our seed() mimicks Numpy's.
r = np.random.RandomState()
for i in [0, 1, 125, 2**32 - 1]:
# Need to cast to a C-sized int (for Numpy <= 1.7)
r.seed(np.uint32(i))
seedfunc(i)
# Be sure to trigger a reshuffle
for j in range(N + 10):
self.assertPreciseEqual(randomfunc(), r.uniform(0.0, 1.0))
def test_random_random(self):
self._check_random_seed(random_seed, random_random)
def test_numpy_random(self):
self._check_random_seed(numpy_seed, numpy_random)
# Test aliases
self._check_random_seed(numpy_seed, jit_nullary("np.random.random_sample"))
self._check_random_seed(numpy_seed, jit_nullary("np.random.ranf"))
self._check_random_seed(numpy_seed, jit_nullary("np.random.sample"))
self._check_random_seed(numpy_seed, jit_nullary("np.random.rand"))
def _check_random_sized(self, seedfunc, randomfunc):
# Our seed() mimicks Numpy's.
r = np.random.RandomState()
for i in [0, 1, 125, 2**32 - 1]:
# Need to cast to a C-sized int (for Numpy <= 1.7)
r.seed(np.uint32(i))
seedfunc(i)
for n in range(10):
self.assertPreciseEqual(randomfunc(n), r.uniform(0.0, 1.0, n))
def test_numpy_random_sized(self):
self._check_random_sized(numpy_seed, jit_unary("np.random.random_sample"))
self._check_random_sized(numpy_seed, jit_unary("np.random.ranf"))
self._check_random_sized(numpy_seed, jit_unary("np.random.sample"))
self._check_random_sized(numpy_seed, jit_unary("np.random.rand"))
def test_independent_generators(self):
# PRNGs for Numpy and Python are independent.
N = 10
random_seed(1)
py_numbers = [random_random() for i in range(N)]
numpy_seed(2)
np_numbers = [numpy_random() for i in range(N)]
random_seed(1)
numpy_seed(2)
pairs = [(random_random(), numpy_random()) for i in range(N)]
self.assertPreciseEqual([p[0] for p in pairs], py_numbers)
self.assertPreciseEqual([p[1] for p in pairs], np_numbers)
def _check_getrandbits(self, func, ptr):
"""
Check a getrandbits()-like function.
"""
# Our implementation follows CPython's for bits <= 64.
r = self._follow_cpython(ptr)
for nbits in range(1, 65):
expected = r.getrandbits(nbits)
got = func(nbits)
self.assertPreciseEqual(expected, got)
self.assertRaises(OverflowError, func, 65)
self.assertRaises(OverflowError, func, 9999999)
self.assertRaises(OverflowError, func, -1)
def test_random_getrandbits(self):
self._check_getrandbits(jit_unary("random.getrandbits"), get_py_state_ptr())
# Explanation for the large ulps value: on 32-bit platforms, our
# LLVM-compiled functions use SSE but they are compared against
# C functions which use x87.
# On some distributions, the errors seem to accumulate dramatically.
def _check_dist(self, func, pyfunc, argslist, niters=3,
prec='double', ulps=12, pydtype=None):
assert len(argslist)
for args in argslist:
results = [func(*args) for i in range(niters)]
pyresults = [(pyfunc(*args, dtype=pydtype) if pydtype else pyfunc(*args))
for i in range(niters)]
self.assertPreciseEqual(results, pyresults, prec=prec, ulps=ulps,
msg="for arguments %s" % (args,))
def _check_gauss(self, func2, func1, func0, ptr):
"""
Check a gauss()-like function.
"""
# Our implementation follows Numpy's.
r = self._follow_numpy(ptr)
if func2 is not None:
self._check_dist(func2, r.normal,
[(1.0, 1.0), (2.0, 0.5), (-2.0, 0.5)],
niters=N // 2 + 10)
if func1 is not None:
self._check_dist(func1, r.normal, [(0.5,)])
if func0 is not None:
self._check_dist(func0, r.normal, [()])
def test_random_gauss(self):
self._check_gauss(jit_binary("random.gauss"), None, None, get_py_state_ptr())
def test_random_normalvariate(self):
# normalvariate() is really an alias to gauss() in Numba
# (not in Python, though - they use different algorithms)
self._check_gauss(jit_binary("random.normalvariate"), None, None,
get_py_state_ptr())
def test_numpy_normal(self):
self._check_gauss(jit_binary("np.random.normal"),
jit_unary("np.random.normal"),
jit_nullary("np.random.normal"),
get_np_state_ptr())
def test_numpy_standard_normal(self):
self._check_gauss(None, None, jit_nullary("np.random.standard_normal"),
get_np_state_ptr())
def test_numpy_randn(self):
self._check_gauss(None, None, jit_nullary("np.random.randn"),
get_np_state_ptr())
def _check_lognormvariate(self, func2, func1, func0, ptr):
"""
Check a lognormvariate()-like function.
"""
# Our implementation follows Numpy's.
r = self._follow_numpy(ptr)
if func2 is not None:
self._check_dist(func2, r.lognormal,
[(1.0, 1.0), (2.0, 0.5), (-2.0, 0.5)],
niters=N // 2 + 10)
if func1 is not None:
self._check_dist(func1, r.lognormal, [(0.5,)])
if func0 is not None:
self._check_dist(func0, r.lognormal, [()])
def test_random_lognormvariate(self):
self._check_lognormvariate(jit_binary("random.lognormvariate"),
None, None, get_py_state_ptr())
def test_numpy_lognormal(self):
self._check_lognormvariate(jit_binary("np.random.lognormal"),
jit_unary("np.random.lognormal"),
jit_nullary("np.random.lognormal"),
get_np_state_ptr())
def _check_randrange(self, func1, func2, func3, ptr, max_width,
                     is_numpy, tp=None):
    """
    Check a randrange()-like function.

    *func1*, *func2*, *func3* are compiled variants taking 1, 2 and 3
    arguments (*func3* may be None).  *ptr* is the PRNG state the
    reference generator mirrors, *max_width* the largest representable
    range width for the compiled integer type, *is_numpy* selects the
    reference generator and *tp* the Numpy dtype for the reference.
    """
    # Sanity check: draws from a ~5e8-wide range should not repeat
    # (mirrors the identical check in _check_randint; previously the
    # values were collected but never asserted on).
    ints = []
    for i in range(10):
        ints.append(func1(500000000))
        ints.append(func2(5, 500000000))
        if func3 is not None:
            ints.append(func3(5, 500000000, 3))
    self.assertEqual(len(ints), len(set(ints)), ints)
    if is_numpy:
        rr = self._follow_numpy(ptr).randint
    else:
        rr = self._follow_cpython(ptr).randrange
    # Only test widths representable by the compiled integer type
    widths = [w for w in [1, 5, 8, 5000, 2**40, 2**62 + 2**61]
              if w < max_width]
    pydtype = tp if is_numpy else None
    for width in widths:
        self._check_dist(func1, rr, [(width,)], niters=10,
                         pydtype=pydtype)
        self._check_dist(func2, rr, [(-2, 2 + width)], niters=10,
                         pydtype=pydtype)
        if func3 is not None:
            # Positive and negative step values
            self.assertPreciseEqual(func3(-2, 2 + width, 6),
                                    rr(-2, 2 + width, 6))
            self.assertPreciseEqual(func3(2 + width, 2, -3),
                                    rr(2 + width, 2, -3))
    # Empty ranges must raise
    self.assertRaises(ValueError, func1, 0)
    self.assertRaises(ValueError, func1, -5)
    self.assertRaises(ValueError, func2, 5, 5)
    self.assertRaises(ValueError, func2, 5, 2)
    if func3 is not None:
        self.assertRaises(ValueError, func3, 5, 7, -1)
        self.assertRaises(ValueError, func3, 7, 5, 1)
def test_random_randrange(self):
for tp, max_width in [(types.int64, 2**63), (types.int32, 2**31)]:
cr1 = compile_isolated(random_randrange1, (tp,))
cr2 = compile_isolated(random_randrange2, (tp, tp))
cr3 = compile_isolated(random_randrange3, (tp, tp, tp))
self._check_randrange(cr1.entry_point, cr2.entry_point,
cr3.entry_point, get_py_state_ptr(),
max_width, False)
def test_numpy_randint(self):
for tp, np_tp, max_width in [(types.int64, np.int64, 2**63),
(types.int32, np.int32, 2**31)]:
cr1 = compile_isolated(numpy_randint1, (tp,))
cr2 = compile_isolated(numpy_randint2, (tp, tp))
self._check_randrange(cr1.entry_point, cr2.entry_point,
None, get_np_state_ptr(), max_width, True, np_tp)
def _check_randint(self, func, ptr, max_width):
    """
    Check a randint()-like function.

    *func* is the compiled two-argument function, *ptr* the PRNG
    state the reference generator mirrors, *max_width* the largest
    representable range width for the compiled integer type.
    """
    # Sanity check: 10 draws from a ~5e8-wide range should all differ;
    # a duplicate would suggest a broken or stuck generator.
    ints = []
    for i in range(10):
        ints.append(func(5, 500000000))
    self.assertEqual(len(ints), len(set(ints)), ints)
    r = self._follow_cpython(ptr)
    for args in [(1, 5), (13, 5000), (20, 2**62 + 2**61)]:
        if args[1] > max_width:
            # Range not representable with this integer width
            continue
        self._check_dist(func, r.randint, [args], niters=10)
    # Empty ranges (randint bounds are inclusive, so (5, 4) is empty)
    self.assertRaises(ValueError, func, 5, 4)
    self.assertRaises(ValueError, func, 5, 2)
def test_random_randint(self):
for tp, max_width in [(types.int64, 2**63), (types.int32, 2**31)]:
cr = compile_isolated(random_randint, (tp, tp))
self._check_randint(cr.entry_point, get_py_state_ptr(), max_width)
def _check_uniform(self, func, ptr):
"""
Check a uniform()-like function.
"""
# Our implementation follows Python's.
r = self._follow_cpython(ptr)
self._check_dist(func, r.uniform,
[(1.5, 1e6), (-2.5, 1e3), (1.5, -2.5)])
def test_random_uniform(self):
self._check_uniform(jit_binary("random.uniform"), get_py_state_ptr())
def test_numpy_uniform(self):
self._check_uniform(jit_binary("np.random.uniform"), get_np_state_ptr())
def _check_triangular(self, func2, func3, ptr):
"""
Check a triangular()-like function.
"""
# Our implementation follows Python's.
r = self._follow_cpython(ptr)
if func2 is not None:
self._check_dist(func2, r.triangular,
[(1.5, 3.5), (-2.5, 1.5), (1.5, 1.5)])
self._check_dist(func3, r.triangular, [(1.5, 3.5, 2.2)])
def test_random_triangular(self):
self._check_triangular(jit_binary("random.triangular"),
jit_ternary("random.triangular"),
get_py_state_ptr())
def test_numpy_triangular(self):
triangular = jit_ternary("np.random.triangular")
fixed_triangular = lambda l, r, m: triangular(l, m, r)
self._check_triangular(None, fixed_triangular, get_np_state_ptr())
def _check_gammavariate(self, func2, func1, ptr):
    """
    Check a gammavariate()-like function.

    *func2* takes (alpha, beta); *func1* takes only alpha, with beta
    implicitly 1.0.  Either may be None.  *ptr* is the PRNG state the
    reference generator mirrors.
    """
    # Our implementation follows Python's.
    r = self._follow_cpython(ptr)
    if func2 is not None:
        self._check_dist(func2, r.gammavariate,
                         [(0.5, 2.5), (1.0, 1.5), (1.5, 3.5)])
    if func1 is not None:
        # One-argument form must match gammavariate with beta=1.0
        self.assertPreciseEqual(func1(1.5), r.gammavariate(1.5, 1.0))
    # Invalid inputs: both parameters must be strictly positive
    if func2 is not None:
        self.assertRaises(ValueError, func2, 0.0, 1.0)
        self.assertRaises(ValueError, func2, 1.0, 0.0)
        self.assertRaises(ValueError, func2, -0.5, 1.0)
        self.assertRaises(ValueError, func2, 1.0, -0.5)
    if func1 is not None:
        self.assertRaises(ValueError, func1, 0.0)
        self.assertRaises(ValueError, func1, -0.5)
def test_random_gammavariate(self):
self._check_gammavariate(jit_binary("random.gammavariate"), None,
get_py_state_ptr())
def test_numpy_gamma(self):
self._check_gammavariate(jit_binary("np.random.gamma"),
jit_unary("np.random.gamma"),
get_np_state_ptr())
self._check_gammavariate(None,
jit_unary("np.random.standard_gamma"),
get_np_state_ptr())
def _check_betavariate(self, func, ptr):
"""
Check a betavariate()-like function.
"""
# Our implementation follows Python's.
r = self._follow_cpython(ptr)
self._check_dist(func, r.betavariate, [(0.5, 2.5)])
# Invalid inputs
self.assertRaises(ValueError, func, 0.0, 1.0)
self.assertRaises(ValueError, func, 1.0, 0.0)
self.assertRaises(ValueError, func, -0.5, 1.0)
self.assertRaises(ValueError, func, 1.0, -0.5)
def test_random_betavariate(self):
self._check_betavariate(jit_binary("random.betavariate"), get_py_state_ptr())
def test_numpy_beta(self):
self._check_betavariate(jit_binary("np.random.beta"), get_np_state_ptr())
def _check_vonmisesvariate(self, func, ptr):
"""
Check a vonmisesvariate()-like function.
"""
r = self._follow_cpython(ptr)
self._check_dist(func, r.vonmisesvariate, [(0.5, 2.5)])
def test_random_vonmisesvariate(self):
self._check_vonmisesvariate(jit_binary("random.vonmisesvariate"),
get_py_state_ptr())
def test_numpy_vonmises(self):
self._check_vonmisesvariate(jit_binary("np.random.vonmises"),
get_np_state_ptr())
def _check_expovariate(self, func, ptr):
    """
    Check an expovariate()-like function.

    Note: expovariate's *lambd* argument is a rate, i.e. the
    reciprocal of np.random.exponential()'s scale argument — hence
    the 1 / lambd below.
    """
    # Our implementation follows Numpy's, so outputs match exactly.
    r = self._follow_numpy(ptr)
    for lambd in (0.2, 0.5, 1.5):
        for i in range(3):
            self.assertPreciseEqual(func(lambd), r.exponential(1 / lambd),
                                    prec='double')
def test_random_expovariate(self):
self._check_expovariate(jit_unary("random.expovariate"), get_py_state_ptr())
def _check_exponential(self, func1, func0, ptr):
    """
    Check an exponential()-like function.

    *func1* takes an explicit scale argument, *func0* takes none;
    either may be None.  *ptr* is the PRNG state the reference
    generator mirrors.
    """
    ref = self._follow_numpy(ptr)
    for f, cases in ((func1, [(0.5,), (1.0,), (1.5,)]),
                     (func0, [()])):
        if f is not None:
            self._check_dist(f, ref.exponential, cases)
def test_numpy_exponential(self):
self._check_exponential(jit_unary("np.random.exponential"),
jit_nullary("np.random.exponential"),
get_np_state_ptr())
def test_numpy_standard_exponential(self):
self._check_exponential(None,
jit_nullary("np.random.standard_exponential"),
get_np_state_ptr())
def _check_paretovariate(self, func, ptr):
"""
Check a paretovariate()-like function.
"""
# Our implementation follows Python's.
r = self._follow_cpython(ptr)
self._check_dist(func, r.paretovariate, [(0.5,), (3.5,)])
def test_random_paretovariate(self):
self._check_paretovariate(jit_unary("random.paretovariate"), get_py_state_ptr())
def test_numpy_pareto(self):
pareto = jit_unary("np.random.pareto")
fixed_pareto = lambda a: pareto(a) + 1.0
self._check_paretovariate(fixed_pareto, get_np_state_ptr())
def _check_weibullvariate(self, func2, func1, ptr):
"""
Check a weibullvariate()-like function.
"""
# Our implementation follows Python's.
r = self._follow_cpython(ptr)
if func2 is not None:
self._check_dist(func2, r.weibullvariate, [(0.5, 2.5)])
if func1 is not None:
for i in range(3):
self.assertPreciseEqual(func1(2.5),
r.weibullvariate(1.0, 2.5))
def test_random_weibullvariate(self):
self._check_weibullvariate(jit_binary("random.weibullvariate"),
None, get_py_state_ptr())
def test_numpy_weibull(self):
self._check_weibullvariate(None, jit_unary("np.random.weibull"),
get_np_state_ptr())
def test_numpy_binomial(self):
    # We follow Numpy's algorithm up to n*p == 30
    binomial = jit_binary("np.random.binomial")
    r = self._follow_numpy(get_np_state_ptr(), 0)
    # Small n*p: exact agreement with Numpy's output is expected
    self._check_dist(binomial, r.binomial, [(18, 0.25)])
    # Sanity check many values
    for n in (100, 1000, 10000):
        # Degenerate probabilities
        self.assertEqual(binomial(n, 0.0), 0)
        self.assertEqual(binomial(n, 1.0), n)
        for p in (0.0001, 0.1, 0.4, 0.49999, 0.5, 0.50001, 0.8, 0.9, 0.9999):
            # NOTE(review): `r` here clobbers the generator bound above;
            # harmless since the generator is not used again, but confusing.
            r = binomial(n, p)
            if p > 0.5:
                # Exploit symmetry so the bounds below only need p <= 0.5
                r = n - r
                p = 1 - p
            self.assertGreaterEqual(r, 0)
            self.assertLessEqual(r, n)
            expected = p * n
            # Very loose tolerance: 3 * n / sqrt(n) == 3 * sqrt(n)
            tol = 3 * n / math.sqrt(n)
            self.assertGreaterEqual(r, expected - tol, (p, n, r))
            self.assertLessEqual(r, expected + tol, (p, n, r))
    # Invalid values: n must be >= 0 (raises for -1 here), p in [0, 1]
    self.assertRaises(ValueError, binomial, -1, 0.5)
    self.assertRaises(ValueError, binomial, 10, -0.1)
    self.assertRaises(ValueError, binomial, 10, 1.1)
def test_numpy_chisquare(self):
chisquare = jit_unary("np.random.chisquare")
r = self._follow_cpython(get_np_state_ptr())
self._check_dist(chisquare,
functools.partial(py_chisquare, r),
[(1.5,), (2.5,)])
def test_numpy_f(self):
f = jit_binary("np.random.f")
r = self._follow_cpython(get_np_state_ptr())
self._check_dist(f, functools.partial(py_f, r),
[(0.5, 1.5), (1.5, 0.8)])
def test_numpy_geometric(self):
    geom = jit_unary("np.random.geometric")
    # p out of domain: must be in (0, 1]
    self.assertRaises(ValueError, geom, -1.0)
    self.assertRaises(ValueError, geom, 0.0)
    self.assertRaises(ValueError, geom, 1.001)
    # Some basic checks
    # NOTE(review): this local N shadows the module-level N used by
    # other tests in this file.
    N = 200
    # p=1.0 => success on the very first trial, always
    r = [geom(1.0) for i in range(N)]
    self.assertPreciseEqual(r, [1] * N)
    r = [geom(0.9) for i in range(N)]
    n = r.count(1)
    self.assertGreaterEqual(n, N // 2)
    self.assertLess(n, N)
    self.assertFalse([i for i in r if i > 1000])  # unlikely
    r = [geom(0.4) for i in range(N)]
    self.assertTrue([i for i in r if i > 4])  # likely
    r = [geom(0.01) for i in range(N)]
    self.assertTrue([i for i in r if i > 50])  # likely
    # Tiny p: exercises the large-value code path beyond 32 bits
    r = [geom(1e-15) for i in range(N)]
    self.assertTrue([i for i in r if i > 2**32])  # likely
def test_numpy_gumbel(self):
gumbel = jit_binary("np.random.gumbel")
r = self._follow_numpy(get_np_state_ptr())
self._check_dist(gumbel, r.gumbel, [(0.0, 1.0), (-1.5, 3.5)])
def test_numpy_hypergeometric(self):
# Our implementation follows Numpy's up to nsamples = 10.
hg = jit_ternary("np.random.hypergeometric")
r = self._follow_numpy(get_np_state_ptr())
self._check_dist(hg, r.hypergeometric,
[(1000, 5000, 10), (5000, 1000, 10)],
niters=30)
# Sanity checks
r = [hg(1000, 1000, 100) for i in range(100)]
self.assertTrue(all(x >= 0 and x <= 100 for x in r), r)
self.assertGreaterEqual(np.mean(r), 40.0)
self.assertLessEqual(np.mean(r), 60.0)
r = [hg(1000, 100000, 100) for i in range(100)]
self.assertTrue(all(x >= 0 and x <= 100 for x in r), r)
self.assertLessEqual(np.mean(r), 10.0)
r = [hg(100000, 1000, 100) for i in range(100)]
self.assertTrue(all(x >= 0 and x <= 100 for x in r), r)
self.assertGreaterEqual(np.mean(r), 90.0)
def test_numpy_laplace(self):
r = self._follow_numpy(get_np_state_ptr())
self._check_dist(jit_binary("np.random.laplace"), r.laplace,
[(0.0, 1.0), (-1.5, 3.5)])
self._check_dist(jit_unary("np.random.laplace"), r.laplace,
[(0.0,), (-1.5,)])
self._check_dist(jit_nullary("np.random.laplace"), r.laplace, [()])
def test_numpy_logistic(self):
r = self._follow_numpy(get_np_state_ptr())
self._check_dist(jit_binary("np.random.logistic"), r.logistic,
[(0.0, 1.0), (-1.5, 3.5)])
self._check_dist(jit_unary("np.random.logistic"), r.logistic,
[(0.0,), (-1.5,)])
self._check_dist(jit_nullary("np.random.logistic"), r.logistic, [()])
def test_numpy_logseries(self):
r = self._follow_numpy(get_np_state_ptr())
logseries = jit_unary("np.random.logseries")
self._check_dist(logseries, r.logseries,
[(0.1,), (0.99,), (0.9999,)],
niters=50)
# Numpy's logseries overflows on 32-bit builds, so instead
# hardcode Numpy's (correct) output on 64-bit builds.
r = self._follow_numpy(get_np_state_ptr(), seed=1)
self.assertEqual([logseries(0.9999999999999) for i in range(10)],
[2022733531, 77296, 30, 52204, 9341294, 703057324,
413147702918, 1870715907, 16009330, 738])
self.assertRaises(ValueError, logseries, 0.0)
self.assertRaises(ValueError, logseries, -0.1)
self.assertRaises(ValueError, logseries, 1.1)
def test_numpy_poisson(self):
r = self._follow_numpy(get_np_state_ptr())
poisson = jit_unary("np.random.poisson")
# Our implementation follows Numpy's.
self._check_dist(poisson, r.poisson,
[(0.0,), (0.5,), (2.0,), (10.0,), (900.5,)],
niters=50)
self.assertRaises(ValueError, poisson, -0.1)
def test_numpy_negative_binomial(self):
self._follow_numpy(get_np_state_ptr(), 0)
negbin = jit_binary("np.random.negative_binomial")
self.assertEqual([negbin(10, 0.9) for i in range(10)],
[2, 3, 1, 5, 2, 1, 0, 1, 0, 0])
self.assertEqual([negbin(10, 0.1) for i in range(10)],
[55, 71, 56, 57, 56, 56, 34, 55, 101, 67])
self.assertEqual([negbin(1000, 0.1) for i in range(10)],
[9203, 8640, 9081, 9292, 8938,
9165, 9149, 8774, 8886, 9117])
m = np.mean([negbin(1000000000, 0.1)
for i in range(50)])
self.assertGreater(m, 9e9 * 0.99)
self.assertLess(m, 9e9 * 1.01)
self.assertRaises(ValueError, negbin, 0, 0.5)
self.assertRaises(ValueError, negbin, -1, 0.5)
self.assertRaises(ValueError, negbin, 10, -0.1)
self.assertRaises(ValueError, negbin, 10, 1.1)
def test_numpy_power(self):
r = self._follow_numpy(get_np_state_ptr())
power = jit_unary("np.random.power")
self._check_dist(power, r.power,
[(0.1,), (0.5,), (0.9,), (6.0,)])
self.assertRaises(ValueError, power, 0.0)
self.assertRaises(ValueError, power, -0.1)
def test_numpy_rayleigh(self):
r = self._follow_numpy(get_np_state_ptr())
rayleigh1 = jit_unary("np.random.rayleigh")
rayleigh0 = jit_nullary("np.random.rayleigh")
self._check_dist(rayleigh1, r.rayleigh,
[(0.1,), (0.8,), (25.,), (1e3,)])
self._check_dist(rayleigh0, r.rayleigh, [()])
self.assertRaises(ValueError, rayleigh1, 0.0)
self.assertRaises(ValueError, rayleigh1, -0.1)
def test_numpy_standard_cauchy(self):
r = self._follow_numpy(get_np_state_ptr())
cauchy = jit_nullary("np.random.standard_cauchy")
self._check_dist(cauchy, r.standard_cauchy, [()])
def test_numpy_standard_t(self):
    # We use CPython's algorithm for the gamma dist and numpy's
    # for the normal dist.  standard_t() calls both, so its output
    # can't be checked against either generator's output; just seed
    # the state (return value intentionally unused — the previous
    # `r = ...` binding was dead) and sanity-check the sample mean.
    self._follow_cpython(get_np_state_ptr())
    standard_t = jit_unary("np.random.standard_t")
    avg = np.mean([standard_t(5) for i in range(5000)])
    # Sanity check: t(5) is centered on 0, so a sample mean of 5000
    # draws should be well inside (-0.5, 0.5).
    self.assertLess(abs(avg), 0.5)
def test_numpy_wald(self):
r = self._follow_numpy(get_np_state_ptr())
wald = jit_binary("np.random.wald")
self._check_dist(wald, r.wald, [(1.0, 1.0), (2.0, 5.0)])
self.assertRaises(ValueError, wald, 0.0, 1.0)
self.assertRaises(ValueError, wald, -0.1, 1.0)
self.assertRaises(ValueError, wald, 1.0, 0.0)
self.assertRaises(ValueError, wald, 1.0, -0.1)
def test_numpy_zipf(self):
r = self._follow_numpy(get_np_state_ptr())
zipf = jit_unary("np.random.zipf")
self._check_dist(zipf, r.zipf, [(1.5,), (2.5,)], niters=100)
for val in (1.0, 0.5, 0.0, -0.1):
self.assertRaises(ValueError, zipf, val)
def _check_shuffle(self, func, ptr, is_numpy):
    """
    Check a shuffle()-like function for arrays.

    *func* shuffles its argument in place.  *ptr* is the PRNG state
    the reference generator mirrors; *is_numpy* selects the
    reference generator.
    """
    # One 1-D and one 2-D array, to cover both shapes
    arrs = [np.arange(20), np.arange(32).reshape((8, 4))]
    if is_numpy:
        r = self._follow_numpy(ptr)
    else:
        r = self._follow_cpython(ptr)
    for a in arrs:
        for i in range(3):
            got = a.copy()
            expected = a.copy()
            func(got)
            # random.shuffle() has no usable reference behaviour for
            # multi-dimensional arrays, so compare exact output only
            # when a reference exists; otherwise just check that the
            # call above ran without error.
            if is_numpy or len(a.shape) == 1:
                r.shuffle(expected)
                self.assertPreciseEqual(got, expected)
    # Test with an arbitrary buffer-providing object
    a = arrs[0]
    b = a.copy()
    func(memoryview(b))
    # Result must be a (non-trivial) permutation of the original
    self.assertNotEqual(list(a), list(b))
    self.assertEqual(sorted(a), sorted(b))
    # Read-only object: shuffling must be rejected at typing time
    with self.assertTypingError():
        func(memoryview(b"xyz"))
def test_random_shuffle(self):
self._check_shuffle(jit_unary("random.shuffle"), get_py_state_ptr(), False)
def test_numpy_shuffle(self):
self._check_shuffle(jit_unary("np.random.shuffle"), get_np_state_ptr(), True)
def _check_startup_randomness(self, func_name, func_args):
    """
    Check that the state is properly randomized at startup.

    Runs *func_name* (a function defined in the test_random module)
    with *func_args* in three fresh interpreter processes and checks
    that the three printed results all differ, i.e. each process got
    its own entropy-based seed rather than a fixed default state.
    """
    # The "if 1:" header allows the indented block to parse as
    # top-level code when passed to `python -c`.
    code = """if 1:
        from numba.tests import test_random
        func = getattr(test_random, %(func_name)r)
        print(func(*%(func_args)r))
        """ % (locals())
    numbers = set()
    for i in range(3):
        popen = subprocess.Popen([sys.executable, "-c", code],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = popen.communicate()
        if popen.returncode != 0:
            # Surface the child's stderr to make failures diagnosable
            raise AssertionError("process failed with code %s: stderr follows\n%s\n"
                                 % (popen.returncode, err.decode()))
        numbers.add(float(out.strip()))
    # Three runs must yield three distinct values
    self.assertEqual(len(numbers), 3, numbers)
def test_random_random_startup(self):
self._check_startup_randomness("random_random", ())
def test_random_gauss_startup(self):
self._check_startup_randomness("random_gauss", (1.0, 1.0))
def test_numpy_random_startup(self):
self._check_startup_randomness("numpy_random", ())
def test_numpy_gauss_startup(self):
self._check_startup_randomness("numpy_normal", (1.0, 1.0))
def test_numpy_random_permutation(self):
    func = jit_unary("np.random.permutation")
    r = self._follow_numpy(get_np_state_ptr())
    for s in [5, 10, 15, 20]:
        a = np.arange(s)
        b = a.copy()
        # Test array version
        self.assertPreciseEqual(func(a), r.permutation(a))
        # Test int version
        self.assertPreciseEqual(func(s), r.permutation(s))
        # Permutation should not modify its argument
        self.assertPreciseEqual(a, b)
    # Check multi-dimensional arrays
    arrs = [np.arange(10).reshape(2, 5),
            np.arange(27).reshape(3, 3, 3),
            np.arange(36).reshape(2, 3, 3, 2)]
    for a in arrs:
        b = a.copy()
        self.assertPreciseEqual(func(a), r.permutation(a))
        # Argument must again be left unmodified
        self.assertPreciseEqual(a, b)
class TestRandomArrays(BaseTest):
"""
Test array-producing variants of np.random.* functions.
"""
def _compile_array_dist(self, funcname, nargs):
qualname = "np.random.%s" % (funcname,)
argstring = ', '.join('abcd'[:nargs])
return jit_with_args(qualname, argstring)
def _check_array_dist(self, funcname, scalar_args):
"""
Check returning an array according to a given distribution.
"""
cfunc = self._compile_array_dist(funcname, len(scalar_args) + 1)
r = self._follow_numpy(get_np_state_ptr())
pyfunc = getattr(r, funcname)
for size in (8, (2, 3)):
args = scalar_args + (size,)
expected = pyfunc(*args)
got = cfunc(*args)
# Numpy may return int32s where we return int64s, adjust
if (expected.dtype == np.dtype('int32')
and got.dtype == np.dtype('int64')):
expected = expected.astype(got.dtype)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
def test_numpy_randint(self):
cfunc = self._compile_array_dist("randint", 3)
low, high = 1000, 10000
size = (30, 30)
res = cfunc(low, high, size)
self.assertIsInstance(res, np.ndarray)
self.assertEqual(res.shape, size)
self.assertIn(res.dtype, (np.dtype('int32'), np.dtype('int64')))
self.assertTrue(np.all(res >= low))
self.assertTrue(np.all(res < high))
# Crude statistical tests
mean = (low + high) / 2
tol = (high - low) / 20
self.assertGreaterEqual(res.mean(), mean - tol)
self.assertLessEqual(res.mean(), mean + tol)
def test_numpy_random_random(self):
cfunc = self._compile_array_dist("random", 1)
size = (30, 30)
res = cfunc(size)
self.assertIsInstance(res, np.ndarray)
self.assertEqual(res.shape, size)
self.assertEqual(res.dtype, np.dtype('float64'))
# Results are within expected bounds
self.assertTrue(np.all(res >= 0.0))
self.assertTrue(np.all(res < 1.0))
# Crude statistical tests
self.assertTrue(np.any(res <= 0.1))
self.assertTrue(np.any(res >= 0.9))
mean = res.mean()
self.assertGreaterEqual(mean, 0.45)
self.assertLessEqual(mean, 0.55)
# Sanity-check various distributions. For convenience, we only check
# those distributions that produce the exact same values as Numpy's.
def test_numpy_binomial(self):
self._check_array_dist("binomial", (20, 0.5))
def test_numpy_exponential(self):
self._check_array_dist("exponential", (1.5,))
def test_numpy_gumbel(self):
self._check_array_dist("gumbel", (1.5, 0.5))
def test_numpy_laplace(self):
self._check_array_dist("laplace", (1.5, 0.5))
def test_numpy_logistic(self):
self._check_array_dist("logistic", (1.5, 0.5))
def test_numpy_lognormal(self):
self._check_array_dist("lognormal", (1.5, 2.0))
def test_numpy_logseries(self):
self._check_array_dist("logseries", (0.8,))
def test_numpy_normal(self):
self._check_array_dist("normal", (0.5, 2.0))
def test_numpy_poisson(self):
self._check_array_dist("poisson", (0.8,))
def test_numpy_power(self):
self._check_array_dist("power", (0.8,))
def test_numpy_rand(self):
cfunc = jit(nopython=True)(numpy_check_rand)
expected, got = cfunc(42, 2, 3)
self.assertEqual(got.shape, (2, 3))
self.assertPreciseEqual(expected, got)
def test_numpy_randn(self):
cfunc = jit(nopython=True)(numpy_check_randn)
expected, got = cfunc(42, 2, 3)
self.assertEqual(got.shape, (2, 3))
self.assertPreciseEqual(expected, got)
def test_numpy_rayleigh(self):
self._check_array_dist("rayleigh", (0.8,))
def test_numpy_standard_cauchy(self):
self._check_array_dist("standard_cauchy", ())
def test_numpy_standard_exponential(self):
self._check_array_dist("standard_exponential", ())
def test_numpy_standard_normal(self):
self._check_array_dist("standard_normal", ())
def test_numpy_uniform(self):
self._check_array_dist("uniform", (0.1, 0.4))
def test_numpy_wald(self):
self._check_array_dist("wald", (0.1, 0.4))
def test_numpy_zipf(self):
self._check_array_dist("zipf", (2.5,))
class TestRandomChoice(BaseTest):
"""
Test np.random.choice.
"""
def _check_results(self, pop, res, replace=True):
"""
Check basic expectations about a batch of samples.
"""
spop = set(pop)
sres = set(res)
# All results are in the population
self.assertLessEqual(sres, spop)
# Sorted results are unlikely
self.assertNotEqual(sorted(res), list(res))
if replace:
# Duplicates are likely
self.assertLess(len(sres), len(res), res)
else:
# No duplicates
self.assertEqual(len(sres), len(res), res)
def _check_dist(self, pop, samples):
"""
Check distribution of some samples.
"""
# Sanity check that we have enough samples
self.assertGreaterEqual(len(samples), len(pop) * 100)
# Check equidistribution of samples
expected_frequency = len(samples) / len(pop)
c = collections.Counter(samples)
for value in pop:
n = c[value]
self.assertGreaterEqual(n, expected_frequency * 0.5)
self.assertLessEqual(n, expected_frequency * 2.0)
def _accumulate_array_results(self, func, nresults):
"""
Accumulate array results produced by *func* until they reach
*nresults* elements.
"""
res = []
while len(res) < nresults:
res += list(func().flat)
return res[:nresults]
def _check_choice_1(self, a, pop):
"""
Check choice(a) against pop.
"""
cfunc = jit(nopython=True)(numpy_choice1)
n = len(pop)
res = [cfunc(a) for i in range(n)]
self._check_results(pop, res)
dist = [cfunc(a) for i in range(n * 100)]
self._check_dist(pop, dist)
def test_choice_scalar_1(self):
"""
Test choice(int)
"""
n = 50
pop = list(range(n))
self._check_choice_1(n, pop)
def test_choice_array_1(self):
"""
Test choice(array)
"""
pop = np.arange(50) * 2 + 100
self._check_choice_1(pop, pop)
def _check_array_results(self, func, pop, replace=True):
"""
Check array results produced by *func* and their distribution.
"""
n = len(pop)
res = list(func().flat)
self._check_results(pop, res, replace)
dist = self._accumulate_array_results(func, n * 100)
self._check_dist(pop, dist)
def _check_choice_2(self, a, pop):
"""
Check choice(a, size) against pop.
"""
cfunc = jit(nopython=True)(numpy_choice2)
n = len(pop)
# Final sizes should be large enough, so as to stress
# replacement
sizes = [n - 10, (3, (n - 1) // 3), n * 10]
for size in sizes:
# Check result shape
res = cfunc(a, size)
expected_shape = size if isinstance(size, tuple) else (size,)
self.assertEqual(res.shape, expected_shape)
# Check results and their distribution
self._check_array_results(lambda: cfunc(a, size), pop)
def test_choice_scalar_2(self):
"""
Test choice(int, size)
"""
n = 50
pop = np.arange(n)
self._check_choice_2(n, pop)
def test_choice_array_2(self):
"""
Test choice(array, size)
"""
pop = np.arange(50) * 2 + 100
self._check_choice_2(pop, pop)
def _check_choice_3(self, a, pop):
"""
Check choice(a, size, replace) against pop.
"""
cfunc = jit(nopython=True)(numpy_choice3)
n = len(pop)
# Final sizes should be close but slightly <= n, so as to stress
# replacement (or not)
sizes = [n - 10, (3, (n - 1) // 3)]
replaces = [True, False]
# Check result shapes
for size in sizes:
for replace in [True, False]:
res = cfunc(a, size, replace)
expected_shape = size if isinstance(size, tuple) else (size,)
self.assertEqual(res.shape, expected_shape)
# Check results for replace=True
for size in sizes:
self._check_array_results(lambda: cfunc(a, size, True), pop)
# Check results for replace=False
for size in sizes:
self._check_array_results(lambda: cfunc(a, size, False), pop, False)
# Can't ask for more samples than population size with replace=False
for size in [n + 1, (3, n // 3 + 1)]:
with self.assertRaises(ValueError):
cfunc(a, size, False)
def test_choice_scalar_3(self):
"""
Test choice(int, size, replace)
"""
n = 50
pop = np.arange(n)
self._check_choice_3(n, pop)
def test_choice_array_3(self):
"""
Test choice(array, size, replace)
"""
pop = np.arange(50) * 2 + 100
self._check_choice_3(pop, pop)
def test_choice_follows_seed(self):
# See issue #3888, np.random.choice must acknowledge the seed
@jit(nopython=True)
def numba_rands(n_to_return, choice_array):
np.random.seed(1337)
out = np.empty((n_to_return, 2), np.int32)
for i in range(n_to_return):
out[i] = np.random.choice(choice_array, 2, False)
return out
choice_array = np.random.randint(300, size=1000).astype(np.int32)
tmp_np = choice_array.copy()
expected = numba_rands.py_func(5, tmp_np)
tmp_nb = choice_array.copy()
got = numba_rands(5, tmp_nb)
np.testing.assert_allclose(expected, got)
# check no mutation
np.testing.assert_allclose(choice_array, tmp_np)
np.testing.assert_allclose(choice_array, tmp_nb)
class TestRandomMultinomial(BaseTest):
"""
Test np.random.multinomial.
"""
# A biased dice
pvals = np.array([1, 1, 1, 2, 3, 1], dtype=np.float64)
pvals /= pvals.sum()
def _check_sample(self, n, pvals, sample):
"""
Check distribution of some samples.
"""
self.assertIsInstance(sample, np.ndarray)
self.assertEqual(sample.shape, (len(pvals),))
self.assertIn(sample.dtype, (np.dtype('int32'), np.dtype('int64')))
# Statistical properties
self.assertEqual(sample.sum(), n)
for p, nexp in zip(pvals, sample):
self.assertGreaterEqual(nexp, 0)
self.assertLessEqual(nexp, n)
pexp = float(nexp) / n
self.assertGreaterEqual(pexp, p * 0.5)
self.assertLessEqual(pexp, p * 2.0)
def test_multinomial_2(self):
"""
Test multinomial(n, pvals)
"""
cfunc = jit(nopython=True)(numpy_multinomial2)
n, pvals = 1000, self.pvals
res = cfunc(n, pvals)
self._check_sample(n, pvals, res)
# pvals as list
pvals = list(pvals)
res = cfunc(n, pvals)
self._check_sample(n, pvals, res)
# A case with extreme probabilities
n = 1000000
pvals = np.array([1, 0, n // 100, 1], dtype=np.float64)
pvals /= pvals.sum()
res = cfunc(n, pvals)
self._check_sample(n, pvals, res)
def test_multinomial_3_int(self):
"""
Test multinomial(n, pvals, size: int)
"""
cfunc = jit(nopython=True)(numpy_multinomial3)
n, pvals = 1000, self.pvals
k = 10
res = cfunc(n, pvals, k)
self.assertEqual(res.shape[0], k)
for sample in res:
self._check_sample(n, pvals, sample)
def test_multinomial_3_tuple(self):
"""
Test multinomial(n, pvals, size: tuple)
"""
cfunc = jit(nopython=True)(numpy_multinomial3)
n, pvals = 1000, self.pvals
k = (3, 4)
res = cfunc(n, pvals, k)
self.assertEqual(res.shape[:-1], k)
for sample in res.reshape((-1, res.shape[-1])):
self._check_sample(n, pvals, sample)
@jit(nopython=True, nogil=True)
def py_extract_randomness(seed, out):
    # Fill *out* with raw 32-bit draws from the Python-`random` PRNG,
    # seeding it first when *seed* is non-zero (0 means "no explicit
    # seed", i.e. rely on implicit/startup initialization).
    if seed != 0:
        random.seed(seed)
    for i in range(out.size):
        out[i] = random.getrandbits(32)
# Exclusive upper bound for np.random.randint() draws: full 32-bit range
_randint_limit = 1 << 32

@jit(nopython=True, nogil=True)
def np_extract_randomness(seed, out):
    # Fill *out* with uniform draws in [0, 2**32) from the Numpy PRNG,
    # seeding it first when *seed* is non-zero (0 means "no explicit
    # seed").  Mirrors py_extract_randomness above.
    # (The previous dead local `s = 0` was removed: it was never read.)
    if seed != 0:
        np.random.seed(seed)
    for i in range(out.size):
        out[i] = np.random.randint(_randint_limit)
class ConcurrencyBaseTest(TestCase):
    """Shared machinery for checking PRNG behaviour under concurrent
    execution (threads and child processes)."""

    # Enough iterations for:
    # 1. Mersenne-Twister state shuffles to occur (once every 624)
    # 2. Race conditions to be plausible
    # 3. Nice statistical properties to emerge
    _extract_iterations = 100000

    def setUp(self):
        # Warm up, to avoid compiling in the threads
        args = (42, self._get_output(1))
        py_extract_randomness(*args)
        np_extract_randomness(*args)

    def _get_output(self, size):
        # uint32 buffer to receive raw 32-bit PRNG draws
        return np.zeros(size, dtype=np.uint32)

    def check_output(self, out):
        """
        Check statistical properties of output.
        """
        # Output should follow a uniform distribution in [0, 1<<32):
        # mean is half the width, stddev is width / sqrt(12).
        expected_avg = 1 << 31
        expected_std = (1 << 32) / np.sqrt(12)
        rtol = 0.05  # given enough iterations
        np.testing.assert_allclose(out.mean(), expected_avg, rtol=rtol)
        np.testing.assert_allclose(out.std(), expected_std, rtol=rtol)

    def check_several_outputs(self, results, same_expected):
        """
        Check each output in *results* is statistically sound, and that
        the outputs are all identical (same_expected=True) or pairwise
        distinct (same_expected=False).
        """
        # Outputs should have the expected statistical properties
        # (an uninitialized PRNG or a PRNG whose internal state was
        # corrupted by a race condition could produce bogus randomness)
        for out in results:
            self.check_output(out)
        # Check all threads gave either the same sequence or
        # distinct sequences
        if same_expected:
            expected_distinct = 1
        else:
            expected_distinct = len(results)
        # Heads, tails and sums are cheap proxies for comparing
        # whole sequences.
        heads = {tuple(out[:5]) for out in results}
        tails = {tuple(out[-5:]) for out in results}
        sums = {out.sum() for out in results}
        self.assertEqual(len(heads), expected_distinct, heads)
        self.assertEqual(len(tails), expected_distinct, tails)
        self.assertEqual(len(sums), expected_distinct, sums)
class TestThreads(ConcurrencyBaseTest):
"""
Check the PRNG behaves well with threads.
"""
def extract_in_threads(self, nthreads, extract_randomness, seed):
"""
Run *nthreads* threads extracting randomness with the given *seed*
(no seeding if 0).
"""
results = [self._get_output(self._extract_iterations)
for i in range(nthreads + 1)]
def target(i):
# The PRNG will be seeded in thread
extract_randomness(seed=seed, out=results[i])
threads = [threading.Thread(target=target, args=(i,))
for i in range(nthreads)]
for th in threads:
th.start()
# Exercise main thread as well
target(nthreads)
for th in threads:
th.join()
return results
def check_thread_safety(self, extract_randomness):
"""
When initializing the PRNG the same way, each thread
should produce the same sequence of random numbers,
using independent states, regardless of parallel
execution.
"""
# Note the seed value doesn't matter, as long as it's
# the same for all threads
results = self.extract_in_threads(15, extract_randomness, seed=42)
# All threads gave the same sequence
self.check_several_outputs(results, same_expected=True)
def check_implicit_initialization(self, extract_randomness):
"""
The PRNG in new threads should be implicitly initialized with
system entropy, if seed() wasn't called.
"""
results = self.extract_in_threads(4, extract_randomness, seed=0)
# All threads gave a different, valid random sequence
self.check_several_outputs(results, same_expected=False)
def test_py_thread_safety(self):
self.check_thread_safety(py_extract_randomness)
def test_np_thread_safety(self):
self.check_thread_safety(np_extract_randomness)
def test_py_implicit_initialization(self):
self.check_implicit_initialization(py_extract_randomness)
def test_np_implicit_initialization(self):
self.check_implicit_initialization(np_extract_randomness)
@unittest.skipIf(os.name == 'nt', "Windows is not affected by fork() issues")
class TestProcesses(ConcurrencyBaseTest):
    """
    Check the PRNG behaves well in child processes.

    Forked children inherit the parent's PRNG state; these tests verify
    that each child nevertheless produces its own random stream.
    """
    # Avoid nested multiprocessing AssertionError
    # ("daemonic processes are not allowed to have children")
    _numba_parallel_test_ = False
    def extract_in_processes(self, nprocs, extract_randomness):
        """
        Run *nprocs* processes extracting randomness
        without explicit seeding.

        Returns the per-child outputs plus one produced by the parent
        itself; child exceptions are surfaced as test failures.
        """
        q = multiprocessing.Queue()
        results = []
        def target_inner():
            out = self._get_output(self._extract_iterations)
            extract_randomness(seed=0, out=out)
            return out
        def target():
            try:
                out = target_inner()
                q.put(out)
            except Exception as e:
                # Ensure an exception in a child gets reported
                # in the parent.
                q.put(e)
                raise
        if hasattr(multiprocessing, 'get_context'):
            # The test works only in fork context.
            mpc = multiprocessing.get_context('fork')
        else:
            mpc = multiprocessing
        procs = [mpc.Process(target=target)
                 for i in range(nprocs)]
        for p in procs:
            p.start()
        # Need to dequeue before joining, otherwise the large size of the
        # enqueued objects will lead to deadlock.
        for i in range(nprocs):
            results.append(q.get(timeout=5))
        for p in procs:
            p.join()
        # Exercise parent process as well; this will detect if the
        # same state was reused for one of the children.
        results.append(target_inner())
        for res in results:
            if isinstance(res, Exception):
                self.fail("Exception in child: %s" % (res,))
        return results
    def check_implicit_initialization(self, extract_randomness):
        """
        The PRNG in new processes should be implicitly initialized
        with system entropy, to avoid reproducing the same sequences.
        """
        results = self.extract_in_processes(2, extract_randomness)
        # All processes gave a different, valid random sequence
        self.check_several_outputs(results, same_expected=False)
    def test_py_implicit_initialization(self):
        self.check_implicit_initialization(py_extract_randomness)
    def test_np_implicit_initialization(self):
        self.check_implicit_initialization(np_extract_randomness)
if __name__ == "__main__":
unittest.main()
| bsd-2-clause |
adael/goldminer | goldminer/ui.py | 1 | 1846 | class SelectItem:
def __init__(self, label, active=True):
self.label = label
self.active = active
class Separator:
    """An inert menu row: empty label, never focusable or selectable."""

    def __init__(self):
        self.active = False
        self.label = ""
class SelectBox:
    """Keyboard-driven selection list.

    Tracks a *focused* item (highlighted) and a *selected* item (chosen
    with the confirm action).  Inactive items (e.g. separators) are
    skipped when moving the focus and cannot be selected.
    """

    def __init__(self, items):
        self.items = items
        self.item_focused_index = 0
        # Bug fix: the original tested ``0 in self.items`` — a membership
        # test of the *value* 0 among the item objects, which is always
        # False for item instances — so an inactive first item wrongly
        # kept the initial focus.  The intent is: "if there is a first
        # item and it is inactive, move the focus to the first active
        # item".  Also guard against next_active_index() returning None
        # (no active item at all), which would have made the focus index
        # unusable.
        if self.items and not self.items[0].active:
            index = self.next_active_index()
            if index is not None:
                self.item_focused_index = index
        self.item_selected_index = None

    def is_selected(self):
        """Return True while a selection is pending retrieval."""
        return self.item_selected_index is not None

    def item_selected(self):
        """Pop and return the selected item, or None if none selected."""
        if self.item_selected_index is not None:
            item = self.items[self.item_selected_index]
            # The selection is consumed once it is read.
            self.item_selected_index = None
            return item

    def item_focused(self):
        """Return the currently focused item."""
        return self.items[self.item_focused_index]

    def up_item(self):
        """Move focus to the previous active item, if there is one."""
        index = self.prev_active_index()
        if index is not None:
            self.item_focused_index = index

    def down_item(self):
        """Move focus to the next active item, if there is one."""
        index = self.next_active_index()
        if index is not None:
            self.item_focused_index = index

    def prev_active_index(self):
        """Index of the nearest active item before the focus, or None."""
        for index in range(self.item_focused_index - 1, -1, -1):
            if self.items[index].active:
                return index

    def next_active_index(self):
        """Index of the nearest active item after the focus, or None."""
        for index in range(self.item_focused_index + 1, len(self.items)):
            if self.items[index].active:
                return index

    def select_focused_item(self):
        """Select the focused item unless it is inactive."""
        if self.item_focused().active:
            self.item_selected_index = self.item_focused_index

    def handle_input(self, action):
        """Dispatch a UI action (up / down / confirm) to its handler."""
        if action.is_up:
            self.up_item()
        elif action.is_down:
            self.down_item()
        elif action.is_a or action.is_select:
            self.select_focused_item()
| mit |
timofurrer/sure | sure/old.py | 1 | 13767 | # -*- coding: utf-8 -*-
# <sure - utility belt for automated testing in python>
# Copyright (C) <2010-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import re
import traceback
import inspect
from copy import deepcopy
from pprint import pformat
from functools import wraps
try:
from collections import Iterable
except ImportError:
Iterable = (list, dict, tuple, set)
try:
import __builtin__ as builtins
except ImportError:
import builtins
from six import string_types, text_type
from sure.core import DeepComparison
from sure.core import _get_file_name
from sure.core import _get_line_number
from sure.core import itemize_length
def identify_callable_location(callable_object):
    """Return ``b'name [file line N]'`` describing where *callable_object*
    was defined, derived from its code object."""
    code = callable_object.__code__
    location = '{0} [{1} line {2}]'.format(
        code.co_name,
        os.path.relpath(code.co_filename),
        code.co_firstlineno,
    )
    return location.encode()
def is_iterable(obj):
    """True for objects exposing ``__iter__`` that are not plain strings."""
    if isinstance(obj, string_types):
        return False
    return hasattr(obj, '__iter__')
def all_integers(obj):
    """Return True when *obj* is a non-string iterable whose elements are
    all ints; otherwise fall through to None (callers rely on truthiness
    only)."""
    if not is_iterable(obj):
        return
    if all(isinstance(element, int) for element in obj):
        return True
def explanation(msg):
    """Decorator factory for assertion-helper predicates.

    The wrapped method's falsy result trips an assertion whose message is
    *msg* interpolated with ``(self._src, what)``; a truthy result is
    normalized to True.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, what):
            outcome = func(self, what)
            assert outcome, msg % (self._src, what)
            return True
        return wrapper
    return decorator
class AssertionHelper(object):
    """Fluent assertion wrapper around a single value (``self._src``).

    Methods either return True or raise AssertionError with a readable
    message.  ``within_range``, ``with_args``/``with_kwargs``/``and_kwargs``
    configure element filtering and the call made by :meth:`raises`.
    """
    def __init__(self, src,
                 within_range=None,
                 with_args=None,
                 with_kwargs=None,
                 and_kwargs=None):
        self._src = src
        self._attribute = None
        self._eval = None
        self._range = None
        if all_integers(within_range):
            if len(within_range) != 2:
                raise TypeError(
                    'within_range parameter must be a tuple with 2 objects',
                )
            self._range = within_range
        # Arguments forwarded to self._src when raises() calls it.
        self._callable_args = []
        if isinstance(with_args, (list, tuple)):
            self._callable_args = list(with_args)
        self._callable_kw = {}
        if isinstance(with_kwargs, dict):
            self._callable_kw.update(with_kwargs)
        # and_kwargs merges on top of with_kwargs (later keys win).
        if isinstance(and_kwargs, dict):
            self._callable_kw.update(and_kwargs)
    @classmethod
    def is_a_matcher(cls, func):
        """Register *func* as a new matcher method on the class.

        The matcher receives the wrapped value as its first argument.
        """
        def match(self, *args, **kw):
            return func(self._src, *args, **kw)
        # deepcopy gives each registered matcher its own function object
        # so renaming it does not affect other registrations.
        new_matcher = deepcopy(match)
        new_matcher.__name__ = func.__name__
        setattr(cls, func.__name__, new_matcher)
        return new_matcher
    def raises(self, exc, msg=None):
        """Assert that calling ``self._src`` raises *exc*.

        *exc* may be an exception class, a message substring, or a
        compiled regex (the latter two are matched against the raised
        exception's message).
        NOTE(review): ``re._pattern_type`` was removed in Python 3.7
        (``re.Pattern`` is the modern spelling) — confirm the supported
        Python range before relying on the regex branches.
        """
        if not callable(self._src):
            raise TypeError('%r is not callable' % self._src)
        try:
            self._src(*self._callable_args, **self._callable_kw)
        except BaseException as e:
            # A string/regex first argument means "any exception type,
            # but the message must match".
            if isinstance(exc, string_types):
                msg = exc
                exc = type(e)
            elif isinstance(exc, re._pattern_type):
                msg = exc
                exc = type(e)
            err = text_type(e)
            if isinstance(exc, type) and issubclass(exc, BaseException):
                if not isinstance(e, exc):
                    raise AssertionError(
                        '%r should raise %r, but raised %r:\nORIGINAL EXCEPTION:\n\n%s' % (
                            self._src, exc, e.__class__, traceback.format_exc()))
                if isinstance(msg, string_types) and msg not in err:
                    raise AssertionError('''
                    %r raised %s, but the exception message does not
                    match.\n\nEXPECTED:\n%s\n\nGOT:\n%s'''.strip() % (
                        self._src,
                        type(e).__name__,
                        msg, err))
                elif isinstance(msg, re._pattern_type) and not msg.search(err):
                    raise AssertionError(
                        'When calling %r the exception message does not match. ' \
                        'Expected to match regex: %r\n against:\n %r' % (identify_callable_location(self._src), msg.pattern, err))
            # NOTE(review): the two elif branches below repeat the message
            # checks for the case where *exc* is not an exception class;
            # with the string/regex normalization above they appear
            # unreachable — confirm before simplifying.
            elif isinstance(msg, string_types) and msg not in err:
                raise AssertionError(
                    'When calling %r the exception message does not match. ' \
                    'Expected: %r\n got:\n %r' % (self._src, msg, err))
            elif isinstance(msg, re._pattern_type) and not msg.search(err):
                raise AssertionError(
                    'When calling %r the exception message does not match. ' \
                    'Expected to match regex: %r\n against:\n %r' % (identify_callable_location(self._src), msg.pattern, err))
            else:
                raise e
        else:
            # Nothing was raised: build a location-aware failure message.
            if inspect.isbuiltin(self._src):
                _src_filename = '<built-in function>'
            else:
                _src_filename = _get_file_name(self._src)
            if inspect.isfunction(self._src):
                _src_lineno = _get_line_number(self._src)
                raise AssertionError(
                    'calling function %s(%s at line: "%d") with args %r and kwargs %r did not raise %r' % (
                        self._src.__name__,
                        _src_filename, _src_lineno,
                        self._callable_args,
                        self._callable_kw, exc))
            else:
                raise AssertionError(
                    'at %s:\ncalling %s() with args %r and kwargs %r did not raise %r' % (
                        _src_filename,
                        self._src.__name__,
                        self._callable_args,
                        self._callable_kw, exc))
        return True
    def deep_equals(self, dst):
        """Structurally compare the wrapped value against *dst*."""
        deep = DeepComparison(self._src, dst)
        comparison = deep.compare()
        if isinstance(comparison, bool):
            return comparison
        # A non-bool result carries a detailed diff to raise.
        raise comparison.as_assertion(self._src, dst)
    def equals(self, dst):
        """Assert equality, either of an attribute of every element (when
        configured via the_attribute()) or of the whole value."""
        if self._attribute and is_iterable(self._src):
            msg = '%r[%d].%s should be %r, but is %r'
            for index, item in enumerate(self._src):
                if self._range:
                    # Only check elements inside the configured range.
                    if index < self._range[0] or index > self._range[1]:
                        continue
                attribute = getattr(item, self._attribute)
                error = msg % (
                    self._src, index, self._attribute, dst, attribute)
                if attribute != dst:
                    raise AssertionError(error)
        else:
            return self.deep_equals(dst)
        return True
    def looks_like(self, dst):
        """Assert equality ignoring whitespace and letter case.

        NOTE(review): mutates ``self._src`` in place — confirm callers
        do not reuse the helper afterwards.
        """
        old_src = pformat(self._src)
        old_dst = pformat(dst)
        self._src = re.sub(r'\s', '', self._src).lower()
        dst = re.sub(r'\s', '', dst).lower()
        error = '%s does not look like %s' % (old_src, old_dst)
        assert self._src == dst, error
        return self._src == dst
    def every_one_is(self, dst):
        """Assert every (in-range) element equals *dst*."""
        msg = 'all members of %r should be %r, but the %dth is %r'
        for index, item in enumerate(self._src):
            if self._range:
                if index < self._range[0] or index > self._range[1]:
                    continue
            error = msg % (self._src, dst, index, item)
            if item != dst:
                raise AssertionError(error)
        return True
    @explanation('%r should differ from %r, but is the same thing')
    def differs(self, dst):
        return self._src != dst
    @explanation('%r should be a instance of %r, but is not')
    def is_a(self, dst):
        return isinstance(self._src, dst)
    def at(self, key):
        """Descend into a dict key or object attribute, returning a new
        helper wrapping the nested value."""
        assert self.has(key)
        if isinstance(self._src, dict):
            return AssertionHelper(self._src[key])
        else:
            return AssertionHelper(getattr(self._src, key))
    @explanation('%r should have %r, but have not')
    def has(self, that):
        return that in self
    def _get_that(self, that):
        # Accept either a number or a sized object (its len() is used).
        try:
            that = int(that)
        except TypeError:
            that = len(that)
        return that
    def len_greater_than(self, that):
        """Assert len(src) > that (number or sized object)."""
        that = self._get_that(that)
        length = len(self._src)
        if length <= that:
            error = 'the length of the %s should be greater then %d, but is %d' % (
                type(self._src).__name__,
                that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_greater_than_or_equals(self, that):
        """Assert len(src) >= that."""
        that = self._get_that(that)
        length = len(self._src)
        if length < that:
            error = 'the length of %r should be greater then or equals %d, but is %d' % (
                self._src,
                that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_lower_than(self, that):
        """Assert len(src) < that."""
        original_that = that
        # Iterables are measured by length; other values coerced by
        # _get_that().
        if isinstance(that, Iterable):
            that = len(that)
        else:
            that = self._get_that(that)
        length = len(self._src)
        if length >= that:
            error = 'the length of %r should be lower then %r, but is %d' % (
                self._src,
                original_that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_lower_than_or_equals(self, that):
        """Assert len(src) <= that."""
        that = self._get_that(that)
        length = len(self._src)
        error = 'the length of %r should be lower then or equals %d, but is %d'
        if length > that:
            msg = error % (
                self._src,
                that,
                length,
            )
            raise AssertionError(msg)
        return True
    def len_is(self, that):
        """Assert len(src) == that."""
        that = self._get_that(that)
        length = len(self._src)
        if length != that:
            error = 'the length of %r should be %d, but is %d' % (
                self._src,
                that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_is_not(self, that):
        """Assert len(src) != that."""
        that = self._get_that(that)
        length = len(self._src)
        if length == that:
            error = 'the length of %r should not be %d' % (
                self._src,
                that,
            )
            raise AssertionError(error)
        return True
    def like(self, that):
        """Alias for has()."""
        return self.has(that)
    def the_attribute(self, attr):
        # Configure equals() to compare this attribute of each element.
        self._attribute = attr
        return self
    def in_each(self, attr):
        # Configure matches() to evaluate this expression on each element.
        self._eval = attr
        return self
    def matches(self, items):
        """Assert that evaluating the in_each() expression on every
        element matches the corresponding entry of *items*."""
        msg = '%r[%d].%s should be %r, but is %r'
        # The configured expression is evaluated against each element
        # bound to the name 'current'.
        get_eval = lambda item: eval(
            "%s.%s" % ('current', self._eval), {}, {'current': item},
        )
        if self._eval and is_iterable(self._src):
            if isinstance(items, string_types):
                # A single string is replicated for every element.
                items = [items for x in range(len(items))]
            else:
                if len(items) != len(self._src):
                    source = list(map(get_eval, self._src))
                    source_len = len(source)
                    items_len = len(items)
                    raise AssertionError(
                        '%r has %d items, but the matching list has %d: %r'
                        % (source, source_len, items_len, items),
                    )
            for index, (item, other) in enumerate(zip(self._src, items)):
                if self._range:
                    if index < self._range[0] or index > self._range[1]:
                        continue
                value = get_eval(item)
                error = msg % (self._src, index, self._eval, other, value)
                if other != value:
                    raise AssertionError(error)
        else:
            return self.equals(items)
        return True
    @builtins.property
    def is_empty(self):
        """Assert the wrapped iterable has no elements."""
        try:
            lst = list(self._src)
            length = len(lst)
            assert length == 0, \
                '%r is not empty, it has %s' % (self._src,
                                                itemize_length(self._src))
            return True
        except TypeError:
            raise AssertionError("%r is not iterable" % self._src)
    @builtins.property
    def are_empty(self):
        # Plural-reading alias for is_empty.
        return self.is_empty
    def __contains__(self, what):
        """Membership: dict keys / iterable elements / object attributes.

        NOTE(review): the first branch is immediately overridden by the
        second (dicts are Iterable, and iterating a dict yields its keys
        anyway) — confirm before simplifying.
        """
        if isinstance(self._src, dict):
            items = self._src.keys()
        if isinstance(self._src, Iterable):
            items = self._src
        else:
            items = dir(self._src)
        return what in items
    def contains(self, what):
        """Assert *what* is contained in the wrapped value."""
        assert what in self._src, '%r should be in %r' % (what, self._src)
        return True
    def does_not_contain(self, what):
        """Assert *what* is NOT contained in the wrapped value."""
        assert what not in self._src, \
            '%r should NOT be in %r' % (what, self._src)
        return True
    doesnt_contain = does_not_contain
# Public alias so assertions read naturally: ``that(x).equals(y)``.
that = AssertionHelper
| gpl-3.0 |
ylatuya/cerbero | cerbero/commands/genxcconfig.py | 29 | 2437 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import UsageError
from cerbero.ide.xcode.xcconfig import XCConfig
from cerbero.utils import _, N_, ArgparseArgument
from cerbero.utils import messages as m
class GenXCodeConfig(Command):
    """Cerbero command that writes ``.xcconfig`` files so XCode projects
    can consume the SDK built by cerbero."""
    doc = N_('Generate XCode config files to use the SDK from VS')
    name = 'genxcconfig'

    def __init__(self):
        Command.__init__(self,
            [ArgparseArgument('-o', '--output_dir', default='.',
                help=_('output directory where .xcconfig files will be saved')),
             ArgparseArgument('-f', '--filename', default=None,
                help=_('filename of the .xcconfig file')),
             ArgparseArgument('libraries', nargs='*',
                help=_('List of libraries to include')),
            ])

    def run(self, config, args):
        # Thin adapter from the parsed CLI namespace to runargs().
        self.runargs(config, args.output_dir, args.filename, args.libraries)

    def runargs(self, config, output_dir, filename, libraries):
        """Write ``<filename>.xcconfig`` covering *libraries* into
        *output_dir* (created if missing).

        Raises UsageError when *libraries* is empty; the file name
        defaults to the first library.
        """
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        if not libraries:
            raise UsageError("You need to specify at least one library name")
        filename = filename or libraries[0]
        filepath = os.path.join(output_dir, '%s.xcconfig' % filename)
        xcconfig = XCConfig(libraries)
        xcconfig.create(filepath)
        m.action('Created %s.xcconfig' % filename)
        # Message fixed: was "XCode config file were sucessfully created".
        m.message('XCode config file was successfully created in %s' %
                  os.path.abspath(filepath))
# Make the command discoverable by the cerbero CLI dispatcher.
register_command(GenXCodeConfig)
| lgpl-2.1 |
i-rabot/tractogithub | tracformatter/trac/util/presentation.py | 1 | 10412 | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# Copyright (C) 2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Various utility functions and classes that support common presentation
tasks such as grouping or pagination.
"""
from math import ceil
import re
__all__ = ['captioned_button', 'classes', 'first_last', 'group', 'istext',
'prepared_paginate', 'paginate', 'Paginator']
__no_apidoc__ = 'prepared_paginate'
def captioned_button(req, symbol, text):
    """Return symbol and text or only symbol, according to user preferences."""
    if req.session.get('ui.use_symbols'):
        return symbol
    return u'%s %s' % (symbol, text)
def classes(*args, **kwargs):
    """Helper function for dynamically assembling a list of CSS class names
    in templates.

    Positional arguments are taken as class names verbatim (falsy ones
    are dropped); each keyword argument's *name* is included when its
    value is true:

    >>> classes('foo', 'bar')
    u'foo bar'

    >>> classes('foo', bar=True)
    u'foo bar'

    >>> classes('foo', bar=False)
    u'foo'

    If nothing qualifies, the function returns `None`:

    >>> classes(bar=False)
    """
    names = [arg for arg in args if arg]
    names.extend(k for k, v in kwargs.items() if v)
    return u' '.join(names) if names else None
def first_last(idx, seq):
    """Generate ``first`` or ``last`` or both, according to the
    position `idx` in sequence `seq`.
    """
    is_first = (idx == 0)
    is_last = (idx == len(seq) - 1)
    return classes(first=is_first, last=is_last)
def group(iterable, num, predicate=None):
    """Combines the elements produced by the given iterable so that every `n`
    items are returned as a tuple.

    >>> items = [1, 2, 3, 4]
    >>> for item in group(items, 2):
    ...     print item
    (1, 2)
    (3, 4)

    The last tuple is padded with `None` values if its' length is smaller than
    `num`.

    >>> items = [1, 2, 3, 4, 5]
    >>> for item in group(items, 2):
    ...     print item
    (1, 2)
    (3, 4)
    (5, None)

    The optional `predicate` parameter can be used to flag elements that should
    not be packed together with other items. Only those elements where the
    predicate function returns True are grouped with other elements, otherwise
    they are returned as a tuple of length 1:

    >>> items = [1, 2, 3, 4]
    >>> for item in group(items, 2, lambda x: x != 3):
    ...     print item
    (1, 2)
    (3,)
    (4, None)
    """
    pending = []
    for element in iterable:
        # An element vetoed by the predicate must stand alone.
        isolate = bool(predicate) and not predicate(element)
        if pending and isolate:
            # Flush what was accumulated so far, padded to full width.
            pending.extend([None] * (num - len(pending)))
            yield tuple(pending)
            pending = []
        pending.append(element)
        if isolate or len(pending) == num:
            yield tuple(pending)
            pending = []
    if pending:
        pending.extend([None] * (num - len(pending)))
        yield tuple(pending)
def istext(text):
    """`True` for text (`unicode` and `str`), but `False` for `Markup`."""
    # Genshi's Markup subclasses unicode, so it must be excluded
    # explicitly to tell already-escaped markup apart from raw text.
    # NOTE: `basestring` makes this Python 2 only.
    from genshi.core import Markup
    return isinstance(text, basestring) and not isinstance(text, Markup)
def prepared_paginate(items, num_items, max_per_page):
    """Pagination bookkeeping for an already-sliced result set.

    Returns ``(items, num_items, num_pages)``.  A ``max_per_page`` of 0
    means "unpaginated" and counts as a single page.
    """
    if max_per_page:
        num_pages = int(ceil(num_items / float(max_per_page)))
    else:
        num_pages = 1
    return items, num_items, num_pages
def paginate(items, page=0, max_per_page=10):
    """Simple generic pagination.

    Given an iterable, this function returns:
     * the slice of objects on the requested page,
     * the total number of items, and
     * the total number of pages.

    The `items` parameter can be a list, tuple, or iterator:

    >>> items = range(12)
    >>> items
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    >>> paginate(items)
    ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
    >>> paginate(items, page=1)
    ([10, 11], 12, 2)
    >>> paginate(iter(items))
    ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
    >>> paginate(iter(items), page=1)
    ([10, 11], 12, 2)

    This function also works with generators:

    >>> def generate():
    ...     for idx in range(12):
    ...         yield idx
    >>> paginate(generate())
    ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
    >>> paginate(generate(), page=1)
    ([10, 11], 12, 2)

    The `max_per_page` parameter can be used to set the number of items that
    should be displayed per page:

    >>> items = range(12)
    >>> paginate(items, page=0, max_per_page=6)
    ([0, 1, 2, 3, 4, 5], 12, 2)
    >>> paginate(items, page=1, max_per_page=6)
    ([6, 7, 8, 9, 10, 11], 12, 2)

    Raises AssertionError when the requested page is beyond the end of a
    sized input.
    """
    # Normalize a None/0 page to 0.
    if not page:
        page = 0
    start = page * max_per_page
    stop = start + max_per_page
    count = None
    if hasattr(items, '__len__'):
        count = len(items)
        if count:
            assert start < count, 'Page %d out of range' % page
    try: # Try slicing first for better performance
        retval = items[start:stop]
    except TypeError: # Slicing not supported, so iterate through the whole list
        retval = []
        idx = -1 # Needed if items = []
        for idx, item in enumerate(items):
            if start <= idx < stop:
                retval.append(item)
            # If we already obtained the total number of items via `len()`,
            # we can break out of the loop as soon as we've got the last item
            # for the requested page
            if count is not None and idx >= stop:
                break
        # For pure iterators the total is only known after exhausting them.
        if count is None:
            count = idx + 1
    return retval, count, int(ceil(float(count) / max_per_page))
class Paginator(object):
    """Pagination controller.

    Wraps one page of a result set together with the bookkeeping needed
    to render pagination UI (page spans, next/previous flags, the window
    of page numbers to show).
    """
    def __init__(self, items, page=0, max_per_page=10, num_items=None):
        if not page:
            page = 0
        # When the caller already knows the total, the items are assumed
        # to be pre-sliced; otherwise paginate() slices them here.
        if num_items is None:
            items, num_items, num_pages = paginate(items, page, max_per_page)
        else:
            items, num_items, num_pages = prepared_paginate(items, num_items,
                                                            max_per_page)
        offset = page * max_per_page
        self.page = page
        self.max_per_page = max_per_page
        self.items = items
        self.num_items = num_items
        self.num_pages = num_pages
        # Half-open-ish (start, stop) positions of this page in the full
        # result set.
        self.span = offset, offset + len(items)
        self.show_index = True
    def __iter__(self):
        return iter(self.items)
    def __len__(self):
        return len(self.items)
    def __nonzero__(self):
        # NOTE: Python 2 truth protocol (__bool__ on Python 3).
        return len(self.items) > 0
    def __setitem__(self, idx, value):
        self.items[idx] = value
    @property
    def has_more_pages(self):
        return self.num_pages > 1
    @property
    def has_next_page(self):
        return self.page + 1 < self.num_pages
    @property
    def has_previous_page(self):
        return self.page > 0
    def get_shown_pages(self, page_index_count = 11):
        """Return the 1-based page numbers to display, a window of at
        most *page_index_count* pages centered on the current page and
        clamped to the valid range."""
        if self.has_more_pages == False:
            return range(1, 2)
        min_page = 1
        max_page = int(ceil(float(self.num_items) / self.max_per_page))
        current_page = self.page + 1
        # NOTE: relies on Python 2 integer division for the half-window.
        start_page = current_page - page_index_count / 2
        end_page = current_page + page_index_count / 2 + \
                   (page_index_count % 2 - 1)
        if start_page < min_page:
            start_page = min_page
        if end_page > max_page:
            end_page = max_page
        return range(start_page, end_page + 1)
    def displayed_items(self):
        """Return a localized "X - Y of Z" summary for the current page."""
        from trac.util.translation import _
        start, stop = self.span
        total = self.num_items
        # A single-item page reads "Y of Z" instead of "X - Y of Z".
        if start + 1 == stop:
            return _("%(last)d of %(total)d", last=stop, total=total)
        else:
            return _("%(start)d - %(stop)d of %(total)d",
                     start=self.span[0] + 1, stop=self.span[1], total=total)
def separated(items, sep=','):
    """Yield `(item, sep)` tuples, one for each element in `items`.

    `sep` will be `None` for the last item.

    >>> list(separated([1, 2]))
    [(1, ','), (2, None)]

    >>> list(separated([1]))
    [(1, None)]

    >>> list(separated("abc", ':'))
    [('a', ':'), ('b', ':'), ('c', None)]
    """
    items = iter(items)
    # Use the builtin next() (available since Python 2.6) instead of the
    # Python-2-only .next() method, so the helper also works on Python 3.
    last = next(items)
    for i in items:
        yield last, sep
        last = i
    yield last, None
# Prefer the stdlib json module; fall back to a hand-rolled encoder on
# very old Pythons where it is unavailable.
try:
    from json import dumps
    # Escape &, < and > as \uXXXX so the output is safe to embed in
    # HTML <script> blocks.
    _js_quote = dict((c, '\\u%04x' % ord(c)) for c in '&<>')
    _js_quote_re = re.compile('[' + ''.join(_js_quote) + ']')
    def to_json(value):
        """Encode `value` to JSON."""
        def replace(match):
            return _js_quote[match.group(0)]
        # Compact separators and sorted keys give deterministic output.
        text = dumps(value, sort_keys=True, separators=(',', ':'))
        return _js_quote_re.sub(replace, text)
except ImportError:
    from trac.util.text import to_js_string
    def to_json(value):
        """Encode `value` to JSON."""
        # NOTE: this fallback is Python 2 only (basestring, long,
        # dict.iteritems).
        if isinstance(value, basestring):
            return to_js_string(value)
        elif value is None:
            return 'null'
        elif value is False:
            return 'false'
        elif value is True:
            return 'true'
        elif isinstance(value, (int, long)):
            return str(value)
        elif isinstance(value, float):
            return repr(value)
        elif isinstance(value, (list, tuple)):
            return '[%s]' % ','.join(to_json(each) for each in value)
        elif isinstance(value, dict):
            # Sort keys to mirror the json-module branch above.
            return '{%s}' % ','.join('%s:%s' % (to_json(k), to_json(v))
                                     for k, v in sorted(value.iteritems()))
        else:
            raise TypeError('Cannot encode type %s' % value.__class__.__name__)
| bsd-3-clause |
leeon/annotated-django | tests/model_fields/models.py | 2 | 8698 | import os
import tempfile
import warnings
try:
from PIL import Image
except ImportError:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
# Minimal model exercising CharField and DecimalField.
class Foo(models.Model):
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
    # Callable default for Bar.a below; evaluated lazily on save.
    return Foo.objects.get(id=1)
class Bar(models.Model):
    # FK whose default is a callable returning a Foo instance.
    b = models.CharField(max_length=10)
    a = models.ForeignKey(Foo, default=get_foo)
# Exercises grouped choices (optgroup-style) plus a flat entry.
class Whiz(models.Model):
    CHOICES = (
        ('Group 1', (
                (1, 'First'),
                (2, 'Second'),
            )
        ),
        ('Group 2', (
                (3, 'Third'),
                (4, 'Fourth'),
            )
        ),
        (0, 'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)
# One tiny model per field type under test.
class BigD(models.Model):
    # Near-maximal decimal precision.
    d = models.DecimalField(max_digits=38, decimal_places=30)
class FloatModel(models.Model):
    size = models.FloatField()
class BigS(models.Model):
    s = models.SlugField(max_length=255)
class SmallIntegerModel(models.Model):
    value = models.SmallIntegerField()
class IntegerModel(models.Model):
    value = models.IntegerField()
class BigIntegerModel(models.Model):
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null=True, blank=True)
class PositiveSmallIntegerModel(models.Model):
    value = models.PositiveSmallIntegerField()
class PositiveIntegerModel(models.Model):
    value = models.PositiveIntegerField()
class Post(models.Model):
    title = models.CharField(max_length=100)
    body = models.TextField()
class NullBooleanModel(models.Model):
    nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
    bfield = models.BooleanField(default=None)
    string = models.CharField(max_length=10, default='abc')
class DateTimeModel(models.Model):
    d = models.DateField()
    dt = models.DateTimeField()
    t = models.TimeField()
class PrimaryKeyCharModel(models.Model):
    # Non-integer (CharField) primary key.
    string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
    """Model with FKs to models with {Null,}BooleanField's, #15040"""
    bf = models.ForeignKey(BooleanModel)
    nbf = models.ForeignKey(NullBooleanModel)
class FkToChar(models.Model):
    """Model with FK to a model with a CharField primary key, #19299"""
    out = models.ForeignKey(PrimaryKeyCharModel)
class RenamedField(models.Model):
    # Field whose attribute name differs from its column/field name.
    modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
    """One field of every type, each with an explicit verbose name."""
    id = models.AutoField("verbose pk", primary_key=True)
    field1 = models.BigIntegerField("verbose field1")
    field2 = models.BooleanField("verbose field2", default=False)
    field3 = models.CharField("verbose field3", max_length=10)
    field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
    field5 = models.DateField("verbose field5")
    field6 = models.DateTimeField("verbose field6")
    field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
    field8 = models.EmailField("verbose field8")
    field9 = models.FileField("verbose field9", upload_to="unused")
    field10 = models.FilePathField("verbose field10")
    field11 = models.FloatField("verbose field11")
    # Don't want to depend on Pillow in this test
    #field_image = models.ImageField("verbose field")
    field12 = models.IntegerField("verbose field12")
    # IPAddressField is deprecated; swallow its warning at class
    # definition time.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        field13 = models.IPAddressField("verbose field13")
    field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
    field15 = models.NullBooleanField("verbose field15")
    field16 = models.PositiveIntegerField("verbose field16")
    field17 = models.PositiveSmallIntegerField("verbose field17")
    field18 = models.SlugField("verbose field18")
    field19 = models.SmallIntegerField("verbose field19")
    field20 = models.TextField("verbose field20")
    field21 = models.TimeField("verbose field21")
    field22 = models.URLField("verbose field22")
# This model isn't used in any test, just here to ensure it validates successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
    # All digits are decimal places, i.e. only values in (-1, 1).
    d = models.DecimalField(max_digits=3, decimal_places=3)
class DataModel(models.Model):
    # BinaryField with a bytes default plus an unconstrained one.
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()
###############################################################################
# FileField
# Minimal FileField model.
class Document(models.Model):
    myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If Pillow available, do these tests.
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super(TestImageFieldFile, self).__init__(*args, **kwargs)
def open(self):
self.was_opened = True
super(TestImageFieldFile, self).open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class AbsctractPersonWithHeight(models.Model):
"""
Abstract model that defines an ImageField with only one dimension field
to make sure the dimension update is correctly run on concrete subclass
instance post-initialization.
"""
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class Meta:
abstract = True
class PersonWithHeight(AbsctractPersonWithHeight):
"""
Concrete model that subclass an abctract one with only on dimension
field.
"""
name = models.CharField(max_length=50)
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
    """
    Model that:
    * Defines two ImageFields
    * Defines the height/width fields before the ImageFields
    * Has a nullable ImageField
    """
    name = models.CharField(max_length=50)
    mugshot_height = models.PositiveSmallIntegerField()
    mugshot_width = models.PositiveSmallIntegerField()
    mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                             height_field='mugshot_height',
                             width_field='mugshot_width')
    # The second image (and its dimension fields) is optional.
    headshot_height = models.PositiveSmallIntegerField(
        blank=True, null=True)
    headshot_width = models.PositiveSmallIntegerField(
        blank=True, null=True)
    headshot = TestImageField(blank=True, null=True,
                              storage=temp_storage, upload_to='tests',
                              height_field='headshot_height',
                              width_field='headshot_width')
###############################################################################
# Copyright (C) 1999--2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# keltus <keltus@users.sourceforge.net>
#
# $Id: irclib.py 72 2007-11-07 17:25:16Z s0undt3ch $
"""irclib -- Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol at a quite
low level. It provides an event-driven IRC client framework. It has
a fairly thorough support for the basic IRC protocol, CTCP, DCC chat,
but DCC file transfers is not yet supported.
In order to understand how to make an IRC client, I'm afraid you more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server triggers events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
    specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* The IRC protocol shines through the abstraction a bit too much.
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
  * There is no support for DCC file transfers.
  * The author hasn't even read RFC 2810, 2811, 2812 and 2813.
* Like most projects, documentation is lacking...
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import string
import sys
import time
import types
VERSION = 0, 4, 6
DEBUG = 0  # set to 1 to print raw protocol traffic on stdout
# TODO
# ----
# (maybe) thread safety
# (maybe) color parser convenience functions
# documentation (including all event types)
# (maybe) add awareness of different types of ircds
# send data asynchronously to the server (and DCC connections)
# (maybe) automatically close unused, passive DCC connections after a while
# NOTES
# -----
# connection.quit() only sends QUIT to the server.
# ERROR from the server triggers the error event and the disconnect event.
# dropping of the connection triggers the disconnect event.
# Charset used by to_unicode() for values that are not strings.
# NOTE(review): default None makes to_unicode() raise TypeError for such
# values -- confirm that the embedding application always sets this.
FALLBACK_CHARSET = None
def to_unicode(txt):
    """Coerce *txt* to a ``unicode`` object on a best-effort basis.

    Lists/tuples are joined into one string first.  Byte strings are
    decoded as UTF-8, falling back to the locale's preferred encoding;
    non-string values are decoded with the module-wide FALLBACK_CHARSET.
    """
    if isinstance(txt, (list, tuple)):
        # Flatten sequences of strings into a single string first.
        txt = ''.join(txt)
    if isinstance(txt, basestring):
        if isinstance(txt, unicode):
            return txt
        try:
            # errors='replace' substitutes undecodable bytes instead of
            # raising, so the except branch below is effectively dead code;
            # kept as a belt-and-braces fallback.
            return unicode(txt, 'utf-8', 'replace')
        except UnicodeDecodeError:
            import locale
            encoding = locale.getpreferredencoding()
            return unicode(txt, encoding, 'replace')
    else:
        # NOTE(review): passing a non-buffer value such as None here makes
        # unicode() raise TypeError (and FALLBACK_CHARSET defaults to
        # None) -- confirm callers only pass strings or configure the
        # fallback charset.
        return unicode(txt, FALLBACK_CHARSET, 'replace')
    return txt
# Root of this library's exception hierarchy.
class IRCError(Exception):
    """Represents an IRC exception."""
    pass
class IRC:
    """Class that handles one or several IRC server connections.
    When an IRC object has been instantiated, it can be used to create
    Connection objects that represent the IRC connections.  The
    responsibility of the IRC object is to provide an event-driven
    framework for the connections and to keep the connections alive.
    It runs a select loop to poll each connection's TCP socket and
    hands over the sockets with incoming data for processing by the
    corresponding connection.
    The methods of most interest for an IRC client writer are server,
    add_global_handler, remove_global_handler, execute_at,
    execute_delayed, process_once and process_forever.
    Here is an example:
        irc = irclib.IRC()
        server = irc.server()
        server.connect("irc.some.where", 6667, "my_nickname")
        server.privmsg("a_nickname", "Hi there!")
        irc.process_forever()
    This will connect to the IRC server irc.some.where on port 6667
    using the nickname my_nickname and send the message "Hi there!"
    to the nickname a_nickname.
    """
    def __init__(self, fn_to_add_socket=None,
                 fn_to_remove_socket=None,
                 fn_to_add_timeout=None):
        """Constructor for IRC objects.
        Optional arguments are fn_to_add_socket, fn_to_remove_socket
        and fn_to_add_timeout.  The first two specify functions that
        will be called with a socket object as argument when the IRC
        object wants to be notified (or stop being notified) of data
        coming on a new socket.  When new data arrives, the method
        process_data should be called.  Similarly, fn_to_add_timeout
        is called with a number of seconds (a floating point number)
        as first argument when the IRC object wants to receive a
        notification (by calling the process_timeout method).  So, if
        e.g. the argument is 42.17, the object wants the
        process_timeout method to be called after 42 seconds and 170
        milliseconds.
        The three arguments mainly exist to be able to use an external
        main loop (for example Tkinter's or PyGTK's main app loop)
        instead of calling the process_forever method.
        An alternative is to just call ServerConnection.process_once()
        once in a while.
        """
        # Both socket callbacks must be supplied together; a lone one is
        # ignored.
        if fn_to_add_socket and fn_to_remove_socket:
            self.fn_to_add_socket = fn_to_add_socket
            self.fn_to_remove_socket = fn_to_remove_socket
        else:
            self.fn_to_add_socket = None
            self.fn_to_remove_socket = None
        self.fn_to_add_timeout = fn_to_add_timeout
        self.connections = []
        self.handlers = {}
        # List of (time, function, arguments) tuples, kept sorted by time.
        self.delayed_commands = []
        # Keep connections alive: answer server PINGs automatically, with
        # high priority so user handlers run afterwards.
        self.add_global_handler("ping", _ping_ponger, -42)
    def server(self):
        """Creates and returns a ServerConnection object."""
        c = ServerConnection(self)
        self.connections.append(c)
        return c
    def process_data(self, sockets):
        """Called when there is more data to read on connection sockets.
        Arguments:
            sockets -- A list of socket objects.
        See documentation for IRC.__init__.
        """
        for s in sockets:
            for c in self.connections:
                if s == c._get_socket():
                    c.process_data()
    def process_timeout(self):
        """Called when a timeout notification is due.
        See documentation for IRC.__init__.
        """
        t = time.time()
        # delayed_commands is sorted by due time, so we can stop at the
        # first entry that is still in the future.
        while self.delayed_commands:
            if t >= self.delayed_commands[0][0]:
                self.delayed_commands[0][1](*self.delayed_commands[0][2])
                del self.delayed_commands[0]
            else:
                break
    def process_once(self, timeout=0):
        """Process data from connections once.
        Arguments:
            timeout -- How long the select() call should wait if no
                       data is available.
        This method should be called periodically to check and process
        incoming data, if there are any.  If that seems boring, look
        at the process_forever method.
        """
        sockets = [c._get_socket() for c in self.connections]
        sockets = [s for s in sockets if s is not None]
        if sockets:
            (i, o, e) = select.select(sockets, [], [], timeout)
            self.process_data(i)
        else:
            # Nothing to select on; still honour the timeout so callers
            # don't busy-loop.
            time.sleep(timeout)
        self.process_timeout()
    def process_forever(self, timeout=0.2):
        """Run an infinite loop, processing data from connections.
        This method repeatedly calls process_once.
        Arguments:
            timeout -- Parameter to pass to process_once.
        """
        while 1:
            self.process_once(timeout)
    def disconnect_all(self, message=""):
        """Disconnects all connections."""
        for c in self.connections:
            c.disconnect(message)
    def add_global_handler(self, event, handler, priority=0):
        """Adds a global handler function for a specific event type.
        Arguments:
            event -- Event type (a string).  Check the values of the
            numeric_events dictionary in irclib.py for possible event
            types.
            handler -- Callback function.
            priority -- A number (the lower number, the higher priority).
        The handler function is called whenever the specified event is
        triggered in any of the connections.  See documentation for
        the Event class.
        The handler functions are called in priority order (lowest
        number is highest priority).  If a handler function returns
        "NO MORE", no more handlers will be called.
        """
        if event not in self.handlers:
            self.handlers[event] = []
        # insort keeps the list sorted by priority.
        bisect.insort(self.handlers[event], (priority, handler))
    def remove_global_handler(self, event, handler):
        """Removes a global handler function.
        Arguments:
            event -- Event type (a string).
            handler -- Callback function.
        Returns 1 on success, otherwise 0.
        """
        if event not in self.handlers:
            return 0
        removed = 0
        # Iterate over a copy: removing from the list while iterating it
        # skips elements, which could leave duplicate registrations behind.
        for h in self.handlers[event][:]:
            if handler == h[1]:
                self.handlers[event].remove(h)
                removed = 1
        # Only report success when something was actually removed, as
        # promised by the docstring.
        return removed
    def execute_at(self, at, function, arguments=()):
        """Execute a function at a specified time.
        Arguments:
            at -- Execute at this time (standard "time_t" time).
            function -- Function to call.
            arguments -- Arguments to give the function.
        """
        self.execute_delayed(at-time.time(), function, arguments)
    def execute_delayed(self, delay, function, arguments=()):
        """Execute a function after a specified time.
        Arguments:
            delay -- How many seconds to wait.
            function -- Function to call.
            arguments -- Arguments to give the function.
        """
        bisect.insort(self.delayed_commands, (delay+time.time(), function, arguments))
        if self.fn_to_add_timeout:
            self.fn_to_add_timeout(delay)
    def dcc(self, dcctype="chat"):
        """Creates and returns a DCCConnection object.
        Arguments:
            dcctype -- "chat" for DCC CHAT connections or "raw" for
                       DCC SEND (or other DCC types).  If "chat",
                       incoming data will be split in newline-separated
                       chunks.  If "raw", incoming data is not touched.
        """
        c = DCCConnection(self, dcctype)
        self.connections.append(c)
        return c
    def _handle_event(self, connection, event):
        """[Internal] Dispatch an event to "all_events" handlers, then to
        handlers registered for this specific event type."""
        h = self.handlers
        for handler in h.get("all_events", []) + h.get(event.eventtype(), []):
            if handler[1](connection, event) == "NO MORE":
                return
    def _remove_connection(self, connection):
        """[Internal] Forget a connection and notify the external loop."""
        self.connections.remove(connection)
        if self.fn_to_remove_socket:
            self.fn_to_remove_socket(connection._get_socket())
_rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
class Connection:
    """Base class for IRC connections.
    Must be overridden.
    """
    def __init__(self, irclibobj):
        # Back-reference to the owning IRC instance (scheduler/dispatcher).
        self.irclibobj = irclibobj
    def _get_socket(self):
        # Subclasses must return their underlying socket object (or None).
        raise IRCError, "Not overridden"
    ##############################
    ### Convenience wrappers.
    def execute_at(self, at, function, arguments=()):
        # Delegate to the owning IRC object's scheduler.
        self.irclibobj.execute_at(at, function, arguments)
    def execute_delayed(self, delay, function, arguments=()):
        # Delegate to the owning IRC object's scheduler.
        self.irclibobj.execute_delayed(delay, function, arguments)
class ServerConnectionError(IRCError):
    # Raised when a server connection cannot be established.
    pass
class ServerNotConnectedError(ServerConnectionError):
    # Raised when data is sent on a connection that is not open.
    pass
# Huh!? Crrrrazy EFNet doesn't follow the RFC: their ircd seems to
# use \n as message separator! :P
_linesep_regexp = re.compile("\r?\n")  # accept both CR LF and bare LF
class ServerConnection(Connection):
    """This class represents an IRC server connection.
    ServerConnection objects are instantiated by calling the server
    method on an IRC object.
    """
    def __init__(self, irclibobj):
        Connection.__init__(self, irclibobj)
        self.connected = 0  # Not connected yet.
        self.socket = None
    def connect(self, server, port, nickname, password=None, username=None,
                ircname=None, localaddress="", localport=0):
        """Connect/reconnect to a server.
        Arguments:
            server -- Server name.
            port -- Port number.
            nickname -- The nickname.
            password -- Password (if any).
            username -- The username.
            ircname -- The IRC name ("realname").
            localaddress -- Bind the connection to a specific local IP address.
            localport -- Bind the connection to a specific local port.
        This function can be called to reconnect a closed connection.
        Returns the ServerConnection object.
        """
        if self.connected:
            self.disconnect("Changing servers")
        self.previous_buffer = ""
        self.handlers = {}
        self.real_server_name = ""
        self.real_nickname = nickname
        self.server = server
        self.port = port
        self.nickname = nickname
        self.username = username or nickname
        self.ircname = ircname or nickname
        self.password = password
        self.localaddress = localaddress
        self.localport = localport
        self.localhost = socket.gethostname()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Bind first (possibly to the wildcard address/port 0), then
            # connect; any socket error aborts the whole attempt.
            self.socket.bind((self.localaddress, self.localport))
            self.socket.connect((self.server, self.port))
        except socket.error, x:
            self.socket.close()
            self.socket = None
            raise ServerConnectionError, "Couldn't connect to socket: %s" % x
        self.connected = 1
        if self.irclibobj.fn_to_add_socket:
            self.irclibobj.fn_to_add_socket(self.socket)
        # Log on...
        if self.password:
            self.pass_(self.password)
        self.nick(self.nickname)
        self.user(self.username, self.ircname)
        return self
    def close(self):
        """Close the connection.
        This method closes the connection permanently; after it has
        been called, the object is unusable.
        """
        self.disconnect("Closing object")
        self.irclibobj._remove_connection(self)
    def _get_socket(self):
        """[Internal] Return the underlying socket (None when closed)."""
        return self.socket
    def get_server_name(self):
        """Get the (real) server name.
        This method returns the (real) server name, or, more
        specifically, what the server calls itself.
        """
        if self.real_server_name:
            return self.real_server_name
        else:
            return ""
    def get_nickname(self):
        """Get the (real) nick name.
        This method returns the (real) nickname.  The library keeps
        track of nick changes, so it might not be the nick name that
        was passed to the connect() method. """
        return self.real_nickname
    def process_data(self):
        """[Internal] Read available data and dispatch one event per
        complete IRC line."""
        try:
            new_data = self.socket.recv(2**14)
        except socket.error, x:
            # The server hung up.
            self.disconnect("Connection reset by peer")
            return
        if not new_data:
            # Read nothing: connection must be down.
            self.disconnect("Connection reset by peer")
            return
        lines = _linesep_regexp.split(self.previous_buffer + new_data)
        # Save the last, unfinished line.
        self.previous_buffer = lines[-1]
        lines = lines[:-1]
        for line in lines:
            if DEBUG:
                print "FROM SERVER:", line
            if not line:
                continue
            prefix = None
            command = None
            arguments = None
            self._handle_event(Event("all_raw_messages",
                                     self.get_server_name(),
                                     None,
                                     [line]))
            m = _rfc_1459_command_regexp.match(line)
            if m.group("prefix"):
                prefix = m.group("prefix")
                # The first prefix ever seen is taken as the server's name.
                if not self.real_server_name:
                    self.real_server_name = prefix
            if m.group("command"):
                command = m.group("command").lower()
            if m.group("argument"):
                # Split off the trailing parameter (after " :"); it is the
                # only argument allowed to contain spaces.
                a = m.group("argument").split(" :", 1)
                arguments = a[0].split()
                if len(a) == 2:
                    arguments.append(a[1])
            # Translate numerics into more readable strings.
            if command in numeric_events:
                command = numeric_events[command]
            if command == "nick":
                # Track our own nick changes.
                if nm_to_n(prefix) == self.real_nickname:
                    self.real_nickname = arguments[0]
            elif command == "welcome":
                # Record the nickname in case the client changed nick
                # in a nicknameinuse callback.
                self.real_nickname = arguments[0]
            if command in ["privmsg", "notice"]:
                target, message = arguments[0], arguments[1]
                messages = _ctcp_dequote(message)
                if command == "privmsg":
                    if is_channel(target):
                        command = "pubmsg"
                else:
                    if is_channel(target):
                        command = "pubnotice"
                    else:
                        command = "privnotice"
                for msg in messages:
                    if type(msg) is types.TupleType:
                        # A tuple from _ctcp_dequote means CTCP-tagged data.
                        if command in ["privmsg", "pubmsg"]:
                            command = "ctcp"
                        else:
                            command = "ctcpreply"
                        msg = list(msg)
                        if DEBUG:
                            print "command: %s, source: %s, target: %s, arguments: %s" % (
                                command, prefix, target, msg)
                        self._handle_event(Event(command, prefix, target, msg))
                        if command == "ctcp" and msg[0] == "ACTION":
                            self._handle_event(Event("action", prefix, target, msg[1:]))
                    else:
                        if DEBUG:
                            print "command: %s, source: %s, target: %s, arguments: %s" % (
                                command, prefix, target, [msg])
                        self._handle_event(Event(command, prefix, target, [msg]))
            else:
                target = None
                if command == "quit":
                    arguments = [arguments[0]]
                elif command == "ping":
                    target = arguments[0]
                else:
                    # First argument is the target; the rest are parameters.
                    target = arguments[0]
                    arguments = arguments[1:]
                if command == "mode":
                    # MODE on a non-channel target is a user-mode change.
                    if not is_channel(target):
                        command = "umode"
                if DEBUG:
                    print "command: %s, source: %s, target: %s, arguments: %s" % (
                        command, prefix, target, arguments)
                self._handle_event(Event(command, prefix, target, arguments))
    def _handle_event(self, event):
        """[Internal] Dispatch to global handlers first, then to handlers
        registered on this connection."""
        self.irclibobj._handle_event(self, event)
        if event.eventtype() in self.handlers:
            for fn in self.handlers[event.eventtype()]:
                fn(self, event)
    def is_connected(self):
        """Return connection status.
        Returns true if connected, otherwise false.
        """
        return self.connected
    def add_global_handler(self, *args):
        """Add global handler.
        See documentation for IRC.add_global_handler.
        """
        self.irclibobj.add_global_handler(*args)
    def remove_global_handler(self, *args):
        """Remove global handler.
        See documentation for IRC.remove_global_handler.
        """
        self.irclibobj.remove_global_handler(*args)
    def action(self, target, action):
        """Send a CTCP ACTION command."""
        self.ctcp("ACTION", target, action)
    def admin(self, server=""):
        """Send an ADMIN command."""
        self.send_raw(" ".join(["ADMIN", server]).strip())
    def ctcp(self, ctcptype, target, parameter=""):
        """Send a CTCP command."""
        ctcptype = ctcptype.upper()
        # CTCP messages are \001-delimited inside a normal PRIVMSG.
        self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or ""))
    def ctcp_reply(self, target, parameter):
        """Send a CTCP REPLY command."""
        self.notice(target, "\001%s\001" % parameter)
    def disconnect(self, message=""):
        """Hang up the connection.
        Arguments:
            message -- Quit message.
        """
        if not self.connected:
            return
        self.connected = 0
        self.quit(message)
        try:
            self.socket.close()
        except socket.error, x:
            pass
        self.socket = None
        self._handle_event(Event("disconnect", self.server, "", [message]))
    def globops(self, text):
        """Send a GLOBOPS command."""
        self.send_raw("GLOBOPS :" + text)
    def info(self, server=""):
        """Send an INFO command."""
        self.send_raw(" ".join(["INFO", server]).strip())
    def invite(self, nick, channel):
        """Send an INVITE command."""
        self.send_raw(" ".join(["INVITE", nick, channel]).strip())
    def ison(self, nicks):
        """Send an ISON command.
        Arguments:
            nicks -- List of nicks.
        """
        self.send_raw("ISON " + " ".join(nicks))
    def join(self, channel, key=""):
        """Send a JOIN command."""
        self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))
    def kick(self, channel, nick, comment=""):
        """Send a KICK command."""
        self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment))))
    def links(self, remote_server="", server_mask=""):
        """Send a LINKS command."""
        command = "LINKS"
        if remote_server:
            command = command + " " + remote_server
        if server_mask:
            command = command + " " + server_mask
        self.send_raw(command)
    def list(self, channels=None, server=""):
        """Send a LIST command."""
        command = "LIST"
        if channels:
            command = command + " " + ",".join(channels)
        if server:
            command = command + " " + server
        self.send_raw(command)
    def lusers(self, server=""):
        """Send a LUSERS command."""
        self.send_raw("LUSERS" + (server and (" " + server)))
    def mode(self, target, command):
        """Send a MODE command."""
        self.send_raw("MODE %s %s" % (target, command))
    def motd(self, server=""):
        """Send an MOTD command."""
        self.send_raw("MOTD" + (server and (" " + server)))
    def names(self, channels=None):
        """Send a NAMES command."""
        self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or ""))
    def nick(self, newnick):
        """Send a NICK command."""
        self.send_raw("NICK " + newnick)
    def notice(self, target, text):
        """Send a NOTICE command."""
        # Should limit len(text) here!
        self.send_raw("NOTICE %s :%s" % (target, text))
    def oper(self, nick, password):
        """Send an OPER command."""
        self.send_raw("OPER %s %s" % (nick, password))
    def part(self, channels, message=""):
        """Send a PART command."""
        # Accept either a single channel name or a list of them.
        if type(channels) == types.StringType:
            self.send_raw("PART " + channels + (message and (" " + message)))
        else:
            self.send_raw("PART " + ",".join(channels) + (message and (" " + message)))
    def pass_(self, password):
        """Send a PASS command."""
        self.send_raw("PASS " + password)
    def ping(self, target, target2=""):
        """Send a PING command."""
        self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))
    def pong(self, target, target2=""):
        """Send a PONG command."""
        self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))
    def privmsg(self, target, text):
        """Send a PRIVMSG command."""
        # Should limit len(text) here!
        self.send_raw("PRIVMSG %s :%s" % (target, text))
    def privmsg_many(self, targets, text):
        """Send a PRIVMSG command to multiple targets."""
        # Should limit len(text) here!
        self.send_raw("PRIVMSG %s :%s" % (",".join(targets), text))
    def quit(self, message=""):
        """Send a QUIT command."""
        # Note that many IRC servers don't use your QUIT message
        # unless you've been connected for at least 5 minutes!
        self.send_raw("QUIT" + (message and (" :" + message)))
    def sconnect(self, target, port="", server=""):
        """Send an SCONNECT command."""
        self.send_raw("CONNECT %s%s%s" % (target,
                                          port and (" " + port),
                                          server and (" " + server)))
    def send_raw(self, string):
        """Send raw string to the server.
        The string will be padded with appropriate CR LF.
        """
        if self.socket is None:
            raise ServerNotConnectedError, "Not connected."
        try:
            self.socket.send(string + "\r\n")
            if DEBUG:
                print "TO SERVER:", string
        except socket.error, x:
            # Ouch!
            self.disconnect("Connection reset by peer.")
    def squit(self, server, comment=""):
        """Send an SQUIT command."""
        self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))
    def stats(self, statstype, server=""):
        """Send a STATS command."""
        self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))
    def time(self, server=""):
        """Send a TIME command."""
        self.send_raw("TIME" + (server and (" " + server)))
    def topic(self, channel, new_topic=None):
        """Send a TOPIC command."""
        if new_topic is None:
            # Without an argument, TOPIC queries the current topic.
            self.send_raw("TOPIC " + channel)
        else:
            self.send_raw("TOPIC %s :%s" % (channel, new_topic))
    def trace(self, target=""):
        """Send a TRACE command."""
        self.send_raw("TRACE" + (target and (" " + target)))
    def user(self, username, realname):
        """Send a USER command."""
        self.send_raw("USER %s 0 * :%s" % (username, realname))
    def userhost(self, nicks):
        """Send a USERHOST command."""
        self.send_raw("USERHOST " + ",".join(nicks))
    def users(self, server=""):
        """Send a USERS command."""
        self.send_raw("USERS" + (server and (" " + server)))
    def version(self, server=""):
        """Send a VERSION command."""
        self.send_raw("VERSION" + (server and (" " + server)))
    def wallops(self, text):
        """Send a WALLOPS command."""
        self.send_raw("WALLOPS :" + text)
    def who(self, target="", op=""):
        """Send a WHO command."""
        self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))
    def whois(self, targets):
        """Send a WHOIS command."""
        self.send_raw("WHOIS " + ",".join(targets))
    def whowas(self, nick, max="", server=""):
        """Send a WHOWAS command."""
        self.send_raw("WHOWAS %s%s%s" % (nick,
                                         max and (" " + max),
                                         server and (" " + server)))
class DCCConnectionError(IRCError):
    # Raised when a DCC connection cannot be established or bound.
    pass
class DCCConnection(Connection):
    """This class represents a DCC connection.
    DCCConnection objects are instantiated by calling the dcc
    method on an IRC object.
    """
    def __init__(self, irclibobj, dcctype):
        Connection.__init__(self, irclibobj)
        self.connected = 0
        # passive == 1 means we listen() and wait for the peer to connect.
        self.passive = 0
        self.dcctype = dcctype
        self.peeraddress = None
        self.peerport = None
    def connect(self, address, port):
        """Connect/reconnect to a DCC peer.
        Arguments:
            address -- Host/IP address of the peer.
            port -- The port number to connect to.
        Returns the DCCConnection object.
        """
        self.peeraddress = socket.gethostbyname(address)
        self.peerport = port
        self.socket = None
        self.previous_buffer = ""
        self.handlers = {}
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.passive = 0
        try:
            self.socket.connect((self.peeraddress, self.peerport))
        except socket.error, x:
            raise DCCConnectionError, "Couldn't connect to socket: %s" % x
        self.connected = 1
        if self.irclibobj.fn_to_add_socket:
            self.irclibobj.fn_to_add_socket(self.socket)
        return self
    def listen(self):
        """Wait for a connection/reconnection from a DCC peer.
        Returns the DCCConnection object.
        The local IP address and port are available as
        self.localaddress and self.localport.  After connection from a
        peer, the peer address and port are available as
        self.peeraddress and self.peerport.
        """
        self.previous_buffer = ""
        self.handlers = {}
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.passive = 1
        try:
            # Bind to port 0: the OS picks a free ephemeral port, which we
            # then report via getsockname().
            self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
            self.localaddress, self.localport = self.socket.getsockname()
            self.socket.listen(10)
        except socket.error, x:
            raise DCCConnectionError, "Couldn't bind socket: %s" % x
        return self
    def disconnect(self, message=""):
        """Hang up the connection and close the object.
        Arguments:
            message -- Quit message.
        """
        if not self.connected:
            return
        self.connected = 0
        try:
            self.socket.close()
        except socket.error, x:
            pass
        self.socket = None
        self.irclibobj._handle_event(
            self,
            Event("dcc_disconnect", self.peeraddress, "", [message]))
        self.irclibobj._remove_connection(self)
    def process_data(self):
        """[Internal] Accept a pending peer (passive mode) or read and
        dispatch incoming data."""
        if self.passive and not self.connected:
            # First readiness notification on a listening socket is the
            # peer connecting: accept and swap in the accepted socket.
            conn, (self.peeraddress, self.peerport) = self.socket.accept()
            self.socket.close()
            self.socket = conn
            self.connected = 1
            if DEBUG:
                print "DCC connection from %s:%d" % (
                    self.peeraddress, self.peerport)
            self.irclibobj._handle_event(
                self,
                Event("dcc_connect", self.peeraddress, None, None))
            return
        try:
            new_data = self.socket.recv(2**14)
        except socket.error, x:
            # The server hung up.
            self.disconnect("Connection reset by peer")
            return
        if not new_data:
            # Read nothing: connection must be down.
            self.disconnect("Connection reset by peer")
            return
        if self.dcctype == "chat":
            # The specification says lines are terminated with LF, but
            # it seems safer to handle CR LF terminations too.
            chunks = _linesep_regexp.split(self.previous_buffer + new_data)
            # Save the last, unfinished line.
            self.previous_buffer = chunks[-1]
            if len(self.previous_buffer) > 2**14:
                # Bad peer! Naughty peer!
                self.disconnect()
                return
            chunks = chunks[:-1]
        else:
            chunks = [new_data]
        command = "dccmsg"
        prefix = self.peeraddress
        target = None
        for chunk in chunks:
            if DEBUG:
                print "FROM PEER:", chunk
            arguments = [chunk]
            if DEBUG:
                print "command: %s, source: %s, target: %s, arguments: %s" % (
                    command, prefix, target, arguments)
            self.irclibobj._handle_event(
                self,
                Event(command, prefix, target, arguments))
    def _get_socket(self):
        """[Internal] Return the underlying socket (None when closed)."""
        return self.socket
    def privmsg(self, string):
        """Send data to DCC peer.
        The string will be padded with appropriate LF if it's a DCC
        CHAT session.
        """
        try:
            self.socket.send(string)
            if self.dcctype == "chat":
                self.socket.send("\n")
            if DEBUG:
                print "TO PEER: %s\n" % string
        except socket.error, x:
            # Ouch!
            self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
    """A simple single-server IRC client class.
    This is an example of an object-oriented wrapper of the IRC
    framework.  A real IRC client can be made by subclassing this
    class and adding appropriate methods.
    The method on_join will be called when a "join" event is created
    (which is done when the server sends a JOIN message/command),
    on_privmsg will be called for "privmsg" events, and so on.  The
    handler methods get two arguments: the connection object (same as
    self.connection) and the event object.
    Instance attributes that can be used by sub classes:
        ircobj -- The IRC instance.
        connection -- The ServerConnection instance.
        dcc_connections -- A list of DCCConnection instances.
    """
    def __init__(self):
        self.ircobj = IRC()
        self.connection = self.ircobj.server()
        self.dcc_connections = []
        # Route every event through _dispatcher, which forwards to
        # on_<eventtype> methods on this instance when they exist.
        self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
        self.ircobj.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
    def _dispatcher(self, c, e):
        """[Internal] Forward an event to the matching on_<eventtype>
        method, if defined."""
        msg = "on_" + e.eventtype()
        if hasattr(self, msg):
            getattr(self, msg)(c, e)
    def _dcc_disconnect(self, c, e):
        # Drop closed DCC connections from our bookkeeping list.
        self.dcc_connections.remove(c)
    def connect(self, server, port, nickname, password=None, username=None,
                ircname=None, localaddress="", localport=0):
        """Connect/reconnect to a server.
        Arguments:
            server -- Server name.
            port -- Port number.
            nickname -- The nickname.
            password -- Password (if any).
            username -- The username.
            ircname -- The IRC name.
            localaddress -- Bind the connection to a specific local IP address.
            localport -- Bind the connection to a specific local port.
        This function can be called to reconnect a closed connection.
        """
        self.connection.connect(server, port, nickname,
                                password, username, ircname,
                                localaddress, localport)
    def dcc_connect(self, address, port, dcctype="chat"):
        """Connect to a DCC peer.
        Arguments:
            address -- IP address of the peer.
            port -- Port to connect to.
        Returns a DCCConnection instance.
        """
        dcc = self.ircobj.dcc(dcctype)
        self.dcc_connections.append(dcc)
        dcc.connect(address, port)
        return dcc
    def dcc_listen(self, dcctype="chat"):
        """Listen for connections from a DCC peer.
        Returns a DCCConnection instance.
        """
        dcc = self.ircobj.dcc(dcctype)
        self.dcc_connections.append(dcc)
        dcc.listen()
        return dcc
    def start(self):
        """Start the IRC client."""
        # Blocks forever, dispatching events as they arrive.
        self.ircobj.process_forever()
class Event:
    """Class representing an IRC event."""
    def __init__(self, eventtype, source, target, arguments=None):
        """Constructor of Event objects.
        Arguments:
            eventtype -- A string describing the event.
            source -- The originator of the event (a nick mask or a server).
            target -- The target of the event (a nick or a channel).
            arguments -- Any event specific arguments.
        """
        # All fields are normalized to unicode on the way in.
        # NOTE(review): some call sites pass target=None (e.g. dcc_connect,
        # all_raw_messages); to_unicode() appears to raise TypeError for
        # None with the default FALLBACK_CHARSET -- confirm.
        self._eventtype = to_unicode(eventtype)
        self._source = to_unicode(source)
        self._target = to_unicode(target)
        if arguments:
            self._arguments = [to_unicode(arg) for arg in arguments]
        else:
            self._arguments = []
    def eventtype(self):
        """Get the event type."""
        return self._eventtype
    def source(self):
        """Get the event source."""
        return self._source
    def target(self):
        """Get the event target."""
        return self._target
    def arguments(self):
        """Get the event arguments."""
        return self._arguments
_LOW_LEVEL_QUOTE = "\020"   # CTCP low-level quote character (Ctrl-P)
_CTCP_LEVEL_QUOTE = "\134"  # CTCP-level quote character (backslash)
_CTCP_DELIMITER = "\001"    # delimits tagged CTCP data (Ctrl-A)
# Low-level dequoting table: \020X -> the real character.
_low_level_mapping = {
    "0": "\000",
    "n": "\n",
    "r": "\r",
    _LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE
}
_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
def mask_matches(nick, mask):
    """Check if a nick matches a mask.

    The mask may contain the IRC wildcards "*" (any run of characters)
    and "?" (any single character); matching is case-insensitive and
    uses IRC case-folding rules.
    Returns true if the nick matches, otherwise false.
    """
    nick = irc_lower(nick)
    mask = irc_lower(mask)
    # Escape regex metacharacters so the mask only behaves like a glob.
    mask = mask.replace("\\", "\\\\")
    for char in ".$|[](){}+":
        mask = mask.replace(char, "\\" + char)
    mask = mask.replace("?", ".")
    mask = mask.replace("*", ".*")
    # Anchor the pattern at the end as well: re.match() only anchors the
    # start, so without the trailing "$" the mask "foo" would wrongly
    # match the nick "foobar".
    regex = re.compile(mask + "$", re.IGNORECASE)
    return regex.match(nick)
# Characters allowed in nicknames besides letters and digits (RFC 1459).
_SPECIAL = "-[]\\`^{}"
NICK_CHARACTERS = string.ascii_letters + string.digits + _SPECIAL
# IRC case-folding table: []\^ are the "uppercase" forms of {}|~.
_IRCSTRING_TRANSLATION = string.maketrans(string.ascii_uppercase + "[]\\^",
                                          string.ascii_lowercase + "{}|~")
def irc_lower(s):
    """Returns a lowercased string.
    The definition of lowercased comes from the IRC specification (RFC
    1459): besides ASCII letters, []\\^ fold to {}|~ respectively.
    """
    return s.translate(_IRCSTRING_TRANSLATION)
def _ctcp_dequote(message):
    """[Internal] Dequote a message according to CTCP specifications.

    The function returns a list where each element can be either a
    string (normal message) or a tuple of one or two strings (tagged
    messages).  If a tuple has only one element (ie is a singleton),
    that element is the tag; otherwise the tuple has two elements: the
    tag and the data.

    Arguments:

        message -- The message to be decoded.
    """
    def _low_level_replace(match_obj):
        ch = match_obj.group(1)
        # If low_level_mapping doesn't have the character as key, we
        # should just return the character.
        return _low_level_mapping.get(ch, ch)
    if _LOW_LEVEL_QUOTE in message:
        # Yup, there was a quote.  Release the dequoter, man!
        message = _low_level_regexp.sub(_low_level_replace, message)
    if _CTCP_DELIMITER not in message:
        # No tagged (CTCP) data at all -- one plain message.
        return [message]
    else:
        # Split it into parts.  (Does any IRC client actually *use*
        # CTCP stacking like this?)
        chunks = message.split(_CTCP_DELIMITER)
        messages = []
        i = 0
        # Chunks alternate: plain text, tagged data, plain text, ...
        while i < len(chunks)-1:
            # Add message if it's non-empty.
            if len(chunks[i]) > 0:
                messages.append(chunks[i])
            if i < len(chunks)-2:
                # Aye!  CTCP tagged data ahead!  Tag and optional data
                # are separated by the first space.
                messages.append(tuple(chunks[i+1].split(" ", 1)))
            i = i + 2
        if len(chunks) % 2 == 0:
            # Hey, a lonely _CTCP_DELIMITER at the end!  This means
            # that the last chunk, including the delimiter, is a
            # normal message!  (This is according to the CTCP
            # specification.)
            messages.append(_CTCP_DELIMITER + chunks[-1])
        return messages
def is_channel(string):
    """Check if a string is a channel name.

    Returns true if the argument is a channel name, otherwise false.

    NOTE(review): the parameter name shadows the stdlib ``string``
    module inside this function; kept for interface compatibility.
    """
    # An empty/falsy value is returned as-is (falsy), matching the
    # short-circuit behaviour of "string and ...".
    if not string:
        return string
    return string[0] in "#&+!"
def ip_numstr_to_quad(num):
    """Convert an IP number as an integer given in ASCII
    representation (e.g. '3232235521') to an IP address string
    (e.g. '192.168.0.1').

    Arguments:

        num -- the IP number as a decimal string.
    """
    # int() replaces the Python 2-only long(); on Python 2 ints are
    # promoted to long automatically, so behaviour is unchanged.
    n = int(num)
    # Extract the four octets, most significant first.
    octets = [str((n >> shift) & 0xFF) for shift in (24, 16, 8, 0)]
    return ".".join(octets)
def ip_quad_to_numstr(quad):
    """Convert an IP address string (e.g. '192.168.0.1') to an IP
    number as an integer given in ASCII representation
    (e.g. '3232235521')."""
    # int() replaces the Python 2-only long(); Python 2 promotes to
    # long automatically when the value requires it.
    p = [int(part) for part in quad.split(".")]
    s = str((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
    # On Python 2 the result may be a long, whose str() ends in "L".
    if s[-1] == "L":
        s = s[:-1]
    return s
def nm_to_n(s):
    """Get the nick part of a nickmask.

    (The source of an Event is a nickmask.)
    """
    # Everything before the first "!"; the whole string if there is none.
    return s.partition("!")[0]
def nm_to_uh(s):
    """Get the userhost part of a nickmask.

    (The source of an Event is a nickmask.)
    """
    # Second "!"-separated field; raises IndexError if the mask has no
    # "!", matching the original behaviour.
    fields = s.split("!")
    return fields[1]
def nm_to_h(s):
    """Get the host part of a nickmask.

    (The source of an Event is a nickmask.)
    """
    # Second "@"-separated field; raises IndexError if there is no "@".
    fields = s.split("@")
    return fields[1]
def nm_to_u(s):
    """Get the user part of a nickmask.

    (The source of an Event is a nickmask.)
    """
    # Take the userhost part, then everything before the "@".
    userhost = s.split("!")[1]
    return userhost.split("@")[0]
def parse_nick_modes(mode_string):
    """Parse a nick mode string.

    The function returns a list of lists with three members: sign,
    mode and argument.  The sign is \"+\" or \"-\".  The argument is
    always None.

    Example:

    >>> irclib.parse_nick_modes(\"+ab-c\")
    [['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
    """
    # Nick modes never take arguments, so no unary modes are passed.
    return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
    """Parse a channel mode string.

    The function returns a list of lists with three members: sign,
    mode and argument.  The sign is \"+\" or \"-\".  The argument is
    None if mode isn't one of \"b\", \"k\", \"l\", \"v\" or \"o\".

    Example:

    >>> irclib.parse_channel_modes(\"+ab-c foo\")
    [['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
    """
    # "b", "k", "l", "v" and "o" each consume one argument.
    return _parse_modes(mode_string, "bklvo")
def _parse_modes(mode_string, unary_modes=""):
"""[Internal]"""
modes = []
arg_count = 0
# State variable.
sign = ""
a = mode_string.split()
if len(a) == 0:
return []
else:
mode_part, args = a[0], a[1:]
if mode_part[0] not in "+-":
return []
for ch in mode_part:
if ch in "+-":
sign = ch
elif ch == " ":
collecting_arguments = 1
elif ch in unary_modes:
if len(args) >= arg_count + 1:
modes.append([sign, ch, args[arg_count]])
arg_count = arg_count + 1
else:
modes.append([sign, ch, None])
else:
modes.append([sign, ch, None])
return modes
def _ping_ponger(connection, event):
    """[Internal] Reply to a ping by sending a pong that echoes the
    event's target back to the server."""
    connection.pong(event.target())
# Numeric table mostly stolen from the Perl IRC module (Net::IRC).
# Maps three-digit IRC numeric reply codes to symbolic event names.
numeric_events = {
    "001": "welcome",
    "002": "yourhost",
    "003": "created",
    "004": "myinfo",
    "005": "featurelist",  # XXX
    "200": "tracelink",
    "201": "traceconnecting",
    "202": "tracehandshake",
    "203": "traceunknown",
    "204": "traceoperator",
    "205": "traceuser",
    "206": "traceserver",
    "207": "traceservice",
    "208": "tracenewtype",
    "209": "traceclass",
    "210": "tracereconnect",
    "211": "statslinkinfo",
    "212": "statscommands",
    "213": "statscline",
    "214": "statsnline",
    "215": "statsiline",
    "216": "statskline",
    "217": "statsqline",
    "218": "statsyline",
    "219": "endofstats",
    "221": "umodeis",
    "231": "serviceinfo",
    "232": "endofservices",
    "233": "service",
    "234": "servlist",
    "235": "servlistend",
    "241": "statslline",
    "242": "statsuptime",
    "243": "statsoline",
    "244": "statshline",
    "250": "luserconns",
    "251": "luserclient",
    "252": "luserop",
    "253": "luserunknown",
    "254": "luserchannels",
    "255": "luserme",
    "256": "adminme",
    "257": "adminloc1",
    "258": "adminloc2",
    "259": "adminemail",
    "261": "tracelog",
    "262": "endoftrace",
    "263": "tryagain",
    "265": "n_local",
    "266": "n_global",
    "300": "none",
    "301": "away",
    "302": "userhost",
    "303": "ison",
    "305": "unaway",
    "306": "nowaway",
    "311": "whoisuser",
    "312": "whoisserver",
    "313": "whoisoperator",
    "314": "whowasuser",
    "315": "endofwho",
    "316": "whoischanop",
    "317": "whoisidle",
    "318": "endofwhois",
    "319": "whoischannels",
    "321": "liststart",
    "322": "list",
    "323": "listend",
    "324": "channelmodeis",
    "329": "channelcreate",
    "331": "notopic",
    "332": "currenttopic",
    "333": "topicinfo",
    "341": "inviting",
    "342": "summoning",
    "346": "invitelist",
    "347": "endofinvitelist",
    "348": "exceptlist",
    "349": "endofexceptlist",
    "351": "version",
    "352": "whoreply",
    "353": "namreply",
    "361": "killdone",
    "362": "closing",
    "363": "closeend",
    "364": "links",
    "365": "endoflinks",
    "366": "endofnames",
    "367": "banlist",
    "368": "endofbanlist",
    "369": "endofwhowas",
    "371": "info",
    "372": "motd",
    "373": "infostart",
    "374": "endofinfo",
    "375": "motdstart",
    "376": "endofmotd",
    "377": "motd2",  # 1997-10-16 -- tkil
    "381": "youreoper",
    "382": "rehashing",
    "384": "myportis",
    "391": "time",
    "392": "usersstart",
    "393": "users",
    "394": "endofusers",
    "395": "nousers",
    "401": "nosuchnick",
    "402": "nosuchserver",
    "403": "nosuchchannel",
    "404": "cannotsendtochan",
    "405": "toomanychannels",
    "406": "wasnosuchnick",
    "407": "toomanytargets",
    "409": "noorigin",
    "411": "norecipient",
    "412": "notexttosend",
    "413": "notoplevel",
    "414": "wildtoplevel",
    "421": "unknowncommand",
    "422": "nomotd",
    "423": "noadmininfo",
    "424": "fileerror",
    "431": "nonicknamegiven",
    "432": "erroneusnickname",  # Thiss iz how its speld in thee RFC.
    "433": "nicknameinuse",
    "436": "nickcollision",
    "437": "unavailresource",  # "Nick temporally unavailable"
    "441": "usernotinchannel",
    "442": "notonchannel",
    "443": "useronchannel",
    "444": "nologin",
    "445": "summondisabled",
    "446": "usersdisabled",
    "451": "notregistered",
    "461": "needmoreparams",
    "462": "alreadyregistered",
    "463": "nopermforhost",
    "464": "passwdmismatch",
    "465": "yourebannedcreep",  # I love this one...
    "466": "youwillbebanned",
    "467": "keyset",
    "471": "channelisfull",
    "472": "unknownmode",
    "473": "inviteonlychan",
    "474": "bannedfromchan",
    "475": "badchannelkey",
    "476": "badchanmask",
    "477": "nochanmodes",  # "Channel doesn't support modes"
    "478": "banlistfull",
    "481": "noprivileges",
    "482": "chanoprivsneeded",
    "483": "cantkillserver",
    "484": "restricted",  # Connection is restricted
    "485": "uniqopprivsneeded",
    "491": "nooperhost",
    "492": "noservicehost",
    "501": "umodeunknownflag",
    "502": "usersdontmatch",
}
# Events synthesized by this library rather than received from a server.
generated_events = [
    # Generated events
    "dcc_connect",
    "dcc_disconnect",
    "dccmsg",
    "disconnect",
    "ctcp",
    "ctcpreply",
]
# Events corresponding to IRC protocol commands.
protocol_events = [
    # IRC protocol events
    "error",
    "join",
    "kick",
    "mode",
    "part",
    "ping",
    "privmsg",
    "privnotice",
    "pubmsg",
    "pubnotice",
    "quit",
    "invite",
    "pong",
]
# NOTE(review): dict.values() returns a list on Python 2; on Python 3
# this concatenation would need list(numeric_events.values()).
all_events = generated_events + protocol_events + numeric_events.values()
| bsd-3-clause |
pmoravec/sos | sos/report/plugins/systemd.py | 2 | 3548 | # Copyright (C) 2012 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, IndependentPlugin, SoSPredicate
from sos.utilities import is_executable
class Systemd(Plugin, IndependentPlugin):
    """sos plugin collecting systemd configuration, unit state and
    related command output."""
    short_desc = 'System management daemon'
    plugin_name = "systemd"
    profiles = ('system', 'services', 'boot')
    # Triggered when the systemd package or /run/systemd/system exists.
    packages = ('systemd',)
    files = ('/run/systemd/system',)
    def setup(self):
        """Register file tags, commands and copy specs to collect."""
        # Tag config files so downstream tooling (Insights) can locate them.
        self.add_file_tags({
            '/etc/systemd/journald.conf.*': 'insights_etc_journald_conf',
            '/usr/lib/systemd/journald.conf.*': 'insights_usr_journald_conf_d',
            '/etc/systemd/system.conf': 'insights_systemd_system_conf',
            '/etc/systemd/logind.conf': 'insights_systemd_logind_conf'
        })
        self.add_cmd_output([
            "systemctl status --all",
            "systemctl show --all",
            "systemctl show *service --all",
            # It is possible to do systemctl show with target, slice,
            # device, socket, scope, and mount too but service and
            # status --all mostly seems to cover the others.
            "systemctl list-units",
            "systemctl list-units --failed",
            "systemctl list-unit-files",
            "systemctl list-jobs",
            "systemctl list-dependencies",
            "systemctl list-timers --all",
            "systemctl list-machines",
            "systemctl show-environment",
            "systemd-delta",
            "systemd-analyze",
            "systemd-analyze blame",
            "systemd-analyze dump",
            "systemd-inhibit --list",
            "journalctl --list-boots",
            "ls -lR /lib/systemd"
        ])
        # timedatectl output doubles as the canonical 'date' symlink.
        self.add_cmd_output('timedatectl', root_symlink='date')
        # resolvectl command starts systemd-resolved service if that
        # is not running, so gate the commands by this predicate
        if is_executable('resolvectl'):
            resolvectl_status = 'resolvectl status'
            resolvectl_statistics = 'resolvectl statistics'
        else:
            # Older systemd releases ship systemd-resolve instead.
            resolvectl_status = 'systemd-resolve --status'
            resolvectl_statistics = 'systemd-resolve --statistics'
        self.add_cmd_output([
            resolvectl_status,
            resolvectl_statistics,
        ], pred=SoSPredicate(self, services=["systemd-resolved"]))
        self.add_cmd_output("systemd-analyze plot",
                            suggest_filename="systemd-analyze_plot.svg")
        # Journal verification is expensive, so it is opt-in.
        if self.get_option("verify"):
            self.add_cmd_output("journalctl --verify")
        self.add_copy_spec([
            "/etc/systemd",
            "/lib/systemd/system",
            "/lib/systemd/user",
            "/etc/vconsole.conf",
            "/run/systemd/generator*",
            "/run/systemd/seats",
            "/run/systemd/sessions",
            "/run/systemd/system",
            "/run/systemd/users",
            "/etc/modules-load.d/*.conf",
            "/etc/yum/protected.d/systemd.conf",
            "/etc/tmpfiles.d/*.conf",
            "/run/tmpfiles.d/*.conf",
            "/usr/lib/tmpfiles.d/*.conf",
        ])
        self.add_forbidden_path('/dev/null')
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
vaidap/zulip | zerver/lib/url_preview/preview.py | 4 | 2287 | from __future__ import absolute_import
import re
import logging
import traceback
from typing import Any, Optional, Text
from typing.re import Match
import requests
from zerver.lib.cache import cache_with_key, get_cache_with_key
from zerver.lib.url_preview.oembed import get_oembed_data
from zerver.lib.url_preview.parsers import OpenGraphParser, GenericParser
from django.utils.encoding import smart_text
# Name of the cache backend used for URL-preview results.
CACHE_NAME = "database"
# Based on django.core.validators.URLValidator, with ftp support removed.
# Only http:// and https:// URLs are accepted.
link_regex = re.compile(
    r'^(?:http)s?://' # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
    r'(?::\d+)?' # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def is_link(url):
    # type: (Text) -> Optional[Match[Text]]
    # The return annotation is Optional: re.match returns None when the
    # URL does not look like an http(s) link.
    return link_regex.match(smart_text(url))
def cache_key_func(url):
    # type: (Text) -> Text
    """Use the URL itself as the cache key for preview data."""
    return url
@cache_with_key(cache_key_func, cache_name=CACHE_NAME, with_statsd_key="urlpreview_data")
def get_link_embed_data(url, maxwidth=640, maxheight=480):
    # type: (Text, Optional[int], Optional[int]) -> Any
    """Fetch preview metadata (title, description, image, ...) for url.

    Sources are consulted in order, later ones only filling in fields
    the earlier ones left empty:
      1. OEmbed
      2. Open Graph
      3. Generic meta tags
    Returns None if the URL is invalid or the OEmbed fetch fails.
    """
    if not is_link(url):
        return None
    # Fetch information from URL.
    # We are using three sources in next order:
    # 1. OEmbed
    # 2. Open Graph
    # 3. Meta tags
    try:
        data = get_oembed_data(url, maxwidth=maxwidth, maxheight=maxheight)
    except requests.exceptions.RequestException:
        msg = 'Unable to fetch information from url {0}, traceback: {1}'
        logging.error(msg.format(url, traceback.format_exc()))
        return None
    data = data or {}
    # Bound the request: requests has no default timeout, so without
    # one a slow remote host could hang the caller indefinitely.
    response = requests.get(url, timeout=15)
    if response.ok:
        og_data = OpenGraphParser(response.text).extract_data()
        if og_data:
            data.update(og_data)
        # Generic meta tags only fill fields still missing.
        generic_data = GenericParser(response.text).extract_data() or {}
        for key in ['title', 'description', 'image']:
            if not data.get(key) and generic_data.get(key):
                data[key] = generic_data[key]
    return data
@get_cache_with_key(cache_key_func, cache_name=CACHE_NAME)
def link_embed_data_from_cache(url, maxwidth=640, maxheight=480):
    # type: (Text, Optional[int], Optional[int]) -> Any
    """Look up previously-cached link-embed data for url.

    The body is intentionally empty: the @get_cache_with_key decorator
    performs the cache lookup itself (presumably raising/returning on a
    miss -- see get_cache_with_key), so this function is only useful
    for its decorated behaviour.
    """
    return
| apache-2.0 |
Climbee/artcoin | contrib/pyminer/pyminer.py | 20 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15        # seconds to sleep after an RPC failure before retrying
MAX_NONCE = 1000000L  # initial nonce scan range (Python 2 long literal)
settings = {}         # populated from the config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible daemon
    (Python 2: uses httplib and print statements)."""
    OBJID = 1
    def __init__(self, host, port, username, password):
        # HTTP Basic auth header, reused on every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Plain HTTP connection with a 30-second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC request; return the result value, the
        server's error object, or None on a malformed response."""
        # NOTE(review): "self.OBJID += 1" reads the class attribute but
        # writes an instance attribute, so request ids are per-instance.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        """Issue the 'getblockcount' RPC."""
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        """Issue the 'getwork' RPC: fetch work when data is None,
        submit a solution otherwise."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    # 0xffffffff without the Python 2-only "L" suffix: same value on
    # Python 2 (auto-promoted to long when needed) and valid on Python 3.
    return x & 0xffffffff
def bytereverse(x):
    """Return the 32-bit value x with its byte order reversed."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    # Truncate to an unsigned 32-bit value.
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of in_buf (a Python 2 byte string).

    Assumes len(in_buf) is a multiple of 4 -- TODO confirm callers
    guarantee this.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        # '@I' unpacks one native-endian 32-bit unsigned word.
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Return in_buf with the order of its 4-byte words reversed
    (the bytes inside each word are untouched)."""
    words = [in_buf[i:i+4] for i in range(0, len(in_buf), 4)]
    words.reverse()
    return ''.join(words)
class Miner:
    """A single mining worker: fetches work over RPC, scans nonces and
    submits proofs-of-work (Python 2 code: hex codec, long, xrange)."""
    def __init__(self, id):
        self.id = id
        # Nonces scanned per getwork; adapted each pass in iterate().
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan nonces against the given work unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce or None if no solution was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1] # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex characters 152-159 of the work data.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """Fetch one work unit, scan it, and submit any solution."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                         work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Adapt the scan range so one pass takes ~settings['scantime'] seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Connect over RPC and iterate work units forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: run one Miner forever."""
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the config file into the global settings dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Apply defaults for anything the config file did not provide.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9335
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Normalize string values read from the config file.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Spawn one mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
chiragjogi/odoo | openerp/report/render/odt2odt/odt2odt.py | 443 | 2265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render.rml2pdf import utils
import copy
class odt2odt(object):
    """Render an ODT element tree by substituting template expressions
    in text/tail via utils._process_text()."""
    def __init__(self, odt, localcontext):
        # localcontext supplies the values visible to the template
        # expressions evaluated by utils._process_text().
        self.localcontext = localcontext
        self.etree = odt
        self._node = None
    def render(self):
        """Return a deep copy of the tree with all text processed."""
        def process_text(node,new_node):
            for child in utils._child_get(node, self):
                new_child = copy.deepcopy(child)
                new_node.append(new_child)
                if len(child):
                    # NOTE(review): children of new_child are removed
                    # while iterating over it; with etree this skips
                    # every other element -- presumably intended to
                    # empty the copy before recursing, but worth
                    # confirming against real documents.
                    for n in new_child:
                        new_child.text = utils._process_text(self, child.text)
                        new_child.tail = utils._process_text(self, child.tail)
                        new_child.remove(n)
                    process_text(child, new_child)
                else:
                    new_child.text = utils._process_text(self, child.text)
                    new_child.tail = utils._process_text(self, child.tail)
        # Start from an empty deep copy of the root, then repopulate it.
        self._node = copy.deepcopy(self.etree)
        for n in self._node:
            self._node.remove(n)
        process_text(self.etree, self._node)
        return self._node
def parseNode(node, localcontext=None):
    """Render ``node`` (an ODT etree) using ``localcontext`` for
    template substitutions.

    ``localcontext`` defaults to an empty dict.  A None sentinel is
    used instead of ``{}`` so the default dict is not shared between
    calls (mutable default argument pitfall).
    """
    if localcontext is None:
        localcontext = {}
    r = odt2odt(node, localcontext)
    return r.render()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lattwood/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/executive_unittest.py | 124 | 13048 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import errno
import signal
import subprocess
import sys
import time
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
third_party_py = os.path.join(script_dir, "webkitpy", "thirdparty", "autoinstalled")
if third_party_py not in sys.path:
sys.path.append(third_party_py)
import unittest2 as unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem_mock import MockFileSystem
class ScriptErrorTest(unittest.TestCase):
    """Tests for ScriptError.message_with_output() formatting."""
    def test_message_with_output(self):
        error = ScriptError('My custom message!', '', -1)
        self.assertEqual(error.message_with_output(), 'My custom message!')
        error = ScriptError('My custom message!', '', -1, 'My output.')
        self.assertEqual(error.message_with_output(), 'My custom message!\n\nMy output.')
        error = ScriptError('', 'my_command!', -1, 'My output.', '/Users/username/blah')
        self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
        # Output longer than 500 characters is truncated to the last 500.
        error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
        self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1\n\nLast 500 characters of output:\nb' + '1' * 499)
    def test_message_with_tuple(self):
        # A tuple command is rendered with its repr, not joined.
        error = ScriptError('', ('my', 'command'), -1, 'My output.', '/Users/username/blah')
        self.assertEqual(error.message_with_output(), 'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
def never_ending_command():
    """Return argv for a process that never terminates on its own.

    Useful for testing process killing.  The command should be unlikely
    to already be running, because every instance of it gets killed.
    """
    return ['wmic'] if sys.platform == 'win32' else ['yes']
def command_line(cmd, *args):
    """Build an argv that re-runs this script as a '--cmd' helper."""
    argv = [sys.executable, __file__, '--' + cmd]
    argv.extend(args)
    return argv
class ExecutiveTest(unittest.TestCase):
def assert_interpreter_for_content(self, intepreter, content):
fs = MockFileSystem()
tempfile, temp_name = fs.open_binary_tempfile('')
tempfile.write(content)
tempfile.close()
file_interpreter = Executive.interpreter_for_script(temp_name, fs)
self.assertEqual(file_interpreter, intepreter)
def test_interpreter_for_script(self):
self.assert_interpreter_for_content(None, '')
self.assert_interpreter_for_content(None, 'abcd\nefgh\nijklm')
self.assert_interpreter_for_content(None, '##/usr/bin/perl')
self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl')
self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl\nfirst\nsecond')
self.assert_interpreter_for_content('perl', '#!/usr/bin/perl')
self.assert_interpreter_for_content('perl', '#!/usr/bin/perl -w')
self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python')
self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python\nfirst\nsecond')
self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/python')
self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby')
self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby\nfirst\nsecond')
self.assert_interpreter_for_content('ruby', '#!/usr/bin/ruby')
def test_run_command_with_bad_command(self):
def run_bad_command():
Executive().run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True)
self.assertRaises(OSError, run_bad_command)
def test_run_command_args_type(self):
executive = Executive()
self.assertRaises(AssertionError, executive.run_command, "echo")
self.assertRaises(AssertionError, executive.run_command, u"echo")
executive.run_command(command_line('echo', 'foo'))
executive.run_command(tuple(command_line('echo', 'foo')))
def test_auto_stringify_args(self):
executive = Executive()
executive.run_command(command_line('echo', 1))
executive.popen(command_line('echo', 1), stdout=executive.PIPE).wait()
self.assertEqual('echo 1', executive.command_for_printing(['echo', 1]))
def test_popen_args(self):
executive = Executive()
# Explicitly naming the 'args' argument should not thow an exception.
executive.popen(args=command_line('echo', 1), stdout=executive.PIPE).wait()
def test_run_command_with_unicode(self):
"""Validate that it is safe to pass unicode() objects
to Executive.run* methods, and they will return unicode()
objects by default unless decode_output=False"""
unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
if sys.platform == 'win32':
encoding = 'mbcs'
else:
encoding = 'utf-8'
encoded_tor = unicode_tor_input.encode(encoding)
# On Windows, we expect the unicode->mbcs->unicode roundtrip to be
# lossy. On other platforms, we expect a lossless roundtrip.
if sys.platform == 'win32':
unicode_tor_output = encoded_tor.decode(encoding)
else:
unicode_tor_output = unicode_tor_input
executive = Executive()
output = executive.run_command(command_line('cat'), input=unicode_tor_input)
self.assertEqual(output, unicode_tor_output)
output = executive.run_command(command_line('echo', unicode_tor_input))
self.assertEqual(output, unicode_tor_output)
output = executive.run_command(command_line('echo', unicode_tor_input), decode_output=False)
self.assertEqual(output, encoded_tor)
# Make sure that str() input also works.
output = executive.run_command(command_line('cat'), input=encoded_tor, decode_output=False)
self.assertEqual(output, encoded_tor)
# FIXME: We should only have one run* method to test
output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True)
self.assertEqual(output, unicode_tor_output)
output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True, decode_output=False)
self.assertEqual(output, encoded_tor)
def serial_test_kill_process(self):
executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
self.assertEqual(process.poll(), None) # Process is running
executive.kill_process(process.pid)
# Note: Can't use a ternary since signal.SIGKILL is undefined for sys.platform == "win32"
if sys.platform == "win32":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
# We seem to get either 0 or 1 here for some reason.
self.assertIn(process.wait(), (0, 1))
elif sys.platform == "cygwin":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=98196
# cygwin seems to give us either SIGABRT or SIGKILL
self.assertIn(process.wait(), (-signal.SIGABRT, -signal.SIGKILL))
else:
expected_exit_code = -signal.SIGKILL
self.assertEqual(process.wait(), expected_exit_code)
# Killing again should fail silently.
executive.kill_process(process.pid)
def serial_test_kill_all(self):
executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
self.assertIsNone(process.poll()) # Process is running
executive.kill_all(never_ending_command()[0])
# Note: Can't use a ternary since signal.SIGTERM is undefined for sys.platform == "win32"
if sys.platform == "cygwin":
expected_exit_code = 0 # os.kill results in exit(0) for this process.
self.assertEqual(process.wait(), expected_exit_code)
elif sys.platform == "win32":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
# We seem to get either 0 or 1 here for some reason.
self.assertIn(process.wait(), (0, 1))
else:
expected_exit_code = -signal.SIGTERM
self.assertEqual(process.wait(), expected_exit_code)
# Killing again should fail silently.
executive.kill_all(never_ending_command()[0])
def _assert_windows_image_name(self, name, expected_windows_name):
executive = Executive()
windows_name = executive._windows_image_name(name)
self.assertEqual(windows_name, expected_windows_name)
def test_windows_image_name(self):
self._assert_windows_image_name("foo", "foo.exe")
self._assert_windows_image_name("foo.exe", "foo.exe")
self._assert_windows_image_name("foo.com", "foo.com")
# If the name looks like an extension, even if it isn't
# supposed to, we have no choice but to return the original name.
self._assert_windows_image_name("foo.baz", "foo.baz")
self._assert_windows_image_name("foo.baz.exe", "foo.baz.exe")
def serial_test_check_running_pid(self):
executive = Executive()
self.assertTrue(executive.check_running_pid(os.getpid()))
# Maximum pid number on Linux is 32768 by default
self.assertFalse(executive.check_running_pid(100000))
def serial_test_running_pids(self):
if sys.platform in ("win32", "cygwin"):
return # This function isn't implemented on Windows yet.
executive = Executive()
pids = executive.running_pids()
self.assertIn(os.getpid(), pids)
def serial_test_run_in_parallel(self):
# We run this test serially to avoid overloading the machine and throwing off the timing.
if sys.platform in ("win32", "cygwin"):
return # This function isn't implemented properly on windows yet.
import multiprocessing
NUM_PROCESSES = 4
DELAY_SECS = 0.25
cmd_line = [sys.executable, '-c', 'import time; time.sleep(%f); print "hello"' % DELAY_SECS]
cwd = os.getcwd()
commands = [tuple([cmd_line, cwd])] * NUM_PROCESSES
start = time.time()
command_outputs = Executive().run_in_parallel(commands, processes=NUM_PROCESSES)
done = time.time()
self.assertTrue(done - start < NUM_PROCESSES * DELAY_SECS)
self.assertEqual([output[1] for output in command_outputs], ["hello\n"] * NUM_PROCESSES)
self.assertEqual([], multiprocessing.active_children())
def test_run_in_parallel_assert_nonempty(self):
    """An empty command list is a programming error and must assert."""
    with self.assertRaises(AssertionError):
        Executive().run_in_parallel([])
def main(platform, stdin, stdout, cmd, args):
    """Tiny child-process helper used by the Executive tests.

    '--cat' copies stdin to stdout; '--echo' writes the remaining
    arguments joined by single spaces.  Always returns exit code 0.
    """
    if platform == 'win32' and hasattr(stdout, 'fileno'):
        # Stop Windows from translating '\n' into '\r\n' on the way out.
        import msvcrt
        msvcrt.setmode(stdout.fileno(), os.O_BINARY)
    handlers = {
        '--cat': lambda: stdout.write(stdin.read()),
        '--echo': lambda: stdout.write(' '.join(args)),
    }
    handler = handlers.get(cmd)
    if handler is not None:
        handler()
    return 0
# When executed directly with --cat/--echo, act as the child process that the
# Executive tests spawn; importing this module has no side effects otherwise.
if __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] in ('--cat', '--echo'):
    sys.exit(main(sys.platform, sys.stdin, sys.stdout, sys.argv[1], sys.argv[2:]))
| bsd-3-clause |
neumerance/cloudloon2 | doc/source/conf.py | 7 | 14657 | # -*- coding: utf-8 -*-
#
# Horizon documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Compute the repository root and make it importable so that autodoc can
# find the horizon and openstack_dashboard packages.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)

# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
import horizon.version
def write_autodoc_index():
    """Generate one autodoc .rst stub per module under doc/source/sourcecode.

    Scans the 'horizon' and 'openstack_dashboard' packages, builds an
    autoindex.rst toctree, (re)writes a stub only when the module file is
    newer than the existing stub, and deletes stubs whose modules no longer
    exist.  Executed once at conf.py import time (Python 2 only: uses print
    statements).
    """

    def find_autodoc_modules(module_name, sourcedir):
        """returns a list of modules in the SOURCE directory"""
        modlist = []
        # NOTE(review): the chdir is never restored; harmless here only
        # because every path used afterwards is absolute.
        os.chdir(os.path.join(sourcedir, module_name))
        print "SEARCHING %s" % sourcedir
        for root, dirs, files in os.walk("."):
            for filename in files:
                if filename.endswith(".py"):
                    # remove the pieces of the root
                    elements = root.split(os.path.sep)
                    # replace the leading "." with the module name
                    elements[0] = module_name
                    # and get the base module name
                    base, extension = os.path.splitext(filename)
                    if not (base == "__init__"):
                        elements.append(base)
                    result = ".".join(elements)
                    #print result
                    modlist.append(result)
        return modlist

    RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
    # Both documented packages live directly under the repository root.
    SRCS = {'horizon': ROOT,
            'openstack_dashboard': ROOT}
    EXCLUDED_MODULES = ('horizon.tests', 'openstack_dashboard.tests',)
    # Maps each output directory to the .rst files that should exist in it;
    # used at the end to prune stale stubs.
    CURRENT_SOURCES = {}

    if not(os.path.exists(RSTDIR)):
        os.mkdir(RSTDIR)
    CURRENT_SOURCES[RSTDIR] = ['autoindex.rst']

    INDEXOUT = open(os.path.join(RSTDIR, "autoindex.rst"), "w")
    INDEXOUT.write("=================\n")
    INDEXOUT.write("Source Code Index\n")
    INDEXOUT.write("=================\n")

    for modulename, path in SRCS.items():
        sys.stdout.write("Generating source documentation for %s\n" %
                         modulename)
        INDEXOUT.write("\n%s\n" % modulename.capitalize())
        INDEXOUT.write("%s\n" % ("=" * len(modulename),))
        INDEXOUT.write(".. toctree::\n")
        INDEXOUT.write(" :maxdepth: 1\n")
        INDEXOUT.write("\n")

        MOD_DIR = os.path.join(RSTDIR, modulename)
        CURRENT_SOURCES[MOD_DIR] = []
        if not(os.path.exists(MOD_DIR)):
            os.mkdir(MOD_DIR)
        for module in find_autodoc_modules(modulename, path):
            if any([module.startswith(exclude) for exclude
                    in EXCLUDED_MODULES]):
                print "Excluded module %s." % module
                continue
            mod_path = os.path.join(path, *module.split("."))
            generated_file = os.path.join(MOD_DIR, "%s.rst" % module)
            INDEXOUT.write(" %s/%s\n" % (modulename, module))

            # Find the __init__.py module if this is a directory
            if os.path.isdir(mod_path):
                source_file = ".".join((os.path.join(mod_path, "__init__"),
                                        "py",))
            else:
                source_file = ".".join((os.path.join(mod_path), "py"))

            CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
            # Only generate a new file if the source has changed or we don't
            # have a doc file to begin with.
            if not os.access(generated_file, os.F_OK) or \
                    os.stat(generated_file).st_mtime < \
                    os.stat(source_file).st_mtime:
                print "Module %s updated, generating new documentation." \
                      % module
                FILEOUT = open(generated_file, "w")
                header = "The :mod:`%s` Module" % module
                FILEOUT.write("%s\n" % ("=" * len(header),))
                FILEOUT.write("%s\n" % header)
                FILEOUT.write("%s\n" % ("=" * len(header),))
                FILEOUT.write(".. automodule:: %s\n" % module)
                FILEOUT.write(" :members:\n")
                FILEOUT.write(" :undoc-members:\n")
                FILEOUT.write(" :show-inheritance:\n")
                FILEOUT.write(" :noindex:\n")
                FILEOUT.close()

    INDEXOUT.close()

    # Delete auto-generated .rst files for sources which no longer exist
    for directory, subdirs, files in list(os.walk(RSTDIR)):
        for old_file in files:
            if old_file not in CURRENT_SOURCES.get(directory, []):
                print "Removing outdated file for %s" % old_file
                os.remove(os.path.join(directory, old_file))
# Executed at import time: Sphinx runs conf.py for every build, so the
# autodoc stubs are regenerated before the build proper starts.
write_autodoc_index()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'oslo.sphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Horizon'
copyright = u'2012, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = horizon.version.version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = horizon.version.version_info.release_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Stamp every page with the date of the most recent git commit.
# NOTE(review): os.popen's pipe is never closed explicitly; harmless for a
# one-shot build script, but worth knowing.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Horizondoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Horizon.tex', u'Horizon Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'horizon', u'Horizon Documentation',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Horizon', u'Horizon Documentation', u'OpenStack',
'Horizon', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Horizon'
epub_author = u'OpenStack'
epub_publisher = u'OpenStack'
epub_copyright = u'2012, OpenStack'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
# Every entry must map a prefix to a (target URI, inventory) pair; a None
# inventory means "<target>/objects.inv".
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    # The Django docs publish their inventory at a non-default location, so
    # point intersphinx at it explicitly.  (This entry used to be a bare
    # string, which is not a valid (target, inventory) pair and broke the
    # extension.)
    'django': ('http://docs.djangoproject.com/en/dev/',
               'http://docs.djangoproject.com/en/dev/_objects/'),
    'nova': ('http://nova.openstack.org', None),
    'swift': ('http://swift.openstack.org', None),
    'keystone': ('http://keystone.openstack.org', None),
    'glance': ('http://glance.openstack.org', None),
}
| apache-2.0 |
trujunzhang/djzhang-targets | cwrealtor/cwrealtor/pipelines.py | 1 | 1263 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
from datetime import datetime
from hashlib import md5
class MongoPipeline(object):
    """Scrapy item pipeline that persists scraped items into MongoDB.

    All database work is delegated to cwrealtor.DBUtils; this class only
    adapts the scrapy pipeline lifecycle (open/close/process) onto it.
    """

    def __init__(self, mongo_uri, mongo_db, collection_name):
        # Imported lazily so merely importing this module does not pull in
        # the DB layer.
        from cwrealtor.DBUtils import DBUtils
        self.dbutils = DBUtils(mongo_uri, mongo_db, collection_name)

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the pipeline from crawler settings."""
        settings = crawler.settings
        return cls(mongo_uri=settings.get('MONGODB_SERVER'),
                   mongo_db=settings.get('MONGODB_DB', 'items'),
                   collection_name=settings.get('MONGODB_COLLECTION'))

    def open_spider(self, spider):
        """Delegate spider start-up to the DB helper."""
        self.dbutils.open_spider()

    def close_spider(self, spider):
        """Delegate spider shutdown to the DB helper."""
        self.dbutils.close_spider()

    def process_item(self, item, spider):
        """Store one scraped item and hand it on unchanged."""
        self.dbutils.process_item(item, spider)
        return item

    def _handle_error(self, failure, item, spider):
        """Log a failure raised during a DB interaction (no re-raise)."""
        # do nothing, just log
        log.err(failure)
| mit |
FluidityProject/multifluids | mayavi/mayavi_amcg/filters/projection_and_depth_stretch.py | 7 | 6400 | ## To Do:
## More descriptive description and use to correct terminology
##
## Points to Note:
## Filter only works if it lies directly below the source in the pipeline. It only operates on point data.
# Original code by Tim Bond <http://amcg.ese.ic.ac.uk/index.php?title=Local:Using_Mayavi2>
# Author: Daryl Harrison
# Enthought library imports.
from enthought.traits.api import Instance, Float, Array, List, String
from enthought.traits.ui.api import View, Group, Item
from enthought.tvtk.api import tvtk
# Local imports
from enthought.mayavi.core.common import debug
from enthought.mayavi.core.filter import Filter
from numpy import linalg, array, sum, sqrt, column_stack, arctan2, arccos, zeros, cross, vdot
######################################################################
# `ProjectionAndDepthStretch` class.
######################################################################
class ProjectionAndDepthStretch(Filter):
    """
    Depth stretching and simple projection of earth's surface onto a plane.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # Vertical exaggeration applied to depth below the surface (editable in
    # the traits UI below).
    scale_factor = Float(0.0001)

    # Deep copy of the upstream unstructured grid that this filter mutates.
    grid = Instance(tvtk.UnstructuredGrid, allow_none=False)

    # Spherical decomposition of the input points, cached in update_pipeline().
    longitude = Array
    colatitude = Array
    initial_depth = Array

    # Per-point unit vectors along the radial, longitude and latitude
    # directions (computed in update_pipeline(), used to rotate vector data).
    unit_r = Array
    unit_lon = Array
    unit_lat = Array

    # Names of vector arrays already rotated, so the correction runs only
    # once per array.
    processed_vectors = List

    active_scalar = String
    active_vector = String

    # The view
    view = \
        View(
            Group(
                Item(name='scale_factor', style='simple'),
            ),
            scrollable=True,
            resizable=True,
        )

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        debug('setup_pipeline')
        # NOTE(review): the string below is not a real docstring -- it
        # follows a statement, so Python evaluates it as a no-op expression.
        """Override this method so that it *creates* its tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.
        """
        # Scene is updated when scale_factor trait changed.
        #self.scale_factor.on_trait_change(self.render)

    def update_pipeline(self): # Called when we drag filter under another VTK file
        debug('update_pipeline')
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        # Do nothing if there is no input.
        if len(self.inputs) == 0:
            return

        magn = linalg.norm  # NOTE(review): unused local -- kept as-is.
        earth_radius = 6378000.0  # equatorial radius, metres

        # By default we set the input to the first output of the first input
        self.grid = tvtk.UnstructuredGrid()
        self.grid.deep_copy(self.inputs[0].reader.output) ## WAY OF DOING THIS WITHOUT A DEEP COPY?
        #self.inputs[0].outputs[0] ## DOESN'T WORK WITH update_data()

        # Split array by column into the cartesian coordinates
        xyz = array(self.grid.points)
        x = xyz[:,0]
        y = xyz[:,1]
        z = xyz[:,2]

        xyz_squared = xyz**2
        r_squared = xyz_squared.sum(axis=1)
        r = sqrt(r_squared)

        self.longitude = arctan2(y, x)
        # NOTE(review): the 1.3 factor stretches the colatitude; presumably a
        # display exaggeration -- confirm before relying on geographic accuracy.
        self.colatitude = 1.3 * arccos(z/r)
        self.initial_depth = earth_radius - r

        # Now we do vector correction
        self.unit_r = column_stack((x/r, y/r, z/r))
        len_lon = sqrt(x**2 + y**2)
        self.unit_lon = column_stack((y/len_lon, -1*x/len_lon, zeros(len(x))))
        self.unit_lat = cross(self.unit_r, self.unit_lon)

        # Correct current vector array
        current_vector_array = self.inputs[0].outputs[0].point_data.vectors.name
        self._correct_vector_array(current_vector_array)

        self._apply_stretch()

        # Propagate the data_changed event - let modules that depend on this filter know that pipeline has changed
        self.pipeline_changed = True

    def update_data(self): # Called when we change an option under VTK file, e.g. one of the vectors
        debug('update_data')
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Do nothing if there is no input.
        if len(self.inputs) == 0:
            return

        # Mirror the upstream active scalar/vector selection; the trait
        # change handlers below propagate it onto the copied grid.
        if (self.inputs[0].reader.output.point_data.scalars):
            self.active_scalar = self.inputs[0].reader.output.point_data.scalars.name
        if (self.inputs[0].reader.output.point_data.vectors):
            self.active_vector = self.inputs[0].reader.output.point_data.vectors.name

        # Propagate the data_changed event - let modules that depend on this filter know that data has changed
        self.data_changed = True

    def _active_scalar_changed(self, value):
        # Trait-change handler for `active_scalar`.
        self.grid.point_data.set_active_scalars(value)
        self._update_display()

    def _active_vector_changed(self, value):
        # Trait-change handler for `active_vector`.
        self.grid.point_data.set_active_vectors(value)
        self._update_display()

    def _scale_factor_changed(self, old, new):
        # Trait-change handler: re-stretch whenever the UI slider moves.
        debug('_scale_factor_changed')
        self._apply_stretch()

    def _apply_stretch(self):
        # Re-project the points: x <- longitude, y <- colatitude,
        # z <- depth scaled by the user-chosen factor.
        depth = self.initial_depth * self.scale_factor
        self.grid.points = column_stack((self.longitude, self.colatitude, depth))
        self._update_display()

    def _update_display(self):
        # Publish the (mutated) grid as this filter's single output.
        self._set_outputs([self.grid])

    def _correct_vector_array(self, vector_array):
        # Rotate the named vector array from cartesian (x, y, z) components
        # into the projected (longitude, latitude, radial) frame, once only.
        if not (vector_array in self.processed_vectors):
            self.processed_vectors.append(vector_array)
            vector = self.grid.point_data.get_array(vector_array) # Reference to vector array
            xyz_vector = array(vector)
            # NOTE(review): relies on Python 2 `map` returning a list.
            mag_lon = array(map(vdot, xyz_vector, self.unit_lon))
            mag_lat = map(vdot, xyz_vector, self.unit_lat)
            mag_r = map(vdot, xyz_vector, self.unit_r)
            vector.from_array(column_stack((-1*mag_lon, mag_lat, mag_r)))
tchellomello/home-assistant | homeassistant/components/homematicip_cloud/generic_entity.py | 1 | 8087 | """Generic entity for the HomematicIP Cloud component."""
import logging
from typing import Any, Dict, Optional
from homematicip.aio.device import AsyncDevice
from homematicip.aio.group import AsyncGroup
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity import Entity
from .const import DOMAIN as HMIPC_DOMAIN
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
# Names under which HomematicIP device/group properties are exposed as
# Home Assistant state attributes.
ATTR_MODEL_TYPE = "model_type"
ATTR_LOW_BATTERY = "low_battery"
ATTR_CONFIG_PENDING = "config_pending"
ATTR_CONNECTION_TYPE = "connection_type"
ATTR_DUTY_CYCLE_REACHED = "duty_cycle_reached"
ATTR_ID = "id"
ATTR_IS_GROUP = "is_group"
# RSSI HAP -> Device
ATTR_RSSI_DEVICE = "rssi_device"
# RSSI Device -> HAP
ATTR_RSSI_PEER = "rssi_peer"
ATTR_SABOTAGE = "sabotage"
ATTR_GROUP_MEMBER_UNREACHABLE = "group_member_unreachable"
ATTR_DEVICE_OVERHEATED = "device_overheated"
ATTR_DEVICE_OVERLOADED = "device_overloaded"
# NOTE(review): constant name is misspelled ("UNTERVOLTAGE"); the attribute
# value is correct, and renaming the constant would break any importers.
ATTR_DEVICE_UNTERVOLTAGE = "device_undervoltage"
ATTR_EVENT_DELAY = "event_delay"

# Icon shown for a device whose corresponding HmIP property is truthy;
# the `icon` property of the entity returns the first match.
DEVICE_ATTRIBUTE_ICONS = {
    "lowBat": "mdi:battery-outline",
    "sabotage": "mdi:shield-alert",
    "dutyCycle": "mdi:alert",
    "deviceOverheated": "mdi:alert",
    "deviceOverloaded": "mdi:alert",
    "deviceUndervoltage": "mdi:alert",
    "configPending": "mdi:alert-circle",
}

# HmIP device property name -> HA state-attribute name.
DEVICE_ATTRIBUTES = {
    "modelType": ATTR_MODEL_TYPE,
    "connectionType": ATTR_CONNECTION_TYPE,
    "sabotage": ATTR_SABOTAGE,
    "dutyCycle": ATTR_DUTY_CYCLE_REACHED,
    "rssiDeviceValue": ATTR_RSSI_DEVICE,
    "rssiPeerValue": ATTR_RSSI_PEER,
    "deviceOverheated": ATTR_DEVICE_OVERHEATED,
    "deviceOverloaded": ATTR_DEVICE_OVERLOADED,
    "deviceUndervoltage": ATTR_DEVICE_UNTERVOLTAGE,
    "configPending": ATTR_CONFIG_PENDING,
    "eventDelay": ATTR_EVENT_DELAY,
    "id": ATTR_ID,
}

# HmIP group property name -> HA state-attribute name.
GROUP_ATTRIBUTES = {
    "modelType": ATTR_MODEL_TYPE,
    "lowBat": ATTR_LOW_BATTERY,
    "sabotage": ATTR_SABOTAGE,
    "dutyCycle": ATTR_DUTY_CYCLE_REACHED,
    "configPending": ATTR_CONFIG_PENDING,
    "unreach": ATTR_GROUP_MEMBER_UNREACHABLE,
}
class HomematicipGenericEntity(Entity):
    """Representation of the HomematicIP generic entity."""

    def __init__(self, hap: HomematicipHAP, device, post: Optional[str] = None) -> None:
        """Initialize the generic entity.

        ``device`` may be an HmIP device or group; ``post`` is an optional
        suffix appended to the entity name.
        """
        self._hap = hap
        self._home = hap.home
        self._device = device
        self.post = post
        # Marker showing that the HmIP device has been removed.
        self.hmip_device_removed = False
        _LOGGER.info("Setting up %s (%s)", self.name, self._device.modelType)

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device specific attributes."""
        # Only physical devices should be HA devices.
        if isinstance(self._device, AsyncDevice):
            return {
                "identifiers": {
                    # Serial numbers of Homematic IP device
                    (HMIPC_DOMAIN, self._device.id)
                },
                "name": self._device.label,
                "manufacturer": self._device.oem,
                "model": self._device.modelType,
                "sw_version": self._device.firmwareVersion,
                "via_device": (HMIPC_DOMAIN, self._device.homeId),
            }
        # Groups get no entry in the device registry.
        return None

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
        # Track the HmIP device behind this entity and subscribe to its
        # update/remove events.
        self._hap.hmip_device_by_entity_id[self.entity_id] = self._device
        self._device.on_update(self._async_device_changed)
        self._device.on_remove(self._async_device_removed)

    @callback
    def _async_device_changed(self, *args, **kwargs) -> None:
        """Handle device state changes."""
        # Don't update disabled entities
        if self.enabled:
            _LOGGER.debug("Event %s (%s)", self.name, self._device.modelType)
            self.async_write_ha_state()
        else:
            _LOGGER.debug(
                "Device Changed Event for %s (%s) not fired. Entity is disabled",
                self.name,
                self._device.modelType,
            )

    async def async_will_remove_from_hass(self) -> None:
        """Run when hmip device will be removed from hass."""
        # Only go further if the device/entity should be removed from registries
        # due to a removal of the HmIP device.
        if self.hmip_device_removed:
            try:
                del self._hap.hmip_device_by_entity_id[self.entity_id]
                await self.async_remove_from_registries()
            except KeyError as err:
                _LOGGER.debug("Error removing HMIP device from registry: %s", err)

    async def async_remove_from_registries(self) -> None:
        """Remove entity/device from registry."""
        # Remove callback from device.
        self._device.remove_callback(self._async_device_changed)
        self._device.remove_callback(self._async_device_removed)

        if not self.registry_entry:
            return

        device_id = self.registry_entry.device_id
        if device_id:
            # Remove from device registry.
            device_registry = await dr.async_get_registry(self.hass)
            if device_id in device_registry.devices:
                # This will also remove associated entities from entity registry.
                device_registry.async_remove_device(device_id)
        else:
            # Remove from entity registry.
            # Only relevant for entities that do not belong to a device.
            entity_id = self.registry_entry.entity_id
            if entity_id:
                entity_registry = await er.async_get_registry(self.hass)
                if entity_id in entity_registry.entities:
                    entity_registry.async_remove(entity_id)

    @callback
    def _async_device_removed(self, *args, **kwargs) -> None:
        """Handle hmip device removal."""
        # Set marker showing that the HmIP device has been removed.
        self.hmip_device_removed = True
        self.hass.async_create_task(self.async_remove())

    @property
    def name(self) -> str:
        """Return the name of the generic entity."""
        name = self._device.label
        # Prefix with the home name, then append the optional `post` suffix.
        if name and self._home.name:
            name = f"{self._home.name} {name}"
        if name and self.post:
            name = f"{name} {self.post}"
        return name

    def _get_label_by_channel(self, channel: int) -> str:
        """Return the name of the functional channel, home-name prefixed."""
        name = self._device.functionalChannels[channel].label
        if name and self._home.name:
            name = f"{self._home.name} {name}"
        return name

    @property
    def should_poll(self) -> bool:
        """No polling needed."""
        return False

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        return not self._device.unreach

    @property
    def unique_id(self) -> str:
        """Return a unique ID (entity class name + HmIP device id)."""
        return f"{self.__class__.__name__}_{self._device.id}"

    @property
    def icon(self) -> Optional[str]:
        """Return the icon for the first truthy alert-style attribute."""
        for attr, icon in DEVICE_ATTRIBUTE_ICONS.items():
            if getattr(self._device, attr, None):
                return icon
        return None

    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Return the state attributes of the generic entity."""
        state_attr = {}

        # Expose only the truthy attributes, mapped through the module-level
        # translation tables; devices and groups use different tables.
        if isinstance(self._device, AsyncDevice):
            for attr, attr_key in DEVICE_ATTRIBUTES.items():
                attr_value = getattr(self._device, attr, None)
                if attr_value:
                    state_attr[attr_key] = attr_value
            state_attr[ATTR_IS_GROUP] = False

        if isinstance(self._device, AsyncGroup):
            for attr, attr_key in GROUP_ATTRIBUTES.items():
                attr_value = getattr(self._device, attr, None)
                if attr_value:
                    state_attr[attr_key] = attr_value
            state_attr[ATTR_IS_GROUP] = True

        return state_attr
| apache-2.0 |
kinnou02/navitia | source/jormungandr/jormungandr/autocomplete/geocodejson.py | 1 | 13686 | # coding=utf-8
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
import jormungandr
from jormungandr.autocomplete.abstract_autocomplete import (
AbstractAutocomplete,
AutocompleteUnavailable,
AutocompleteError,
)
from jormungandr.utils import get_lon_lat as get_lon_lat_from_id, get_house_number
import requests
import pybreaker
from jormungandr import app
from jormungandr.exceptions import UnknownObject
def create_admin_field(geocoding):
    """
    This field is needed to respect the geocodejson-spec
    https://github.com/geocoders/geocodejson-spec/tree/master/draft#feature-object
    """
    if not geocoding:
        return None
    return [
        {
            "insee": None,
            "name": name,
            # keys look like "level8"; strip the prefix to get the number
            "level": int(level.replace('level', '')),
            "coord": {"lat": None, "lon": None},
            "label": None,
            "id": None,
            "zip_code": None,
        }
        for level, name in geocoding.get('admin', {}).items()
    ]
def format_zip_code(zip_codes):
    """Collapse a list of zip codes into one display string.

    Returns None when every entry is the empty string (or the list is
    empty), the sole entry when there is exactly one, and a "min-max"
    range otherwise.
    """
    if all(code == "" for code in zip_codes):
        return None
    if len(zip_codes) == 1:
        return zip_codes[0]
    return '{}-{}'.format(min(zip_codes), max(zip_codes))
def create_administrative_regions_field(geocoding):
    """Build the 'administrative_regions' list from a bragi geocoding dict.

    Coordinates and levels are stringified/intified; missing or falsy
    values become None.  Returns None when *geocoding* itself is falsy.
    """
    if not geocoding:
        return None

    def as_navitia_admin(admin):
        coord = admin.get('coord') or {}
        lat, lon = coord.get('lat'), coord.get('lon')
        level = admin.get('level')
        return {
            "insee": admin.get('insee'),
            "name": admin.get('name'),
            "level": int(level) if level else None,
            "coord": {
                "lat": str(lat) if lat else None,
                "lon": str(lon) if lon else None,
            },
            "label": admin.get('label'),
            "id": admin.get('id'),
            "zip_code": format_zip_code(admin.get('zip_codes', [])),
        }

    return [as_navitia_admin(admin)
            for admin in geocoding.get('administrative_regions', {})]
def create_modes_field(modes):
    """Normalize a list of mode dicts to only their 'id' and 'name' keys."""
    if not modes:
        return []
    keep = ("id", "name")
    return [{key: mode.get(key) for key in keep} for mode in modes]
def create_comments_field(modes):
    """Expose each mode's name as a navitia comment entry."""
    comments = []
    # Kept for API compatibility: every comment is typed 'standard'.
    for mode in modes or ():
        comments.append({"type": 'standard', "value": mode.get('name')})
    return comments
def create_codes_field(codes):
    """Convert raw code dicts to the API's {type, value} representation.

    The legacy code type 'navitia1' is exposed as 'external_code'.  Unlike
    the previous implementation, the input list is no longer renamed in
    place: the caller's data is left untouched.
    """
    if not codes:
        return []

    def exposed_type(name):
        # The code type value 'navitia1' is replaced by 'external_code'
        return 'external_code' if name == 'navitia1' else name

    return [{"type": exposed_type(code.get('name')), "value": code.get('value')}
            for code in codes]
def get_lon_lat(obj):
    """Extract (lon, lat) as strings from a GeoJSON-like feature.

    Returns (None, None) when the feature, its geometry, or its
    coordinates are missing, or when the coordinate list is not exactly
    a [lon, lat] pair.
    """
    geometry = obj.get('geometry') if obj else None
    coordinates = geometry.get('coordinates') if geometry else None
    if not coordinates or len(coordinates) != 2:
        return None, None
    return str(coordinates[0]), str(coordinates[1])
def create_address_field(geocoding, poi_lat=None, poi_lon=None):
    """Build a navitia address dict from a bragi geocoding dict.

    When the geocoding block carries no usable coordinate, fall back on
    the enclosing POI's coordinates (*poi_lat*/*poi_lon*).  Returns None
    when *geocoding* is falsy.
    """
    if not geocoding:
        return None

    coord = geocoding.get('coord') or {}
    lat = str(coord['lat']) if coord.get('lat') else poi_lat
    lon = str(coord['lon']) if coord.get('lon') else poi_lon

    resp = {
        "id": '{lon};{lat}'.format(lon=lon, lat=lat),
        "label": geocoding.get('label'),
        "name": geocoding.get('name'),
        "coord": {"lat": lat, "lon": lon},
        "house_number": get_house_number(geocoding.get('housenumber')),
    }

    # Prefer the richer 'administrative_regions' payload; fall back on the
    # flat 'admin' mapping when it is absent or empty.
    admins = create_administrative_regions_field(geocoding) or create_admin_field(geocoding)
    if admins:
        resp['administrative_regions'] = admins
    return resp
class GeocodeJson(AbstractAutocomplete):
    """
    Autocomplete with an external service returning geocodejson
    (https://github.com/geocoders/geocodejson-spec/)
    """

    # the geocodejson types
    TYPE_STOP_AREA = "public_transport:stop_area"
    TYPE_CITY = "city"
    TYPE_POI = "poi"
    TYPE_HOUSE = "house"
    TYPE_STREET = "street"

    TYPE_LIST = [TYPE_STOP_AREA, TYPE_CITY, TYPE_POI, TYPE_HOUSE, TYPE_STREET]

    def __init__(self, **kwargs):
        self.host = kwargs.get('host')
        self.timeout = kwargs.get('timeout', 2)  # used for slow call, like geocoding
        # used for fast call like reverse geocoding and features
        self.fast_timeout = kwargs.get('fast_timeout', 0.2)
        # Circuit breaker: stop calling bragi for a while once it has failed
        # too many times in a row, instead of hammering a dead service.
        self.breaker = pybreaker.CircuitBreaker(
            fail_max=app.config['CIRCUIT_BREAKER_MAX_BRAGI_FAIL'],
            reset_timeout=app.config['CIRCUIT_BREAKER_BRAGI_TIMEOUT_S'],
        )
        # create a session to allow connection pooling via keep alive
        if kwargs.get('disable_keepalive', False):
            self.session = requests
        else:
            self.session = requests.Session()

    def call_bragi(self, url, method, **kwargs):
        """Perform an HTTP call to bragi through the circuit breaker.

        Every failure (open breaker, timeout, any other error) is converted
        to GeocodeJsonUnavailable so callers handle a single exception type.
        """
        try:
            return self.breaker.call(method, url, **kwargs)
        except pybreaker.CircuitBreakerError as e:
            logging.getLogger(__name__).error('external autocomplete service dead (error: {})'.format(e))
            raise GeocodeJsonUnavailable('circuit breaker open')
        except requests.Timeout:
            logging.getLogger(__name__).error('autocomplete request timeout')
            raise GeocodeJsonUnavailable('external autocomplete service timeout')
        except Exception:
            # Was a bare 'except:', which would also have swallowed SystemExit
            # and KeyboardInterrupt; narrowed so those still propagate.
            logging.getLogger(__name__).exception('error in autocomplete request')
            raise GeocodeJsonUnavailable('impossible to access external autocomplete service')

    @classmethod
    def _check_response(cls, response, uri):
        """Raise the appropriate exception for a non-successful bragi response."""
        if response is None:
            raise GeocodeJsonError('impossible to access autocomplete service')
        if response.status_code == 404:
            raise UnknownObject(uri)
        if response.status_code == 503:
            raise GeocodeJsonUnavailable('geocodejson responded with 503')
        if response.status_code != 200:
            error_msg = 'Autocomplete request failed with HTTP code {}'.format(response.status_code)
            if response.text:
                error_msg += ' ({})'.format(response.text)
            raise GeocodeJsonError(error_msg)

    @classmethod
    def _clean_response(cls, response, depth=1):
        """Prune nested objects deeper than *depth* from a bragi response.

        Past the requested depth, 'administrative_regions' lists and embedded
        geocodejson-typed objects are emptied in place.
        """

        def is_deleteable(_key, _value, _depth):
            # Only objects strictly below the requested depth are emptied.
            if _depth > -1:
                return False
            else:
                if _key == 'administrative_regions':
                    return True
                elif isinstance(_value, dict) and _value.get('type') in cls.TYPE_LIST:
                    return True
                else:
                    return False

        def _clear_object(obj):
            # Empty in place so the containing response still references it.
            if isinstance(obj, list):
                del obj[:]
            elif isinstance(obj, dict):
                obj.clear()

        def _manage_depth(_key, _value, _depth):
            if is_deleteable(_key, _value, _depth):
                _clear_object(_value)
            elif isinstance(_value, dict):
                for k, v in _value.items():
                    _manage_depth(k, v, _depth - 1)

        features = response.get('features')
        if features:
            for feature in features:
                key = 'geocoding'
                value = feature.get('properties', {}).get('geocoding')
                if not value:
                    continue
                _manage_depth(key, value, depth)

        return response

    @classmethod
    def response_marshaler(cls, response_bragi, uri=None, depth=1):
        """Validate, depth-prune and serialize a raw bragi response."""
        cls._check_response(response_bragi, uri)
        try:
            json_response = response_bragi.json()
        except ValueError:
            logging.getLogger(__name__).error(
                "impossible to get json for response %s with body: %s",
                response_bragi.status_code,
                response_bragi.text,
            )
            raise
        # Clean dict objects depending on depth passed in request parameter.
        json_response = cls._clean_response(json_response, depth)
        # Imported here to avoid a circular import at module load time.
        from jormungandr.interfaces.v1.serializer.geocode_json import GeocodePlacesSerializer

        return GeocodePlacesSerializer(json_response).data

    def make_url(self, end_point, uri=None):
        """Build the bragi URL for one of the known endpoints.

        Raises GeocodeJsonError for an unknown endpoint or a missing host.
        """
        if end_point not in ['autocomplete', 'features', 'reverse']:
            raise GeocodeJsonError('Unknown endpoint')
        if not self.host:
            raise GeocodeJsonError('global autocomplete not configured')
        url = "{host}/{end_point}".format(host=self.host, end_point=end_point)
        if uri:
            url = '{url}/{uri}'.format(url=url, uri=uri)
        return url

    def basic_params(self, instances):
        '''
        These are the parameters common to the three endpoints
        '''
        if not instances:
            return []
        params = [('pt_dataset[]', i.name) for i in instances]
        params.extend([('poi_dataset[]', i.poi_dataset) for i in instances if i.poi_dataset])
        return params

    def make_params(self, request, instances, timeout):
        '''
        These are the parameters used specifically for the autocomplete endpoint.
        '''
        params = self.basic_params(instances)
        params.extend([("q", request["q"]), ("limit", request["count"])])
        if request.get("type[]"):
            # navitia type -> geocodejson type(s)
            map_type = {
                "administrative_region": [self.TYPE_CITY],
                "address": [self.TYPE_STREET, self.TYPE_HOUSE],
                "stop_area": [self.TYPE_STOP_AREA],
                "poi": [self.TYPE_POI],
            }
            # renamed from 'type' to avoid shadowing the builtin
            for requested_type in request.get("type[]"):
                if requested_type == 'stop_point':
                    logging.getLogger(__name__).debug('stop_point is not handled by bragi')
                    continue

                for t in map_type[requested_type]:
                    params.append(("type[]", t))

        if request.get("from"):
            lon, lat = self.get_coords(request["from"])
            params.extend([('lon', lon), ('lat', lat)])
        if timeout:
            # bragi timeout is in ms
            params.append(("timeout", int(timeout * 1000)))
        return params

    def get(self, request, instances):
        """Autocomplete: query bragi, POSTing when a shape filter is given."""
        params = self.make_params(request, instances, self.timeout)
        shape = request.get('shape', None)
        url = self.make_url('autocomplete')
        kwargs = {"params": params, "timeout": self.timeout}
        method = self.session.get
        if shape:
            # a shape can be arbitrarily large, so it is sent in a POST body
            kwargs["json"] = {"shape": shape}
            method = self.session.post

        logging.getLogger(__name__).debug("call bragi with parameters %s", kwargs)
        raw_response = self.call_bragi(url, method, **kwargs)
        depth = request.get('depth', 1)
        return self.response_marshaler(raw_response, None, depth)

    def geo_status(self, instance):
        raise NotImplementedError

    @staticmethod
    def get_coords(param):
        """
        Get coordinates (longitude, latitude).
        For moment we consider that the param can only be a coordinate.
        """
        return param.split(";")

    def get_by_uri(self, uri, instances=None, current_datetime=None):
        """Fetch one object: reverse-geocode 'lon;lat' ids, else /features/<uri>."""
        params = self.basic_params(instances)
        lon, lat = get_lon_lat_from_id(uri)

        if lon is not None and lat is not None:
            url = self.make_url('reverse')
            params.extend([('lon', lon), ('lat', lat)])
        else:
            url = self.make_url('features', uri)

        # bragi timeout is in ms
        params.append(("timeout", int(self.fast_timeout * 1000)))
        raw_response = self.call_bragi(url, self.session.get, timeout=self.fast_timeout, params=params)
        return self.response_marshaler(raw_response, uri)

    def status(self):
        """Expose the autocomplete configuration for the /status API."""
        return {'class': self.__class__.__name__, 'timeout': self.timeout, 'fast_timeout': self.fast_timeout}

    def is_handling_stop_points(self):
        # stop_point lookups are delegated to the public transport backend
        return False
class GeocodeJsonError(AutocompleteError):
    """Raised when the external geocodejson (bragi) service returns an error."""

    pass
class GeocodeJsonUnavailable(AutocompleteUnavailable):
    """Raised when the external geocodejson (bragi) service cannot be reached."""

    pass
| agpl-3.0 |
lukka/imguploader | tests/tests.py | 1 | 7106 | import sys
import os
'''Get the directory where this script is'''
def getScriptDirectory():
    """Return the absolute, symlink-resolved directory containing this script."""
    script_dir = os.path.join(os.getcwd(), os.path.dirname(__file__))
    return os.path.realpath(script_dir)
''' Add to the sys.path the path to the Imgur Python module directory. '''
sys.path.append(os.path.join(getScriptDirectory(), "../imgur-python"))
import imgurpython
import unittest
from unittest.mock import MagicMock, Mock, mock_open, patch
import imguploader
import traceback
from importlib import import_module
from PIL import Image
# Used to mock the Image.open() behavior
def raiseIfNotImageTypeFile(a):
    """Stand-in for PIL.Image.open() in tests.

    Returns None for a file name ending in 'jpg' or 'png' and raises an
    Exception for anything else, loosely mimicking how Image.open() fails
    on non-image files.
    """
    # str.endswith accepts a tuple, replacing the original 'or' chain.
    if not a.endswith(("jpg", "png")):
        raise Exception("not an image file: {}".format(a))
class TestSuite_ImgUploader(unittest.TestCase):
    """Unit tests for imguploader: console-level parsing, the UploadedImage
    value object, the upload tracker (file access and locking mocked), and
    the ImageUploader workflow (file system, PIL and backend mocked)."""

    def test_getConsoleLevel(self):
        # Invalid or missing values fall back to the default console level;
        # numeric values (int or numeric string) pass through unchanged.
        assert(imguploader.getConsoleLevel(None) == imguploader.CONSOLE_DEFAULT_LEVEL)
        assert(imguploader.getConsoleLevel("ciccio") == imguploader.CONSOLE_DEFAULT_LEVEL)
        assert(imguploader.getConsoleLevel("") == imguploader.CONSOLE_DEFAULT_LEVEL)
        assert(imguploader.getConsoleLevel("11") == 11)
        assert(imguploader.getConsoleLevel(0) == 0)
        assert(imguploader.getConsoleLevel(4000000) == 4000000)
        assert(imguploader.getConsoleLevel(4000) == 4000)
        assert(imguploader.getConsoleLevel(400) == 400)
        assert(imguploader.getConsoleLevel(40) == 40)

    def test_UploadedImage(self):
        # UploadedImage is a plain value object: each accessor returns the
        # corresponding constructor argument untouched.
        fileName = "fileName"
        full = "full"
        thumb = "thumb"
        assert(imguploader.UploadedImage(fileName, full, thumb).getImageFileName() == fileName)
        assert(imguploader.UploadedImage(fileName, full, thumb).getURLFullImage() == full)
        assert(imguploader.UploadedImage(fileName, full, thumb).getURLThumbImage() == thumb)

    # NOTE(review): @patch decorators apply bottom-up, so the first mock
    # parameter actually receives the fcntl.flock patch and the second the
    # imguploader.open patch -- the parameter names are swapped. Harmless
    # here because the body installs its own patches via context managers.
    @patch("imguploader.open", create=True)
    @patch("fcntl.flock", return_value=None)
    def test_UploadedImagesTracker(self, pMockForOpen, pMockForFlock):
        #Test assertion raised when flock fails.
        mocked_open = mock_open()
        with patch("imguploader.open", mocked_open, create=True):
            with patch("fcntl.flock", MagicMock(side_effect=IOError)):
                self.assertRaises(imguploader.UploadedImagesTrackerLockAcquiringFailed, imguploader.UploadedImagesTracker, "directory")

        #Test assertion raised when the activity log file is corrupted.
        mocked_open = mock_open(read_data="noooooooooooo")
        with patch("imguploader.open", mocked_open, create=True):
            with patch("fcntl.flock", MagicMock(return_value=None)):
                self.assertRaises(imguploader.UploadedImagesTrackerException, imguploader.UploadedImagesTracker, "directory")

        #Test proper construction of the UploadedImagesTracker.getImageList() returned list.
        # '<' acts as the field separator in the log: name<fullURL<thumbURL.
        mocked_open = mock_open(read_data="fileName<URLfull<URLthumb")
        with patch("imguploader.open", mocked_open, create=True):
            with patch("fcntl.flock", MagicMock(return_value=None)):
                entry = imguploader.UploadedImagesTracker("directory")
                assert(entry.getImageList()[0].getImageFileName() == "fileName")
                assert(entry.isImageAlreadyUploaded("fileName") == True)

        #Test for proper call to close() on the activity log file.
        lOpenMocked = mock_open(read_data="")
        with patch("imguploader.open", lOpenMocked, create=True):
            with imguploader.UploadedImagesTracker("directory"):
                pass
            # leaving the 'with' block must close the underlying file exactly once
            lOpenMocked().close.assert_called_once_with()

        #Test for UploadedImagesTracker.addUploadedImage()
        uploadedImagesTracker = imguploader.UploadedImagesTracker("directory")
        uploadedImagesTracker.addUploadedImage("imageFileName", "FullURL", "ThumbURL")
        assert(uploadedImagesTracker._uploadedImages[0].getImageFileName() == "imageFileName")
        assert(uploadedImagesTracker._uploadedImages[0].getURLFullImage() == "FullURL")
        assert(uploadedImagesTracker._uploadedImages[0].getURLThumbImage() == "ThumbURL")

    # NOTE(review): as above, the two mock parameter names are swapped with
    # respect to the decorator order; the body re-patches what it needs.
    @patch("imguploader.open", create=True)
    @patch("fcntl.flock", return_value=None)
    def test_ImageUploader(self, pMockForOpen, pMockForFlock):
        print("test_ImageUploader()<<")

        #Test for exception raised when no config file is found.
        imguploader.ImageUploader._CFG_CONFIG_FILE_NAME = "this_file_cannot_exists_right_huh"
        self.assertRaises(imguploader.ImageUploaderException, imguploader.ImageUploader, ".")

        #Test for correctness of ImageUploader.getImagesList()
        # Simulate the presence of 'first.jpg' and 'second.png' along a bunch of non-image files.
        with patch('os.listdir', MagicMock(return_value = ['first.jpg', 'second.png', 'info.txt', 'error.log',
            "amiga.iff", "core.dump", "armour.bld", "third.png"])) as lListDirMock:
            with patch('os.path.isfile', MagicMock(return_value=True)) as lIsFileMock:
                # raiseIfNotImageTypeFile makes the mocked Image.open() reject non-image names
                with patch('PIL.Image.open', side_effect=raiseIfNotImageTypeFile) as lImageOpenMock:
                    lImageList = imguploader.ImageUploader.getImagesList('fasfsas')
                    self.assertEqual(lImageList, ["first.jpg", "second.png", 'third.png'])

        # Test for 'connection aborted' casted by
        #  response = method_to_call(url, headers=header, data=data)
        # in file imguploader/imgur-python/imgurpython/client.py", line 124, in make_request
        # The imguploader.ImageUploader class must catch it and cast an appropriate imguploader.ImageUploaderException.
        print("connection aborted casted test:<<")
        imguploader.ImageUploader._parseValidateConfigurationFile = MagicMock(return_value = True)
        imguploader.ImageUploader._renameExistingFile = MagicMock()
        with patch('imgurpython.ImgurClient', autospec=True) as lImgurClientMock:
            lImageMock = MagicMock()
            # _getexif() returning None simulates an image without EXIF data.
            lImageMock._getexif.return_value = None
            with patch('PIL.Image.open', return_value=lImageMock) as lImageOpenMock:
                lImgurClientMock.upload_from_path = MagicMock(side_effect=Exception)
                lImgUp = imguploader.ImageUploader("/fake/path", 1)
                imguploader.ImageUploader.getImagesList = MagicMock(return_value=["xxx.jpg"])
                lImgUp._generateHTMLFile = MagicMock()
                #Set the ImageUploader._backendClass private var member that is not set as _parseValidationConfigurationFile is mocked.
                lBackendsModule = import_module("imgbackends")
                lImgUp._backendClass = getattr(lBackendsModule, "ImgurBackend")
                lImgUp._backendClass.uploadImage = MagicMock(side_effect=Exception())
                lImgTracker = MagicMock()
                lImgTracker.isImageAlreadyUploaded.return_value = False;
                #assert not raises:
                lImgUp.uploadImagesAndCreateHTMLGallery(lImgTracker)
        print("test_ImageUploader()>>")
| gpl-3.0 |
tesidroni/mp | Lib/_MozillaCookieJar.py | 191 | 5809 | """Mozilla / Netscape cookie loading / saving."""
import re, time
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT)
class MozillaCookieJar(FileCookieJar):
    """

    WARNING: you may want to backup your browser's cookies file if you use
    this class to save cookies.  I *think* it works, but there have been
    bugs in the past!

    This class differs from CookieJar only in the format it uses to save and
    load cookies to and from a file.  This class uses the Mozilla/Netscape
    `cookies.txt' format.  lynx uses this file format, too.

    Don't expect cookies saved while the browser is running to be noticed by
    the browser (in fact, Mozilla on unix will overwrite your saved cookies if
    you change them on disk while it's running; on Windows, you probably can't
    save at all while the browser is running).

    Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
    Netscape cookies on saving.

    In particular, the cookie version and port number information is lost,
    together with information about whether or not Path, Port and Discard were
    specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
    domain as set in the HTTP header started with a dot (yes, I'm aware some
    domains in Netscape files start with a dot and some don't -- trust me, you
    really don't want to know any more about this).

    Note that though Mozilla and Netscape use the same format, they use
    slightly different headers.  The class saves cookies using the Netscape
    header by default (Mozilla can cope with that).

    """
    # Matched (via re.search) against the first line of the file to verify
    # that it really is a Netscape/Mozilla-format cookies file.
    magic_re = "#( Netscape)? HTTP Cookie File"
    # First lines written out by save(); Netscape header by default.
    header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.

"""

    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        """Parse Netscape-format cookies from the open file object *f* and
        add them to the jar.

        Raises LoadError when the magic first line is missing or a data
        line cannot be parsed.
        """
        now = time.time()

        magic = f.readline()
        if not re.search(self.magic_re, magic):
            f.close()
            raise LoadError(
                "%r does not look like a Netscape format cookies file" %
                filename)

        try:
            while 1:
                line = f.readline()
                if line == "": break

                # last field may be absent, so keep any trailing tab
                if line.endswith("\n"): line = line[:-1]

                # skip comments and blank lines XXX what is $ for?
                if (line.strip().startswith(("#", "$")) or
                    line.strip() == ""):
                    continue

                # Each data line holds exactly 7 tab-separated fields.
                domain, domain_specified, path, secure, expires, name, value = \
                         line.split("\t")
                secure = (secure == "TRUE")
                domain_specified = (domain_specified == "TRUE")
                if name == "":
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas cookielib regards it as a
                    # cookie with no value.
                    name = value
                    value = None

                initial_dot = domain.startswith(".")
                # NOTE(review): the file format redundantly encodes the
                # leading-dot information; an inconsistent file trips this
                # assert (stripped under -O) rather than raising LoadError.
                assert domain_specified == initial_dot

                discard = False
                if expires == "":
                    # no expiry time means a session cookie: discard on exit
                    expires = None
                    discard = True

                # assume path_specified is false
                c = Cookie(0, name, value,
                           None, False,
                           domain, domain_specified, initial_dot,
                           path, False,
                           secure,
                           expires,
                           discard,
                           None,
                           None,
                           {})
                if not ignore_discard and c.discard:
                    continue
                if not ignore_expires and c.is_expired(now):
                    continue
                self.set_cookie(c)

        except IOError:
            raise
        except Exception:
            _warn_unhandled_exception()
            raise LoadError("invalid Netscape format cookies file %r: %r" %
                            (filename, line))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Write the jar's cookies to *filename* (default: self.filename)
        in Netscape `cookies.txt' format.

        Session cookies are skipped unless *ignore_discard* is true, and
        expired cookies unless *ignore_expires* is true.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)

        f = open(filename, "w")
        try:
            f.write(self.header)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                # Booleans are serialized as the literal strings TRUE/FALSE.
                if cookie.secure: secure = "TRUE"
                else: secure = "FALSE"
                if cookie.domain.startswith("."): initial_dot = "TRUE"
                else: initial_dot = "FALSE"
                if cookie.expires is not None:
                    expires = str(cookie.expires)
                else:
                    expires = ""
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas cookielib regards it as a
                    # cookie with no value.
                    name = ""
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    "\t".join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value])+
                    "\n")
        finally:
            f.close()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.