code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
staying_in: title: Staying in '08 going_out: title: Outdoor Pursuits 2k+8
unknown
github
https://github.com/rails/rails
activerecord/test/fixtures/zines.yml
from django.db import migrations, models


class Migration(migrations.Migration):
    """
    Second migration for the test app, deliberately written so that it
    conflicts with another 0002 migration (used by the migration-conflict
    detection tests).
    """

    # Both conflicting 0002 migrations depend on the same initial migration,
    # which is what produces the conflicting leaf nodes in the graph.
    dependencies = [("unspecified_app_with_conflict", "0001_initial")]

    operations = [
        migrations.CreateModel(
            "Something",
            [
                # Explicit auto primary key; the model needs no other fields
                # for the conflict-detection scenario being exercised.
                ("id", models.AutoField(primary_key=True)),
            ],
        )
    ]
python
github
https://github.com/django/django
tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/0002_conflicting_second.py
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.protocols.tls}. """ from __future__ import division, absolute_import from zope.interface.verify import verifyObject from zope.interface import Interface, directlyProvides from twisted.python.compat import intToBytes, iterbytes try: from twisted.protocols.tls import TLSMemoryBIOProtocol, TLSMemoryBIOFactory from twisted.protocols.tls import _PullToPush, _ProducerMembrane except ImportError: # Skip the whole test module if it can't be imported. skip = "pyOpenSSL 0.10 or newer required for twisted.protocol.tls" else: # Otherwise, the pyOpenSSL dependency must be satisfied, so all these # imports will work. from OpenSSL.crypto import X509Type from OpenSSL.SSL import (TLSv1_METHOD, Error, Context, ConnectionType, WantReadError) from twisted.internet.ssl import PrivateCertificate from twisted.test.ssl_helpers import (ClientTLSContext, ServerTLSContext, certPath) from twisted.python.filepath import FilePath from twisted.python.failure import Failure from twisted.python import log from twisted.internet.interfaces import ISystemHandle, ISSLTransport from twisted.internet.interfaces import IPushProducer from twisted.internet.error import ConnectionDone, ConnectionLost from twisted.internet.defer import Deferred, gatherResults from twisted.internet.protocol import Protocol, ClientFactory, ServerFactory from twisted.internet.task import TaskStopped from twisted.protocols.loopback import loopbackAsync, collapsingPumpPolicy from twisted.trial.unittest import TestCase from twisted.test.test_tcp import ConnectionLostNotifyingProtocol from twisted.test.proto_helpers import StringTransport class HandshakeCallbackContextFactory: """ L{HandshakeCallbackContextFactory} is a factory for SSL contexts which allows applications to get notification when the SSL handshake completes. @ivar _finished: A L{Deferred} which will be called back when the handshake is done. 
""" # pyOpenSSL needs to expose this. # https://bugs.launchpad.net/pyopenssl/+bug/372832 SSL_CB_HANDSHAKE_DONE = 0x20 def __init__(self): self._finished = Deferred() def factoryAndDeferred(cls): """ Create a new L{HandshakeCallbackContextFactory} and return a two-tuple of it and a L{Deferred} which will fire when a connection created with it completes a TLS handshake. """ contextFactory = cls() return contextFactory, contextFactory._finished factoryAndDeferred = classmethod(factoryAndDeferred) def _info(self, connection, where, ret): """ This is the "info callback" on the context. It will be called periodically by pyOpenSSL with information about the state of a connection. When it indicates the handshake is complete, it will fire C{self._finished}. """ if where & self.SSL_CB_HANDSHAKE_DONE: self._finished.callback(None) def getContext(self): """ Create and return an SSL context configured to use L{self._info} as the info callback. """ context = Context(TLSv1_METHOD) context.set_info_callback(self._info) return context class AccumulatingProtocol(Protocol): """ A protocol which collects the bytes it receives and closes its connection after receiving a certain minimum of data. @ivar howMany: The number of bytes of data to wait for before closing the connection. @ivar receiving: A C{list} of C{str} of the bytes received so far. """ def __init__(self, howMany): self.howMany = howMany def connectionMade(self): self.received = [] def dataReceived(self, bytes): self.received.append(bytes) if sum(map(len, self.received)) >= self.howMany: self.transport.loseConnection() def connectionLost(self, reason): if not reason.check(ConnectionDone): log.err(reason) def buildTLSProtocol(server=False, transport=None): """ Create a protocol hooked up to a TLS transport hooked up to a StringTransport. 
""" # We want to accumulate bytes without disconnecting, so set high limit: clientProtocol = AccumulatingProtocol(999999999999) clientFactory = ClientFactory() clientFactory.protocol = lambda: clientProtocol if server: contextFactory = ServerTLSContext() else: contextFactory = ClientTLSContext() wrapperFactory = TLSMemoryBIOFactory( contextFactory, not server, clientFactory) sslProtocol = wrapperFactory.buildProtocol(None) if transport is None: transport = StringTransport() sslProtocol.makeConnection(transport) return clientProtocol, sslProtocol class TLSMemoryBIOFactoryTests(TestCase): """ Ensure TLSMemoryBIOFactory logging acts correctly. """ def test_quiet(self): """ L{TLSMemoryBIOFactory.doStart} and L{TLSMemoryBIOFactory.doStop} do not log any messages. """ contextFactory = ServerTLSContext() logs = [] logger = logs.append log.addObserver(logger) self.addCleanup(log.removeObserver, logger) wrappedFactory = ServerFactory() # Disable logging on the wrapped factory: wrappedFactory.doStart = lambda: None wrappedFactory.doStop = lambda: None factory = TLSMemoryBIOFactory(contextFactory, False, wrappedFactory) factory.doStart() factory.doStop() self.assertEqual(logs, []) def test_logPrefix(self): """ L{TLSMemoryBIOFactory.logPrefix} amends the wrapped factory's log prefix with a short string (C{"TLS"}) indicating the wrapping, rather than its full class name. """ contextFactory = ServerTLSContext() factory = TLSMemoryBIOFactory(contextFactory, False, ServerFactory()) self.assertEqual("ServerFactory (TLS)", factory.logPrefix()) def test_logPrefixFallback(self): """ If the wrapped factory does not provide L{ILoggingContext}, L{TLSMemoryBIOFactory.logPrefix} uses the wrapped factory's class name. 
""" class NoFactory(object): pass contextFactory = ServerTLSContext() factory = TLSMemoryBIOFactory(contextFactory, False, NoFactory()) self.assertEqual("NoFactory (TLS)", factory.logPrefix()) class TLSMemoryBIOTests(TestCase): """ Tests for the implementation of L{ISSLTransport} which runs over another L{ITransport}. """ def test_interfaces(self): """ L{TLSMemoryBIOProtocol} instances provide L{ISSLTransport} and L{ISystemHandle}. """ proto = TLSMemoryBIOProtocol(None, None) self.assertTrue(ISSLTransport.providedBy(proto)) self.assertTrue(ISystemHandle.providedBy(proto)) def test_wrappedProtocolInterfaces(self): """ L{TLSMemoryBIOProtocol} instances provide the interfaces provided by the transport they wrap. """ class ITransport(Interface): pass class MyTransport(object): def write(self, bytes): pass clientFactory = ClientFactory() contextFactory = ClientTLSContext() wrapperFactory = TLSMemoryBIOFactory( contextFactory, True, clientFactory) transport = MyTransport() directlyProvides(transport, ITransport) tlsProtocol = TLSMemoryBIOProtocol(wrapperFactory, Protocol()) tlsProtocol.makeConnection(transport) self.assertTrue(ITransport.providedBy(tlsProtocol)) def test_getHandle(self): """ L{TLSMemoryBIOProtocol.getHandle} returns the L{OpenSSL.SSL.Connection} instance it uses to actually implement TLS. This may seem odd. In fact, it is. The L{OpenSSL.SSL.Connection} is not actually the "system handle" here, nor even an object the reactor knows about directly. However, L{twisted.internet.ssl.Certificate}'s C{peerFromTransport} and C{hostFromTransport} methods depend on being able to get an L{OpenSSL.SSL.Connection} object in order to work properly. Implementing L{ISystemHandle.getHandle} like this is the easiest way for those APIs to be made to work. If they are changed, then it may make sense to get rid of this implementation of L{ISystemHandle} and return the underlying socket instead. 
""" factory = ClientFactory() contextFactory = ClientTLSContext() wrapperFactory = TLSMemoryBIOFactory(contextFactory, True, factory) proto = TLSMemoryBIOProtocol(wrapperFactory, Protocol()) transport = StringTransport() proto.makeConnection(transport) self.assertIsInstance(proto.getHandle(), ConnectionType) def test_makeConnection(self): """ When L{TLSMemoryBIOProtocol} is connected to a transport, it connects the protocol it wraps to a transport. """ clientProtocol = Protocol() clientFactory = ClientFactory() clientFactory.protocol = lambda: clientProtocol contextFactory = ClientTLSContext() wrapperFactory = TLSMemoryBIOFactory( contextFactory, True, clientFactory) sslProtocol = wrapperFactory.buildProtocol(None) transport = StringTransport() sslProtocol.makeConnection(transport) self.assertNotIdentical(clientProtocol.transport, None) self.assertNotIdentical(clientProtocol.transport, transport) self.assertIdentical(clientProtocol.transport, sslProtocol) def handshakeProtocols(self): """ Start handshake between TLS client and server. """ clientFactory = ClientFactory() clientFactory.protocol = Protocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverFactory = ServerFactory() serverFactory.protocol = Protocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) return (sslClientProtocol, sslServerProtocol, handshakeDeferred, connectionDeferred) def test_handshake(self): """ The TLS handshake is performed when L{TLSMemoryBIOProtocol} is connected to a transport. """ tlsClient, tlsServer, handshakeDeferred, _ = self.handshakeProtocols() # Only wait for the handshake to complete. 
Anything after that isn't # important here. return handshakeDeferred def test_handshakeFailure(self): """ L{TLSMemoryBIOProtocol} reports errors in the handshake process to the application-level protocol object using its C{connectionLost} method and disconnects the underlying transport. """ clientConnectionLost = Deferred() clientFactory = ClientFactory() clientFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( clientConnectionLost)) clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverConnectionLost = Deferred() serverFactory = ServerFactory() serverFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( serverConnectionLost)) # This context factory rejects any clients which do not present a # certificate. certificateData = FilePath(certPath).getContent() certificate = PrivateCertificate.loadPEM(certificateData) serverContextFactory = certificate.options(certificate) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) def cbConnectionLost(protocol): # The connection should close on its own in response to the error # induced by the client not supplying the required certificate. # After that, check to make sure the protocol's connectionLost was # called with the right thing. protocol.lostConnectionReason.trap(Error) clientConnectionLost.addCallback(cbConnectionLost) serverConnectionLost.addCallback(cbConnectionLost) # Additionally, the underlying transport should have been told to # go away. return gatherResults([ clientConnectionLost, serverConnectionLost, connectionDeferred]) def test_getPeerCertificate(self): """ L{TLSMemoryBIOProtocol.getPeerCertificate} returns the L{OpenSSL.crypto.X509Type} instance representing the peer's certificate. 
""" # Set up a client and server so there's a certificate to grab. clientFactory = ClientFactory() clientFactory.protocol = Protocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverFactory = ServerFactory() serverFactory.protocol = Protocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the handshake def cbHandshook(ignored): # Grab the server's certificate and check it out cert = sslClientProtocol.getPeerCertificate() self.assertIsInstance(cert, X509Type) self.assertEqual( cert.digest('sha1'), # openssl x509 -noout -sha1 -fingerprint -in server.pem b'45:DD:FD:E2:BD:BF:8B:D0:00:B7:D2:7A:BB:20:F5:34:05:4B:15:80') handshakeDeferred.addCallback(cbHandshook) return handshakeDeferred def test_writeAfterHandshake(self): """ Bytes written to L{TLSMemoryBIOProtocol} before the handshake is complete are received by the protocol on the other side of the connection once the handshake succeeds. 
""" bytes = b"some bytes" clientProtocol = Protocol() clientFactory = ClientFactory() clientFactory.protocol = lambda: clientProtocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes)) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the handshake to finish before writing anything. def cbHandshook(ignored): clientProtocol.transport.write(bytes) # The server will drop the connection once it gets the bytes. return connectionDeferred handshakeDeferred.addCallback(cbHandshook) # Once the connection is lost, make sure the server received the # expected bytes. def cbDisconnected(ignored): self.assertEqual(b"".join(serverProtocol.received), bytes) handshakeDeferred.addCallback(cbDisconnected) return handshakeDeferred def writeBeforeHandshakeTest(self, sendingProtocol, bytes): """ Run test where client sends data before handshake, given the sending protocol and expected bytes. 
""" clientFactory = ClientFactory() clientFactory.protocol = sendingProtocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes)) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. def cbConnectionDone(ignored): self.assertEqual(b"".join(serverProtocol.received), bytes) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_writeBeforeHandshake(self): """ Bytes written to L{TLSMemoryBIOProtocol} before the handshake is complete are received by the protocol on the other side of the connection once the handshake succeeds. """ bytes = b"some bytes" class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.write(bytes) return self.writeBeforeHandshakeTest(SimpleSendingProtocol, bytes) def test_writeSequence(self): """ Bytes written to L{TLSMemoryBIOProtocol} with C{writeSequence} are received by the protocol on the other side of the connection. """ bytes = b"some bytes" class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.writeSequence(list(iterbytes(bytes))) return self.writeBeforeHandshakeTest(SimpleSendingProtocol, bytes) def test_writeAfterLoseConnection(self): """ Bytes written to L{TLSMemoryBIOProtocol} after C{loseConnection} is called are not transmitted (unless there is a registered producer, which will be tested elsewhere). 
""" bytes = b"some bytes" class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.write(bytes) self.transport.loseConnection() self.transport.write(b"hello") self.transport.writeSequence([b"world"]) return self.writeBeforeHandshakeTest(SimpleSendingProtocol, bytes) def test_writeUnicodeRaisesTypeError(self): """ Writing C{unicode} to L{TLSMemoryBIOProtocol} throws a C{TypeError}. """ notBytes = u"hello" result = [] class SimpleSendingProtocol(Protocol): def connectionMade(self): try: self.transport.write(notBytes) except TypeError: result.append(True) self.transport.write(b"bytes") self.transport.loseConnection() d = self.writeBeforeHandshakeTest(SimpleSendingProtocol, b"bytes") return d.addCallback(lambda ign: self.assertEqual(result, [True])) def test_multipleWrites(self): """ If multiple separate TLS messages are received in a single chunk from the underlying transport, all of the application bytes from each message are delivered to the application-level protocol. """ bytes = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i'] class SimpleSendingProtocol(Protocol): def connectionMade(self): for b in bytes: self.transport.write(b) clientFactory = ClientFactory() clientFactory.protocol = SimpleSendingProtocol clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(sum(map(len, bytes))) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol, collapsingPumpPolicy) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. 
def cbConnectionDone(ignored): self.assertEqual(b"".join(serverProtocol.received), b''.join(bytes)) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_hugeWrite(self): """ If a very long string is passed to L{TLSMemoryBIOProtocol.write}, any trailing part of it which cannot be send immediately is buffered and sent later. """ bytes = b"some bytes" factor = 8192 class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.write(bytes * factor) clientFactory = ClientFactory() clientFactory.protocol = SimpleSendingProtocol clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes) * factor) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. def cbConnectionDone(ignored): self.assertEqual(b"".join(serverProtocol.received), bytes * factor) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_disorderlyShutdown(self): """ If a L{TLSMemoryBIOProtocol} loses its connection unexpectedly, this is reported to the application. """ clientConnectionLost = Deferred() clientFactory = ClientFactory() clientFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( clientConnectionLost)) clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) # Client speaks first, so the server can be dumb. 
serverProtocol = Protocol() loopbackAsync(serverProtocol, sslClientProtocol) # Now destroy the connection. serverProtocol.transport.loseConnection() # And when the connection completely dies, check the reason. def cbDisconnected(clientProtocol): clientProtocol.lostConnectionReason.trap(Error) clientConnectionLost.addCallback(cbDisconnected) return clientConnectionLost def test_loseConnectionAfterHandshake(self): """ L{TLSMemoryBIOProtocol.loseConnection} sends a TLS close alert and shuts down the underlying connection cleanly on both sides, after transmitting all buffered data. """ class NotifyingProtocol(ConnectionLostNotifyingProtocol): def __init__(self, onConnectionLost): ConnectionLostNotifyingProtocol.__init__(self, onConnectionLost) self.data = [] def dataReceived(self, bytes): self.data.append(bytes) clientConnectionLost = Deferred() clientFactory = ClientFactory() clientProtocol = NotifyingProtocol(clientConnectionLost) clientFactory.protocol = lambda: clientProtocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverConnectionLost = Deferred() serverProtocol = NotifyingProtocol(serverConnectionLost) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = ServerTLSContext() wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) loopbackAsync(sslServerProtocol, sslClientProtocol) chunkOfBytes = b"123456890" * 100000 # Wait for the handshake before dropping the connection. def cbHandshake(ignored): # Write more than a single bio_read, to ensure client will still # have some data it needs to write when it receives the TLS close # alert, and that simply doing a single bio_read won't be # sufficient. 
Thus we will verify that any amount of buffered data # will be written out before the connection is closed, rather than # just small amounts that can be returned in a single bio_read: clientProtocol.transport.write(chunkOfBytes) serverProtocol.transport.loseConnection() # Now wait for the client and server to notice. return gatherResults([clientConnectionLost, serverConnectionLost]) handshakeDeferred.addCallback(cbHandshake) # Wait for the connection to end, then make sure the client and server # weren't notified of a handshake failure that would cause the test to # fail. def cbConnectionDone(result): (clientProtocol, serverProtocol) = result clientProtocol.lostConnectionReason.trap(ConnectionDone) serverProtocol.lostConnectionReason.trap(ConnectionDone) # The server should have received all bytes sent by the client: self.assertEqual(b"".join(serverProtocol.data), chunkOfBytes) # The server should have closed its underlying transport, in # addition to whatever it did to shut down the TLS layer. self.assertTrue(serverProtocol.transport.q.disconnect) # The client should also have closed its underlying transport once # it saw the server shut down the TLS layer, so as to avoid relying # on the server to close the underlying connection. self.assertTrue(clientProtocol.transport.q.disconnect) handshakeDeferred.addCallback(cbConnectionDone) return handshakeDeferred def test_connectionLostOnlyAfterUnderlyingCloses(self): """ The user protocol's connectionLost is only called when transport underlying TLS is disconnected. 
""" class LostProtocol(Protocol): disconnected = None def connectionLost(self, reason): self.disconnected = reason wrapperFactory = TLSMemoryBIOFactory(ClientTLSContext(), True, ClientFactory()) protocol = LostProtocol() tlsProtocol = TLSMemoryBIOProtocol(wrapperFactory, protocol) transport = StringTransport() tlsProtocol.makeConnection(transport) # Pretend TLS shutdown finished cleanly; the underlying transport # should be told to close, but the user protocol should not yet be # notified: tlsProtocol._tlsShutdownFinished(None) self.assertEqual(transport.disconnecting, True) self.assertEqual(protocol.disconnected, None) # Now close the underlying connection; the user protocol should be # notified with the given reason (since TLS closed cleanly): tlsProtocol.connectionLost(Failure(ConnectionLost("ono"))) self.assertTrue(protocol.disconnected.check(ConnectionLost)) self.assertEqual(protocol.disconnected.value.args, ("ono",)) def test_loseConnectionTwice(self): """ If TLSMemoryBIOProtocol.loseConnection is called multiple times, all but the first call have no effect. """ wrapperFactory = TLSMemoryBIOFactory(ClientTLSContext(), True, ClientFactory()) tlsProtocol = TLSMemoryBIOProtocol(wrapperFactory, Protocol()) transport = StringTransport() tlsProtocol.makeConnection(transport) self.assertEqual(tlsProtocol.disconnecting, False) # Make sure loseConnection calls _shutdownTLS the first time (mostly # to make sure we've overriding it correctly): calls = [] def _shutdownTLS(shutdown=tlsProtocol._shutdownTLS): calls.append(1) return shutdown() tlsProtocol._shutdownTLS = _shutdownTLS tlsProtocol.loseConnection() self.assertEqual(tlsProtocol.disconnecting, True) self.assertEqual(calls, [1]) # Make sure _shutdownTLS isn't called a second time: tlsProtocol.loseConnection() self.assertEqual(calls, [1]) def test_unexpectedEOF(self): """ Unexpected disconnects get converted to ConnectionLost errors. 
""" tlsClient, tlsServer, handshakeDeferred, disconnectDeferred = ( self.handshakeProtocols()) serverProtocol = tlsServer.wrappedProtocol data = [] reason = [] serverProtocol.dataReceived = data.append serverProtocol.connectionLost = reason.append # Write data, then disconnect *underlying* transport, resulting in an # unexpected TLS disconnect: def handshakeDone(ign): tlsClient.write(b"hello") tlsClient.transport.loseConnection() handshakeDeferred.addCallback(handshakeDone) # Receiver should be disconnected, with ConnectionLost notification # (masking the Unexpected EOF SSL error): def disconnected(ign): self.assertTrue(reason[0].check(ConnectionLost), reason[0]) disconnectDeferred.addCallback(disconnected) return disconnectDeferred def test_errorWriting(self): """ Errors while writing cause the protocols to be disconnected. """ tlsClient, tlsServer, handshakeDeferred, disconnectDeferred = ( self.handshakeProtocols()) reason = [] tlsClient.wrappedProtocol.connectionLost = reason.append # Pretend TLS connection is unhappy sending: class Wrapper(object): def __init__(self, wrapped): self._wrapped = wrapped def __getattr__(self, attr): return getattr(self._wrapped, attr) def send(self, *args): raise Error("ONO!") tlsClient._tlsConnection = Wrapper(tlsClient._tlsConnection) # Write some data: def handshakeDone(ign): tlsClient.write(b"hello") handshakeDeferred.addCallback(handshakeDone) # Failed writer should be disconnected with SSL error: def disconnected(ign): self.assertTrue(reason[0].check(Error), reason[0]) disconnectDeferred.addCallback(disconnected) return disconnectDeferred class TLSProducerTests(TestCase): """ The TLS transport must support the IConsumer interface. 
""" def setupStreamingProducer(self, transport=None): class HistoryStringTransport(StringTransport): def __init__(self): StringTransport.__init__(self) self.producerHistory = [] def pauseProducing(self): self.producerHistory.append("pause") StringTransport.pauseProducing(self) def resumeProducing(self): self.producerHistory.append("resume") StringTransport.resumeProducing(self) def stopProducing(self): self.producerHistory.append("stop") StringTransport.stopProducing(self) clientProtocol, tlsProtocol = buildTLSProtocol(transport=transport) producer = HistoryStringTransport() clientProtocol.transport.registerProducer(producer, True) self.assertEqual(tlsProtocol.transport.streaming, True) return clientProtocol, tlsProtocol, producer def flushTwoTLSProtocols(self, tlsProtocol, serverTLSProtocol): """ Transfer bytes back and forth between two TLS protocols. """ # We want to make sure all bytes are passed back and forth; JP # estimated that 3 rounds should be enough: for i in range(3): clientData = tlsProtocol.transport.value() if clientData: serverTLSProtocol.dataReceived(clientData) tlsProtocol.transport.clear() serverData = serverTLSProtocol.transport.value() if serverData: tlsProtocol.dataReceived(serverData) serverTLSProtocol.transport.clear() if not serverData and not clientData: break self.assertEqual(tlsProtocol.transport.value(), b"") self.assertEqual(serverTLSProtocol.transport.value(), b"") def test_streamingProducerPausedInNormalMode(self): """ When the TLS transport is not blocked on reads, it correctly calls pauseProducing on the registered producer. 
""" _, tlsProtocol, producer = self.setupStreamingProducer() # The TLS protocol's transport pretends to be full, pausing its # producer: tlsProtocol.transport.producer.pauseProducing() self.assertEqual(producer.producerState, 'paused') self.assertEqual(producer.producerHistory, ['pause']) self.assertEqual(tlsProtocol._producer._producerPaused, True) def test_streamingProducerResumedInNormalMode(self): """ When the TLS transport is not blocked on reads, it correctly calls resumeProducing on the registered producer. """ _, tlsProtocol, producer = self.setupStreamingProducer() tlsProtocol.transport.producer.pauseProducing() self.assertEqual(producer.producerHistory, ['pause']) # The TLS protocol's transport pretends to have written everything # out, so it resumes its producer: tlsProtocol.transport.producer.resumeProducing() self.assertEqual(producer.producerState, 'producing') self.assertEqual(producer.producerHistory, ['pause', 'resume']) self.assertEqual(tlsProtocol._producer._producerPaused, False) def test_streamingProducerPausedInWriteBlockedOnReadMode(self): """ When the TLS transport is blocked on reads, it correctly calls pauseProducing on the registered producer. """ clientProtocol, tlsProtocol, producer = self.setupStreamingProducer() # Write to TLS transport. Because we do this before the initial TLS # handshake is finished, writing bytes triggers a WantReadError, # indicating that until bytes are read for the handshake, more bytes # cannot be written. Thus writing bytes before the handshake should # cause the producer to be paused: clientProtocol.transport.write(b"hello") self.assertEqual(producer.producerState, 'paused') self.assertEqual(producer.producerHistory, ['pause']) self.assertEqual(tlsProtocol._producer._producerPaused, True) def test_streamingProducerResumedInWriteBlockedOnReadMode(self): """ When the TLS transport is blocked on reads, it correctly calls resumeProducing on the registered producer. 
""" clientProtocol, tlsProtocol, producer = self.setupStreamingProducer() # Write to TLS transport, triggering WantReadError; this should cause # the producer to be paused. We use a large chunk of data to make sure # large writes don't trigger multiple pauses: clientProtocol.transport.write(b"hello world" * 320000) self.assertEqual(producer.producerHistory, ['pause']) # Now deliver bytes that will fix the WantRead condition; this should # unpause the producer: serverProtocol, serverTLSProtocol = buildTLSProtocol(server=True) self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol) self.assertEqual(producer.producerHistory, ['pause', 'resume']) self.assertEqual(tlsProtocol._producer._producerPaused, False) # Make sure we haven't disconnected for some reason: self.assertEqual(tlsProtocol.transport.disconnecting, False) self.assertEqual(producer.producerState, 'producing') def test_streamingProducerTwice(self): """ Registering a streaming producer twice throws an exception. """ clientProtocol, tlsProtocol, producer = self.setupStreamingProducer() originalProducer = tlsProtocol._producer producer2 = object() self.assertRaises(RuntimeError, clientProtocol.transport.registerProducer, producer2, True) self.assertIdentical(tlsProtocol._producer, originalProducer) def test_streamingProducerUnregister(self): """ Unregistering a streaming producer removes it, reverting to initial state. """ clientProtocol, tlsProtocol, producer = self.setupStreamingProducer() clientProtocol.transport.unregisterProducer() self.assertEqual(tlsProtocol._producer, None) self.assertEqual(tlsProtocol.transport.producer, None) def loseConnectionWithProducer(self, writeBlockedOnRead): """ Common code for tests involving writes by producer after loseConnection is called. 
""" clientProtocol, tlsProtocol, producer = self.setupStreamingProducer() serverProtocol, serverTLSProtocol = buildTLSProtocol(server=True) if not writeBlockedOnRead: # Do the initial handshake before write: self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol) else: # In this case the write below will trigger write-blocked-on-read # condition... pass # Now write, then lose connection: clientProtocol.transport.write(b"x ") clientProtocol.transport.loseConnection() self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol) # Underlying transport should not have loseConnection called yet, nor # should producer be stopped: self.assertEqual(tlsProtocol.transport.disconnecting, False) self.assertFalse("stop" in producer.producerHistory) # Writes from client to server should continue to go through, since we # haven't unregistered producer yet: clientProtocol.transport.write(b"hello") clientProtocol.transport.writeSequence([b" ", b"world"]) # Unregister producer; this should trigger TLS shutdown: clientProtocol.transport.unregisterProducer() self.assertNotEqual(tlsProtocol.transport.value(), b"") self.assertEqual(tlsProtocol.transport.disconnecting, False) # Additional writes should not go through: clientProtocol.transport.write(b"won't") clientProtocol.transport.writeSequence([b"won't!"]) # Finish TLS close handshake: self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol) self.assertEqual(tlsProtocol.transport.disconnecting, True) # Bytes made it through, as long as they were written before producer # was unregistered: self.assertEqual(b"".join(serverProtocol.received), b"x hello world") def test_streamingProducerLoseConnectionWithProducer(self): """ loseConnection() waits for the producer to unregister itself, then does a clean TLS close alert, then closes the underlying connection. 
""" return self.loseConnectionWithProducer(False) def test_streamingProducerLoseConnectionWithProducerWBOR(self): """ Even when writes are blocked on reading, loseConnection() waits for the producer to unregister itself, then does a clean TLS close alert, then closes the underlying connection. """ return self.loseConnectionWithProducer(True) def test_streamingProducerBothTransportsDecideToPause(self): """ pauseProducing() events can come from both the TLS transport layer and the underlying transport. In this case, both decide to pause, underlying first. """ class PausingStringTransport(StringTransport): _didPause = False def write(self, data): if not self._didPause and self.producer is not None: self._didPause = True self.producer.pauseProducing() StringTransport.write(self, data) class TLSConnection(object): def __init__(self): self.l = [] def send(self, bytes): # on first write, don't send all bytes: if not self.l: bytes = bytes[:-1] # pause on second write: if len(self.l) == 1: self.l.append("paused") raise WantReadError() # otherwise just take in data: self.l.append(bytes) return len(bytes) def bio_write(self, data): pass def bio_read(self, size): return b'X' def recv(self, size): raise WantReadError() transport = PausingStringTransport() clientProtocol, tlsProtocol, producer = self.setupStreamingProducer( transport) self.assertEqual(producer.producerState, 'producing') # Shove in fake TLSConnection that will raise WantReadError the second # time send() is called. This will allow us to have bytes written to # to the PausingStringTransport, so it will pause the producer. Then, # WantReadError will be thrown, triggering the TLS transport's # producer code path. 
tlsProtocol._tlsConnection = TLSConnection() clientProtocol.transport.write(b"hello") self.assertEqual(producer.producerState, 'paused') self.assertEqual(producer.producerHistory, ['pause']) # Now, underlying transport resumes, and then we deliver some data to # TLS transport so that it will resume: tlsProtocol.transport.producer.resumeProducing() self.assertEqual(producer.producerState, 'producing') self.assertEqual(producer.producerHistory, ['pause', 'resume']) tlsProtocol.dataReceived(b"hello") self.assertEqual(producer.producerState, 'producing') self.assertEqual(producer.producerHistory, ['pause', 'resume']) def test_streamingProducerStopProducing(self): """ If the underlying transport tells its producer to stopProducing(), this is passed on to the high-level producer. """ _, tlsProtocol, producer = self.setupStreamingProducer() tlsProtocol.transport.producer.stopProducing() self.assertEqual(producer.producerState, 'stopped') def test_nonStreamingProducer(self): """ Non-streaming producers get wrapped as streaming producers. 
""" clientProtocol, tlsProtocol = buildTLSProtocol() producer = NonStreamingProducer(clientProtocol.transport) # Register non-streaming producer: clientProtocol.transport.registerProducer(producer, False) streamingProducer = tlsProtocol.transport.producer._producer # Verify it was wrapped into streaming producer: self.assertIsInstance(streamingProducer, _PullToPush) self.assertEqual(streamingProducer._producer, producer) self.assertEqual(streamingProducer._consumer, clientProtocol.transport) self.assertEqual(tlsProtocol.transport.streaming, True) # Verify the streaming producer was started, and ran until the end: def done(ignore): # Our own producer is done: self.assertEqual(producer.consumer, None) # The producer has been unregistered: self.assertEqual(tlsProtocol.transport.producer, None) # The streaming producer wrapper knows it's done: self.assertEqual(streamingProducer._finished, True) producer.result.addCallback(done) serverProtocol, serverTLSProtocol = buildTLSProtocol(server=True) self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol) return producer.result def test_interface(self): """ L{_ProducerMembrane} implements L{IPushProducer}. """ producer = StringTransport() membrane = _ProducerMembrane(producer) self.assertTrue(verifyObject(IPushProducer, membrane)) def registerProducerAfterConnectionLost(self, streaming): """ If a producer is registered after the transport has disconnected, the producer is not used, and its stopProducing method is called. 
""" clientProtocol, tlsProtocol = buildTLSProtocol() clientProtocol.connectionLost = lambda reason: reason.trap(Error) class Producer(object): stopped = False def resumeProducing(self): return 1/0 # this should never be called def stopProducing(self): self.stopped = True # Disconnect the transport: tlsProtocol.connectionLost(Failure(ConnectionDone())) # Register the producer; startProducing should not be called, but # stopProducing will: producer = Producer() tlsProtocol.registerProducer(producer, False) self.assertIdentical(tlsProtocol.transport.producer, None) self.assertEqual(producer.stopped, True) def test_streamingProducerAfterConnectionLost(self): """ If a streaming producer is registered after the transport has disconnected, the producer is not used, and its stopProducing method is called. """ self.registerProducerAfterConnectionLost(True) def test_nonStreamingProducerAfterConnectionLost(self): """ If a non-streaming producer is registered after the transport has disconnected, the producer is not used, and its stopProducing method is called. """ self.registerProducerAfterConnectionLost(False) class NonStreamingProducer(object): """ A pull producer which writes 10 times only. """ counter = 0 stopped = False def __init__(self, consumer): self.consumer = consumer self.result = Deferred() def resumeProducing(self): if self.counter < 10: self.consumer.write(intToBytes(self.counter)) self.counter += 1 if self.counter == 10: self.consumer.unregisterProducer() self._done() else: if self.consumer is None: raise RuntimeError("BUG: resume after unregister/stop.") def pauseProducing(self): raise RuntimeError("BUG: pause should never be called.") def _done(self): self.consumer = None d = self.result del self.result d.callback(None) def stopProducing(self): self.stopped = True self._done() class NonStreamingProducerTests(TestCase): """ Non-streaming producers can be adapted into being streaming producers. 
""" def streamUntilEnd(self, consumer): """ Verify the consumer writes out all its data, but is not called after that. """ nsProducer = NonStreamingProducer(consumer) streamingProducer = _PullToPush(nsProducer, consumer) consumer.registerProducer(streamingProducer, True) # The producer will call unregisterProducer(), and we need to hook # that up so the streaming wrapper is notified; the # TLSMemoryBIOProtocol will have to do this itself, which is tested # elsewhere: def unregister(orig=consumer.unregisterProducer): orig() streamingProducer.stopStreaming() consumer.unregisterProducer = unregister done = nsProducer.result def doneStreaming(_): # All data was streamed, and the producer unregistered itself: self.assertEqual(consumer.value(), b"0123456789") self.assertEqual(consumer.producer, None) # And the streaming wrapper stopped: self.assertEqual(streamingProducer._finished, True) done.addCallback(doneStreaming) # Now, start streaming: streamingProducer.startStreaming() return done def test_writeUntilDone(self): """ When converted to a streaming producer, the non-streaming producer writes out all its data, but is not called after that. """ consumer = StringTransport() return self.streamUntilEnd(consumer) def test_pause(self): """ When the streaming producer is paused, the underlying producer stops getting resumeProducing calls. 
""" class PausingStringTransport(StringTransport): writes = 0 def __init__(self): StringTransport.__init__(self) self.paused = Deferred() def write(self, data): self.writes += 1 StringTransport.write(self, data) if self.writes == 3: self.producer.pauseProducing() d = self.paused del self.paused d.callback(None) consumer = PausingStringTransport() nsProducer = NonStreamingProducer(consumer) streamingProducer = _PullToPush(nsProducer, consumer) consumer.registerProducer(streamingProducer, True) # Make sure the consumer does not continue: def shouldNotBeCalled(ignore): self.fail("BUG: The producer should not finish!") nsProducer.result.addCallback(shouldNotBeCalled) done = consumer.paused def paused(ignore): # The CooperatorTask driving the producer was paused: self.assertEqual(streamingProducer._coopTask._pauseCount, 1) done.addCallback(paused) # Now, start streaming: streamingProducer.startStreaming() return done def test_resume(self): """ When the streaming producer is paused and then resumed, the underlying producer starts getting resumeProducing calls again after the resume. The test will never finish (or rather, time out) if the resume producing call is not working. """ class PausingStringTransport(StringTransport): writes = 0 def write(self, data): self.writes += 1 StringTransport.write(self, data) if self.writes == 3: self.producer.pauseProducing() self.producer.resumeProducing() consumer = PausingStringTransport() return self.streamUntilEnd(consumer) def test_stopProducing(self): """ When the streaming producer is stopped by the consumer, the underlying producer is stopped, and streaming is stopped. 
""" class StoppingStringTransport(StringTransport): writes = 0 def write(self, data): self.writes += 1 StringTransport.write(self, data) if self.writes == 3: self.producer.stopProducing() consumer = StoppingStringTransport() nsProducer = NonStreamingProducer(consumer) streamingProducer = _PullToPush(nsProducer, consumer) consumer.registerProducer(streamingProducer, True) done = nsProducer.result def doneStreaming(_): # Not all data was streamed, and the producer was stopped: self.assertEqual(consumer.value(), b"012") self.assertEqual(nsProducer.stopped, True) # And the streaming wrapper stopped: self.assertEqual(streamingProducer._finished, True) done.addCallback(doneStreaming) # Now, start streaming: streamingProducer.startStreaming() return done def resumeProducingRaises(self, consumer, expectedExceptions): """ Common implementation for tests where the underlying producer throws an exception when its resumeProducing is called. """ class ThrowingProducer(NonStreamingProducer): def resumeProducing(self): if self.counter == 2: return 1/0 else: NonStreamingProducer.resumeProducing(self) nsProducer = ThrowingProducer(consumer) streamingProducer = _PullToPush(nsProducer, consumer) consumer.registerProducer(streamingProducer, True) # Register log observer: loggedMsgs = [] log.addObserver(loggedMsgs.append) self.addCleanup(log.removeObserver, loggedMsgs.append) # Make consumer unregister do what TLSMemoryBIOProtocol would do: def unregister(orig=consumer.unregisterProducer): orig() streamingProducer.stopStreaming() consumer.unregisterProducer = unregister # Start streaming: streamingProducer.startStreaming() done = streamingProducer._coopTask.whenDone() done.addErrback(lambda reason: reason.trap(TaskStopped)) def stopped(ign): self.assertEqual(consumer.value(), b"01") # Any errors from resumeProducing were logged: errors = self.flushLoggedErrors() self.assertEqual(len(errors), len(expectedExceptions)) for f, (expected, msg), logMsg in zip( errors, expectedExceptions, 
loggedMsgs): self.assertTrue(f.check(expected)) self.assertIn(msg, logMsg['why']) # And the streaming wrapper stopped: self.assertEqual(streamingProducer._finished, True) done.addCallback(stopped) return done def test_resumeProducingRaises(self): """ If the underlying producer raises an exception when resumeProducing is called, the streaming wrapper should log the error, unregister from the consumer and stop streaming. """ consumer = StringTransport() done = self.resumeProducingRaises( consumer, [(ZeroDivisionError, "failed, producing will be stopped")]) def cleanShutdown(ignore): # Producer was unregistered from consumer: self.assertEqual(consumer.producer, None) done.addCallback(cleanShutdown) return done def test_resumeProducingRaiseAndUnregisterProducerRaises(self): """ If the underlying producer raises an exception when resumeProducing is called, the streaming wrapper should log the error, unregister from the consumer and stop streaming even if the unregisterProducer call also raise. """ consumer = StringTransport() def raiser(): raise RuntimeError() consumer.unregisterProducer = raiser return self.resumeProducingRaises( consumer, [(ZeroDivisionError, "failed, producing will be stopped"), (RuntimeError, "failed to unregister producer")]) def test_stopStreamingTwice(self): """ stopStreaming() can be called more than once without blowing up. This is useful for error-handling paths. """ consumer = StringTransport() nsProducer = NonStreamingProducer(consumer) streamingProducer = _PullToPush(nsProducer, consumer) streamingProducer.startStreaming() streamingProducer.stopStreaming() streamingProducer.stopStreaming() self.assertEqual(streamingProducer._finished, True) def test_interface(self): """ L{_PullToPush} implements L{IPushProducer}. """ consumer = StringTransport() nsProducer = NonStreamingProducer(consumer) streamingProducer = _PullToPush(nsProducer, consumer) self.assertTrue(verifyObject(IPushProducer, streamingProducer))
unknown
codeparrot/codeparrot-clean
# Django migration: creates a model with the deprecated IPAddressField so the
# migration framework's handling of deprecated fields can be exercised.
from django.db import migrations, models


class Migration(migrations.Migration):

    # Initial migration: no dependencies on other migrations.
    dependencies = []

    operations = [
        migrations.CreateModel(
            name="IPAddressField",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                # NOTE: models.IPAddressField (not GenericIPAddressField) is
                # used deliberately here — presumably to test the deprecated
                # field path; confirm before "fixing".
                ("ip", models.IPAddressField(null=True, blank=True)),
            ],
        ),
    ]
python
github
https://github.com/django/django
tests/migrations/deprecated_field_migrations/0001_initial.py
# NOTE(review): legacy Python 2 module (BeautifulSoup 3, `print` statements and
# `except E, e` syntax appear later in this file). Screen-scrapes an
# III/Millennium-style library catalog.
from __future__ import with_statement
import logging
import urllib
import string
import sys
import re
import traceback
import time
import util
import models
import BeautifulSoup


def find_checkout_form(soup):
    # The checkout form's name differs between catalog versions, so match
    # either known name via a predicate.
    return soup.find('form', {'name' : lambda(x):x=='process_form_2' or x=='checkout_form'})


class LoginPage:
    """Wraps the catalog's login page HTML and performs the login POST."""

    def __init__(self, http, html, url):
        # http: util.HttpConversation used for the POST; url: page URL, used
        # as fallback form action.
        self.soup = BeautifulSoup.BeautifulSoup(html)
        self.http = http
        self.url = url

    def isLoginPage(self):
        # A page with a 'code' (barcode) input is treated as a login page.
        return self.soup.find('input', {'name':'code'}) != None

    def makePost(self, username, password, name):
        # Build the (action, urlencoded-data) pair for the login form.
        idinput = self.soup.find('input', {'name':'code'})
        form = idinput.findPrevious('form')
        action = None
        try:
            action = form['action']
        except KeyError:
            # No action attribute: post back to the page's own URL.
            action = self.url
        params = util.makePostData(form)
        # Fill the visible fields
        params['code'] = username
        if form.find('input', {'name':'pin'}) != None:
            logging.debug("found pin field")
            params['pin'] = password
        if form.find('input', {'name':'name'}) != None:
            logging.debug("found name field")
            params['name'] = name
        # Spring web flow params aren't inside the form since they're submitted by js
        flow_params = util.makePostData(self.soup, {'type':'hidden', 'name':('lt','_eventId')})
        params.update(flow_params)
        data = util.urlencode(params)
        return action, data

    def login(self, username, password, name):
        # Submit credentials; returns the post-login HTML or raises on failure.
        if not self.isLoginPage():
            raise util.NavStateException("not a login page")
        action, data = self.makePost(username, password, name)
        logging.debug("action=%s, data=%s" % (action, data))
        html = self.http.post(action, data, {'Content-Type': 'application/x-www-form-urlencoded'});
        # check to see if this is a login page
        newPage = LoginPage(None, html, None)
        if newPage.isLoginPage():
            logging.debug("found another login page")
            raise util.LoginException("Library login failed")
        # double check for a login error message
        util.check_login_error(html)
        #logging.debug(html)
        return html


class AccountOverviewPage:
    """Wraps the account-overview page reached after a successful login."""

    def __init__(self, http, html):
        self.soup = BeautifulSoup.BeautifulSoup(html)
        self.http = http
#print self.soup.prettify() def itemsOutUrl(self): link = self.soup.find('a', {'href':re.compile('/items')}) if link == self.soup.Null: return None itemsOutURL = link['href'] itemsOutURL = itemsOutURL.replace('&amp;', '&') return itemsOutURL def clickItemsOut(self): url = self.itemsOutUrl() if url == None: return None return self.http.get(self.itemsOutUrl()) def contains_items_out(self): return find_checkout_form(self.soup) is not None class ItemsOutPage: def __init__(self, http, html, url): self.http = http self.url = url self.soup = BeautifulSoup.BeautifulSoup(html) self.form = find_checkout_form(self.soup) #print self.soup.prettify() self.itemsOut = {} try: self.parse() except Exception, e: logging.error("error parsing items out") if http is not None: http.log_history(logging.error) raise def parse(self): noentries = self.soup.find('tr', {'class':'patFuncNoEntries'}) if noentries != None: self.itemsOut = {} return self.itemsOut rows = self.form.findAll('tr', {'class':'patFuncEntry'}) for row in rows: #ignore comments, etc if not isinstance(row, BeautifulSoup.Tag): continue item = models.Item() # title/author link <a href="/patroninfo~S51/1443596/item&1816471"> The cleaner / Brett Battles. </a> # or sometimes it's not a link: /Making Marines;DVD;;; titletd = row.find('td', {'class' : 'patFuncTitle'}) #print str(titletd) titlelink = titletd.find('a') if titlelink is None: titlestr = titletd.string else: # <a href="...">Time Magazine <span>August 2011</span></a> titlestr = titlelink.text #print titlestr parts = titlestr.split('/') #if there's nothing before the /, remove it. We will assume what came after the / is the title # ie. 
/Making Marines;DVD;;; if len(parts)>1 and parts[0].strip() == '': del(parts[0]) # for some reason many title links have a superfluous ' /' at the end -- remove this title = parts[0].strip(' /.') title = title.strip() title = util.unescape(title) item.title = util.stripNonAscii(title) #print item.title if len(parts) > 1: author = parts[1] author = author.strip(' .') if author.startswith("by "): author = author.replace("by ", "", 1) # sometimes there is extraneous information after the author's name, ex: Dylan, Bob, 1941- L = author.split(',') author = ','.join(L[0:2]) author = util.unescape(author) item.author = util.stripNonAscii(author) #<td align="left" class="patFuncStatus"> DUE 12-22-08 <em><b> RENEWAL SUCCESSFUL</b><br />Now due 12-30-08</em> <span class="patFuncRenewCount">Renewed 1 time</span> #<td align="left" class="patFuncStatus">DUE 11-23-08</td> #<td align="left" class="patFuncStatus">DUE 11-23-08 +1 HOLD</td> #<td align="left" class="patFuncStatus"> DUE 02-27-09ORPHAN SHELF <span class="patFuncRenewCount">Renewed 2 times</span></td> statustd = row.find('td', {'class' : 'patFuncStatus'}) duedate = None if str(statustd).find("Now due") != -1: em = statustd.em duetext = em.contents[2].strip() duewords = duetext.split() duedate = duewords[2] else: duetext = statustd.contents[0].strip() duewords = duetext.split() duedate = duewords[1] if len(duedate) > 8: duedate = duedate[0:8] #print duedate item.dueDate = util.toDatetime(duedate) # get renewal error if any errors = statustd.find('font', {'color':'red'}) if errors != None and len(errors)==1: error = errors.contents[0].strip() if error.find("RENEWED") != -1: item.renewalError = error # <span class="patFuncRenewCount">Renewed 1 time</span> renewedspan = row.find('span', {'class' : 'patFuncRenewCount'}) if renewedspan != row.Null: timesRenewed = renewedspan.string.strip() timesRenewed = util.unescape(timesRenewed) words = timesRenewed.split(' ') item.timesRenewed = int(words[1]) else: item.timesRenewed = 0 # 
renew checkbox renewitem = row.find('input', {'name': re.compile('renew')}) item.renewitemkey = None if renewitem != None: item.renewitemkey = renewitem['name'] item.renewitemvalue = renewitem['value'] self.itemsOut[item.title] = item #print item.title + ' ' + str(item.dueDate) def makeRenewalPost(self, titles): params = {} params = util.makePostData(self.form) data = util.urlencode(params) for title in titles: if self.itemsOut.has_key(title) == False: continue renewalParam = {} item = self.itemsOut[title] if item.renewitemkey != None: # not all items have a renewal checkbox, currently our gadget UI doesn't ever suppress it though renewalParam[item.renewitemkey] = item.renewitemvalue data += "&" + util.urlencode(renewalParam) action = None try: action = self.form['action'] except KeyError: action = self.url return action, data def renew(self, titles): action, data = self.makeRenewalPost(titles) html = self.http.post(action, data, headers={'Content-Type': 'application/x-www-form-urlencoded'}) # this will be the list of what's checked out/renewed/etc newItemsOutPage = ItemsOutPage(self.http, html, self.url) self.itemsOut = {} self.itemsOut = newItemsOutPage.itemsOut #print self.itemsOut.values() for currentItem in self.itemsOut.values(): #print currentItem.title if (currentItem.title in titles) & (currentItem.renewalError == None): currentItem.renewed = True else: currentItem.renewed = False return self.itemsOut class LibraryBot: def __init__(self, url, userid, password, name=""): self.http = util.HttpConversation(url) html = None html = self.http.get(url) #print url=' + url + ' content=' + html loginPage = LoginPage(self.http, html, url) logging.debug("name=%s" % (name)) html = loginPage.login(userid, password, name) self.accountOverviewPage = AccountOverviewPage(self.http, html) self.itemsOutPage = None if self.accountOverviewPage.contains_items_out(): self.itemsOutPage = ItemsOutPage(self.http, html, 'account_overview') self.error = None def itemsOut(self): if 
self.itemsOutPage == None: html = self.accountOverviewPage.clickItemsOut() if html == None: itemsOut = {} return itemsOut self.itemsOutPage = ItemsOutPage(self.http, html, self.accountOverviewPage.itemsOutUrl()) return self.itemsOutPage.itemsOut def renew(self, titles): if self.itemsOutPage == None: html = self.accountOverviewPage.clickItemsOut() if html == None: itemsOut = {} return itemsOut self.itemsOutPage = ItemsOutPage(self.http, html, self.accountOverviewPage.itemsOutUrl()) return self.itemsOutPage.renew(titles) def main(): #bot = LibraryBot('https://sflib1.sfpl.org/patroninfo~S1', '21223024932231', '3023') #bot = LibraryBot('https://www.saclibrarycatalog.org/patroninfo~S51', '23029013264660', '0866') #bot = LibraryBot('http://waldo.library.nashville.org/patroninfo', '25192005235094', '8706') bot = LibraryBot('https://catalog.kcls.org/patroninfo~S1', '0032442113', '6786') itemsOut = bot.itemsOut() if itemsOut == None: print 'itemsout failed: ', bot.error for item in itemsOut.values(): print item.title if __name__ == '__main__': main() ''' for k,v in item.iteritems(): if k.startswith("soup"): print k ,'=', v.prettify() else: print k, '=', v '''
unknown
codeparrot/codeparrot-clean
// Copyright 2023 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package validate import ( "errors" "time" "github.com/anishathalye/porcupine" "github.com/google/go-cmp/cmp" "go.uber.org/zap" "go.etcd.io/etcd/tests/v3/robustness/model" ) var ( errRespNotMatched = errors.New("response didn't match expected") errFutureRevRespRequested = errors.New("request about a future rev with response") ) func validateLinearizableOperationsAndVisualize(lg *zap.Logger, operations []porcupine.Operation, timeout time.Duration) LinearizationResult { lg.Info("Validating linearizable operations", zap.Duration("timeout", timeout)) start := time.Now() check, info := porcupine.CheckOperationsVerbose(model.NonDeterministicModel, operations, timeout) result := LinearizationResult{ Info: info, Model: model.NonDeterministicModel, } switch check { case porcupine.Ok: result.Status = Success lg.Info("Linearization success", zap.Duration("duration", time.Since(start))) case porcupine.Unknown: result.Status = Failure result.Message = "timed out" result.Timeout = true lg.Error("Linearization timed out", zap.Duration("duration", time.Since(start))) case porcupine.Illegal: result.Status = Failure result.Message = "illegal" lg.Error("Linearization illegal", zap.Duration("duration", time.Since(start))) default: result.Status = Failure result.Message = "unknown" } return result } func validateSerializableOperations(lg *zap.Logger, operations []porcupine.Operation, replay *model.EtcdReplay) 
Result { lg.Info("Validating serializable operations") start := time.Now() err := validateSerializableOperationsError(lg, operations, replay) if err != nil { lg.Error("Serializable validation failed", zap.Duration("duration", time.Since(start)), zap.Error(err)) } lg.Info("Serializable validation success", zap.Duration("duration", time.Since(start))) return ResultFromError(err) } func validateSerializableOperationsError(lg *zap.Logger, operations []porcupine.Operation, replay *model.EtcdReplay) (lastErr error) { for _, read := range operations { request := read.Input.(model.EtcdRequest) response := read.Output.(model.MaybeEtcdResponse) err := validateSerializableRead(lg, replay, request, response) if err != nil { lastErr = err } } return lastErr } func validateSerializableRead(lg *zap.Logger, replay *model.EtcdReplay, request model.EtcdRequest, response model.MaybeEtcdResponse) error { if response.Persisted || response.Error != "" { return nil } state, err := replay.StateForRevision(request.Range.Revision) if err != nil { if response.Error == model.ErrEtcdFutureRev.Error() { return nil } lg.Error("Failed validating serializable operation", zap.Any("request", request), zap.Any("response", response)) return errFutureRevRespRequested } _, expectResp := state.Step(request) if diff := cmp.Diff(response.EtcdResponse.Range, expectResp.Range); diff != "" { lg.Error("Failed validating serializable operation", zap.Any("request", request), zap.String("diff", diff)) return errRespNotMatched } return nil }
go
github
https://github.com/etcd-io/etcd
tests/robustness/validate/operations.go
import itertools
import os
import json
from collections import OrderedDict, defaultdict

import sqlparse

import dbt.project
import dbt.utils
import dbt.include
import dbt.tracking

from dbt.utils import get_materialization, NodeType, is_type

from dbt.linker import Linker
import dbt.compat
# BUG FIX: these two modules were referenced via attribute access
# (dbt.clients.jinja.get_rendered, dbt.clients.system.make_directory) but
# never imported; they only resolved if another module had loaded them first.
import dbt.clients.jinja
import dbt.clients.system
import dbt.context.runtime
import dbt.contracts.project
import dbt.exceptions
import dbt.flags
import dbt.loader

from dbt.contracts.graph.compiled import CompiledNode, CompiledGraph

from dbt.clients.system import write_json
from dbt.logger import GLOBAL_LOGGER as logger

graph_file_name = 'graph.gpickle'
manifest_file_name = 'manifest.json'


def print_compile_stats(stats):
    """Log a one-line summary of how many nodes of each type were found.

    :param stats: mapping of NodeType -> count (missing types shown as 0).
    """
    names = {
        NodeType.Model: 'models',
        NodeType.Test: 'tests',
        NodeType.Archive: 'archives',
        NodeType.Analysis: 'analyses',
        NodeType.Macro: 'macros',
        NodeType.Operation: 'operations',
        NodeType.Seed: 'seed files',
    }

    results = {k: 0 for k in names.keys()}
    results.update(stats)

    stat_line = ", ".join(
        ["{} {}".format(ct, names.get(t)) for t, ct in results.items()])

    logger.info("Found {}".format(stat_line))


def _add_prepended_cte(prepended_ctes, new_cte):
    """Add new_cte to prepended_ctes, replacing the sql of an existing
    entry with the same 'id' instead of appending a duplicate."""
    for dct in prepended_ctes:
        if dct['id'] == new_cte['id']:
            dct['sql'] = new_cte['sql']
            return
    prepended_ctes.append(new_cte)


def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
    """Merge new_prepended_ctes into prepended_ctes, de-duplicating by id."""
    for new_cte in new_prepended_ctes:
        _add_prepended_cte(prepended_ctes, new_cte)


def prepend_ctes(model, manifest):
    """Inject the CTEs a compiled model depends on into its SQL.

    Returns a (model, manifest) tuple; the manifest is updated with the
    injected node.
    """
    model, _, manifest = recursively_prepend_ctes(model, manifest)

    return (model, manifest)


def recursively_prepend_ctes(model, manifest):
    """Depth-first resolution of a model's extra CTEs.

    Each CTE dependency is itself resolved first, so transitively-required
    CTEs end up in the final list exactly once. Returns a tuple of
    (model, prepended_ctes, manifest).
    """
    if model.extra_ctes_injected:
        # Already processed on a previous visit; reuse its resolved CTE list.
        return (model, model.extra_ctes, manifest)

    if dbt.flags.STRICT_MODE:
        # ensure that all the nodes in this manifest are compiled
        CompiledGraph(**manifest.to_flat_graph())

    prepended_ctes = []

    for cte in model.extra_ctes:
        cte_id = cte['id']
        cte_to_add = manifest.nodes.get(cte_id)
        cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
            cte_to_add, manifest)

        _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)

        new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))
        sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
        _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})

    model.prepend_ctes(prepended_ctes)

    manifest.nodes[model.unique_id] = model

    return (model, prepended_ctes, manifest)


class Compiler(object):
    """Compiles a dbt project's parsed nodes into executable SQL and a
    dependency graph."""

    def __init__(self, project):
        self.project = project

    def initialize(self):
        """Create the target and modules directories if they don't exist."""
        dbt.clients.system.make_directory(self.project['target-path'])
        dbt.clients.system.make_directory(self.project['modules-path'])

    def compile_node(self, node, manifest):
        """Render a parsed node's SQL, inject its CTEs, and wrap it
        according to its resource type. Returns the injected CompiledNode."""
        logger.debug("Compiling {}".format(node.get('unique_id')))

        data = node.to_dict()
        data.update({
            'compiled': False,
            'compiled_sql': None,
            'extra_ctes_injected': False,
            'extra_ctes': [],
            'injected_sql': None,
        })
        compiled_node = CompiledNode(**data)

        context = dbt.context.runtime.generate(
            compiled_node, self.project, manifest)

        compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
            node.get('raw_sql'),
            context,
            node)

        compiled_node.compiled = True

        injected_node, _ = prepend_ctes(compiled_node, manifest)

        should_wrap = {NodeType.Test, NodeType.Analysis, NodeType.Operation}
        if injected_node.resource_type in should_wrap:
            # data tests get wrapped in count(*)
            # TODO : move this somewhere more reasonable
            if 'data' in injected_node.tags and \
               is_type(injected_node, NodeType.Test):
                injected_node.wrapped_sql = (
                    "select count(*) from (\n{test_sql}\n) sbq").format(
                        test_sql=injected_node.injected_sql)
            else:
                # don't wrap schema tests or analyses.
                injected_node.wrapped_sql = injected_node.injected_sql

        elif is_type(injected_node, NodeType.Archive):
            # unfortunately we do everything automagically for
            # archives. in the future it'd be nice to generate
            # the SQL at the parser level.
            pass

        elif(is_type(injected_node, NodeType.Model) and
             get_materialization(injected_node) == 'ephemeral'):
            # ephemeral models are only ever inlined as CTEs, never run.
            pass

        else:
            injected_node.wrapped_sql = None

        return injected_node

    def write_manifest_file(self, manifest):
        """Write the manifest file to disk.

        manifest should be a Manifest.
        """
        filename = manifest_file_name
        manifest_path = os.path.join(self.project['target-path'], filename)
        write_json(manifest_path, manifest.serialize())

    def write_graph_file(self, linker):
        """Persist the dependency graph to the target directory."""
        filename = graph_file_name
        graph_path = os.path.join(self.project['target-path'], filename)
        linker.write_graph(graph_path)

    def link_node(self, linker, node, manifest):
        """Add node and its dependency edges to the linker graph.

        Raises via dbt.exceptions.dependency_not_found when a dependency
        is missing from the manifest.
        """
        linker.add_node(node.unique_id)

        linker.update_node_data(
            node.unique_id,
            node.to_dict())

        for dependency in node.depends_on_nodes:
            if manifest.nodes.get(dependency):
                linker.dependency(
                    node.unique_id,
                    (manifest.nodes.get(dependency).unique_id))
            else:
                dbt.exceptions.dependency_not_found(node, dependency)

    def link_graph(self, linker, manifest):
        """Link every manifest node and fail fast on dependency cycles."""
        for node in manifest.nodes.values():
            self.link_node(linker, node, manifest)

        cycle = linker.find_cycles()

        if cycle:
            raise RuntimeError("Found a cycle: {}".format(cycle))

    def get_all_projects(self):
        """Return a dict of project name -> project config, including the
        root project and all of its dependency projects."""
        root_project = self.project.cfg
        all_projects = {root_project.get('name'): root_project}
        dependency_projects = dbt.utils.dependency_projects(self.project)

        for project in dependency_projects:
            name = project.cfg.get('name', 'unknown')
            all_projects[name] = project.cfg

        if dbt.flags.STRICT_MODE:
            dbt.contracts.project.ProjectList(**all_projects)

        return all_projects

    # NOTE(review): first parameter is named `cls` but there is no
    # @classmethod decorator; called as self._check_resource_uniqueness(...)
    # the instance binds to `cls`, which works because neither is used.
    def _check_resource_uniqueness(cls, manifest):
        """Raise if two refable nodes share a name or a schema.alias pair."""
        names_resources = {}
        alias_resources = {}

        for resource, node in manifest.nodes.items():
            if node.resource_type not in NodeType.refable():
                continue

            name = node.name
            alias = "{}.{}".format(node.schema, node.alias)

            existing_node = names_resources.get(name)
            if existing_node is not None:
                dbt.exceptions.raise_duplicate_resource_name(
                    existing_node, node)

            existing_alias = alias_resources.get(alias)
            if existing_alias is not None:
                dbt.exceptions.raise_ambiguous_alias(
                    existing_alias, node)

            names_resources[name] = node
            alias_resources[alias] = node

    def compile(self):
        """Load, validate, and link the whole project graph.

        Returns a (manifest, linker) tuple and writes the manifest and
        graph files to the target directory as side effects.
        """
        linker = Linker()

        all_projects = self.get_all_projects()

        manifest = dbt.loader.GraphLoader.load_all(self.project, all_projects)

        self.write_manifest_file(manifest)

        self._check_resource_uniqueness(manifest)

        self.link_graph(linker, manifest)

        stats = defaultdict(int)

        for node_name, node in itertools.chain(
                manifest.nodes.items(),
                manifest.macros.items()):
            stats[node.resource_type] += 1

        self.write_graph_file(linker)
        print_compile_stats(stats)

        return manifest, linker
unknown
codeparrot/codeparrot-clean
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test cycle management."""

__author__ = 'alexis.torres@gmail.com (Alexis O. Torres)'

import logging

import webapp2

from common.handlers import base
from models import test_cycle


class TestCyclesHandler(base.BaseHandler):
  """Serves the stored test cycles over HTTP."""

  def get(self):
    """Respond with every known test cycle, JSON-encoded."""
    all_cycles = test_cycle.FetchTestCycles()
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(test_cycle.JsonEncode(all_cycles))


# Both routes resolve to the same handler.
app = webapp2.WSGIApplication(
    [('/cycles', TestCyclesHandler),
     ('/cycles/all', TestCyclesHandler)],
    debug=True)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Resource object code # # Created: Wed Mar 20 13:49:41 2013 # by: The Resource Compiler for PyQt (Qt v4.8.4) # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore qt_resource_data = b"\ \x00\x00\x03\x4c\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\ \x00\x00\x00\x3d\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\ \x00\x00\x00\x71\x02\xf0\x8c\x31\x00\x00\x00\x8e\x05\x93\x08\xe5\ \x00\x00\x00\xaf\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\ \x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\ \x00\x00\x01\x25\x0e\x9f\xe7\x05\x00\x00\x01\x40\x69\x00\x00\x01\ \x87\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x75\x00\x65\x05\x00\x05\ \xcf\xc7\x01\x03\x00\x00\x00\x10\x00\x26\x00\x46\x00\x69\x00\x63\ \x00\x68\x00\x69\x00\x65\x00\x72\x05\x00\x2a\xd0\x25\x01\x03\x00\ \x00\x00\x10\x00\x26\x00\x51\x00\x75\x00\x69\x00\x74\x00\x74\x00\ \x65\x00\x72\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0e\x00\x50\ \x00\x72\x00\x65\x00\x6d\x00\x69\x00\x65\x00\x72\x05\x00\x4d\x09\ \xa4\x01\x03\x00\x00\x00\x12\x00\x54\x00\x72\x00\x6f\x00\x69\x00\ \x73\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\ \x00\x00\x00\x16\x00\x4c\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x65\ \x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\ \x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\ \x75\x00\x65\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x44\ \x00\x65\x00\x75\x00\x78\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x05\ \x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\ \x6d\x00\xe9\x00\x74\x00\x72\x00\x69\x00\x71\x00\x75\x00\x65\x05\ \x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\x00\x72\ \x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\x00\x76\x00\x65\ 
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x00\x46\x00\x72\x00\ \x61\x00\x6e\x00\xe7\x00\x61\x00\x69\x00\x73\x05\x0c\x4e\x30\xd8\ \x01\x03\x00\x00\x00\x3c\x00\x45\x00\x78\x00\x65\x00\x6d\x00\x70\ \x00\x6c\x00\x65\x00\x20\x00\x64\x00\x27\x00\x69\x00\x6e\x00\x74\ \x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\ \x00\x61\x00\x6c\x00\x69\x00\x73\x00\x61\x00\x74\x00\x69\x00\x6f\ \x00\x6e\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\ \x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x02\xb2\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\ 
\x00\x00\x00\x33\x00\x4d\x09\xa4\x00\x00\x00\x46\x00\x5a\xf0\x84\ \x00\x00\x00\x57\x02\xf0\x8c\x31\x00\x00\x00\x68\x05\x93\x08\xe5\ \x00\x00\x00\x81\x05\x9b\xa6\x44\x00\x00\x00\x90\x06\x3c\xe8\x53\ \x00\x00\x00\xa1\x06\xec\x79\x65\x00\x00\x00\xb2\x0c\x4e\x30\xd8\ \x00\x00\x00\xc5\x0e\x9f\xe7\x05\x00\x00\x00\xd6\x69\x00\x00\x00\ \xed\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x04\xbc\xf4\xae\x30\x05\x00\x05\xcf\xc7\ \x01\x03\x00\x00\x00\x08\xd3\x0c\xc7\x7c\x00\x26\x00\x46\x05\x00\ \x2a\xd0\x25\x01\x03\x00\x00\x00\x08\xc8\x85\xb8\xcc\x00\x26\x00\ \x58\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\xcc\xab\xbc\x88\ \xc9\xf8\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\xc1\x38\xbc\ \x88\xc9\xf8\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0e\xc5\xb8\ \xc5\xb4\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\ \x31\x01\x03\x00\x00\x00\x04\xbe\x57\xac\x01\x05\x05\x93\x08\xe5\ \x01\x03\x00\x00\x00\x06\xb4\x50\xbc\x88\xc9\xf8\x05\x05\x9b\xa6\ \x44\x01\x03\x00\x00\x00\x06\xb4\xf1\xce\x21\xb3\xc4\x05\x06\x3c\ \xe8\x53\x01\x03\x00\x00\x00\x08\xc6\xd0\xad\xfc\xd6\x54\xbc\x95\ \x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x06\xd5\x5c\xad\x6d\xc5\ \xb4\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x0c\xad\x6d\xc8\x1c\ \xd6\x54\x00\x20\xc6\x08\xc8\x1c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\ \x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\ \x00\ \x00\x00\x03\x26\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\ \x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x4a\x00\x5a\xf0\x84\ \x00\x00\x00\x61\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\ \x00\x00\x00\x93\x05\x9b\xa6\x44\x00\x00\x00\xaa\x06\x3c\xe8\x53\ \x00\x00\x00\xc1\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\ \x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\ \x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x06\x04\x12\x04\x38\x04\x34\x05\x00\x05\ \xcf\xc7\x01\x03\x00\x00\x00\x08\x04\x24\x04\x30\x04\x39\x04\x3b\ \x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x04\x12\x04\x4b\x04\ \x45\x04\x3e\x04\x34\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\ \x04\x1f\x04\x35\x04\x40\x04\x32\x04\x4b\x04\x39\x05\x00\x4d\x09\ \xa4\x01\x03\x00\x00\x00\x0c\x04\x22\x04\x40\x04\x35\x04\x42\x04\ \x38\x04\x39\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x10\x04\x2f\ \x04\x37\x04\x4b\x04\x3a\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\ \xf0\x8c\x31\x01\x03\x00\x00\x00\x0c\x04\x1a\x04\x43\x04\x40\x04\ \x41\x04\x38\x04\x32\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\ 
\x04\x12\x04\x42\x04\x3e\x04\x40\x04\x3e\x04\x39\x05\x05\x9b\xa6\ \x44\x01\x03\x00\x00\x00\x1c\x04\x18\x04\x37\x04\x3e\x04\x3c\x04\ \x35\x04\x42\x04\x40\x04\x38\x04\x47\x04\x35\x04\x41\x04\x3a\x04\ \x38\x04\x39\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x04\x1f\ \x04\x35\x04\x40\x04\x41\x04\x3f\x04\x35\x04\x3a\x04\x42\x04\x38\ \x04\x32\x04\x30\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x04\ \x20\x04\x43\x04\x41\x04\x41\x04\x3a\x04\x38\x04\x39\x05\x0c\x4e\ \x30\xd8\x01\x03\x00\x00\x00\x34\x04\x1f\x04\x40\x04\x38\x04\x3c\ \x04\x35\x04\x40\x00\x20\x04\x38\x04\x3d\x04\x42\x04\x35\x04\x40\ \x04\x3d\x04\x30\x04\x46\x04\x38\x04\x3d\x04\x3e\x04\x30\x04\x3b\ \x04\x38\x04\x37\x04\x30\x04\x46\x04\x38\x04\x38\x05\x0e\x9f\xe7\ \x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\ \x6e\x64\x6f\x77\x00\ \x00\x00\x03\x2e\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\ \x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\ \x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x84\x05\x93\x08\xe5\ \x00\x00\x00\xa1\x05\x9b\xa6\x44\x00\x00\x00\xb6\x06\x3c\xe8\x53\ \x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\ \x00\x00\x01\x0d\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\ \x69\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x73\x00\x61\x05\ \x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x41\x00\x72\ \x00\x6b\x00\x69\x00\x76\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\ \x10\x00\x26\x00\x41\x00\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\ \x61\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf6\ \x00\x72\x00\x73\x00\x74\x00\x61\x05\x00\x4d\x09\xa4\x01\x03\x00\ \x00\x00\x0c\x00\x54\x00\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\ \x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\ \x00\xe5\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\ \x31\x01\x03\x00\x00\x00\x0a\x00\x53\x00\x6b\x00\x65\x00\x76\x00\ \x74\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\ \x00\x64\x00\x72\x00\x61\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\ \x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\ \x69\x00\x73\x00\x6b\x00\x74\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\ \x00\x16\x00\x50\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\ \x00\x74\x00\x69\x00\x76\x00\x74\x05\x06\xec\x79\x65\x01\x03\x00\ \x00\x00\x0e\x00\x53\x00\x76\x00\x65\x00\x6e\x00\x73\x00\x6b\x00\ \x61\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\ \x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\ 
\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\x00\x72\x00\x69\ \x00\x6e\x00\x67\x00\x73\x00\x65\x00\x78\x00\x65\x00\x6d\x00\x70\ \x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\ \x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x03\x50\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\ \x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x5c\x00\x5a\xf0\x84\ \x00\x00\x00\x75\x02\xf0\x8c\x31\x00\x00\x00\x90\x05\x93\x08\xe5\ \x00\x00\x00\xb1\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\ \x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\ 
\x00\x00\x01\x2b\x0e\x9f\xe7\x05\x00\x00\x01\x44\x69\x00\x00\x01\ \x8b\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x6e\x00\x73\x00\x69\x00\ \x63\x00\x68\x00\x74\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\ \x00\x26\x00\x44\x00\x61\x00\x74\x00\x65\x00\x69\x05\x00\x2a\xd0\ \x25\x01\x03\x00\x00\x00\x10\x00\x42\x00\x65\x00\x26\x00\x65\x00\ \x6e\x00\x64\x00\x65\x00\x6e\x05\x00\x47\xdf\x04\x01\x03\x00\x00\ \x00\x0e\x00\x45\x00\x72\x00\x73\x00\x74\x00\x65\x00\x6e\x00\x73\ \x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x10\x00\x44\x00\x72\x00\ \x69\x00\x74\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x00\x5a\xf0\x84\ \x01\x03\x00\x00\x00\x16\x00\x53\x00\x70\x00\x72\x00\x61\x00\x63\ \x00\x68\x00\x65\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\ \x31\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x63\x00\x68\x00\x69\x00\ \x65\x00\x66\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x5a\ \x00\x77\x00\x65\x00\x69\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x05\ \x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\ \x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\x63\x00\x68\x05\ \x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x1c\x00\x50\x00\x65\x00\x72\ \x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x69\ \x00\x73\x00\x63\x00\x68\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\ \x0e\x00\x44\x00\x65\x00\x75\x00\x74\x00\x73\x00\x63\x00\x68\x05\ \x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3c\x00\x49\x00\x6e\x00\x74\ \x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\ \x00\x61\x00\x6c\x00\x69\x00\x73\x00\x69\x00\x65\x00\x72\x00\x75\ \x00\x6e\x00\x67\x00\x73\x00\x62\x00\x65\x00\x69\x00\x73\x00\x70\ \x00\x69\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\ \x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x02\xbc\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\ \x00\x00\x00\x37\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\ \x00\x00\x00\x5f\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\ \x00\x00\x00\x87\x05\x9b\xa6\x44\x00\x00\x00\x98\x06\x3c\xe8\x53\ \x00\x00\x00\xa9\x06\xec\x79\x65\x00\x00\x00\xbc\x0c\x4e\x30\xd8\ \x00\x00\x00\xcf\x0e\x9f\xe7\x05\x00\x00\x00\xe2\x69\x00\x00\x00\ \xf7\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x04\x89\xc6\x56\xfe\x05\x00\x05\xcf\xc7\ \x01\x03\x00\x00\x00\x0c\x65\x87\x4e\xf6\x00\x5b\x00\x26\x00\x46\ \x00\x5d\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0c\x90\x00\x51\ \xfa\x00\x5b\x00\x26\x00\x78\x00\x5d\x05\x00\x47\xdf\x04\x01\x03\ \x00\x00\x00\x06\x7b\x2c\x4e\x00\x4e\x2a\x05\x00\x4d\x09\xa4\x01\ 
\x03\x00\x00\x00\x06\x7b\x2c\x4e\x09\x4e\x2a\x05\x00\x5a\xf0\x84\ \x01\x03\x00\x00\x00\x0c\x8b\xed\x8a\x00\x00\x3a\x00\x20\x00\x25\ \x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x06\x65\x9c\x62\ \x95\x5f\x71\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\ \x4e\x8c\x4e\x2a\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x08\x7b\ \x49\x89\xd2\x62\x95\x5f\x71\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\ \x00\x08\x90\x0f\x89\xc6\x62\x95\x5f\x71\x05\x06\xec\x79\x65\x01\ \x03\x00\x00\x00\x08\x7b\x80\x4f\x53\x4e\x2d\x65\x87\x05\x0c\x4e\ \x30\xd8\x01\x03\x00\x00\x00\x0a\x56\xfd\x96\x45\x53\x16\x83\x03\ \x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\ \x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x02\xe0\ \x3c\ 
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\ \x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4c\x00\x5a\xf0\x84\ \x00\x00\x00\x5d\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\ \x00\x00\x00\x8d\x05\x9b\xa6\x44\x00\x00\x00\xa0\x06\x3c\xe8\x53\ \x00\x00\x00\xb3\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\ \x00\x00\x00\xdf\x0e\x9f\xe7\x05\x00\x00\x00\xf8\x69\x00\x00\x01\ \x1b\x03\x00\x00\x00\x06\x00\x52\x00\x54\x00\x4c\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x08\x06\x45\x06\x31\x06\x26\x06\x49\x05\ \x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x06\x27\x06\x44\x06\x45\ \x06\x44\x06\x41\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\x06\ \x23\x06\x2e\x06\x31\x06\x2c\x05\x00\x47\xdf\x04\x01\x03\x00\x00\ \x00\x06\x06\x23\x06\x48\x06\x44\x05\x00\x4d\x09\xa4\x01\x03\x00\ \x00\x00\x08\x06\x2b\x06\x27\x06\x44\x06\x2b\x05\x00\x5a\xf0\x84\ \x01\x03\x00\x00\x00\x12\x06\x27\x06\x44\x06\x44\x06\x3a\x06\x29\ \x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\ \x00\x00\x08\x06\x45\x06\x35\x06\x45\x06\x2a\x05\x05\x93\x08\xe5\ \x01\x03\x00\x00\x00\x08\x06\x2b\x06\x27\x06\x46\x06\x49\x05\x05\ \x9b\xa6\x44\x01\x03\x00\x00\x00\x0c\x06\x45\x06\x2a\x06\x45\x06\ \x27\x06\x2b\x06\x44\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x0a\ \x06\x45\x06\x46\x06\x38\x06\x48\x06\x31\x05\x06\xec\x79\x65\x01\ \x03\x00\x00\x00\x0e\x06\x27\x06\x44\x06\x39\x06\x31\x06\x28\x06\ \x4a\x06\x29\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x18\x06\x45\ \x06\x2b\x06\x27\x06\x44\x00\x20\x06\x27\x06\x44\x06\x2a\x06\x2f\ \x06\x48\x06\x4a\x06\x44\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\ \x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x03\x1c\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x28\x00\x47\xdf\x04\ \x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\ \x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x82\x05\x93\x08\xe5\ \x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xbc\x06\x3c\xe8\x53\ \x00\x00\x00\xd1\x06\xec\x79\x65\x00\x00\x00\xf2\x0c\x4e\x30\xd8\ \x00\x00\x01\x15\x0e\x9f\xe7\x05\x00\x00\x01\x2a\x69\x00\x00\x01\ \x57\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x0c\x00\x50\x00\x6f\x00\x68\x00\x6c\x00\ \x65\x00\x64\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\ \x00\x53\x00\x6f\x00\x75\x00\x62\x00\x6f\x00\x72\x05\x00\x2a\xd0\ \x25\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x4b\x00\x6f\x00\x6e\x00\ \x65\x00\x63\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x00\x50\ 
\x00\x72\x00\x76\x00\x6e\x00\xed\x05\x00\x4d\x09\xa4\x01\x03\x00\ \x00\x00\x0a\x00\x54\x01\x59\x00\x65\x00\x74\x00\xed\x05\x00\x5a\ \xf0\x84\x01\x03\x00\x00\x00\x12\x00\x4a\x00\x61\x00\x79\x00\x7a\ \x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\ \x03\x00\x00\x00\x12\x00\x4e\x00\x61\x00\x6b\x00\x6c\x00\x6f\x00\ \x6e\x01\x1b\x00\x6e\x00\xfd\x05\x05\x93\x08\xe5\x01\x03\x00\x00\ \x00\x0a\x00\x44\x00\x72\x00\x75\x00\x68\x00\xfd\x05\x05\x9b\xa6\ \x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\ \x65\x00\x74\x00\x72\x00\x69\x00\x63\x00\x6b\x00\xfd\x05\x06\x3c\ \xe8\x53\x01\x03\x00\x00\x00\x18\x00\x50\x00\x65\x00\x72\x00\x73\ \x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x6e\x00\xed\ \x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x01\x0c\x00\x65\x00\ \x73\x00\x6b\x00\xfd\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x22\ \x00\x55\x00\x6b\x00\xe1\x00\x7a\x00\x6b\x00\x61\x00\x20\x00\x6c\ \x00\x6f\x00\x6b\x00\x61\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x63\ \x00\x65\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\ \x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x03\x28\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x26\x00\x47\xdf\x04\ \x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\ \x00\x00\x00\x65\x02\xf0\x8c\x31\x00\x00\x00\x7a\x05\x93\x08\xe5\ \x00\x00\x00\x99\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\ \x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xea\x0c\x4e\x30\xd8\ \x00\x00\x01\x0b\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\ \x63\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x0a\x00\x56\x00\x69\x00\x73\x00\x74\x00\ \x61\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\ \x00\x69\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\ \x0a\x00\x26\x00\x45\x00\x73\x00\x63\x00\x69\x05\x00\x47\xdf\x04\ \x01\x03\x00\x00\x00\x0a\x00\x50\x00\x72\x00\x69\x00\x6d\x00\x6f\ \x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x65\x00\ \x72\x00\x7a\x00\x6f\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x14\ \x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x3a\x00\x20\ \x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x0e\x00\ \x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x61\x05\x05\x93\ \x08\xe5\x01\x03\x00\x00\x00\x0e\x00\x53\x00\x65\x00\x63\x00\x6f\ \x00\x6e\x00\x64\x00\x6f\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\ \x14\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\ \x69\x00\x63\x00\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\ \x00\x50\x00\x72\x00\x6f\x00\x73\x00\x70\x00\x65\x00\x74\x00\x74\ 
\x00\x69\x00\x63\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\ \x10\x00\x49\x00\x74\x00\x61\x00\x6c\x00\x69\x00\x61\x00\x6e\x00\ \x6f\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x73\ \x00\x65\x00\x6d\x00\x70\x00\x69\x00\x6f\x00\x20\x00\x64\x00\x69\ \x00\x20\x00\x6c\x00\x6f\x00\x63\x00\x61\x00\x6c\x00\x69\x00\x7a\ \x00\x7a\x00\x61\x00\x7a\x00\x69\x00\x6f\x00\x6e\x00\x65\x05\x0e\ \x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\ \x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x03\x24\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\ 
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\ \x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\ \x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\ \x00\x00\x00\xc7\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\ \x00\x00\x01\x05\x0e\x9f\xe7\x05\x00\x00\x01\x1a\x69\x00\x00\x01\ \x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x69\x00\x73\x05\x00\x05\ \xcf\xc7\x01\x03\x00\x00\x00\x08\x00\x26\x00\x46\x00\x69\x00\x6c\ \x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x10\x00\x26\x00\x41\x00\ \x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\x74\x05\x00\x47\xdf\x04\ \x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf8\x00\x72\x00\x73\x00\x74\ \x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0c\x00\x54\x00\ \x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\ \x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\x00\xe5\x00\x6b\x00\x3a\ \x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\ \x0c\x00\x53\x00\x6b\x00\x6a\x00\x65\x00\x76\x00\x74\x05\x05\x93\ \x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\x00\x64\x00\x72\ \x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x14\x00\x49\x00\ \x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\ \x6b\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x14\x00\x50\x00\x65\ \x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\ \x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x00\x4e\x00\x6f\x00\ \x72\x00\x73\x00\x6b\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3a\ \x00\x49\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x73\ \x00\x6a\x00\x6f\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\ \x00\x72\x00\x69\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x6b\x00\x73\ \x00\x65\x00\x6d\x00\x70\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\ \x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\ \x6f\x77\x00\ \x00\x00\x03\x24\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\ \x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x54\x00\x5a\xf0\x84\ \x00\x00\x00\x69\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\ \x00\x00\x00\x9d\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\ \x00\x00\x00\xcd\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\ \x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\ \x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x06\x03\x8c\x03\xc8\x03\xb7\x05\x00\x05\ \xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\x03\x91\x03\xc1\x03\xc7\ \x03\xb5\x03\xaf\x03\xbf\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\ 
\x0e\x03\x88\x00\x26\x03\xbe\x03\xbf\x03\xb4\x03\xbf\x03\xc2\x05\ \x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x03\xa0\x03\xc1\x03\xce\ \x03\xc4\x03\xbf\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x03\ \xa4\x03\xc1\x03\xaf\x03\xc4\x03\xbf\x05\x00\x5a\xf0\x84\x01\x03\ \x00\x00\x00\x14\x03\x93\x03\xbb\x03\xce\x03\xc3\x03\xc3\x03\xb1\ \x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\ \x00\x00\x0c\x03\xa0\x03\xbb\x03\xac\x03\xb3\x03\xb9\x03\xb1\x05\ \x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0e\x03\x94\x03\xb5\x03\xcd\ \x03\xc4\x03\xb5\x03\xc1\x03\xbf\x05\x05\x9b\xa6\x44\x01\x03\x00\ \x00\x00\x14\x03\x99\x03\xc3\x03\xbf\x03\xbc\x03\xb5\x03\xc4\x03\ \xc1\x03\xb9\x03\xba\x03\xae\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\ \x00\x12\x03\xa0\x03\xc1\x03\xbf\x03\xbf\x03\xc0\x03\xc4\x03\xb9\ \x03\xba\x03\xae\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x03\ \x95\x03\xbb\x03\xbb\x03\xb7\x03\xbd\x03\xb9\x03\xba\x03\xac\x05\ \x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x30\x03\xa0\x03\xb1\x03\xc1\ \x03\xac\x03\xb4\x03\xb5\x03\xb9\x03\xb3\x03\xbc\x03\xb1\x00\x20\ \x03\xb4\x03\xb9\x03\xb5\x03\xb8\x03\xbd\x03\xbf\x03\xc0\x03\xbf\ \x03\xaf\x03\xb7\x03\xc3\x03\xb7\x03\xc2\x05\x0e\x9f\xe7\x05\x01\ \x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\ \x6f\x77\x00\ \x00\x00\x03\x26\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\ \x00\x00\x00\x45\x00\x4d\x09\xa4\x00\x00\x00\x5a\x00\x5a\xf0\x84\ \x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x80\x05\x93\x08\xe5\ \x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xb8\x06\x3c\xe8\x53\ \x00\x00\x00\xc9\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\ \x00\x00\x01\x07\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\ \x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x73\x00\x70\x00\x65\x00\ \x6b\x00\x74\x00\x6f\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\ \x00\x26\x00\x44\x00\x6f\x00\x73\x00\x69\x00\x65\x00\x72\x00\x6f\ \x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\ \x69\x00\x6e\x00\x69\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x08\ \x00\x55\x00\x6e\x00\x75\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\ \x00\x00\x08\x00\x54\x00\x72\x00\x69\x00\x65\x05\x00\x5a\xf0\x84\ \x01\x03\x00\x00\x00\x14\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x76\ \x00\x6f\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\ \x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x6b\x00\ \x76\x00\x61\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x00\x44\ \x00\x75\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x12\x00\ 
\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\ \x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\ \x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\ \x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x12\x00\x45\x00\ \x73\x00\x70\x00\x65\x00\x72\x00\x61\x00\x6e\x00\x74\x00\x6f\x05\ \x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x6b\x00\x7a\ \x00\x65\x00\x6d\x00\x70\x00\x6c\x00\x6f\x00\x20\x00\x70\x00\x72\ \x00\x69\x00\x20\x00\x69\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\ \x00\x61\x00\x63\x00\x69\x00\x69\x00\x67\x00\x6f\x05\x0e\x9f\xe7\ \x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\ \x6e\x64\x6f\x77\x00\ \x00\x00\x03\x2a\ \x3c\ 
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\ \x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\ \x00\x00\x00\x63\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\ \x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\ \x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\ \x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\ \x65\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ \x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x65\x00\x77\x05\ \x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\x69\ \x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\ \x45\x00\x26\x00\x78\x00\x69\x00\x74\x05\x00\x47\xdf\x04\x01\x03\ \x00\x00\x00\x0a\x00\x46\x00\x69\x00\x72\x00\x73\x00\x74\x05\x00\ \x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x68\x00\x69\x00\ \x72\x00\x64\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x18\x00\x4c\ \x00\x61\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x67\x00\x65\x00\x3a\ \x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\ \x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x65\x05\ \x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x65\x00\x63\ \x00\x6f\x00\x6e\x00\x64\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\ \x12\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\ \x69\x00\x63\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\ \x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\ \x00\x76\x00\x65\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x00\ \x45\x00\x6e\x00\x67\x00\x6c\x00\x69\x00\x73\x00\x68\x05\x0c\x4e\ \x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\x00\x74\x00\x65\ \x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x61\ \x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\ \x00\x20\x00\x45\x00\x78\x00\x61\x00\x6d\x00\x70\x00\x6c\x00\x65\ 
\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\ \x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\ \x00\x00\x02\xd2\ \x3c\ \xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\ \x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\ \x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\ \x00\x00\x00\x3f\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\ \x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\ \x00\x00\x00\x8f\x05\x9b\xa6\x44\x00\x00\x00\xa4\x06\x3c\xe8\x53\ \x00\x00\x00\xb5\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\ \x00\x00\x00\xdb\x0e\x9f\xe7\x05\x00\x00\x00\xec\x69\x00\x00\x01\ \x0d\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\ 
\x92\x01\x03\x00\x00\x00\x08\x88\x68\x79\x3a\x65\xb9\x5f\x0f\x05\ \x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\x30\xd5\x30\xa1\x30\xa4\ \x30\xeb\x00\x28\x00\x26\x00\x46\x00\x29\x05\x00\x2a\xd0\x25\x01\ \x03\x00\x00\x00\x0c\x7d\x42\x4e\x86\x00\x28\x00\x26\x00\x58\x00\ \x29\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x00\ \x88\x4c\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\ \x09\x88\x4c\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0c\x8a\x00\ \x8a\x9e\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\ \x03\x00\x00\x00\x0a\x65\x9c\x30\x81\x62\x95\x5f\x71\x6c\xd5\x05\ \x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x8c\x88\x4c\ \x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x0a\x7b\x49\x89\xd2\x62\ \x95\x5f\x71\x6c\xd5\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x06\ \x90\x60\x8f\xd1\x6c\xd5\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\ \x06\x65\xe5\x67\x2c\x8a\x9e\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\ \x00\x16\x56\xfd\x96\x9b\x53\x16\x00\x28\x00\x69\x00\x31\x00\x38\ \x00\x6e\x00\x29\x30\x6e\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\ \x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\ \x00\ " qt_resource_name = b"\ \x00\x0c\ \x0d\xfc\x11\x13\ \x00\x74\ \x00\x72\x00\x61\x00\x6e\x00\x73\x00\x6c\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\ \x00\x0a\ \x04\x50\xdc\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x66\x00\x72\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x6f\xac\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6b\x00\x6f\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x65\x0c\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x72\x00\x75\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x67\x1c\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x73\x00\x76\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x58\x0c\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x64\x00\x65\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x7d\x3c\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x7a\x00\x68\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x55\xdc\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x57\xec\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x63\x00\x73\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x6d\xfc\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x69\x00\x74\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x68\xac\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6e\x00\x6f\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x56\x7c\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6c\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x59\xac\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6f\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x59\x9c\x9d\ \x00\x69\ 
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6e\x00\x2e\x00\x71\x00\x6d\ \x00\x0a\ \x04\x6c\xbc\x9d\ \x00\x69\ \x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6a\x00\x70\x00\x2e\x00\x71\x00\x6d\ " qt_resource_struct = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x0e\x00\x00\x00\x02\ \x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ \x00\x00\x00\xba\x00\x00\x00\x00\x00\x01\x00\x00\x12\x76\ \x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xce\ \x00\x00\x00\xd4\x00\x00\x00\x00\x00\x01\x00\x00\x15\x5a\ \x00\x00\x00\x86\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x62\ \x00\x00\x01\x56\x00\x00\x00\x00\x00\x01\x00\x00\x25\x20\ \x00\x00\x01\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x21\xf6\ \x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x06\x06\ \x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x09\x30\ \x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xa6\ \x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x28\x4e\ \x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x18\x7a\ \x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x03\x50\ \x00\x00\x00\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x0f\xb6\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()
unknown
codeparrot/codeparrot-clean
# informix/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Support for the Informix database.

.. note::

    The Informix dialect functions on current SQLAlchemy versions
    but is not regularly tested, and may have many issues and
    caveats not currently handled.

"""
import datetime

from sqlalchemy import sql, schema, exc, pool, util
from sqlalchemy.sql import compiler, text
from sqlalchemy.engine import default, reflection
from sqlalchemy import types as sqltypes

# Identifiers reserved by Informix; the identifier preparer quotes any
# name appearing in this set.
RESERVED_WORDS = set(
    ["abs", "absolute", "access", "access_method", "acos", "active", "add",
    "address", "add_months", "admin", "after", "aggregate", "alignment",
    "all", "allocate", "all_rows", "alter", "and", "ansi", "any", "append",
    "array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach",
    "attributes", "audit", "authentication", "authid", "authorization",
    "authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode",
    "avg", "avoid_execute", "avoid_fact", "avoid_full", "avoid_hash",
    "avoid_index", "avoid_index_sj", "avoid_multi_index", "avoid_nl",
    "avoid_star_join", "avoid_subqf", "based", "before", "begin",
    "between", "bigint", "bigserial", "binary", "bitand", "bitandnot",
    "bitnot", "bitor", "bitxor", "blob", "blobdir", "boolean", "both",
    "bound_impl_pdq", "buffered", "builtin", "by", "byte", "cache", "call",
    "cannothash", "cardinality", "cascade", "case", "cast", "ceil", "char",
    "character", "character_length", "char_length", "check", "class",
    "class_origin", "client", "clob", "clobdir", "close", "cluster",
    "clustersize", "cobol", "codeset", "collation", "collection",
    "column", "columns", "commit", "committed", "commutator", "component",
    "components", "concat", "concurrent", "connect", "connection",
    "connection_name", "connect_by_iscycle", "connect_by_isleaf",
    "connect_by_rootconst", "constraint", "constraints", "constructor",
    "context", "continue", "copy", "cos", "costfunc", "count", "crcols",
    "create", "cross", "current", "current_role", "currval", "cursor",
    "cycle", "database", "datafiles", "dataskip", "date", "datetime",
    "day", "dba", "dbdate", "dbinfo", "dbpassword", "dbsecadm",
    "dbservername", "deallocate", "debug", "debugmode", "debug_env", "dec",
    "decimal", "declare", "decode", "decrypt_binary", "decrypt_char",
    "dec_t", "default", "default_role", "deferred", "deferred_prepare",
    "define", "delay", "delete", "deleting", "delimited", "delimiter",
    "deluxe", "desc", "describe", "descriptor", "detach", "diagnostics",
    "directives", "dirty", "disable", "disabled", "disconnect", "disk",
    "distinct", "distributebinary", "distributesreferences",
    "distributions", "document", "domain", "donotdistribute", "dormant",
    "double", "drop", "dtime_t", "each", "elif", "else", "enabled",
    "encryption", "encrypt_aes", "encrypt_tdes", "end", "enum",
    "environment", "error", "escape", "exception", "exclusive", "exec",
    "execute", "executeanywhere", "exemption", "exists", "exit", "exp",
    "explain", "explicit", "express", "expression", "extdirectives",
    "extend", "extent", "external", "fact", "false", "far", "fetch",
    "file", "filetoblob", "filetoclob", "fillfactor", "filtering",
    "first", "first_rows", "fixchar", "fixed", "float", "floor", "flush",
    "for", "force", "forced", "force_ddl_exec", "foreach", "foreign",
    "format", "format_units", "fortran", "found", "fraction", "fragment",
    "fragments", "free", "from", "full", "function", "general", "get",
    "gethint", "global", "go", "goto", "grant", "greaterthan",
    "greaterthanorequal", "group", "handlesnulls", "hash", "having",
    "hdr", "hex", "high", "hint", "hold", "home", "hour",
    "idslbacreadarray", "idslbacreadset", "idslbacreadtree",
    "idslbacrules", "idslbacwritearray", "idslbacwriteset",
    "idslbacwritetree", "idssecuritylabel", "if", "ifx_auto_reprepare",
    "ifx_batchedread_table", "ifx_int8_t", "ifx_lo_create_spec_t",
    "ifx_lo_stat_t", "immediate", "implicit", "implicit_pdq", "in",
    "inactive", "increment", "index", "indexes", "index_all", "index_sj",
    "indicator", "informix", "init", "initcap", "inline", "inner",
    "inout", "insert", "inserting", "instead", "int", "int8", "integ",
    "integer", "internal", "internallength", "interval", "into",
    "intrvl_t", "is", "iscanonical", "isolation", "item", "iterator",
    "java", "join", "keep", "key", "label", "labeleq", "labelge",
    "labelglb", "labelgt", "labelle", "labellt", "labellub",
    "labeltostring", "language", "last", "last_day", "leading", "left",
    "length", "lessthan", "lessthanorequal", "let", "level", "like",
    "limit", "list", "listing", "load", "local", "locator", "lock",
    "locks", "locopy", "loc_t", "log", "log10", "logn", "long", "loop",
    "lotofile", "low", "lower", "lpad", "ltrim", "lvarchar", "matched",
    "matches", "max", "maxerrors", "maxlen", "maxvalue", "mdy", "median",
    "medium", "memory", "memory_resident", "merge", "message_length",
    "message_text", "middle", "min", "minute", "minvalue", "mod", "mode",
    "moderate", "modify", "module", "money", "month", "months_between",
    "mounting", "multiset", "multi_index", "name", "nchar", "negator",
    "new", "next", "nextval", "next_day", "no", "nocache", "nocycle",
    "nomaxvalue", "nomigrate", "nominvalue", "none", "non_dim",
    "non_resident", "noorder", "normal", "not", "notemplatearg",
    "notequal", "null", "nullif", "numeric", "numrows", "numtodsinterval",
    "numtoyminterval", "nvarchar", "nvl", "octet_length", "of", "off",
    "old", "on", "online", "only", "opaque", "opclass", "open",
    "optcompind", "optical", "optimization", "option", "or", "order",
    "ordered", "out", "outer", "output", "override", "page",
    "parallelizable", "parameter", "partition", "pascal", "passedbyvalue",
    "password", "pdqpriority", "percaltl_cos", "pipe", "pli", "pload",
    "policy", "pow", "power", "precision", "prepare", "previous",
    "primary", "prior", "private", "privileges", "procedure",
    "properties", "public", "put", "raise", "range", "raw", "read",
    "real", "recordend", "references", "referencing", "register",
    "rejectfile", "relative", "release", "remainder", "rename",
    "reoptimization", "repeatable", "replace", "replication", "reserve",
    "resolution", "resource", "restart", "restrict", "resume", "retain",
    "retainupdatelocks", "return", "returned_sqlstate", "returning",
    "returns", "reuse", "revoke", "right", "robin", "role", "rollback",
    "rollforward", "root", "round", "routine", "row", "rowid", "rowids",
    "rows", "row_count", "rpad", "rtrim", "rule", "sameas", "samples",
    "sampling", "save", "savepoint", "schema", "scroll",
    "seclabel_by_comp", "seclabel_by_name", "seclabel_to_char", "second",
    "secondary", "section", "secured", "security", "selconst", "select",
    "selecting", "selfunc", "selfuncargs", "sequence", "serial",
    "serial8", "serializable", "serveruuid", "server_name", "session",
    "set", "setsessionauth", "share", "short", "siblings", "signed",
    "sin", "sitename", "size", "skall", "skinhibit", "skip", "skshow",
    "smallfloat", "smallint", "some", "specific", "sql", "sqlcode",
    "sqlcontext", "sqlerror", "sqlstate", "sqlwarning", "sqrt",
    "stability", "stack", "standard", "start", "star_join", "statchange",
    "statement", "static", "statistics", "statlevel", "status", "stdev",
    "step", "stop", "storage", "store", "strategies", "string",
    "stringtolabel", "struct", "style", "subclass_origin", "substr",
    "substring", "sum", "support", "sync", "synonym", "sysdate",
    "sysdbclose", "sysdbopen", "system", "sys_connect_by_path", "table",
    "tables", "tan", "task", "temp", "template", "test", "text", "then",
    "time", "timeout", "to", "today", "to_char", "to_date",
    "to_dsinterval", "to_number", "to_yminterval", "trace", "trailing",
    "transaction", "transition", "tree", "trigger", "triggers", "trim",
    "true", "trunc", "truncate", "trusted", "type", "typedef", "typeid",
    "typename", "typeof", "uid", "uncommitted", "under", "union",
    "unique", "units", "unknown", "unload", "unlock", "unsigned",
    "update", "updating", "upon", "upper", "usage", "use",
    "uselastcommitted", "user", "use_hash", "use_nl", "use_subqf",
    "using", "value", "values", "var", "varchar", "variable", "variance",
    "variant", "varying", "vercols", "view", "violations", "void",
    "volatile", "wait", "warning", "weekday", "when", "whenever", "where",
    "while", "with", "without", "work", "write", "writedown", "writeup",
    "xadatasource", "xid", "xload", "xunload", "year"
    ])


class InfoDateTime(sqltypes.DateTime):
    """DATETIME type that strips sub-second precision on the way in.

    Informix DATETIME YEAR TO SECOND cannot store microseconds, so any
    microsecond component of a bound value is zeroed before send.
    """

    def bind_processor(self, dialect):
        def process(value):
            if value is not None:
                if value.microsecond:
                    # Informix rejects fractional seconds for this type.
                    value = value.replace(microsecond=0)
            return value
        return process


class InfoTime(sqltypes.Time):
    """TIME type mapped onto Informix DATETIME HOUR TO SECOND.

    Bound values lose microseconds; result values arriving as full
    ``datetime.datetime`` objects are narrowed to their ``time()`` part.
    """

    def bind_processor(self, dialect):
        def process(value):
            if value is not None:
                if value.microsecond:
                    # Same restriction as InfoDateTime: no sub-second part.
                    value = value.replace(microsecond=0)
            return value
        return process

    def result_processor(self, dialect, coltype):
        def process(value):
            # The driver may hand back a full datetime for a TIME column;
            # callers expect a datetime.time.
            if isinstance(value, datetime.datetime):
                return value.time()
            else:
                return value
        return process


# SQLAlchemy generic type -> Informix-specific implementation overrides.
colspecs = {
    sqltypes.DateTime: InfoDateTime,
    sqltypes.TIMESTAMP: InfoDateTime,
    sqltypes.Time: InfoTime,
}


# Informix system-catalog type codes -> SQLAlchemy types, used during
# reflection.
#
# NOTE(review): the original literal listed key 3 twice (FLOAT, then
# SmallFloat) and key -1 twice (BLOB, then CLOB).  Python keeps only the
# last entry for a duplicated key, so the FLOAT and BLOB entries were dead
# code; they are removed here without changing the effective mapping.  The
# correct distinct codes for FLOAT/SMALLFLOAT and BLOB/CLOB should be
# confirmed against the Informix system catalog documentation.
ischema_names = {
    0: sqltypes.CHAR,          # CHAR
    1: sqltypes.SMALLINT,      # SMALLINT
    2: sqltypes.INTEGER,       # INT
    3: sqltypes.Float,         # SmallFloat (shadowed an earlier FLOAT entry)
    5: sqltypes.DECIMAL,       # DECIMAL
    6: sqltypes.Integer,       # Serial
    7: sqltypes.DATE,          # DATE
    8: sqltypes.Numeric,       # MONEY
    10: sqltypes.DATETIME,     # DATETIME
    11: sqltypes.LargeBinary,  # BYTE
    12: sqltypes.TEXT,         # TEXT
    13: sqltypes.VARCHAR,      # VARCHAR
    15: sqltypes.NCHAR,        # NCHAR
    16: sqltypes.NVARCHAR,     # NVARCHAR
    17: sqltypes.Integer,      # INT8
    18: sqltypes.Integer,      # Serial8
    43: sqltypes.String,       # LVARCHAR
    -1: sqltypes.CLOB,         # CLOB (shadowed an earlier BLOB entry)
}


class InfoTypeCompiler(compiler.GenericTypeCompiler):
    """Render SQLAlchemy types as Informix DDL type names."""

    def visit_DATETIME(self, type_):
        return "DATETIME YEAR TO SECOND"

    def visit_TIME(self, type_):
        return "DATETIME HOUR TO SECOND"

    def visit_TIMESTAMP(self, type_):
        return "DATETIME YEAR TO SECOND"

    def visit_large_binary(self, type_):
        return "BYTE"

    def visit_boolean(self, type_):
        # Informix has no native BOOLEAN in the versions this dialect
        # targets; SMALLINT 0/1 stands in.
        return "SMALLINT"


class InfoSQLCompiler(compiler.SQLCompiler):
    """Statement compiler implementing Informix SQL quirks."""

    def default_from(self):
        # Informix has no standalone SELECT; a dummy single-row FROM
        # clause against systables stands in for "no table".
        return " from systables where tabname = 'systables' "

    def get_select_precolumns(self, select):
        # Informix expresses OFFSET/LIMIT as SKIP n FIRST m *before* the
        # column list rather than a trailing LIMIT clause.
        s = ""
        if select._offset:
            s += "SKIP %s " % select._offset
        if select._limit:
            s += "FIRST %s " % select._limit
        s += "DISTINCT " if select._distinct else ""
        return s

    def visit_select(self, select, asfrom=False, parens=True, **kw):
        text = compiler.SQLCompiler.visit_select(self, select, asfrom,
                                                 parens, **kw)
        if asfrom and parens and self.dialect.server_version_info < (11,):
            # Pre-11 servers need derived tables wrapped in
            # table(multiset(...)).  Assuming version 11 doesn't need
            # this; not tested.
            return "table(multiset" + text + ")"
        else:
            return text

    def limit_clause(self, select):
        # LIMIT/OFFSET are emitted via get_select_precolumns(); suppress
        # the default trailing clause.
        return ""

    def visit_function(self, func, **kw):
        # Informix spellings of the standard date/time functions.
        name = func.name.lower()
        if name == 'current_date':
            return "today"
        elif name == 'current_time':
            return "CURRENT HOUR TO SECOND"
        elif name in ('current_timestamp', 'now'):
            return "CURRENT YEAR TO SECOND"
        else:
            return compiler.SQLCompiler.visit_function(self, func, **kw)

    def visit_mod(self, binary, **kw):
        # No infix % operator; use the MOD() function.
        return "MOD(%s, %s)" % (self.process(binary.left),
                                self.process(binary.right))


class InfoDDLCompiler(compiler.DDLCompiler):
    """DDL compiler implementing Informix constraint/column syntax."""

    def visit_add_constraint(self, create):
        return "ALTER TABLE %s ADD CONSTRAINT %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )

    def get_column_specification(self, column, **kw):
        colspec = self.preparer.format_column(column)
        first = None
        if column.primary_key and column.autoincrement:
            try:
                # Only the first qualifying integer PK column becomes
                # SERIAL; Informix allows one serial column per table.
                first = [c for c in column.table.primary_key.columns
                         if (c.autoincrement and
                             isinstance(c.type, sqltypes.Integer) and
                             not c.foreign_keys)].pop(0)
            except IndexError:
                pass

        if column is first:
            colspec += " SERIAL"
        else:
            colspec += " " + self.dialect.type_compiler.process(column.type)
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        if not column.nullable:
            colspec += " NOT NULL"

        return colspec

    def get_column_default_string(self, column):
        # Informix requires numeric server defaults given as strings to be
        # rendered unquoted; route them through a text() construct.
        if (isinstance(column.server_default, schema.DefaultClause) and
                isinstance(column.server_default.arg, basestring)):
            if isinstance(column.type, (sqltypes.Integer, sqltypes.Numeric)):
                return self.sql_compiler.process(
                    text(column.server_default.arg))

        return super(InfoDDLCompiler, self).get_column_default_string(column)

    ### Informix wants the constraint name at the end, hence this is c&p
    ### from sql/compiler.py
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)

        if constraint.name is not None:
            text += " CONSTRAINT %s" % \
                self.preparer.format_constraint(constraint)
        return text

    def visit_foreign_key_constraint(self, constraint):
        preparer = self.dialect.identifier_preparer
        remote_table = list(constraint._elements.values())[0].column.table
        text = "FOREIGN KEY (%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
                      for f in constraint._elements.values()),
            preparer.format_table(remote_table),
            ', '.join(preparer.quote(f.column.name, f.column.quote)
                      for f in constraint._elements.values())
        )
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)

        if constraint.name is not None:
            text += " CONSTRAINT %s " % \
                preparer.format_constraint(constraint)
        return text

    def visit_unique_constraint(self, constraint):
        text = "UNIQUE (%s)" % (
            ', '.join(self.preparer.quote(c.name, c.quote)
                      for c in constraint))
        text += self.define_constraint_deferrability(constraint)

        if constraint.name is not None:
            # BUGFIX: the original appended "CONSTRAINT ..." with no
            # separating space, yielding e.g. "UNIQUE (a, b)CONSTRAINT x".
            text += " CONSTRAINT %s" % \
                self.preparer.format_constraint(constraint)
        return text


class InformixIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer quoting Informix reserved words."""

    reserved_words = RESERVED_WORDS


class InformixDialect(default.DefaultDialect):
    name = 'informix'
max_identifier_length = 128 # adjusts at runtime based on server version type_compiler = InfoTypeCompiler statement_compiler = InfoSQLCompiler ddl_compiler = InfoDDLCompiler colspecs = colspecs ischema_names = ischema_names preparer = InformixIdentifierPreparer default_paramstyle = 'qmark' def __init__(self, has_transactions=True, *args, **kwargs): self.has_transactions = has_transactions default.DefaultDialect.__init__(self, *args, **kwargs) def initialize(self, connection): super(InformixDialect, self).initialize(connection) # http://www.querix.com/support/knowledge-base/error_number_message/error_200 if self.server_version_info < (9, 2): self.max_identifier_length = 18 else: self.max_identifier_length = 128 def do_begin(self, connection): cu = connection.cursor() cu.execute('SET LOCK MODE TO WAIT') if self.has_transactions: cu.execute('SET ISOLATION TO REPEATABLE READ') def do_commit(self, connection): if self.has_transactions: connection.commit() def do_rollback(self, connection): if self.has_transactions: connection.rollback() def _get_table_names(self, connection, schema, type, **kw): schema = schema or self.default_schema_name s = "select tabname, owner from systables where owner=? and tabtype=?" return [row[0] for row in connection.execute(s, schema, type)] @reflection.cache def get_table_names(self, connection, schema=None, **kw): return self._get_table_names(connection, schema, 'T', **kw) @reflection.cache def get_view_names(self, connection, schema=None, **kw): return self._get_table_names(connection, schema, 'V', **kw) @reflection.cache def get_schema_names(self, connection, **kw): s = "select owner from systables" return [row[0] for row in connection.execute(s)] def has_table(self, connection, table_name, schema=None): schema = schema or self.default_schema_name cursor = connection.execute( """select tabname from systables where tabname=? 
and owner=?""", table_name, schema) return cursor.first() is not None @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): schema = schema or self.default_schema_name c = connection.execute( """select colname, coltype, collength, t3.default, t1.colno from syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3 where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t3.tabid = t2.tabid and t3.colno = t1.colno order by t1.colno""", table_name, schema) primary_cols = self.get_primary_keys(connection, table_name, schema, **kw) columns = [] rows = c.fetchall() for name, colattr, collength, default, colno in rows: name = name.lower() autoincrement = False primary_key = False if name in primary_cols: primary_key = True # in 7.31, coltype = 0x000 # ^^-- column type # ^-- 1 not null, 0 null not_nullable, coltype = divmod(colattr, 256) if coltype not in (0, 13) and default: default = default.split()[-1] if coltype == 6: # Serial, mark as autoincrement autoincrement = True if coltype == 0 or coltype == 13: # char, varchar coltype = ischema_names[coltype](collength) if default: default = "'%s'" % default elif coltype == 5: # decimal precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF if scale == 255: scale = 0 coltype = sqltypes.Numeric(precision, scale) else: try: coltype = ischema_names[coltype] except KeyError: util.warn("Did not recognize type '%s' of column '%s'" % (coltype, name)) coltype = sqltypes.NULLTYPE column_info = dict(name=name, type=coltype, nullable=not not_nullable, default=default, autoincrement=autoincrement, primary_key=primary_key) columns.append(column_info) return columns @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): schema_sel = schema or self.default_schema_name c = connection.execute( """select t1.constrname as cons_name, t4.colname as local_column, t7.tabname as remote_table, t6.colname as remote_column, t7.owner as remote_owner from sysconstraints as t1 
, systables as t2 , sysindexes as t3 , syscolumns as t4 , sysreferences as t5 , syscolumns as t6 , systables as t7 , sysconstraints as t8 , sysindexes as t9 where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R' and t3.tabid = t2.tabid and t3.idxname = t1.idxname and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3, t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10, t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16) and t5.constrid = t1.constrid and t8.constrid = t5.primary and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3, t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10, t9.part11, t9.part11, t9.part12, t9.part13, t9.part4, t9.part15, t9.part16) and t9.idxname = t8.idxname and t7.tabid = t5.ptabid""", table_name, schema_sel) def fkey_rec(): return { 'name' : None, 'constrained_columns' : [], 'referred_schema' : None, 'referred_table' : None, 'referred_columns' : [] } fkeys = util.defaultdict(fkey_rec) rows = c.fetchall() for cons_name, local_column, \ remote_table, remote_column, remote_owner in rows: rec = fkeys[cons_name] rec['name'] = cons_name local_cols, remote_cols = \ rec['constrained_columns'], rec['referred_columns'] if not rec['referred_table']: rec['referred_table'] = remote_table if schema is not None: rec['referred_schema'] = remote_owner if local_column not in local_cols: local_cols.append(local_column) if remote_column not in remote_cols: remote_cols.append(remote_column) return fkeys.values() @reflection.cache def get_primary_keys(self, connection, table_name, schema=None, **kw): schema = schema or self.default_schema_name # Select the column positions from sysindexes for sysconstraints data = connection.execute( """select t2.* from systables as t1, sysindexes as t2, sysconstraints as t3 where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=? 
and t2.idxname=t3.idxname and t3.constrtype='P'""", table_name, schema ).fetchall() colpositions = set() for row in data: colpos = set([getattr(row, 'part%d' % x) for x in range(1,16)]) colpositions |= colpos if not len(colpositions): return [] # Select the column names using the columnpositions # TODO: Maybe cache a bit of those col infos (eg select all colnames for one table) place_holder = ','.join('?'*len(colpositions)) c = connection.execute( """select t1.colname from syscolumns as t1, systables as t2 where t2.tabname=? and t1.tabid = t2.tabid and t1.colno in (%s)""" % place_holder, table_name, *colpositions ).fetchall() return reduce(lambda x,y: list(x)+list(y), c, []) @reflection.cache def get_indexes(self, connection, table_name, schema, **kw): # TODO: schema... c = connection.execute( """select t1.* from sysindexes as t1 , systables as t2 where t1.tabid = t2.tabid and t2.tabname=?""", table_name) indexes = [] for row in c.fetchall(): colnames = [getattr(row, 'part%d' % x) for x in range(1,16)] colnames = [x for x in colnames if x] place_holder = ','.join('?'*len(colnames)) c = connection.execute( """select t1.colname from syscolumns as t1, systables as t2 where t2.tabname=? and t1.tabid = t2.tabid and t1.colno in (%s)""" % place_holder, table_name, *colnames ).fetchall() c = reduce(lambda x,y: list(x)+list(y), c, []) indexes.append({ 'name': row.idxname, 'unique': row.idxtype.lower() == 'u', 'column_names': c }) return indexes @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): schema = schema or self.default_schema_name c = connection.execute( """select t1.viewtext from sysviews as t1 , systables as t2 where t1.tabid=t2.tabid and t2.tabname=? and t2.owner=? order by seqno""", view_name, schema).fetchall() return ''.join([row[0] for row in c]) def _get_default_schema_name(self, connection): return connection.execute('select CURRENT_ROLE from systables').scalar()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Marcus Watkins <marwatk@marcuswatkins.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Unit tests for the gitlab_hooks module.  fetch_url is mocked, so every
# test asserts on the HTTP request(s) the module would have issued.

from units.compat.mock import patch
from ansible.modules.source_control import gitlab_hooks
from ansible.module_utils._text import to_bytes
from ansible.module_utils import basic

import pytest
import json

from units.modules.utils import set_module_args

# Canned GET /projects/:id/hooks payload describing one existing webhook.
fake_server_state = [
    {
        "id": 1,
        "url": "https://notification-server.example.com/gitlab-hook",
        "project_id": 10,
        "push_events": True,
        "issues_events": True,
        "merge_requests_events": True,
        "tag_push_events": True,
        "note_events": True,
        "job_events": True,
        "pipeline_events": True,
        "wiki_page_events": True,
        "enable_ssl_verification": True,
        "created_at": "2012-10-12T17:04:47Z"
    },
]


class FakeReader:
    """Minimal stand-in for the response object returned by fetch_url."""
    def __init__(self, object):
        # sort_keys=True makes the serialized body deterministic so the
        # tests below can compare request payloads byte-for-byte.
        self.content = json.dumps(object, sort_keys=True)

    def read(self):
        return self.content


class AnsibleExitJson(Exception):
    """Exception class to be raised by module.exit_json and caught by the test case"""
    pass


class AnsibleFailJson(Exception):
    """Exception class to be raised by module.fail_json and caught by the test case"""
    pass


def exit_json(*args, **kwargs):
    """function to patch over exit_json; package return data into an exception"""
    if 'changed' not in kwargs:
        kwargs['changed'] = False
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):
    """function to patch over fail_json; package return data into an exception"""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)


@pytest.fixture
def fetch_url_mock(mocker):
    # Intercepts all HTTP traffic from the module.
    return mocker.patch('ansible.module_utils.gitlab.fetch_url')


@pytest.fixture
def module_mock(mocker):
    # Routes exit_json/fail_json into the exception helpers above.
    return mocker.patch.multiple(basic.AnsibleModule,
                                 exit_json=exit_json,
                                 fail_json=fail_json)


def test_access_token_output(capfd, fetch_url_mock, module_mock):
    # access_token must be sent as a Bearer Authorization header.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://my-ci-server.example.com/gitlab-hook',
        'state': 'absent'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    first_call = fetch_url_mock.call_args_list[0][1]
    assert first_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/hooks'
    assert first_call['headers']['Authorization'] == 'Bearer test-access-token'
    assert 'Private-Token' not in first_call['headers']
    assert first_call['method'] == 'GET'


def test_private_token_output(capfd, fetch_url_mock, module_mock):
    # private_token must be sent as Private-Token; project path is
    # URL-escaped ('/' -> %2F).
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'private_token': 'test-private-token',
        'project': 'foo/bar',
        'hook_url': 'https://my-ci-server.example.com/gitlab-hook',
        'state': 'absent'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    first_call = fetch_url_mock.call_args_list[0][1]
    assert first_call['url'] == 'https://gitlab.example.com/api/v4/projects/foo%2Fbar/hooks'
    assert first_call['headers']['Private-Token'] == 'test-private-token'
    assert 'Authorization' not in first_call['headers']
    assert first_call['method'] == 'GET'


def test_bad_http_first_response(capfd, fetch_url_mock, module_mock):
    # A failing initial GET must surface as a module failure.
    fetch_url_mock.side_effect = [[FakeReader("Permission denied"), {'status': 403}],
                                  [FakeReader("Permission denied"), {'status': 403}]]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://my-ci-server.example.com/gitlab-hook',
        'state': 'absent'
    })
    with pytest.raises(AnsibleFailJson):
        gitlab_hooks.main()


def test_bad_http_second_response(capfd, fetch_url_mock, module_mock):
    # GET succeeds but the follow-up write request fails -> module failure.
    fetch_url_mock.side_effect = [[FakeReader(fake_server_state), {'status': 200}],
                                  [FakeReader("Permission denied"), {'status': 403}]]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://my-ci-server.example.com/gitlab-hook',
        'state': 'present'
    })
    with pytest.raises(AnsibleFailJson):
        gitlab_hooks.main()


def test_delete_non_existing(capfd, fetch_url_mock, module_mock):
    # Deleting a hook URL the server does not know is a no-op.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://my-ci-server.example.com/gitlab-hook',
        'state': 'absent'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    assert result.value.args[0]['changed'] is False


def test_delete_existing(capfd, fetch_url_mock, module_mock):
    # Deleting a known hook issues DELETE /hooks/<id>.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://notification-server.example.com/gitlab-hook',
        'state': 'absent'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    second_call = fetch_url_mock.call_args_list[1][1]

    assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/hooks/1'
    assert second_call['method'] == 'DELETE'

    assert result.value.args[0]['changed'] is True


def test_add_new(capfd, fetch_url_mock, module_mock):
    # An unknown hook URL with state=present is created via POST.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://my-ci-server.example.com/gitlab-hook',
        'state': 'present'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    second_call = fetch_url_mock.call_args_list[1][1]

    assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/hooks'
    assert second_call['method'] == 'POST'
    assert second_call['data'] == ('{"enable_ssl_verification": false, "issues_events": false, "job_events": false, '
                                   '"merge_requests_events": false, "note_events": false, "pipeline_events": false, "push_events": true, "tag_push_events": '
                                   'false, "token": null, "url": "https://my-ci-server.example.com/gitlab-hook", "wiki_page_events": false}')
    assert result.value.args[0]['changed'] is True


def test_update_existing(capfd, fetch_url_mock, module_mock):
    # Changing a flag (wiki_page_events) on an existing hook issues PUT.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://notification-server.example.com/gitlab-hook',
        'push_events': 'yes',
        'issues_events': 'yes',
        'merge_requests_events': 'yes',
        'tag_push_events': 'yes',
        'note_events': 'yes',
        'job_events': 'yes',
        'pipeline_events': 'yes',
        'wiki_page_events': 'no',
        'enable_ssl_verification': 'yes',
        'state': 'present'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    second_call = fetch_url_mock.call_args_list[1][1]

    assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/hooks/1'
    assert second_call['method'] == 'PUT'
    assert second_call['data'] == ('{"enable_ssl_verification": true, "issues_events": true, "job_events": true, '
                                   '"merge_requests_events": true, "note_events": true, "pipeline_events": true, "push_events": true, "tag_push_events": '
                                   'true, "token": null, "url": "https://notification-server.example.com/gitlab-hook", "wiki_page_events": false}')
    assert result.value.args[0]['changed'] is True


def test_unchanged_existing(capfd, fetch_url_mock, module_mock):
    # Desired state matches the server exactly: no write request is made.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://notification-server.example.com/gitlab-hook',
        'push_events': 'yes',
        'issues_events': 'yes',
        'merge_requests_events': 'yes',
        'tag_push_events': 'yes',
        'note_events': 'yes',
        'job_events': 'yes',
        'pipeline_events': 'yes',
        'wiki_page_events': 'yes',
        'enable_ssl_verification': 'yes',
        'state': 'present'
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    assert result.value.args[0]['changed'] is False
    assert fetch_url_mock.call_count == 1


def test_unchanged_existing_with_token(capfd, fetch_url_mock, module_mock):
    # GitLab never returns the hook token in GET responses, so supplying a
    # token always forces a PUT (changed=True) even when nothing else
    # differs from the server state.
    fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
    set_module_args({
        'api_url': 'https://gitlab.example.com/api',
        'access_token': 'test-access-token',
        'project': '10',
        'hook_url': 'https://notification-server.example.com/gitlab-hook',
        'push_events': 'yes',
        'issues_events': 'yes',
        'merge_requests_events': 'yes',
        'tag_push_events': 'yes',
        'note_events': 'yes',
        'job_events': 'yes',
        'pipeline_events': 'yes',
        'wiki_page_events': 'yes',
        'enable_ssl_verification': 'yes',
        'state': 'present',
        'token': 'secret-token',
    })
    with pytest.raises(AnsibleExitJson) as result:
        gitlab_hooks.main()

    second_call = fetch_url_mock.call_args_list[1][1]

    assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/hooks/1'
    assert second_call['method'] == 'PUT'
    assert second_call['data'] == ('{"enable_ssl_verification": true, "issues_events": true, "job_events": true, '
                                   '"merge_requests_events": true, "note_events": true, "pipeline_events": true, "push_events": true, '
                                   '"tag_push_events": true, "token": "secret-token", "url": "https://notification-server.example.com/gitlab-hook", '
                                   '"wiki_page_events": true}')
    assert result.value.args[0]['changed'] is True
unknown
codeparrot/codeparrot-clean
**Supported operators for device: XLA_CPU_JIT** Operator | Type Constraint ------------------------------------- | --------------- `Abs` | `T={double,float,int32,int64}` `Acos` | `T={complex64,double,float,int32,int64}` `Acosh` | `T={complex64,double,float}` `Add` | `T={complex64,double,float,int32,int64}` `AddN` | `T={complex64,double,float,int32,int64,uint32,uint64}` `AdjustContrastv2` | `AdjustHue` | `AdjustSaturation` | `All` | `Tidx={int32,int64}` `Angle` | `Tout={double,float}`<br>`T={complex64}` `Any` | `Tidx={int32,int64}` `ApproximateEqual` | `T={complex64,double,float,int32,int64,uint32,uint64}` `ArgMax` | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={float}` `ArgMin` | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `Asin` | `T={complex64,double,float,int32,int64}` `Asinh` | `T={complex64,double,float}` `AssignAddVariableOp` | `dtype={complex64,double,float,int32,int64,uint32,uint64}` `AssignSubVariableOp` | `dtype={complex64,double,float,int32,int64,uint32,uint64}` `AssignVariableOp` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `Atan` | `T={complex64,double,float,int32,int64}` `Atan2` | `T={double,float}` `Atanh` | `T={complex64,double,float}` `AvgPool` | `T={double,float}` `AvgPool3D` | `T={double,float}` `AvgPool3DGrad` | `T={double,float}` `AvgPoolGrad` | `T={double,float}` `BatchMatMul` | `T={complex64,double,float,int32}` `BatchToSpace` | `Tidx={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `BatchToSpaceND` | `Tcrops={int32,int64}`<br>`Tblock_shape={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `BiasAdd` | `T={complex64,double,float,int32,int64,uint32,uint64}` `BiasAddGrad` | `T={complex64,double,float,int32,int64,uint32,uint64}` `BiasAddV1` | `T={complex64,double,float,int32,int64,uint32,uint64}` `BitwiseAnd` | `T={int32,int64,uint32,uint64}` `BitwiseOr` | `T={int32,int64,uint32,uint64}` 
`BroadcastArgs` | `T={int32,int64}` `BroadcastGradientArgs` | `T={int32,int64}` `Cast` | `DstT={bool,complex64,double,float,int32,int64,uint32,uint64}`<br>`SrcT={bool,complex64,double,float,int32,int64,uint32,uint64}` `Ceil` | `T={double,float}` `Cholesky` | `T={double,float}` `Complex` | `Tout={complex64}`<br>`T={double,float}` `ComplexAbs` | `Tout={double,float}`<br>`T={complex64}` `Concat` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `ConcatOffset` | `ConcatV2` | `Tidx={int32}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Conj` | `T={complex64}` `Const` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `ControlTrigger` | `Conv2D` | `T={float}` `Conv2DBackpropFilter` | `T={float}` `Conv2DBackpropInput` | `T={float}` `Conv3D` | `T={double,float}` `Conv3DBackpropFilterV2` | `T={double,float}` `Conv3DBackpropInputV2` | `T={double,float}` `Cos` | `T={complex64,double,float}` `Cosh` | `T={complex64,double,float}` `Cross` | `T={double,float,int32,int64,uint32,uint64}` `Cumprod` | `Tidx={int32,int64}`<br>`T={float}` `Cumsum` | `Tidx={int32,int64}`<br>`T={float}` `DepthToSpace` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `DepthwiseConv2dNative` | `T={double,float}` `DepthwiseConv2dNativeBackpropFilter` | `T={double,float}` `DepthwiseConv2dNativeBackpropInput` | `T={double,float}` `Diag` | `T={complex64,double,float,int32,int64}` `DiagPart` | `T={complex64,double,float,int32,int64}` `Div` | `T={complex64,double,float,int32,int64}` `DynamicStitch` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Elu` | `T={double,float}` `EluGrad` | `T={double,float}` `Equal` | `T={bool,complex64,double,float,int32,int64}` `Exp` | `T={complex64,double,float}` `ExpandDims` | `Tdim={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Expm1` | `T={complex64,double,float}` `ExtractImagePatches` | `T={double,float,int32,int64,uint32,uint64}` `FFT` | `FFT2D` | `FFT3D` | 
`FakeQuantWithMinMaxArgs` | `FakeQuantWithMinMaxArgsGradient` | `FakeQuantWithMinMaxVars` | `FakeQuantWithMinMaxVarsGradient` | `Fill` | `index_type={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Floor` | `T={double,float}` `FloorDiv` | `T={complex64,double,float,int32,int64}` `FloorMod` | `T={double,float,int32,int64}` `FusedBatchNorm` | `T={float}` `FusedBatchNormGrad` | `T={float}` `FusedBatchNormGradV2` | `U={float}`<br>`T={float}` `FusedBatchNormV2` | `U={float}`<br>`T={float}` `Gather` | `Tindices={int32,int64}`<br>`Tparams={bool,complex64,double,float,int32,int64,uint32,uint64}` `GatherNd` | `Tindices={int32,int64}`<br>`Tparams={bool,complex64,double,float,int32,int64,uint32,uint64}` `GatherV2` | `Taxis={int32,int64}`<br>`Tindices={int32,int64}`<br>`Tparams={bool,complex64,double,float,int32,int64,uint32,uint64}` `Greater` | `T={double,float,int32,int64,uint32,uint64}` `GreaterEqual` | `T={double,float,int32,int64,uint32,uint64}` `HSVToRGB` | `T={double,float}` `IFFT` | `IFFT2D` | `IFFT3D` | `IRFFT` | `IRFFT2D` | `IRFFT3D` | `Identity` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `IdentityN` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Imag` | `Tout={double,float}`<br>`T={complex64}` `Inv` | `T={complex64,double,float,int32,int64}` `Invert` | `T={int32,int64,uint32,uint64}` `InvertPermutation` | `T={int32}` `IsFinite` | `T={double,float}` `IsInf` | `T={double,float}` `IsNan` | `T={double,float}` `L2Loss` | `T={double,float}` `LRN` | `T={float}` `LRNGrad` | `T={float}` `LeftShift` | `T={int32,int64,uint32,uint64}` `Less` | `T={double,float,int32,int64,uint32,uint64}` `LessEqual` | `T={double,float,int32,int64,uint32,uint64}` `LinSpace` | `Tidx={int32,int64}`<br>`T={double,float}` `Log` | `T={complex64,double,float}` `Log1p` | `T={complex64,double,float}` `LogSoftmax` | `T={double,float}` `LogicalAnd` | `LogicalNot` | `LogicalOr` | `MatMul` | `T={complex64,double,float}` `MatrixBandPart` | 
`Tindex={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `MatrixDiag` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `MatrixDiagPart` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `MatrixSetDiag` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `MatrixTriangularSolve` | `T={complex64,double,float}` `Max` | `Tidx={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `MaxPool` | `T={double,float,int32,int64}` `MaxPool3D` | `T={float}` `MaxPool3DGrad` | `TInput={float}`<br>`T={float}` `MaxPoolGrad` | `T={double,float,int32,int64,uint32,uint64}` `MaxPoolGradGrad` | `T={float}` `MaxPoolGradGradV2` | `T={float}` `MaxPoolGradV2` | `T={double,float,int32,int64,uint32,uint64}` `MaxPoolV2` | `T={double,float,int32,int64}` `Maximum` | `T={double,float,int32,int64}` `Mean` | `Tidx={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `Min` | `Tidx={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `Minimum` | `T={double,float,int32,int64}` `MirrorPad` | `Tpaddings={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Mod` | `T={double,float,int32,int64}` `Mul` | `T={complex64,double,float,int32,int64}` `Multinomial` | `output_dtype={int32,int64}`<br>`T={double,float,int32,int64,uint32,uint64}` `Neg` | `T={complex64,double,float,int32,int64}` `NoOp` | `NotEqual` | `T={bool,complex64,double,float,int32,int64}` `OneHot` | `TI={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `OnesLike` | `T={bool,complex64,double,float,int32,int64}` `Pack` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Pad` | `Tpaddings={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `PadV2` | `Tpaddings={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `ParallelDynamicStitch` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Pow` | 
`T={complex64,double,float,int32,int64}` `PreventGradient` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Prod` | `Tidx={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `QuantizeAndDequantizeV2` | `T={double,float}` `RFFT` | `RFFT2D` | `RFFT3D` | `RGBToHSV` | `T={double,float}` `RandomStandardNormal` | `dtype={float}` `RandomUniform` | `T={int32,int64}`<br>`dtype={double,float}` `RandomUniformInt` | `T={int32,int64}`<br>`Tout={int32,int64}` `Range` | `Tidx={double,float,int32,int64}` `Rank` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `ReadVariableOp` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `Real` | `Tout={double,float}`<br>`T={complex64}` `RealDiv` | `T={complex64,double,float,int32,int64}` `Reciprocal` | `T={complex64,double,float,int32,int64}` `ReciprocalGrad` | `T={complex64,double,float}` `Relu` | `T={double,float,int32,int64,uint32,uint64}` `Relu6` | `T={double,float,int32,int64,uint32,uint64}` `Relu6Grad` | `T={double,float,int32,int64,uint32,uint64}` `ReluGrad` | `T={double,float,int32,int64,uint32,uint64}` `Reshape` | `Tshape={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `ResizeBilinear` | `T={double,float,int32,int64}` `ResizeBilinearGrad` | `T={double,float}` `ResourceApplyAdagrad` | `T={double,float}` `ResourceApplyAdam` | `T={double,float}` `ResourceApplyFtrl` | `T={double,float}` `ResourceApplyFtrlV2` | `T={double,float}` `ResourceApplyGradientDescent` | `T={double,float}` `ResourceApplyMomentum` | `T={double,float}` `ResourceApplyRMSProp` | `T={double,float}` `ResourceGather` | `Tindices={int32,int64}`<br>`dtype={complex64,double,float,int32,int64,uint32,uint64}` `ResourceStridedSliceAssign` | `Index={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Reverse` | `T={bool,complex64,double,float,int32,int64}` `ReverseSequence` | `Tlen={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` 
`ReverseV2` | `T={bool,complex64,double,float,int32,int64}`<br>`Tidx={int32,int64}` `RightShift` | `T={int32,int64,uint32,uint64}` `Rint` | `T={double,float}` `Round` | `T={complex64,double,float,int32,int64}` `Rsqrt` | `T={complex64,double,float}` `RsqrtGrad` | `T={complex64,double,float}` `ScatterNd` | `Tindices={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Select` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Selu` | `T={double,float}` `SeluGrad` | `T={double,float}` `Shape` | `out_type={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `ShapeN` | `out_type={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Sigmoid` | `T={complex64,double,float}` `SigmoidGrad` | `T={complex64,double,float}` `Sign` | `T={complex64,double,float,int32,int64}` `Sin` | `T={complex64,double,float}` `Sinh` | `T={complex64,double,float}` `Size` | `out_type={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Slice` | `Index={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Snapshot` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Softmax` | `T={double,float}` `SoftmaxCrossEntropyWithLogits` | `T={double,float}` `Softplus` | `T={double,float,int32,int64,uint32,uint64}` `SoftplusGrad` | `T={double,float,int32,int64,uint32,uint64}` `Softsign` | `T={double,float,int32,int64,uint32,uint64}` `SoftsignGrad` | `T={double,float,int32,int64,uint32,uint64}` `SpaceToBatch` | `Tpaddings={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `SpaceToBatchND` | `Tblock_shape={int32,int64}`<br>`Tpaddings={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `SpaceToDepth` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `SparseMatMul` | `Tb={float}`<br>`Ta={float}` `SparseSoftmaxCrossEntropyWithLogits` | `Tlabels={int32,int64}`<br>`T={double,float}` `Split` | 
`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `SplitV` | `Tlen={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Sqrt` | `T={complex64,double,float}` `SqrtGrad` | `T={complex64,double,float}` `Square` | `T={complex64,double,float,int32,int64}` `SquaredDifference` | `T={complex64,double,float,int32,int64}` `Squeeze` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `StackCloseV2` | `StackPopV2` | `elem_type={bool,complex64,double,float,int32,int64,uint32,uint64}` `StackPushV2` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `StackV2` | `elem_type={bool,complex64,double,float,int32,int64,uint32,uint64}` `StatelessRandomNormal` | `Tseed={int32}`<br>`T={int32,int64}`<br>`dtype={float}` `StatelessRandomUniform` | `Tseed={int32}`<br>`T={int32,int64}`<br>`dtype={float}` `StopGradient` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `StridedSlice` | `Index={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `StridedSliceGrad` | `Index={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Sub` | `T={complex64,double,float,int32,int64}` `Sum` | `Tidx={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `SymbolicGradient` | `Tout={bool,complex64,double,float,int32,int64,uint32,uint64}`<br>`Tin={bool,complex64,double,float,int32,int64,uint32,uint64}` `Tan` | `T={complex64,double,float,int32,int64}` `Tanh` | `T={complex64,double,float}` `TanhGrad` | `T={complex64,double,float}` `TensorArrayCloseV3` | `TensorArrayConcatV3` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `TensorArrayGatherV3` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `TensorArrayGradV3` | `TensorArrayReadV3` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `TensorArrayScatterV3` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `TensorArraySizeV3` | `TensorArraySplitV3` | 
`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `TensorArrayV3` | `dtype={bool,complex64,double,float,int32,int64,uint32,uint64}` `TensorArrayWriteV3` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Tile` | `Tmultiples={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `Transpose` | `Tperm={int32,int64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `TruncateDiv` | `T={complex64,complex128,double,float,half,bfloat16,int8,int16,int32,int64,uint8,uint16,uint32,uint64}` `TruncateMod` | `T={double,float,int32,int64}` `TruncatedNormal` | `T={int32,int64}`<br>`dtype={double,float}` `Unpack` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `UnsortedSegmentSum` | `Tnumsegments={int32,int64}`<br>`Tindices={int32,int64}`<br>`T={complex64,double,float,int32,int64,uint32,uint64}` `VarIsInitializedOp` | `VariableShape` | `out_type={int32,int64}` `XlaWhile` | `T={bool,complex64,double,float,int32,int64,resource,uint32,uint64}` `ZerosLike` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `_Arg` | `T={bool,complex64,double,float,int32,int64,resource,uint32,uint64}` `_ArrayToList` | `out_types={bool,complex64,double,float,int32,int64,uint32,uint64}`<br>`T={bool,complex64,double,float,int32,int64,uint32,uint64}` `_ListToArray` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}`<br>`Tin={bool,complex64,double,float,int32,int64,uint32,uint64}` `_Retval` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `_XLARecv` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` `_XLASend` | `T={bool,complex64,double,float,int32,int64,uint32,uint64}` To regenerate this table, run: ```shell bazel run -c opt -- tensorflow/compiler/tf2xla:tf2xla_supported_ops --device=XLA_CPU_JIT ```
unknown
github
https://github.com/tensorflow/tensorflow
tensorflow/compiler/tf2xla/g3doc/cpu_supported_ops.md
# -*- coding: utf-8 -*-
#
# Modifications Copyright (C) 2009 John Hampton <pacopablo@pacopablo.com>
#
# Original code from Trac trunk r8199
# http://svn.edgewall.org/repos/trac/trunk
#
# Moved from core.py
#
# Original Copyright and License:
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
#         Christopher Lenz <cmlenz@gmx.de>

__all__ = [
    'DustbowlError',
    'ConfigurationError',
    'ConsoleObjectError',
]


class DustbowlError(Exception):
    """Base class for errors raised by this package.

    :param message: human-readable description of the error.
    :param title: optional override for the class-level ``title``.
    :param show_traceback: when true, callers that render this error may
        include the traceback in their output.
    """

    # NOTE(review): the title says 'Gartersnake' while the exception family
    # is named 'Dustbowl' -- presumably a leftover from the project this
    # module was forked from. Left unchanged because it is a runtime string
    # that display code may depend on; confirm before renaming.
    title = 'Gartersnake Error'

    def __init__(self, message, title=None, show_traceback=False):
        Exception.__init__(self, message)
        self.message = message
        if title:
            self.title = title
        self.show_traceback = show_traceback

    def __unicode__(self):
        # Python 2 only: `unicode` does not exist on Python 3, where
        # Exception.__str__ already returns the message.
        return unicode(self.message)


class ConfigurationError(DustbowlError):
    """Exception raised when a value in the configuration file is not valid."""
    title = 'Configuration Error'


class ConsoleObjectError(Exception):
    """Raised when unable to assign a console object.

    This may be due to the console object already existing, or the
    namespace not being able to support the object.

    :param key: name of the object that could not be assigned.
    :param is_ns: True when the failure concerns a namespace assignment.
    :param cur_ns: list of namespace path components leading to the failure.
    """

    def __init__(self, key, is_ns=False, cur_ns=None):
        if not is_ns or (is_ns and not cur_ns):
            self.msg = "The key >>%s<< already exists in the console." % key
        else:
            self.msg = "The namespace %s is not an object that can be " \
                       "assigned methods and other namespaces. Can not " \
                       "add object: %s" % ('.'.join(cur_ns), key)
        # Bug fix: previously Exception.__init__ was never called, so
        # str(exc) and rendered tracebacks showed an empty message.
        Exception.__init__(self, self.msg)
        self.key = key
        self.is_ns = is_ns
        self.cur_ns = cur_ns or []

    def __repr__(self):
        return "<ConsoleObjectError(key=%s, is_ns=%s, cur_ns=%s)>" \
               % (str(self.key), str(self.is_ns), str(self.cur_ns))
unknown
codeparrot/codeparrot-clean
""" pydevd_vars deals with variables: resolution/conversion to XML. """ import pickle from pydevd_constants import * #@UnusedWildImport from types import * #@UnusedWildImport from pydevd_custom_frames import getCustomFrame from pydevd_xml import * from _pydev_imps import _pydev_thread try: from StringIO import StringIO except ImportError: from io import StringIO import sys #@Reimport import _pydev_threading as threading import traceback import pydevd_save_locals from pydev_imports import Exec, quote, execfile from pydevd_utils import to_string try: import types frame_type = types.FrameType except: frame_type = type(sys._getframe()) #-------------------------------------------------------------------------- defining true and false for earlier versions try: __setFalse = False except: import __builtin__ setattr(__builtin__, 'True', 1) setattr(__builtin__, 'False', 0) #------------------------------------------------------------------------------------------------------ class for errors class VariableError(RuntimeError):pass class FrameNotFoundError(RuntimeError):pass def iterFrames(initialFrame): '''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)''' #cannot use yield frames = [] while initialFrame is not None: frames.append(initialFrame) initialFrame = initialFrame.f_back return frames def dumpFrames(thread_id): sys.stdout.write('dumping frames\n') if thread_id != GetThreadId(threading.currentThread()): raise VariableError("findFrame: must execute on same thread") curFrame = GetFrame() for frame in iterFrames(curFrame): sys.stdout.write('%s\n' % pickle.dumps(frame)) #=============================================================================== # AdditionalFramesContainer #=============================================================================== class AdditionalFramesContainer: lock = _pydev_thread.allocate_lock() additional_frames = {} #dict of dicts def addAdditionalFrameById(thread_id, 
frames_by_id): AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id def removeAdditionalFrameById(thread_id): del AdditionalFramesContainer.additional_frames[thread_id] def findFrame(thread_id, frame_id): """ returns a frame on the thread that has a given frame_id """ try: curr_thread_id = GetThreadId(threading.currentThread()) if thread_id != curr_thread_id : try: return getCustomFrame(thread_id, frame_id) #I.e.: thread_id could be a stackless frame id + thread_id. except: pass raise VariableError("findFrame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id)) lookingFor = int(frame_id) if AdditionalFramesContainer.additional_frames: if DictContains(AdditionalFramesContainer.additional_frames, thread_id): frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor) if frame is not None: return frame curFrame = GetFrame() if frame_id == "*": return curFrame # any frame is specified with "*" frameFound = None for frame in iterFrames(curFrame): if lookingFor == id(frame): frameFound = frame del frame break del frame #Important: python can hold a reference to the frame from the current context #if an exception is raised, so, if we don't explicitly add those deletes #we might have those variables living much more than we'd want to. #I.e.: sys.exc_info holding reference to frame that raises exception (so, other places #need to call sys.exc_clear()) del curFrame if frameFound is None: msgFrames = '' i = 0 for frame in iterFrames(GetFrame()): i += 1 msgFrames += str(id(frame)) if i % 5 == 0: msgFrames += '\n' else: msgFrames += ' - ' errMsg = '''findFrame: frame not found. 
Looking for thread_id:%s, frame_id:%s Current thread_id:%s, available frames: %s\n ''' % (thread_id, lookingFor, curr_thread_id, msgFrames) sys.stderr.write(errMsg) return None return frameFound except: import traceback traceback.print_exc() return None def getVariable(thread_id, frame_id, scope, attrs): """ returns the value of a variable :scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME BY_ID means we'll traverse the list of all objects alive to get the object. :attrs: after reaching the proper scope, we have to get the attributes until we find the proper location (i.e.: obj\tattr1\tattr2) :note: when BY_ID is used, the frame_id is considered the id of the object to find and not the frame (as we don't care about the frame in this case). """ if scope == 'BY_ID': if thread_id != GetThreadId(threading.currentThread()) : raise VariableError("getVariable: must execute on same thread") try: import gc objects = gc.get_objects() except: pass #Not all python variants have it. else: frame_id = int(frame_id) for var in objects: if id(var) == frame_id: if attrs is not None: attrList = attrs.split('\t') for k in attrList: _type, _typeName, resolver = getType(var) var = resolver.resolve(var, k) return var #If it didn't return previously, we coudn't find it by id (i.e.: alrceady garbage collected). 
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,)) return None frame = findFrame(thread_id, frame_id) if frame is None: return {} if attrs is not None: attrList = attrs.split('\t') else: attrList = [] if scope == 'EXPRESSION': for count in xrange(len(attrList)): if count == 0: # An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression var = evaluateExpression(thread_id, frame_id, attrList[count], False) else: _type, _typeName, resolver = getType(var) var = resolver.resolve(var, attrList[count]) else: if scope == "GLOBAL": var = frame.f_globals del attrList[0] # globals are special, and they get a single dummy unused attribute else: # in a frame access both locals and globals as Python does var = {} var.update(frame.f_globals) var.update(frame.f_locals) for k in attrList: _type, _typeName, resolver = getType(var) var = resolver.resolve(var, k) return var def resolveCompoundVariable(thread_id, frame_id, scope, attrs): """ returns the value of the compound variable as a dictionary""" var = getVariable(thread_id, frame_id, scope, attrs) try: _type, _typeName, resolver = getType(var) return resolver.getDictionary(var) except: sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % ( thread_id, frame_id, scope, attrs,)) traceback.print_exc() def resolveVar(var, attrs): attrList = attrs.split('\t') for k in attrList: type, _typeName, resolver = getType(var) var = resolver.resolve(var, k) try: type, _typeName, resolver = getType(var) return resolver.getDictionary(var) except: traceback.print_exc() def customOperation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name): """ We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var. code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed. 
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint) """ expressionValue = getVariable(thread_id, frame_id, scope, attrs) try: namespace = {'__name__': '<customOperation>'} if style == "EXECFILE": namespace['__file__'] = code_or_file execfile(code_or_file, namespace, namespace) else: # style == EXEC namespace['__file__'] = '<customOperationCode>' Exec(code_or_file, namespace, namespace) return str(namespace[operation_fn_name](expressionValue)) except: traceback.print_exc() def evalInContext(expression, globals, locals): result = None try: result = eval(expression, globals, locals) except Exception: s = StringIO() traceback.print_exc(file=s) result = s.getvalue() try: try: etype, value, tb = sys.exc_info() result = value finally: etype = value = tb = None except: pass result = ExceptionOnEvaluate(result) # Ok, we have the initial error message, but let's see if we're dealing with a name mangling error... try: if '__' in expression: # Try to handle '__' name mangling... 
split = expression.split('.') curr = locals.get(split[0]) for entry in split[1:]: if entry.startswith('__') and not hasattr(curr, entry): entry = '_%s%s' % (curr.__class__.__name__, entry) curr = getattr(curr, entry) result = curr except: pass return result def evaluateExpression(thread_id, frame_id, expression, doExec): '''returns the result of the evaluated expression @param doExec: determines if we should do an exec or an eval ''' frame = findFrame(thread_id, frame_id) if frame is None: return #Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 #(Names not resolved in generator expression in method) #See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals try: expression = str(expression.replace('@LINE@', '\n')) if doExec: try: #try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and #it will have whatever the user actually did) compiled = compile(expression, '<string>', 'eval') except: Exec(expression, updated_globals, frame.f_locals) pydevd_save_locals.save_locals(frame) else: result = eval(compiled, updated_globals, frame.f_locals) if result is not None: #Only print if it's not None (as python does) sys.stdout.write('%s\n' % (result,)) return else: return evalInContext(expression, updated_globals, frame.f_locals) finally: #Should not be kept alive if an exception happens and this frame is kept in the stack. del updated_globals del frame def changeAttrExpression(thread_id, frame_id, attr, expression, dbg): '''Changes some attribute in a given frame. 
''' frame = findFrame(thread_id, frame_id) if frame is None: return try: expression = expression.replace('@LINE@', '\n') if dbg.plugin: result = dbg.plugin.change_variable(frame, attr, expression) if result: return result if attr[:7] == "Globals": attr = attr[8:] if attr in frame.f_globals: frame.f_globals[attr] = eval(expression, frame.f_globals, frame.f_locals) return frame.f_globals[attr] else: if pydevd_save_locals.is_save_locals_available(): frame.f_locals[attr] = eval(expression, frame.f_globals, frame.f_locals) pydevd_save_locals.save_locals(frame) return frame.f_locals[attr] #default way (only works for changing it in the topmost frame) result = eval(expression, frame.f_globals, frame.f_locals) Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals) return result except Exception: traceback.print_exc() MAXIMUM_ARRAY_SIZE = 100 MAX_SLICE_SIZE = 1000 def array_to_xml(array, roffset, coffset, rows, cols, format): xml = "" rows = min(rows, MAXIMUM_ARRAY_SIZE) cols = min(cols, MAXIMUM_ARRAY_SIZE) #there is no obvious rule for slicing (at least 5 choices) if len(array) == 1 and (rows > 1 or cols > 1): array = array[0] if array.size > len(array): array = array[roffset:, coffset:] rows = min(rows, len(array)) cols = min(cols, len(array[0])) if len(array) == 1: array = array[0] elif array.size == len(array): if roffset == 0 and rows == 1: array = array[coffset:] cols = min(cols, len(array)) elif coffset == 0 and cols == 1: array = array[roffset:] rows = min(rows, len(array)) xml += "<arraydata rows=\"%s\" cols=\"%s\"/>" % (rows, cols) for row in range(rows): xml += "<row index=\"%s\"/>" % to_string(row) for col in range(cols): value = array if rows == 1 or cols == 1: if rows == 1 and cols == 1: value = array[0] else: if rows == 1: dim = col else: dim = row value = array[dim] if "ndarray" in str(type(value)): value = value[0] else: value = array[row][col] value = format % value xml += varToXML(value, '') return xml def array_to_meta_xml(array, name, 
format): type = array.dtype.kind slice = name l = len(array.shape) # initial load, compute slice if format == '%': if l > 2: slice += '[0]' * (l - 2) for r in range(l - 2): array = array[0] if type == 'f': format = '.5f' elif type == 'i' or type == 'u': format = 'd' else: format = 's' else: format = format.replace('%', '') l = len(array.shape) reslice = "" if l > 2: raise Exception("%s has more than 2 dimensions." % slice) elif l == 1: # special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim # http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns # explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1 # we use kind of a hack - get information about memory from C_CONTIGUOUS is_row = array.flags['C_CONTIGUOUS'] if is_row: rows = 1 cols = min(len(array), MAX_SLICE_SIZE) if cols < len(array): reslice = '[0:%s]' % (cols) array = array[0:cols] else: cols = 1 rows = min(len(array), MAX_SLICE_SIZE) if rows < len(array): reslice = '[0:%s]' % (rows) array = array[0:rows] elif l == 2: rows = min(array.shape[-2], MAX_SLICE_SIZE) cols = min(array.shape[-1], MAX_SLICE_SIZE) if cols < array.shape[-1] or rows < array.shape[-2]: reslice = '[0:%s, 0:%s]' % (rows, cols) array = array[0:rows, 0:cols] #avoid slice duplication if not slice.endswith(reslice): slice += reslice bounds = (0, 0) if type in "biufc": bounds = (array.min(), array.max()) xml = '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \ (slice, rows, cols, format, type, bounds[1], bounds[0]) return array, xml, rows, cols, format
unknown
codeparrot/codeparrot-clean
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Currency rate date check module for OpenERP
#    Copyright (C) 2012-2013 Akretion (http://www.akretion.com)
#    @author Alexis de Lattre <alexis.delattre@akretion.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, _
from openerp.exceptions import Warning

# Design notes.
# In odoo/openerp/addons/base/res/res_currency.py the call chain is:
#   compute() -> _get_conversion_rate()
#             -> _current_rate() -> _get_current_rate()
# and the date used for the rate comes from the context.
# We hook the lowest-level function, _get_current_rate(), so the check
# applies everywhere -- even for scenarios that read the "rate" field of
# the currency record directly.


class ResCurrency(models.Model):
    _inherit = 'res.currency'

    def _get_current_rate(
            self, cr, uid, ids, raise_on_no_rate=True, context=None):
        """Refuse conversions that would silently use a stale currency rate.

        Before delegating to the standard implementation, verify that the
        rate nearest (and prior) to the requested context date is not older
        than the company's configured maximum delta in days; raise a
        Warning otherwise.
        """
        ctx = context if context is not None else {}
        # When 'date' is missing from the context we deliberately skip the
        # check: raising there would fire far too often (e.g. simply
        # opening the Currencies menu), even though it means a conversion
        # on today's date could still use an old rate.
        requested_date = ctx.get('date')
        if requested_date and not ctx.get('disable_rate_date_check'):
            for currency_id in ids:
                # The company could be read from the currency, but that
                # field is not required, so it is taken from the user.
                user = self.pool['res.users'].browse(
                    cr, uid, uid, context=ctx)
                # The company currency only carries a single old rate of
                # 1.0, so it is exempt from the check.
                if user.company_id.currency_id.id == currency_id:
                    continue
                # Fetch the most recent rate dated on or before the
                # requested date.
                cr.execute(
                    'SELECT rate, name FROM res_currency_rate '
                    'WHERE currency_id = %s '
                    'AND name <= %s '
                    'ORDER BY name desc LIMIT 1',
                    (currency_id, requested_date))
                if not cr.rowcount:
                    continue
                rate_date = cr.fetchone()[1]
                rate_date_dt = fields.Datetime.from_string(rate_date)
                date_dt = fields.Datetime.from_string(requested_date)
                max_delta = user.company_id.currency_rate_max_delta
                if (date_dt - rate_date_dt).days > max_delta:
                    currency = self.browse(
                        cr, uid, currency_id, context=ctx)
                    raise Warning(
                        _('You are requesting a rate conversion on %s '
                          'for currency %s but the nearest '
                          'rate before that date is '
                          'dated %s and the maximum currency '
                          'rate time delta for '
                          'your company is %s days')
                        % (requested_date, currency.name, rate_date,
                           max_delta))
        # Delegate to the standard implementation from the "base" module.
        return super(ResCurrency, self)._get_current_rate(
            cr, uid, ids, raise_on_no_rate=raise_on_no_rate, context=context)
unknown
codeparrot/codeparrot-clean
"""Test functions for the sparse.linalg._expm_multiply module """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_allclose, assert_, assert_equal, suppress_warnings) from scipy.sparse import SparseEfficiencyWarning import scipy.linalg from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max, _onenormest_matrix_power, expm_multiply, _expm_multiply_simple, _expm_multiply_interval) def less_than_or_close(a, b): return np.allclose(a, b) or (a < b) class TestExpmActionSimple(object): """ These tests do not consider the case of multiple time steps in one call. """ def test_theta_monotonicity(self): pairs = sorted(_theta.items()) for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]): assert_(theta_a < theta_b) def test_p_max_default(self): m_max = 55 expected_p_max = 8 observed_p_max = _compute_p_max(m_max) assert_equal(observed_p_max, expected_p_max) def test_p_max_range(self): for m_max in range(1, 55+1): p_max = _compute_p_max(m_max) assert_(p_max*(p_max - 1) <= m_max + 1) p_too_big = p_max + 1 assert_(p_too_big*(p_too_big - 1) > m_max + 1) def test_onenormest_matrix_power(self): np.random.seed(1234) n = 40 nsamples = 10 for i in range(nsamples): A = scipy.linalg.inv(np.random.randn(n, n)) for p in range(4): if not p: M = np.identity(n) else: M = np.dot(M, A) estimated = _onenormest_matrix_power(A, p) exact = np.linalg.norm(M, 1) assert_(less_than_or_close(estimated, exact)) assert_(less_than_or_close(exact, 3*estimated)) def test_expm_multiply(self): np.random.seed(1234) n = 40 k = 3 nsamples = 10 for i in range(nsamples): A = scipy.linalg.inv(np.random.randn(n, n)) B = np.random.randn(n, k) observed = expm_multiply(A, B) expected = np.dot(scipy.linalg.expm(A), B) assert_allclose(observed, expected) def test_matrix_vector_multiply(self): np.random.seed(1234) n = 40 nsamples = 10 for i in range(nsamples): A = scipy.linalg.inv(np.random.randn(n, n)) v = np.random.randn(n) 
observed = expm_multiply(A, v) expected = np.dot(scipy.linalg.expm(A), v) assert_allclose(observed, expected) def test_scaled_expm_multiply(self): np.random.seed(1234) n = 40 k = 3 nsamples = 10 for i in range(nsamples): for t in (0.2, 1.0, 1.5): with np.errstate(invalid='ignore'): A = scipy.linalg.inv(np.random.randn(n, n)) B = np.random.randn(n, k) observed = _expm_multiply_simple(A, B, t=t) expected = np.dot(scipy.linalg.expm(t*A), B) assert_allclose(observed, expected) def test_scaled_expm_multiply_single_timepoint(self): np.random.seed(1234) t = 0.1 n = 5 k = 2 A = np.random.randn(n, n) B = np.random.randn(n, k) observed = _expm_multiply_simple(A, B, t=t) expected = scipy.linalg.expm(t*A).dot(B) assert_allclose(observed, expected) def test_sparse_expm_multiply(self): np.random.seed(1234) n = 40 k = 3 nsamples = 10 for i in range(nsamples): A = scipy.sparse.rand(n, n, density=0.05) B = np.random.randn(n, k) observed = expm_multiply(A, B) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") sup.filter(SparseEfficiencyWarning, "spsolve is more efficient when sparse b is in the CSC matrix format") expected = scipy.linalg.expm(A).dot(B) assert_allclose(observed, expected) def test_complex(self): A = np.array([ [1j, 1j], [0, 1j]], dtype=complex) B = np.array([1j, 1j]) observed = expm_multiply(A, B) expected = np.array([ 1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)), 1j * np.exp(1j)], dtype=complex) assert_allclose(observed, expected) class TestExpmActionInterval(object): def test_sparse_expm_multiply_interval(self): np.random.seed(1234) start = 0.1 stop = 3.2 n = 40 k = 3 endpoint = True for num in (14, 13, 2): A = scipy.sparse.rand(n, n, density=0.05) B = np.random.randn(n, k) v = np.random.randn(n) for target in (B, v): X = expm_multiply(A, target, start=start, stop=stop, num=num, endpoint=endpoint) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) with suppress_warnings() as sup: 
sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") sup.filter(SparseEfficiencyWarning, "spsolve is more efficient when sparse b is in the CSC matrix format") for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(target)) def test_expm_multiply_interval_vector(self): np.random.seed(1234) start = 0.1 stop = 3.2 endpoint = True for num in (14, 13, 2): for n in (1, 2, 5, 20, 40): A = scipy.linalg.inv(np.random.randn(n, n)) v = np.random.randn(n) X = expm_multiply(A, v, start=start, stop=stop, num=num, endpoint=endpoint) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(v)) def test_expm_multiply_interval_matrix(self): np.random.seed(1234) start = 0.1 stop = 3.2 endpoint = True for num in (14, 13, 2): for n in (1, 2, 5, 20, 40): for k in (1, 2): A = scipy.linalg.inv(np.random.randn(n, n)) B = np.random.randn(n, k) X = expm_multiply(A, B, start=start, stop=stop, num=num, endpoint=endpoint) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(B)) def test_sparse_expm_multiply_interval_dtypes(self): # Test A & B int A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int) B = np.ones(5, dtype=int) Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr') assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) # Test A complex, B int A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex) B = np.ones(5, dtype=int) Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr') assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) # Test A int, B complex A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int) B = np.full(5, 1j, dtype=complex) Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr') assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) def 
test_expm_multiply_interval_status_0(self): self._help_test_specific_expm_interval_status(0) def test_expm_multiply_interval_status_1(self): self._help_test_specific_expm_interval_status(1) def test_expm_multiply_interval_status_2(self): self._help_test_specific_expm_interval_status(2) def _help_test_specific_expm_interval_status(self, target_status): np.random.seed(1234) start = 0.1 stop = 3.2 num = 13 endpoint = True n = 5 k = 2 nrepeats = 10 nsuccesses = 0 for num in [14, 13, 2] * nrepeats: A = np.random.randn(n, n) B = np.random.randn(n, k) status = _expm_multiply_interval(A, B, start=start, stop=stop, num=num, endpoint=endpoint, status_only=True) if status == target_status: X, status = _expm_multiply_interval(A, B, start=start, stop=stop, num=num, endpoint=endpoint, status_only=False) assert_equal(X.shape, (num, n, k)) samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint) for solution, t in zip(X, samples): assert_allclose(solution, scipy.linalg.expm(t*A).dot(B)) nsuccesses += 1 if not nsuccesses: msg = 'failed to find a status-' + str(target_status) + ' interval' raise Exception(msg)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ The astropy.time package provides functionality for manipulating times and dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI, UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in astronomy. """ import os import copy import enum import operator import threading from datetime import datetime, date, timedelta from time import strftime from warnings import warn import numpy as np import erfa from astropy import units as u, constants as const from astropy.units import UnitConversionError from astropy.utils import ShapedLikeNDArray from astropy.utils.compat.misc import override__dir__ from astropy.utils.data_info import MixinInfo, data_info_factory from astropy.utils.exceptions import AstropyWarning from .utils import day_frac from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS, TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime) # Import TimeFromEpoch to avoid breaking code that followed the old example of # making a custom timescale in the documentation. 
from .formats import TimeFromEpoch # noqa from astropy.extern import _strptime __all__ = ['TimeBase', 'Time', 'TimeDelta', 'TimeInfo', 'update_leap_seconds', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES', 'ScaleValueError', 'OperandTypeError'] STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') LOCAL_SCALES = ('local',) TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales) TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'), ('tai', 'tcg'): ('tt',), ('tai', 'ut1'): ('utc',), ('tai', 'tdb'): ('tt',), ('tcb', 'tcg'): ('tdb', 'tt'), ('tcb', 'tt'): ('tdb',), ('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'), ('tcb', 'utc'): ('tdb', 'tt', 'tai'), ('tcg', 'tdb'): ('tt',), ('tcg', 'ut1'): ('tt', 'tai', 'utc'), ('tcg', 'utc'): ('tt', 'tai'), ('tdb', 'ut1'): ('tt', 'tai', 'utc'), ('tdb', 'utc'): ('tt', 'tai'), ('tt', 'ut1'): ('tai', 'utc'), ('tt', 'utc'): ('tai',), } GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg') BARYCENTRIC_SCALES = ('tcb', 'tdb') ROTATIONAL_SCALES = ('ut1',) TIME_DELTA_TYPES = dict((scale, scales) for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES, ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales) TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES # For time scale changes, we need L_G and L_B, which are stored in erfam.h as # /* L_G = 1 - d(TT)/d(TCG) */ # define ERFA_ELG (6.969290134e-10) # /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */ # define ERFA_ELB (1.550519768e-8) # These are exposed in erfa as erfa.ELG and erfa.ELB. # Implied: d(TT)/d(TCG) = 1-L_G # and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G) # scale offsets as second = first + first * scale_offset[(first,second)] SCALE_OFFSETS = {('tt', 'tai'): None, ('tai', 'tt'): None, ('tcg', 'tt'): -erfa.ELG, ('tt', 'tcg'): erfa.ELG / (1. 
- erfa.ELG), ('tcg', 'tai'): -erfa.ELG, ('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG), ('tcb', 'tdb'): -erfa.ELB, ('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)} # triple-level dictionary, yay! SIDEREAL_TIME_MODELS = { 'mean': { 'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')}, 'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')}, 'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}}, 'apparent': { 'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')}, 'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')}, 'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)}, 'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}} class _LeapSecondsCheck(enum.Enum): NOT_STARTED = 0 # No thread has reached the check RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held) DONE = 2 # update_leap_seconds has completed _LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED _LEAP_SECONDS_LOCK = threading.RLock() class TimeInfo(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attr_names = MixinInfo.attr_names | {'serialize_method'} _supports_indexing = True # The usual tuple of attributes needed for serialization is replaced # by a property, since Time can be serialized different ways. _represent_as_dict_extra_attrs = ('format', 'scale', 'precision', 'in_subfmt', 'out_subfmt', 'location', '_delta_ut1_utc', '_delta_tdb_tt') # When serializing, write out the `value` attribute using the column name. 
_represent_as_dict_primary_data = 'value' mask_val = np.ma.masked @property def _represent_as_dict_attrs(self): method = self.serialize_method[self._serialize_context] if method == 'formatted_value': out = ('value',) elif method == 'jd1_jd2': out = ('jd1', 'jd2') else: raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'") return out + self._represent_as_dict_extra_attrs def __init__(self, bound=False): super().__init__(bound) # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. if bound: # Specify how to serialize this object depending on context. # If ``True`` for a context, then use formatted ``value`` attribute # (e.g. the ISO time string). If ``False`` then use float jd1 and jd2. self.serialize_method = {'fits': 'jd1_jd2', 'ecsv': 'formatted_value', 'hdf5': 'jd1_jd2', 'yaml': 'jd1_jd2', None: 'jd1_jd2'} def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. 
Returns ------- arrays : list of ndarray """ parent = self._parent jd_approx = parent.jd jd_remainder = (parent - parent.__class__(jd_approx, format='jd')).jd return [jd_approx, jd_remainder] @property def unit(self): return None info_summary_stats = staticmethod( data_info_factory(names=MixinInfo._stats, funcs=[getattr(np, stat) for stat in MixinInfo._stats])) # When Time has mean, std, min, max methods: # funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats]) def _construct_from_dict_base(self, map): if 'jd1' in map and 'jd2' in map: # Initialize as JD but revert to desired format and out_subfmt (if needed) format = map.pop('format') out_subfmt = map.pop('out_subfmt', None) map['format'] = 'jd' map['val'] = map.pop('jd1') map['val2'] = map.pop('jd2') out = self._parent_cls(**map) out.format = format if out_subfmt is not None: out.out_subfmt = out_subfmt else: map['val'] = map.pop('value') out = self._parent_cls(**map) return out def _construct_from_dict(self, map): delta_ut1_utc = map.pop('_delta_ut1_utc', None) delta_tdb_tt = map.pop('_delta_tdb_tt', None) out = self._construct_from_dict_base(map) if delta_ut1_utc is not None: out._delta_ut1_utc = delta_ut1_utc if delta_tdb_tt is not None: out._delta_tdb_tt = delta_tdb_tt return out def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new Time instance which is consistent with the input Time objects ``cols`` and has ``length`` rows. This is intended for creating an empty Time instance whose elements can be set in-place for table operations like join or vstack. It checks that the input locations and attributes are consistent. This is used when a Time object is used as a mixin column in an astropy Table. 
Parameters ---------- cols : list List of input columns (Time objects) length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Time (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'description')) attrs.pop('dtype') # Not relevant for Time col0 = cols[0] # Check that location is consistent for all Time objects for col in cols[1:]: # This is the method used by __setitem__ to ensure that the right side # has a consistent location (and coerce data if necessary, but that does # not happen in this case since `col` is already a Time object). If this # passes then any subsequent table operations via setitem will work. try: col0._make_value_equivalent(slice(None), col) except ValueError: raise ValueError('input columns have inconsistent locations') # Make a new Time object with the desired shape and attributes shape = (length,) + attrs.pop('shape') jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA jd1 = np.full(shape, jd2000, dtype='f8') jd2 = np.zeros(shape, dtype='f8') tm_attrs = {attr: getattr(col0, attr) for attr in ('scale', 'location', 'precision', 'in_subfmt', 'out_subfmt')} out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs) out.format = col0.format # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class TimeDeltaInfo(TimeInfo): _represent_as_dict_extra_attrs = ('format', 'scale') def _construct_from_dict(self, map): return self._construct_from_dict_base(map) def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new TimeDelta instance which is consistent with the input Time objects ``cols`` and has ``length`` rows. 
This is intended for creating an empty Time instance whose elements can be set in-place for table operations like join or vstack. It checks that the input locations and attributes are consistent. This is used when a Time object is used as a mixin column in an astropy Table. Parameters ---------- cols : list List of input columns (Time objects) length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Time (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'description')) attrs.pop('dtype') # Not relevant for Time col0 = cols[0] # Make a new Time object with the desired shape and attributes shape = (length,) + attrs.pop('shape') jd1 = np.zeros(shape, dtype='f8') jd2 = np.zeros(shape, dtype='f8') out = self._parent_cls(jd1, jd2, format='jd', scale=col0.scale) out.format = col0.format # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class TimeBase(ShapedLikeNDArray): """Base time class from which Time and TimeDelta inherit.""" # Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__) # gets called over the __mul__ of Numpy arrays. __array_priority__ = 20000 # Declare that Time can be used as a Table column by defining the # attribute where column attributes will be stored. _astropy_column_attrs = None def __getnewargs__(self): return (self._time,) def _init_from_vals(self, val, val2, format, scale, copy, precision=None, in_subfmt=None, out_subfmt=None): """ Set the internal _format, scale, and _time attrs from user inputs. This handles coercion into the correct shapes and some basic input validation. 
""" if precision is None: precision = 3 if in_subfmt is None: in_subfmt = '*' if out_subfmt is None: out_subfmt = '*' # Coerce val into an array val = _make_array(val, copy) # If val2 is not None, ensure consistency if val2 is not None: val2 = _make_array(val2, copy) try: np.broadcast(val, val2) except ValueError: raise ValueError('Input val and val2 have inconsistent shape; ' 'they cannot be broadcast together.') if scale is not None: if not (isinstance(scale, str) and scale.lower() in self.SCALES): raise ScaleValueError("Scale {!r} is not in the allowed scales " "{}".format(scale, sorted(self.SCALES))) # If either of the input val, val2 are masked arrays then # find the masked elements and fill them. mask, val, val2 = _check_for_masked_and_fill(val, val2) # Parse / convert input values into internal jd1, jd2 based on format self._time = self._get_time_fmt(val, val2, format, scale, precision, in_subfmt, out_subfmt) self._format = self._time.name # Hack from #9969 to allow passing the location value that has been # collected by the TimeAstropyTime format class up to the Time level. # TODO: find a nicer way. if hasattr(self._time, '_location'): self.location = self._time._location del self._time._location # If any inputs were masked then masked jd2 accordingly. From above # routine ``mask`` must be either Python bool False or an bool ndarray # with shape broadcastable to jd2. if mask is not False: mask = np.broadcast_to(mask, self._time.jd2.shape) self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01 self._time.jd2[mask] = np.nan def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt): """ Given the supplied val, val2, format and scale try to instantiate the corresponding TimeFormat class to convert the input values into the internal jd1 and jd2. If format is `None` and the input is a string-type or object array then guess available formats and stop when one matches. 
""" if (format is None and (val.dtype.kind in ('S', 'U', 'O', 'M') or val.dtype.names)): # Input is a string, object, datetime, or a table-like ndarray # (structured array, recarray). These input types can be # uniquely identified by the format classes. formats = [(name, cls) for name, cls in self.FORMATS.items() if issubclass(cls, TimeUnique)] # AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry, # but try to guess it at the end. formats.append(('astropy_time', TimeAstropyTime)) elif not (isinstance(format, str) and format.lower() in self.FORMATS): if format is None: raise ValueError("No time format was given, and the input is " "not unique") else: raise ValueError("Format {!r} is not one of the allowed " "formats {}".format(format, sorted(self.FORMATS))) else: formats = [(format, self.FORMATS[format])] assert formats problems = {} for name, cls in formats: try: return cls(val, val2, scale, precision, in_subfmt, out_subfmt) except UnitConversionError: raise except (ValueError, TypeError) as err: # If ``format`` specified then there is only one possibility, so raise # immediately and include the upstream exception message to make it # easier for user to see what is wrong. if len(formats) == 1: raise ValueError( f'Input values did not match the format class {format}:' + os.linesep + f'{err.__class__.__name__}: {err}' ) from err else: problems[name] = err else: raise ValueError(f'Input values did not match any of the formats ' f'where the format keyword is optional: ' f'{problems}') from problems[formats[0][0]] @property def writeable(self): return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable @writeable.setter def writeable(self, value): self._time.jd1.flags.writeable = value self._time.jd2.flags.writeable = value @property def format(self): """ Get or set time format. The format defines the way times are represented when accessed via the ``.value`` attribute. 
By default it is the same as the format used for initializing the `Time` instance, but it can be set to any other value that could be used for initialization. These can be listed with:: >>> list(Time.FORMATS) ['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date', 'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear', 'jyear', 'byear_str', 'jyear_str'] """ return self._format @format.setter def format(self, format): """Set time format""" if format not in self.FORMATS: raise ValueError(f'format must be one of {list(self.FORMATS)}') format_cls = self.FORMATS[format] # Get the new TimeFormat object to contain time in new format. Possibly # coerce in/out_subfmt to '*' (default) if existing subfmt values are # not valid in the new format. self._time = format_cls( self._time.jd1, self._time.jd2, self._time._scale, self.precision, in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt), out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt), from_jd=True) self._format = format def __repr__(self): return ("<{} object: scale='{}' format='{}' value={}>" .format(self.__class__.__name__, self.scale, self.format, getattr(self, self.format))) def __str__(self): return str(getattr(self, self.format)) def __hash__(self): try: loc = getattr(self, 'location', None) if loc is not None: loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m) return hash((self.jd1, self.jd2, self.scale, loc)) except TypeError: if self.ndim != 0: reason = '(must be scalar)' elif self.masked: reason = '(value is masked)' else: raise raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}") @property def scale(self): """Time scale""" return self._time.scale def _set_scale(self, scale): """ This is the key routine that actually does time scale conversions. This is not public and not connected to the read-only scale property. 
""" if scale == self.scale: return if scale not in self.SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(self.SCALES))) # Determine the chain of scale transformations to get from the current # scale to the new scale. MULTI_HOPS contains a dict of all # transformations (xforms) that require intermediate xforms. # The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order. xform = (self.scale, scale) xform_sort = tuple(sorted(xform)) multi = MULTI_HOPS.get(xform_sort, ()) xforms = xform_sort[:1] + multi + xform_sort[-1:] # If we made the reverse xform then reverse it now. if xform_sort != xform: xforms = tuple(reversed(xforms)) # Transform the jd1,2 pairs through the chain of scale xforms. jd1, jd2 = self._time.jd1, self._time.jd2_filled for sys1, sys2 in zip(xforms[:-1], xforms[1:]): # Some xforms require an additional delta_ argument that is # provided through Time methods. These values may be supplied by # the user or computed based on available approximations. The # get_delta_ methods are available for only one combination of # sys1, sys2 though the property applies for both xform directions. args = [jd1, jd2] for sys12 in ((sys1, sys2), (sys2, sys1)): dt_method = '_get_delta_{}_{}'.format(*sys12) try: get_dt = getattr(self, dt_method) except AttributeError: pass else: args.append(get_dt(jd1, jd2)) break conv_func = getattr(erfa, sys1 + sys2) jd1, jd2 = conv_func(*args) jd1, jd2 = day_frac(jd1, jd2) if self.masked: jd2[self.mask] = np.nan self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision, self.in_subfmt, self.out_subfmt, from_jd=True) @property def precision(self): """ Decimal precision when outputting seconds as floating point (int value between 0 and 9 inclusive). 
""" return self._time.precision @precision.setter def precision(self, val): del self.cache if not isinstance(val, int) or val < 0 or val > 9: raise ValueError('precision attribute must be an int between ' '0 and 9') self._time.precision = val @property def in_subfmt(self): """ Unix wildcard pattern to select subformats for parsing string input times. """ return self._time.in_subfmt @in_subfmt.setter def in_subfmt(self, val): self._time.in_subfmt = val del self.cache @property def out_subfmt(self): """ Unix wildcard pattern to select subformats for outputting times. """ return self._time.out_subfmt @out_subfmt.setter def out_subfmt(self, val): # Setting the out_subfmt property here does validation of ``val`` self._time.out_subfmt = val del self.cache @property def shape(self): """The shape of the time instances. Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a tuple. Note that if different instances share some but not all underlying data, setting the shape of one instance can make the other instance unusable. Hence, it is strongly recommended to get new, reshaped instances with the ``reshape`` method. Raises ------ ValueError If the new shape has the wrong total number of elements. AttributeError If the shape of the ``jd1``, ``jd2``, ``location``, ``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed without the arrays being copied. For these cases, use the `Time.reshape` method (which copies any arrays that cannot be reshaped in-place). """ return self._time.jd1.shape @shape.setter def shape(self, shape): del self.cache # We have to keep track of arrays that were already reshaped, # since we may have to return those to their original shape if a later # shape-setting fails. reshaped = [] oldshape = self.shape # In-place reshape of data/attributes. Need to access _time.jd1/2 not # self.jd1/2 because the latter are not guaranteed to be the actual # data, and in fact should not be directly changeable from the public # API. 
for obj, attr in ((self._time, 'jd1'), (self._time, 'jd2'), (self, '_delta_ut1_utc'), (self, '_delta_tdb_tt'), (self, 'location')): val = getattr(obj, attr, None) if val is not None and val.size > 1: try: val.shape = shape except Exception: for val2 in reshaped: val2.shape = oldshape raise else: reshaped.append(val) def _shaped_like_input(self, value): if self._time.jd1.shape: if isinstance(value, np.ndarray): return value else: raise TypeError( f"JD is an array ({self._time.jd1!r}) but value " f"is not ({value!r})") else: # zero-dimensional array, is it safe to unbox? if (isinstance(value, np.ndarray) and not value.shape and not np.ma.is_masked(value)): if value.dtype.kind == 'M': # existing test doesn't want datetime64 converted return value[()] elif value.dtype.fields: # Unpack but keep field names; .item() doesn't # Still don't get python types in the fields return value[()] else: return value.item() else: return value @property def jd1(self): """ First of the two doubles that internally store time value(s) in JD. """ jd1 = self._time.mask_if_needed(self._time.jd1) return self._shaped_like_input(jd1) @property def jd2(self): """ Second of the two doubles that internally store time value(s) in JD. """ jd2 = self._time.mask_if_needed(self._time.jd2) return self._shaped_like_input(jd2) def to_value(self, format, subfmt='*'): """Get time values expressed in specified output format. This method allows representing the ``Time`` object in the desired output ``format`` and optional sub-format ``subfmt``. Available built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each format can have its own sub-formats For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long' uses ``numpy.longdouble`` for somewhat enhanced precision (with the enhancement depending on platform), and 'decimal' :class:`decimal.Decimal` for full precision. 
For 'str' and 'bytes', the number of digits is also chosen such that time values are represented accurately. For built-in date-like string formats, one of 'date_hms', 'date_hm', or 'date' (or 'longdate_hms', etc., for 5-digit years in `~astropy.time.TimeFITS`). For sub-formats including seconds, the number of digits used for the fractional seconds is as set by `~astropy.time.Time.precision`. Parameters ---------- format : str The format in which one wants the time values. Default: the current format. subfmt : str or `None`, optional Value or wildcard pattern to select the sub-format in which the values should be given. The default of '*' picks the first available for a given format, i.e., 'float' or 'date_hms'. If `None`, use the instance's ``out_subfmt``. """ # TODO: add a precision argument (but ensure it is keyword argument # only, to make life easier for TimeDelta.to_value()). if format not in self.FORMATS: raise ValueError(f'format must be one of {list(self.FORMATS)}') cache = self.cache['format'] # Try to keep cache behaviour like it was in astropy < 4.0. key = format if subfmt is None else (format, subfmt) if key not in cache: if format == self.format: tm = self else: tm = self.replicate(format=format) # Some TimeFormat subclasses may not be able to handle being passes # on a out_subfmt. This includes some core classes like # TimeBesselianEpochString that do not have any allowed subfmts. But # those do deal with `self.out_subfmt` internally, so if subfmt is # the same, we do not pass it on. kwargs = {} if subfmt is not None and subfmt != tm.out_subfmt: kwargs['out_subfmt'] = subfmt try: value = tm._time.to_value(parent=tm, **kwargs) except TypeError as exc: # Try validating subfmt, e.g. for formats like 'jyear_str' that # do not implement out_subfmt in to_value() (because there are # no allowed subformats). If subfmt is not valid this gives the # same exception as would have occurred if the call to # `to_value()` had succeeded. 
tm._time._select_subfmts(subfmt) # Subfmt was valid, so fall back to the original exception to see # if it was lack of support for out_subfmt as a call arg. if "unexpected keyword argument 'out_subfmt'" in str(exc): raise ValueError( f"to_value() method for format {format!r} does not " f"support passing a 'subfmt' argument") from None else: # Some unforeseen exception so raise. raise value = tm._shaped_like_input(value) cache[key] = value return cache[key] @property def value(self): """Time value(s) in current format""" return self.to_value(self.format, None) @property def masked(self): return self._time.masked @property def mask(self): return self._time.mask def insert(self, obj, values, axis=0): """ Insert values before the given indices in the column and return a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object. The values to be inserted must conform to the rules for in-place setting of ``Time`` objects (see ``Get and set values`` in the ``Time`` documentation). The API signature matches the ``np.insert`` API, but is more limited. The specification of insert index ``obj`` must be a single integer, and the ``axis`` must be ``0`` for simple row insertion before the index. Parameters ---------- obj : int Integer index before which ``values`` is inserted. values : array_like Value(s) to insert. If the type of ``values`` is different from that of quantity, ``values`` is converted to the matching type. axis : int, optional Axis along which to insert ``values``. Default is 0, which is the only allowed value and will insert a row. Returns ------- out : `~astropy.time.Time` subclass New time object with inserted value(s) """ # Validate inputs: obj arg is integer, axis=0, self is not a scalar, and # input index is in bounds. 
try: idx0 = operator.index(obj) except TypeError: raise TypeError('obj arg must be an integer') if axis != 0: raise ValueError('axis must be 0') if not self.shape: raise TypeError('cannot insert into scalar {} object' .format(self.__class__.__name__)) if abs(idx0) > len(self): raise IndexError('index {} is out of bounds for axis 0 with size {}' .format(idx0, len(self))) # Turn negative index into positive if idx0 < 0: idx0 = len(self) + idx0 # For non-Time object, use numpy to help figure out the length. (Note annoying # case of a string input that has a length which is not the length we want). if not isinstance(values, self.__class__): values = np.asarray(values) n_values = len(values) if values.shape else 1 # Finally make the new object with the correct length and set values for the # three sections, before insert, the insert, and after the insert. out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name) out._time.jd1[:idx0] = self._time.jd1[:idx0] out._time.jd2[:idx0] = self._time.jd2[:idx0] # This uses the Time setting machinery to coerce and validate as necessary. out[idx0:idx0 + n_values] = values out._time.jd1[idx0 + n_values:] = self._time.jd1[idx0:] out._time.jd2[idx0 + n_values:] = self._time.jd2[idx0:] return out def __setitem__(self, item, value): if not self.writeable: if self.shape: raise ValueError('{} object is read-only. Make a ' 'copy() or set "writeable" attribute to True.' .format(self.__class__.__name__)) else: raise ValueError('scalar {} object is read-only.' .format(self.__class__.__name__)) # Any use of setitem results in immediate cache invalidation del self.cache # Setting invalidates transform deltas for attr in ('_delta_tdb_tt', '_delta_ut1_utc'): if hasattr(self, attr): delattr(self, attr) if value is np.ma.masked or value is np.nan: self._time.jd2[item] = np.nan return value = self._make_value_equivalent(item, value) # Finally directly set the jd1/2 values. Locations are known to match. 
if self.scale is not None: value = getattr(value, self.scale) self._time.jd1[item] = value._time.jd1 self._time.jd2[item] = value._time.jd2 def isclose(self, other, atol=None): """Returns a boolean or boolean array where two Time objects are element-wise equal within a time tolerance. This evaluates the expression below:: abs(self - other) <= atol Parameters ---------- other : `~astropy.time.Time` Time object for comparison. atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Absoute tolerance for equality with units of time (e.g. ``u.s`` or ``u.day``). Default is two bits in the 128-bit JD time representation, equivalent to about 40 picosecs. """ if atol is None: # Note: use 2 bits instead of 1 bit based on experience in precision # tests, since taking the difference with a UTC time means one has # to do a scale change. atol = 2 * np.finfo(float).eps * u.day if not isinstance(atol, (u.Quantity, TimeDelta)): raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got " f'{atol.__class__.__name__} instead') try: # Separate these out so user sees where the problem is dt = self - other dt = abs(dt) out = dt <= atol except Exception as err: raise TypeError("'other' argument must support subtraction with Time " f"and return a value that supports comparison with " f"{atol.__class__.__name__}: {err}") return out def copy(self, format=None): """ Return a fully independent copy the Time object, optionally changing the format. If ``format`` is supplied then the time format of the returned Time object will be set accordingly, otherwise it will be unchanged from the original. In this method a full copy of the internal time arrays will be made. The internal time arrays are normally not changeable by the user so in most cases the ``replicate()`` method should be used. Parameters ---------- format : str, optional Time format of the copy. 
Returns ------- tm : Time object Copy of this object """ return self._apply('copy', format=format) def replicate(self, format=None, copy=False, cls=None): """ Return a replica of the Time object, optionally changing the format. If ``format`` is supplied then the time format of the returned Time object will be set accordingly, otherwise it will be unchanged from the original. If ``copy`` is set to `True` then a full copy of the internal time arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. The internal time arrays are normally not changeable by the user so in most cases it should not be necessary to set ``copy`` to `True`. The convenience method copy() is available in which ``copy`` is `True` by default. Parameters ---------- format : str, optional Time format of the replica. copy : bool, optional Return a true copy instead of using references where possible. Returns ------- tm : Time object Replica of this object """ return self._apply('copy' if copy else 'replicate', format=format, cls=cls) def _apply(self, method, *args, format=None, cls=None, **kwargs): """Create a new time object, possibly applying a method to the arrays. Parameters ---------- method : str or callable If string, can be 'replicate' or the name of a relevant `~numpy.ndarray` method. In the former case, a new time instance with unchanged internal data is created, while in the latter the method is applied to the internal ``jd1`` and ``jd2`` arrays, as well as to possible ``location``, ``_delta_ut1_utc``, and ``_delta_tdb_tt`` arrays. If a callable, it is directly applied to the above arrays. Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`. args : tuple Any positional arguments for ``method``. kwargs : dict Any keyword arguments for ``method``. If the ``format`` keyword argument is present, this will be used as the Time format of the replica. 
Examples -------- Some ways this is used internally:: copy : ``_apply('copy')`` replicate : ``_apply('replicate')`` reshape : ``_apply('reshape', new_shape)`` index or slice : ``_apply('__getitem__', item)`` broadcast : ``_apply(np.broadcast, shape=new_shape)`` """ new_format = self.format if format is None else format if callable(method): apply_method = lambda array: method(array, *args, **kwargs) else: if method == 'replicate': apply_method = None else: apply_method = operator.methodcaller(method, *args, **kwargs) jd1, jd2 = self._time.jd1, self._time.jd2 if apply_method: jd1 = apply_method(jd1) jd2 = apply_method(jd2) # Get a new instance of our class and set its attributes directly. tm = super().__new__(cls or self.__class__) tm._time = TimeJD(jd1, jd2, self.scale, precision=0, in_subfmt='*', out_subfmt='*', from_jd=True) # Optional ndarray attributes. for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location'): try: val = getattr(self, attr) except AttributeError: continue if apply_method: # Apply the method to any value arrays (though skip if there is # only an array scalar and the method would return a view, # since in that case nothing would change). if getattr(val, 'shape', ()): val = apply_method(val) elif method == 'copy' or method == 'flatten': # flatten should copy also for a single element array, but # we cannot use it directly for array scalars, since it # always returns a one-dimensional array. So, just copy. val = copy.copy(val) setattr(tm, attr, val) # Copy other 'info' attr only if it has actually been defined and the # time object is not a scalar (issue #10688). # See PR #3898 for further explanation and justification, along # with Quantity.__array_finalize__ if 'info' in self.__dict__: tm.info = self.info # Make the new internal _time object corresponding to the format # in the copy. If the format is unchanged this process is lightweight # and does not create any new arrays. 
if new_format not in tm.FORMATS: raise ValueError(f'format must be one of {list(tm.FORMATS)}') NewFormat = tm.FORMATS[new_format] tm._time = NewFormat( tm._time.jd1, tm._time.jd2, tm._time._scale, precision=self.precision, in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt), out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt), from_jd=True) tm._format = new_format tm.SCALES = self.SCALES return tm def __copy__(self): """ Overrides the default behavior of the `copy.copy` function in the python stdlib to behave like `Time.copy`. Does *not* make a copy of the JD arrays - only copies by reference. """ return self.replicate() def __deepcopy__(self, memo): """ Overrides the default behavior of the `copy.deepcopy` function in the python stdlib to behave like `Time.copy`. Does make a copy of the JD arrays. """ return self.copy() def _advanced_index(self, indices, axis=None, keepdims=False): """Turn argmin, argmax output into an advanced index. Argmin, argmax output contains indices along a given axis in an array shaped like the other dimensions. To use this to get values at the correct location, a list is constructed in which the other axes are indexed sequentially. For ``keepdims`` is ``True``, the net result is the same as constructing an index grid with ``np.ogrid`` and then replacing the ``axis`` item with ``indices`` with its shaped expanded at ``axis``. For ``keepdims`` is ``False``, the result is the same but with the ``axis`` dimension removed from all list entries. For ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`. Parameters ---------- indices : array Output of argmin or argmax. axis : int or None axis along which argmin or argmax was used. keepdims : bool Whether to construct indices that keep or remove the axis along which argmin or argmax was used. Default: ``False``. Returns ------- advanced_index : list of arrays Suitable for use as an advanced index. 
""" if axis is None: return np.unravel_index(indices, self.shape) ndim = self.ndim if axis < 0: axis = axis + ndim if keepdims and indices.ndim < self.ndim: indices = np.expand_dims(indices, axis) index = [indices if i == axis else np.arange(s).reshape( (1,) * (i if keepdims or i < axis else i - 1) + (s,) + (1,) * (ndim - i - (1 if keepdims or i > axis else 2)) ) for i, s in enumerate(self.shape)] return tuple(index) def argmin(self, axis=None, out=None): """Return indices of the minimum values along the given axis. This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used. See :func:`~numpy.argmin` for detailed documentation. """ # first get the minimum at normal precision. jd = self.jd1 + self.jd2 approx = np.min(jd, axis, keepdims=True) # Approx is very close to the true minimum, and by subtracting it at # full precision, all numbers near 0 can be represented correctly, # so we can be sure we get the true minimum. # The below is effectively what would be done for # dt = (self - self.__class__(approx, format='jd')).jd # which translates to: # approx_jd1, approx_jd2 = day_frac(approx, 0.) # dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2) dt = (self.jd1 - approx) + self.jd2 return dt.argmin(axis, out) def argmax(self, axis=None, out=None): """Return indices of the maximum values along the given axis. This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used. See :func:`~numpy.argmax` for detailed documentation. """ # For procedure, see comment on argmin. jd = self.jd1 + self.jd2 approx = np.max(jd, axis, keepdims=True) dt = (self.jd1 - approx) + self.jd2 return dt.argmax(axis, out) def argsort(self, axis=-1): """Returns the indices that would sort the time array. 
        This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to
        ensure that the full precision given by the two doubles ``jd1``
        and ``jd2`` is used, and that corresponding attributes are copied.
        Internally, it uses :func:`~numpy.lexsort`, and hence no sort method
        can be chosen.
        """
        # Sort on the approximate jd first; the sub-double-precision
        # remainder (difference from the rounded jd) is the secondary key
        # used to break ties.  np.lexsort treats its *last* key as primary.
        jd_approx = self.jd
        jd_remainder = (self - self.__class__(jd_approx, format='jd',
                                              scale=self.scale)).jd
        if axis is None:
            return np.lexsort((jd_remainder.ravel(), jd_approx.ravel()))
        else:
            return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis)

    def min(self, axis=None, out=None, keepdims=False):
        """Minimum along a given axis.

        This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
        that the full precision given by the two doubles ``jd1`` and ``jd2``
        is used, and that corresponding attributes are copied.

        Note that the ``out`` argument is present only for compatibility with
        ``np.min``; since `Time` instances are immutable, it is not possible
        to have an actual ``out`` to store the result in.
        """
        if out is not None:
            raise ValueError("Since `Time` instances are immutable, ``out`` "
                             "cannot be set to anything but ``None``.")

        # Index with the full-precision argmin via _advanced_index so that
        # attributes (location, deltas) are selected consistently.
        return self[self._advanced_index(self.argmin(axis), axis, keepdims)]

    def max(self, axis=None, out=None, keepdims=False):
        """Maximum along a given axis.

        This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
        that the full precision given by the two doubles ``jd1`` and ``jd2``
        is used, and that corresponding attributes are copied.

        Note that the ``out`` argument is present only for compatibility with
        ``np.max``; since `Time` instances are immutable, it is not possible
        to have an actual ``out`` to store the result in.
        """
        if out is not None:
            raise ValueError("Since `Time` instances are immutable, ``out`` "
                             "cannot be set to anything but ``None``.")

        # See comment in min().
        return self[self._advanced_index(self.argmax(axis), axis, keepdims)]

    def ptp(self, axis=None, out=None, keepdims=False):
        """Peak to peak (maximum - minimum) along a given axis.
        This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
        that the full precision given by the two doubles ``jd1`` and ``jd2``
        is used.

        Note that the ``out`` argument is present only for compatibility with
        `~numpy.ptp`; since `Time` instances are immutable, it is not possible
        to have an actual ``out`` to store the result in.
        """
        if out is not None:
            raise ValueError("Since `Time` instances are immutable, ``out`` "
                             "cannot be set to anything but ``None``.")

        # Delegate to the precision-preserving min/max defined above.
        return (self.max(axis, keepdims=keepdims)
                - self.min(axis, keepdims=keepdims))

    def sort(self, axis=-1):
        """Return a copy sorted along the specified axis.

        This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
        indexing with :func:`~numpy.lexsort` to ensure that the full precision
        given by the two doubles ``jd1`` and ``jd2`` is kept, and that
        corresponding attributes are properly sorted and copied as well.

        Parameters
        ----------
        axis : int or None
            Axis to be sorted.  If ``None``, the flattened array is sorted.
            By default, sort over the last axis.
        """
        # argsort gives full-precision order; _advanced_index turns it into
        # an index that also reorders the other axes consistently.
        return self[self._advanced_index(self.argsort(axis), axis,
                                         keepdims=True)]

    @property
    def cache(self):
        """
        Return the cache associated with this instance.
        """
        return self._time.cache

    @cache.deleter
    def cache(self):
        # Invalidate all cached scale conversions / format values; the
        # underlying _time object owns the actual cache dict.
        del self._time.cache

    def __getattr__(self, attr):
        """
        Get dynamic attributes to output format or do timescale conversion.
""" if attr in self.SCALES and self.scale is not None: cache = self.cache['scale'] if attr not in cache: if attr == self.scale: tm = self else: tm = self.replicate() tm._set_scale(attr) if tm.shape: # Prevent future modification of cached array-like object tm.writeable = False cache[attr] = tm return cache[attr] elif attr in self.FORMATS: return self.to_value(attr, subfmt=None) elif attr in TIME_SCALES: # allowed ones done above (self.SCALES) if self.scale is None: raise ScaleValueError("Cannot convert TimeDelta with " "undefined scale to any defined scale.") else: raise ScaleValueError("Cannot convert {} with scale " "'{}' to scale '{}'" .format(self.__class__.__name__, self.scale, attr)) else: # Should raise AttributeError return self.__getattribute__(attr) @override__dir__ def __dir__(self): result = set(self.SCALES) result.update(self.FORMATS) return result def _match_shape(self, val): """ Ensure that `val` is matched to length of self. If val has length 1 then broadcast, otherwise cast to double and make sure shape matches. """ val = _make_array(val, copy=True) # be conservative and copy if val.size > 1 and val.shape != self.shape: try: # check the value can be broadcast to the shape of self. val = np.broadcast_to(val, self.shape, subok=True) except Exception: raise ValueError('Attribute shape must match or be ' 'broadcastable to that of Time object. ' 'Typically, give either a single value or ' 'one for each time.') return val def _time_comparison(self, other, op): """If other is of same class as self, compare difference in self.scale. Otherwise, return NotImplemented """ if other.__class__ is not self.__class__: try: other = self.__class__(other, scale=self.scale) except Exception: # Let other have a go. 
return NotImplemented if(self.scale is not None and self.scale not in other.SCALES or other.scale is not None and other.scale not in self.SCALES): # Other will also not be able to do it, so raise a TypeError # immediately, allowing us to explain why it doesn't work. raise TypeError("Cannot compare {} instances with scales " "'{}' and '{}'".format(self.__class__.__name__, self.scale, other.scale)) if self.scale is not None and other.scale is not None: other = getattr(other, self.scale) return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.) def __lt__(self, other): return self._time_comparison(other, operator.lt) def __le__(self, other): return self._time_comparison(other, operator.le) def __eq__(self, other): """ If other is an incompatible object for comparison, return `False`. Otherwise, return `True` if the time difference between self and other is zero. """ return self._time_comparison(other, operator.eq) def __ne__(self, other): """ If other is an incompatible object for comparison, return `True`. Otherwise, return `False` if the time difference between self and other is zero. """ return self._time_comparison(other, operator.ne) def __gt__(self, other): return self._time_comparison(other, operator.gt) def __ge__(self, other): return self._time_comparison(other, operator.ge) class Time(TimeBase): """ Represent and manipulate times and dates for astronomy. A `Time` object is initialized with one or more times in the ``val`` argument. The input times in ``val`` must conform to the specified ``format`` and must correspond to the specified time ``scale``. The optional ``val2`` time input should be supplied only for numeric input formats (e.g. JD) where very high precision (better than 64-bit precision) is required. 
The allowed values for ``format`` can be listed with:: >>> list(Time.FORMATS) ['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date', 'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear', 'jyear', 'byear_str', 'jyear_str'] See also: http://docs.astropy.org/en/stable/time/ Parameters ---------- val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object Value(s) to initialize the time or times. Bytes are decoded as ascii. val2 : sequence, ndarray, or number; optional Value(s) to initialize the time or times. Only used for numerical input, to help preserve precision. format : str, optional Format of input value(s) scale : str, optional Time scale of input value(s), must be one of the following: ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') precision : int, optional Digits of precision in string representation of time in_subfmt : str, optional Unix glob to select subformats for parsing input times out_subfmt : str, optional Unix glob to select subformat for outputting times location : `~astropy.coordinates.EarthLocation` or tuple, optional If given as an tuple, it should be able to initialize an an EarthLocation instance, i.e., either contain 3 items with units of length for geocentric coordinates, or contain a longitude, latitude, and an optional height for geodetic coordinates. Can be a single location, or one for each input time. If not given, assumed to be the center of the Earth for time scale transformations to and from the solar-system barycenter. copy : bool, optional Make a copy of the input values """ SCALES = TIME_SCALES """List of time scales""" FORMATS = TIME_FORMATS """Dict of time formats""" def __new__(cls, val, val2=None, format=None, scale=None, precision=None, in_subfmt=None, out_subfmt=None, location=None, copy=False): # Because of import problems, this can only be done on # first call of Time. The initialization is complicated because # update_leap_seconds uses Time. 
# In principle, this may cause wrong leap seconds in # update_leap_seconds itself, but since expiration is in # units of days, that is fine. global _LEAP_SECONDS_CHECK if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE: with _LEAP_SECONDS_LOCK: # There are three ways we can get here: # 1. First call (NOT_STARTED). # 2. Re-entrant call (RUNNING). We skip the initialisation # and don't worry about leap second errors. # 3. Another thread which raced with the first call # (RUNNING). The first thread has relinquished the # lock to us, so initialization is complete. if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED: _LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING update_leap_seconds() _LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE if isinstance(val, Time): self = val.replicate(format=format, copy=copy, cls=cls) else: self = super().__new__(cls) return self def __init__(self, val, val2=None, format=None, scale=None, precision=None, in_subfmt=None, out_subfmt=None, location=None, copy=False): if location is not None: from astropy.coordinates import EarthLocation if isinstance(location, EarthLocation): self.location = location else: self.location = EarthLocation(*location) if self.location.size == 1: self.location = self.location.squeeze() else: if not hasattr(self, 'location'): self.location = None if isinstance(val, Time): # Update _time formatting parameters if explicitly specified if precision is not None: self._time.precision = precision if in_subfmt is not None: self._time.in_subfmt = in_subfmt if out_subfmt is not None: self._time.out_subfmt = out_subfmt self.SCALES = TIME_TYPES[self.scale] if scale is not None: self._set_scale(scale) else: self._init_from_vals(val, val2, format, scale, copy, precision, in_subfmt, out_subfmt) self.SCALES = TIME_TYPES[self.scale] if self.location is not None and (self.location.size > 1 and self.location.shape != self.shape): try: # check the location can be broadcast to self's shape. 
self.location = np.broadcast_to(self.location, self.shape, subok=True) except Exception as err: raise ValueError('The location with shape {} cannot be ' 'broadcast against time with shape {}. ' 'Typically, either give a single location or ' 'one for each time.' .format(self.location.shape, self.shape)) from err def _make_value_equivalent(self, item, value): """Coerce setitem value into an equivalent Time object""" # If there is a vector location then broadcast to the Time shape # and then select with ``item`` if self.location is not None and self.location.shape: self_location = np.broadcast_to(self.location, self.shape, subok=True)[item] else: self_location = self.location if isinstance(value, Time): # Make sure locations are compatible. Location can be either None or # a Location object. if self_location is None and value.location is None: match = True elif ((self_location is None and value.location is not None) or (self_location is not None and value.location is None)): match = False else: match = np.all(self_location == value.location) if not match: raise ValueError('cannot set to Time with different location: ' 'expected location={} and ' 'got location={}' .format(self_location, value.location)) else: try: value = self.__class__(value, scale=self.scale, location=self_location) except Exception: try: value = self.__class__(value, scale=self.scale, format=self.format, location=self_location) except Exception as err: raise ValueError('cannot convert value to a compatible Time object: {}' .format(err)) return value @classmethod def now(cls): """ Creates a new object corresponding to the instant in time this method is called. .. note:: "Now" is determined using the `~datetime.datetime.utcnow` function, so its accuracy and precision is determined by that function. Generally that means it is set by the accuracy of your system clock. Returns ------- nowtime A new `Time` object (or a subclass of `Time` if this is called from such a subclass) at the current time. 
""" # call `utcnow` immediately to be sure it's ASAP dtnow = datetime.utcnow() return cls(val=dtnow, format='datetime', scale='utc') info = TimeInfo() @classmethod def strptime(cls, time_string, format_string, **kwargs): """ Parse a string to a Time according to a format specification. See `time.strptime` documentation for format specification. >>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S') <Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000> Parameters ---------- time_string : str, sequence, or ndarray Objects containing time data of type string format_string : str String specifying format of time_string. kwargs : dict Any keyword arguments for ``Time``. If the ``format`` keyword argument is present, this will be used as the Time format. Returns ------- time_obj : `~astropy.time.Time` A new `~astropy.time.Time` object corresponding to the input ``time_string``. """ time_array = np.asarray(time_string) if time_array.dtype.kind not in ('U', 'S'): err = "Expected type is string, a bytes-like object or a sequence"\ " of these. Got dtype '{}'".format(time_array.dtype.kind) raise TypeError(err) to_string = (str if time_array.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, 'U30']) for time, formatted in iterator: tt, fraction = _strptime._strptime(to_string(time), format_string) time_tuple = tt[:6] + (fraction,) formatted[...] = '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}'\ .format(*time_tuple) format = kwargs.pop('format', None) out = cls(*iterator.operands[1:], format='isot', **kwargs) if format is not None: out.format = format return out def strftime(self, format_spec): """ Convert Time to a string or a numpy.array of strings according to a format specification. See `time.strftime` documentation for format specification. Parameters ---------- format_spec : str Format definition of return string. 
Returns ------- formatted : str or numpy.array String or numpy.array of strings formatted according to the given format string. """ formatted_strings = [] for sk in self.replicate('iso')._time.str_kwargs(): date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple() datetime_tuple = (sk['year'], sk['mon'], sk['day'], sk['hour'], sk['min'], sk['sec'], date_tuple[6], date_tuple[7], -1) fmtd_str = format_spec if '%f' in fmtd_str: fmtd_str = fmtd_str.replace('%f', '{frac:0{precision}}'.format( frac=sk['fracsec'], precision=self.precision)) fmtd_str = strftime(fmtd_str, datetime_tuple) formatted_strings.append(fmtd_str) if self.isscalar: return formatted_strings[0] else: return np.array(formatted_strings).reshape(self.shape) def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None): """Light travel time correction to the barycentre or heliocentre. The frame transformations used to calculate the location of the solar system barycentre and the heliocentre rely on the erfa routine epv00, which is consistent with the JPL DE405 ephemeris to an accuracy of 11.2 km, corresponding to a light travel time of 4 microseconds. The routine assumes the source(s) are at large distance, i.e., neglects finite-distance effects. Parameters ---------- skycoord : `~astropy.coordinates.SkyCoord` The sky location to calculate the correction for. kind : str, optional ``'barycentric'`` (default) or ``'heliocentric'`` location : `~astropy.coordinates.EarthLocation`, optional The location of the observatory to calculate the correction for. If no location is given, the ``location`` attribute of the Time object is used ephemeris : str, optional Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set``. For more information, see `~astropy.coordinates.solar_system_ephemeris`. 
Returns ------- time_offset : `~astropy.time.TimeDelta` The time offset between the barycentre or Heliocentre and Earth, in TDB seconds. Should be added to the original time to get the time in the Solar system barycentre or the Heliocentre. Also, the time conversion to BJD will then include the relativistic correction as well. """ if kind.lower() not in ('barycentric', 'heliocentric'): raise ValueError("'kind' parameter must be one of 'heliocentric' " "or 'barycentric'") if location is None: if self.location is None: raise ValueError('An EarthLocation needs to be set or passed ' 'in to calculate bary- or heliocentric ' 'corrections') location = self.location from astropy.coordinates import (UnitSphericalRepresentation, CartesianRepresentation, HCRS, ICRS, GCRS, solar_system_ephemeris) # ensure sky location is ICRS compatible if not skycoord.is_transformable_to(ICRS()): raise ValueError("Given skycoord is not transformable to the ICRS") # get location of observatory in ITRS coordinates at this Time try: itrs = location.get_itrs(obstime=self) except Exception: raise ValueError("Supplied location does not have a valid `get_itrs` method") with solar_system_ephemeris.set(ephemeris): if kind.lower() == 'heliocentric': # convert to heliocentric coordinates, aligned with ICRS cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz else: # first we need to convert to GCRS coordinates with the correct # obstime, since ICRS coordinates have no frame time gcrs_coo = itrs.transform_to(GCRS(obstime=self)) # convert to barycentric (BCRS) coordinates, aligned with ICRS cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz # get unit ICRS vector to star spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation). represent_as(CartesianRepresentation).xyz) # Move X,Y,Z to last dimension, to enable possible broadcasting below. 
cpos = np.rollaxis(cpos, 0, cpos.ndim) spos = np.rollaxis(spos, 0, spos.ndim) # calculate light travel time correction tcor_val = (spos * cpos).sum(axis=-1) / const.c return TimeDelta(tcor_val, scale='tdb') def sidereal_time(self, kind, longitude=None, model=None): """Calculate sidereal time. Parameters --------------- kind : str ``'mean'`` or ``'apparent'``, i.e., accounting for precession only, or also for nutation. longitude : `~astropy.units.Quantity`, `str`, or `None`; optional The longitude on the Earth at which to compute the sidereal time. Can be given as a `~astropy.units.Quantity` with angular units (or an `~astropy.coordinates.Angle` or `~astropy.coordinates.Longitude`), or as a name of an observatory (currently, only ``'greenwich'`` is supported, equivalent to 0 deg). If `None` (default), the ``lon`` attribute of the Time object is used. model : str or `None`; optional Precession (and nutation) model to use. The available ones are: - {0}: {1} - {2}: {3} If `None` (default), the last (most recent) one from the appropriate list above is used. Returns ------- sidereal time : `~astropy.coordinates.Longitude` Sidereal time as a quantity with units of hourangle """ # docstring is formatted below from astropy.coordinates import Longitude if kind.lower() not in SIDEREAL_TIME_MODELS.keys(): raise ValueError('The kind of sidereal time has to be {}'.format( ' or '.join(sorted(SIDEREAL_TIME_MODELS.keys())))) available_models = SIDEREAL_TIME_MODELS[kind.lower()] if model is None: model = sorted(available_models.keys())[-1] else: if model.upper() not in available_models: raise ValueError( 'Model {} not implemented for {} sidereal time; ' 'available models are {}' .format(model, kind, sorted(available_models.keys()))) if longitude is None: if self.location is None: raise ValueError('No longitude is given but the location for ' 'the Time object is not set.') longitude = self.location.lon elif longitude == 'greenwich': longitude = Longitude(0., u.degree, wrap_angle=180. 
* u.degree) else: # sanity check on input longitude = Longitude(longitude, u.degree, wrap_angle=180. * u.degree) gst = self._erfa_sidereal_time(available_models[model.upper()]) return Longitude(gst + longitude, u.hourangle) if isinstance(sidereal_time.__doc__, str): sidereal_time.__doc__ = sidereal_time.__doc__.format( 'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()), 'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys())) def _erfa_sidereal_time(self, model): """Calculate a sidereal time using a IAU precession/nutation model.""" from astropy.coordinates import Longitude erfa_function = model['function'] erfa_parameters = [getattr(getattr(self, scale)._time, jd_part) for scale in model['scales'] for jd_part in ('jd1', 'jd2_filled')] sidereal_time = erfa_function(*erfa_parameters) if self.masked: sidereal_time[self.mask] = np.nan return Longitude(sidereal_time, u.radian).to(u.hourangle) def get_delta_ut1_utc(self, iers_table=None, return_status=False): """Find UT1 - UTC differences by interpolating in IERS Table. Parameters ---------- iers_table : `~astropy.utils.iers.IERS` table, optional Table containing UT1-UTC differences from IERS Bulletins A and/or B. Default: `~astropy.utils.iers.earth_orientation_table` (which in turn defaults to the combined version provided by `~astropy.utils.iers.IERS_Auto`). return_status : bool Whether to return status values. If `False` (default), iers raises `IndexError` if any time is out of the range covered by the IERS table. Returns ------- ut1_utc : float or float array UT1-UTC, interpolated in IERS Table status : int or int array Status values (if ``return_status=`True```):: ``astropy.utils.iers.FROM_IERS_B`` ``astropy.utils.iers.FROM_IERS_A`` ``astropy.utils.iers.FROM_IERS_A_PREDICTION`` ``astropy.utils.iers.TIME_BEFORE_IERS_RANGE`` ``astropy.utils.iers.TIME_BEYOND_IERS_RANGE`` Notes ----- In normal usage, UT1-UTC differences are calculated automatically on the first instance ut1 is needed. 
Examples -------- To check in code whether any times are before the IERS table range:: >>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE >>> t = Time(['1961-01-01', '2000-01-01'], scale='utc') >>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA >>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA array([ True, False]...) """ if iers_table is None: from astropy.utils.iers import earth_orientation_table iers_table = earth_orientation_table.get() return iers_table.ut1_utc(self.utc, return_status=return_status) # Property for ERFA DUT arg = UT1 - UTC def _get_delta_ut1_utc(self, jd1=None, jd2=None): """ Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and jd2 args because it gets called that way when converting time scales. If delta_ut1_utc is not yet set, this will interpolate them from the the IERS table. """ # Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in # seconds. It is obtained from tables published by the IERS. 
if not hasattr(self, '_delta_ut1_utc'): from astropy.utils.iers import earth_orientation_table iers_table = earth_orientation_table.get() # jd1, jd2 are normally set (see above), except if delta_ut1_utc # is access directly; ensure we behave as expected for that case if jd1 is None: self_utc = self.utc jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled scale = 'utc' else: scale = self.scale # interpolate UT1-UTC in IERS table delta = iers_table.ut1_utc(jd1, jd2) # if we interpolated using UT1 jds, we may be off by one # second near leap seconds (and very slightly off elsewhere) if scale == 'ut1': # calculate UTC using the offset we got; the ERFA routine # is tolerant of leap seconds, so will do this right jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s)) # calculate a better estimate using the nearly correct UTC delta = iers_table.ut1_utc(jd1_utc, jd2_utc) self._set_delta_ut1_utc(delta) return self._delta_ut1_utc def _set_delta_ut1_utc(self, val): del self.cache if hasattr(val, 'to'): # Matches Quantity but also TimeDelta. val = val.to(u.second).value val = self._match_shape(val) self._delta_ut1_utc = val # Note can't use @property because _get_delta_tdb_tt is explicitly # called with the optional jd1 and jd2 args. delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc) """UT1 - UTC time scale offset""" # Property for ERFA DTR arg = TDB - TT def _get_delta_tdb_tt(self, jd1=None, jd2=None): if not hasattr(self, '_delta_tdb_tt'): # If jd1 and jd2 are not provided (which is the case for property # attribute access) then require that the time scale is TT or TDB. # Otherwise the computations here are not correct. if jd1 is None or jd2 is None: if self.scale not in ('tt', 'tdb'): raise ValueError('Accessing the delta_tdb_tt attribute ' 'is only possible for TT or TDB time ' 'scales') else: jd1 = self._time.jd1 jd2 = self._time.jd2_filled # First go from the current input time (which is either # TDB or TT) to an approximate UT1. 
Since TT and TDB are # pretty close (few msec?), assume TT. Similarly, since the # UT1 terms are very small, use UTC instead of UT1. njd1, njd2 = erfa.tttai(jd1, jd2) njd1, njd2 = erfa.taiutc(njd1, njd2) # subtract 0.5, so UT is fraction of the day from midnight ut = day_frac(njd1 - 0.5, njd2)[1] if self.location is None: # Assume geocentric. self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0., 0., 0.) else: location = self.location # Geodetic params needed for d_tdb_tt() lon = location.lon rxy = np.hypot(location.x, location.y) z = location.z self._delta_tdb_tt = erfa.dtdb( jd1, jd2, ut, lon.to_value(u.radian), rxy.to_value(u.km), z.to_value(u.km)) return self._delta_tdb_tt def _set_delta_tdb_tt(self, val): del self.cache if hasattr(val, 'to'): # Matches Quantity but also TimeDelta. val = val.to(u.second).value val = self._match_shape(val) self._delta_tdb_tt = val # Note can't use @property because _get_delta_tdb_tt is explicitly # called with the optional jd1 and jd2 args. delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt) """TDB - TT time scale offset""" def __sub__(self, other): # T - Tdelta = T # T - T = Tdelta other_is_delta = not isinstance(other, Time) if other_is_delta: # T - Tdelta # Check other is really a TimeDelta or something that can initialize. 
            if not isinstance(other, TimeDelta):
                try:
                    other = TimeDelta(other)
                except Exception:
                    return NotImplemented

            # we need a constant scale to calculate, which is guaranteed for
            # TimeDelta, but not for Time (which can be UTC)
            out = self.replicate()

            if self.scale in other.SCALES:
                # The delta's scale group contains ours: convert the delta
                # to our scale (unless it already matches or is undefined).
                if other.scale not in (out.scale, None):
                    other = getattr(other, out.scale)
            else:
                if other.scale is None:
                    # Scale-less delta: do the arithmetic in TAI.
                    out._set_scale('tai')
                else:
                    if self.scale not in TIME_TYPES[other.scale]:
                        raise TypeError("Cannot subtract Time and TimeDelta instances "
                                        "with scales '{}' and '{}'"
                                        .format(self.scale, other.scale))
                    out._set_scale(other.scale)
            # remove attributes that are invalidated by changing time
            for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
                if hasattr(out, attr):
                    delattr(out, attr)

        else:  # T - T
            # the scales should be compatible (e.g., cannot convert TDB to LOCAL)
            if other.scale not in self.SCALES:
                raise TypeError("Cannot subtract Time instances "
                                "with scales '{}' and '{}'"
                                .format(self.scale, other.scale))
            # Use TAI as the common scale unless ours already is one of the
            # scales valid for a TimeDelta.
            self_time = (self._time if self.scale in TIME_DELTA_SCALES
                         else self.tai._time)
            # set up TimeDelta, subtraction to be done shortly
            out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
                            scale=self_time.scale)

            if other.scale != out.scale:
                other = getattr(other, out.scale)

        # Two-double arithmetic; day_frac renormalizes so jd2 stays a
        # fraction and full precision is preserved.
        jd1 = out._time.jd1 - other._time.jd1
        jd2 = out._time.jd2 - other._time.jd2

        out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)

        if other_is_delta:
            # Go back to left-side scale if needed
            out._set_scale(self.scale)

        return out

    def __add__(self, other):
        # T + Tdelta = T
        # T + T = error
        if isinstance(other, Time):
            raise OperandTypeError(self, other, '+')

        # Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # ideally, we calculate in the scale of the Time item, since that is # what we want the output in, but this may not be possible, since # TimeDelta cannot be converted arbitrarily out = self.replicate() if self.scale in other.SCALES: if other.scale not in (out.scale, None): other = getattr(other, out.scale) else: if other.scale is None: out._set_scale('tai') else: if self.scale not in TIME_TYPES[other.scale]: raise TypeError("Cannot add Time and TimeDelta instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) out._set_scale(other.scale) # remove attributes that are invalidated by changing time for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): if hasattr(out, attr): delattr(out, attr) jd1 = out._time.jd1 + other._time.jd1 jd2 = out._time.jd2 + other._time.jd2 out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) # Go back to left-side scale if needed out._set_scale(self.scale) return out # Reverse addition is possible: <something-Tdelta-ish> + T # but there is no case of <something> - T, so no __rsub__. def __radd__(self, other): return self.__add__(other) def to_datetime(self, timezone=None): # TODO: this could likely go through to_value, as long as that # had an **kwargs part that was just passed on to _time. tm = self.replicate(format='datetime') return tm._shaped_like_input(tm._time.to_value(timezone)) to_datetime.__doc__ = TimeDatetime.to_value.__doc__ class TimeDelta(TimeBase): """ Represent the time difference between two times. A TimeDelta object is initialized with one or more times in the ``val`` argument. The input times in ``val`` must conform to the specified ``format``. The optional ``val2`` time input should be supplied only for numeric input formats (e.g. JD) where very high precision (better than 64-bit precision) is required. 
The allowed values for ``format`` can be listed with:: >>> list(TimeDelta.FORMATS) ['sec', 'jd', 'datetime'] Note that for time differences, the scale can be among three groups: geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational ('ut1'). Within each of these, the scales for time differences are the same. Conversion between geocentric and barycentric is possible, as there is only a scale factor change, but one cannot convert to or from 'ut1', as this requires knowledge of the actual times, not just their difference. For a similar reason, 'utc' is not a valid scale for a time difference: a UTC day is not always 86400 seconds. See also: - https://docs.astropy.org/en/stable/time/ - https://docs.astropy.org/en/stable/time/index.html#time-deltas Parameters ---------- val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object Value(s) to initialize the time difference(s). Any quantities will be converted appropriately (with care taken to avoid rounding errors for regular time units). val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional Additional values, as needed to preserve precision. format : str, optional Format of input value(s) scale : str, optional Time scale of input value(s), must be one of the following values: ('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or ``None``), the scale is arbitrary; when added or subtracted from a ``Time`` instance, it will be used without conversion. 
copy : bool, optional Make a copy of the input values """ SCALES = TIME_DELTA_SCALES """List of time delta scales.""" FORMATS = TIME_DELTA_FORMATS """Dict of time delta formats.""" info = TimeDeltaInfo() def __new__(cls, val, val2=None, format=None, scale=None, precision=None, in_subfmt=None, out_subfmt=None, location=None, copy=False): if isinstance(val, TimeDelta): self = val.replicate(format=format, copy=copy, cls=cls) else: self = super().__new__(cls) return self def __init__(self, val, val2=None, format=None, scale=None, copy=False): if isinstance(val, TimeDelta): if scale is not None: self._set_scale(scale) else: if format is None: format = 'datetime' if isinstance(val, timedelta) else 'jd' self._init_from_vals(val, val2, format, scale, copy) if scale is not None: self.SCALES = TIME_DELTA_TYPES[scale] def replicate(self, *args, **kwargs): out = super().replicate(*args, **kwargs) out.SCALES = self.SCALES return out def to_datetime(self): """ Convert to ``datetime.timedelta`` object. """ tm = self.replicate(format='datetime') return tm._shaped_like_input(tm._time.value) def _set_scale(self, scale): """ This is the key routine that actually does time scale conversions. This is not public and not connected to the read-only scale property. 
""" if scale == self.scale: return if scale not in self.SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(self.SCALES))) # For TimeDelta, there can only be a change in scale factor, # which is written as time2 - time1 = scale_offset * time1 scale_offset = SCALE_OFFSETS[(self.scale, scale)] if scale_offset is None: self._time.scale = scale else: jd1, jd2 = self._time.jd1, self._time.jd2 offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset) self._time = self.FORMATS[self.format]( jd1 + offset1, jd2 + offset2, scale, self.precision, self.in_subfmt, self.out_subfmt, from_jd=True) def _add_sub(self, other, op): """Perform common elements of addition / subtraction for two delta times""" # If not a TimeDelta then see if it can be turned into a TimeDelta. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # the scales should be compatible (e.g., cannot convert TDB to TAI) if(self.scale is not None and self.scale not in other.SCALES or other.scale is not None and other.scale not in self.SCALES): raise TypeError("Cannot add TimeDelta instances with scales " "'{}' and '{}'".format(self.scale, other.scale)) # adjust the scale of other if the scale of self is set (or no scales) if self.scale is not None or other.scale is None: out = self.replicate() if other.scale is not None: other = getattr(other, self.scale) else: out = other.replicate() jd1 = op(self._time.jd1, other._time.jd1) jd2 = op(self._time.jd2, other._time.jd2) out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) return out def __add__(self, other): # If other is a Time then use Time.__add__ to do the calculation. 
if isinstance(other, Time): return other.__add__(self) return self._add_sub(other, operator.add) def __sub__(self, other): # TimeDelta - Time is an error if isinstance(other, Time): raise OperandTypeError(self, other, '-') return self._add_sub(other, operator.sub) def __radd__(self, other): return self.__add__(other) def __rsub__(self, other): out = self.__sub__(other) return -out def __neg__(self): """Negation of a `TimeDelta` object.""" new = self.copy() new._time.jd1 = -self._time.jd1 new._time.jd2 = -self._time.jd2 return new def __abs__(self): """Absolute value of a `TimeDelta` object.""" jd1, jd2 = self._time.jd1, self._time.jd2 negative = jd1 + jd2 < 0 new = self.copy() new._time.jd1 = np.where(negative, -jd1, jd1) new._time.jd2 = np.where(negative, -jd2, jd2) return new def __mul__(self, other): """Multiplication of `TimeDelta` objects by numbers/arrays.""" # Check needed since otherwise the self.jd1 * other multiplication # would enter here again (via __rmul__) if isinstance(other, Time): raise OperandTypeError(self, other, '*') elif ((isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (isinstance(other, str) and other == '')): return self.copy() # If other is something consistent with a dimensionless quantity # (could just be a float or an array), then we can just multiple in. try: other = u.Quantity(other, u.dimensionless_unscaled, copy=False) except Exception: # If not consistent with a dimensionless quantity, try downgrading # self to a quantity and see if things work. try: return self.to(u.day) * other except Exception: # The various ways we could multiply all failed; # returning NotImplemented to give other a final chance. 
return NotImplemented jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value) out = TimeDelta(jd1, jd2, format='jd', scale=self.scale) if self.format != 'jd': out = out.replicate(format=self.format) return out def __rmul__(self, other): """Multiplication of numbers/arrays with `TimeDelta` objects.""" return self.__mul__(other) def __truediv__(self, other): """Division of `TimeDelta` objects by numbers/arrays.""" # Cannot do __mul__(1./other) as that looses precision if ((isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (isinstance(other, str) and other == '')): return self.copy() # If other is something consistent with a dimensionless quantity # (could just be a float or an array), then we can just divide in. try: other = u.Quantity(other, u.dimensionless_unscaled, copy=False) except Exception: # If not consistent with a dimensionless quantity, try downgrading # self to a quantity and see if things work. try: return self.to(u.day) / other except Exception: # The various ways we could divide all failed; # returning NotImplemented to give other a final chance. return NotImplemented jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value) out = TimeDelta(jd1, jd2, format='jd', scale=self.scale) if self.format != 'jd': out = out.replicate(format=self.format) return out def __rtruediv__(self, other): """Division by `TimeDelta` objects of numbers/arrays.""" # Here, we do not have to worry about returning NotImplemented, # since other has already had a chance to look at us. return other / self.to(u.day) def to(self, unit, equivalencies=[]): """ Convert to a quantity in the specified unit. Parameters ---------- unit : `~astropy.units.UnitBase` instance, str The unit to convert to. equivalencies : list of equivalence pairs, optional A list of equivalence pairs to try if the units are not directly convertible (see :ref:`unit_equivalencies`). If `None`, no equivalencies will be applied at all, not even any set globallyq or within a context. 
Returns ------- quantity : `~astropy.units.Quantity` The quantity in the units specified. See also -------- to_value : get the numerical value in a given unit. """ return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(unit, equivalencies=equivalencies) def to_value(self, *args, **kwargs): """Get time delta values expressed in specified output format or unit. This method is flexible and handles both conversion to a specified ``TimeDelta`` format / sub-format AND conversion to a specified unit. If positional argument(s) are provided then the first one is checked to see if it is a valid ``TimeDelta`` format, and next it is checked to see if it is a valid unit or unit string. To convert to a ``TimeDelta`` format and optional sub-format the options are:: tm = TimeDelta(1.0 * u.s) tm.to_value('jd') # equivalent of tm.jd tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object tm.to_value('jd', subfmt='decimal') tm.to_value(format='jd', subfmt='decimal') To convert to a unit with optional equivalencies, the options are:: tm.to_value('hr') # convert to u.hr (hours) tm.to_value('hr', []) # specify equivalencies as a positional arg tm.to_value('hr', equivalencies=[]) tm.to_value(unit='hr', equivalencies=[]) The built-in `~astropy.time.TimeDelta` options for ``format`` are: {'jd', 'sec', 'datetime'}. For the two numerical formats 'jd' and 'sec', the available ``subfmt`` options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long' uses ``numpy.longdouble`` for somewhat enhanced precision (with the enhancement depending on platform), and 'decimal' instances of :class:`decimal.Decimal` for full precision. For the 'str' and 'bytes' sub-formats, the number of digits is also chosen such that time values are represented accurately. Default: as set by ``out_subfmt`` (which by default picks the first available for a given format, i.e., 'float'). Parameters ---------- format : str, optional The format in which one wants the `~astropy.time.TimeDelta` values. 
Default: the current format. subfmt : str, optional Possible sub-format in which the values should be given. Default: as set by ``out_subfmt`` (which by default picks the first available for a given format, i.e., 'float' or 'date_hms'). unit : `~astropy.units.UnitBase` instance or str, optional The unit in which the value should be given. equivalencies : list of equivalence pairs, optional A list of equivalence pairs to try if the units are not directly convertible (see :ref:`unit_equivalencies`). If `None`, no equivalencies will be applied at all, not even any set globally or within a context. Returns ------- value : `~numpy.ndarray` or scalar The value in the format or units specified. See also -------- to : Convert to a `~astropy.units.Quantity` instance in a given unit. value : The time value in the current format. """ if not (args or kwargs): raise TypeError('to_value() missing required format or unit argument') # TODO: maybe allow 'subfmt' also for units, keeping full precision # (effectively, by doing the reverse of quantity_day_frac)? # This way, only equivalencies could lead to possible precision loss. if ('format' in kwargs or (args != () and (args[0] is None or args[0] in self.FORMATS))): # Super-class will error with duplicate arguments, etc. return super().to_value(*args, **kwargs) # With positional arguments, we try parsing the first one as a unit, # so that on failure we can give a more informative exception. if args: try: unit = u.Unit(args[0]) except ValueError as exc: raise ValueError("first argument is not one of the known " "formats ({}) and failed to parse as a unit." 
.format(list(self.FORMATS))) from exc args = (unit,) + args[1:] return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(*args, **kwargs) def _make_value_equivalent(self, item, value): """Coerce setitem value into an equivalent TimeDelta object""" if not isinstance(value, TimeDelta): try: value = self.__class__(value, scale=self.scale, format=self.format) except Exception as err: raise ValueError('cannot convert value to a compatible TimeDelta ' 'object: {}'.format(err)) return value def isclose(self, other, atol=None, rtol=0.0): """Returns a boolean or boolean array where two TimeDelta objects are element-wise equal within a time tolerance. This effectively evaluates the expression below:: abs(self - other) <= atol + rtol * abs(other) Parameters ---------- other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Quantity or TimeDelta object for comparison. atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Absolute tolerance for equality with units of time (e.g. ``u.s`` or ``u.day``). Default is one bit in the 128-bit JD time representation, equivalent to about 20 picosecs. rtol : float Relative tolerance for equality """ try: other_day = other.to_value(u.day) except Exception as err: raise TypeError(f"'other' argument must support conversion to days: {err}") if atol is None: atol = np.finfo(float).eps * u.day if not isinstance(atol, (u.Quantity, TimeDelta)): raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got " f'{atol.__class__.__name__} instead') return np.isclose(self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)) class ScaleValueError(Exception): pass def _make_array(val, copy=False): """ Take ``val`` and convert/reshape to an array. If ``copy`` is `True` then copy input values. Returns ------- val : ndarray Array version of ``val``. 
""" if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time): dtype = object else: dtype = None val = np.array(val, copy=copy, subok=True, dtype=dtype) # Allow only float64, string or object arrays as input # (object is for datetime, maybe add more specific test later?) # This also ensures the right byteorder for float64 (closes #2942). if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize: pass elif val.dtype.kind in 'OSUMaV': pass else: val = np.asanyarray(val, dtype=np.float64) return val def _check_for_masked_and_fill(val, val2): """ If ``val`` or ``val2`` are masked arrays then fill them and cast to ndarray. Returns a mask corresponding to the logical-or of masked elements in ``val`` and ``val2``. If neither is masked then the return ``mask`` is ``None``. If either ``val`` or ``val2`` are masked then they are replaced with filled versions of themselves. Parameters ---------- val : ndarray or MaskedArray Input val val2 : ndarray or MaskedArray Input val2 Returns ------- mask, val, val2: ndarray or None Mask: (None or bool ndarray), val, val2: ndarray """ def get_as_filled_ndarray(mask, val): """ Fill the given MaskedArray ``val`` from the first non-masked element in the array. This ensures that upstream Time initialization will succeed. Note that nothing happens if there are no masked elements. """ fill_value = None if np.any(val.mask): # Final mask is the logical-or of inputs mask = mask | val.mask # First unmasked element. If all elements are masked then # use fill_value=None from above which will use val.fill_value. # As long as the user has set this appropriately then all will # be fine. val_unmasked = val.compressed() # 1-d ndarray of unmasked values if len(val_unmasked) > 0: fill_value = val_unmasked[0] # Fill the input ``val``. If fill_value is None then this just returns # an ndarray view of val (no copy). 
val = val.filled(fill_value) return mask, val mask = False if isinstance(val, np.ma.MaskedArray): mask, val = get_as_filled_ndarray(mask, val) if isinstance(val2, np.ma.MaskedArray): mask, val2 = get_as_filled_ndarray(mask, val2) return mask, val, val2 class OperandTypeError(TypeError): def __init__(self, left, right, op=None): op_string = '' if op is None else f' for {op}' super().__init__( "Unsupported operand type(s){}: " "'{}' and '{}'".format(op_string, left.__class__.__name__, right.__class__.__name__)) def update_leap_seconds(files=None): """If the current ERFA leap second table is out of date, try to update it. Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an up-to-date table. See that routine for the definition of "out of date". In order to make it safe to call this any time, all exceptions are turned into warnings, Parameters ---------- files : list of path, optional List of files/URLs to attempt to open. By default, uses defined by `astropy.utils.iers.LeapSeconds.auto_open`, which includes the table used by ERFA itself, so if that is up to date, nothing will happen. Returns ------- n_update : int Number of items updated. """ try: from astropy.utils import iers table = iers.LeapSeconds.auto_open(files) return erfa.leap_seconds.update(table) except Exception as exc: warn("leap-second auto-update failed due to the following " f"exception: {exc!r}", AstropyWarning) return 0
unknown
codeparrot/codeparrot-clean
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class BackupManagementUsage(Model):
    """Backup management usage statistics of a Recovery Services vault.

    A plain value holder: every constructor argument is stored unchanged as
    an instance attribute, and ``_attribute_map`` drives (de)serialization
    in the msrest base class.

    :param unit: Unit of the usage. Possible values include: 'Count',
     'Bytes', 'Seconds', 'Percent', 'CountPerSecond', 'BytesPerSecond'
    :type unit: str or :class:`UsagesUnit
     <azure.mgmt.recoveryservicesbackup.models.UsagesUnit>`
    :param quota_period: Quota period of usage.
    :type quota_period: str
    :param next_reset_time: Next reset time of usage.
    :type next_reset_time: datetime
    :param current_value: Current value of usage.
    :type current_value: long
    :param limit: Limit of usage.
    :type limit: long
    :param name: Name of usage.
    :type name: :class:`NameInfo
     <azure.mgmt.recoveryservicesbackup.models.NameInfo>`
    """

    # Wire-format mapping consumed by msrest's serializer/deserializer.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
        'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'NameInfo'},
    }

    def __init__(self, unit=None, quota_period=None, next_reset_time=None,
                 current_value=None, limit=None, name=None):
        # Copy each argument onto the instance verbatim, in declaration
        # order; no validation or conversion is performed here.
        for attr_name, attr_value in (
            ('unit', unit),
            ('quota_period', quota_period),
            ('next_reset_time', next_reset_time),
            ('current_value', current_value),
            ('limit', limit),
            ('name', name),
        ):
            setattr(self, attr_name, attr_value)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license import json from eve.utils import ParsedRequest import superdesk import logging from copy import deepcopy from flask import current_app as app from superdesk import get_resource_service, config from superdesk.errors import SuperdeskApiError from superdesk.media.media_operations import crop_image, process_file_from_stream from superdesk.upload import url_for_media from superdesk.metadata.item import CONTENT_TYPE, ITEM_TYPE, MEDIA_TYPES, ASSOCIATIONS from .renditions import _resize_image logger = logging.getLogger(__name__) class CropService: crop_sizes = [] def validate_crop(self, original, updates, crop_name): """Validate crop info on media item. :param dict original: original item :param dict updates: updated renditions :param str crop_name: name of the crop :param dict doc: crop co-ordinates :raises SuperdeskApiError.badRequestError: For following conditions: 1) if type != picture 2) if renditions are missing in the original image 3) if original rendition is missing 4) Crop name is invalid """ # Check if type is picture if original[ITEM_TYPE] != CONTENT_TYPE.PICTURE: raise SuperdeskApiError.badRequestError(message="Only images can be cropped!") # Check if the renditions exists if not original.get("renditions"): raise SuperdeskApiError.badRequestError(message="Missing renditions!") # Check if the original rendition exists if not original.get("renditions").get("original"): raise SuperdeskApiError.badRequestError(message="Missing original rendition!") # Check if the crop name is valid crop = self.get_crop_by_name(crop_name) crop_data = updates.get("renditions", {}).get(crop_name, {}) if not crop and "CropLeft" in crop_data: raise 
SuperdeskApiError.badRequestError(message="Unknown crop name! (name=%s)" % crop_name) self._validate_values(crop_data) self._validate_poi(original, updates, crop_name) self._validate_aspect_ratio(crop, crop_data) def _validate_values(self, crop): int_fields = ("CropLeft", "CropTop", "CropRight", "CropBottom", "width", "height") for field in int_fields: if field in crop: try: crop[field] = int(crop[field]) except (TypeError, ValueError): raise SuperdeskApiError.badRequestError("Invalid value for %s in renditions" % field) def _validate_poi(self, original, updates, crop_name): """Validate the crop point of interest in the renditions dictionary for the given crop :param dict original: original item :param dict updates: updated renditions """ renditions = original.get("renditions", {}) updated_renditions = updates.get("renditions", {}) original_image = deepcopy(renditions["original"]) original_image.update(updated_renditions.get("original", {})) if "poi" in updates: if "x" not in updates["poi"] or "y" not in updates["poi"]: del updates["poi"] return poi = updates["poi"] elif "poi" not in original: return else: if crop_name not in updated_renditions: return poi = original["poi"] crop_data = updated_renditions[crop_name] if crop_name in updated_renditions else renditions[crop_name] orig_poi_x = int(original_image["width"] * poi["x"]) orig_poi_y = int(original_image["height"] * poi["y"]) if ( orig_poi_y < crop_data.get("CropTop", 0) or orig_poi_y > crop_data.get("CropBottom", original_image["height"]) or orig_poi_x < crop_data.get("CropLeft", 0) or orig_poi_x > crop_data.get("CropRight", original_image["width"]) ): raise SuperdeskApiError("Point of interest outside the crop %s limits" % crop_name) def _validate_aspect_ratio(self, crop, doc): """Checks if the aspect ratio is consistent with one in defined in spec :param crop: Spec parameters :param doc: Posted parameters :raises SuperdeskApiError.badRequestError: """ if "CropLeft" not in doc: return width = 
doc["CropRight"] - doc["CropLeft"] height = doc["CropBottom"] - doc["CropTop"] if not (crop.get("width") or crop.get("height") or crop.get("ratio")): raise SuperdeskApiError.badRequestError( message="Crop data are missing. width, height or ratio need to be defined" ) if crop.get("width") and crop.get("height"): expected_crop_width = int(crop["width"]) expected_crop_height = int(crop["height"]) if width < expected_crop_width or height < expected_crop_height: raise SuperdeskApiError.badRequestError( message="Wrong crop size. Minimum crop size is {}x{}.".format(crop["width"], crop["height"]) ) doc_ratio = round(width / height, 1) spec_ratio = round(expected_crop_width / expected_crop_height, 1) if doc_ratio != spec_ratio: raise SuperdeskApiError.badRequestError(message="Wrong aspect ratio!") elif crop.get("ratio"): ratio = crop.get("ratio") if type(ratio) not in [int, float]: ratio = ratio.split(":") ratio = int(ratio[0]) / int(ratio[1]) if abs((width / height) - ratio) > 0.1: raise SuperdeskApiError.badRequestError( message="Ratio %s is not respected. 
We got %f" % (crop.get("ratio"), abs((width / height))) ) def get_crop_by_name(self, crop_name): """Finds the crop in the list of crops by name :param crop_name: Crop name :return: Matching crop or None """ if not self.crop_sizes: self.crop_sizes = get_resource_service("vocabularies").find_one(req=None, _id="crop_sizes").get("items") if not self.crop_sizes: raise SuperdeskApiError.badRequestError(message="Crops sizes couldn't be loaded!") return next((c for c in self.crop_sizes if c.get("name", "").lower() == crop_name.lower()), None) def create_crop(self, original_image, crop_name, crop_data): """Create a new crop based on the crop co-ordinates :param original: Article to add the crop :param crop_name: Name of the crop :param doc: Crop details :raises SuperdeskApiError.badRequestError :return dict: rendition """ original_file = app.media.fetch_rendition(original_image) if not original_file: raise SuperdeskApiError.badRequestError("Original file couldn't be found") try: cropped, out = crop_image(original_file, crop_name, crop_data) crop = self.get_crop_by_name(crop_name) if not cropped: raise SuperdeskApiError.badRequestError("Saving crop failed.") # resize if needed if crop.get("width") or crop.get("height"): out, width, height = _resize_image( out, size=(crop.get("width"), crop.get("height")), keepProportions=crop.get("keep_proportions", True), ) crop["width"] = width crop["height"] = height out.seek(0) return self._save_cropped_image(out, original_image, crop_data) except SuperdeskApiError: raise except Exception as ex: raise SuperdeskApiError.badRequestError("Generating crop failed: {}".format(str(ex))) def _save_cropped_image(self, file_stream, original, doc): """Saves the cropped image and returns the crop dictionary :param file_stream: cropped image stream :param original: original rendition :param doc: crop data :return dict: Crop values :raises SuperdeskApiError.internalError """ crop = {} try: file_name, content_type, metadata = process_file_from_stream( 
file_stream, content_type=original.get("mimetype") ) file_stream.seek(0) file_id = app.media.put( file_stream, filename=file_name, content_type=content_type, resource="upload", metadata=metadata ) crop["media"] = file_id crop["mimetype"] = content_type crop["href"] = url_for_media(file_id, content_type) crop["CropTop"] = doc.get("CropTop", None) crop["CropLeft"] = doc.get("CropLeft", None) crop["CropRight"] = doc.get("CropRight", None) crop["CropBottom"] = doc.get("CropBottom", None) return crop except Exception as ex: try: app.media.delete(file_id) except Exception: pass raise SuperdeskApiError.internalError("Generating crop failed: {}".format(str(ex)), exception=ex) def _delete_crop_file(self, file_id): """Delete the crop file :param Object_id file_id: Object_Id of the file. """ try: app.media.delete(file_id) except Exception: logger.exception("Crop File cannot be deleted. File_Id {}".format(file_id)) def create_multiple_crops(self, updates, original): """Create multiple crops based on the renditions. 
:param dict updates: update item :param dict original: original of the updated item """ if original.get(ITEM_TYPE) != CONTENT_TYPE.PICTURE: return update_renditions = updates.get("renditions", {}) renditions = deepcopy(original.get("renditions", {})) # keep renditions updates (urls may have changed) renditions.update(update_renditions) renditions = {k: renditions[k] for k in renditions if renditions[k]} if "original" in updates.get("renditions", {}): original_image = updates["renditions"]["original"] else: try: original_image = original["renditions"]["original"] except KeyError: return for key in [k for k in update_renditions if update_renditions[k]]: if not self.get_crop_by_name(key): continue original_crop = original.get("renditions", {}).get(key, {}) fields = ("CropLeft", "CropTop", "CropRight", "CropBottom") crop_data = update_renditions.get(key, {}) if any(crop_data.get(name) != original_crop.get(name) for name in fields) and not crop_data.get("media"): rendition = self.create_crop(original_image, key, crop_data) renditions[key] = rendition poi = updates.get("poi") if poi: for crop_name in renditions: self._set_crop_poi(renditions, crop_name, poi) updates["renditions"] = renditions def _set_crop_poi(self, renditions, crop_name, poi): """Set the crop point of interest in the renditions dictionary for the given crop :param dict renditions: updated renditions :param string crop_name: the crop for which to set the poi :param dict poi: the point of interest dictionary """ fields = ("CropLeft", "CropTop", "CropRight", "CropBottom") if "x" in poi and "y" in poi: original_image = renditions["original"] crop_data = renditions[crop_name] orig_poi_x = int(original_image["width"] * poi["x"]) orig_poi_y = int(original_image["height"] * poi["y"]) if any(name in crop_data for name in fields): crop_poi_x = orig_poi_x - crop_data.get("CropLeft", 0) crop_poi_y = orig_poi_y - crop_data.get("CropTop", 0) else: crop_poi_x = int(crop_data.get("width", original_image["width"]) * 
poi["x"]) crop_poi_y = int(crop_data.get("height", original_image["height"]) * poi["y"]) renditions[crop_name]["poi"] = {"x": crop_poi_x, "y": crop_poi_y} def validate_multiple_crops(self, updates, original): """Validate crops for the image :param dict updates: update item :param dict original: original of the updated item """ renditions = updates.get("renditions", {}) if not (renditions and original.get(ITEM_TYPE) == CONTENT_TYPE.PICTURE): return for key in [k for k in renditions if renditions[k]]: self.validate_crop(original, updates, key) def delete_replaced_crop_files(self, updates, original): """Delete the replaced crop files. :param dict updates: update item :param dict original: original of the updated item """ update_renditions = updates.get("renditions", {}) if original.get(ITEM_TYPE) == CONTENT_TYPE.PICTURE and update_renditions: renditions = original.get("renditions", {}) for key in update_renditions: if self.get_crop_by_name(key) and update_renditions.get(key, {}).get("media") != renditions.get( key, {} ).get("media"): self._delete_crop_file(renditions.get(key, {}).get("media")) def update_media_references(self, updates, original, published=False): """Update the media references collection. When item (media item or associated media) is updated or created, media_references are created. These media_references are updated to published state once the item is published. 
:param dict updates: Updates of the item :param dict original: Original item :param boolean published: True if publishing the item else False """ item_id = original.get(config.ID_FIELD) references = {} if updates.get("renditions", original.get("renditions", {})): references = {item_id: updates.get("renditions", original.get("renditions", {}))} if original.get(ITEM_TYPE) not in MEDIA_TYPES: associations = updates.get(ASSOCIATIONS) or original.get(ASSOCIATIONS) if not associations: return references = { assoc.get(config.ID_FIELD): assoc.get("renditions") for assoc in associations.values() if assoc and assoc.get("renditions") } if not references: return for assoc_id, renditions in references.items(): associated_id = assoc_id if assoc_id != item_id else None for rendition in [r for r in renditions.values() if r]: if not rendition.get("media"): continue media = str(rendition.get("media")) reference = get_resource_service("media_references").find_one(req=None, item_id=item_id, media_id=media) if not reference: try: get_resource_service("media_references").post( [ { "item_id": item_id, "media_id": media, "associated_id": associated_id, "published": False, } ] ) except Exception: logger.exception("Failed to insert media reference item {} media {}".format(item_id, media)) # item is publish if not published: return req = ParsedRequest() req.where = json.dumps({"item_id": item_id, "published": False}) refs = list(get_resource_service("media_references").get(req=req, lookup=None)) for ref in refs: try: get_resource_service("media_references").patch(ref.get(config.ID_FIELD), updates={"published": True}) except Exception: logger.exception( "Failed to update media " "reference item {} media {}".format(ref.get("item_id"), ref.get("media_id")) )
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 # # Copyright 2016 Red Hat, Inc. # # Authors: # Fam Zheng <famz@redhat.com> # # This work is licensed under the MIT License. Please see the LICENSE file or # http://opensource.org/licenses/MIT. from django.conf.urls import url from django.contrib.auth import views as auth_views from . import views from mod import dispatch_module_hook urlpatterns = [] dispatch_module_hook("www_url_hook", urlpatterns=urlpatterns) urlpatterns += [ url("^login/$", auth_views.LoginView.as_view(template_name="login.html"), name="login"), url("^logout/$", auth_views.LogoutView.as_view(), name="logout"), url("^change-password/$", auth_views.PasswordChangeView.as_view(template_name="password-change.html"), name="password_change"), url("^change-password/done/$", auth_views.PasswordChangeDoneView.as_view(template_name="password-change-done.html"), name="password_change_done"), url(r"^search$", views.view_search, name="search"), url(r"^search-help$", views.view_search_help, name="search_help"), url(r"^(?P<project>[^/]*)/$", views.view_series_list, name="series_list"), url(r"^(?P<project>[^/]*)/info$", views.view_project_detail, name="project_detail"), url( r"^(?P<project>[^/]*)/(?P<message_id>[^/]*)/$", views.view_series_detail, name="series_detail", ), url( r"^(?P<project>[^/]*)/(?P<thread_id>[^/]*)/(?P<message_id>[^/]*)/$", views.view_series_message, name="series_message", ), url( r"^(?P<project>[^/]*)/(?P<message_id>[^/]*)/mbox$", views.view_mbox, name="mbox" ), url(r"^$", views.view_project_list, name="project_list"), ]
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package configs import ( "strings" "testing" version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" tfaddr "github.com/hashicorp/terraform-registry-address" "github.com/hashicorp/terraform/internal/configs/configschema" "github.com/zclconf/go-cty/cty" ) // The Hash method assumes that the state_store schema doesn't include a provider block, // and it requires calling code to remove the nested provider block from state_store config data. func TestStateStore_Hash(t *testing.T) { // Normally these schemas would come from a provider's GetProviderSchema data exampleStateStoreSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "path": { Type: cty.String, Required: true, }, "workspace_dir": { Type: cty.String, Optional: true, }, }, } exampleProviderSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foobar": { Type: cty.String, Required: true, }, }, } // These values are all coupled. // The test case below asserts that given these inputs, the expected hash is returned. exampleProviderVersion := version.Must(version.NewSemver("1.2.3")) exampleProviderAddr := tfaddr.NewProvider(tfaddr.DefaultProviderRegistryHost, "hashicorp", "foobar") exampleConfig := configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir = "foobar" }`) exampleHash := 614398732 t.Run("example happy path with all attrs set in the configuration", func(t *testing.T) { // Construct a configs.StateStore for the test. 
content, _, cfgDiags := exampleConfig.PartialContent(terraformBlockSchema) if len(cfgDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", cfgDiags) } var ssDiags hcl.Diagnostics s, ssDiags := decodeStateStoreBlock(content.Blocks.OfType("state_store")[0]) if len(ssDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", ssDiags) } s.ProviderAddr = exampleProviderAddr // Test Hash method. gotHash, diags := s.Hash(exampleStateStoreSchema, exampleProviderSchema, exampleProviderVersion) if diags.HasErrors() { t.Fatalf("unexpected error: %s", diags.Err()) } if gotHash != exampleHash { t.Fatalf("expected hash for state_store to be %d, but got %d", exampleHash, gotHash) } }) // Test cases each change a single input that affects the output hash // Assertions check that the output hash doesn't match the hash above, following the changed input. cases := map[string]struct { config hcl.Body stateStoreSchema *configschema.Block providerVersion *version.Version providerAddr tfaddr.Provider }{ "changing the state store type affects the hash value": { config: configBodyForTest(t, `state_store "foobar_CHANGED_VALUE_HERE" { provider "foobar" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir = "foobar" }`), }, "changing the provider affects the hash value": { providerAddr: tfaddr.NewProvider(tfaddr.DefaultProviderRegistryHost, "hashicorp", "different-provider"), config: configBodyForTest(t, `state_store "different-provider_fs" { provider "different-provider" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir = "foobar" }`), }, "changing the provider version affects the hash value": { providerVersion: version.Must(version.NewSemver("9.9.9")), }, } for tn, tc := range cases { t.Run(tn, func(t *testing.T) { // If a test case doesn't set an override for these inputs, // instead use a default value from the example above. 
var config hcl.Body var schema *configschema.Block var providerVersion *version.Version var providerAddr tfaddr.Provider if tc.config == nil { config = exampleConfig } else { config = tc.config } if tc.stateStoreSchema == nil { schema = exampleStateStoreSchema } else { schema = tc.stateStoreSchema } if tc.providerVersion == nil { providerVersion = exampleProviderVersion } else { providerVersion = tc.providerVersion } if tc.providerAddr.IsZero() { providerAddr = exampleProviderAddr } else { providerAddr = tc.providerAddr } // Construct a configs.StateStore for the test. content, _, cfgDiags := config.PartialContent(terraformBlockSchema) if len(cfgDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", cfgDiags) } var ssDiags hcl.Diagnostics s, ssDiags := decodeStateStoreBlock(content.Blocks.OfType("state_store")[0]) if len(ssDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", ssDiags) } s.ProviderAddr = providerAddr // Test Hash method. gotHash, diags := s.Hash(schema, exampleProviderSchema, providerVersion) if diags.HasErrors() { t.Fatalf("unexpected error: %s", diags.Err()) } if gotHash == exampleHash { t.Fatal("expected hash for state_store to be different from the example due to a changed input, but it matched.") } }) } } func TestStateStore_Hash_edgeCases(t *testing.T) { // Normally these schemas would come from a provider's GetProviderSchema data stateStoreSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "path": { Type: cty.String, Required: true, }, "workspace_dir": { Type: cty.String, Optional: true, }, }, } providerSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foobar": { Type: cty.String, Required: true, }, }, } providerAddr := tfaddr.NewProvider(tfaddr.DefaultProviderRegistryHost, "hashicorp", "foobar") providerVersion := version.Must(version.NewSemver("1.2.3")) config := configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir 
= "foobar" }`) cases := map[string]struct { config hcl.Body providerAddr tfaddr.Provider providerVersion *version.Version reattachConfig string }{ "tolerates empty config block for the provider even when schema has Required field(s)": { config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { # required field "foobar" is missing } path = "mystate.tfstate" workspace_dir = "foobar" }`), providerAddr: providerAddr, providerVersion: providerVersion, }, "tolerates missing Required field(s) in state_store config": { config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } # required field "path" is missing workspace_dir = "foobar" }`), providerAddr: providerAddr, providerVersion: providerVersion, }, "tolerates missing provider version data when using a builtin provider": { config: config, providerAddr: tfaddr.NewProvider(tfaddr.BuiltInProviderHost, "hashicorp", "foobar"), // Builtin providerVersion: nil, // No version }, "tolerates missing provider version data when using a reattached provider": { config: config, providerAddr: providerAddr, providerVersion: nil, // No version reattachConfig: `{ "foobar": { "Protocol": "grpc", "ProtocolVersion": 6, "Pid": 12345, "Test": true, "Addr": { "Network": "unix", "String":"/var/folders/xx/abcde12345/T/plugin12345" } } }`, }, } for tn, tc := range cases { t.Run(tn, func(t *testing.T) { if tc.reattachConfig != "" { t.Setenv("TF_REATTACH_PROVIDERS", tc.reattachConfig) } // Construct a configs.StateStore for the test. content, _, cfgDiags := config.PartialContent(terraformBlockSchema) if len(cfgDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", cfgDiags) } var ssDiags hcl.Diagnostics s, ssDiags := decodeStateStoreBlock(content.Blocks.OfType("state_store")[0]) if len(ssDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", ssDiags) } s.ProviderAddr = tc.providerAddr // Test Hash method. 
_, diags := s.Hash(stateStoreSchema, providerSchema, tc.providerVersion) if diags.HasErrors() { t.Fatalf("unexpected error: %s", diags.Err()) } }) } } func TestStateStore_Hash_errorConditions(t *testing.T) { // Normally these schemas would come from a provider's GetProviderSchema data exampleStateStoreSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "path": { Type: cty.String, Required: true, }, "workspace_dir": { Type: cty.String, Optional: true, }, }, } exampleProviderSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foobar": { Type: cty.String, Required: true, }, }, } exampleProviderVersion := version.Must(version.NewSemver("1.2.3")) // Cases where an error would occur cases := map[string]struct { config hcl.Body stateStoreSchema *configschema.Block providerVersion *version.Version wantErrorString string }{ "returns errors when the state_store config doesn't match the schema": { providerVersion: exampleProviderVersion, stateStoreSchema: exampleStateStoreSchema, config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } unexpected_block { foobar = "foobar" } unexpected_attr = "foobar" path = "mystate.tfstate" workspace_dir = "foobar" }`), wantErrorString: "Unsupported argument", }, "returns errors when the provider config doesn't match the schema": { providerVersion: exampleProviderVersion, stateStoreSchema: exampleStateStoreSchema, config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" unexpected_attr = "foobar" unexpected_block { foobar = "foobar" } } path = "mystate.tfstate" workspace_dir = "foobar" }`), wantErrorString: "Unsupported argument", }, "returns an error if the state_store schema includes a provider block": { providerVersion: exampleProviderVersion, stateStoreSchema: &configschema.Block{ BlockTypes: map[string]*configschema.NestedBlock{ "provider": { Block: configschema.Block{ Attributes: 
map[string]*configschema.Attribute{ "foo": { Type: cty.String, Optional: true, }, }, }, Nesting: configschema.NestingSingle, }, }, }, config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir = "foobar" }`), wantErrorString: `Protected block name "provider" in state store schema`, }, "returns an error if the state_store schema includes a provider attribute": { providerVersion: exampleProviderVersion, stateStoreSchema: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "provider": { Type: cty.String, Optional: true, }, }, }, config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir = "foobar" }`), wantErrorString: `Protected argument name "provider" in state store schema`, }, "returns an error if the provider version is missing when using a non-builtin, non-reattached provider": { providerVersion: nil, // No value provided in this test case stateStoreSchema: exampleStateStoreSchema, config: configBodyForTest(t, `state_store "foobar_fs" { provider "foobar" { foobar = "foobar" } path = "mystate.tfstate" workspace_dir = "foobar" }`), wantErrorString: `Provider version data was missing during hash generation`, }, } for tn, tc := range cases { t.Run(tn, func(t *testing.T) { // Construct a configs.StateStore for the test. content, _, cfgDiags := tc.config.PartialContent(terraformBlockSchema) if len(cfgDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", cfgDiags) } var ssDiags hcl.Diagnostics s, ssDiags := decodeStateStoreBlock(content.Blocks.OfType("state_store")[0]) if len(ssDiags) > 0 { t.Fatalf("unexpected diagnostics: %s", ssDiags) } s.ProviderAddr = tfaddr.NewProvider(tfaddr.DefaultProviderRegistryHost, "hashicorp", "foobar") // Test Hash method. 
_, diags := s.Hash(tc.stateStoreSchema, exampleProviderSchema, tc.providerVersion) if !diags.HasErrors() { t.Fatal("expected error but got none") } if !strings.Contains(diags.Err().Error(), tc.wantErrorString) { t.Fatalf("expected error to contain %q but got: %s", tc.wantErrorString, diags.Err()) } }) } } func configBodyForTest(t *testing.T, config string) hcl.Body { t.Helper() f, diags := hclsyntax.ParseConfig([]byte(config), "", hcl.Pos{Line: 1, Column: 1}) if diags.HasErrors() { t.Fatalf("failure creating hcl.Body during test setup: %s", diags.Error()) } return f.Body }
go
github
https://github.com/hashicorp/terraform
internal/configs/state_store_test.go
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ # noqa from horizon import browsers from openstack_dashboard.dashboards.project.containers import tables class ContainerBrowser(browsers.ResourceBrowser): name = "swift" verbose_name = _("Swift") navigation_table_class = tables.ContainersTable content_table_class = tables.ObjectsTable navigable_item_name = _("Container") navigation_kwarg_name = "container_name" content_kwarg_name = "subfolder_path" has_breadcrumb = True breadcrumb_url = "horizon:project:containers:index"
unknown
codeparrot/codeparrot-clean
# pylint:disable=pointless-string-statement, fixme, misplaced-comparison-constant, comparison-with-itself """Stray backslash escapes may be missing a raw-string prefix.""" __revision__ = '$Id$' # Bad escape sequences, which probably don't do what you expect. A = "\[\]\\" # [anomalous-backslash-in-string,anomalous-backslash-in-string] assert '\/' == '\\/' # [anomalous-backslash-in-string] ESCAPE_BACKSLASH = '\`' # [anomalous-backslash-in-string] # Valid escape sequences. NEWLINE = "\n" OLD_ESCAPES = '\a\b\f\n\t\r\v' HEX = '\xad\x0a\x0d' # +1:[anomalous-backslash-in-string,anomalous-backslash-in-string] FALSE_OCTAL = '\o123\o000' # Not octal in Python OCTAL = '\123\000' NOT_OCTAL = '\888\999' # [anomalous-backslash-in-string,anomalous-backslash-in-string] NUL = '\0' UNICODE = u'\u1234' HIGH_UNICODE = u'\U0000abcd' QUOTES = '\'\"' LITERAL_NEWLINE = '\ ' ESCAPE_UNICODE = "\\\\n" # Bad docstring # +3:[anomalous-backslash-in-string] """Even in a docstring You shouldn't have ambiguous text like: C:\Program Files\alpha """
unknown
codeparrot/codeparrot-clean
/* * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include <Python.h> #include "pycore_object.h" // _PyObject_VisitType() #include "pycore_pystate.h" // _PyThreadState_GET() #include "pycore_typeobject.h" #include <mpdecimal.h> // Reuse config from mpdecimal.h if present. 
#if defined(MPD_CONFIG_64) #ifndef CONFIG_64 #define CONFIG_64 MPD_CONFIG_64 #endif #elif defined(MPD_CONFIG_32) #ifndef CONFIG_32 #define CONFIG_32 MPD_CONFIG_32 #endif #endif #include <ctype.h> // isascii() #include <stdlib.h> #ifdef EXTRA_FUNCTIONALITY #define _PY_DEC_ROUND_GUARD MPD_ROUND_GUARD #else #define _PY_DEC_ROUND_GUARD (MPD_ROUND_GUARD-1) #endif #include "clinic/_decimal.c.h" #define MPD_SPEC_VERSION "1.70" // Highest version of the spec this complies with // See https://speleotrove.com/decimal/decarith.html /*[clinic input] module _decimal class _decimal.Decimal "PyObject *" "&dec_spec" class _decimal.Context "PyObject *" "&context_spec" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a6a6c0bdf4e576ef]*/ struct PyDecContextObject; struct DecCondMap; typedef struct { PyTypeObject *PyDecContextManager_Type; PyTypeObject *PyDecContext_Type; PyTypeObject *PyDecSignalDictMixin_Type; PyTypeObject *PyDec_Type; PyTypeObject *PyDecSignalDict_Type; PyTypeObject *DecimalTuple; /* Top level Exception; inherits from ArithmeticError */ PyObject *DecimalException; #ifndef WITH_DECIMAL_CONTEXTVAR /* Key for thread state dictionary */ PyObject *tls_context_key; /* Invariant: NULL or a strong reference to the most recently accessed thread local context. */ struct PyDecContextObject *cached_context; /* Not borrowed */ #else PyObject *current_context_var; #endif /* Template for creating new thread contexts, calling Context() without * arguments and initializing the module_context on first access. 
*/ PyObject *default_context_template; /* Basic and extended context templates */ PyObject *basic_context_template; PyObject *extended_context_template; PyObject *round_map[_PY_DEC_ROUND_GUARD]; /* Convert rationals for comparison */ PyObject *Rational; /* Invariant: NULL or pointer to _pydecimal.Decimal */ PyObject *PyDecimal; PyObject *SignalTuple; struct DecCondMap *signal_map; struct DecCondMap *cond_map; /* External C-API functions */ binaryfunc _py_long_multiply; binaryfunc _py_long_floor_divide; ternaryfunc _py_long_power; unaryfunc _py_float_abs; PyCFunction _py_long_bit_length; PyCFunction _py_float_as_integer_ratio; } decimal_state; static inline decimal_state * get_module_state(PyObject *mod) { decimal_state *state = _PyModule_GetState(mod); assert(state != NULL); return state; } static struct PyModuleDef _decimal_module; static PyType_Spec dec_spec; static PyType_Spec context_spec; static inline decimal_state * get_module_state_by_def(PyTypeObject *tp) { PyObject *mod = PyType_GetModuleByDef(tp, &_decimal_module); assert(mod != NULL); return get_module_state(mod); } static inline decimal_state * find_state_left_or_right(PyObject *left, PyObject *right) { PyTypeObject *base; if (PyType_GetBaseByToken(Py_TYPE(left), &dec_spec, &base) != 1) { assert(!PyErr_Occurred()); PyType_GetBaseByToken(Py_TYPE(right), &dec_spec, &base); } assert(base != NULL); void *state = _PyType_GetModuleState(base); assert(state != NULL); Py_DECREF(base); return (decimal_state *)state; } static inline decimal_state * find_state_ternary(PyObject *left, PyObject *right, PyObject *modulus) { PyTypeObject *base; if (PyType_GetBaseByToken(Py_TYPE(left), &dec_spec, &base) != 1) { assert(!PyErr_Occurred()); if (PyType_GetBaseByToken(Py_TYPE(right), &dec_spec, &base) != 1) { assert(!PyErr_Occurred()); PyType_GetBaseByToken(Py_TYPE(modulus), &dec_spec, &base); } } assert(base != NULL); void *state = _PyType_GetModuleState(base); assert(state != NULL); Py_DECREF(base); return (decimal_state 
*)state; } #if !defined(MPD_VERSION_HEX) || MPD_VERSION_HEX < 0x02050000 #error "libmpdec version >= 2.5.0 required" #endif /* * Type sizes with assertions in mpdecimal.h and pyport.h: * sizeof(size_t) == sizeof(Py_ssize_t) * sizeof(size_t) == sizeof(mpd_uint_t) == sizeof(mpd_ssize_t) */ #ifdef TEST_COVERAGE #undef Py_LOCAL_INLINE #define Py_LOCAL_INLINE Py_LOCAL #endif #define MPD_Float_operation MPD_Not_implemented #define BOUNDS_CHECK(x, MIN, MAX) x = (x < MIN || MAX < x) ? MAX : x /* _Py_DEC_MINALLOC >= MPD_MINALLOC */ #define _Py_DEC_MINALLOC 4 typedef struct { PyObject_HEAD Py_hash_t hash; mpd_t dec; mpd_uint_t data[_Py_DEC_MINALLOC]; } PyDecObject; #define _PyDecObject_CAST(op) ((PyDecObject *)(op)) typedef struct { PyObject_HEAD uint32_t *flags; } PyDecSignalDictObject; #define _PyDecSignalDictObject_CAST(op) ((PyDecSignalDictObject *)(op)) typedef struct PyDecContextObject { PyObject_HEAD mpd_context_t ctx; PyObject *traps; PyObject *flags; int capitals; PyThreadState *tstate; decimal_state *modstate; } PyDecContextObject; #define _PyDecContextObject_CAST(op) ((PyDecContextObject *)(op)) typedef struct { PyObject_HEAD PyObject *local; PyObject *global; } PyDecContextManagerObject; #define _PyDecContextManagerObject_CAST(op) ((PyDecContextManagerObject *)(op)) #undef MPD #undef CTX #define PyDec_CheckExact(st, v) Py_IS_TYPE(v, (st)->PyDec_Type) #define PyDec_Check(st, v) PyObject_TypeCheck(v, (st)->PyDec_Type) #define PyDecSignalDict_Check(st, v) Py_IS_TYPE(v, (st)->PyDecSignalDict_Type) #define PyDecContext_Check(st, v) PyObject_TypeCheck(v, (st)->PyDecContext_Type) #define MPD(v) (&_PyDecObject_CAST(v)->dec) #define SdFlagAddr(v) (_PyDecSignalDictObject_CAST(v)->flags) #define SdFlags(v) (*_PyDecSignalDictObject_CAST(v)->flags) #define CTX(v) (&_PyDecContextObject_CAST(v)->ctx) #define CtxCaps(v) (_PyDecContextObject_CAST(v)->capitals) static inline decimal_state * get_module_state_from_ctx(PyObject *v) { assert(PyType_GetBaseByToken(Py_TYPE(v), 
&context_spec, NULL) == 1); decimal_state *state = ((PyDecContextObject *)v)->modstate; assert(state != NULL); return state; } Py_LOCAL_INLINE(PyObject *) incr_true(void) { return Py_NewRef(Py_True); } Py_LOCAL_INLINE(PyObject *) incr_false(void) { return Py_NewRef(Py_False); } /* Error codes for functions that return signals or conditions */ #define DEC_INVALID_SIGNALS (MPD_Max_status+1U) #define DEC_ERR_OCCURRED (DEC_INVALID_SIGNALS<<1) #define DEC_ERRORS (DEC_INVALID_SIGNALS|DEC_ERR_OCCURRED) typedef struct DecCondMap { const char *name; /* condition or signal name */ const char *fqname; /* fully qualified name */ uint32_t flag; /* libmpdec flag */ PyObject *ex; /* corresponding exception */ } DecCondMap; /* Exceptions that correspond to IEEE signals */ #define SUBNORMAL 5 #define INEXACT 6 #define ROUNDED 7 #define SIGNAL_MAP_LEN 9 static DecCondMap signal_map_template[] = { {"InvalidOperation", "decimal.InvalidOperation", MPD_IEEE_Invalid_operation, NULL}, {"FloatOperation", "decimal.FloatOperation", MPD_Float_operation, NULL}, {"DivisionByZero", "decimal.DivisionByZero", MPD_Division_by_zero, NULL}, {"Overflow", "decimal.Overflow", MPD_Overflow, NULL}, {"Underflow", "decimal.Underflow", MPD_Underflow, NULL}, {"Subnormal", "decimal.Subnormal", MPD_Subnormal, NULL}, {"Inexact", "decimal.Inexact", MPD_Inexact, NULL}, {"Rounded", "decimal.Rounded", MPD_Rounded, NULL}, {"Clamped", "decimal.Clamped", MPD_Clamped, NULL}, {NULL} }; /* Exceptions that inherit from InvalidOperation */ static DecCondMap cond_map_template[] = { {"InvalidOperation", "decimal.InvalidOperation", MPD_Invalid_operation, NULL}, {"ConversionSyntax", "decimal.ConversionSyntax", MPD_Conversion_syntax, NULL}, {"DivisionImpossible", "decimal.DivisionImpossible", MPD_Division_impossible, NULL}, {"DivisionUndefined", "decimal.DivisionUndefined", MPD_Division_undefined, NULL}, {"InvalidContext", "decimal.InvalidContext", MPD_Invalid_context, NULL}, #ifdef EXTRA_FUNCTIONALITY {"MallocError", 
"decimal.MallocError", MPD_Malloc_error, NULL}, #endif {NULL} }; /* Return a duplicate of DecCondMap template */ static inline DecCondMap * dec_cond_map_init(DecCondMap *template, Py_ssize_t size) { DecCondMap *cm; cm = PyMem_Malloc(size); if (cm == NULL) { PyErr_NoMemory(); return NULL; } memcpy(cm, template, size); return cm; } static const char *dec_signal_string[MPD_NUM_FLAGS] = { "Clamped", "InvalidOperation", "DivisionByZero", "InvalidOperation", "InvalidOperation", "InvalidOperation", "Inexact", "InvalidOperation", "InvalidOperation", "InvalidOperation", "FloatOperation", "Overflow", "Rounded", "Subnormal", "Underflow", }; static const char *invalid_rounding_err = "valid values for rounding are:\n\ [ROUND_CEILING, ROUND_FLOOR, ROUND_UP, ROUND_DOWN,\n\ ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,\n\ ROUND_05UP]"; static const char *invalid_signals_err = "valid values for signals are:\n\ [InvalidOperation, FloatOperation, DivisionByZero,\n\ Overflow, Underflow, Subnormal, Inexact, Rounded,\n\ Clamped]"; #ifdef EXTRA_FUNCTIONALITY static const char *invalid_flags_err = "valid values for _flags or _traps are:\n\ signals:\n\ [DecIEEEInvalidOperation, DecFloatOperation, DecDivisionByZero,\n\ DecOverflow, DecUnderflow, DecSubnormal, DecInexact, DecRounded,\n\ DecClamped]\n\ conditions which trigger DecIEEEInvalidOperation:\n\ [DecInvalidOperation, DecConversionSyntax, DecDivisionImpossible,\n\ DecDivisionUndefined, DecFpuError, DecInvalidContext, DecMallocError]"; #endif static int value_error_int(const char *mesg) { PyErr_SetString(PyExc_ValueError, mesg); return -1; } static PyObject * value_error_ptr(const char *mesg) { PyErr_SetString(PyExc_ValueError, mesg); return NULL; } static int type_error_int(const char *mesg) { PyErr_SetString(PyExc_TypeError, mesg); return -1; } static int runtime_error_int(const char *mesg) { PyErr_SetString(PyExc_RuntimeError, mesg); return -1; } #define INTERNAL_ERROR_INT(funcname) \ return runtime_error_int("internal error in " 
funcname) static PyObject * runtime_error_ptr(const char *mesg) { PyErr_SetString(PyExc_RuntimeError, mesg); return NULL; } #define INTERNAL_ERROR_PTR(funcname) \ return runtime_error_ptr("internal error in " funcname) static void dec_traphandler(mpd_context_t *Py_UNUSED(ctx)) /* GCOV_NOT_REACHED */ { /* GCOV_NOT_REACHED */ return; /* GCOV_NOT_REACHED */ } static PyObject * flags_as_exception(decimal_state *state, uint32_t flags) { DecCondMap *cm; for (cm = state->signal_map; cm->name != NULL; cm++) { if (flags&cm->flag) { return cm->ex; } } INTERNAL_ERROR_PTR("flags_as_exception"); /* GCOV_NOT_REACHED */ } Py_LOCAL_INLINE(uint32_t) exception_as_flag(decimal_state *state, PyObject *ex) { DecCondMap *cm; for (cm = state->signal_map; cm->name != NULL; cm++) { if (cm->ex == ex) { return cm->flag; } } PyErr_SetString(PyExc_KeyError, invalid_signals_err); return DEC_INVALID_SIGNALS; } static PyObject * flags_as_list(decimal_state *state, uint32_t flags) { PyObject *list; DecCondMap *cm; list = PyList_New(0); if (list == NULL) { return NULL; } for (cm = state->cond_map; cm->name != NULL; cm++) { if (flags&cm->flag) { if (PyList_Append(list, cm->ex) < 0) { goto error; } } } for (cm = state->signal_map+1; cm->name != NULL; cm++) { if (flags&cm->flag) { if (PyList_Append(list, cm->ex) < 0) { goto error; } } } return list; error: Py_DECREF(list); return NULL; } static PyObject * signals_as_list(decimal_state *state, uint32_t flags) { PyObject *list; DecCondMap *cm; list = PyList_New(0); if (list == NULL) { return NULL; } for (cm = state->signal_map; cm->name != NULL; cm++) { if (flags&cm->flag) { if (PyList_Append(list, cm->ex) < 0) { Py_DECREF(list); return NULL; } } } return list; } static uint32_t list_as_flags(decimal_state *state, PyObject *list) { PyObject *item; uint32_t flags, x; Py_ssize_t n, j; assert(PyList_Check(list)); n = PyList_Size(list); flags = 0; for (j = 0; j < n; j++) { item = PyList_GetItem(list, j); x = exception_as_flag(state, item); if (x & 
DEC_ERRORS) { return x; } flags |= x; } return flags; } static PyObject * flags_as_dict(decimal_state *state, uint32_t flags) { DecCondMap *cm; PyObject *dict; dict = PyDict_New(); if (dict == NULL) { return NULL; } for (cm = state->signal_map; cm->name != NULL; cm++) { PyObject *b = flags&cm->flag ? Py_True : Py_False; if (PyDict_SetItem(dict, cm->ex, b) < 0) { Py_DECREF(dict); return NULL; } } return dict; } static uint32_t dict_as_flags(decimal_state *state, PyObject *val) { PyObject *b; DecCondMap *cm; uint32_t flags = 0; int x; if (!PyDict_Check(val)) { PyErr_SetString(PyExc_TypeError, "argument must be a signal dict"); return DEC_INVALID_SIGNALS; } if (PyDict_Size(val) != SIGNAL_MAP_LEN) { PyErr_SetString(PyExc_KeyError, "invalid signal dict"); return DEC_INVALID_SIGNALS; } for (cm = state->signal_map; cm->name != NULL; cm++) { b = PyDict_GetItemWithError(val, cm->ex); if (b == NULL) { if (PyErr_Occurred()) { return DEC_ERR_OCCURRED; } PyErr_SetString(PyExc_KeyError, "invalid signal dict"); return DEC_INVALID_SIGNALS; } x = PyObject_IsTrue(b); if (x < 0) { return DEC_ERR_OCCURRED; } if (x == 1) { flags |= cm->flag; } } return flags; } #ifdef EXTRA_FUNCTIONALITY static uint32_t long_as_flags(PyObject *v) { long x; x = PyLong_AsLong(v); if (x == -1 && PyErr_Occurred()) { return DEC_ERR_OCCURRED; } if (x < 0 || x > (long)MPD_Max_status) { PyErr_SetString(PyExc_TypeError, invalid_flags_err); return DEC_INVALID_SIGNALS; } return x; } #endif static int dec_addstatus(PyObject *context, uint32_t status) { mpd_context_t *ctx = CTX(context); decimal_state *state = get_module_state_from_ctx(context); ctx->status |= status; if (status & (ctx->traps|MPD_Malloc_error)) { PyObject *ex, *siglist; if (status & MPD_Malloc_error) { PyErr_NoMemory(); return 1; } ex = flags_as_exception(state, ctx->traps&status); if (ex == NULL) { return 1; /* GCOV_NOT_REACHED */ } siglist = flags_as_list(state, ctx->traps&status); if (siglist == NULL) { return 1; } PyErr_SetObject(ex, siglist); 
Py_DECREF(siglist); return 1; } return 0; } static int getround(decimal_state *state, PyObject *v) { int i; if (PyUnicode_Check(v)) { for (i = 0; i < _PY_DEC_ROUND_GUARD; i++) { if (v == state->round_map[i]) { return i; } } for (i = 0; i < _PY_DEC_ROUND_GUARD; i++) { if (PyUnicode_Compare(v, state->round_map[i]) == 0) { return i; } } } return type_error_int(invalid_rounding_err); } /******************************************************************************/ /* SignalDict Object */ /******************************************************************************/ /* The SignalDict is a MutableMapping that provides access to the mpd_context_t flags, which reside in the context object. When a new context is created, context.traps and context.flags are initialized to new SignalDicts. Once a SignalDict is tied to a context, it cannot be deleted. */ static const char *INVALID_SIGNALDICT_ERROR_MSG = "invalid signal dict"; static int signaldict_init(PyObject *self, PyObject *Py_UNUSED(args), PyObject *Py_UNUSED(kwds)) { SdFlagAddr(self) = NULL; return 0; } static Py_ssize_t signaldict_len(PyObject *self) { if (SdFlagAddr(self) == NULL) { return value_error_int(INVALID_SIGNALDICT_ERROR_MSG); } return SIGNAL_MAP_LEN; } static PyObject * signaldict_iter(PyObject *self) { if (SdFlagAddr(self) == NULL) { return value_error_ptr(INVALID_SIGNALDICT_ERROR_MSG); } decimal_state *state = get_module_state_by_def(Py_TYPE(self)); return PyTuple_Type.tp_iter(state->SignalTuple); } static PyObject * signaldict_getitem(PyObject *self, PyObject *key) { uint32_t flag; if (SdFlagAddr(self) == NULL) { return value_error_ptr(INVALID_SIGNALDICT_ERROR_MSG); } decimal_state *state = get_module_state_by_def(Py_TYPE(self)); flag = exception_as_flag(state, key); if (flag & DEC_ERRORS) { return NULL; } return SdFlags(self)&flag ? 
incr_true() : incr_false();
}

/* Mapping-protocol assignment: set or clear one signal flag.
   Deleting a key is forbidden (the flags live inside the owning context).
   Returns 0 on success, -1 with an exception set on error. */
static int
signaldict_setitem(PyObject *self, PyObject *key, PyObject *value)
{
    uint32_t flag;
    int x;

    /* A SignalDict detached from any context is unusable. */
    if (SdFlagAddr(self) == NULL) {
        return value_error_int(INVALID_SIGNALDICT_ERROR_MSG);
    }

    if (value == NULL) {
        /* mp_ass_subscript with value==NULL means "del d[key]". */
        return value_error_int("signal keys cannot be deleted");
    }

    decimal_state *state = get_module_state_by_def(Py_TYPE(self));
    flag = exception_as_flag(state, key);
    if (flag & DEC_ERRORS) {
        return -1;
    }

    x = PyObject_IsTrue(value);
    if (x < 0) {
        return -1;
    }
    if (x == 1) {
        SdFlags(self) |= flag;
    }
    else {
        SdFlags(self) &= ~flag;
    }

    return 0;
}

/* Heap-type dealloc: untrack from the GC, free, and release the
   reference the instance holds on its heap type. */
static void
signaldict_dealloc(PyObject *self)
{
    PyTypeObject *tp = Py_TYPE(self);
    PyObject_GC_UnTrack(self);
    tp->tp_free(self);
    Py_DECREF(tp);
}

/* repr(): render all nine signals as a dict-literal-like string,
   e.g. "{<class 'decimal.Clamped'>:False, ...}". */
static PyObject *
signaldict_repr(PyObject *self)
{
    DecCondMap *cm;
    const char *n[SIGNAL_MAP_LEN]; /* name */
    const char *b[SIGNAL_MAP_LEN]; /* bool */
    int i;

    if (SdFlagAddr(self) == NULL) {
        return value_error_ptr(INVALID_SIGNALDICT_ERROR_MSG);
    }

    /* The format string below hard-codes nine (name, bool) pairs. */
    assert(SIGNAL_MAP_LEN == 9);

    decimal_state *state = get_module_state_by_def(Py_TYPE(self));
    for (cm=state->signal_map, i=0; cm->name != NULL; cm++, i++) {
        n[i] = cm->fqname;
        b[i] = SdFlags(self)&cm->flag ? "True" : "False";
    }

    return PyUnicode_FromFormat(
        "{<class '%s'>:%s, <class '%s'>:%s, <class '%s'>:%s, "
         "<class '%s'>:%s, <class '%s'>:%s, <class '%s'>:%s, "
         "<class '%s'>:%s, <class '%s'>:%s, <class '%s'>:%s}",
            n[0], b[0], n[1], b[1], n[2], b[2],
            n[3], b[3], n[4], b[4], n[5], b[5],
            n[6], b[6], n[7], b[7], n[8], b[8]);
}

/* Rich comparison: only == and != are supported, against another
   SignalDict or a plain dict of {exception: bool}. */
static PyObject *
signaldict_richcompare(PyObject *v, PyObject *w, int op)
{
    PyObject *res = Py_NotImplemented;

    decimal_state *state = get_module_state_by_def(Py_TYPE(v));
    assert(PyDecSignalDict_Check(state, v));

    if ((SdFlagAddr(v) == NULL) || (SdFlagAddr(w) == NULL)) {
        return value_error_ptr(INVALID_SIGNALDICT_ERROR_MSG);
    }

    if (op == Py_EQ || op == Py_NE) {
        if (PyDecSignalDict_Check(state, w)) {
            res = (SdFlags(v)==SdFlags(w)) ^ (op==Py_NE) ?
Py_True : Py_False; } else if (PyDict_Check(w)) { uint32_t flags = dict_as_flags(state, w); if (flags & DEC_ERRORS) { if (flags & DEC_INVALID_SIGNALS) { /* non-comparable: Py_NotImplemented */ PyErr_Clear(); } else { return NULL; } } else { res = (SdFlags(v)==flags) ^ (op==Py_NE) ? Py_True : Py_False; } } } return Py_NewRef(res); } static PyObject * signaldict_copy(PyObject *self, PyObject *Py_UNUSED(dummy)) { if (SdFlagAddr(self) == NULL) { return value_error_ptr(INVALID_SIGNALDICT_ERROR_MSG); } decimal_state *state = get_module_state_by_def(Py_TYPE(self)); return flags_as_dict(state, SdFlags(self)); } static PyMethodDef signaldict_methods[] = { { "copy", signaldict_copy, METH_NOARGS, NULL}, {NULL, NULL} }; static PyType_Slot signaldict_slots[] = { {Py_tp_dealloc, signaldict_dealloc}, {Py_tp_traverse, _PyObject_VisitType}, {Py_tp_repr, signaldict_repr}, {Py_tp_hash, PyObject_HashNotImplemented}, {Py_tp_getattro, PyObject_GenericGetAttr}, {Py_tp_richcompare, signaldict_richcompare}, {Py_tp_iter, signaldict_iter}, {Py_tp_methods, signaldict_methods}, {Py_tp_init, signaldict_init}, // Mapping protocol {Py_mp_length, signaldict_len}, {Py_mp_subscript, signaldict_getitem}, {Py_mp_ass_subscript, signaldict_setitem}, {0, NULL}, }; static PyType_Spec signaldict_spec = { .name = "decimal.SignalDictMixin", .basicsize = sizeof(PyDecSignalDictObject), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = signaldict_slots, }; /******************************************************************************/ /* Context Object, Part 1 */ /******************************************************************************/ #define Dec_CONTEXT_GET_SSIZE(mem) \ static PyObject * \ context_get##mem(PyObject *self, void *Py_UNUSED(closure)) \ { \ return PyLong_FromSsize_t(mpd_get##mem(CTX(self))); \ } #define Dec_CONTEXT_GET_ULONG(mem) \ static PyObject * \ context_get##mem(PyObject *self, void *Py_UNUSED(closure)) \ { \ return 
        PyLong_FromUnsignedLong(mpd_get##mem(CTX(self))); \
}

/* Getters for the simple integer context attributes, generated from the
   macros above (one function per mpd_context_t field). */
Dec_CONTEXT_GET_SSIZE(prec)
Dec_CONTEXT_GET_SSIZE(emax)
Dec_CONTEXT_GET_SSIZE(emin)
Dec_CONTEXT_GET_SSIZE(clamp)

#ifdef EXTRA_FUNCTIONALITY
Dec_CONTEXT_GET_ULONG(traps)
Dec_CONTEXT_GET_ULONG(status)
#endif

/* Return the rounding mode as one of the interned ROUND_* strings
   cached in the module state (round_map is indexed by the libmpdec
   rounding-mode integer). */
static PyObject *
context_getround(PyObject *self, void *Py_UNUSED(closure))
{
    int i = mpd_getround(CTX(self));
    decimal_state *state = get_module_state_from_ctx(self);

    return Py_NewRef(state->round_map[i]);
}

/* Return the 'capitals' attribute (0 or 1, stored outside mpd_context_t). */
static PyObject *
context_getcapitals(PyObject *self, void *Py_UNUSED(closure))
{
    return PyLong_FromLong(CtxCaps(self));
}

#ifdef EXTRA_FUNCTIONALITY
/* Return the libmpdec 'allcr' (correct rounding) setting. */
static PyObject *
context_getallcr(PyObject *self, void *Py_UNUSED(closure))
{
    return PyLong_FromLong(mpd_getcr(CTX(self)));
}
#endif

/*[clinic input]
_decimal.Context.Etiny

Return a value equal to Emin - prec + 1.

This is the minimum exponent value for subnormal results.  When
underflow occurs, the exponent is set to Etiny.
[clinic start generated code]*/

static PyObject *
_decimal_Context_Etiny_impl(PyObject *self)
/*[clinic end generated code: output=c9a4a1a3e3575289 input=1274040f303f2244]*/
{
    return PyLong_FromSsize_t(mpd_etiny(CTX(self)));
}

/*[clinic input]
_decimal.Context.Etop

Return a value equal to Emax - prec + 1.

This is the maximum exponent if the _clamp field of the context is set
to 1 (IEEE clamp mode).  Etop() must not be negative.
[clinic start generated code]*/ static PyObject * _decimal_Context_Etop_impl(PyObject *self) /*[clinic end generated code: output=f0a3f6e1b829074e input=838a4409316ec728]*/ { return PyLong_FromSsize_t(mpd_etop(CTX(self))); } static int context_setprec(PyObject *self, PyObject *value, void *Py_UNUSED(closure)) { mpd_context_t *ctx; mpd_ssize_t x; x = PyLong_AsSsize_t(value); if (x == -1 && PyErr_Occurred()) { return -1; } ctx = CTX(self); if (!mpd_qsetprec(ctx, x)) { return value_error_int( "valid range for prec is [1, MAX_PREC]"); } return 0; } static int context_setemin(PyObject *self, PyObject *value, void *Py_UNUSED(closure)) { mpd_context_t *ctx; mpd_ssize_t x; x = PyLong_AsSsize_t(value); if (x == -1 && PyErr_Occurred()) { return -1; } ctx = CTX(self); if (!mpd_qsetemin(ctx, x)) { return value_error_int( "valid range for Emin is [MIN_EMIN, 0]"); } return 0; } static int context_setemax(PyObject *self, PyObject *value, void *Py_UNUSED(closure)) { mpd_context_t *ctx; mpd_ssize_t x; x = PyLong_AsSsize_t(value); if (x == -1 && PyErr_Occurred()) { return -1; } ctx = CTX(self); if (!mpd_qsetemax(ctx, x)) { return value_error_int( "valid range for Emax is [0, MAX_EMAX]"); } return 0; } #ifdef CONFIG_32 /*[clinic input] _decimal.Context._unsafe_setprec x: Py_ssize_t / [clinic start generated code]*/ static PyObject * _decimal_Context__unsafe_setprec_impl(PyObject *self, Py_ssize_t x) /*[clinic end generated code: output=dd838edf08e12dd9 input=23a1b19ceb1569be]*/ { mpd_context_t *ctx = CTX(self); if (x < 1 || x > 1070000000L) { return value_error_ptr( "valid range for unsafe prec is [1, 1070000000]"); } ctx->prec = x; Py_RETURN_NONE; } /*[clinic input] _decimal.Context._unsafe_setemin x: Py_ssize_t / [clinic start generated code]*/ static PyObject * _decimal_Context__unsafe_setemin_impl(PyObject *self, Py_ssize_t x) /*[clinic end generated code: output=0c49cafee8a65846 input=652f1ecacca7e0ce]*/ { mpd_context_t *ctx = CTX(self); if (x < -1070000000L || x > 0) { return 
value_error_ptr( "valid range for unsafe emin is [-1070000000, 0]"); } ctx->emin = x; Py_RETURN_NONE; } /*[clinic input] _decimal.Context._unsafe_setemax x: Py_ssize_t / [clinic start generated code]*/ static PyObject * _decimal_Context__unsafe_setemax_impl(PyObject *self, Py_ssize_t x) /*[clinic end generated code: output=776563e0377a00e8 input=b2a32a9a2750e7a8]*/ { mpd_context_t *ctx = CTX(self); if (x < 0 || x > 1070000000L) { return value_error_ptr( "valid range for unsafe emax is [0, 1070000000]"); } ctx->emax = x; Py_RETURN_NONE; } #endif static int context_setround(PyObject *self, PyObject *value, void *Py_UNUSED(closure)) { mpd_context_t *ctx; int x; decimal_state *state = get_module_state_from_ctx(self); x = getround(state, value); if (x == -1) { return -1; } ctx = CTX(self); if (!mpd_qsetround(ctx, x)) { INTERNAL_ERROR_INT("context_setround"); /* GCOV_NOT_REACHED */ } return 0; } static int context_setcapitals(PyObject *self, PyObject *value, void *Py_UNUSED(closure)) { mpd_ssize_t x; x = PyLong_AsSsize_t(value); if (x == -1 && PyErr_Occurred()) { return -1; } if (x != 0 && x != 1) { return value_error_int( "valid values for capitals are 0 or 1"); } CtxCaps(self) = (int)x; return 0; } #ifdef EXTRA_FUNCTIONALITY static int context_settraps(PyObject *self, PyObject *value, void *Py_UNUSED(closure)) { mpd_context_t *ctx; uint32_t flags; flags = long_as_flags(value); if (flags & DEC_ERRORS) { return -1; } ctx = CTX(self); if (!mpd_qsettraps(ctx, flags)) { INTERNAL_ERROR_INT("context_settraps"); } return 0; } #endif static int context_settraps_list(PyObject *self, PyObject *value) { mpd_context_t *ctx; uint32_t flags; decimal_state *state = get_module_state_from_ctx(self); flags = list_as_flags(state, value); if (flags & DEC_ERRORS) { return -1; } ctx = CTX(self); if (!mpd_qsettraps(ctx, flags)) { INTERNAL_ERROR_INT("context_settraps_list"); } return 0; } static int context_settraps_dict(PyObject *self, PyObject *value) { mpd_context_t *ctx; uint32_t flags; 
    decimal_state *state = get_module_state_from_ctx(self);
    /* Fast path: another SignalDict exposes its flags directly;
       otherwise convert a plain {exception: bool} dict. */
    if (PyDecSignalDict_Check(state, value)) {
        flags = SdFlags(value);
    }
    else {
        flags = dict_as_flags(state, value);
        if (flags & DEC_ERRORS) {
            return -1;
        }
    }

    ctx = CTX(self);
    if (!mpd_qsettraps(ctx, flags)) {
        INTERNAL_ERROR_INT("context_settraps_dict");
    }

    return 0;
}

#ifdef EXTRA_FUNCTIONALITY
/* Set the status flags from a raw integer bitmask (debug builds only). */
static int
context_setstatus(PyObject *self, PyObject *value, void *Py_UNUSED(closure))
{
    mpd_context_t *ctx;
    uint32_t flags;

    flags = long_as_flags(value);
    if (flags & DEC_ERRORS) {
        return -1;
    }

    ctx = CTX(self);
    if (!mpd_qsetstatus(ctx, flags)) {
        INTERNAL_ERROR_INT("context_setstatus");
    }

    return 0;
}
#endif

/* Set the status flags from a list of signal exceptions. */
static int
context_setstatus_list(PyObject *self, PyObject *value)
{
    mpd_context_t *ctx;
    uint32_t flags;
    decimal_state *state = get_module_state_from_ctx(self);

    flags = list_as_flags(state, value);
    if (flags & DEC_ERRORS) {
        return -1;
    }

    ctx = CTX(self);
    if (!mpd_qsetstatus(ctx, flags)) {
        INTERNAL_ERROR_INT("context_setstatus_list");
    }

    return 0;
}

/* Set the status flags from a SignalDict or a plain {exception: bool}
   dict (mirrors context_settraps_dict above). */
static int
context_setstatus_dict(PyObject *self, PyObject *value)
{
    mpd_context_t *ctx;
    uint32_t flags;
    decimal_state *state = get_module_state_from_ctx(self);

    if (PyDecSignalDict_Check(state, value)) {
        flags = SdFlags(value);
    }
    else {
        flags = dict_as_flags(state, value);
        if (flags & DEC_ERRORS) {
            return -1;
        }
    }

    ctx = CTX(self);
    if (!mpd_qsetstatus(ctx, flags)) {
        INTERNAL_ERROR_INT("context_setstatus_dict");
    }

    return 0;
}

/* Setter for the 'clamp' attribute; libmpdec rejects anything but 0/1. */
static int
context_setclamp(PyObject *self, PyObject *value, void *Py_UNUSED(closure))
{
    mpd_context_t *ctx;
    mpd_ssize_t x;

    x = PyLong_AsSsize_t(value);
    if (x == -1 && PyErr_Occurred()) {
        return -1;
    }
    /* Narrowing to int below must not overflow. */
    BOUNDS_CHECK(x, INT_MIN, INT_MAX);

    ctx = CTX(self);
    if (!mpd_qsetclamp(ctx, (int)x)) {
        return value_error_int("valid values for clamp are 0 or 1");
    }

    return 0;
}

#ifdef EXTRA_FUNCTIONALITY
/* Setter for the '_allcr' (correct rounding) attribute. */
static int
context_setallcr(PyObject *self, PyObject *value, void *Py_UNUSED(closure))
{
    mpd_context_t *ctx;
    mpd_ssize_t x;

    x = PyLong_AsSsize_t(value);
    if (x == -1 &&
PyErr_Occurred()) { return -1; } BOUNDS_CHECK(x, INT_MIN, INT_MAX); ctx = CTX(self); if (!mpd_qsetcr(ctx, (int)x)) { return value_error_int("valid values for _allcr are 0 or 1"); } return 0; } #endif static PyObject * context_getattr(PyObject *self, PyObject *name) { PyObject *retval; if (PyUnicode_Check(name)) { if (PyUnicode_CompareWithASCIIString(name, "traps") == 0) { retval = ((PyDecContextObject *)self)->traps; return Py_NewRef(retval); } if (PyUnicode_CompareWithASCIIString(name, "flags") == 0) { retval = ((PyDecContextObject *)self)->flags; return Py_NewRef(retval); } } return PyObject_GenericGetAttr(self, name); } static int context_setattr(PyObject *self, PyObject *name, PyObject *value) { if (value == NULL) { PyErr_SetString(PyExc_AttributeError, "context attributes cannot be deleted"); return -1; } if (PyUnicode_Check(name)) { if (PyUnicode_CompareWithASCIIString(name, "traps") == 0) { return context_settraps_dict(self, value); } if (PyUnicode_CompareWithASCIIString(name, "flags") == 0) { return context_setstatus_dict(self, value); } } return PyObject_GenericSetAttr(self, name, value); } static int context_setattrs(PyObject *self, PyObject *prec, PyObject *rounding, PyObject *emin, PyObject *emax, PyObject *capitals, PyObject *clamp, PyObject *status, PyObject *traps) { int ret; if (prec != Py_None && context_setprec(self, prec, NULL) < 0) { return -1; } if (rounding != Py_None && context_setround(self, rounding, NULL) < 0) { return -1; } if (emin != Py_None && context_setemin(self, emin, NULL) < 0) { return -1; } if (emax != Py_None && context_setemax(self, emax, NULL) < 0) { return -1; } if (capitals != Py_None && context_setcapitals(self, capitals, NULL) < 0) { return -1; } if (clamp != Py_None && context_setclamp(self, clamp, NULL) < 0) { return -1; } if (traps != Py_None) { if (PyList_Check(traps)) { ret = context_settraps_list(self, traps); } #ifdef EXTRA_FUNCTIONALITY else if (PyLong_Check(traps)) { ret = context_settraps(self, traps, NULL); } 
#endif else { ret = context_settraps_dict(self, traps); } if (ret < 0) { return ret; } } if (status != Py_None) { if (PyList_Check(status)) { ret = context_setstatus_list(self, status); } #ifdef EXTRA_FUNCTIONALITY else if (PyLong_Check(status)) { ret = context_setstatus(self, status, NULL); } #endif else { ret = context_setstatus_dict(self, status); } if (ret < 0) { return ret; } } return 0; } /*[clinic input] _decimal.Context.clear_traps Set all traps to False. [clinic start generated code]*/ static PyObject * _decimal_Context_clear_traps_impl(PyObject *self) /*[clinic end generated code: output=b47cfa6e32407d40 input=3872e80637148035]*/ { CTX(self)->traps = 0; Py_RETURN_NONE; } /*[clinic input] _decimal.Context.clear_flags Reset all flags to False. [clinic start generated code]*/ static PyObject * _decimal_Context_clear_flags_impl(PyObject *self) /*[clinic end generated code: output=c86719a70177d0b6 input=a06055e2f3e7edb1]*/ { CTX(self)->status = 0; Py_RETURN_NONE; } #define DEC_DFLT_EMAX 999999 #define DEC_DFLT_EMIN -999999 static mpd_context_t dflt_ctx = { 28, DEC_DFLT_EMAX, DEC_DFLT_EMIN, MPD_IEEE_Invalid_operation|MPD_Division_by_zero|MPD_Overflow, 0, 0, MPD_ROUND_HALF_EVEN, 0, 1 }; static PyObject * context_new(PyTypeObject *type, PyObject *Py_UNUSED(args), PyObject *Py_UNUSED(kwds)) { PyDecContextObject *self = NULL; mpd_context_t *ctx; decimal_state *state = get_module_state_by_def(type); if (type == state->PyDecContext_Type) { self = PyObject_GC_New(PyDecContextObject, state->PyDecContext_Type); } else { self = (PyDecContextObject *)type->tp_alloc(type, 0); } if (self == NULL) { return NULL; } self->traps = PyObject_CallObject((PyObject *)state->PyDecSignalDict_Type, NULL); if (self->traps == NULL) { self->flags = NULL; Py_DECREF(self); return NULL; } self->flags = PyObject_CallObject((PyObject *)state->PyDecSignalDict_Type, NULL); if (self->flags == NULL) { Py_DECREF(self); return NULL; } ctx = CTX(self); if (state->default_context_template) { *ctx = 
*CTX(state->default_context_template); } else { *ctx = dflt_ctx; } SdFlagAddr(self->traps) = &ctx->traps; SdFlagAddr(self->flags) = &ctx->status; CtxCaps(self) = 1; self->tstate = NULL; self->modstate = state; if (type == state->PyDecContext_Type) { PyObject_GC_Track(self); } assert(PyObject_GC_IsTracked((PyObject *)self)); return (PyObject *)self; } static int context_traverse(PyObject *op, visitproc visit, void *arg) { PyDecContextObject *self = _PyDecContextObject_CAST(op); Py_VISIT(Py_TYPE(self)); Py_VISIT(self->traps); Py_VISIT(self->flags); return 0; } static int context_clear(PyObject *op) { PyDecContextObject *self = _PyDecContextObject_CAST(op); Py_CLEAR(self->traps); Py_CLEAR(self->flags); return 0; } static void context_dealloc(PyObject *self) { PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); (void)context_clear(self); tp->tp_free(self); Py_DECREF(tp); } /*[clinic input] _decimal.Context.__init__ as context_init prec: object = None rounding: object = None Emin as emin: object = None Emax as emax: object = None capitals: object = None clamp: object = None flags as status: object = None traps: object = None Create context. The context affects almost all operations and controls rounding, Over/Underflow, raising of exceptions and much more. A new context can be constructed as follows: >>> c = Context(prec=28, Emin=-425000000, Emax=425000000, ... rounding=ROUND_HALF_EVEN, capitals=1, clamp=1, ... traps=[InvalidOperation, DivisionByZero, Overflow], ... 
flags=[]) >>> [clinic start generated code]*/ static int context_init_impl(PyObject *self, PyObject *prec, PyObject *rounding, PyObject *emin, PyObject *emax, PyObject *capitals, PyObject *clamp, PyObject *status, PyObject *traps) /*[clinic end generated code: output=8bfdc59fbe862f44 input=45c704b93cd02959]*/ { return context_setattrs( self, prec, rounding, emin, emax, capitals, clamp, status, traps ); } static PyObject * context_repr(PyObject *self) { mpd_context_t *ctx; char flags[MPD_MAX_SIGNAL_LIST]; char traps[MPD_MAX_SIGNAL_LIST]; int n, mem; #ifdef Py_DEBUG decimal_state *state = get_module_state_from_ctx(self); assert(PyDecContext_Check(state, self)); #endif ctx = CTX(self); mem = MPD_MAX_SIGNAL_LIST; n = mpd_lsnprint_signals(flags, mem, ctx->status, dec_signal_string); if (n < 0 || n >= mem) { INTERNAL_ERROR_PTR("context_repr"); } n = mpd_lsnprint_signals(traps, mem, ctx->traps, dec_signal_string); if (n < 0 || n >= mem) { INTERNAL_ERROR_PTR("context_repr"); } return PyUnicode_FromFormat( "Context(prec=%zd, rounding=%s, Emin=%zd, Emax=%zd, " "capitals=%d, clamp=%d, flags=%s, traps=%s)", ctx->prec, mpd_round_string[ctx->round], ctx->emin, ctx->emax, CtxCaps(self), ctx->clamp, flags, traps); } static void init_basic_context(PyObject *v) { mpd_context_t ctx = dflt_ctx; ctx.prec = 9; ctx.traps |= (MPD_Underflow|MPD_Clamped); ctx.round = MPD_ROUND_HALF_UP; *CTX(v) = ctx; CtxCaps(v) = 1; } static void init_extended_context(PyObject *v) { mpd_context_t ctx = dflt_ctx; ctx.prec = 9; ctx.traps = 0; *CTX(v) = ctx; CtxCaps(v) = 1; } /* Factory function for creating IEEE interchange format contexts */ /*[clinic input] _decimal.IEEEContext bits: Py_ssize_t / Return a context, initialized as one of the IEEE interchange formats. The argument must be a multiple of 32 and less than IEEE_CONTEXT_MAX_BITS. 
[clinic start generated code]*/

static PyObject *
_decimal_IEEEContext_impl(PyObject *module, Py_ssize_t bits)
/*[clinic end generated code: output=19a35f320fe19789 input=5cff864d899eb2d7]*/
{
    PyObject *context;
    mpd_context_t ctx;

    /* mpd_ieee_context takes an int; reject values it cannot represent
       before the cast, then let it validate multiple-of-32 etc. */
    if (bits <= 0 || bits > INT_MAX) {
        goto error;
    }
    if (mpd_ieee_context(&ctx, (int)bits) < 0) {
        goto error;
    }

    decimal_state *state = get_module_state(module);
    context = PyObject_CallObject((PyObject *)state->PyDecContext_Type, NULL);
    if (context == NULL) {
        return NULL;
    }
    /* Overwrite the freshly constructed context with the IEEE settings. */
    *CTX(context) = ctx;

    return context;

error:
    PyErr_Format(PyExc_ValueError,
        "argument must be a multiple of 32, with a maximum of %d",
        MPD_IEEE_CONTEXT_MAX_BITS);

    return NULL;
}

/* Return a new context that duplicates v: same mpd_context_t settings
   and capitals flag, with newtrap reset.  Shared helper for copy(),
   __copy__() and the thread-local/contextvar machinery below. */
static PyObject *
context_copy(decimal_state *state, PyObject *v)
{
    PyObject *copy = PyObject_CallObject((PyObject *)state->PyDecContext_Type, NULL);
    if (copy == NULL) {
        return NULL;
    }

    *CTX(copy) = *CTX(v);
    CTX(copy)->newtrap = 0;
    CtxCaps(copy) = CtxCaps(v);

    return copy;
}

/*[clinic input]
_decimal.Context.copy

    cls: defining_class

Return a duplicate of the context with all flags cleared.
[clinic start generated code]*/ static PyObject * _decimal_Context_copy_impl(PyObject *self, PyTypeObject *cls) /*[clinic end generated code: output=31c9c8eeb0c0cf77 input=aef1c0bddabdf8f0]*/ { decimal_state *state = PyType_GetModuleState(cls); return context_copy(state, self); } /*[clinic input] _decimal.Context.__copy__ = _decimal.Context.copy [clinic start generated code]*/ static PyObject * _decimal_Context___copy___impl(PyObject *self, PyTypeObject *cls) /*[clinic end generated code: output=93552486e5fb0ab4 input=4a55dd22f6d31bcc]*/ { decimal_state *state = PyType_GetModuleState(cls); return context_copy(state, self); } /*[clinic input] _decimal.Context.__reduce__ = _decimal.Context.copy [clinic start generated code]*/ static PyObject * _decimal_Context___reduce___impl(PyObject *self, PyTypeObject *cls) /*[clinic end generated code: output=4e77de55efdbb56a input=787683f13d047ce8]*/ { PyObject *flags; PyObject *traps; PyObject *ret; mpd_context_t *ctx; decimal_state *state = PyType_GetModuleState(cls); ctx = CTX(self); flags = signals_as_list(state, ctx->status); if (flags == NULL) { return NULL; } traps = signals_as_list(state, ctx->traps); if (traps == NULL) { Py_DECREF(flags); return NULL; } ret = Py_BuildValue( "O(nsnniiOO)", Py_TYPE(self), ctx->prec, mpd_round_string[ctx->round], ctx->emin, ctx->emax, CtxCaps(self), ctx->clamp, flags, traps ); Py_DECREF(flags); Py_DECREF(traps); return ret; } static PyGetSetDef context_getsets [] = { { "prec", context_getprec, context_setprec, NULL, NULL}, { "Emax", context_getemax, context_setemax, NULL, NULL}, { "Emin", context_getemin, context_setemin, NULL, NULL}, { "rounding", context_getround, context_setround, NULL, NULL}, { "capitals", context_getcapitals, context_setcapitals, NULL, NULL}, { "clamp", context_getclamp, context_setclamp, NULL, NULL}, #ifdef EXTRA_FUNCTIONALITY { "_allcr", context_getallcr, context_setallcr, NULL, NULL}, { "_traps", context_gettraps, context_settraps, NULL, NULL}, { "_flags", 
context_getstatus, context_setstatus, NULL, NULL}, #endif {NULL} }; #define CONTEXT_CHECK(state, obj) \ if (!PyDecContext_Check(state, obj)) { \ PyErr_SetString(PyExc_TypeError, \ "argument must be a context"); \ return NULL; \ } #define CONTEXT_CHECK_VA(state, obj) \ if (obj == Py_None) { \ CURRENT_CONTEXT(state, obj); \ } \ else if (!PyDecContext_Check(state, obj)) { \ PyErr_SetString(PyExc_TypeError, \ "optional argument must be a context"); \ return NULL; \ } /******************************************************************************/ /* Global, thread local and temporary contexts */ /******************************************************************************/ /* * Thread local storage currently has a speed penalty of about 4%. * All functions that map Python's arithmetic operators to mpdecimal * functions have to look up the current context for each and every * operation. */ #ifndef WITH_DECIMAL_CONTEXTVAR /* Get the context from the thread state dictionary. */ static PyObject * current_context_from_dict(decimal_state *modstate) { PyThreadState *tstate = _PyThreadState_GET(); #ifdef Py_DEBUG // The caller must hold the GIL _Py_EnsureTstateNotNULL(tstate); #endif PyObject *dict = _PyThreadState_GetDict(tstate); if (dict == NULL) { PyErr_SetString(PyExc_RuntimeError, "cannot get thread state"); return NULL; } PyObject *tl_context; tl_context = PyDict_GetItemWithError(dict, modstate->tls_context_key); if (tl_context != NULL) { /* We already have a thread local context. */ CONTEXT_CHECK(modstate, tl_context); } else { if (PyErr_Occurred()) { return NULL; } /* Set up a new thread local context. 
         */
        tl_context = context_copy(modstate, modstate->default_context_template);
        if (tl_context == NULL) {
            return NULL;
        }
        CTX(tl_context)->status = 0;

        if (PyDict_SetItem(dict, modstate->tls_context_key, tl_context) < 0) {
            Py_DECREF(tl_context);
            return NULL;
        }
        Py_DECREF(tl_context);
    }

    /* Cache the context of the current thread, assuming that it
     * will be accessed several times before a thread switch. */
    Py_XSETREF(modstate->cached_context,
               (PyDecContextObject *)Py_NewRef(tl_context));
    modstate->cached_context->tstate = tstate;

    /* Borrowed reference with refcount==1 */
    return tl_context;
}

/* Return borrowed reference to thread local context. */
static PyObject *
current_context(decimal_state *modstate)
{
    /* Fast path: the cache hit is valid only if it belongs to the
       calling thread; otherwise fall back to the tstate dict. */
    PyThreadState *tstate = _PyThreadState_GET();
    if (modstate->cached_context &&
        modstate->cached_context->tstate == tstate) {
        return (PyObject *)(modstate->cached_context);
    }

    return current_context_from_dict(modstate);
}

/* ctxobj := borrowed reference to the current context */
#define CURRENT_CONTEXT(STATE, CTXOBJ) \
    do { \
        CTXOBJ = current_context(STATE); \
        if (CTXOBJ == NULL) { \
            return NULL; \
        } \
    } while (0)

/* Return a new reference to the current context */
static PyObject *
PyDec_GetCurrentContext(PyObject *self)
{
    PyObject *context;
    decimal_state *state = get_module_state(self);

    CURRENT_CONTEXT(state, context);
    return Py_NewRef(context);
}

/* Set the thread local context to a new context, decrement old reference */
static PyObject *
PyDec_SetCurrentContext(PyObject *self, PyObject *v)
{
    PyObject *dict;
    decimal_state *state = get_module_state(self);

    CONTEXT_CHECK(state, v);

    dict = PyThreadState_GetDict();
    if (dict == NULL) {
        PyErr_SetString(PyExc_RuntimeError, "cannot get thread state");
        return NULL;
    }

    /* If the new context is one of the templates, make a copy.
     * This is the current behavior of decimal.py.
*/ if (v == state->default_context_template || v == state->basic_context_template || v == state->extended_context_template) { v = context_copy(state, v); if (v == NULL) { return NULL; } CTX(v)->status = 0; } else { Py_INCREF(v); } Py_CLEAR(state->cached_context); if (PyDict_SetItem(dict, state->tls_context_key, v) < 0) { Py_DECREF(v); return NULL; } Py_DECREF(v); Py_RETURN_NONE; } #else static PyObject * init_current_context(decimal_state *state) { PyObject *tl_context = context_copy(state, state->default_context_template); if (tl_context == NULL) { return NULL; } CTX(tl_context)->status = 0; PyObject *tok = PyContextVar_Set(state->current_context_var, tl_context); if (tok == NULL) { Py_DECREF(tl_context); return NULL; } Py_DECREF(tok); return tl_context; } static inline PyObject * current_context(decimal_state *state) { PyObject *tl_context; if (PyContextVar_Get(state->current_context_var, NULL, &tl_context) < 0) { return NULL; } if (tl_context != NULL) { return tl_context; } return init_current_context(state); } /* ctxobj := borrowed reference to the current context */ #define CURRENT_CONTEXT(STATE, CTXOBJ) \ do { \ CTXOBJ = current_context(STATE); \ if (CTXOBJ == NULL) { \ return NULL; \ } \ Py_DECREF(CTXOBJ); \ } while (0) /* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(PyObject *self) { decimal_state *state = get_module_state(self); return current_context(state); } /* Set the thread local context to a new context, decrement old reference */ static PyObject * PyDec_SetCurrentContext(PyObject *self, PyObject *v) { decimal_state *state = get_module_state(self); CONTEXT_CHECK(state, v); /* If the new context is one of the templates, make a copy. * This is the current behavior of decimal.py. 
*/ if (v == state->default_context_template || v == state->basic_context_template || v == state->extended_context_template) { v = context_copy(state, v); if (v == NULL) { return NULL; } CTX(v)->status = 0; } else { Py_INCREF(v); } PyObject *tok = PyContextVar_Set(state->current_context_var, v); Py_DECREF(v); if (tok == NULL) { return NULL; } Py_DECREF(tok); Py_RETURN_NONE; } #endif /*[clinic input] _decimal.getcontext Get the current default context. [clinic start generated code]*/ static PyObject * _decimal_getcontext_impl(PyObject *module) /*[clinic end generated code: output=5982062c4d39e3dd input=7ac316fe42a1b6f5]*/ { return PyDec_GetCurrentContext(module); } /*[clinic input] _decimal.setcontext context: object / Set a new default context. [clinic start generated code]*/ static PyObject * _decimal_setcontext(PyObject *module, PyObject *context) /*[clinic end generated code: output=8065f870be2852ce input=b57d7ee786b022a6]*/ { return PyDec_SetCurrentContext(module, context); } /* Context manager object for the 'with' statement. The manager * owns one reference to the global (outer) context and one * to the local (inner) context. */ /*[clinic input] @text_signature "($module, /, ctx=None, **kwargs)" _decimal.localcontext ctx as local: object = None * prec: object = None rounding: object = None Emin: object = None Emax: object = None capitals: object = None clamp: object = None flags: object = None traps: object = None Return a context manager for a copy of the supplied context. That will set the default context to a copy of ctx on entry to the with-statement and restore the previous default context when exiting the with-statement. If no context is specified, a copy of the current default context is used. 
[clinic start generated code]*/

static PyObject *
_decimal_localcontext_impl(PyObject *module, PyObject *local,
                           PyObject *prec, PyObject *rounding,
                           PyObject *Emin, PyObject *Emax,
                           PyObject *capitals, PyObject *clamp,
                           PyObject *flags, PyObject *traps)
/*[clinic end generated code: output=9bf4e47742a809b0 input=490307b9689c3856]*/
{
    PyObject *global;

    decimal_state *state = get_module_state(module);
    CURRENT_CONTEXT(state, global);
    /* With no explicit context, base the manager on the current
       thread-local context. */
    if (local == Py_None) {
        local = global;
    }
    else if (!PyDecContext_Check(state, local)) {
        PyErr_SetString(PyExc_TypeError,
            "optional argument must be a context");
        return NULL;
    }

    /* Operate on a copy so that neither the caller's context object nor
       the current global context is mutated by the keyword overrides. */
    PyObject *local_copy = context_copy(state, local);
    if (local_copy == NULL) {
        return NULL;
    }

    /* Apply the optional keyword overrides (prec, rounding, ...) to the
       copy; context_setattrs reports errors with a negative return. */
    int ret = context_setattrs(
        local_copy, prec, rounding, Emin, Emax,
        capitals, clamp, flags, traps
    );
    if (ret < 0) {
        Py_DECREF(local_copy);
        return NULL;
    }

    /* The manager stores the modified copy (installed by __enter__) and
       the context that is current now (restored by __exit__). */
    PyDecContextManagerObject *self;
    self = PyObject_GC_New(PyDecContextManagerObject,
                           state->PyDecContextManager_Type);
    if (self == NULL) {
        Py_DECREF(local_copy);
        return NULL;
    }

    self->local = local_copy;
    self->global = Py_NewRef(global);
    PyObject_GC_Track(self);

    return (PyObject *)self;
}

/* GC traversal: visit the heap type and both stored context references. */
static int
ctxmanager_traverse(PyObject *op, visitproc visit, void *arg)
{
    PyDecContextManagerObject *self = _PyDecContextManagerObject_CAST(op);
    Py_VISIT(Py_TYPE(self));
    Py_VISIT(self->local);
    Py_VISIT(self->global);
    return 0;
}

/* Drop both context references (used by GC clear and by dealloc). */
static int
ctxmanager_clear(PyObject *op)
{
    PyDecContextManagerObject *self = _PyDecContextManagerObject_CAST(op);
    Py_CLEAR(self->local);
    Py_CLEAR(self->global);
    return 0;
}

static void
ctxmanager_dealloc(PyObject *self)
{
    PyTypeObject *tp = Py_TYPE(self);
    PyObject_GC_UnTrack(self);
    (void)ctxmanager_clear(self);
    tp->tp_free(self);
    /* Instances of a heap type own a reference to their type. */
    Py_DECREF(tp);
}

/* __enter__: install the stored local context as the thread's current
   context and return it to the with-statement target. */
static PyObject *
ctxmanager_set_local(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyObject *ret;
    PyDecContextManagerObject *self = _PyDecContextManagerObject_CAST(op);

    ret = PyDec_SetCurrentContext(PyType_GetModule(Py_TYPE(self)), self->local);
    if (ret == NULL) {
        return NULL;
    }
    Py_DECREF(ret);

    return Py_NewRef(self->local);
}

/* __exit__: restore the context that was current when the manager was
   created.  Always returns None, so exceptions are never suppressed. */
static PyObject *
ctxmanager_restore_global(PyObject *op, PyObject *Py_UNUSED(args))
{
    PyObject *ret;
    PyDecContextManagerObject *self = _PyDecContextManagerObject_CAST(op);

    ret = PyDec_SetCurrentContext(PyType_GetModule(Py_TYPE(self)), self->global);
    if (ret == NULL) {
        return NULL;
    }
    Py_DECREF(ret);

    Py_RETURN_NONE;
}

static PyMethodDef ctxmanager_methods[] = {
  {"__enter__", ctxmanager_set_local, METH_NOARGS, NULL},
  {"__exit__", ctxmanager_restore_global, METH_VARARGS, NULL},
  {NULL, NULL}
};

static PyType_Slot ctxmanager_slots[] = {
    {Py_tp_dealloc, ctxmanager_dealloc},
    {Py_tp_getattro, PyObject_GenericGetAttr},
    {Py_tp_traverse, ctxmanager_traverse},
    {Py_tp_clear, ctxmanager_clear},
    {Py_tp_methods, ctxmanager_methods},
    {0, NULL},
};

static PyType_Spec ctxmanager_spec = {
    .name = "decimal.ContextManager",
    .basicsize = sizeof(PyDecContextManagerObject),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
              Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_DISALLOW_INSTANTIATION),
    .slots = ctxmanager_slots,
};


/******************************************************************************/
/*                           New Decimal Object                               */
/******************************************************************************/

/* Allocate an uninitialized Decimal of the given (sub)type.  The embedded
   mpd_t initially points at the small static coefficient buffer inside the
   PyDecObject (_Py_DEC_MINALLOC words); the MPD_STATIC flags tell libmpdec
   to switch to dynamic storage only when more digits are required. */
static PyObject *
PyDecType_New(decimal_state *state, PyTypeObject *type)
{
    PyDecObject *dec;

    if (type == state->PyDec_Type) {
        dec = PyObject_GC_New(PyDecObject, state->PyDec_Type);
    }
    else {
        dec = (PyDecObject *)type->tp_alloc(type, 0);
    }
    if (dec == NULL) {
        return NULL;
    }

    dec->hash = -1;  /* hash is computed lazily; -1 means "not cached yet" */

    MPD(dec)->flags = MPD_STATIC|MPD_STATIC_DATA;
    MPD(dec)->exp = 0;
    MPD(dec)->digits = 0;
    MPD(dec)->len = 0;
    MPD(dec)->alloc = _Py_DEC_MINALLOC;
    MPD(dec)->data = dec->data;

    /* The subtype path is tracked by tp_alloc already; the exact-type
       fast path must track explicitly. */
    if (type == state->PyDec_Type) {
        PyObject_GC_Track(dec);
    }
    assert(PyObject_GC_IsTracked((PyObject *)dec));
    return (PyObject *)dec;
}
#define dec_alloc(st) PyDecType_New(st, (st)->PyDec_Type)

static void
dec_dealloc(PyObject *dec)
{
    PyTypeObject *tp = Py_TYPE(dec);
    PyObject_GC_UnTrack(dec);
    /* mpd_del frees the coefficient only if libmpdec switched the mpd_t
       to dynamically allocated data. */
    mpd_del(MPD(dec));
    tp->tp_free(dec);
    Py_DECREF(tp);
}


/******************************************************************************/
/*                           Conversions to Decimal                           */
/******************************************************************************/

/* Return nonzero if the code point at 'pos' is Unicode whitespace. */
Py_LOCAL_INLINE(int)
is_space(int kind, const void *data, Py_ssize_t pos)
{
    Py_UCS4 ch = PyUnicode_READ(kind, data, pos);
    return Py_UNICODE_ISSPACE(ch);
}

/* Return the ASCII representation of a numeric Unicode string. The numeric
   string may contain ascii characters in the range [1, 127], any Unicode
   space and any unicode digit. If strip_ws is true, leading and trailing
   whitespace is stripped. If ignore_underscores is true, underscores are
   ignored.

   Return NULL if malloc fails and an empty string if invalid characters
   are found. */
static char *
numeric_as_ascii(PyObject *u, int strip_ws, int ignore_underscores)
{
    int kind;
    const void *data;
    Py_UCS4 ch;
    char *res, *cp;
    Py_ssize_t j, len;
    int d;

    kind = PyUnicode_KIND(u);
    data = PyUnicode_DATA(u);
    len = PyUnicode_GET_LENGTH(u);

    /* Output is at most one byte per input code point (+NUL):
       underscores shrink the output and everything else maps 1:1. */
    cp = res = PyMem_Malloc(len+1);
    if (res == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    j = 0;
    if (strip_ws) {
        while (len > 0 && is_space(kind, data, len-1)) {
            len--;
        }
        while (j < len && is_space(kind, data, j)) {
            j++;
        }
    }

    for (; j < len; j++) {
        ch = PyUnicode_READ(kind, data, j);
        if (ignore_underscores && ch == '_') {
            continue;
        }
        if (0 < ch && ch <= 127) {
            *cp++ = ch;
            continue;
        }
        if (Py_UNICODE_ISSPACE(ch)) {
            /* non-ASCII whitespace is normalized to a plain space */
            *cp++ = ' ';
            continue;
        }
        d = Py_UNICODE_TODECIMAL(ch);
        if (d < 0) {
            /* empty string triggers ConversionSyntax */
            *res = '\0';
            return res;
        }
        /* non-ASCII Unicode digits are mapped to their ASCII equivalent */
        *cp++ = '0' + d;
    }
    *cp = '\0';
    return res;
}

/* Return a new PyDecObject or a subtype from a C string. Use the context
   during conversion.
*/ static PyObject * PyDecType_FromCString(PyTypeObject *type, const char *s, PyObject *context) { PyObject *dec; uint32_t status = 0; decimal_state *state = get_module_state_from_ctx(context); dec = PyDecType_New(state, type); if (dec == NULL) { return NULL; } mpd_qset_string(MPD(dec), s, CTX(context), &status); if (dec_addstatus(context, status)) { Py_DECREF(dec); return NULL; } return dec; } /* Return a new PyDecObject or a subtype from a C string. Attempt exact conversion. If the operand cannot be converted exactly, set InvalidOperation. */ static PyObject * PyDecType_FromCStringExact(PyTypeObject *type, const char *s, PyObject *context) { PyObject *dec; uint32_t status = 0; mpd_context_t maxctx; decimal_state *state = get_module_state_from_ctx(context); dec = PyDecType_New(state, type); if (dec == NULL) { return NULL; } mpd_maxcontext(&maxctx); mpd_qset_string(MPD(dec), s, &maxctx, &status); if (status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) { /* we want exact results */ mpd_seterror(MPD(dec), MPD_Invalid_operation, &status); } status &= MPD_Errors; if (dec_addstatus(context, status)) { Py_DECREF(dec); return NULL; } return dec; } /* Return a new PyDecObject or a subtype from a PyUnicodeObject. */ static PyObject * PyDecType_FromUnicode(PyTypeObject *type, PyObject *u, PyObject *context) { PyObject *dec; char *s; s = numeric_as_ascii(u, 0, 0); if (s == NULL) { return NULL; } dec = PyDecType_FromCString(type, s, context); PyMem_Free(s); return dec; } /* Return a new PyDecObject or a subtype from a PyUnicodeObject. Attempt exact * conversion. If the conversion is not exact, fail with InvalidOperation. * Allow leading and trailing whitespace in the input operand. 
*/ static PyObject * PyDecType_FromUnicodeExactWS(PyTypeObject *type, PyObject *u, PyObject *context) { PyObject *dec; char *s; s = numeric_as_ascii(u, 1, 1); if (s == NULL) { return NULL; } dec = PyDecType_FromCStringExact(type, s, context); PyMem_Free(s); return dec; } /* Set PyDecObject from triple without any error checking. */ Py_LOCAL_INLINE(void) _dec_settriple(PyObject *dec, uint8_t sign, uint32_t v, mpd_ssize_t exp) { #ifdef CONFIG_64 MPD(dec)->data[0] = v; MPD(dec)->len = 1; #else uint32_t q, r; q = v / MPD_RADIX; r = v - q * MPD_RADIX; MPD(dec)->data[1] = q; MPD(dec)->data[0] = r; MPD(dec)->len = q ? 2 : 1; #endif mpd_set_flags(MPD(dec), sign); MPD(dec)->exp = exp; mpd_setdigits(MPD(dec)); } /* Return a new PyDecObject from an mpd_ssize_t. */ static PyObject * PyDecType_FromSsize(PyTypeObject *type, mpd_ssize_t v, PyObject *context) { PyObject *dec; uint32_t status = 0; decimal_state *state = get_module_state_from_ctx(context); dec = PyDecType_New(state, type); if (dec == NULL) { return NULL; } mpd_qset_ssize(MPD(dec), v, CTX(context), &status); if (dec_addstatus(context, status)) { Py_DECREF(dec); return NULL; } return dec; } /* Return a new PyDecObject from an mpd_ssize_t. Conversion is exact. */ static PyObject * PyDecType_FromSsizeExact(PyTypeObject *type, mpd_ssize_t v, PyObject *context) { PyObject *dec; uint32_t status = 0; mpd_context_t maxctx; decimal_state *state = get_module_state_from_ctx(context); dec = PyDecType_New(state, type); if (dec == NULL) { return NULL; } mpd_maxcontext(&maxctx); mpd_qset_ssize(MPD(dec), v, &maxctx, &status); if (dec_addstatus(context, status)) { Py_DECREF(dec); return NULL; } return dec; } /* Convert from a PyLongObject. The context is not modified; flags set during conversion are accumulated in the status parameter. 
*/ static PyObject * dec_from_long(decimal_state *state, PyTypeObject *type, PyObject *v, const mpd_context_t *ctx, uint32_t *status) { PyObject *dec = PyDecType_New(state, type); if (dec == NULL) { return NULL; } PyLongExport export_long; if (PyLong_Export(v, &export_long) == -1) { Py_DECREF(dec); return NULL; } if (export_long.digits) { const PyLongLayout *layout = PyLong_GetNativeLayout(); assert(layout->bits_per_digit < 32); assert(layout->digits_order == -1); assert(layout->digit_endianness == (PY_LITTLE_ENDIAN ? -1 : 1)); assert(layout->digit_size == 2 || layout->digit_size == 4); uint32_t base = (uint32_t)1 << layout->bits_per_digit; uint8_t sign = export_long.negative ? MPD_NEG : MPD_POS; Py_ssize_t len = export_long.ndigits; if (layout->digit_size == 4) { mpd_qimport_u32(MPD(dec), export_long.digits, len, sign, base, ctx, status); } else { mpd_qimport_u16(MPD(dec), export_long.digits, len, sign, base, ctx, status); } PyLong_FreeExport(&export_long); } else { mpd_qset_i64(MPD(dec), export_long.value, ctx, status); } return dec; } /* Return a new PyDecObject from a PyLongObject. Use the context for conversion. */ static PyObject * PyDecType_FromLong(PyTypeObject *type, PyObject *v, PyObject *context) { PyObject *dec; uint32_t status = 0; if (!PyLong_Check(v)) { PyErr_SetString(PyExc_TypeError, "argument must be an integer"); return NULL; } decimal_state *state = get_module_state_from_ctx(context); dec = dec_from_long(state, type, v, CTX(context), &status); if (dec == NULL) { return NULL; } if (dec_addstatus(context, status)) { Py_DECREF(dec); return NULL; } return dec; } /* Return a new PyDecObject from a PyLongObject. Use a maximum context for conversion. If the conversion is not exact, set InvalidOperation. 
*/
static PyObject *
PyDecType_FromLongExact(PyTypeObject *type, PyObject *v,
                        PyObject *context)
{
    PyObject *dec;
    uint32_t status = 0;
    mpd_context_t maxctx;

    if (!PyLong_Check(v)) {
        PyErr_SetString(PyExc_TypeError, "argument must be an integer");
        return NULL;
    }

    mpd_maxcontext(&maxctx);
    decimal_state *state = get_module_state_from_ctx(context);
    dec = dec_from_long(state, type, v, &maxctx, &status);
    if (dec == NULL) {
        return NULL;
    }

    if (status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
        /* we want exact results */
        mpd_seterror(MPD(dec), MPD_Invalid_operation, &status);
    }
    status &= MPD_Errors;
    if (dec_addstatus(context, status)) {
        Py_DECREF(dec);
        return NULL;
    }

    return dec;
}

/* Return a PyDecObject or a subtype from a PyFloatObject.
   Conversion is exact. */
static PyObject *
PyDecType_FromFloatExact(PyTypeObject *type, PyObject *v,
                         PyObject *context)
{
    PyObject *dec, *tmp;
    PyObject *n, *d, *n_d;
    mpd_ssize_t k;
    double x;
    int sign;
    mpd_t *d1, *d2;
    uint32_t status = 0;
    mpd_context_t maxctx;
    decimal_state *state = get_module_state_from_ctx(context);

#ifdef Py_DEBUG
    assert(PyType_IsSubtype(type, state->PyDec_Type));
#endif

    /* ints are accepted here as well (exact by definition) */
    if (PyLong_Check(v)) {
        return PyDecType_FromLongExact(type, v, context);
    }
    if (!PyFloat_Check(v)) {
        PyErr_SetString(PyExc_TypeError,
            "argument must be int or float");
        return NULL;
    }

    x = PyFloat_AsDouble(v);
    if (x == -1.0 && PyErr_Occurred()) {
        return NULL;
    }
    sign = (copysign(1.0, x) == 1.0) ? 0 : 1;

    /* Special values short-circuit: no exact binary->decimal math needed. */
    if (isnan(x) || isinf(x)) {
        dec = PyDecType_New(state, type);
        if (dec == NULL) {
            return NULL;
        }
        if (isnan(x)) {
            /* decimal.py calls repr(float(+-nan)),
             * which always gives a positive result. */
            mpd_setspecial(MPD(dec), MPD_POS, MPD_NAN);
        }
        else {
            mpd_setspecial(MPD(dec), sign, MPD_INF);
        }
        return dec;
    }

    /* absolute value of the float */
    tmp = state->_py_float_abs(v);
    if (tmp == NULL) {
        return NULL;
    }

    /* float as integer ratio: numerator/denominator */
    n_d = state->_py_float_as_integer_ratio(tmp, NULL);
    Py_DECREF(tmp);
    if (n_d == NULL) {
        return NULL;
    }
    n = PyTuple_GET_ITEM(n_d, 0);
    d = PyTuple_GET_ITEM(n_d, 1);

    /* For a finite float the denominator is a power of two, so
       k = d.bit_length() - 1 gives d == 2**k exactly. */
    tmp = state->_py_long_bit_length(d, NULL);
    if (tmp == NULL) {
        Py_DECREF(n_d);
        return NULL;
    }
    k = PyLong_AsSsize_t(tmp);
    Py_DECREF(tmp);
    if (k == -1 && PyErr_Occurred()) {
        Py_DECREF(n_d);
        return NULL;
    }
    k--;

    dec = PyDecType_FromLongExact(type, n, context);
    Py_DECREF(n_d);
    if (dec == NULL) {
        return NULL;
    }

    d1 = mpd_qnew();
    if (d1 == NULL) {
        Py_DECREF(dec);
        PyErr_NoMemory();
        return NULL;
    }
    d2 = mpd_qnew();
    if (d2 == NULL) {
        mpd_del(d1);
        Py_DECREF(dec);
        PyErr_NoMemory();
        return NULL;
    }

    mpd_maxcontext(&maxctx);
    mpd_qset_uint(d1, 5, &maxctx, &status);
    mpd_qset_ssize(d2, k, &maxctx, &status);
    mpd_qpow(d1, d1, d2, &maxctx, &status);
    if (dec_addstatus(context, status)) {
        mpd_del(d1);
        mpd_del(d2);
        Py_DECREF(dec);
        return NULL;
    }

    /* result = n * 5**k */
    mpd_qmul(MPD(dec), MPD(dec), d1, &maxctx, &status);
    mpd_del(d1);
    mpd_del(d2);
    if (dec_addstatus(context, status)) {
        Py_DECREF(dec);
        return NULL;
    }

    /* result = +- n * 5**k * 10**-k
       (equals n / 2**k, the exact value of the float, since
        5**k * 10**-k == 2**-k) */
    mpd_set_sign(MPD(dec), sign);
    MPD(dec)->exp = -k;

    return dec;
}

/* Like PyDecType_FromFloatExact, but round the exact result to the
   caller's context afterwards. */
static PyObject *
PyDecType_FromFloat(PyTypeObject *type, PyObject *v,
                    PyObject *context)
{
    PyObject *dec;
    uint32_t status = 0;

    dec = PyDecType_FromFloatExact(type, v, context);
    if (dec == NULL) {
        return NULL;
    }

    mpd_qfinalize(MPD(dec), CTX(context), &status);
    if (dec_addstatus(context, status)) {
        Py_DECREF(dec);
        return NULL;
    }

    return dec;
}

/* Return a new PyDecObject or a subtype from a Decimal.
*/
static PyObject *
PyDecType_FromDecimalExact(PyTypeObject *type, PyObject *v, PyObject *context)
{
    PyObject *dec;
    uint32_t status = 0;

    decimal_state *state = get_module_state_from_ctx(context);
    /* Fast path: an exact Decimal converted to the exact Decimal type can
       be shared instead of copied (Decimal is immutable). */
    if (type == state->PyDec_Type && PyDec_CheckExact(state, v)) {
        return Py_NewRef(v);
    }

    dec = PyDecType_New(state, type);
    if (dec == NULL) {
        return NULL;
    }

    mpd_qcopy(MPD(dec), MPD(v), &status);
    if (dec_addstatus(context, status)) {
        Py_DECREF(dec);
        return NULL;
    }

    return dec;
}

/* Return v as a tuple: tuples are passed through, lists are converted.
   Any other type raises 'ex' with message 'mesg'. */
static PyObject *
sequence_as_tuple(PyObject *v, PyObject *ex, const char *mesg)
{
    if (PyTuple_Check(v)) {
        return Py_NewRef(v);
    }
    if (PyList_Check(v)) {
        return PyList_AsTuple(v);
    }

    PyErr_SetString(ex, mesg);
    return NULL;
}

/* Return a new C string representation of a DecimalTuple. */
static char *
dectuple_as_str(PyObject *dectuple)
{
    PyObject *digits = NULL, *tmp;
    char *decstring = NULL;
    /* holds sign plus "Inf"/"NaN"/"sNaN": at most "-sNaN\0" = 6 bytes */
    char sign_special[6];
    char *cp;
    long sign, l;
    mpd_ssize_t exp = 0;
    Py_ssize_t i, mem, tsize;
    int is_infinite = 0;
    int n;

    assert(PyTuple_Check(dectuple));

    if (PyTuple_Size(dectuple) != 3) {
        PyErr_SetString(PyExc_ValueError,
            "argument must be a sequence of length 3");
        goto error;
    }

    /* sign */
    tmp = PyTuple_GET_ITEM(dectuple, 0);
    if (!PyLong_Check(tmp)) {
        PyErr_SetString(PyExc_ValueError,
            "sign must be an integer with the value 0 or 1");
        goto error;
    }
    sign = PyLong_AsLong(tmp);
    if (sign == -1 && PyErr_Occurred()) {
        goto error;
    }
    if (sign != 0 && sign != 1) {
        PyErr_SetString(PyExc_ValueError,
            "sign must be an integer with the value 0 or 1");
        goto error;
    }
    sign_special[0] = sign ? '-' : '+';
    sign_special[1] = '\0';

    /* exponent or encoding for a special number */
    tmp = PyTuple_GET_ITEM(dectuple, 2);
    if (PyUnicode_Check(tmp)) {
        /* special */
        if (PyUnicode_CompareWithASCIIString(tmp, "F") == 0) {
            strcat(sign_special, "Inf");
            is_infinite = 1;
        }
        else if (PyUnicode_CompareWithASCIIString(tmp, "n") == 0) {
            strcat(sign_special, "NaN");
        }
        else if (PyUnicode_CompareWithASCIIString(tmp, "N") == 0) {
            strcat(sign_special, "sNaN");
        }
        else {
            PyErr_SetString(PyExc_ValueError,
                "string argument in the third position "
                "must be 'F', 'n' or 'N'");
            goto error;
        }
    }
    else {
        /* exponent */
        if (!PyLong_Check(tmp)) {
            PyErr_SetString(PyExc_ValueError,
                "exponent must be an integer");
            goto error;
        }
        exp = PyLong_AsSsize_t(tmp);
        if (exp == -1 && PyErr_Occurred()) {
            goto error;
        }
    }

    /* coefficient */
    digits = sequence_as_tuple(PyTuple_GET_ITEM(dectuple, 1), PyExc_ValueError,
                               "coefficient must be a tuple of digits");
    if (digits == NULL) {
        goto error;
    }

    tsize = PyTuple_Size(digits);
    /* [sign][coeffdigits+1][E][-][expdigits+1]['\0'] */
    mem = 1 + tsize + 3 + MPD_EXPDIGITS + 2;
    cp = decstring = PyMem_Malloc(mem);
    if (decstring == NULL) {
        PyErr_NoMemory();
        goto error;
    }

    n = snprintf(cp, mem, "%s", sign_special);
    if (n < 0 || n >= mem) {
        PyErr_SetString(PyExc_RuntimeError,
            "internal error in dec_sequence_as_str");
        goto error;
    }
    cp += n;

    if (tsize == 0 && sign_special[1] == '\0') {
        /* empty tuple: zero coefficient, except for special numbers */
        *cp++ = '0';
    }
    for (i = 0; i < tsize; i++) {
        tmp = PyTuple_GET_ITEM(digits, i);
        if (!PyLong_Check(tmp)) {
            PyErr_SetString(PyExc_ValueError,
                "coefficient must be a tuple of digits");
            goto error;
        }
        l = PyLong_AsLong(tmp);
        if (l == -1 && PyErr_Occurred()) {
            goto error;
        }
        if (l < 0 || l > 9) {
            PyErr_SetString(PyExc_ValueError,
                "coefficient must be a tuple of digits");
            goto error;
        }
        if (is_infinite) {
            /* accept but ignore any well-formed coefficient for compatibility
               with decimal.py */
            continue;
        }
        *cp++ = (char)l + '0';
    }
    *cp = '\0';

    if (sign_special[1] == '\0') {
        /* not a special number: append the exponent ("E<exp>") */
        *cp++ = 'E';
        n = snprintf(cp, MPD_EXPDIGITS+2, "%" PRI_mpd_ssize_t, exp);
        if (n < 0 || n >= MPD_EXPDIGITS+2) {
            PyErr_SetString(PyExc_RuntimeError,
                "internal error in dec_sequence_as_str");
            goto error;
        }
    }

    Py_XDECREF(digits);
    return decstring;

error:
    Py_XDECREF(digits);
    if (decstring) PyMem_Free(decstring);
    return NULL;
}

/* Currently accepts tuples and lists. */
static PyObject *
PyDecType_FromSequence(PyTypeObject *type, PyObject *v,
                       PyObject *context)
{
    PyObject *dectuple;
    PyObject *dec;
    char *s;

    dectuple = sequence_as_tuple(v, PyExc_TypeError,
                                 "argument must be a tuple or list");
    if (dectuple == NULL) {
        return NULL;
    }

    s = dectuple_as_str(dectuple);
    Py_DECREF(dectuple);
    if (s == NULL) {
        return NULL;
    }

    dec = PyDecType_FromCString(type, s, context);

    PyMem_Free(s);
    return dec;
}

/* Currently accepts tuples and lists. */
static PyObject *
PyDecType_FromSequenceExact(PyTypeObject *type, PyObject *v,
                            PyObject *context)
{
    PyObject *dectuple;
    PyObject *dec;
    char *s;

    dectuple = sequence_as_tuple(v, PyExc_TypeError,
                   "argument must be a tuple or list");
    if (dectuple == NULL) {
        return NULL;
    }

    s = dectuple_as_str(dectuple);
    Py_DECREF(dectuple);
    if (s == NULL) {
        return NULL;
    }

    dec = PyDecType_FromCStringExact(type, s, context);

    PyMem_Free(s);
    return dec;
}

/* Convenience wrappers that target the exact Decimal type of a module
   state rather than an arbitrary subtype. */
#define PyDec_FromCString(st, str, context) \
        PyDecType_FromCString((st)->PyDec_Type, str, context)
#define PyDec_FromCStringExact(st, str, context) \
        PyDecType_FromCStringExact((st)->PyDec_Type, str, context)

#define PyDec_FromUnicode(st, unicode, context) \
        PyDecType_FromUnicode((st)->PyDec_Type, unicode, context)
#define PyDec_FromUnicodeExact(st, unicode, context) \
        PyDecType_FromUnicodeExact((st)->PyDec_Type, unicode, context)
#define PyDec_FromUnicodeExactWS(st, unicode, context) \
        PyDecType_FromUnicodeExactWS((st)->PyDec_Type, unicode, context)

#define PyDec_FromSsize(st, v, context) \
        PyDecType_FromSsize((st)->PyDec_Type, v, context)
#define PyDec_FromSsizeExact(st, v, context) \
        PyDecType_FromSsizeExact((st)->PyDec_Type, v, context)

#define PyDec_FromLong(st, pylong, context) \
        PyDecType_FromLong((st)->PyDec_Type, pylong, context)
#define PyDec_FromLongExact(st, pylong, context) \
        PyDecType_FromLongExact((st)->PyDec_Type, pylong, context)

#define PyDec_FromFloat(st, pyfloat, context) \
        PyDecType_FromFloat((st)->PyDec_Type, pyfloat, context)
#define PyDec_FromFloatExact(st, pyfloat, context) \
        PyDecType_FromFloatExact((st)->PyDec_Type, pyfloat, context)

#define PyDec_FromSequence(st, sequence, context) \
        PyDecType_FromSequence((st)->PyDec_Type, sequence, context)
#define PyDec_FromSequenceExact(st, sequence, context) \
        PyDecType_FromSequenceExact((st)->PyDec_Type, sequence, context)

/*[clinic input]
@classmethod
_decimal.Decimal.from_float

    cls: defining_class
    f as pyfloat: object
    /

Class method that converts a float to a decimal number, exactly.

Since 0.1 is not exactly representable in binary floating point,
Decimal.from_float(0.1) is not the same as Decimal('0.1').

    >>> Decimal.from_float(0.1)
    Decimal('0.1000000000000000055511151231257827021181583404541015625')
    >>> Decimal.from_float(float('nan'))
    Decimal('NaN')
    >>> Decimal.from_float(float('inf'))
    Decimal('Infinity')
    >>> Decimal.from_float(float('-inf'))
    Decimal('-Infinity')

[clinic start generated code]*/

static PyObject *
_decimal_Decimal_from_float_impl(PyTypeObject *type, PyTypeObject *cls,
                                 PyObject *pyfloat)
/*[clinic end generated code: output=fcb7d55d2f9dc790 input=03bc8dbe963e52ca]*/
{
    PyObject *context;
    PyObject *result;

    decimal_state *state = PyType_GetModuleState(cls);
    CURRENT_CONTEXT(state, context);
    result = PyDecType_FromFloatExact(state->PyDec_Type, pyfloat, context);
    /* For subclasses, re-run the subclass constructor on the exact result
       so subclass __init__/__new__ behavior is honored. */
    if (type != state->PyDec_Type && result != NULL) {
        Py_SETREF(result, PyObject_CallFunctionObjArgs((PyObject *)type,
                                                       result, NULL));
    }

    return result;
}

/* 'v' can have any numeric type accepted by the Decimal constructor. Attempt
   an exact conversion.
If the result does not meet the restrictions for an mpd_t, fail with InvalidOperation. */ static PyObject * PyDecType_FromNumberExact(PyTypeObject *type, PyTypeObject *cls, PyObject *v, PyObject *context) { decimal_state *state = PyType_GetModuleState(cls); assert(v != NULL); if (PyDec_Check(state, v)) { return PyDecType_FromDecimalExact(type, v, context); } else if (PyLong_Check(v)) { return PyDecType_FromLongExact(type, v, context); } else if (PyFloat_Check(v)) { if (dec_addstatus(context, MPD_Float_operation)) { return NULL; } return PyDecType_FromFloatExact(type, v, context); } else { PyErr_Format(PyExc_TypeError, "conversion from %s to Decimal is not supported", Py_TYPE(v)->tp_name); return NULL; } } /*[clinic input] @classmethod _decimal.Decimal.from_number cls: defining_class number: object / Class method that converts a real number to a decimal number, exactly. >>> Decimal.from_number(314) # int Decimal('314') >>> Decimal.from_number(0.1) # float Decimal('0.1000000000000000055511151231257827021181583404541015625') >>> Decimal.from_number(Decimal('3.14')) # another decimal instance Decimal('3.14') [clinic start generated code]*/ static PyObject * _decimal_Decimal_from_number_impl(PyTypeObject *type, PyTypeObject *cls, PyObject *number) /*[clinic end generated code: output=4d3ec722b7acfd8b input=271cb4feb3148804]*/ { PyObject *context; PyObject *result; decimal_state *state = PyType_GetModuleState(cls); CURRENT_CONTEXT(state, context); result = PyDecType_FromNumberExact(state->PyDec_Type, cls, number, context); if (type != state->PyDec_Type && result != NULL) { Py_SETREF(result, PyObject_CallFunctionObjArgs((PyObject *)type, result, NULL)); } return result; } /* create_decimal_from_float */ /*[clinic input] _decimal.Context.create_decimal_from_float self as context: self cls: defining_class f: object / Create a new Decimal instance from float f. Unlike the Decimal.from_float() class method, this function observes the context limits. 
[clinic start generated code]*/ static PyObject * _decimal_Context_create_decimal_from_float_impl(PyObject *context, PyTypeObject *cls, PyObject *f) /*[clinic end generated code: output=a5548f5140fa0870 input=8c66eeb22b01ddd4]*/ { decimal_state *state = PyType_GetModuleState(cls); return PyDec_FromFloat(state, f, context); } /* Apply the context to the input operand. Return a new PyDecObject. */ static PyObject * dec_apply(PyObject *v, PyObject *context) { PyObject *result; uint32_t status = 0; decimal_state *state = get_module_state_from_ctx(context); result = dec_alloc(state); if (result == NULL) { return NULL; } mpd_qcopy(MPD(result), MPD(v), &status); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } mpd_qfinalize(MPD(result), CTX(context), &status); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } return result; } /* 'v' can have any type accepted by the Decimal constructor. Attempt an exact conversion. If the result does not meet the restrictions for an mpd_t, fail with InvalidOperation. */ static PyObject * PyDecType_FromObjectExact(PyTypeObject *type, PyObject *v, PyObject *context) { decimal_state *state = get_module_state_from_ctx(context); if (v == NULL) { return PyDecType_FromSsizeExact(type, 0, context); } else if (PyDec_Check(state, v)) { return PyDecType_FromDecimalExact(type, v, context); } else if (PyUnicode_Check(v)) { return PyDecType_FromUnicodeExactWS(type, v, context); } else if (PyLong_Check(v)) { return PyDecType_FromLongExact(type, v, context); } else if (PyTuple_Check(v) || PyList_Check(v)) { return PyDecType_FromSequenceExact(type, v, context); } else if (PyFloat_Check(v)) { if (dec_addstatus(context, MPD_Float_operation)) { return NULL; } return PyDecType_FromFloatExact(type, v, context); } else { PyErr_Format(PyExc_TypeError, "conversion from %s to Decimal is not supported", Py_TYPE(v)->tp_name); return NULL; } } /* The context is used during conversion. 
   This function is the equivalent of context.create_decimal(). */
static PyObject *
PyDec_FromObject(PyObject *v, PyObject *context)
{
    decimal_state *state = get_module_state_from_ctx(context);
    /* NULL stands for the default argument: Decimal('0') */
    if (v == NULL) {
        return PyDec_FromSsize(state, 0, context);
    }
    else if (PyDec_Check(state, v)) {
        mpd_context_t *ctx = CTX(context);
        if (mpd_isnan(MPD(v)) &&
            MPD(v)->digits > ctx->prec - ctx->clamp) {
            /* Special case: too many NaN payload digits */
            PyObject *result;
            if (dec_addstatus(context, MPD_Conversion_syntax)) {
                return NULL;
            }
            /* trap not set: degrade to a quiet NaN without payload */
            result = dec_alloc(state);
            if (result == NULL) {
                return NULL;
            }
            mpd_setspecial(MPD(result), MPD_POS, MPD_NAN);
            return result;
        }
        return dec_apply(v, context);
    }
    else if (PyUnicode_Check(v)) {
        return PyDec_FromUnicode(state, v, context);
    }
    else if (PyLong_Check(v)) {
        return PyDec_FromLong(state, v, context);
    }
    else if (PyTuple_Check(v) || PyList_Check(v)) {
        return PyDec_FromSequence(state, v, context);
    }
    else if (PyFloat_Check(v)) {
        /* raises if the FloatOperation trap is enabled in the context */
        if (dec_addstatus(context, MPD_Float_operation)) {
            return NULL;
        }
        return PyDec_FromFloat(state, v, context);
    }
    else {
        PyErr_Format(PyExc_TypeError,
            "conversion from %s to Decimal is not supported",
            Py_TYPE(v)->tp_name);
        return NULL;
    }
}

/*[clinic input]
@classmethod
_decimal.Decimal.__new__ as dec_new

    value: object(c_default="NULL") = "0"
    context: object = None

Construct a new Decimal object.

value can be an integer, string, tuple, or another Decimal object.
If no value is given, return Decimal('0'). The context does not affect
the conversion and is only passed to determine if the InvalidOperation
trap is active.

[clinic start generated code]*/

static PyObject *
dec_new_impl(PyTypeObject *type, PyObject *value, PyObject *context)
/*[clinic end generated code: output=35f48a40c65625ba input=5f8a0892d3fcef80]*/
{
    decimal_state *state = get_module_state_by_def(type);
    /* validates 'context' (or substitutes the current context for None) */
    CONTEXT_CHECK_VA(state, context);

    return PyDecType_FromObjectExact(type, value, context);
}

/*[clinic input]
_decimal.Context.create_decimal

    self as context: self
    num: object(c_default="NULL") = "0"
    /

Create a new Decimal instance from num, using self as the context.

Unlike the Decimal constructor, this function observes the context
limits.

[clinic start generated code]*/

static PyObject *
_decimal_Context_create_decimal_impl(PyObject *context, PyObject *num)
/*[clinic end generated code: output=85e08ae02f3b34da input=d2c4946cf7804fbe]*/
{
    return PyDec_FromObject(num, context);
}


/******************************************************************************/
/*                        Implicit conversions to Decimal                     */
/******************************************************************************/

/* Try to convert PyObject v to a new PyDecObject conv. If the conversion
   fails, set conv to NULL (exception is set). If the conversion is not
   implemented, set conv to Py_NotImplemented. */
#define NOT_IMPL 0
#define TYPE_ERR 1
Py_LOCAL_INLINE(int)
convert_op(int type_err, PyObject **conv, PyObject *v, PyObject *context)
{
    decimal_state *state = get_module_state_from_ctx(context);
    /* Only Decimal and int convert implicitly in arithmetic operators. */
    if (PyDec_Check(state, v)) {
        *conv = Py_NewRef(v);
        return 1;
    }
    if (PyLong_Check(v)) {
        *conv = PyDec_FromLongExact(state, v, context);
        if (*conv == NULL) {
            return 0;
        }
        return 1;
    }

    if (type_err) {
        PyErr_Format(PyExc_TypeError,
            "conversion from %s to Decimal is not supported",
            Py_TYPE(v)->tp_name);
    }
    else {
        *conv = Py_NewRef(Py_NotImplemented);
    }
    return 0;
}

/* Return NotImplemented for unsupported types.
*/
#define CONVERT_OP(a, v, context) \
    if (!convert_op(NOT_IMPL, a, v, context)) { \
        return *(a);                            \
    }

#define CONVERT_BINOP(a, b, v, w, context) \
    if (!convert_op(NOT_IMPL, a, v, context)) { \
        return *(a);                            \
    }                                           \
    if (!convert_op(NOT_IMPL, b, w, context)) { \
        Py_DECREF(*(a));                        \
        return *(b);                            \
    }

#define CONVERT_TERNOP(a, b, c, v, w, x, context) \
    if (!convert_op(NOT_IMPL, a, v, context)) {   \
        return *(a);                              \
    }                                             \
    if (!convert_op(NOT_IMPL, b, w, context)) {   \
        Py_DECREF(*(a));                          \
        return *(b);                              \
    }                                             \
    if (!convert_op(NOT_IMPL, c, x, context)) {   \
        Py_DECREF(*(a));                          \
        Py_DECREF(*(b));                          \
        return *(c);                              \
    }

/* Raise TypeError for unsupported types. */
#define CONVERT_OP_RAISE(a, v, context) \
    if (!convert_op(TYPE_ERR, a, v, context)) { \
        return NULL;                            \
    }

#define CONVERT_BINOP_RAISE(a, b, v, w, context) \
    if (!convert_op(TYPE_ERR, a, v, context)) {  \
        return NULL;                             \
    }                                            \
    if (!convert_op(TYPE_ERR, b, w, context)) {  \
        Py_DECREF(*(a));                         \
        return NULL;                             \
    }

#define CONVERT_TERNOP_RAISE(a, b, c, v, w, x, context) \
    if (!convert_op(TYPE_ERR, a, v, context)) {         \
        return NULL;                                    \
    }                                                   \
    if (!convert_op(TYPE_ERR, b, w, context)) {         \
        Py_DECREF(*(a));                                \
        return NULL;                                    \
    }                                                   \
    if (!convert_op(TYPE_ERR, c, x, context)) {         \
        Py_DECREF(*(a));                                \
        Py_DECREF(*(b));                                \
        return NULL;                                    \
    }


/******************************************************************************/
/*              Implicit conversions to Decimal for comparison                */
/******************************************************************************/

/* Return v * r.denominator as a new Decimal, computed exactly.  Used so
   that a Decimal and a Fraction can be compared as two integers
   (v*denominator vs. numerator). */
static PyObject *
multiply_by_denominator(PyObject *v, PyObject *r, PyObject *context)
{
    PyObject *result;
    PyObject *tmp = NULL;
    PyObject *denom = NULL;
    uint32_t status = 0;
    mpd_context_t maxctx;
    mpd_ssize_t exp;
    mpd_t *vv;

    /* v is not special, r is a rational */
    tmp = PyObject_GetAttrString(r, "denominator");
    if (tmp == NULL) {
        return NULL;
    }
    decimal_state *state = get_module_state_from_ctx(context);
    denom = PyDec_FromLongExact(state, tmp, context);
    Py_DECREF(tmp);
    if (denom == NULL) {
        return NULL;
    }

    vv = mpd_qncopy(MPD(v));
    if (vv == NULL) {
        Py_DECREF(denom);
        PyErr_NoMemory();
        return NULL;
    }
    result = dec_alloc(state);
    if (result == NULL) {
        Py_DECREF(denom);
        mpd_del(vv);
        return NULL;
    }

    mpd_maxcontext(&maxctx);
    /* Prevent Overflow in the following multiplication. The result of
       the multiplication is only used in mpd_qcmp, which can handle values
       that are technically out of bounds, like (for 32-bit)
       99999999999999999999...99999999e+425000000. */
    exp = vv->exp;
    vv->exp = 0;
    mpd_qmul(MPD(result), vv, MPD(denom), &maxctx, &status);
    MPD(result)->exp = exp;

    Py_DECREF(denom);
    mpd_del(vv);
    /* If any status has been accumulated during the multiplication,
       the result is invalid. This is very unlikely, since even the
       32-bit version supports 425000000 digits. */
    if (status) {
        PyErr_SetString(PyExc_ValueError,
            "exact conversion for comparison failed");
        Py_DECREF(result);
        return NULL;
    }

    return result;
}

/* Return r.numerator converted exactly to a Decimal. */
static PyObject *
numerator_as_decimal(PyObject *r, PyObject *context)
{
    PyObject *tmp, *num;

    tmp = PyObject_GetAttrString(r, "numerator");
    if (tmp == NULL) {
        return NULL;
    }

    decimal_state *state = get_module_state_from_ctx(context);
    num = PyDec_FromLongExact(state, tmp, context);
    Py_DECREF(tmp);
    return num;
}

/* Convert v and w for comparison. v is a Decimal. If w is a Rational, both
   v and w have to be transformed. Return 1 for success, with new references
   to the converted objects in vcmp and wcmp. Return 0 for failure. In that
   case wcmp is either NULL or Py_NotImplemented (new reference) and vcmp
   is undefined.
*/ static int convert_op_cmp(PyObject **vcmp, PyObject **wcmp, PyObject *v, PyObject *w, int op, PyObject *context) { mpd_context_t *ctx = CTX(context); *vcmp = v; decimal_state *state = get_module_state_from_ctx(context); if (PyDec_Check(state, w)) { *wcmp = Py_NewRef(w); } else if (PyLong_Check(w)) { *wcmp = PyDec_FromLongExact(state, w, context); } else if (PyFloat_Check(w)) { if (op != Py_EQ && op != Py_NE && dec_addstatus(context, MPD_Float_operation)) { *wcmp = NULL; } else { ctx->status |= MPD_Float_operation; *wcmp = PyDec_FromFloatExact(state, w, context); } } else if (PyComplex_Check(w) && (op == Py_EQ || op == Py_NE)) { Py_complex c = PyComplex_AsCComplex(w); if (c.real == -1.0 && PyErr_Occurred()) { *wcmp = NULL; } else if (c.imag == 0.0) { PyObject *tmp = PyFloat_FromDouble(c.real); if (tmp == NULL) { *wcmp = NULL; } else { ctx->status |= MPD_Float_operation; *wcmp = PyDec_FromFloatExact(state, tmp, context); Py_DECREF(tmp); } } else { *wcmp = Py_NewRef(Py_NotImplemented); } } else { int is_rational = PyObject_IsInstance(w, state->Rational); if (is_rational < 0) { *wcmp = NULL; } else if (is_rational > 0) { *wcmp = numerator_as_decimal(w, context); if (*wcmp && !mpd_isspecial(MPD(v))) { *vcmp = multiply_by_denominator(v, w, context); if (*vcmp == NULL) { Py_CLEAR(*wcmp); } } } else { *wcmp = Py_NewRef(Py_NotImplemented); } } if (*wcmp == NULL || *wcmp == Py_NotImplemented) { return 0; } if (*vcmp == v) { Py_INCREF(v); } return 1; } #define CONVERT_BINOP_CMP(vcmp, wcmp, v, w, op, ctx) \ if (!convert_op_cmp(vcmp, wcmp, v, w, op, ctx)) { \ return *(wcmp); \ } \ /******************************************************************************/ /* Conversions from decimal */ /******************************************************************************/ static PyObject * unicode_fromascii(const char *s, Py_ssize_t size) { PyObject *res; res = PyUnicode_New(size, 127); if (res == NULL) { return NULL; } memcpy(PyUnicode_1BYTE_DATA(res), s, size); return res; } 
/* PyDecObject as a string. The default module context is only used for
   the value of 'capitals'. */
static PyObject *
dec_str(PyObject *dec)
{
    PyObject *res, *context;
    mpd_ssize_t size;
    char *cp;

    decimal_state *state = get_module_state_by_def(Py_TYPE(dec));
    CURRENT_CONTEXT(state, context);

    /* mpd_to_sci_size() allocates cp with libmpdec's allocator and
       returns -1 on allocation failure. */
    size = mpd_to_sci_size(&cp, MPD(dec), CtxCaps(context));
    if (size < 0) {
        PyErr_NoMemory();
        return NULL;
    }

    res = unicode_fromascii(cp, size);
    mpd_free(cp);
    return res;
}

/* Representation of a PyDecObject: "Decimal('...')". */
static PyObject *
dec_repr(PyObject *dec)
{
    PyObject *res, *context;
    char *cp;

    decimal_state *state = get_module_state_by_def(Py_TYPE(dec));
    CURRENT_CONTEXT(state, context);

    cp = mpd_to_sci(MPD(dec), CtxCaps(context));
    if (cp == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    res = PyUnicode_FromFormat("Decimal('%s')", cp);
    mpd_free(cp);
    return res;
}

/* Return a duplicate of src, copy embedded null characters.
   The result is allocated with PyMem_Malloc(); the caller frees it
   with PyMem_Free(). */
static char *
dec_strdup(const char *src, Py_ssize_t size)
{
    char *dest = PyMem_Malloc(size+1);
    if (dest == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    memcpy(dest, src, size);
    dest[size] = '\0';
    return dest;
}

/* Undo the fill-char substitution done in __format__: every 0xff
   placeholder byte becomes a NUL again (in place). */
static void
dec_replace_fillchar(char *dest)
{
     while (*dest != '\0') {
         if (*dest == '\xff') *dest = '\0';
         dest++;
     }
}

/* Convert decimal_point or thousands_sep, which may be multibyte or in
   the range [128, 255], to a UTF8 string.
*/
static PyObject *
dotsep_as_utf8(const char *s)
{
    PyObject *utf8;
    PyObject *tmp;
    wchar_t buf[2];
    size_t n;

    /* Decode using the current LC_CTYPE; exactly one wide char is
       expected for a separator. */
    n = mbstowcs(buf, s, 2);
    if (n != 1) { /* Issue #7442 */
        PyErr_SetString(PyExc_ValueError,
            "invalid decimal point or unsupported "
            "combination of LC_CTYPE and LC_NUMERIC");
        return NULL;
    }

    tmp = PyUnicode_FromWideChar(buf, n);
    if (tmp == NULL) {
        return NULL;
    }

    utf8 = PyUnicode_AsUTF8String(tmp);
    Py_DECREF(tmp);
    return utf8;
}

/* Look up dict[key]; on hit, set *valueobj to a NEW bytes object (UTF-8
   encoding of the value, caller DECREFs) and *valuestr to its internal
   buffer, which stays valid as long as *valueobj lives.
   Returns 0 on success or missing key, -1 on error. */
static int
dict_get_item_string(PyObject *dict, const char *key, PyObject **valueobj, const char **valuestr)
{
    *valueobj = NULL;
    PyObject *keyobj = PyUnicode_FromString(key);
    if (keyobj == NULL) {
        return -1;
    }
    PyObject *value = PyDict_GetItemWithError(dict, keyobj);  /* borrowed */
    Py_DECREF(keyobj);
    if (value == NULL) {
        if (PyErr_Occurred()) {
            return -1;
        }
        return 0;
    }
    value = PyUnicode_AsUTF8String(value);
    if (value == NULL) {
        return -1;
    }
    *valueobj = value;
    *valuestr = PyBytes_AS_STRING(value);
    return 0;
}

/*
 * Fallback _pydecimal formatting for new format specifiers that mpdecimal does
 * not yet support. As documented, libmpdec follows the PEP-3101 format language:
 * https://www.bytereef.org/mpdecimal/doc/libmpdec/assign-convert.html#to-string
 */
static PyObject *
pydec_format(PyObject *dec, PyObject *context, PyObject *fmt, decimal_state *state)
{
    PyObject *result;
    PyObject *pydec;
    PyObject *u;

    /* Lazily import _pydecimal.Decimal and cache it on the module state. */
    if (state->PyDecimal == NULL) {
        state->PyDecimal = PyImport_ImportModuleAttrString("_pydecimal", "Decimal");
        if (state->PyDecimal == NULL) {
            return NULL;
        }
    }

    u = dec_str(dec);
    if (u == NULL) {
        return NULL;
    }

    pydec = PyObject_CallOneArg(state->PyDecimal, u);
    Py_DECREF(u);
    if (pydec == NULL) {
        return NULL;
    }

    result = PyObject_CallMethod(pydec, "__format__", "(OO)", fmt, context);
    Py_DECREF(pydec);

    if (result == NULL && PyErr_ExceptionMatches(PyExc_ValueError)) {
        /* Do not confuse users with the _pydecimal exception */
        PyErr_Clear();
        PyErr_SetString(PyExc_ValueError, "invalid format string");
    }

    return result;
}

/* Formatted representation of a PyDecObject.
*/

/*[clinic input]
_decimal.Decimal.__format__

    self as dec: self
    cls: defining_class
    format_spec as fmtarg: unicode
    override: object = NULL
    /

Formats the Decimal according to format_spec.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal___format___impl(PyObject *dec, PyTypeObject *cls,
                                 PyObject *fmtarg, PyObject *override)
/*[clinic end generated code: output=6d95f91bbb28b3ed input=2dbfaa0cbe243e9e]*/
{
    PyObject *result = NULL;
    PyObject *dot = NULL;         /* owned bytes keeping spec.dot alive */
    PyObject *sep = NULL;         /* owned bytes keeping spec.sep alive */
    PyObject *grouping = NULL;    /* owned bytes keeping spec.grouping alive */
    PyObject *context;
    mpd_spec_t spec;
    char *fmt;
    char *decstring = NULL;
    uint32_t status = 0;
    int replace_fillchar = 0;     /* nonzero: fmt is a PyMem copy we must free */
    Py_ssize_t size;

    decimal_state *state = PyType_GetModuleState(cls);
    CURRENT_CONTEXT(state, context);

    fmt = (char *)PyUnicode_AsUTF8AndSize(fmtarg, &size);
    if (fmt == NULL) {
        return NULL;
    }

    if (size > 0 && fmt[size-1] == 'N') {
        if (PyErr_WarnEx(PyExc_DeprecationWarning,
                         "Format specifier 'N' is deprecated and "
                         "slated for removal in Python 3.18",
                         1) < 0) {
            return NULL;
        }
    }

    if (size > 0 && fmt[0] == '\0') {
        /* NUL fill character: must be replaced with a valid UTF-8 char
           before calling mpd_parse_fmt_str(). */
        replace_fillchar = 1;
        fmt = dec_strdup(fmt, size);
        if (fmt == NULL) {
            return NULL;
        }
        fmt[0] = '_';
    }

    if (!mpd_parse_fmt_str(&spec, fmt, CtxCaps(context))) {
        /* Spec not understood by libmpdec: fall back to _pydecimal,
           passing the ORIGINAL (unmodified) format string. */
        if (replace_fillchar) {
            PyMem_Free(fmt);
        }

        return pydec_format(dec, context, fmtarg, state);
    }

    if (replace_fillchar) {
        /* In order to avoid clobbering parts of UTF-8 thousands separators or
           decimal points when the substitution is reversed later, the actual
           placeholder must be an invalid UTF-8 byte. */
        spec.fill[0] = '\xff';
        spec.fill[1] = '\0';
    }

    if (override) {
        /* Values for decimal_point, thousands_sep and grouping can
           be explicitly specified in the override dict. These values
           take precedence over the values obtained from localeconv()
           in mpd_parse_fmt_str(). The feature is not documented and
           is only used in test_decimal.
        */
        if (!PyDict_Check(override)) {
            PyErr_SetString(PyExc_TypeError,
                "optional argument must be a dict");
            goto finish;
        }
        if (dict_get_item_string(override, "decimal_point", &dot, &spec.dot) ||
            dict_get_item_string(override, "thousands_sep", &sep, &spec.sep) ||
            dict_get_item_string(override, "grouping", &grouping, &spec.grouping))
        {
            goto finish;
        }
        if (mpd_validate_lconv(&spec) < 0) {
            PyErr_SetString(PyExc_ValueError,
                "invalid override dict");
            goto finish;
        }
    }
    else {
        size_t n = strlen(spec.dot);
        if (n > 1 || (n == 1 && !isascii((unsigned char)spec.dot[0]))) {
            /* fix locale dependent non-ascii characters */
            dot = dotsep_as_utf8(spec.dot);
            if (dot == NULL) {
                goto finish;
            }
            spec.dot = PyBytes_AS_STRING(dot);
        }
        n = strlen(spec.sep);
        if (n > 1 || (n == 1 && !isascii((unsigned char)spec.sep[0]))) {
            /* fix locale dependent non-ascii characters */
            sep = dotsep_as_utf8(spec.sep);
            if (sep == NULL) {
                goto finish;
            }
            spec.sep = PyBytes_AS_STRING(sep);
        }
    }

    decstring = mpd_qformat_spec(MPD(dec), &spec, CTX(context), &status);
    if (decstring == NULL) {
        if (status & MPD_Malloc_error) {
            PyErr_NoMemory();
        }
        else {
            PyErr_SetString(PyExc_ValueError,
                "format specification exceeds internal limits of _decimal");
        }
        goto finish;
    }
    /* Measure BEFORE restoring NULs: the placeholder bytes become '\0'. */
    size = strlen(decstring);
    if (replace_fillchar) {
        dec_replace_fillchar(decstring);
    }

    result = PyUnicode_DecodeUTF8(decstring, size, NULL);

finish:
    Py_XDECREF(grouping);
    Py_XDECREF(sep);
    Py_XDECREF(dot);
    if (replace_fillchar) PyMem_Free(fmt);
    if (decstring) mpd_free(decstring);
    return result;
}

/* Return a PyLongObject from a PyDecObject, using the specified rounding
 * mode. The context precision is not observed.
 */
static PyObject *
dec_as_long(PyObject *dec, PyObject *context, int round)
{
    /* NaN -> ValueError, Infinity -> OverflowError (matches float()). */
    if (mpd_isspecial(MPD(dec))) {
        if (mpd_isnan(MPD(dec))) {
            PyErr_SetString(PyExc_ValueError,
                "cannot convert NaN to integer");
        }
        else {
            PyErr_SetString(PyExc_OverflowError,
                "cannot convert Infinity to integer");
        }
        return NULL;
    }

    mpd_t *x = mpd_qnew();
    if (x == NULL) {
        PyErr_NoMemory();
        return NULL;
    }
    mpd_context_t workctx = *CTX(context);
    uint32_t status = 0;
    workctx.round = round;
    mpd_qround_to_int(x, MPD(dec), &workctx, &status);
    if (dec_addstatus(context, status)) {
        mpd_del(x);
        return NULL;
    }

    /* Fast path: the rounded value fits in an int64_t. */
    status = 0;
    int64_t val = mpd_qget_i64(x, &status);

    if (!status) {
        mpd_del(x);
        return PyLong_FromInt64(val);
    }
    assert(!mpd_iszero(x));

    /* Slow path: export libmpdec digits directly into a PyLong via the
       PEP 757 writer API, using CPython's native digit layout as base. */
    const PyLongLayout *layout = PyLong_GetNativeLayout();

    assert(layout->bits_per_digit < 32);
    assert(layout->digits_order == -1);
    assert(layout->digit_endianness == (PY_LITTLE_ENDIAN ? -1 : 1));
    assert(layout->digit_size == 2 || layout->digit_size == 4);

    uint32_t base = (uint32_t)1 << layout->bits_per_digit;
    /* We use a temporary buffer for digits for now, as for nonzero rdata
       mpd_qexport_u32/u16() require either space "allocated by one of
       libmpdec’s allocation functions" or "rlen MUST be correct" (to
       avoid reallocation). This can be further optimized by using rlen
       from mpd_sizeinbase(). See gh-127925.
    */
    void *tmp_digits = NULL;
    size_t n;

    status = 0;
    if (layout->digit_size == 4) {
        n = mpd_qexport_u32((uint32_t **)&tmp_digits, 0, base, x, &status);
    }
    else {
        n = mpd_qexport_u16((uint16_t **)&tmp_digits, 0, base, x, &status);
    }

    if (n == SIZE_MAX) {
        PyErr_NoMemory();
        mpd_del(x);
        mpd_free(tmp_digits);
        return NULL;
    }

    void *digits;
    PyLongWriter *writer = PyLongWriter_Create(mpd_isnegative(x), n, &digits);
    mpd_del(x);
    if (writer == NULL) {
        mpd_free(tmp_digits);
        return NULL;
    }
    memcpy(digits, tmp_digits, layout->digit_size*n);
    mpd_free(tmp_digits);
    return PyLongWriter_Finish(writer);
}

/*[clinic input]
_decimal.Decimal.as_integer_ratio

    cls: defining_class

Return a pair of integers whose ratio is exactly equal to the original.

The ratio is in lowest terms and with a positive denominator.  Raise
OverflowError on infinities and a ValueError on NaNs.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_as_integer_ratio_impl(PyObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=eb49c512701f844b input=07e33d8852184761]*/
{
    PyObject *numerator = NULL;
    PyObject *denominator = NULL;
    PyObject *exponent = NULL;
    PyObject *result = NULL;
    PyObject *tmp;
    mpd_ssize_t exp;
    PyObject *context;
    uint32_t status = 0;

    if (mpd_isspecial(MPD(self))) {
        if (mpd_isnan(MPD(self))) {
            PyErr_SetString(PyExc_ValueError,
                "cannot convert NaN to integer ratio");
        }
        else {
            PyErr_SetString(PyExc_OverflowError,
                "cannot convert Infinity to integer ratio");
        }
        return NULL;
    }

    decimal_state *state = PyType_GetModuleState(cls);
    CURRENT_CONTEXT(state, context);

    tmp = dec_alloc(state);
    if (tmp == NULL) {
        return NULL;
    }

    if (!mpd_qcopy(MPD(tmp), MPD(self), &status)) {
        Py_DECREF(tmp);
        PyErr_NoMemory();
        return NULL;
    }

    /* Split the value into coefficient and power-of-ten exponent;
       a zero keeps exp == 0 so the result is (0, 1). */
    exp = mpd_iszero(MPD(tmp)) ? 0 : MPD(tmp)->exp;
    MPD(tmp)->exp = 0;

    /* context and rounding are unused here: the conversion is exact */
    numerator = dec_as_long(tmp, context, MPD_ROUND_FLOOR);
    Py_DECREF(tmp);
    if (numerator == NULL) {
        goto error;
    }

    exponent = PyLong_FromSsize_t(exp < 0 ? -exp : exp);
    if (exponent == NULL) {
        goto error;
    }

    tmp = PyLong_FromLong(10);
    if (tmp == NULL) {
        goto error;
    }

    /* exponent = 10 ** |exp| */
    Py_SETREF(exponent, state->_py_long_power(tmp, exponent, Py_None));
    Py_DECREF(tmp);
    if (exponent == NULL) {
        goto error;
    }

    if (exp >= 0) {
        /* Nonnegative exponent: numerator absorbs it, denominator is 1. */
        Py_SETREF(numerator, state->_py_long_multiply(numerator, exponent));
        if (numerator == NULL) {
            goto error;
        }
        denominator = PyLong_FromLong(1);
        if (denominator == NULL) {
            goto error;
        }
    }
    else {
        /* Negative exponent: reduce numerator/10**|exp| by the GCD. */
        denominator = exponent;
        exponent = NULL;
        tmp = _PyLong_GCD(numerator, denominator);
        if (tmp == NULL) {
            goto error;
        }
        Py_SETREF(numerator, state->_py_long_floor_divide(numerator, tmp));
        if (numerator == NULL) {
            Py_DECREF(tmp);
            goto error;
        }
        Py_SETREF(denominator, state->_py_long_floor_divide(denominator, tmp));
        Py_DECREF(tmp);
        if (denominator == NULL) {
            goto error;
        }
    }

    result = PyTuple_Pack(2, numerator, denominator);

error:
    Py_XDECREF(exponent);
    Py_XDECREF(denominator);
    Py_XDECREF(numerator);
    return result;
}

/*[clinic input]
_decimal.Decimal.to_integral_value

    cls: defining_class
    rounding: object = None
    context: object = None

Round to the nearest integer without signaling Inexact or Rounded.

The rounding mode is determined by the rounding parameter if given,
else by the given context. If neither parameter is given, then the
rounding mode of the current default context is used.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_to_integral_value_impl(PyObject *self, PyTypeObject *cls,
                                        PyObject *rounding, PyObject *context)
/*[clinic end generated code: output=23047d848ef84db1 input=85aa9499a21ea8d7]*/
{
    PyObject *result;
    uint32_t status = 0;
    mpd_context_t workctx;

    decimal_state *state = PyType_GetModuleState(cls);
    CONTEXT_CHECK_VA(state, context);

    /* Work on a context copy so an explicit rounding argument does not
       mutate the caller's context. */
    workctx = *CTX(context);
    if (rounding != Py_None) {
        int round = getround(state, rounding);
        if (round < 0) {
            return NULL;
        }
        if (!mpd_qsetround(&workctx, round)) {
            INTERNAL_ERROR_PTR("PyDec_ToIntegralValue"); /* GCOV_NOT_REACHED */
        }
    }

    result = dec_alloc(state);
    if (result == NULL) {
        return NULL;
    }

    mpd_qround_to_int(MPD(result), MPD(self), &workctx, &status);
    if (dec_addstatus(context, status)) {
        Py_DECREF(result);
        return NULL;
    }

    return result;
}

/*[clinic input]
_decimal.Decimal.to_integral = _decimal.Decimal.to_integral_value

Identical to the to_integral_value() method.

The to_integral() name has been kept for compatibility with older versions.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_to_integral_impl(PyObject *self, PyTypeObject *cls,
                                  PyObject *rounding, PyObject *context)
/*[clinic end generated code: output=5dac8f54c2a3ed26 input=709b54618ecd0d8b]*/
{
    /* Thin alias kept for backward compatibility. */
    return _decimal_Decimal_to_integral_value_impl(self, cls, rounding,
                                                   context);
}

/*[clinic input]
_decimal.Decimal.to_integral_exact = _decimal.Decimal.to_integral_value

Round to the nearest integer.

Decimal.to_integral_exact() signals Inexact or Rounded as appropriate if
rounding occurs.

The rounding mode is determined by the rounding parameter if given, else
by the given context. If neither parameter is given, then the rounding
mode of the current default context is used.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_to_integral_exact_impl(PyObject *self, PyTypeObject *cls,
                                        PyObject *rounding, PyObject *context)
/*[clinic end generated code: output=543a39a02eea9917 input=fabce7a744b8087c]*/
{
    PyObject *result;
    uint32_t status = 0;
    mpd_context_t workctx;

    decimal_state *state = PyType_GetModuleState(cls);
    CONTEXT_CHECK_VA(state, context);

    /* Same as to_integral_value(), except that mpd_qround_to_intx()
       reports Inexact/Rounded in status. */
    workctx = *CTX(context);
    if (rounding != Py_None) {
        int round = getround(state, rounding);
        if (round < 0) {
            return NULL;
        }
        if (!mpd_qsetround(&workctx, round)) {
            INTERNAL_ERROR_PTR("PyDec_ToIntegralExact"); /* GCOV_NOT_REACHED */
        }
    }

    result = dec_alloc(state);
    if (result == NULL) {
        return NULL;
    }

    mpd_qround_to_intx(MPD(result), MPD(self), &workctx, &status);
    if (dec_addstatus(context, status)) {
        Py_DECREF(result);
        return NULL;
    }

    return result;
}

/* float(Decimal): convert via the exact string representation so that
   Python's float parsing performs the correctly rounded conversion. */
static PyObject *
PyDec_AsFloat(PyObject *dec)
{
    PyObject *f, *s;

    if (mpd_isnan(MPD(dec))) {
        if (mpd_issnan(MPD(dec))) {
            PyErr_SetString(PyExc_ValueError,
                "cannot convert signaling NaN to float");
            return NULL;
        }
        /* Preserve the sign of a quiet NaN. */
        if (mpd_isnegative(MPD(dec))) {
            s = PyUnicode_FromString("-nan");
        }
        else {
            s = PyUnicode_FromString("nan");
        }
    }
    else {
        s = dec_str(dec);
    }

    if (s == NULL) {
        return NULL;
    }

    f = PyFloat_FromString(s);
    Py_DECREF(s);

    return f;
}

/*[clinic input]
_decimal.Decimal.__round__

    cls: defining_class
    ndigits: object = NULL
    /

Return the Integral closest to self, rounding half toward even.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal___round___impl(PyObject *self, PyTypeObject *cls,
                                PyObject *ndigits)
/*[clinic end generated code: output=790c2c6bd57890e6 input=d69e7178a58a66b1]*/
{
    PyObject *result;
    uint32_t status = 0;
    PyObject *context;

    decimal_state *state = PyType_GetModuleState(cls);
    CURRENT_CONTEXT(state, context);
    if (ndigits) {
        /* round(dec, ndigits): quantize against the constant 1E-ndigits,
           built here as a static mpd_t so no allocation is needed. */
        mpd_uint_t dq[1] = {1};
        mpd_t q = {MPD_STATIC|MPD_CONST_DATA,0,1,1,1,dq};
        mpd_ssize_t y;

        if (!PyLong_Check(ndigits)) {
            PyErr_SetString(PyExc_TypeError,
                "optional arg must be an integer");
            return NULL;
        }

        y = PyLong_AsSsize_t(ndigits);
        if (y == -1 && PyErr_Occurred()) {
            return NULL;
        }
        result = dec_alloc(state);
        if (result == NULL) {
            return NULL;
        }

        /* Guard against overflow when negating MPD_SSIZE_MIN. */
        q.exp = (y == MPD_SSIZE_MIN) ? MPD_SSIZE_MAX : -y;
        mpd_qquantize(MPD(result), MPD(self), &q, CTX(context), &status);
        if (dec_addstatus(context, status)) {
            Py_DECREF(result);
            return NULL;
        }

        return result;
    }
    else {
        /* round(dec): return an int, rounding half to even. */
        return dec_as_long(self, context, MPD_ROUND_HALF_EVEN);
    }
}

/*[clinic input]
_decimal.Decimal.as_tuple

    cls: defining_class

Return a tuple representation of the number.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_as_tuple_impl(PyObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=d68b967becee8ab9 input=bfa86d640224d9f5]*/
{
    PyObject *result = NULL;
    PyObject *sign = NULL;
    PyObject *coeff = NULL;
    PyObject *expt = NULL;
    PyObject *tmp = NULL;
    mpd_t *x = NULL;
    char *intstring = NULL;
    Py_ssize_t intlen, i;

    x = mpd_qncopy(MPD(self));
    if (x == NULL) {
        PyErr_NoMemory();
        goto out;
    }

    sign = PyLong_FromUnsignedLong(mpd_sign(MPD(self)));
    if (sign == NULL) {
        goto out;
    }

    if (mpd_isinfinite(x)) {
        expt = PyUnicode_FromString("F");
        if (expt == NULL) {
            goto out;
        }
        /* decimal.py has non-compliant infinity payloads. */
        coeff = Py_BuildValue("(i)", 0);
        if (coeff == NULL) {
            goto out;
        }
    }
    else {
        if (mpd_isnan(x)) {
            expt = PyUnicode_FromString(mpd_isqnan(x)?"n":"N");
        }
        else {
            expt = PyLong_FromSsize_t(MPD(self)->exp);
        }
        if (expt == NULL) {
            goto out;
        }

        /* coefficient is defined */
        if (x->len > 0) {

            /* make an integer */
            x->exp = 0;
            /* clear NaN and sign */
            mpd_clear_flags(x);
            intstring = mpd_to_sci(x, 1);
            if (intstring == NULL) {
                PyErr_NoMemory();
                goto out;
            }

            /* One tuple entry per decimal digit of the coefficient. */
            intlen = strlen(intstring);
            coeff = PyTuple_New(intlen);
            if (coeff == NULL) {
                goto out;
            }

            for (i = 0; i < intlen; i++) {
                tmp = PyLong_FromLong(intstring[i]-'0');
                if (tmp == NULL) {
                    goto out;
                }
                PyTuple_SET_ITEM(coeff, i, tmp);
            }
        }
        else {
            coeff = PyTuple_New(0);
            if (coeff == NULL) {
                goto out;
            }
        }
    }

    decimal_state *state = PyType_GetModuleState(cls);
    result = PyObject_CallFunctionObjArgs((PyObject *)state->DecimalTuple,
                                          sign, coeff, expt, NULL);

out:
    if (x) mpd_del(x);
    if (intstring) mpd_free(intstring);
    Py_XDECREF(sign);
    Py_XDECREF(coeff);
    Py_XDECREF(expt);
    return result;
}

/******************************************************************************/
/*         Macros for converting mpdecimal functions to Decimal methods      */
/******************************************************************************/

/* Unary number method that uses the default module context. */
#define Dec_UnaryNumberMethod(MPDFUNC) \
static PyObject *                                                  \
nm_##MPDFUNC(PyObject *self)                                       \
{                                                                  \
    PyObject *result;                                              \
    PyObject *context;                                             \
    uint32_t status = 0;                                           \
                                                                   \
    decimal_state *state = get_module_state_by_def(Py_TYPE(self)); \
    CURRENT_CONTEXT(state, context);                               \
    if ((result = dec_alloc(state)) == NULL) {                     \
        return NULL;                                               \
    }                                                              \
                                                                   \
    MPDFUNC(MPD(result), MPD(self), CTX(context), &status);        \
    if (dec_addstatus(context, status)) {                          \
        Py_DECREF(result);                                         \
        return NULL;                                               \
    }                                                              \
                                                                   \
    return result;                                                 \
}

/* Binary number method that uses default module context.
*/
#define Dec_BinaryNumberMethod(MPDFUNC) \
static PyObject *                                                \
nm_##MPDFUNC(PyObject *self, PyObject *other)                    \
{                                                                \
    PyObject *a, *b;                                             \
    PyObject *result;                                            \
    PyObject *context;                                           \
    uint32_t status = 0;                                         \
                                                                 \
    decimal_state *state = find_state_left_or_right(self, other); \
    CURRENT_CONTEXT(state, context) ;                            \
    /* Returns NotImplemented for unconvertible operands. */     \
    CONVERT_BINOP(&a, &b, self, other, context);                 \
                                                                 \
    if ((result = dec_alloc(state)) == NULL) {                   \
        Py_DECREF(a);                                            \
        Py_DECREF(b);                                            \
        return NULL;                                             \
    }                                                            \
                                                                 \
    MPDFUNC(MPD(result), MPD(a), MPD(b), CTX(context), &status); \
    Py_DECREF(a);                                                \
    Py_DECREF(b);                                                \
    if (dec_addstatus(context, status)) {                        \
        Py_DECREF(result);                                       \
        return NULL;                                             \
    }                                                            \
                                                                 \
    return result;                                               \
}

/* Boolean function without a context arg. Argument Clinic provides
   PyObject *self.  NOTE: these Dec_*Func* macros expand to the BODY of a
   clinic-generated *_impl function; the signature precedes the macro use. */
#define Dec_BoolFunc(MPDFUNC) \
{                                                           \
    return MPDFUNC(MPD(self)) ? incr_true() : incr_false(); \
}

/* Boolean function with an optional context arg. Argument Clinic
   provides PyObject *self, PyTypeObject *cls, PyObject *context */
#define Dec_BoolFuncVA(MPDFUNC) \
{                                                            \
    decimal_state *state = PyType_GetModuleState(cls);       \
    CONTEXT_CHECK_VA(state, context);                        \
                                                             \
    return MPDFUNC(MPD(self), CTX(context)) ? incr_true() : incr_false(); \
}

/* Unary function with an optional context arg. Argument Clinic
   provides PyObject *self, PyTypeObject *cls, PyObject *context */
#define Dec_UnaryFuncVA(MPDFUNC) \
{                                                        \
    PyObject *result;                                    \
    uint32_t status = 0;                                 \
    decimal_state *state = PyType_GetModuleState(cls);   \
    CONTEXT_CHECK_VA(state, context);                    \
                                                         \
    if ((result = dec_alloc(state)) == NULL) {           \
        return NULL;                                     \
    }                                                    \
                                                         \
    MPDFUNC(MPD(result), MPD(self), CTX(context), &status); \
    if (dec_addstatus(context, status)) {                \
        Py_DECREF(result);                               \
        return NULL;                                     \
    }                                                    \
                                                         \
    return result;                                       \
}

/* Binary function with an optional context arg.
   Argument Clinic provides PyObject *self, PyTypeObject *cls,
   PyObject *other, PyObject *context */
#define Dec_BinaryFuncVA(MPDFUNC) \
{                                                                \
    PyObject *a, *b;                                             \
    PyObject *result;                                            \
    uint32_t status = 0;                                         \
    decimal_state *state = PyType_GetModuleState(cls);           \
    CONTEXT_CHECK_VA(state, context);                            \
    /* Unlike CONVERT_BINOP, raises on unconvertible operands. */ \
    CONVERT_BINOP_RAISE(&a, &b, self, other, context);           \
                                                                 \
    if ((result = dec_alloc(state)) == NULL) {                   \
        Py_DECREF(a);                                            \
        Py_DECREF(b);                                            \
        return NULL;                                             \
    }                                                            \
                                                                 \
    MPDFUNC(MPD(result), MPD(a), MPD(b), CTX(context), &status); \
    Py_DECREF(a);                                                \
    Py_DECREF(b);                                                \
    if (dec_addstatus(context, status)) {                        \
        Py_DECREF(result);                                       \
        return NULL;                                             \
    }                                                            \
                                                                 \
    return result;                                               \
}

/* Binary function with an optional context arg. Actual MPDFUNC does
   NOT take a context. The context is used to record InvalidOperation
   if the second operand cannot be converted exactly.
   Argument Clinic provides PyObject *self, PyTypeObject *cls,
   PyObject *other, PyObject *context */
#define Dec_BinaryFuncVA_NO_CTX(MPDFUNC) \
{                                                        \
    PyObject *a, *b;                                     \
    PyObject *result;                                    \
    decimal_state *state = PyType_GetModuleState(cls);   \
    CONTEXT_CHECK_VA(state, context);                    \
    CONVERT_BINOP_RAISE(&a, &b, self, other, context);   \
                                                         \
    if ((result = dec_alloc(state)) == NULL) {           \
        Py_DECREF(a);                                    \
        Py_DECREF(b);                                    \
        return NULL;                                     \
    }                                                    \
                                                         \
    MPDFUNC(MPD(result), MPD(a), MPD(b));                \
    Py_DECREF(a);                                        \
    Py_DECREF(b);                                        \
                                                         \
    return result;                                       \
}

/* Ternary function with an optional context arg.
   Argument Clinic provides PyObject *self, PyTypeObject *cls,
   PyObject *other, PyObject *third, PyObject *context */
#define Dec_TernaryFuncVA(MPDFUNC) \
{                                                                        \
    PyObject *a, *b, *c;                                                 \
    PyObject *result;                                                    \
    uint32_t status = 0;                                                 \
    decimal_state *state = PyType_GetModuleState(cls);                   \
    CONTEXT_CHECK_VA(state, context);                                    \
    CONVERT_TERNOP_RAISE(&a, &b, &c, self, other, third, context);       \
                                                                         \
    if ((result = dec_alloc(state)) == NULL) {                           \
        Py_DECREF(a);                                                    \
        Py_DECREF(b);                                                    \
        Py_DECREF(c);                                                    \
        return NULL;                                                     \
    }                                                                    \
                                                                         \
    MPDFUNC(MPD(result), MPD(a), MPD(b), MPD(c), CTX(context), &status); \
    Py_DECREF(a);                                                        \
    Py_DECREF(b);                                                        \
    Py_DECREF(c);                                                        \
    if (dec_addstatus(context, status)) {                                \
        Py_DECREF(result);                                               \
        return NULL;                                                     \
    }                                                                    \
                                                                         \
    return result;                                                       \
}

/**********************************************/
/*          Number methods                    */
/**********************************************/

/* Instantiate nm_mpd_q* slot functions for the number protocol. */
Dec_UnaryNumberMethod(mpd_qminus)
Dec_UnaryNumberMethod(mpd_qplus)
Dec_UnaryNumberMethod(mpd_qabs)

Dec_BinaryNumberMethod(mpd_qadd)
Dec_BinaryNumberMethod(mpd_qsub)
Dec_BinaryNumberMethod(mpd_qmul)
Dec_BinaryNumberMethod(mpd_qdiv)
Dec_BinaryNumberMethod(mpd_qrem)
Dec_BinaryNumberMethod(mpd_qdivint)

/* nb_int: truncate toward zero. */
static PyObject *
nm_dec_as_long(PyObject *dec)
{
    PyObject *context;

    decimal_state *state = get_module_state_by_def(Py_TYPE(dec));
    CURRENT_CONTEXT(state, context);
    return dec_as_long(dec, context, MPD_ROUND_DOWN);
}

/* nb_bool: any nonzero Decimal (including NaN/Infinity) is truthy. */
static int
nm_nonzero(PyObject *v)
{
    return !mpd_iszero(MPD(v));
}

/* nb_divmod: return the (quotient, remainder) pair. */
static PyObject *
nm_mpd_qdivmod(PyObject *v, PyObject *w)
{
    PyObject *a, *b;
    PyObject *q, *r;
    PyObject *context;
    uint32_t status = 0;
    PyObject *ret;

    decimal_state *state = find_state_left_or_right(v, w);
    CURRENT_CONTEXT(state, context);
    CONVERT_BINOP(&a, &b, v, w, context);

    q = dec_alloc(state);
    if (q == NULL) {
        Py_DECREF(a);
        Py_DECREF(b);
        return NULL;
    }
    r = dec_alloc(state);
    if (r == NULL) {
        Py_DECREF(a);
        Py_DECREF(b);
        Py_DECREF(q);
        return NULL;
    }

    mpd_qdivmod(MPD(q), MPD(r), MPD(a), MPD(b), CTX(context), &status);
    Py_DECREF(a);
    Py_DECREF(b);
    if (dec_addstatus(context, status)) {
        Py_DECREF(r);
        Py_DECREF(q);
        return NULL;
    }

    ret = PyTuple_Pack(2, q, r);
    Py_DECREF(r);
    Py_DECREF(q);
    return ret;
}

/* nb_power: pow(base, exp) or three-argument pow(base, exp, mod). */
static PyObject *
nm_mpd_qpow(PyObject *base, PyObject *exp, PyObject *mod)
{
    PyObject *a, *b, *c = NULL;
    PyObject *result;
    PyObject *context;
    uint32_t status = 0;

    decimal_state *state = find_state_ternary(base, exp, mod);
    CURRENT_CONTEXT(state, context);
    CONVERT_BINOP(&a, &b, base, exp, context);

    if (mod != Py_None) {
        /* On failure, c is NULL or NotImplemented and is returned as-is. */
        if (!convert_op(NOT_IMPL, &c, mod, context)) {
            Py_DECREF(a);
            Py_DECREF(b);
            return c;
        }
    }

    result = dec_alloc(state);
    if (result == NULL) {
        Py_DECREF(a);
        Py_DECREF(b);
        Py_XDECREF(c);
        return NULL;
    }

    if (c == NULL) {
        mpd_qpow(MPD(result), MPD(a), MPD(b), CTX(context), &status);
    }
    else {
        mpd_qpowmod(MPD(result), MPD(a), MPD(b), MPD(c), CTX(context), &status);
        Py_DECREF(c);
    }
    Py_DECREF(a);
    Py_DECREF(b);
    if (dec_addstatus(context, status)) {
        Py_DECREF(result);
        return NULL;
    }

    return result;
}

/******************************************************************************/
/*                             Decimal Methods                                */
/******************************************************************************/

/* Unary arithmetic functions, optional context arg */

/*[clinic input]
_decimal.Decimal.exp

    cls: defining_class
    context: object = None

Return the value of the (natural) exponential function e**x.

The function always uses the ROUND_HALF_EVEN mode and the result
is correctly rounded.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_exp_impl(PyObject *self, PyTypeObject *cls,
                          PyObject *context)
/*[clinic end generated code: output=40317012aedbaeac input=84919aad3dabda08]*/
Dec_UnaryFuncVA(mpd_qexp)

/*[clinic input]
_decimal.Decimal.ln = _decimal.Decimal.exp

Return the natural (base e) logarithm of the operand.

The function always uses the ROUND_HALF_EVEN mode and the result
is correctly rounded.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_ln_impl(PyObject *self, PyTypeObject *cls,
                         PyObject *context)
/*[clinic end generated code: output=e8f9e81cac38e5dc input=d353c51ec00d1cff]*/
Dec_UnaryFuncVA(mpd_qln)

/*[clinic input]
_decimal.Decimal.log10 = _decimal.Decimal.exp

Return the base ten logarithm of the operand.

The function always uses the ROUND_HALF_EVEN mode and the result
is correctly rounded.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_log10_impl(PyObject *self, PyTypeObject *cls,
                            PyObject *context)
/*[clinic end generated code: output=00b3255648135c95 input=48a6be60154c0b46]*/
Dec_UnaryFuncVA(mpd_qlog10)

/*[clinic input]
_decimal.Decimal.next_minus = _decimal.Decimal.exp

Returns the largest representable number smaller than itself.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_next_minus_impl(PyObject *self, PyTypeObject *cls,
                                 PyObject *context)
/*[clinic end generated code: output=a187a55e6976b572 input=666b348f71e6c090]*/
Dec_UnaryFuncVA(mpd_qnext_minus)

/*[clinic input]
_decimal.Decimal.next_plus = _decimal.Decimal.exp

Returns the smallest representable number larger than itself.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_next_plus_impl(PyObject *self, PyTypeObject *cls,
                                PyObject *context)
/*[clinic end generated code: output=13737d41714e320e input=04e105060ad1fa15]*/
Dec_UnaryFuncVA(mpd_qnext_plus)

/*[clinic input]
_decimal.Decimal.normalize = _decimal.Decimal.exp

Normalize the number by stripping trailing 0s

This also change anything equal to 0 to 0e0.  Used
for producing canonical values for members of an equivalence class.
For example, Decimal('32.100') and Decimal('0.321000e+2') both normalize
to the equivalent value Decimal('32.1').
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_normalize_impl(PyObject *self, PyTypeObject *cls,
                                PyObject *context)
/*[clinic end generated code: output=32c4c0d13fe33fb9 input=d5ee63acd904d4de]*/
Dec_UnaryFuncVA(mpd_qreduce)

/*[clinic input]
_decimal.Decimal.sqrt = _decimal.Decimal.exp

Return the square root of the argument to full precision.

The result is correctly rounded using the ROUND_HALF_EVEN rounding mode.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_sqrt_impl(PyObject *self, PyTypeObject *cls,
                           PyObject *context)
/*[clinic end generated code: output=deb1280077b5e586 input=3a76afbd39dc20b9]*/
Dec_UnaryFuncVA(mpd_qsqrt)

/* Binary arithmetic functions, optional context arg */

/*[clinic input]
_decimal.Decimal.compare

    cls: defining_class
    other: object
    context: object = None

Compare self to other.

Return a decimal value:

    a or b is a NaN ==> Decimal('NaN')
    a < b           ==> Decimal('-1')
    a == b          ==> Decimal('0')
    a > b           ==> Decimal('1')
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_compare_impl(PyObject *self, PyTypeObject *cls,
                              PyObject *other, PyObject *context)
/*[clinic end generated code: output=a4a1d383ec192cfa input=d18a02bb8083e92a]*/
Dec_BinaryFuncVA(mpd_qcompare)

/*[clinic input]
_decimal.Decimal.compare_signal = _decimal.Decimal.compare

Identical to compare, except that all NaNs signal.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_compare_signal_impl(PyObject *self, PyTypeObject *cls,
                                     PyObject *other, PyObject *context)
/*[clinic end generated code: output=22f757371fd4167b input=a52a39d1c6fc369d]*/
Dec_BinaryFuncVA(mpd_qcompare_signal)

/*[clinic input]
_decimal.Decimal.max = _decimal.Decimal.compare

Maximum of self and other.

If one operand is a quiet NaN and the other is numeric, the numeric
operand is returned.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_max_impl(PyObject *self, PyTypeObject *cls,
                          PyObject *other, PyObject *context)
/*[clinic end generated code: output=d3d12db9815869e5 input=2ae2582f551296d8]*/
Dec_BinaryFuncVA(mpd_qmax)

/*[clinic input]
_decimal.Decimal.max_mag = _decimal.Decimal.compare

As the max() method, but compares the absolute values of the operands.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_max_mag_impl(PyObject *self, PyTypeObject *cls,
                              PyObject *other, PyObject *context)
/*[clinic end generated code: output=f71f2c27d9bc7cac input=88b105e66cf138c5]*/
Dec_BinaryFuncVA(mpd_qmax_mag)

/*[clinic input]
_decimal.Decimal.min = _decimal.Decimal.compare

Minimum of self and other.

If one operand is a quiet NaN and the other is numeric, the numeric
operand is returned.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_min_impl(PyObject *self, PyTypeObject *cls,
                          PyObject *other, PyObject *context)
/*[clinic end generated code: output=c5620344ae5f3dd1 input=2a70f2c087c418c9]*/
Dec_BinaryFuncVA(mpd_qmin)

/*[clinic input]
_decimal.Decimal.min_mag = _decimal.Decimal.compare

As the min() method, but compares the absolute values of the operands.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_min_mag_impl(PyObject *self, PyTypeObject *cls,
                              PyObject *other, PyObject *context)
/*[clinic end generated code: output=018562ad1c22aae3 input=351fa3c0e592746a]*/
Dec_BinaryFuncVA(mpd_qmin_mag)

/*[clinic input]
_decimal.Decimal.next_toward = _decimal.Decimal.compare

Returns the number closest to self, in the direction towards other.

If the two operands are unequal, return the number closest to the first
operand in the direction of the second operand.  If both operands are
numerically equal, return a copy of the first operand with the sign set
to be the same as the sign of the second operand.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_next_toward_impl(PyObject *self, PyTypeObject *cls,
                                  PyObject *other, PyObject *context)
/*[clinic end generated code: output=71d879bca8bc1019 input=fdf0091ea6e9e416]*/
Dec_BinaryFuncVA(mpd_qnext_toward)

/*[clinic input]
_decimal.Decimal.remainder_near = _decimal.Decimal.compare

Return the remainder from dividing self by other.

This differs from self % other in that the sign of the remainder is
chosen so as to minimize its absolute value. More precisely, the return
value is self - n * other where n is the integer nearest to the exact
value of self / other, and if two integers are equally near then the
even one is chosen.

If the result is zero then its sign will be the sign of self.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_remainder_near_impl(PyObject *self, PyTypeObject *cls,
                                     PyObject *other, PyObject *context)
/*[clinic end generated code: output=d3fbb4985f2077fa input=eb5a8dfe3470b794]*/
Dec_BinaryFuncVA(mpd_qrem_near)

/* Ternary arithmetic functions, optional context arg */

/*[clinic input]
_decimal.Decimal.fma

    cls: defining_class
    other: object
    third: object
    context: object = None

Fused multiply-add.

Return self*other+third with no rounding of the intermediate
product self*other.

    >>> Decimal(2).fma(3, 5)
    Decimal('11')
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_fma_impl(PyObject *self, PyTypeObject *cls,
                          PyObject *other, PyObject *third,
                          PyObject *context)
/*[clinic end generated code: output=db49a777e85b71e4 input=2104c001f6077c35]*/
Dec_TernaryFuncVA(mpd_qfma)

/* Boolean functions, no context arg */

/*[clinic input]
_decimal.Decimal.is_canonical

Return True if the argument is canonical and False otherwise.

Currently, a Decimal instance is always canonical, so this operation
always returns True.
[clinic start generated code]*/ static PyObject * _decimal_Decimal_is_canonical_impl(PyObject *self) /*[clinic end generated code: output=b29668684f45443e input=b3b3e6878ccf40b8]*/ Dec_BoolFunc(mpd_iscanonical) /*[clinic input] _decimal.Decimal.is_finite Return True if the argument is a finite number, and False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_finite_impl(PyObject *self) /*[clinic end generated code: output=537306fbfc9131f8 input=e9b8b5866704bae6]*/ Dec_BoolFunc(mpd_isfinite) /*[clinic input] _decimal.Decimal.is_infinite Return True if the argument is infinite, and False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_infinite_impl(PyObject *self) /*[clinic end generated code: output=31b775ff28f05ce2 input=8f3937a790ee4ec2]*/ Dec_BoolFunc(mpd_isinfinite) /*[clinic input] _decimal.Decimal.is_nan Return True if the argument is a (quiet or signaling) NaN, else False. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_nan_impl(PyObject *self) /*[clinic end generated code: output=b704e8b49a164388 input=795e5dac85976994]*/ Dec_BoolFunc(mpd_isnan) /*[clinic input] _decimal.Decimal.is_qnan Return True if the argument is a quiet NaN, and False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_qnan_impl(PyObject *self) /*[clinic end generated code: output=85b5241f43798376 input=00485f3c3cfae0af]*/ Dec_BoolFunc(mpd_isqnan) /*[clinic input] _decimal.Decimal.is_snan Return True if the argument is a signaling NaN and False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_snan_impl(PyObject *self) /*[clinic end generated code: output=50de9ec6507e4a4f input=f3b0f8592c921879]*/ Dec_BoolFunc(mpd_issnan) /*[clinic input] _decimal.Decimal.is_signed Return True if the argument has a negative sign and False otherwise. Note that both zeros and NaNs can carry signs. 
[clinic start generated code]*/ static PyObject * _decimal_Decimal_is_signed_impl(PyObject *self) /*[clinic end generated code: output=8ec7bc85d8e755e4 input=97c3437ab5dffecc]*/ Dec_BoolFunc(mpd_issigned) /*[clinic input] _decimal.Decimal.is_zero Return True if the argument is a zero and False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_zero_impl(PyObject *self) /*[clinic end generated code: output=2d87ea1b15879112 input=ae616674cd050a51]*/ Dec_BoolFunc(mpd_iszero) /* Boolean functions, optional context arg */ /*[clinic input] _decimal.Decimal.is_normal = _decimal.Decimal.exp Return True if the argument is a normal number and False otherwise. Normal number is a finite nonzero number, which is not subnormal. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_normal_impl(PyObject *self, PyTypeObject *cls, PyObject *context) /*[clinic end generated code: output=92a3878e293758d4 input=9afe43b9db9f4818]*/ Dec_BoolFuncVA(mpd_isnormal) /*[clinic input] _decimal.Decimal.is_subnormal = _decimal.Decimal.exp Return True if the argument is subnormal, and False otherwise. A number is subnormal if it is non-zero, finite, and has an adjusted exponent less than Emin. [clinic start generated code]*/ static PyObject * _decimal_Decimal_is_subnormal_impl(PyObject *self, PyTypeObject *cls, PyObject *context) /*[clinic end generated code: output=1404c04d980ebc07 input=11839c122c185b8b]*/ Dec_BoolFuncVA(mpd_issubnormal) /* Unary functions, no context arg */ /*[clinic input] _decimal.Decimal.adjusted Return the adjusted exponent (exp + digits - 1) of the number. 
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_adjusted_impl(PyObject *self)
/*[clinic end generated code: output=21ea2c9f23994c52 input=8ba2029d8d906b18]*/
{
    mpd_ssize_t retval;

    /* Special values (NaN, sNaN, Infinity) have no meaningful adjusted
       exponent; report 0 for them. */
    if (mpd_isspecial(MPD(self))) {
        retval = 0;
    }
    else {
        retval = mpd_adjexp(MPD(self));
    }

    return PyLong_FromSsize_t(retval);
}

/*[clinic input]
_decimal.Decimal.canonical

Return the canonical encoding of the argument.

Currently, the encoding of a Decimal instance is always canonical, so
this operation returns its argument unchanged.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_canonical_impl(PyObject *self)
/*[clinic end generated code: output=3cbeb47d91e6da2d input=8a4719d14c52d521]*/
{
    /* Decimals are always stored canonically: identity operation.
       Safe to return self because Decimal instances are immutable. */
    return Py_NewRef(self);
}

/*[clinic input]
_decimal.Decimal.conjugate

Return self.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_conjugate_impl(PyObject *self)
/*[clinic end generated code: output=9a37bf633f25a291 input=c7179975ef74fd84]*/
{
    /* A real number is its own complex conjugate. */
    return Py_NewRef(self);
}

/* Create a new Decimal with the value 10 (the radix of the Decimal
   arithmetic).  Return NULL with MemoryError set on allocation failure. */
static inline PyObject *
_dec_mpd_radix(decimal_state *state)
{
    PyObject *result;

    result = dec_alloc(state);
    if (result == NULL) {
        return NULL;
    }

    /* sign=positive, coefficient=10, exponent=0 */
    _dec_settriple(result, MPD_POS, 10, 0);
    return result;
}

/*[clinic input]
_decimal.Decimal.radix

    cls: defining_class

Return Decimal(10).

This is the radix (base) in which the Decimal class does all
its arithmetic. Included for compatibility with the specification.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_radix_impl(PyObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=40a3bc7ec3d99228 input=b0d4cb9f870bbac1]*/
{
    decimal_state *state = PyType_GetModuleState(cls);
    return _dec_mpd_radix(state);
}

/*[clinic input]
_decimal.Decimal.copy_abs

    cls: defining_class

Return the absolute value of the argument.

This operation is unaffected by context and is quiet: no flags are
changed and no rounding is performed.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_copy_abs_impl(PyObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=081cb7fb4230676e input=676d7c62b1795512]*/
{
    PyObject *result;
    uint32_t status = 0;

    decimal_state *state = PyType_GetModuleState(cls);
    if ((result = dec_alloc(state)) == NULL) {
        return NULL;
    }

    /* Quiet operation: the only possible failure is running out of
       memory while copying the coefficient, so only MPD_Malloc_error
       needs to be checked (no context flags are raised). */
    mpd_qcopy_abs(MPD(result), MPD(self), &status);
    if (status & MPD_Malloc_error) {
        Py_DECREF(result);
        PyErr_NoMemory();
        return NULL;
    }

    return result;
}

/*[clinic input]
_decimal.Decimal.copy_negate = _decimal.Decimal.copy_abs

Return the negation of the argument.

This operation is unaffected by context and is quiet: no flags are
changed and no rounding is performed.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_copy_negate_impl(PyObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=04fed82c17d4e28b input=23f41ee8899f3891]*/
{
    PyObject *result;
    uint32_t status = 0;

    decimal_state *state = PyType_GetModuleState(cls);
    if ((result = dec_alloc(state)) == NULL) {
        return NULL;
    }

    /* Quiet operation: see copy_abs above for the error handling. */
    mpd_qcopy_negate(MPD(result), MPD(self), &status);
    if (status & MPD_Malloc_error) {
        Py_DECREF(result);
        PyErr_NoMemory();
        return NULL;
    }

    return result;
}

/* Unary functions, optional context arg */

/*[clinic input]
_decimal.Decimal.logical_invert = _decimal.Decimal.exp

Invert all its digits.

The self must be logical number.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_logical_invert_impl(PyObject *self, PyTypeObject *cls,
                                     PyObject *context)
/*[clinic end generated code: output=c626ed4b104a97b7 input=7158d5b525417955]*/
Dec_UnaryFuncVA(mpd_qinvert)

/*[clinic input]
_decimal.Decimal.logb = _decimal.Decimal.exp

Return the adjusted exponent of the operand as a Decimal instance.

If the operand is a zero, then Decimal('-Infinity') is returned and the
DivisionByZero condition is raised. If the operand is an infinity then
Decimal('Infinity') is returned.
[clinic start generated code]*/ static PyObject * _decimal_Decimal_logb_impl(PyObject *self, PyTypeObject *cls, PyObject *context) /*[clinic end generated code: output=36b0bda09e934245 input=a8df027d1b8a2b17]*/ Dec_UnaryFuncVA(mpd_qlogb) /*[clinic input] _decimal.Decimal.number_class = _decimal.Decimal.exp Return a string describing the class of the operand. The returned value is one of the following ten strings: * '-Infinity', indicating that the operand is negative infinity. * '-Normal', indicating that the operand is a negative normal number. * '-Subnormal', indicating that the operand is negative and subnormal. * '-Zero', indicating that the operand is a negative zero. * '+Zero', indicating that the operand is a positive zero. * '+Subnormal', indicating that the operand is positive and subnormal. * '+Normal', indicating that the operand is a positive normal number. * '+Infinity', indicating that the operand is positive infinity. * 'NaN', indicating that the operand is a quiet NaN (Not a Number). * 'sNaN', indicating that the operand is a signaling NaN. [clinic start generated code]*/ static PyObject * _decimal_Decimal_number_class_impl(PyObject *self, PyTypeObject *cls, PyObject *context) /*[clinic end generated code: output=1ac82412e0849c52 input=447095d2677fa0ca]*/ { const char *cp; decimal_state *state = PyType_GetModuleState(cls); CONTEXT_CHECK_VA(state, context); cp = mpd_class(MPD(self), CTX(context)); return PyUnicode_FromString(cp); } /*[clinic input] _decimal.Decimal.to_eng_string = _decimal.Decimal.exp Convert to an engineering-type string. Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. For example, Decimal('123E+1') is converted to Decimal('1.23E+3'). The value of context.capitals determines whether the exponent sign is lower or upper case. Otherwise, the context does not affect the operation. 
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_to_eng_string_impl(PyObject *self, PyTypeObject *cls,
                                    PyObject *context)
/*[clinic end generated code: output=901f128d437ae5c0 input=b2cb7e01e268e45d]*/
{
    PyObject *result;
    mpd_ssize_t size;
    char *s;

    decimal_state *state = PyType_GetModuleState(cls);
    CONTEXT_CHECK_VA(state, context);

    /* mpd_to_eng_size() allocates the string; CtxCaps() selects an
       upper- or lower-case exponent sign per context.capitals.  A
       negative size means allocation failed. */
    size = mpd_to_eng_size(&s, MPD(self), CtxCaps(context));
    if (size < 0) {
        PyErr_NoMemory();
        return NULL;
    }

    result = unicode_fromascii(s, size);
    /* The buffer is owned by this function and must be released with
       libmpdec's allocator, not PyMem_Free(). */
    mpd_free(s);

    return result;
}

/* Binary functions, optional context arg for conversion errors */

/*[clinic input]
_decimal.Decimal.compare_total = _decimal.Decimal.compare

Compare two operands using their abstract representation.

Similar to the compare() method, but the result gives a total ordering on
Decimal instances.  Two Decimal instances with the same numeric value but
different representations compare unequal in this ordering:

    >>> Decimal('12.0').compare_total(Decimal('12'))
    Decimal('-1')

Quiet and signaling NaNs are also included in the total ordering. The result
of this function is Decimal('0') if both operands have the same
representation, Decimal('-1') if the first operand is lower in the total order
than the second, and Decimal('1') if the first operand is higher in the total
order than the second operand. See the specification for details of the total
order.

This operation is unaffected by context and is quiet: no flags are changed
and no rounding is performed. As an exception, the C version may raise
InvalidOperation if the second operand cannot be converted exactly.
[clinic start generated code]*/ static PyObject * _decimal_Decimal_compare_total_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=83649010bad7815f input=6f3111ec5fdbf3c1]*/ Dec_BinaryFuncVA_NO_CTX(mpd_compare_total) /*[clinic input] _decimal.Decimal.compare_total_mag = _decimal.Decimal.compare As compare_total(), but ignores the sign of each operand. x.compare_total_mag(y) is equivalent to x.copy_abs().compare_total(y.copy_abs()). This operation is unaffected by context and is quiet: no flags are changed and no rounding is performed. As an exception, the C version may raise InvalidOperation if the second operand cannot be converted exactly. [clinic start generated code]*/ static PyObject * _decimal_Decimal_compare_total_mag_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=b99c924cafb5f0e3 input=eba17c4c24eb2833]*/ Dec_BinaryFuncVA_NO_CTX(mpd_compare_total_mag) /*[clinic input] _decimal.Decimal.copy_sign = _decimal.Decimal.compare Return a copy of *self* with the sign of *other*. For example: >>> Decimal('2.3').copy_sign(Decimal('-1.5')) Decimal('-2.3') This operation is unaffected by context and is quiet: no flags are changed and no rounding is performed. As an exception, the C version may raise InvalidOperation if the second operand cannot be converted exactly. 
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_copy_sign_impl(PyObject *self, PyTypeObject *cls,
                                PyObject *other, PyObject *context)
/*[clinic end generated code: output=e4c8f884f4d75801 input=51ed9e4691e2249e]*/
{
    PyObject *a, *b;
    PyObject *result;
    uint32_t status = 0;

    decimal_state *state = PyType_GetModuleState(cls);
    CONTEXT_CHECK_VA(state, context);
    /* The context is used only for converting 'other'; conversion
       failure raises InvalidOperation. */
    CONVERT_BINOP_RAISE(&a, &b, self, other, context);

    result = dec_alloc(state);
    if (result == NULL) {
        Py_DECREF(a);
        Py_DECREF(b);
        return NULL;
    }

    mpd_qcopy_sign(MPD(result), MPD(a), MPD(b), &status);
    Py_DECREF(a);
    Py_DECREF(b);
    if (dec_addstatus(context, status)) {
        Py_DECREF(result);
        return NULL;
    }

    return result;
}

/*[clinic input]
_decimal.Decimal.same_quantum = _decimal.Decimal.compare

Test whether self and other have the same exponent or both are NaN.

This operation is unaffected by context and is quiet: no flags are changed
and no rounding is performed. As an exception, the C version may raise
InvalidOperation if the second operand cannot be converted exactly.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal_same_quantum_impl(PyObject *self, PyTypeObject *cls,
                                   PyObject *other, PyObject *context)
/*[clinic end generated code: output=7c757edb0c263721 input=8339415fa359e7df]*/
{
    PyObject *a, *b;
    PyObject *result;

    decimal_state *state = PyType_GetModuleState(cls);
    CONTEXT_CHECK_VA(state, context);
    CONVERT_BINOP_RAISE(&a, &b, self, other, context);

    /* mpd_same_quantum() cannot fail once both operands are Decimals,
       so no status word is needed here. */
    result = mpd_same_quantum(MPD(a), MPD(b)) ? incr_true() : incr_false();
    Py_DECREF(a);
    Py_DECREF(b);

    return result;
}

/* Binary functions, optional context arg */

/*[clinic input]
_decimal.Decimal.logical_and = _decimal.Decimal.compare

Applies an 'and' operation between self and other's digits.

Both self and other must be logical numbers.
[clinic start generated code]*/ static PyObject * _decimal_Decimal_logical_and_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=9a4cbb74c180b0bb input=f22460f1285782d2]*/ Dec_BinaryFuncVA(mpd_qand) /*[clinic input] _decimal.Decimal.logical_or = _decimal.Decimal.compare Applies an 'or' operation between self and other's digits. Both self and other must be logical numbers. [clinic start generated code]*/ static PyObject * _decimal_Decimal_logical_or_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=063c4de18dc41ecb input=b5afa1e1fdebdfce]*/ Dec_BinaryFuncVA(mpd_qor) /*[clinic input] _decimal.Decimal.logical_xor = _decimal.Decimal.compare Applies an 'xor' operation between self and other's digits. Both self and other must be logical numbers. [clinic start generated code]*/ static PyObject * _decimal_Decimal_logical_xor_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=829b09cb49926ad7 input=84d722ada08a2da7]*/ Dec_BinaryFuncVA(mpd_qxor) /*[clinic input] _decimal.Decimal.rotate = _decimal.Decimal.compare Returns a rotated copy of self's digits, value-of-other times. The second operand must be an integer in the range -precision through precision. The absolute value of the second operand gives the number of places to rotate. If the second operand is positive then rotation is to the left; otherwise rotation is to the right. The coefficient of the first operand is padded on the left with zeros to length precision if necessary. The sign and exponent of the first operand are unchanged. 
[clinic start generated code]*/ static PyObject * _decimal_Decimal_rotate_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=09f2737082882b83 input=cde7b032eac43f0b]*/ Dec_BinaryFuncVA(mpd_qrotate) /*[clinic input] _decimal.Decimal.scaleb = _decimal.Decimal.compare Return the first operand with the exponent adjusted the second. Equivalently, return the first operand multiplied by 10**other. The second operand must be an integer. [clinic start generated code]*/ static PyObject * _decimal_Decimal_scaleb_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=ae8730536c9f2d30 input=7f29f83278d05f83]*/ Dec_BinaryFuncVA(mpd_qscaleb) /*[clinic input] _decimal.Decimal.shift = _decimal.Decimal.compare Returns a shifted copy of self's digits, value-of-other times. The second operand must be an integer in the range -precision through precision. The absolute value of the second operand gives the number of places to shift. If the second operand is positive, then the shift is to the left; otherwise the shift is to the right. Digits shifted into the coefficient are zeros. The sign and exponent of the first operand are unchanged. [clinic start generated code]*/ static PyObject * _decimal_Decimal_shift_impl(PyObject *self, PyTypeObject *cls, PyObject *other, PyObject *context) /*[clinic end generated code: output=82e061a0d9ecc4f5 input=501759c2522cb78e]*/ Dec_BinaryFuncVA(mpd_qshift) /*[clinic input] _decimal.Decimal.quantize cls: defining_class exp as w: object rounding: object = None context: object = None Quantize *self* so its exponent is the same as that of *exp*. Return a value equal to *self* after rounding, with the exponent of *exp*. 
>>> Decimal('1.41421356').quantize(Decimal('1.000')) Decimal('1.414') Unlike other operations, if the length of the coefficient after the quantize operation would be greater than precision, then an InvalidOperation is signaled. This guarantees that, unless there is an error condition, the quantized exponent is always equal to that of the right-hand operand. Also unlike other operations, quantize never signals Underflow, even if the result is subnormal and inexact. If the exponent of the second operand is larger than that of the first, then rounding may be necessary. In this case, the rounding mode is determined by the rounding argument if given, else by the given context argument; if neither argument is given, the rounding mode of the current thread's context is used. [clinic start generated code]*/ static PyObject * _decimal_Decimal_quantize_impl(PyObject *self, PyTypeObject *cls, PyObject *w, PyObject *rounding, PyObject *context) /*[clinic end generated code: output=fc51edf458559913 input=1166e6311e047b74]*/ { PyObject *a, *b; PyObject *result; uint32_t status = 0; mpd_context_t workctx; decimal_state *state = PyType_GetModuleState(cls); CONTEXT_CHECK_VA(state, context); workctx = *CTX(context); if (rounding != Py_None) { int round = getround(state, rounding); if (round < 0) { return NULL; } if (!mpd_qsetround(&workctx, round)) { INTERNAL_ERROR_PTR("dec_mpd_qquantize"); /* GCOV_NOT_REACHED */ } } CONVERT_BINOP_RAISE(&a, &b, self, w, context); result = dec_alloc(state); if (result == NULL) { Py_DECREF(a); Py_DECREF(b); return NULL; } mpd_qquantize(MPD(result), MPD(a), MPD(b), &workctx, &status); Py_DECREF(a); Py_DECREF(b); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } return result; } /* Special methods */ static PyObject * dec_richcompare(PyObject *v, PyObject *w, int op) { PyObject *a; PyObject *b; PyObject *context; uint32_t status = 0; int a_issnan, b_issnan; int r; decimal_state *state = find_state_left_or_right(v, w); #ifdef 
Py_DEBUG assert(PyDec_Check(state, v)); #endif CURRENT_CONTEXT(state, context); CONVERT_BINOP_CMP(&a, &b, v, w, op, context); a_issnan = mpd_issnan(MPD(a)); b_issnan = mpd_issnan(MPD(b)); r = mpd_qcmp(MPD(a), MPD(b), &status); Py_DECREF(a); Py_DECREF(b); if (r == INT_MAX) { /* sNaNs or op={le,ge,lt,gt} always signal. */ if (a_issnan || b_issnan || (op != Py_EQ && op != Py_NE)) { if (dec_addstatus(context, status)) { return NULL; } } /* qNaN comparison with op={eq,ne} or comparison * with InvalidOperation disabled. */ return (op == Py_NE) ? incr_true() : incr_false(); } switch (op) { case Py_EQ: r = (r == 0); break; case Py_NE: r = (r != 0); break; case Py_LE: r = (r <= 0); break; case Py_GE: r = (r >= 0); break; case Py_LT: r = (r == -1); break; case Py_GT: r = (r == 1); break; } return PyBool_FromLong(r); } /*[clinic input] _decimal.Decimal.__ceil__ cls: defining_class Return the ceiling as an Integral. [clinic start generated code]*/ static PyObject * _decimal_Decimal___ceil___impl(PyObject *self, PyTypeObject *cls) /*[clinic end generated code: output=d986ebf9aadbf9fe input=a8e0b87897706816]*/ { PyObject *context; decimal_state *state = PyType_GetModuleState(cls); CURRENT_CONTEXT(state, context); return dec_as_long(self, context, MPD_ROUND_CEILING); } /*[clinic input] _decimal.Decimal.__complex__ Convert this value to exact type complex. 
[clinic start generated code]*/

static PyObject *
_decimal_Decimal___complex___impl(PyObject *self)
/*[clinic end generated code: output=c9b5b4a9fdebc912 input=6b11c6f20af7061a]*/
{
    PyObject *f;
    double x;

    /* Convert via float: complex(Decimal) == complex(float(Decimal), 0). */
    f = PyDec_AsFloat(self);
    if (f == NULL) {
        return NULL;
    }

    x = PyFloat_AsDouble(f);
    Py_DECREF(f);
    /* -1.0 is PyFloat_AsDouble()'s error value; disambiguate from a
       legitimate result of -1.0 by checking for a pending exception. */
    if (x == -1.0 && PyErr_Occurred()) {
        return NULL;
    }

    return PyComplex_FromDoubles(x, 0);
}

/*[clinic input]
_decimal.Decimal.__copy__

[clinic start generated code]*/

static PyObject *
_decimal_Decimal___copy___impl(PyObject *self)
/*[clinic end generated code: output=8eb3656c0250762b input=3dfd30a3e1493c01]*/
{
    /* Decimal is immutable: copying is the identity. */
    return Py_NewRef(self);
}

/*[clinic input]
_decimal.Decimal.__deepcopy__

    memo: object
    /

[clinic start generated code]*/

static PyObject *
_decimal_Decimal___deepcopy__(PyObject *self, PyObject *memo)
/*[clinic end generated code: output=988fb34e0136b376 input=f95598c6f43233aa]*/
{
    /* Decimal is immutable, so the memo dict can be ignored. */
    return Py_NewRef(self);
}

/*[clinic input]
_decimal.Decimal.__floor__ = _decimal.Decimal.__ceil__

Return the floor as an Integral.
[clinic start generated code]*/

static PyObject *
_decimal_Decimal___floor___impl(PyObject *self, PyTypeObject *cls)
/*[clinic end generated code: output=e239a2f7f6514c12 input=dcc37aeceb0efb8d]*/
{
    PyObject *context;

    decimal_state *state = PyType_GetModuleState(cls);
    CURRENT_CONTEXT(state, context);
    return dec_as_long(self, context, MPD_ROUND_FLOOR);
}

/* Always uses the module context */
/* Compute the numeric hash of v:
 *
 *     hash(v) == sign(v) * int(v) * 10**exp(v)  (mod 2**PyHASH_BITS - 1)
 *
 * where int(v) is the coefficient and exp(v) the exponent.  Working
 * modulo the Mersenne prime p = 2**PyHASH_BITS - 1 keeps the hash equal
 * to that of any numerically equal int/float/Fraction.
 * Returns -1 with an exception set on error (TypeError for sNaN,
 * MemoryError on allocation failure). */
static Py_hash_t
_dec_hash(PyDecObject *v)
{
#if defined(CONFIG_64) && PyHASH_BITS == 61
    /* 2**61 - 1 */
    mpd_uint_t p_data[1] = {2305843009213693951ULL};
    mpd_t p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 19, 1, 1, p_data};
    /* Inverse of 10 modulo p */
    mpd_uint_t inv10_p_data[1] = {2075258708292324556ULL};
    mpd_t inv10_p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 19, 1, 1,
                     inv10_p_data};
#elif defined(CONFIG_32) && PyHASH_BITS == 31
    /* 2**31 - 1 */
    mpd_uint_t p_data[2] = {147483647UL, 2};
    mpd_t p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 10, 2, 2, p_data};
    /* Inverse of 10 modulo p */
    mpd_uint_t inv10_p_data[2] = {503238553UL, 1};
    mpd_t inv10_p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 10, 2, 2,
                     inv10_p_data};
#else
    #error "No valid combination of CONFIG_64, CONFIG_32 and PyHASH_BITS"
#endif
    const Py_hash_t py_hash_inf = 314159;
    mpd_uint_t ten_data[1] = {10};
    mpd_t ten = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 2, 1, 1, ten_data};
    Py_hash_t result;
    mpd_t *exp_hash = NULL;
    mpd_t *tmp = NULL;
    mpd_ssize_t exp;
    uint32_t status = 0;
    mpd_context_t maxctx;

    if (mpd_isspecial(MPD(v))) {
        if (mpd_issnan(MPD(v))) {
            /* Signaling NaNs are unhashable by specification. */
            PyErr_SetString(PyExc_TypeError,
                            "Cannot hash a signaling NaN value");
            return -1;
        }
        else if (mpd_isnan(MPD(v))) {
            /* Quiet NaNs hash by object identity. */
            return PyObject_GenericHash((PyObject *)v);
        }
        else {
            /* +/-Infinity: same fixed values as float infinities. */
            return py_hash_inf * mpd_arith_sign(MPD(v));
        }
    }

    mpd_maxcontext(&maxctx);
    exp_hash = mpd_qnew();
    if (exp_hash == NULL) {
        goto malloc_error;
    }
    tmp = mpd_qnew();
    if (tmp == NULL) {
        goto malloc_error;
    }

    /*
     * exp(v): exponent of v
     * int(v): coefficient of v
     */
    exp = MPD(v)->exp;
    if (exp >= 0) {
        /* 10**exp(v) % p */
        mpd_qsset_ssize(tmp, exp, &maxctx, &status);
        mpd_qpowmod(exp_hash, &ten, tmp, &p, &maxctx, &status);
    }
    else {
        /* inv10_p**(-exp(v)) % p -- negative exponents use the
           precomputed modular inverse of 10. */
        mpd_qsset_ssize(tmp, -exp, &maxctx, &status);
        mpd_qpowmod(exp_hash, &inv10_p, tmp, &p, &maxctx, &status);
    }

    /* hash = (int(v) * exp_hash) % p */
    if (!mpd_qcopy(tmp, MPD(v), &status)) {
        goto malloc_error;
    }
    /* Strip exponent and sign: tmp now holds the bare coefficient. */
    tmp->exp = 0;
    mpd_set_positive(tmp);

    /* Headroom above the libmpdec maxima so the product below cannot
       overflow the context limits. */
    maxctx.prec = MPD_MAX_PREC + 21;
    maxctx.emax = MPD_MAX_EMAX + 21;
    maxctx.emin = MPD_MIN_EMIN - 21;

    mpd_qmul(tmp, tmp, exp_hash, &maxctx, &status);
    mpd_qrem(tmp, tmp, &p, &maxctx, &status);

    result = mpd_qget_ssize(tmp, &status);
    result = mpd_ispositive(MPD(v)) ? result : -result;
    /* -1 is reserved as the error return of hash functions. */
    result = (result == -1) ? -2 : result;

    if (status != 0) {
        if (status & MPD_Malloc_error) {
            goto malloc_error;
        }
        else {
            PyErr_SetString(PyExc_RuntimeError, /* GCOV_NOT_REACHED */
                "dec_hash: internal error: please report"); /* GCOV_NOT_REACHED */
        }
        result = -1; /* GCOV_NOT_REACHED */
    }

finish:
    if (exp_hash) mpd_del(exp_hash);
    if (tmp) mpd_del(tmp);
    return result;

malloc_error:
    PyErr_NoMemory();
    result = -1;
    goto finish;
}

/* tp_hash slot: cache the (expensive) hash in the instance; -1 doubles
   as the "not yet computed" sentinel, which is safe because _dec_hash()
   never returns -1 except on error. */
static Py_hash_t
dec_hash(PyObject *op)
{
    PyDecObject *self = _PyDecObject_CAST(op);
    if (self->hash == -1) {
        self->hash = _dec_hash(self);
    }
    return self->hash;
}

/*[clinic input]
_decimal.Decimal.__reduce__

Return state information for pickling.
[clinic start generated code]*/ static PyObject * _decimal_Decimal___reduce___impl(PyObject *self) /*[clinic end generated code: output=84fa6648a496a8d2 input=0345ea951d9b986f]*/ { PyObject *result, *str; str = dec_str(self); if (str == NULL) { return NULL; } result = Py_BuildValue("O(O)", Py_TYPE(self), str); Py_DECREF(str); return result; } /*[clinic input] _decimal.Decimal.__sizeof__ self as v: self Returns size in memory, in bytes [clinic start generated code]*/ static PyObject * _decimal_Decimal___sizeof___impl(PyObject *v) /*[clinic end generated code: output=f16de05097c62b79 input=a557db538cfddbb7]*/ { size_t res = _PyObject_SIZE(Py_TYPE(v)); if (mpd_isdynamic_data(MPD(v))) { res += (size_t)MPD(v)->alloc * sizeof(mpd_uint_t); } return PyLong_FromSize_t(res); } /*[clinic input] _decimal.Decimal.__trunc__ = _decimal.Decimal.__ceil__ Return the Integral closest to x between 0 and x. [clinic start generated code]*/ static PyObject * _decimal_Decimal___trunc___impl(PyObject *self, PyTypeObject *cls) /*[clinic end generated code: output=7b3decc4b636ce32 input=9b3a3a85f63b0515]*/ { PyObject *context; decimal_state *state = PyType_GetModuleState(cls); CURRENT_CONTEXT(state, context); return dec_as_long(self, context, MPD_ROUND_DOWN); } /* real and imag */ static PyObject * dec_real(PyObject *self, void *Py_UNUSED(closure)) { return Py_NewRef(self); } static PyObject * dec_imag(PyObject *self, void *Py_UNUSED(closure)) { PyObject *result; decimal_state *state = get_module_state_by_def(Py_TYPE(self)); result = dec_alloc(state); if (result == NULL) { return NULL; } _dec_settriple(result, MPD_POS, 0, 0); return result; } static PyGetSetDef dec_getsets [] = { { "real", dec_real, NULL, NULL, NULL}, { "imag", dec_imag, NULL, NULL, NULL}, {NULL} }; static PyMethodDef dec_methods [] = { /* Unary arithmetic functions, optional context arg */ _DECIMAL_DECIMAL_EXP_METHODDEF _DECIMAL_DECIMAL_LN_METHODDEF _DECIMAL_DECIMAL_LOG10_METHODDEF _DECIMAL_DECIMAL_NEXT_MINUS_METHODDEF 
_DECIMAL_DECIMAL_NEXT_PLUS_METHODDEF _DECIMAL_DECIMAL_NORMALIZE_METHODDEF _DECIMAL_DECIMAL_TO_INTEGRAL_METHODDEF _DECIMAL_DECIMAL_TO_INTEGRAL_EXACT_METHODDEF _DECIMAL_DECIMAL_TO_INTEGRAL_VALUE_METHODDEF _DECIMAL_DECIMAL_SQRT_METHODDEF /* Binary arithmetic functions, optional context arg */ _DECIMAL_DECIMAL_COMPARE_METHODDEF _DECIMAL_DECIMAL_COMPARE_SIGNAL_METHODDEF _DECIMAL_DECIMAL_MAX_METHODDEF _DECIMAL_DECIMAL_MAX_MAG_METHODDEF _DECIMAL_DECIMAL_MIN_METHODDEF _DECIMAL_DECIMAL_MIN_MAG_METHODDEF _DECIMAL_DECIMAL_NEXT_TOWARD_METHODDEF _DECIMAL_DECIMAL_QUANTIZE_METHODDEF _DECIMAL_DECIMAL_REMAINDER_NEAR_METHODDEF /* Ternary arithmetic functions, optional context arg */ _DECIMAL_DECIMAL_FMA_METHODDEF /* Boolean functions, no context arg */ _DECIMAL_DECIMAL_IS_CANONICAL_METHODDEF _DECIMAL_DECIMAL_IS_FINITE_METHODDEF _DECIMAL_DECIMAL_IS_INFINITE_METHODDEF _DECIMAL_DECIMAL_IS_NAN_METHODDEF _DECIMAL_DECIMAL_IS_QNAN_METHODDEF _DECIMAL_DECIMAL_IS_SNAN_METHODDEF _DECIMAL_DECIMAL_IS_SIGNED_METHODDEF _DECIMAL_DECIMAL_IS_ZERO_METHODDEF /* Boolean functions, optional context arg */ _DECIMAL_DECIMAL_IS_NORMAL_METHODDEF _DECIMAL_DECIMAL_IS_SUBNORMAL_METHODDEF /* Unary functions, no context arg */ _DECIMAL_DECIMAL_ADJUSTED_METHODDEF _DECIMAL_DECIMAL_CANONICAL_METHODDEF _DECIMAL_DECIMAL_CONJUGATE_METHODDEF _DECIMAL_DECIMAL_RADIX_METHODDEF /* Unary functions, optional context arg for conversion errors */ _DECIMAL_DECIMAL_COPY_ABS_METHODDEF _DECIMAL_DECIMAL_COPY_NEGATE_METHODDEF /* Unary functions, optional context arg */ _DECIMAL_DECIMAL_LOGB_METHODDEF _DECIMAL_DECIMAL_LOGICAL_INVERT_METHODDEF _DECIMAL_DECIMAL_NUMBER_CLASS_METHODDEF _DECIMAL_DECIMAL_TO_ENG_STRING_METHODDEF /* Binary functions, optional context arg for conversion errors */ _DECIMAL_DECIMAL_COMPARE_TOTAL_METHODDEF _DECIMAL_DECIMAL_COMPARE_TOTAL_MAG_METHODDEF _DECIMAL_DECIMAL_COPY_SIGN_METHODDEF _DECIMAL_DECIMAL_SAME_QUANTUM_METHODDEF /* Binary functions, optional context arg */ _DECIMAL_DECIMAL_LOGICAL_AND_METHODDEF 
_DECIMAL_DECIMAL_LOGICAL_OR_METHODDEF _DECIMAL_DECIMAL_LOGICAL_XOR_METHODDEF _DECIMAL_DECIMAL_ROTATE_METHODDEF _DECIMAL_DECIMAL_SCALEB_METHODDEF _DECIMAL_DECIMAL_SHIFT_METHODDEF /* Miscellaneous */ _DECIMAL_DECIMAL_FROM_FLOAT_METHODDEF _DECIMAL_DECIMAL_FROM_NUMBER_METHODDEF _DECIMAL_DECIMAL_AS_TUPLE_METHODDEF _DECIMAL_DECIMAL_AS_INTEGER_RATIO_METHODDEF /* Special methods */ _DECIMAL_DECIMAL___COPY___METHODDEF _DECIMAL_DECIMAL___DEEPCOPY___METHODDEF _DECIMAL_DECIMAL___FORMAT___METHODDEF _DECIMAL_DECIMAL___REDUCE___METHODDEF _DECIMAL_DECIMAL___ROUND___METHODDEF _DECIMAL_DECIMAL___CEIL___METHODDEF _DECIMAL_DECIMAL___FLOOR___METHODDEF _DECIMAL_DECIMAL___TRUNC___METHODDEF _DECIMAL_DECIMAL___COMPLEX___METHODDEF _DECIMAL_DECIMAL___SIZEOF___METHODDEF { NULL, NULL, 1 } }; static PyType_Slot dec_slots[] = { {Py_tp_token, Py_TP_USE_SPEC}, {Py_tp_dealloc, dec_dealloc}, {Py_tp_getattro, PyObject_GenericGetAttr}, {Py_tp_traverse, _PyObject_VisitType}, {Py_tp_repr, dec_repr}, {Py_tp_hash, dec_hash}, {Py_tp_str, dec_str}, {Py_tp_doc, (void *)dec_new__doc__}, {Py_tp_richcompare, dec_richcompare}, {Py_tp_methods, dec_methods}, {Py_tp_getset, dec_getsets}, {Py_tp_new, dec_new}, // Number protocol {Py_nb_add, nm_mpd_qadd}, {Py_nb_subtract, nm_mpd_qsub}, {Py_nb_multiply, nm_mpd_qmul}, {Py_nb_remainder, nm_mpd_qrem}, {Py_nb_divmod, nm_mpd_qdivmod}, {Py_nb_power, nm_mpd_qpow}, {Py_nb_negative, nm_mpd_qminus}, {Py_nb_positive, nm_mpd_qplus}, {Py_nb_absolute, nm_mpd_qabs}, {Py_nb_bool, nm_nonzero}, {Py_nb_int, nm_dec_as_long}, {Py_nb_float, PyDec_AsFloat}, {Py_nb_floor_divide, nm_mpd_qdivint}, {Py_nb_true_divide, nm_mpd_qdiv}, {0, NULL}, }; static PyType_Spec dec_spec = { .name = "decimal.Decimal", .basicsize = sizeof(PyDecObject), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = dec_slots, }; /******************************************************************************/ /* Context Object, Part 2 */ 
/******************************************************************************/

/************************************************************************/
/*     Macros for converting mpdecimal functions to Context methods    */
/************************************************************************/

/* Boolean context method.  The context is used both for converting x
   and as the MPDFUNC argument.
   Argument Clinic provides PyObject *context, PyObject *x */
#define DecCtx_BoolFunc(MPDFUNC) \
{                                                                     \
    PyObject *ret;                                                    \
    PyObject *a;                                                      \
                                                                      \
    CONVERT_OP_RAISE(&a, x, context);                                 \
                                                                      \
    ret = MPDFUNC(MPD(a), CTX(context)) ? incr_true() : incr_false(); \
    Py_DECREF(a);                                                     \
    return ret;                                                       \
}

/* Boolean context method.  MPDFUNC does NOT use a context; the context
   argument is only used for converting x.
   Argument Clinic provides PyObject *context, PyObject *x */
#define DecCtx_BoolFunc_NO_CTX(MPDFUNC) \
{                                                        \
    PyObject *ret;                                       \
    PyObject *a;                                         \
                                                         \
    CONVERT_OP_RAISE(&a, x, context);                    \
                                                         \
    ret = MPDFUNC(MPD(a)) ? incr_true() : incr_false();  \
    Py_DECREF(a);                                        \
    return ret;                                          \
}

/* Unary context method.  Converts x, applies MPDFUNC under the
   context, and folds the libmpdec status word into the context flags
   (raising the corresponding signal on failure).
   Argument Clinic provides PyObject *context, PyTypeObject *cls,
   PyObject *x */
#define DecCtx_UnaryFunc(MPDFUNC) \
{                                                        \
    PyObject *result, *a;                                \
    uint32_t status = 0;                                 \
                                                         \
    CONVERT_OP_RAISE(&a, x, context);                    \
    decimal_state *state = PyType_GetModuleState(cls);   \
    if ((result = dec_alloc(state)) == NULL) {           \
        Py_DECREF(a);                                    \
        return NULL;                                     \
    }                                                    \
                                                         \
    MPDFUNC(MPD(result), MPD(a), CTX(context), &status); \
    Py_DECREF(a);                                        \
    if (dec_addstatus(context, status)) {                \
        Py_DECREF(result);                               \
        return NULL;                                     \
    }                                                    \
                                                         \
    return result;                                       \
}

/* Binary context method.
   Argument Clinic provides PyObject *context, PyTypeObject *cls,
   PyObject *x, PyObject *y */
#define DecCtx_BinaryFunc(MPDFUNC) \
{                                                                \
    PyObject *a, *b;                                             \
    PyObject *result;                                            \
    uint32_t status = 0;                                         \
                                                                 \
    CONVERT_BINOP_RAISE(&a, &b, x, y, context);                  \
    decimal_state *state = PyType_GetModuleState(cls);           \
    if ((result = dec_alloc(state)) == NULL) {                   \
        Py_DECREF(a);                                            \
        Py_DECREF(b);                                            \
        return NULL;                                             \
    }                                                            \
                                                                 \
    MPDFUNC(MPD(result), MPD(a), MPD(b), CTX(context), &status); \
    Py_DECREF(a);                                                \
    Py_DECREF(b);                                                \
    if (dec_addstatus(context, status)) {                        \
        Py_DECREF(result);                                       \
        return NULL;                                             \
    }                                                            \
                                                                 \
    return result;                                               \
}

/*
 * Binary context method. The context is only used for conversion.
 * The actual MPDFUNC does NOT take a context arg.
 * Argument Clinic provides PyObject *context, PyTypeObject *cls,
 * PyObject *x, PyObject *y
 */
#define DecCtx_BinaryFunc_NO_CTX(MPDFUNC) \
{                                                      \
    PyObject *a, *b;                                   \
    PyObject *result;                                  \
                                                       \
    CONVERT_BINOP_RAISE(&a, &b, x, y, context);        \
    decimal_state *state =                             \
        PyType_GetModuleState(cls);                    \
    if ((result = dec_alloc(state)) == NULL) {         \
        Py_DECREF(a);                                  \
        Py_DECREF(b);                                  \
        return NULL;                                   \
    }                                                  \
                                                       \
    MPDFUNC(MPD(result), MPD(a), MPD(b));              \
    Py_DECREF(a);                                      \
    Py_DECREF(b);                                      \
                                                       \
    return result;                                     \
}

/* Ternary context method.
Argument Clinic provides PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y, PyObject *z */ #define DecCtx_TernaryFunc(MPDFUNC) \ { \ PyObject *a, *b, *c; \ PyObject *result; \ uint32_t status = 0; \ \ CONVERT_TERNOP_RAISE(&a, &b, &c, x, y, z, context); \ decimal_state *state = PyType_GetModuleState(cls); \ if ((result = dec_alloc(state)) == NULL) { \ Py_DECREF(a); \ Py_DECREF(b); \ Py_DECREF(c); \ return NULL; \ } \ \ MPDFUNC(MPD(result), MPD(a), MPD(b), MPD(c), CTX(context), &status); \ Py_DECREF(a); \ Py_DECREF(b); \ Py_DECREF(c); \ if (dec_addstatus(context, status)) { \ Py_DECREF(result); \ return NULL; \ } \ \ return result; \ } /* Unary arithmetic functions */ /*[clinic input] _decimal.Context.abs self as context: self cls: defining_class x: object / Return the absolute value of x. [clinic start generated code]*/ static PyObject * _decimal_Context_abs_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=fe080467d32e229c input=00a33f9c68463bb0]*/ DecCtx_UnaryFunc(mpd_qabs) /*[clinic input] _decimal.Context.exp = _decimal.Context.abs Return e ** x. [clinic start generated code]*/ static PyObject * _decimal_Context_exp_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=c7477a67010ccc5f input=5b443c4ab153dd2e]*/ DecCtx_UnaryFunc(mpd_qexp) /*[clinic input] _decimal.Context.ln = _decimal.Context.abs Return the natural (base e) logarithm of x. [clinic start generated code]*/ static PyObject * _decimal_Context_ln_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=63e691b0680bffc7 input=cf43cd98a0fe7425]*/ DecCtx_UnaryFunc(mpd_qln) /*[clinic input] _decimal.Context.log10 = _decimal.Context.abs Return the base 10 logarithm of x. 
[clinic start generated code]*/ static PyObject * _decimal_Context_log10_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=e0d9fc928570304d input=309e57faf42c257d]*/ DecCtx_UnaryFunc(mpd_qlog10) /*[clinic input] _decimal.Context.minus = _decimal.Context.abs Minus corresponds to unary prefix minus in Python. This operation applies the context to the result. [clinic start generated code]*/ static PyObject * _decimal_Context_minus_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=f06c409b6aef1aad input=63be4c419d1d554b]*/ DecCtx_UnaryFunc(mpd_qminus) /*[clinic input] _decimal.Context.next_minus = _decimal.Context.abs Return the largest representable number smaller than x. [clinic start generated code]*/ static PyObject * _decimal_Context_next_minus_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=8dd168f08bec9547 input=969f4d24dfcd5e85]*/ DecCtx_UnaryFunc(mpd_qnext_minus) /*[clinic input] _decimal.Context.next_plus = _decimal.Context.abs Return the smallest representable number larger than x. [clinic start generated code]*/ static PyObject * _decimal_Context_next_plus_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=2a50586ad2f7c108 input=af1a85ee59b56a3c]*/ DecCtx_UnaryFunc(mpd_qnext_plus) /*[clinic input] _decimal.Context.normalize = _decimal.Context.abs Reduce x to its simplest form. Alias for reduce(x). [clinic start generated code]*/ static PyObject * _decimal_Context_normalize_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=9a9510f442ba2852 input=a65bc39c81a654a9]*/ DecCtx_UnaryFunc(mpd_qreduce) /*[clinic input] _decimal.Context.plus = _decimal.Context.abs Plus corresponds to the unary prefix plus operator in Python. This operation applies the context to the result. 
[clinic start generated code]*/ static PyObject * _decimal_Context_plus_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=c37d29f58a47f93a input=5d8a75702d20e2f9]*/ DecCtx_UnaryFunc(mpd_qplus) /*[clinic input] _decimal.Context.to_integral_value = _decimal.Context.abs Round to an integer. [clinic start generated code]*/ static PyObject * _decimal_Context_to_integral_value_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=e3d9ad000bc06036 input=3103e147cb9de9ed]*/ DecCtx_UnaryFunc(mpd_qround_to_int) /*[clinic input] _decimal.Context.to_integral_exact = _decimal.Context.abs Round to an integer. Signal if the result is rounded or inexact. [clinic start generated code]*/ static PyObject * _decimal_Context_to_integral_exact_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=680b796dfae8e2ef input=677dc4b915907b68]*/ DecCtx_UnaryFunc(mpd_qround_to_intx) /*[clinic input] _decimal.Context.to_integral = _decimal.Context.abs Identical to to_integral_value(x). [clinic start generated code]*/ static PyObject * _decimal_Context_to_integral_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=09f4823b90b2cf17 input=89d4a4b15495b8c9]*/ DecCtx_UnaryFunc(mpd_qround_to_int) /*[clinic input] _decimal.Context.sqrt = _decimal.Context.abs Square root of a non-negative number to context precision. [clinic start generated code]*/ static PyObject * _decimal_Context_sqrt_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=2b9c16c6f5ceead0 input=90bd954b0b8076fb]*/ DecCtx_UnaryFunc(mpd_qsqrt) /* Binary arithmetic functions */ /*[clinic input] _decimal.Context.add self as context: self cls: defining_class x: object y: object / Return the sum of x and y. 
[clinic start generated code]*/ static PyObject * _decimal_Context_add_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=ab4f0fb841e6a867 input=f2c74f6a845f62e9]*/ DecCtx_BinaryFunc(mpd_qadd) /*[clinic input] _decimal.Context.compare = _decimal.Context.add Compare x and y numerically. [clinic start generated code]*/ static PyObject * _decimal_Context_compare_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=56efd1faf653f1d7 input=f701cb179c966ec1]*/ DecCtx_BinaryFunc(mpd_qcompare) /*[clinic input] _decimal.Context.compare_signal = _decimal.Context.add Compare x and y numerically. All NaNs signal. [clinic start generated code]*/ static PyObject * _decimal_Context_compare_signal_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=7c1a9a9f6ae4e5cd input=32a1bcef7bbc5179]*/ DecCtx_BinaryFunc(mpd_qcompare_signal) /*[clinic input] _decimal.Context.divide = _decimal.Context.add Return x divided by y. [clinic start generated code]*/ static PyObject * _decimal_Context_divide_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=1a7924b20e24a528 input=00cd9bc2ba2a1786]*/ DecCtx_BinaryFunc(mpd_qdiv) /*[clinic input] _decimal.Context.divide_int = _decimal.Context.add Return x divided by y, truncated to an integer. [clinic start generated code]*/ static PyObject * _decimal_Context_divide_int_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=7a1d8948625105f0 input=e80ada2f50d9719d]*/ DecCtx_BinaryFunc(mpd_qdivint) /*[clinic input] _decimal.Context.max = _decimal.Context.add Compare the values numerically and return the maximum. 
[clinic start generated code]*/ static PyObject * _decimal_Context_max_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=cd54af10a51c11fc input=22008ab898c86a8b]*/ DecCtx_BinaryFunc(mpd_qmax) /*[clinic input] _decimal.Context.max_mag = _decimal.Context.add Compare the values numerically with their sign ignored. [clinic start generated code]*/ static PyObject * _decimal_Context_max_mag_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=1c812e73bcb7827f input=f7ce42ef82a7c52e]*/ DecCtx_BinaryFunc(mpd_qmax_mag) /*[clinic input] _decimal.Context.min = _decimal.Context.add Compare the values numerically and return the minimum. [clinic start generated code]*/ static PyObject * _decimal_Context_min_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=aa494e95b88107b3 input=2aeec1167638c5ef]*/ DecCtx_BinaryFunc(mpd_qmin) /*[clinic input] _decimal.Context.min_mag = _decimal.Context.add Compare the values numerically with their sign ignored. [clinic start generated code]*/ static PyObject * _decimal_Context_min_mag_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=ee0b69c1d9a14185 input=19d158c29e4fc140]*/ DecCtx_BinaryFunc(mpd_qmin_mag) /*[clinic input] _decimal.Context.multiply = _decimal.Context.add Return the product of x and y. [clinic start generated code]*/ static PyObject * _decimal_Context_multiply_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=45f33b805afa01a8 input=2fdd01acdbeef8ba]*/ DecCtx_BinaryFunc(mpd_qmul) /*[clinic input] _decimal.Context.next_toward = _decimal.Context.add Return the number closest to x, in the direction towards y. 
[clinic start generated code]*/ static PyObject * _decimal_Context_next_toward_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=436afff6f43edec2 input=aac775298e02b68c]*/ DecCtx_BinaryFunc(mpd_qnext_toward) /*[clinic input] _decimal.Context.quantize = _decimal.Context.add Return a value equal to x (rounded), having the exponent of y. [clinic start generated code]*/ static PyObject * _decimal_Context_quantize_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=fcf8cd32b7d628c9 input=43d67a696ab6d895]*/ DecCtx_BinaryFunc(mpd_qquantize) /*[clinic input] _decimal.Context.remainder = _decimal.Context.add Return the remainder from integer division. The sign of the result, if non-zero, is the same as that of the original dividend. [clinic start generated code]*/ static PyObject * _decimal_Context_remainder_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=e0f96c834abbfbd2 input=36d0eb2b392c1215]*/ DecCtx_BinaryFunc(mpd_qrem) /*[clinic input] _decimal.Context.remainder_near = _decimal.Context.add Return x - y * n. Here n is the integer nearest the exact value of x / y (if the result is 0 then its sign will be the sign of x). [clinic start generated code]*/ static PyObject * _decimal_Context_remainder_near_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=7f18c535a12cf8ac input=bafb6327bb314c5c]*/ DecCtx_BinaryFunc(mpd_qrem_near) /*[clinic input] _decimal.Context.subtract = _decimal.Context.add Return the difference between x and y. 
[clinic start generated code]*/ static PyObject * _decimal_Context_subtract_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=3d764a8a87e79401 input=6767683ec68f7a1a]*/ DecCtx_BinaryFunc(mpd_qsub) /*[clinic input] _decimal.Context.divmod self as context: self x: object y: object / Return quotient and remainder of the division x / y. [clinic start generated code]*/ static PyObject * _decimal_Context_divmod_impl(PyObject *context, PyObject *x, PyObject *y) /*[clinic end generated code: output=5dbf5410e3f302af input=4d8eee07823c752a]*/ { PyObject *a, *b; PyObject *q, *r; uint32_t status = 0; PyObject *ret; CONVERT_BINOP_RAISE(&a, &b, x, y, context); decimal_state *state = get_module_state_from_ctx(context); q = dec_alloc(state); if (q == NULL) { Py_DECREF(a); Py_DECREF(b); return NULL; } r = dec_alloc(state); if (r == NULL) { Py_DECREF(a); Py_DECREF(b); Py_DECREF(q); return NULL; } mpd_qdivmod(MPD(q), MPD(r), MPD(a), MPD(b), CTX(context), &status); Py_DECREF(a); Py_DECREF(b); if (dec_addstatus(context, status)) { Py_DECREF(r); Py_DECREF(q); return NULL; } ret = PyTuple_Pack(2, q, r); Py_DECREF(r); Py_DECREF(q); return ret; } /* Binary or ternary arithmetic functions */ /*[clinic input] _decimal.Context.power self as context: self cls: defining_class a as base: object b as exp: object modulo as mod: object = None Compute a**b. If 'a' is negative, then 'b' must be integral. The result will be inexact unless 'a' is integral and the result is finite and can be expressed exactly in 'precision' digits. In the Python version the result is always correctly rounded, in the C version the result is almost always correctly rounded. If modulo is given, compute (a**b) % modulo. 
The following restrictions hold: * all three arguments must be integral * 'b' must be nonnegative * at least one of 'a' or 'b' must be nonzero * modulo must be nonzero and less than 10**prec in absolute value [clinic start generated code]*/ static PyObject * _decimal_Context_power_impl(PyObject *context, PyTypeObject *cls, PyObject *base, PyObject *exp, PyObject *mod) /*[clinic end generated code: output=d06d40c37cdd69dc input=2a70edd03317c666]*/ { PyObject *a, *b, *c = NULL; PyObject *result; uint32_t status = 0; CONVERT_BINOP_RAISE(&a, &b, base, exp, context); if (mod != Py_None) { if (!convert_op(TYPE_ERR, &c, mod, context)) { Py_DECREF(a); Py_DECREF(b); return c; } } decimal_state *state = PyType_GetModuleState(cls); result = dec_alloc(state); if (result == NULL) { Py_DECREF(a); Py_DECREF(b); Py_XDECREF(c); return NULL; } if (c == NULL) { mpd_qpow(MPD(result), MPD(a), MPD(b), CTX(context), &status); } else { mpd_qpowmod(MPD(result), MPD(a), MPD(b), MPD(c), CTX(context), &status); Py_DECREF(c); } Py_DECREF(a); Py_DECREF(b); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } return result; } /* Ternary arithmetic functions */ /*[clinic input] _decimal.Context.fma self as context: self cls: defining_class x: object y: object z: object / Return x multiplied by y, plus z. [clinic start generated code]*/ static PyObject * _decimal_Context_fma_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y, PyObject *z) /*[clinic end generated code: output=08ec3cefc59d71a9 input=da3963b1a1da83b9]*/ DecCtx_TernaryFunc(mpd_qfma) /* No argument */ /*[clinic input] _decimal.Context.radix self as context: self cls: defining_class Return 10. 
[clinic start generated code]*/ static PyObject * _decimal_Context_radix_impl(PyObject *context, PyTypeObject *cls) /*[clinic end generated code: output=674b88b7cd0c264d input=e1e4f8c0abf86825]*/ { decimal_state *state = PyType_GetModuleState(cls); return _dec_mpd_radix(state); } /* Boolean functions: single decimal argument */ /*[clinic input] _decimal.Context.is_normal self as context: self cls: defining_class x: object / Return True if x is a normal number, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_normal_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=089c5609db60bf57 input=7c90b825a517ef7e]*/ DecCtx_BoolFunc(mpd_isnormal) /*[clinic input] _decimal.Context.is_subnormal = _decimal.Context.is_normal Return True if x is subnormal, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_subnormal_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=f58c45a288aadeda input=73f1bd9367b913a4]*/ DecCtx_BoolFunc(mpd_issubnormal) /*[clinic input] _decimal.Context.is_finite = _decimal.Context.is_normal Return True if x is finite, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_finite_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=dfb00f1b5589b9f0 input=abff92a8a6bb85e6]*/ DecCtx_BoolFunc_NO_CTX(mpd_isfinite) /*[clinic input] _decimal.Context.is_infinite = _decimal.Context.is_normal Return True if x is infinite, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_infinite_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=1c28517500811d01 input=591242ae9a1e60e6]*/ DecCtx_BoolFunc_NO_CTX(mpd_isinfinite) /*[clinic input] _decimal.Context.is_nan = _decimal.Context.is_normal Return True if x is a qNaN or sNaN, False otherwise. 
[clinic start generated code]*/ static PyObject * _decimal_Context_is_nan_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=9dc15463ee19864a input=520218376d5eec5e]*/ DecCtx_BoolFunc_NO_CTX(mpd_isnan) /*[clinic input] _decimal.Context.is_qnan = _decimal.Context.is_normal Return True if x is a quiet NaN, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_qnan_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=4caa672e03703b6d input=97d06a14ab3360d1]*/ DecCtx_BoolFunc_NO_CTX(mpd_isqnan) /*[clinic input] _decimal.Context.is_snan = _decimal.Context.is_normal Return True if x is a signaling NaN, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_snan_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=a8caa929d9f82ecd input=0059fe4e9c3b25a8]*/ DecCtx_BoolFunc_NO_CTX(mpd_issnan) /*[clinic input] _decimal.Context.is_signed = _decimal.Context.is_normal Return True if x is negative, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_signed_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=42c450c99d4fe7db input=b950cd697721ab8b]*/ DecCtx_BoolFunc_NO_CTX(mpd_issigned) /*[clinic input] _decimal.Context.is_zero = _decimal.Context.is_normal Return True if x is a zero, False otherwise. [clinic start generated code]*/ static PyObject * _decimal_Context_is_zero_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=e6c55359b7241d9e input=bf08197d142a8027]*/ DecCtx_BoolFunc_NO_CTX(mpd_iszero) /*[clinic input] _decimal.Context.is_canonical = _decimal.Context.is_normal Return True if x is canonical, False otherwise. 
[clinic start generated code]*/ static PyObject * _decimal_Context_is_canonical_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=18ee249d9aec957c input=1bf2129808e55eb9]*/ { decimal_state *state = PyType_GetModuleState(cls); if (!PyDec_Check(state, x)) { PyErr_SetString(PyExc_TypeError, "argument must be a Decimal"); return NULL; } return mpd_iscanonical(MPD(x)) ? incr_true() : incr_false(); } /* Functions with a single decimal argument */ /*[clinic input] _decimal.Context._apply = _decimal.Context.is_normal Apply self to Decimal x. [clinic start generated code]*/ static PyObject * _decimal_Context__apply_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=c6b542f4e8114b97 input=12b34468ca4a4c30]*/ { PyObject *result, *a; CONVERT_OP_RAISE(&a, x, context); result = dec_apply(a, context); Py_DECREF(a); return result; } #ifdef EXTRA_FUNCTIONALITY /*[clinic input] _decimal.Context.apply = _decimal.Context._apply Apply self to Decimal x. [clinic start generated code]*/ static PyObject * _decimal_Context_apply_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=f8a7142d47ad4ff3 input=388e66ca82733516]*/ { return _decimal_Context__apply(context, x); } #endif /*[clinic input] _decimal.Context.canonical = _decimal.Context.is_normal Return a new instance of x. [clinic start generated code]*/ static PyObject * _decimal_Context_canonical_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=f213e433e2032e5e input=025ecb106ac15bff]*/ { decimal_state *state = PyType_GetModuleState(cls); if (!PyDec_Check(state, x)) { PyErr_SetString(PyExc_TypeError, "argument must be a Decimal"); return NULL; } return Py_NewRef(x); } /*[clinic input] _decimal.Context.copy_abs = _decimal.Context.is_normal Return a copy of x with the sign set to 0. 
[clinic start generated code]*/ static PyObject * _decimal_Context_copy_abs_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=a141ad4b9afe2deb input=4aa2f612625f0f73]*/ { PyObject *result, *a; uint32_t status = 0; CONVERT_OP_RAISE(&a, x, context); decimal_state *state = PyType_GetModuleState(cls); result = dec_alloc(state); if (result == NULL) { Py_DECREF(a); return NULL; } mpd_qcopy_abs(MPD(result), MPD(a), &status); Py_DECREF(a); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } return result; } /*[clinic input] _decimal.Context.copy_decimal = _decimal.Context.is_normal Return a copy of Decimal x. [clinic start generated code]*/ static PyObject * _decimal_Context_copy_decimal_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=639a82e1193d31f6 input=4db4f942f45fb7c9]*/ { PyObject *result; CONVERT_OP_RAISE(&result, x, context); return result; } /*[clinic input] _decimal.Context.copy_negate = _decimal.Context.is_normal Return a copy of x with the sign inverted. [clinic start generated code]*/ static PyObject * _decimal_Context_copy_negate_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=e49d013489dc252b input=2e6e213e2ed0efda]*/ { PyObject *result, *a; uint32_t status = 0; CONVERT_OP_RAISE(&a, x, context); decimal_state *state = PyType_GetModuleState(cls); result = dec_alloc(state); if (result == NULL) { Py_DECREF(a); return NULL; } mpd_qcopy_negate(MPD(result), MPD(a), &status); Py_DECREF(a); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } return result; } /*[clinic input] _decimal.Context.logb = _decimal.Context.abs Return the exponent of the magnitude of the operand's MSD. 
[clinic start generated code]*/ static PyObject * _decimal_Context_logb_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=9b9697e1eb68093f input=28d1cd1a8a906b9a]*/ DecCtx_UnaryFunc(mpd_qlogb) /*[clinic input] _decimal.Context.logical_invert = _decimal.Context.abs Invert all the digits in the operand. The operand must be a logical number. >>> ExtendedContext.logical_invert(Decimal('0')) Decimal('111111111') >>> ExtendedContext.logical_invert(Decimal('1')) Decimal('111111110') >>> ExtendedContext.logical_invert(Decimal('111111111')) Decimal('0') >>> ExtendedContext.logical_invert(Decimal('101010101')) Decimal('10101010') >>> ExtendedContext.logical_invert(1101) Decimal('111110010') [clinic start generated code]*/ static PyObject * _decimal_Context_logical_invert_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=97760277a958e2b0 input=8e568f4c745ab596]*/ DecCtx_UnaryFunc(mpd_qinvert) /*[clinic input] _decimal.Context.number_class = _decimal.Context.is_normal Return an indication of the class of x. [clinic start generated code]*/ static PyObject * _decimal_Context_number_class_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=c1592a23e25ba5ee input=1ead8462f1800e4e]*/ { PyObject *a; const char *cp; CONVERT_OP_RAISE(&a, x, context); cp = mpd_class(MPD(a), CTX(context)); Py_DECREF(a); return PyUnicode_FromString(cp); } /*[clinic input] _decimal.Context.to_sci_string = _decimal.Context.is_normal Convert a number to a string using scientific notation. 
[clinic start generated code]*/ static PyObject * _decimal_Context_to_sci_string_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=092dcdef999d72da input=ed442677c66d342d]*/ { PyObject *result; PyObject *a; mpd_ssize_t size; char *s; CONVERT_OP_RAISE(&a, x, context); size = mpd_to_sci_size(&s, MPD(a), CtxCaps(context)); Py_DECREF(a); if (size < 0) { PyErr_NoMemory(); return NULL; } result = unicode_fromascii(s, size); mpd_free(s); return result; } /*[clinic input] _decimal.Context.to_eng_string = _decimal.Context.is_normal Convert a number to a string, using engineering notation. [clinic start generated code]*/ static PyObject * _decimal_Context_to_eng_string_impl(PyObject *context, PyTypeObject *cls, PyObject *x) /*[clinic end generated code: output=7fc53216c208f487 input=a574385e2e3e3bc0]*/ { PyObject *result; PyObject *a; mpd_ssize_t size; char *s; CONVERT_OP_RAISE(&a, x, context); size = mpd_to_eng_size(&s, MPD(a), CtxCaps(context)); Py_DECREF(a); if (size < 0) { PyErr_NoMemory(); return NULL; } result = unicode_fromascii(s, size); mpd_free(s); return result; } /* Functions with two decimal arguments */ /*[clinic input] _decimal.Context.compare_total self as context: self cls: defining_class x: object y: object / Compare x and y using their abstract representation. [clinic start generated code]*/ static PyObject * _decimal_Context_compare_total_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=f79177b27fe930e3 input=2bfc677a841e297a]*/ DecCtx_BinaryFunc_NO_CTX(mpd_compare_total) /*[clinic input] _decimal.Context.compare_total_mag = _decimal.Context.compare_total Compare x and y using their abstract representation, ignoring sign. 
[clinic start generated code]*/ static PyObject * _decimal_Context_compare_total_mag_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=2528c669ccd6d6ff input=2b982e69f932dcb2]*/ DecCtx_BinaryFunc_NO_CTX(mpd_compare_total_mag) /*[clinic input] _decimal.Context.copy_sign = _decimal.Context.compare_total Copy the sign from y to x. [clinic start generated code]*/ static PyObject * _decimal_Context_copy_sign_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=77d23b6f4e42120c input=c0682aeaffc7cfdf]*/ { PyObject *a, *b; PyObject *result; uint32_t status = 0; CONVERT_BINOP_RAISE(&a, &b, x, y, context); decimal_state *state = PyType_GetModuleState(cls); result = dec_alloc(state); if (result == NULL) { Py_DECREF(a); Py_DECREF(b); return NULL; } mpd_qcopy_sign(MPD(result), MPD(a), MPD(b), &status); Py_DECREF(a); Py_DECREF(b); if (dec_addstatus(context, status)) { Py_DECREF(result); return NULL; } return result; } /*[clinic input] _decimal.Context.logical_and = _decimal.Context.add Applies the logical operation 'and' between each operand's digits. The operands must be both logical numbers. 
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) Decimal('1000') >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) Decimal('10') >>> ExtendedContext.logical_and(110, 1101) Decimal('100') >>> ExtendedContext.logical_and(Decimal(110), 1101) Decimal('100') >>> ExtendedContext.logical_and(110, Decimal(1101)) Decimal('100') [clinic start generated code]*/ static PyObject * _decimal_Context_logical_and_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=009dfa08ecaa2ac8 input=bcb7d3d6ab7530de]*/ DecCtx_BinaryFunc(mpd_qand) /*[clinic input] _decimal.Context.logical_or = _decimal.Context.add Applies the logical operation 'or' between each operand's digits. The operands must be both logical numbers. 
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')) Decimal('1110') >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10')) Decimal('1110') >>> ExtendedContext.logical_or(110, 1101) Decimal('1111') >>> ExtendedContext.logical_or(Decimal(110), 1101) Decimal('1111') >>> ExtendedContext.logical_or(110, Decimal(1101)) Decimal('1111') [clinic start generated code]*/ static PyObject * _decimal_Context_logical_or_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=eb38617e8d31bf12 input=47b45d296fb90846]*/ DecCtx_BinaryFunc(mpd_qor) /*[clinic input] _decimal.Context.logical_xor = _decimal.Context.add Applies the logical operation 'xor' between each operand's digits. The operands must be both logical numbers. 
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010')) Decimal('110') >>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10')) Decimal('1101') >>> ExtendedContext.logical_xor(110, 1101) Decimal('1011') >>> ExtendedContext.logical_xor(Decimal(110), 1101) Decimal('1011') >>> ExtendedContext.logical_xor(110, Decimal(1101)) Decimal('1011') [clinic start generated code]*/ static PyObject * _decimal_Context_logical_xor_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=23cd81fdcd865d5a input=fcaaf828c1d2d089]*/ DecCtx_BinaryFunc(mpd_qxor) /*[clinic input] _decimal.Context.rotate = _decimal.Context.add Return a copy of x, rotated by y places. [clinic start generated code]*/ static PyObject * _decimal_Context_rotate_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=3d5b3cfcb4659432 input=7ad91845c909eb0a]*/ DecCtx_BinaryFunc(mpd_qrotate) /*[clinic input] _decimal.Context.scaleb = _decimal.Context.add Return the first operand after adding the second value to its exp. [clinic start generated code]*/ static PyObject * _decimal_Context_scaleb_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=795ac61bcbe61c67 input=c5d2ee7a57f65f8c]*/ DecCtx_BinaryFunc(mpd_qscaleb) /*[clinic input] _decimal.Context.shift = _decimal.Context.add Return a copy of x, shifted by y places. 
[clinic start generated code]*/ static PyObject * _decimal_Context_shift_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=43d69615f0271c81 input=1ab44ff0854420ce]*/ DecCtx_BinaryFunc(mpd_qshift) /*[clinic input] _decimal.Context.same_quantum = _decimal.Context.add Return True if the two operands have the same exponent. [clinic start generated code]*/ static PyObject * _decimal_Context_same_quantum_impl(PyObject *context, PyTypeObject *cls, PyObject *x, PyObject *y) /*[clinic end generated code: output=91a4d8325f98d9e9 input=194cd156e398eaf9]*/ { PyObject *a, *b; PyObject *result; CONVERT_BINOP_RAISE(&a, &b, x, y, context); result = mpd_same_quantum(MPD(a), MPD(b)) ? incr_true() : incr_false(); Py_DECREF(a); Py_DECREF(b); return result; } static PyMethodDef context_methods [] = { /* Unary arithmetic functions */ _DECIMAL_CONTEXT_ABS_METHODDEF _DECIMAL_CONTEXT_EXP_METHODDEF _DECIMAL_CONTEXT_LN_METHODDEF _DECIMAL_CONTEXT_LOG10_METHODDEF _DECIMAL_CONTEXT_MINUS_METHODDEF _DECIMAL_CONTEXT_NEXT_MINUS_METHODDEF _DECIMAL_CONTEXT_NEXT_PLUS_METHODDEF _DECIMAL_CONTEXT_NORMALIZE_METHODDEF _DECIMAL_CONTEXT_PLUS_METHODDEF _DECIMAL_CONTEXT_TO_INTEGRAL_METHODDEF _DECIMAL_CONTEXT_TO_INTEGRAL_EXACT_METHODDEF _DECIMAL_CONTEXT_TO_INTEGRAL_VALUE_METHODDEF _DECIMAL_CONTEXT_SQRT_METHODDEF /* Binary arithmetic functions */ _DECIMAL_CONTEXT_ADD_METHODDEF _DECIMAL_CONTEXT_COMPARE_METHODDEF _DECIMAL_CONTEXT_COMPARE_SIGNAL_METHODDEF _DECIMAL_CONTEXT_DIVIDE_METHODDEF _DECIMAL_CONTEXT_DIVIDE_INT_METHODDEF _DECIMAL_CONTEXT_DIVMOD_METHODDEF _DECIMAL_CONTEXT_MAX_METHODDEF _DECIMAL_CONTEXT_MAX_MAG_METHODDEF _DECIMAL_CONTEXT_MIN_METHODDEF _DECIMAL_CONTEXT_MIN_MAG_METHODDEF _DECIMAL_CONTEXT_MULTIPLY_METHODDEF _DECIMAL_CONTEXT_NEXT_TOWARD_METHODDEF _DECIMAL_CONTEXT_QUANTIZE_METHODDEF _DECIMAL_CONTEXT_REMAINDER_METHODDEF _DECIMAL_CONTEXT_REMAINDER_NEAR_METHODDEF _DECIMAL_CONTEXT_SUBTRACT_METHODDEF /* Binary or ternary arithmetic functions */ 
_DECIMAL_CONTEXT_POWER_METHODDEF /* Ternary arithmetic functions */ _DECIMAL_CONTEXT_FMA_METHODDEF /* No argument */ _DECIMAL_CONTEXT_ETINY_METHODDEF _DECIMAL_CONTEXT_ETOP_METHODDEF _DECIMAL_CONTEXT_RADIX_METHODDEF /* Boolean functions */ _DECIMAL_CONTEXT_IS_CANONICAL_METHODDEF _DECIMAL_CONTEXT_IS_FINITE_METHODDEF _DECIMAL_CONTEXT_IS_INFINITE_METHODDEF _DECIMAL_CONTEXT_IS_NAN_METHODDEF _DECIMAL_CONTEXT_IS_NORMAL_METHODDEF _DECIMAL_CONTEXT_IS_QNAN_METHODDEF _DECIMAL_CONTEXT_IS_SIGNED_METHODDEF _DECIMAL_CONTEXT_IS_SNAN_METHODDEF _DECIMAL_CONTEXT_IS_SUBNORMAL_METHODDEF _DECIMAL_CONTEXT_IS_ZERO_METHODDEF /* Functions with a single decimal argument */ _DECIMAL_CONTEXT__APPLY_METHODDEF _DECIMAL_CONTEXT_APPLY_METHODDEF _DECIMAL_CONTEXT_CANONICAL_METHODDEF _DECIMAL_CONTEXT_COPY_ABS_METHODDEF _DECIMAL_CONTEXT_COPY_DECIMAL_METHODDEF _DECIMAL_CONTEXT_COPY_NEGATE_METHODDEF _DECIMAL_CONTEXT_LOGB_METHODDEF _DECIMAL_CONTEXT_LOGICAL_INVERT_METHODDEF _DECIMAL_CONTEXT_NUMBER_CLASS_METHODDEF _DECIMAL_CONTEXT_TO_SCI_STRING_METHODDEF _DECIMAL_CONTEXT_TO_ENG_STRING_METHODDEF /* Functions with two decimal arguments */ _DECIMAL_CONTEXT_COMPARE_TOTAL_METHODDEF _DECIMAL_CONTEXT_COMPARE_TOTAL_MAG_METHODDEF _DECIMAL_CONTEXT_COPY_SIGN_METHODDEF _DECIMAL_CONTEXT_LOGICAL_AND_METHODDEF _DECIMAL_CONTEXT_LOGICAL_OR_METHODDEF _DECIMAL_CONTEXT_LOGICAL_XOR_METHODDEF _DECIMAL_CONTEXT_ROTATE_METHODDEF _DECIMAL_CONTEXT_SAME_QUANTUM_METHODDEF _DECIMAL_CONTEXT_SCALEB_METHODDEF _DECIMAL_CONTEXT_SHIFT_METHODDEF /* Set context values */ _DECIMAL_CONTEXT_CLEAR_FLAGS_METHODDEF _DECIMAL_CONTEXT_CLEAR_TRAPS_METHODDEF /* Unsafe set functions with relaxed range checks */ _DECIMAL_CONTEXT__UNSAFE_SETPREC_METHODDEF _DECIMAL_CONTEXT__UNSAFE_SETEMIN_METHODDEF _DECIMAL_CONTEXT__UNSAFE_SETEMAX_METHODDEF /* Miscellaneous */ _DECIMAL_CONTEXT___COPY___METHODDEF _DECIMAL_CONTEXT___REDUCE___METHODDEF _DECIMAL_CONTEXT_COPY_METHODDEF _DECIMAL_CONTEXT_CREATE_DECIMAL_METHODDEF _DECIMAL_CONTEXT_CREATE_DECIMAL_FROM_FLOAT_METHODDEF 
{ NULL, NULL, 1 } }; static PyType_Slot context_slots[] = { {Py_tp_token, Py_TP_USE_SPEC}, {Py_tp_dealloc, context_dealloc}, {Py_tp_traverse, context_traverse}, {Py_tp_clear, context_clear}, {Py_tp_repr, context_repr}, {Py_tp_getattro, context_getattr}, {Py_tp_setattro, context_setattr}, {Py_tp_doc, (void *)context_init__doc__}, {Py_tp_methods, context_methods}, {Py_tp_getset, context_getsets}, {Py_tp_init, context_init}, {Py_tp_new, context_new}, {0, NULL}, }; static PyType_Spec context_spec = { .name = "decimal.Context", .basicsize = sizeof(PyDecContextObject), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = context_slots, }; static PyObject * decimal_getattr(PyObject *self, PyObject *args) { PyObject *name; if (!PyArg_UnpackTuple(args, "__getattr__", 1, 1, &name)) { return NULL; } if (PyUnicode_Check(name) && PyUnicode_EqualToUTF8(name, "__version__")) { if (PyErr_WarnEx(PyExc_DeprecationWarning, "'__version__' is deprecated and slated for removal in Python 3.20", 1) < 0) { return NULL; } return PyUnicode_FromString(MPD_SPEC_VERSION); } PyErr_Format(PyExc_AttributeError, "module 'decimal' has no attribute %R", name); return NULL; } static PyMethodDef _decimal_methods [] = { _DECIMAL_GETCONTEXT_METHODDEF _DECIMAL_SETCONTEXT_METHODDEF _DECIMAL_LOCALCONTEXT_METHODDEF _DECIMAL_IEEECONTEXT_METHODDEF {"__getattr__", decimal_getattr, METH_VARARGS, "Module __getattr__"}, { NULL, NULL, 1, NULL } }; struct ssize_constmap { const char *name; mpd_ssize_t val; }; static struct ssize_constmap ssize_constants [] = { {"MAX_PREC", MPD_MAX_PREC}, {"MAX_EMAX", MPD_MAX_EMAX}, {"MIN_EMIN", MPD_MIN_EMIN}, {"MIN_ETINY", MPD_MIN_ETINY}, {NULL} }; struct int_constmap { const char *name; int val; }; static struct int_constmap int_constants [] = { /* int constants */ {"IEEE_CONTEXT_MAX_BITS", MPD_IEEE_CONTEXT_MAX_BITS}, #ifdef EXTRA_FUNCTIONALITY {"DECIMAL32", MPD_DECIMAL32}, {"DECIMAL64", MPD_DECIMAL64}, {"DECIMAL128", 
MPD_DECIMAL128}, /* int condition flags */ {"DecClamped", MPD_Clamped}, {"DecConversionSyntax", MPD_Conversion_syntax}, {"DecDivisionByZero", MPD_Division_by_zero}, {"DecDivisionImpossible", MPD_Division_impossible}, {"DecDivisionUndefined", MPD_Division_undefined}, {"DecFpuError", MPD_Fpu_error}, {"DecInexact", MPD_Inexact}, {"DecInvalidContext", MPD_Invalid_context}, {"DecInvalidOperation", MPD_Invalid_operation}, {"DecIEEEInvalidOperation", MPD_IEEE_Invalid_operation}, {"DecMallocError", MPD_Malloc_error}, {"DecFloatOperation", MPD_Float_operation}, {"DecOverflow", MPD_Overflow}, {"DecRounded", MPD_Rounded}, {"DecSubnormal", MPD_Subnormal}, {"DecUnderflow", MPD_Underflow}, {"DecErrors", MPD_Errors}, {"DecTraps", MPD_Traps}, #endif {NULL} }; #define CHECK_INT(expr) \ do { if ((expr) < 0) goto error; } while (0) #define ASSIGN_PTR(result, expr) \ do { result = (expr); if (result == NULL) goto error; } while (0) #define CHECK_PTR(expr) \ do { if ((expr) == NULL) goto error; } while (0) static PyCFunction cfunc_noargs(PyTypeObject *t, const char *name) { struct PyMethodDef *m; if (t->tp_methods == NULL) { goto error; } for (m = t->tp_methods; m->ml_name != NULL; m++) { if (strcmp(name, m->ml_name) == 0) { if (!(m->ml_flags & METH_NOARGS)) { goto error; } return m->ml_meth; } } error: PyErr_Format(PyExc_RuntimeError, "internal error: could not find method %s", name); return NULL; } static int minalloc_is_set = 0; static int _decimal_exec(PyObject *m) { PyObject *numbers = NULL; PyObject *Number = NULL; PyObject *collections = NULL; PyObject *collections_abc = NULL; PyObject *MutableMapping = NULL; PyObject *obj = NULL; DecCondMap *cm; struct ssize_constmap *ssize_cm; struct int_constmap *int_cm; int i; /* Init libmpdec */ mpd_traphandler = dec_traphandler; mpd_mallocfunc = PyMem_Malloc; mpd_reallocfunc = PyMem_Realloc; mpd_callocfunc = mpd_callocfunc_em; mpd_free = PyMem_Free; /* Suppress the warning caused by multi-phase initialization */ if (!minalloc_is_set) { 
mpd_setminalloc(_Py_DEC_MINALLOC); minalloc_is_set = 1; } decimal_state *state = get_module_state(m); /* Init external C-API functions */ state->_py_long_multiply = PyLong_Type.tp_as_number->nb_multiply; state->_py_long_floor_divide = PyLong_Type.tp_as_number->nb_floor_divide; state->_py_long_power = PyLong_Type.tp_as_number->nb_power; state->_py_float_abs = PyFloat_Type.tp_as_number->nb_absolute; ASSIGN_PTR(state->_py_float_as_integer_ratio, cfunc_noargs(&PyFloat_Type, "as_integer_ratio")); ASSIGN_PTR(state->_py_long_bit_length, cfunc_noargs(&PyLong_Type, "bit_length")); /* Init types */ #define CREATE_TYPE(mod, tp, spec) do { \ tp = (PyTypeObject *)PyType_FromMetaclass(NULL, mod, spec, NULL); \ CHECK_PTR(tp); \ } while (0) CREATE_TYPE(m, state->PyDec_Type, &dec_spec); CREATE_TYPE(m, state->PyDecContext_Type, &context_spec); CREATE_TYPE(m, state->PyDecContextManager_Type, &ctxmanager_spec); CREATE_TYPE(m, state->PyDecSignalDictMixin_Type, &signaldict_spec); #undef CREATE_TYPE ASSIGN_PTR(obj, PyUnicode_FromString("decimal")); CHECK_INT(PyDict_SetItemString(state->PyDec_Type->tp_dict, "__module__", obj)); CHECK_INT(PyDict_SetItemString(state->PyDecContext_Type->tp_dict, "__module__", obj)); Py_CLEAR(obj); /* Numeric abstract base classes */ ASSIGN_PTR(numbers, PyImport_ImportModule("numbers")); ASSIGN_PTR(Number, PyObject_GetAttrString(numbers, "Number")); /* Register Decimal with the Number abstract base class */ ASSIGN_PTR(obj, PyObject_CallMethod(Number, "register", "(O)", (PyObject *)state->PyDec_Type)); Py_CLEAR(obj); /* Rational is a global variable used for fraction comparisons. 
*/ ASSIGN_PTR(state->Rational, PyObject_GetAttrString(numbers, "Rational")); /* Done with numbers, Number */ Py_CLEAR(numbers); Py_CLEAR(Number); /* DecimalTuple */ ASSIGN_PTR(collections, PyImport_ImportModule("collections")); ASSIGN_PTR(obj, PyObject_CallMethod(collections, "namedtuple", "(ss)", "DecimalTuple", "sign digits exponent")); if (!PyType_Check(obj)) { PyErr_SetString(PyExc_TypeError, "type is expected from namedtuple call"); goto error; } ASSIGN_PTR(state->DecimalTuple, (PyTypeObject *)obj); ASSIGN_PTR(obj, PyUnicode_FromString("decimal")); CHECK_INT(PyDict_SetItemString(state->DecimalTuple->tp_dict, "__module__", obj)); Py_CLEAR(obj); /* MutableMapping */ ASSIGN_PTR(collections_abc, PyImport_ImportModule("collections.abc")); ASSIGN_PTR(MutableMapping, PyObject_GetAttrString(collections_abc, "MutableMapping")); /* Create SignalDict type */ ASSIGN_PTR(state->PyDecSignalDict_Type, (PyTypeObject *)PyObject_CallFunction( (PyObject *)&PyType_Type, "s(OO){}", "SignalDict", state->PyDecSignalDictMixin_Type, MutableMapping)); /* Done with collections, MutableMapping */ Py_CLEAR(collections); Py_CLEAR(collections_abc); Py_CLEAR(MutableMapping); /* For format specifiers not yet supported by libmpdec */ state->PyDecimal = NULL; /* Add types to the module */ CHECK_INT(PyModule_AddType(m, state->PyDec_Type)); CHECK_INT(PyModule_AddType(m, state->PyDecContext_Type)); CHECK_INT(PyModule_AddType(m, state->DecimalTuple)); /* Create top level exception */ ASSIGN_PTR(state->DecimalException, PyErr_NewException( "decimal.DecimalException", PyExc_ArithmeticError, NULL)); CHECK_INT(PyModule_AddType(m, (PyTypeObject *)state->DecimalException)); /* Create signal tuple */ ASSIGN_PTR(state->SignalTuple, PyTuple_New(SIGNAL_MAP_LEN)); /* Add exceptions that correspond to IEEE signals */ ASSIGN_PTR(state->signal_map, dec_cond_map_init(signal_map_template, sizeof(signal_map_template))); for (i = SIGNAL_MAP_LEN-1; i >= 0; i--) { PyObject *base; cm = state->signal_map + i; switch 
(cm->flag) { case MPD_Float_operation: base = PyTuple_Pack(2, state->DecimalException, PyExc_TypeError); break; case MPD_Division_by_zero: base = PyTuple_Pack(2, state->DecimalException, PyExc_ZeroDivisionError); break; case MPD_Overflow: base = PyTuple_Pack(2, state->signal_map[INEXACT].ex, state->signal_map[ROUNDED].ex); break; case MPD_Underflow: base = PyTuple_Pack(3, state->signal_map[INEXACT].ex, state->signal_map[ROUNDED].ex, state->signal_map[SUBNORMAL].ex); break; default: base = PyTuple_Pack(1, state->DecimalException); break; } if (base == NULL) { goto error; /* GCOV_NOT_REACHED */ } ASSIGN_PTR(cm->ex, PyErr_NewException(cm->fqname, base, NULL)); Py_DECREF(base); /* add to module */ CHECK_INT(PyModule_AddObjectRef(m, cm->name, cm->ex)); /* add to signal tuple */ PyTuple_SET_ITEM(state->SignalTuple, i, Py_NewRef(cm->ex)); } /* * Unfortunately, InvalidOperation is a signal that comprises * several conditions, including InvalidOperation! Naming the * signal IEEEInvalidOperation would prevent the confusion. 
*/ ASSIGN_PTR(state->cond_map, dec_cond_map_init(cond_map_template, sizeof(cond_map_template))); state->cond_map[0].ex = state->signal_map[0].ex; /* Add remaining exceptions, inherit from InvalidOperation */ for (cm = state->cond_map+1; cm->name != NULL; cm++) { PyObject *base; if (cm->flag == MPD_Division_undefined) { base = PyTuple_Pack(2, state->signal_map[0].ex, PyExc_ZeroDivisionError); } else { base = PyTuple_Pack(1, state->signal_map[0].ex); } if (base == NULL) { goto error; /* GCOV_NOT_REACHED */ } ASSIGN_PTR(cm->ex, PyErr_NewException(cm->fqname, base, NULL)); Py_DECREF(base); CHECK_INT(PyModule_AddObjectRef(m, cm->name, cm->ex)); } /* Init default context template first */ ASSIGN_PTR(state->default_context_template, PyObject_CallObject((PyObject *)state->PyDecContext_Type, NULL)); CHECK_INT(PyModule_AddObjectRef(m, "DefaultContext", state->default_context_template)); #ifndef WITH_DECIMAL_CONTEXTVAR ASSIGN_PTR(state->tls_context_key, PyUnicode_FromString("___DECIMAL_CTX__")); CHECK_INT(PyModule_AddObjectRef(m, "HAVE_CONTEXTVAR", Py_False)); #else ASSIGN_PTR(state->current_context_var, PyContextVar_New("decimal_context", NULL)); CHECK_INT(PyModule_AddObjectRef(m, "HAVE_CONTEXTVAR", Py_True)); #endif CHECK_INT(PyModule_AddObjectRef(m, "HAVE_THREADS", Py_True)); /* Init basic context template */ ASSIGN_PTR(state->basic_context_template, PyObject_CallObject((PyObject *)state->PyDecContext_Type, NULL)); init_basic_context(state->basic_context_template); CHECK_INT(PyModule_AddObjectRef(m, "BasicContext", state->basic_context_template)); /* Init extended context template */ ASSIGN_PTR(state->extended_context_template, PyObject_CallObject((PyObject *)state->PyDecContext_Type, NULL)); init_extended_context(state->extended_context_template); CHECK_INT(PyModule_AddObjectRef(m, "ExtendedContext", state->extended_context_template)); /* Init mpd_ssize_t constants */ for (ssize_cm = ssize_constants; ssize_cm->name != NULL; ssize_cm++) { CHECK_INT(PyModule_Add(m, 
ssize_cm->name, PyLong_FromSsize_t(ssize_cm->val))); } /* Init int constants */ for (int_cm = int_constants; int_cm->name != NULL; int_cm++) { CHECK_INT(PyModule_AddIntConstant(m, int_cm->name, int_cm->val)); } /* Init string constants */ for (i = 0; i < _PY_DEC_ROUND_GUARD; i++) { ASSIGN_PTR(state->round_map[i], PyUnicode_InternFromString(mpd_round_string[i])); CHECK_INT(PyModule_AddObjectRef(m, mpd_round_string[i], state->round_map[i])); } /* Add specification version number */ CHECK_INT(PyModule_AddStringConstant(m, "SPEC_VERSION", MPD_SPEC_VERSION)); CHECK_INT(PyModule_AddStringConstant(m, "__libmpdec_version__", mpd_version())); return 0; error: Py_CLEAR(obj); /* GCOV_NOT_REACHED */ Py_CLEAR(numbers); /* GCOV_NOT_REACHED */ Py_CLEAR(Number); /* GCOV_NOT_REACHED */ Py_CLEAR(collections); /* GCOV_NOT_REACHED */ Py_CLEAR(collections_abc); /* GCOV_NOT_REACHED */ Py_CLEAR(MutableMapping); /* GCOV_NOT_REACHED */ return -1; } static int decimal_traverse(PyObject *module, visitproc visit, void *arg) { decimal_state *state = get_module_state(module); Py_VISIT(state->PyDecContextManager_Type); Py_VISIT(state->PyDecContext_Type); Py_VISIT(state->PyDecSignalDictMixin_Type); Py_VISIT(state->PyDec_Type); Py_VISIT(state->PyDecSignalDict_Type); Py_VISIT(state->DecimalTuple); Py_VISIT(state->DecimalException); #ifndef WITH_DECIMAL_CONTEXTVAR Py_VISIT(state->tls_context_key); Py_VISIT(state->cached_context); #else Py_VISIT(state->current_context_var); #endif Py_VISIT(state->default_context_template); Py_VISIT(state->basic_context_template); Py_VISIT(state->extended_context_template); Py_VISIT(state->Rational); Py_VISIT(state->SignalTuple); if (state->signal_map != NULL) { for (DecCondMap *cm = state->signal_map; cm->name != NULL; cm++) { Py_VISIT(cm->ex); } } if (state->cond_map != NULL) { for (DecCondMap *cm = state->cond_map + 1; cm->name != NULL; cm++) { Py_VISIT(cm->ex); } } return 0; } static int decimal_clear(PyObject *module) { decimal_state *state = 
get_module_state(module); Py_CLEAR(state->PyDecContextManager_Type); Py_CLEAR(state->PyDecContext_Type); Py_CLEAR(state->PyDecSignalDictMixin_Type); Py_CLEAR(state->PyDec_Type); Py_CLEAR(state->PyDecSignalDict_Type); Py_CLEAR(state->DecimalTuple); Py_CLEAR(state->DecimalException); #ifndef WITH_DECIMAL_CONTEXTVAR Py_CLEAR(state->tls_context_key); Py_CLEAR(state->cached_context); #else Py_CLEAR(state->current_context_var); #endif Py_CLEAR(state->default_context_template); Py_CLEAR(state->basic_context_template); Py_CLEAR(state->extended_context_template); Py_CLEAR(state->Rational); Py_CLEAR(state->SignalTuple); Py_CLEAR(state->PyDecimal); if (state->signal_map != NULL) { for (DecCondMap *cm = state->signal_map; cm->name != NULL; cm++) { Py_DECREF(cm->ex); } PyMem_Free(state->signal_map); state->signal_map = NULL; } if (state->cond_map != NULL) { // cond_map[0].ex has borrowed a reference from signal_map[0].ex for (DecCondMap *cm = state->cond_map + 1; cm->name != NULL; cm++) { Py_DECREF(cm->ex); } PyMem_Free(state->cond_map); state->cond_map = NULL; } return 0; } static void decimal_free(void *module) { (void)decimal_clear((PyObject *)module); } static struct PyModuleDef_Slot _decimal_slots[] = { {Py_mod_exec, _decimal_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL}, }; static struct PyModuleDef _decimal_module = { PyModuleDef_HEAD_INIT, .m_name = "decimal", .m_doc = "C decimal arithmetic module", .m_size = sizeof(decimal_state), .m_methods = _decimal_methods, .m_slots = _decimal_slots, .m_traverse = decimal_traverse, .m_clear = decimal_clear, .m_free = decimal_free, }; PyMODINIT_FUNC PyInit__decimal(void) { return PyModuleDef_Init(&_decimal_module); }
c
github
https://github.com/python/cpython
Modules/_decimal/_decimal.c
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. void foo() { }
c
github
https://github.com/golang/go
src/cmd/cgo/internal/test/issue8828/issue8828.c
/* * Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.http.content import io.ktor.http.* import io.ktor.http.content.PartData.* import io.ktor.utils.io.* import io.ktor.utils.io.core.* import kotlinx.coroutines.flow.* /** * Represents a multipart/form-data entry. Could be a [FormItem] or [FileItem]. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData) * * @property dispose to be invoked when this part is no longer needed * @property headers of this part, could be inaccurate on some engines */ public sealed class PartData(public val dispose: () -> Unit, public val headers: Headers) { /** * Represents a multipart form item. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.FormItem) * * @property value of this field */ public class FormItem(public val value: String, dispose: () -> Unit, partHeaders: Headers) : PartData(dispose, partHeaders) /** * Represents a file item. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.FileItem) * * @property provider of content bytes */ public class FileItem( public val provider: () -> ByteReadChannel, dispose: () -> Unit, partHeaders: Headers ) : PartData(dispose, partHeaders) { /** * Original file name if present * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.FileItem.originalFileName) */ public val originalFileName: String? = contentDisposition?.parameter(ContentDisposition.Parameters.FileName) } /** * Represents a binary item. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.BinaryItem) * * @property provider of content bytes */ public class BinaryItem( public val provider: () -> Input, dispose: () -> Unit, partHeaders: Headers ) : PartData(dispose, partHeaders) /** * Represents a binary part with a provider that supplies [ByteReadChannel]. 
* * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.BinaryChannelItem) * * @property provider supplies a channel to read data from */ public class BinaryChannelItem( public val provider: () -> ByteReadChannel, partHeaders: Headers ) : PartData({}, partHeaders) /** * Parsed `Content-Disposition` header or `null` if missing. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.contentDisposition) */ public val contentDisposition: ContentDisposition? by lazy(LazyThreadSafetyMode.NONE) { headers[HttpHeaders.ContentDisposition]?.let { ContentDisposition.parse(it) } } /** * Parsed `Content-Type` header or `null` if missing. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.contentType) */ public val contentType: ContentType? by lazy(LazyThreadSafetyMode.NONE) { headers[HttpHeaders.ContentType]?.let { ContentType.parse( it ) } } /** * Optional part name based on `Content-Disposition` header. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.PartData.name) */ public val name: String? get() = contentDisposition?.name } /** * Represents a multipart data stream that could be received from a call. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.MultiPartData) */ public interface MultiPartData { /** * Reads next part data or `null` if the end of multipart stream encountered. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.MultiPartData.readPart) */ public suspend fun readPart(): PartData? /** * An empty multipart data stream. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.MultiPartData.Empty) */ public object Empty : MultiPartData { override suspend fun readPart(): PartData? { return null } } } /** * Transforms the multipart data stream into a [Flow] of [PartData]. 
* * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.asFlow) * * @return a [Flow] emitting each part of the multipart data until the end of the stream. */ public fun MultiPartData.asFlow(): Flow<PartData> = flow { while (true) { val part = readPart() ?: break emit(part) } } /** * Parse multipart data stream and invoke [partHandler] for each [PartData] encountered. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.forEachPart) * * @param partHandler to be invoked for every part item */ public suspend fun MultiPartData.forEachPart(partHandler: suspend (PartData) -> Unit): Unit = asFlow().collect(partHandler) /** * Parse multipart data stream and put all parts into a list. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.content.readAllParts) * * @return a list of [PartData] */ @Deprecated("This method can deadlock on large requests. Use `forEachPart` instead.", level = DeprecationLevel.ERROR) public suspend fun MultiPartData.readAllParts(): List<PartData> { var part = readPart() ?: return emptyList() val parts = ArrayList<PartData>() parts.add(part) do { part = readPart() ?: break parts.add(part) } while (true) return parts }
kotlin
github
https://github.com/ktorio/ktor
ktor-http/common/src/io/ktor/http/content/Multipart.kt
# Author: Mathieu Blondel <mathieu@mblondel.org> # License: BSD 3 clause import time import matplotlib.pyplot as plt from sklearn.utils import check_random_state from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_kernels def plot(func): random_state = check_random_state(0) one_core = [] multi_core = [] sample_sizes = range(1000, 6000, 1000) for n_samples in sample_sizes: X = random_state.rand(n_samples, 300) start = time.time() func(X, n_jobs=1) one_core.append(time.time() - start) start = time.time() func(X, n_jobs=-1) multi_core.append(time.time() - start) plt.figure('scikit-learn parallel %s benchmark results' % func.__name__) plt.plot(sample_sizes, one_core, label="one core") plt.plot(sample_sizes, multi_core, label="multi core") plt.xlabel('n_samples') plt.ylabel('Time (s)') plt.title('Parallel %s' % func.__name__) plt.legend() def euclidean_distances(X, n_jobs): return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs) def rbf_kernels(X, n_jobs): return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1) plot(euclidean_distances) plot(rbf_kernels) plt.show()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # muMDAU_app main / first page from muMDAU_app import app, socketio from threading import Thread from flask import request, render_template, Blueprint, url_for, redirect, session from database import ManageSQL, LoginSQL from dbmongo import User import subprocess, os from subprocess import PIPE from time import sleep main = Blueprint('main', __name__) thread = None # index page main route page @main.route('/', methods=['GET', 'POST']) def index(): if 'username' in session: p = subprocess.Popen(['python3 ./muMDAU_app/pydeamon/cpuinfo.py core'], cwd='./', shell=True, stdout=PIPE, stderr=PIPE) core, errlog = p.communicate() global thread if thread is None: thread = Thread(target=info_connect) thread.daemon = True thread.start() return render_template('index.html', **locals()) else: return render_template('login.html') def info_connect(): while True: p = subprocess.Popen(['python3 ./muMDAU_app/pydeamon/cpuinfo.py percent'], cwd='./', shell=True, stdout=PIPE, stderr=PIPE) cpupercent, cpuerrlog = p.communicate() m = subprocess.Popen(['python3 ./muMDAU_app/pydeamon/mem.py'], cwd='./', shell=True, stdout=PIPE, stderr=PIPE) mempercent, merrlog = m.communicate() ns = subprocess.Popen(['python3 ./muMDAU_app/pydeamon/net.py sent'], cwd='./', shell=True, stdout=PIPE, stderr=PIPE) nslog, nserrlog = ns.communicate() nr = subprocess.Popen(['python3 ./muMDAU_app/pydeamon/net.py recv'], cwd='./', shell=True, stdout=PIPE, stderr=PIPE) nrlog, nrerrlog = nr.communicate() with open('/proc/uptime') as upfile: raw = upfile.read() fuptime = int(raw.split('.')[0]) day = int(fuptime / 86400) fuptime = fuptime % 86400 hour = int(fuptime / 3600) fuptime = fuptime % 3600 minute = int(fuptime / 60) uptime = '{daystring}{hours}:{mins:02d}'.format(daystring='{days} day{s}, '.format(days=day, s=('s' if day > 1else '')) if day else '', hours=hour, mins=minute) lavg = os.getloadavg() socketio.emit('info', {'cpuusage': round(float(cpupercent.decode('utf-8').replace('\n',''))), 
'uptime': str(uptime), 'lavg': lavg, 'mem': round(float(mempercent.decode('utf-8').replace('\n',''))), 'ns': str(nslog.decode('utf-8')), 'nr': str(nrlog.decode('utf-8'))}, namespace='/info') sleep(2) # init route to first time use @app.route('/init', methods=['GET', 'POST']) def init(): if request.method == 'POST': user = request.form['buser'] passd = request.form['bpass'] import hashlib hashsha = hashlib.sha256(passd.replace('\n', '').encode()) # ManageSQL.addUser(user, hashsha.hexdigest(), '1', '0') User.add(user, hashsha.hexdigest(), '1') return redirect(url_for('main.index')) else: return render_template('first.html') # test of adduser route page @app.route('/adduser', methods=['GET', 'POST']) def adduser(): if request.method == 'POST': user = request.form['buser'] if LoginSQL.getPass(user) is None: import hashlib import random ans = random.uniform(1, 10) hashpass1 = hashlib.sha1(str(ans).encode()) passd1 = hashpass1.hexdigest() hashpass0 = hashlib.sha256(passd1.replace('\n', '').encode()) ManageSQL.addUser(user, hashpass0.hexdigest(), '0', '1') return passd1 else: return '使用者已經他媽的存在了喔!'
unknown
codeparrot/codeparrot-clean
# # details_tab.py # # Copyright (C) 2008 Andrew Resch <andrewresch@gmail.com> # # Deluge is free software. # # You may redistribute it and/or modify it under the terms of the # GNU General Public License, as published by the Free Software # Foundation; either version 3 of the License, or (at your option) # any later version. # # deluge is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with deluge. If not, write to: # The Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor # Boston, MA 02110-1301, USA. # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the OpenSSL # library. # You must obey the GNU General Public License in all respects for all of # the code used other than OpenSSL. If you modify file(s) with this # exception, you may extend this exception to your version of the file(s), # but you are not obligated to do so. If you do not wish to do so, delete # this exception statement from your version. If you delete this exception # statement from all source files in the program, then also delete it here. # # import gtk, gtk.glade from deluge.ui.client import client import deluge.component as component from deluge.common import fsize, is_url from deluge.ui.gtkui.torrentdetails import Tab from deluge.log import LOG as log class DetailsTab(Tab): def __init__(self): Tab.__init__(self) # Get the labels we need to update. 
# widgetname, modifier function, status keys glade = component.get("MainWindow").main_glade self._name = "Details" self._child_widget = glade.get_widget("details_tab") self._tab_label = glade.get_widget("details_tab_label") self.label_widgets = [ (glade.get_widget("summary_name"), None, ("name",)), (glade.get_widget("summary_total_size"), fsize, ("total_size",)), (glade.get_widget("summary_num_files"), str, ("num_files",)), (glade.get_widget("summary_tracker"), None, ("tracker",)), (glade.get_widget("summary_torrent_path"), None, ("save_path",)), (glade.get_widget("summary_message"), str, ("message",)), (glade.get_widget("summary_hash"), str, ("hash",)), (glade.get_widget("summary_comments"), str, ("comment",)) ] def update(self): # Get the first selected torrent selected = component.get("TorrentView").get_selected_torrents() # Only use the first torrent in the list or return if None selected if len(selected) != 0: selected = selected[0] else: # No torrent is selected in the torrentview self.clear() return # Get the torrent status status_keys = ["name", "total_size", "num_files", "tracker", "save_path", "message", "hash", "comment"] session = component.get("SessionProxy") session.get_torrent_status(selected, status_keys).addCallback(self._on_get_torrent_status) def _on_get_torrent_status(self, status): # Check to see if we got valid data from the core if status is None: return # Update all the label widgets for widget in self.label_widgets: if widget[1] != None: args = [] try: for key in widget[2]: args.append(status[key]) except Exception, e: log.debug("Unable to get status value: %s", e) continue txt = widget[1](*args) else: txt = status[widget[2][0]] if widget[0].get_text() != txt: if widget[2][0] == 'comment' and is_url(txt): widget[0].set_markup('<a href="%s">%s</a>' % (txt, txt.replace('&', '&amp;'))) else: widget[0].set_markup(txt.replace('&', '&amp;')) def clear(self): for widget in self.label_widgets: widget[0].set_text("")
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import unittest from mock import Mock from tools.convert_gpx import distanceOnUnitSphereInMeters class TestGpxConversion(unittest.TestCase): def testDistanceCalculationBetweenTwoPoints(self): point1 = Mock() point1.latitude = 30 point1.longitude = 120 point2 = Mock() point2.latitude = 30 point2.longitude = 121 distance = distanceOnUnitSphereInMeters(point1, point2) self.assertAlmostEqual(distance, 96327.55557110473, places=6, msg="Distance calculation was wrong") def testDistanceCalculationBetweenTwoSamePoints(self): point1 = Mock() point1.latitude = 30 point1.longitude = 120 point2 = Mock() point2.latitude = 30 point2.longitude = 120 distance = distanceOnUnitSphereInMeters(point1, point2) self.assertEqual(distance, 0, "Distance calculation was wrong") if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- $id: http://devicetree.org/schemas/media/ti,vpe.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Texas Instruments DRA7x Video Processing Engine (VPE) maintainers: - Benoit Parrot <bparrot@ti.com> description: |- The Video Processing Engine (VPE) is a key component for image post processing applications. VPE consist of a single memory to memory path which can perform chroma up/down sampling, deinterlacing, scaling and color space conversion. properties: compatible: const: ti,dra7-vpe reg: items: - description: The VPE main register region - description: Scaler (SC) register region - description: Color Space Conversion (CSC) register region - description: Video Port Direct Memory Access (VPDMA) register region reg-names: items: - const: vpe_top - const: sc - const: csc - const: vpdma interrupts: maxItems: 1 required: - compatible - reg - reg-names - interrupts additionalProperties: false examples: - | #include <dt-bindings/interrupt-controller/arm-gic.h> vpe: vpe@489d0000 { compatible = "ti,dra7-vpe"; reg = <0x489d0000 0x120>, <0x489d0700 0x80>, <0x489d5700 0x18>, <0x489dd000 0x400>; reg-names = "vpe_top", "sc", "csc", "vpdma"; interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>; }; ...
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/media/ti,vpe.yaml
"""Native adapter for serving CherryPy via mod_python Basic usage: ########################################## # Application in a module called myapp.py ########################################## import cherrypy class Root: @cherrypy.expose def index(self): return 'Hi there, Ho there, Hey there' # We will use this method from the mod_python configuration # as the entry point to our application def setup_server(): cherrypy.tree.mount(Root()) cherrypy.config.update({'environment': 'production', 'log.screen': False, 'show_tracebacks': False}) ########################################## # mod_python settings for apache2 # This should reside in your httpd.conf # or a file that will be loaded at # apache startup ########################################## # Start DocumentRoot "/" Listen 8080 LoadModule python_module /usr/lib/apache2/modules/mod_python.so <Location "/"> PythonPath "sys.path+['/path/to/my/application']" SetHandler python-program PythonHandler cherrypy._cpmodpy::handler PythonOption cherrypy.setup myapp::setup_server PythonDebug On </Location> # End The actual path to your mod_python.so is dependent on your environment. In this case we suppose a global mod_python installation on a Linux distribution such as Ubuntu. We do set the PythonPath configuration setting so that your application can be found by from the user running the apache2 instance. Of course if your application resides in the global site-package this won't be needed. Then restart apache2 and access http://127.0.0.1:8080 """ import logging import sys import cherrypy from cherrypy._cpcompat import BytesIO, copyitems, ntob from cherrypy._cperror import format_exc, bare_error from cherrypy.lib import httputil # ------------------------------ Request-handling def setup(req): from mod_python import apache # Run any setup functions defined by a "PythonOption cherrypy.setup" directive. 
options = req.get_options() if 'cherrypy.setup' in options: for function in options['cherrypy.setup'].split(): atoms = function.split('::', 1) if len(atoms) == 1: mod = __import__(atoms[0], globals(), locals()) else: modname, fname = atoms mod = __import__(modname, globals(), locals(), [fname]) func = getattr(mod, fname) func() cherrypy.config.update({'log.screen': False, "tools.ignore_headers.on": True, "tools.ignore_headers.headers": ['Range'], }) engine = cherrypy.engine if hasattr(engine, "signal_handler"): engine.signal_handler.unsubscribe() if hasattr(engine, "console_control_handler"): engine.console_control_handler.unsubscribe() engine.autoreload.unsubscribe() cherrypy.server.unsubscribe() def _log(msg, level): newlevel = apache.APLOG_ERR if logging.DEBUG >= level: newlevel = apache.APLOG_DEBUG elif logging.INFO >= level: newlevel = apache.APLOG_INFO elif logging.WARNING >= level: newlevel = apache.APLOG_WARNING # On Windows, req.server is required or the msg will vanish. See # http://www.modpython.org/pipermail/mod_python/2003-October/014291.html. # Also, "When server is not specified...LogLevel does not apply..." apache.log_error(msg, newlevel, req.server) engine.subscribe('log', _log) engine.start() def cherrypy_cleanup(data): engine.exit() try: # apache.register_cleanup wasn't available until 3.1.4. 
apache.register_cleanup(cherrypy_cleanup) except AttributeError: req.server.register_cleanup(req, cherrypy_cleanup) class _ReadOnlyRequest: expose = ('read', 'readline', 'readlines') def __init__(self, req): for method in self.expose: self.__dict__[method] = getattr(req, method) recursive = False _isSetUp = False def handler(req): from mod_python import apache try: global _isSetUp if not _isSetUp: setup(req) _isSetUp = True # Obtain a Request object from CherryPy local = req.connection.local_addr local = httputil.Host(local[0], local[1], req.connection.local_host or "") remote = req.connection.remote_addr remote = httputil.Host(remote[0], remote[1], req.connection.remote_host or "") scheme = req.parsed_uri[0] or 'http' req.get_basic_auth_pw() try: # apache.mpm_query only became available in mod_python 3.1 q = apache.mpm_query threaded = q(apache.AP_MPMQ_IS_THREADED) forked = q(apache.AP_MPMQ_IS_FORKED) except AttributeError: bad_value = ("You must provide a PythonOption '%s', " "either 'on' or 'off', when running a version " "of mod_python < 3.1") threaded = options.get('multithread', '').lower() if threaded == 'on': threaded = True elif threaded == 'off': threaded = False else: raise ValueError(bad_value % "multithread") forked = options.get('multiprocess', '').lower() if forked == 'on': forked = True elif forked == 'off': forked = False else: raise ValueError(bad_value % "multiprocess") sn = cherrypy.tree.script_name(req.uri or "/") if sn is None: send_response(req, '404 Not Found', [], '') else: app = cherrypy.tree.apps[sn] method = req.method path = req.uri qs = req.args or "" reqproto = req.protocol headers = copyitems(req.headers_in) rfile = _ReadOnlyRequest(req) prev = None try: redirections = [] while True: request, response = app.get_serving(local, remote, scheme, "HTTP/1.1") request.login = req.user request.multithread = bool(threaded) request.multiprocess = bool(forked) request.app = app request.prev = prev # Run the CherryPy Request object and obtain 
the response try: request.run(method, path, qs, reqproto, headers, rfile) break except cherrypy.InternalRedirect: ir = sys.exc_info()[1] app.release_serving() prev = request if not recursive: if ir.path in redirections: raise RuntimeError("InternalRedirector visited the " "same URL twice: %r" % ir.path) else: # Add the *previous* path_info + qs to redirections. if qs: qs = "?" + qs redirections.append(sn + path + qs) # Munge environment and try again. method = "GET" path = ir.path qs = ir.query_string rfile = BytesIO() send_response(req, response.output_status, response.header_list, response.body, response.stream) finally: app.release_serving() except: tb = format_exc() cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR) s, h, b = bare_error() send_response(req, s, h, b) return apache.OK def send_response(req, status, headers, body, stream=False): # Set response status req.status = int(status[:3]) # Set response headers req.content_type = "text/plain" for header, value in headers: if header.lower() == 'content-type': req.content_type = value continue req.headers_out.add(header, value) if stream: # Flush now so the status and headers are sent immediately. req.flush() # Set response body if isinstance(body, basestring): req.write(body) else: for seg in body: req.write(seg) # --------------- Startup tools for CherryPy + mod_python --------------- # import os import re try: import subprocess def popen(fullcmd): p = subprocess.Popen(fullcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) return p.stdout except ImportError: def popen(fullcmd): pipein, pipeout = os.popen4(fullcmd) return pipeout def read_process(cmd, args=""): fullcmd = "%s %s" % (cmd, args) pipeout = popen(fullcmd) try: firstline = pipeout.readline() if (re.search(ntob("(not recognized|No such file|not found)"), firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' 
% cmd) output = firstline + pipeout.read() finally: pipeout.close() return output class ModPythonServer(object): template = """ # Apache2 server configuration file for running CherryPy with mod_python. DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so <Location %(loc)s> SetHandler python-program PythonHandler %(handler)s PythonDebug On %(opts)s </Location> """ def __init__(self, loc="/", port=80, opts=None, apache_path="apache", handler="cherrypy._cpmodpy::handler"): self.loc = loc self.port = port self.opts = opts self.apache_path = apache_path self.handler = handler def start(self): opts = "".join([" PythonOption %s %s\n" % (k, v) for k, v in self.opts]) conf_data = self.template % {"port": self.port, "loc": self.loc, "opts": opts, "handler": self.handler, } mpconf = os.path.join(os.path.dirname(__file__), "cpmodpy.conf") f = open(mpconf, 'wb') try: f.write(conf_data) finally: f.close() response = read_process(self.apache_path, "-k start -f %s" % mpconf) self.ready = True return response def stop(self): os.popen("apache -k stop") self.ready = False
unknown
codeparrot/codeparrot-clean
// Copyright 2024 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package util import "strings" const indentation = " " // Normalize normalizes a string: // 1. trim the leading and trailing space // 2. add an indentation before each line func Normalize(s string) string { if len(s) == 0 { return s } return normalizer{s}.trim().indent().string } type normalizer struct { string } func (n normalizer) trim() normalizer { n.string = strings.TrimSpace(n.string) return n } func (n normalizer) indent() normalizer { indentedLines := []string{} for _, line := range strings.Split(n.string, "\n") { trimmed := strings.TrimSpace(line) indented := indentation + trimmed indentedLines = append(indentedLines, indented) } n.string = strings.Join(indentedLines, "\n") return n }
go
github
https://github.com/etcd-io/etcd
etcdctl/util/normalizer.go
import sys sys.path.insert(1, "../../../") import h2o, tests import pandas as pd import zipfile import statsmodels.api as sm def link_functions_poisson(): print("Read in prostate data.") h2o_data = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip")) sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")). open("prostate_complete.csv")).as_matrix() sm_data_response = sm_data[:,9] sm_data_features = sm_data[:,1:9] print("Testing for family: POISSON") print("Set variables for h2o.") myY = "GLEASON" myX = ["ID","AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"] print("Create h2o model with canonical link: LOG") h2o_model_log = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="poisson", link="log",alpha=[0.5], Lambda=[0]) print("Create statsmodel model with canonical link: LOG") sm_model_log = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Poisson(sm.families.links.log)).fit() print("Compare model deviances for link function log") h2o_deviance_log = h2o_model_log.residual_deviance() / h2o_model_log.null_deviance() sm_deviance_log = sm_model_log.deviance / sm_model_log.null_deviance assert h2o_deviance_log - sm_deviance_log < 0.01, "expected h2o to have an equivalent or better deviance measures" print("Create h2o models with link: IDENTITY") h2o_model_id = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="poisson", link="identity",alpha=[0.5], Lambda=[0]) print("Create statsmodel models with link: IDENTITY") sm_model_id = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Poisson(sm.families.links.identity)).fit() print("Compare model deviances for link function identity") h2o_deviance_id = h2o_model_id.residual_deviance() / h2o_model_id.null_deviance() sm_deviance_id = sm_model_id.deviance / sm_model_id.null_deviance assert h2o_deviance_id - sm_deviance_id < 0.01, "expected h2o to have an equivalent or better deviance measures" if __name__ == 
"__main__": tests.run_test(sys.argv, link_functions_poisson)
unknown
codeparrot/codeparrot-clean
# (c) 2017 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import pytest import os from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep from ansible.modules.cloud.amazon import ec2_vpc_vpn from ansible.module_utils._text import to_text from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, boto3_tag_list_to_ansible_dict class FakeModule(object): def __init__(self, **kwargs): self.params = kwargs def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs raise Exception('FAIL') def exit_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs def get_vgw(connection): # see if two vgw exist and return them if so vgw = connection.describe_vpn_gateways(Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}]) if len(vgw['VpnGateways']) >= 2: return [vgw['VpnGateways'][0]['VpnGatewayId'], vgw['VpnGateways'][1]['VpnGatewayId']] # otherwise create two and return them vgw_1 = connection.create_vpn_gateway(Type='ipsec.1') vgw_2 = connection.create_vpn_gateway(Type='ipsec.1') for resource in (vgw_1, vgw_2): connection.create_tags(Resources=[resource['VpnGateway']['VpnGatewayId']], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}]) return [vgw_1['VpnGateway']['VpnGatewayId'], vgw_2['VpnGateway']['VpnGatewayId']] def get_cgw(connection): # see if two cgw exist and return them if so cgw = 
connection.describe_customer_gateways(DryRun=False, Filters=[{'Name': 'state', 'Values': ['available']}, {'Name': 'tag:Name', 'Values': ['Ansible-CGW']}]) if len(cgw['CustomerGateways']) >= 2: return [cgw['CustomerGateways'][0]['CustomerGatewayId'], cgw['CustomerGateways'][1]['CustomerGatewayId']] # otherwise create and return them cgw_1 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='9.8.7.6', BgpAsn=65000) cgw_2 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='5.4.3.2', BgpAsn=65000) for resource in (cgw_1, cgw_2): connection.create_tags(Resources=[resource['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}]) return [cgw_1['CustomerGateway']['CustomerGatewayId'], cgw_2['CustomerGateway']['CustomerGatewayId']] def get_dependencies(): if os.getenv('PLACEBO_RECORD'): module = FakeModule(**{}) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) vgw = get_vgw(connection) cgw = get_cgw(connection) else: vgw = ["vgw-35d70c2b", "vgw-32d70c2c"] cgw = ["cgw-6113c87f", "cgw-9e13c880"] return cgw, vgw def setup_mod_conn(placeboify, params): conn = placeboify.client('ec2') m = FakeModule(**params) return m, conn def make_params(cgw, vgw, tags=None, filters=None, routes=None): tags = {} if tags is None else tags filters = {} if filters is None else filters routes = [] if routes is None else routes return {'customer_gateway_id': cgw, 'static_only': True, 'vpn_gateway_id': vgw, 'connection_type': 'ipsec.1', 'purge_tags': True, 'tags': tags, 'filters': filters, 'routes': routes, 'delay': 15, 'wait_timeout': 600} def make_conn(placeboify, module, connection): customer_gateway_id = module.params['customer_gateway_id'] static_only = module.params['static_only'] vpn_gateway_id = module.params['vpn_gateway_id'] connection_type = 
module.params['connection_type'] check_mode = module.params['check_mode'] changed = True vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type) return changed, vpn def tear_down_conn(placeboify, connection, vpn_connection_id): ec2_vpc_vpn.delete_connection(connection, vpn_connection_id, delay=15, max_attempts=40) def test_find_connection_vpc_conn_id(placeboify, maybe_sleep): # setup dependencies for 2 vpn connections dependencies = setup_req(placeboify, 2) dep1, dep2 = dependencies[0], dependencies[1] params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection'] params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection'] # find the connection with a vpn_connection_id and assert it is the expected one assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId'] tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId']) tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId']) def test_find_connection_filters(placeboify, maybe_sleep): # setup dependencies for 2 vpn connections dependencies = setup_req(placeboify, 2) dep1, dep2 = dependencies[0], dependencies[1] params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection'] params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection'] # update to different tags params1.update(tags={'Wrong': 'Tag'}) params2.update(tags={'Correct': 'Tag'}) ec2_vpc_vpn.ensure_present(conn1, params1) ec2_vpc_vpn.ensure_present(conn2, params2) # create some new parameters for a filter params = {'filters': {'tags': {'Correct': 'Tag'}}} # find the connection that has the parameters above found = ec2_vpc_vpn.find_connection(conn1, params) # assert the correct connection was found assert found['VpnConnectionId'] == vpn2['VpnConnectionId'] # delete the connections tear_down_conn(placeboify, conn1, 
vpn1['VpnConnectionId']) tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId']) def test_find_connection_insufficient_filters(placeboify, maybe_sleep): # get list of customer gateways and virtual private gateways cgw, vgw = get_dependencies() # create two connections with the same tags params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'}) params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'}) m, conn = setup_mod_conn(placeboify, params) m2, conn2 = setup_mod_conn(placeboify, params2) _, vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params) _, vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params) # reset the parameters so only filtering by tags will occur m.params = {'filters': {'tags': {'Correct': 'Tag'}}} # assert that multiple matching connections have been found with pytest.raises(Exception) as error_message: ec2_vpc_vpn.find_connection(conn, m.params) assert error_message == "More than one matching VPN connection was found.To modify or delete a VPN please specify vpn_connection_id or add filters." 
# delete the connections tear_down_conn(placeboify, conn, vpn1['VpnConnectionId']) tear_down_conn(placeboify, conn, vpn2['VpnConnectionId']) def test_find_connection_nonexistent(placeboify, maybe_sleep): # create parameters but don't create a connection with them params = {'filters': {'tags': {'Correct': 'Tag'}}} m, conn = setup_mod_conn(placeboify, params) # try to find a connection with matching parameters and assert None are found assert ec2_vpc_vpn.find_connection(conn, m.params) is None def test_create_connection(placeboify, maybe_sleep): # get list of customer gateways and virtual private gateways cgw, vgw = get_dependencies() # create a connection params = make_params(cgw[0], vgw[0]) m, conn = setup_mod_conn(placeboify, params) changed, vpn = ec2_vpc_vpn.ensure_present(conn, m.params) # assert that changed is true and that there is a connection id assert changed is True assert 'VpnConnectionId' in vpn # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def test_create_connection_that_exists(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # try to recreate the same connection changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params) # nothing should have changed assert changed is False assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId'] # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def test_modify_deleted_connection(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # delete it tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) # try to update the deleted connection m.params.update(vpn_connection_id=vpn['VpnConnectionId']) with 
pytest.raises(Exception) as error_message: ec2_vpc_vpn.ensure_present(conn, m.params) assert error_message == "There is no VPN connection available or pending with that id. Did you delete it?" def test_delete_connection(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # delete it changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params) assert changed is True assert vpn == {} def test_delete_nonexistent_connection(placeboify, maybe_sleep): # create parameters and ensure any connection matching (None) is deleted params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}, 'delay': 15, 'wait_timeout': 600} m, conn = setup_mod_conn(placeboify, params) changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params) assert changed is False assert vpn == {} def test_check_for_update_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # add and remove a number of tags m.params['tags'] = {'One': 'one', 'Two': 'two'} ec2_vpc_vpn.ensure_present(conn, m.params) m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'} changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId']) flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add']) correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}]) assert flat_dict_changes == correct_changes assert changes['tags_to_remove'] == ['One'] # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, 
conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] current_vgw = params['vpn_gateway_id'] # update a parameter that isn't modifiable m.params.update(vpn_gateway_id="invalidchange") err = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are tags.'.format(current_vgw) with pytest.raises(Exception) as error_message: ec2_vpc_vpn.check_for_update(m, conn, vpn['VpnConnectionId']) assert error_message == err # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def test_add_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # add a tag to the connection ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}]) # assert tag is there current_vpn = ec2_vpc_vpn.find_connection(conn, params) assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}] # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def test_remove_tags(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # remove a tag from the connection ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test']) # assert the tag is gone current_vpn = ec2_vpc_vpn.find_connection(conn, params) assert 'Tags' not in current_vpn # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def test_add_routes(placeboify, maybe_sleep): # setup dependencies for 1 vpn connection dependencies = setup_req(placeboify, 1) params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection'] # 
create connection with a route ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24']) # assert both routes are there current_vpn = ec2_vpc_vpn.find_connection(conn, params) assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24']) # delete connection tear_down_conn(placeboify, conn, vpn['VpnConnectionId']) def setup_req(placeboify, number_of_results=1): ''' returns dependencies for VPN connections ''' assert number_of_results in (1, 2) results = [] cgw, vgw = get_dependencies() for each in range(0, number_of_results): params = make_params(cgw[each], vgw[each]) m, conn = setup_mod_conn(placeboify, params) _, vpn = ec2_vpc_vpn.ensure_present(conn, params) results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params}) if number_of_results == 1: return results[0] else: return results[0], results[1]
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python from __future__ import print_function import os import sys import json from subprocess import check_call this_path = os.path.dirname(os.path.abspath(__file__)) daft_path = os.path.dirname(this_path) sys.path.insert(0, daft_path) example_dir = os.path.join(daft_path, "examples") out_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "examples") img_out_dir = os.path.join(this_path, "_static", "examples") try: os.makedirs(out_dir) except os.error: pass try: os.makedirs(img_out_dir) except os.error: pass example_template = """.. _{example}: {title} .. figure:: /_static/examples/{example}.png {doc} :: {src} """ def main(fn, thumb_info): # Run the code. pyfn = os.path.join(example_dir, fn + ".py") src = open(pyfn).read() print("Executing: " + pyfn) ns = {} exec src in ns pgm = ns["pgm"] # Generate the RST source file. src = src.split("\n") if ns["__doc__"] is None: title = fn.title() + "\n" + "=" * len(fn) doc = "" else: doc = ns["__doc__"].split("\n") title = "\n".join(doc[:3]) doc = "\n".join(doc) src = src[len(ns["__doc__"].split("\n")):] fmt_src = "\n".join([" " + l for l in src]) img_path = os.path.join(img_out_dir, fn + ".png") thumb_path = os.path.join(img_out_dir, fn + "-thumb.png") rst = example_template.format(title=title, doc=doc, example=fn, src=fmt_src, img_path=img_path) # Write the RST file. rstfn = os.path.join(out_dir, fn + ".rst") print("Writing: " + rstfn) with open(rstfn, "w") as f: f.write(rst) # Remove the generated plots. try: os.remove(fn + ".png") except os.error: pass try: os.remove(fn + ".pdf") except os.error: pass # Save the new figure. print("Saving: " + img_path) pgm.figure.savefig(img_path, dpi=150) # Crop the thumbnail. 
cmd = " ".join(["convert", "-crop 190x190+{0[0]:d}+{0[1]:d}".format(thumb_info), img_path, thumb_path]) print(cmd) check_call(cmd, shell=True) if __name__ == "__main__": m = json.load(open(os.path.join(this_path, "_static", "examples.json"))) if len(sys.argv) == 1: # Build all the examples. argv = m.keys() else: argv = sys.argv[1:] for k in argv: assert k in m, "Add {0} to _static/examples.json".format(k) main(k, m[k])
unknown
codeparrot/codeparrot-clean
import { forEachNoEmitOnErrorScenarioTscWatch } from "../helpers/noEmitOnError.js"; describe("unittests:: tsbuildWatch:: watchMode:: with noEmitOnError::", () => { forEachNoEmitOnErrorScenarioTscWatch(["-b", "-verbose"]); });
typescript
github
https://github.com/microsoft/TypeScript
src/testRunner/unittests/tsbuildWatch/noEmitOnError.ts
# This is part of the Python test suite. # The object is registered when you first run the test suite. # (and hopefully unregistered once done ;-) # Ensure the vtables in the tlb are known. from win32com import universal from win32com.server.exception import COMException from win32com.client import gencache import winerror from win32com.client import constants from win32com.server.util import wrap import pythoncom pythoncom.__future_currency__ = True # We use the constants from the module, so must insist on a gencache. # Otherwise, use of gencache is not necessary (tho still advised) gencache.EnsureModule('{6BCDCB60-5605-11D0-AE5F-CADD4C000000}', 0, 1, 1) class PyCOMTest: _typelib_guid_ = "{6BCDCB60-5605-11D0-AE5F-CADD4C000000}" _typelib_version = 1,0 _com_interfaces_ = ['IPyCOMTest'] _reg_clsid_ = "{e743d9cd-cb03-4b04-b516-11d3a81c1597}" _reg_progid_ = "Python.Test.PyCOMTest" def DoubleString(self, str): return str*2 def DoubleInOutString(self, str): return str*2 def Fire(self, nID): raise COMException(hresult=winerror.E_NOTIMPL) def GetLastVarArgs(self): raise COMException(hresult=winerror.E_NOTIMPL) def GetMultipleInterfaces(self, outinterface1, outinterface2): raise COMException(hresult=winerror.E_NOTIMPL) def GetSafeArrays(self, attrs, attrs2, ints): raise COMException(hresult=winerror.E_NOTIMPL) def GetSetDispatch(self, indisp): raise COMException(hresult=winerror.E_NOTIMPL) # Result is of type IPyCOMTest def GetSetInterface(self, ininterface): return wrap(self) def GetSetVariant(self, indisp): return indisp def TestByRefVariant(self, v): return v * 2 def TestByRefString(self, v): return v * 2 # Result is of type IPyCOMTest def GetSetInterfaceArray(self, ininterface): raise COMException(hresult=winerror.E_NOTIMPL) def GetSetUnknown(self, inunk): raise COMException(hresult=winerror.E_NOTIMPL) # Result is of type ISimpleCounter def GetSimpleCounter(self): raise COMException(hresult=winerror.E_NOTIMPL) def GetSimpleSafeArray(self, ints): raise 
COMException(hresult=winerror.E_NOTIMPL) def GetStruct(self): raise COMException(hresult=winerror.E_NOTIMPL) def SetIntSafeArray(self, ints): return len(ints) def SetLongLongSafeArray(self, ints): return len(ints) def SetULongLongSafeArray(self, ints): return len(ints) def SetBinSafeArray(self, buf): return len(buf) def SetVarArgs(self, *args): raise COMException(hresult=winerror.E_NOTIMPL) def SetVariantSafeArray(self, vars): raise COMException(hresult=winerror.E_NOTIMPL) def Start(self): raise COMException(hresult=winerror.E_NOTIMPL) def Stop(self, nID): raise COMException(hresult=winerror.E_NOTIMPL) def StopAll(self): raise COMException(hresult=winerror.E_NOTIMPL) def TakeByRefDispatch(self, inout): raise COMException(hresult=winerror.E_NOTIMPL) def TakeByRefTypedDispatch(self, inout): raise COMException(hresult=winerror.E_NOTIMPL) def Test(self, key, inval): return not inval def Test2(self, inval): return inval def Test3(self, inval): raise COMException(hresult=winerror.E_NOTIMPL) def Test4(self, inval): raise COMException(hresult=winerror.E_NOTIMPL) def Test5(self, inout): if inout == constants.TestAttr1: return constants.TestAttr1_1 elif inout == constants.TestAttr1_1: return constants.TestAttr1 else: return -1 def Test6(self, inval): return inval def TestOptionals(self, strArg='def', sval=0, lval=1, dval=3.1400001049041748): raise COMException(hresult=winerror.E_NOTIMPL) def TestOptionals2(self, dval, strval='', sval=1): raise COMException(hresult=winerror.E_NOTIMPL) def CheckVariantSafeArray(self, data): return 1 def LongProp(self): return self.longval def SetLongProp(self, val): self.longval = val def ULongProp(self): return self.ulongval def SetULongProp(self, val): self.ulongval = val def IntProp(self): return self.intval def SetIntProp(self, val): self.intval = val class PyCOMTestMI(PyCOMTest): _typelib_guid_ = "{6BCDCB60-5605-11D0-AE5F-CADD4C000000}" _typelib_version = 1,0 # Interfaces with a interface name, a real IID, and an IID as a string 
_com_interfaces_ = ['IPyCOMTest', pythoncom.IID_IStream, str(pythoncom.IID_IStorage), ] _reg_clsid_ = "{F506E9A1-FB46-4238-A597-FA4EB69787CA}" _reg_progid_ = "Python.Test.PyCOMTestMI" if __name__ == '__main__': import win32com.server.register win32com.server.register.UseCommandLine(PyCOMTest) win32com.server.register.UseCommandLine(PyCOMTestMI)
unknown
codeparrot/codeparrot-clean
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # FBGEMM [FBGEMM (Facebook GEneral Matrix Multiplication)](https://github.com/pytorch/FBGEMM) is a low-precision matrix multiplication library for small batch sizes and support for accuracy-loss minimizing techniques such as row-wise quantization and outlier-aware quantization. With FBGEMM, quantize a models weights to 8-bits/channel and the activations to 8-bits/token (also known as fp8 or w8a8). > [!TIP] > You need a GPU with [compute capability 9+](https://developer.nvidia.com/cuda-gpus#collapseOne) like a H100. Install the FBGEMM_GPU package with the command below to ensure you have the latest version. ```bash pip install --upgrade accelerate fbgemm-gpu torch ``` If you're having installation issues, try installing the [nightly release](https://pytorch.org/FBGEMM/fbgemm_gpu-development/InstallationInstructions.html#fbgemm-gpu-install-libraries:~:text=found%20here.-,Install%20the%20FBGEMM_GPU%20Package,-Install%20through%20PyTorch). Create a [`FbgemmFp8Config`] and pass it to [`~PreTrainedModel.from_pretrained`] to quantize a model to fp8. 
```py from transformers import FbgemmFp8Config, AutoModelForCausalLM quantization_config = FbgemmFp8Config() quantized_model = AutoModelForCausalLM.from_pretrained( "meta-llama/Meta-Llama-3-8B", dtype="auto", device_map="auto", quantization_config=quantization_config ) ``` [`~PreTrainedModel.save_pretrained`] and [`~PreTrainedModel.from_pretrained`] enable saving and loading a quantized model. ```py quant_path = "/path/to/save/quantized/model" model.save_pretrained(quant_path) model = AutoModelForCausalLM.from_pretrained(quant_path, device_map="auto") ``` ## Resources Read the [Open-sourcing FBGEMM for state-of-the-art server-side inference](https://engineering.fb.com/2018/11/07/ml-applications/fbgemm/) blog post for more details on FBGEMM.
unknown
github
https://github.com/huggingface/transformers
docs/source/en/quantization/fbgemm_fp8.md
def func(): pass func.__module__ = None class A: def method(self): pass method.__module__ = None
python
github
https://github.com/python/cpython
Lib/test/test_pydoc/module_none.py
//===--- CompilerInvocation.cpp - CompilerInvocation methods --------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2025 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "clang/Driver/Driver.h" #include "swift/AST/SILOptions.h" #include "swift/Basic/DiagnosticOptions.h" #include "swift/Frontend/Frontend.h" #include "ArgsToFrontendOptionsConverter.h" #include "swift/AST/DiagnosticsFrontend.h" #include "swift/Basic/Assertions.h" #include "swift/Basic/Feature.h" #include "swift/Basic/LanguageMode.h" #include "swift/Basic/Platform.h" #include "swift/Basic/Version.h" #include "swift/Option/Options.h" #include "swift/Option/SanitizerOptions.h" #include "swift/Parse/Lexer.h" #include "swift/Parse/ParseVersion.h" #include "swift/SIL/SILBridging.h" #include "swift/Strings.h" #include "swift/SymbolGraphGen/SymbolGraphOptions.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Option/Arg.h" #include "llvm/Option/ArgList.h" #include "llvm/Option/Option.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/LineIterator.h" #include "llvm/Support/Path.h" #include "llvm/Support/PrefixMapper.h" #include "llvm/Support/Process.h" #include "llvm/Support/VersionTuple.h" #include "llvm/Support/WithColor.h" #include "llvm/TargetParser/Triple.h" using namespace swift; using namespace llvm::opt; /// The path for Swift libraries in the OS on Darwin. 
#define DARWIN_OS_LIBRARY_PATH "/usr/lib/swift" static constexpr const char *const localeCodes[] = { #define SUPPORTED_LOCALE(Code, Language) #Code, #include "swift/AST/LocalizationLanguages.def" }; swift::CompilerInvocation::CompilerInvocation() { setTargetTriple(llvm::sys::getDefaultTargetTriple()); } void CompilerInvocation::computeRuntimeResourcePathFromExecutablePath( StringRef mainExecutablePath, bool shared, llvm::SmallVectorImpl<char> &runtimeResourcePath) { runtimeResourcePath.append(mainExecutablePath.begin(), mainExecutablePath.end()); llvm::sys::path::remove_filename(runtimeResourcePath); // Remove /swift llvm::sys::path::remove_filename(runtimeResourcePath); // Remove /bin appendSwiftLibDir(runtimeResourcePath, shared); } void CompilerInvocation::appendSwiftLibDir(llvm::SmallVectorImpl<char> &path, bool shared) { llvm::sys::path::append(path, "lib", shared ? "swift" : "swift_static"); } void CompilerInvocation::setMainExecutablePath(StringRef Path) { FrontendOpts.MainExecutablePath = Path.str(); llvm::SmallString<128> LibPath; computeRuntimeResourcePathFromExecutablePath( Path, FrontendOpts.UseSharedResourceFolder, LibPath); setRuntimeResourcePath(LibPath.str()); llvm::SmallString<128> clangPath(Path); llvm::sys::path::remove_filename(clangPath); llvm::sys::path::append(clangPath, "clang"); ClangImporterOpts.clangPath = std::string(clangPath); } static std::string getVersionedPrebuiltModulePath(std::optional<llvm::VersionTuple> sdkVer, StringRef defaultPrebuiltPath) { if (!sdkVer.has_value()) return defaultPrebuiltPath.str(); std::string versionStr = sdkVer->getAsString(); StringRef vs = versionStr; do { SmallString<64> pathWithSDKVer = defaultPrebuiltPath; llvm::sys::path::append(pathWithSDKVer, vs); if (llvm::sys::fs::exists(pathWithSDKVer)) { return pathWithSDKVer.str().str(); } else if (vs.ends_with(".0")) { vs = vs.substr(0, vs.size() - 2); } else { return defaultPrebuiltPath.str(); } } while(true); } std::string 
CompilerInvocation::computePrebuiltCachePath( StringRef RuntimeResourcePath, llvm::Triple target, std::optional<llvm::VersionTuple> sdkVer) { SmallString<64> defaultPrebuiltPath{RuntimeResourcePath}; StringRef platform; if (tripleIsMacCatalystEnvironment(target)) { // The prebuilt cache for macCatalyst is the same as the one for macOS, not // iOS or a separate location of its own. platform = "macosx"; } else { platform = getPlatformNameForTriple(target); } llvm::sys::path::append(defaultPrebuiltPath, platform, "prebuilt-modules"); // If the SDK version is given, we should check if SDK-versioned prebuilt // module cache is available and use it if so. return getVersionedPrebuiltModulePath(sdkVer, defaultPrebuiltPath); } void CompilerInvocation::setDefaultPrebuiltCacheIfNecessary() { if (!FrontendOpts.PrebuiltModuleCachePath.empty()) return; if (SearchPathOpts.RuntimeResourcePath.empty()) return; FrontendOpts.PrebuiltModuleCachePath = computePrebuiltCachePath( SearchPathOpts.RuntimeResourcePath, LangOpts.Target, LangOpts.SDKVersion); if (!FrontendOpts.PrebuiltModuleCachePath.empty()) return; StringRef anchor = "prebuilt-modules"; assert(((StringRef)FrontendOpts.PrebuiltModuleCachePath).contains(anchor)); auto pair = ((StringRef)FrontendOpts.PrebuiltModuleCachePath).split(anchor); FrontendOpts.BackupModuleInterfaceDir = (llvm::Twine(pair.first) + "preferred-interfaces" + pair.second).str(); } void CompilerInvocation::setDefaultBlocklistsIfNecessary() { if (!LangOpts.BlocklistConfigFilePaths.empty()) return; if (SearchPathOpts.RuntimeResourcePath.empty()) return; // XcodeDefault.xctoolchain/usr/lib/swift SmallString<64> blocklistDir{SearchPathOpts.RuntimeResourcePath}; // XcodeDefault.xctoolchain/usr/lib llvm::sys::path::remove_filename(blocklistDir); // XcodeDefault.xctoolchain/usr llvm::sys::path::remove_filename(blocklistDir); // XcodeDefault.xctoolchain/usr/local/lib/swift/blocklists llvm::sys::path::append(blocklistDir, "local", "lib", "swift", "blocklists"); 
std::error_code EC; if (llvm::sys::fs::is_directory(blocklistDir)) { for (llvm::sys::fs::directory_iterator F(blocklistDir, EC), FE; F != FE; F.increment(EC)) { StringRef ext = llvm::sys::path::extension(F->path()); if (ext == "yml" || ext == "yaml") { LangOpts.BlocklistConfigFilePaths.push_back(F->path()); } } } } void CompilerInvocation::setDefaultInProcessPluginServerPathIfNecessary() { if (!SearchPathOpts.InProcessPluginServerPath.empty()) return; if (FrontendOpts.MainExecutablePath.empty()) return; // '/usr/bin/swift' SmallString<64> serverLibPath{FrontendOpts.MainExecutablePath}; llvm::sys::path::remove_filename(serverLibPath); // remove 'swift' #if defined(_WIN32) // Windows: usr\bin\SwiftInProcPluginServer.dll llvm::sys::path::append(serverLibPath, "SwiftInProcPluginServer.dll"); #elif defined(__APPLE__) // Darwin: usr/lib/swift/host/libSwiftInProcPluginServer.dylib llvm::sys::path::remove_filename(serverLibPath); // remove 'bin' llvm::sys::path::append(serverLibPath, "lib", "swift", "host"); llvm::sys::path::append(serverLibPath, "libSwiftInProcPluginServer.dylib"); #else // Other: usr/lib/swift/host/libSwiftInProcPluginServer.so llvm::sys::path::remove_filename(serverLibPath); // remove 'bin' llvm::sys::path::append(serverLibPath, "lib", "swift", "host"); llvm::sys::path::append(serverLibPath, "libSwiftInProcPluginServer.so"); #endif SearchPathOpts.InProcessPluginServerPath = serverLibPath.str(); } static std::optional<clang::DarwinSDKInfo> parseSDKSettings(llvm::vfs::FileSystem &VFS, const LangOptions &LangOpts, const SearchPathOptions &SearchPathOpts, DiagnosticEngine &Diags) { if (!LangOpts.Target.isOSDarwin() || SearchPathOpts.getSDKPath().empty()) return std::nullopt; auto SDKInfoOrErr = clang::parseDarwinSDKInfo(VFS, SearchPathOpts.getSDKPath()); if (!SDKInfoOrErr) { llvm::consumeError(SDKInfoOrErr.takeError()); Diags.diagnose(SourceLoc(), diag::warning_darwin_sdk_invalid_settings); return std::nullopt; } return *SDKInfoOrErr; } static void 
appendPlatformIncludePrefix( SmallString<128> &Path, const llvm::Triple &Triple, const std::optional<clang::DarwinSDKInfo> &SDKInfo) { if (SDKInfo) { const StringRef PlatformIncludePrefix = SDKInfo->getPlatformPrefix(Triple); if (!PlatformIncludePrefix.empty()) llvm::sys::path::append(Path, PlatformIncludePrefix); } } static void updateRuntimeLibraryPaths(SearchPathOptions &SearchPathOpts, const FrontendOptions &FrontendOpts, const LangOptions &LangOpts, const std::optional<clang::DarwinSDKInfo> &SDKInfo) { const llvm::Triple &Triple = LangOpts.Target; llvm::SmallString<128> LibPath(SearchPathOpts.RuntimeResourcePath); StringRef LibSubDir = getPlatformNameForTriple(Triple); if (tripleIsMacCatalystEnvironment(Triple)) LibSubDir = "maccatalyst"; if (LangOpts.hasFeature(Feature::Embedded)) LibSubDir = "embedded"; SearchPathOpts.RuntimeLibraryPaths.clear(); #if defined(_WIN32) // Resource path looks like this: // // C:\...\Swift\Toolchains\6.0.0+Asserts\usr\lib\swift // // The runtimes are in // // C:\...\Swift\Runtimes\6.0.0\usr\bin // // But, for testing, we also need to look in `bin`, next to the driver. 
llvm::SmallString<128> RuntimePath(LibPath); llvm::sys::path::remove_filename(RuntimePath); llvm::sys::path::remove_filename(RuntimePath); // For testing, we need to look in `bin` first llvm::sys::path::append(RuntimePath, "bin"); SearchPathOpts.RuntimeLibraryPaths.push_back(std::string(RuntimePath.str())); llvm::sys::path::remove_filename(RuntimePath); llvm::sys::path::remove_filename(RuntimePath); llvm::SmallString<128> VersionWithAttrs(llvm::sys::path::filename(RuntimePath)); size_t MaybePlus = VersionWithAttrs.find_first_of('+'); StringRef Version = VersionWithAttrs.substr(0, MaybePlus); llvm::sys::path::remove_filename(RuntimePath); llvm::sys::path::remove_filename(RuntimePath); llvm::sys::path::append(RuntimePath, "Runtimes", Version, "usr", "bin"); SearchPathOpts.RuntimeLibraryPaths.push_back(std::string(RuntimePath.str())); #endif llvm::sys::path::append(LibPath, LibSubDir); SearchPathOpts.RuntimeLibraryPaths.push_back(std::string(LibPath.str())); if (Triple.isOSDarwin()) SearchPathOpts.RuntimeLibraryPaths.push_back(DARWIN_OS_LIBRARY_PATH); // If this is set, we don't want any runtime import paths. if (SearchPathOpts.SkipAllImplicitImportPaths) { SearchPathOpts.setRuntimeLibraryImportPaths({}); return; } // Set up the import paths containing the swiftmodules for the libraries in // RuntimeLibraryPath. std::vector<std::string> RuntimeLibraryImportPaths; RuntimeLibraryImportPaths.push_back(std::string(LibPath.str())); // This is compatibility for <=5.3 if (!Triple.isOSDarwin()) { llvm::sys::path::append(LibPath, swift::getMajorArchitectureName(Triple)); RuntimeLibraryImportPaths.push_back(std::string(LibPath.str())); } if (!SearchPathOpts.SkipSDKImportPaths && !SearchPathOpts.getSDKPath().empty()) { const char *swiftDir = FrontendOpts.UseSharedResourceFolder ? 
"swift" : "swift_static"; if (tripleIsMacCatalystEnvironment(Triple)) { LibPath = SearchPathOpts.getSDKPath(); llvm::sys::path::append(LibPath, "System", "iOSSupport"); llvm::sys::path::append(LibPath, "usr", "lib", swiftDir); RuntimeLibraryImportPaths.push_back(std::string(LibPath.str())); } LibPath = SearchPathOpts.getSDKPath(); appendPlatformIncludePrefix(LibPath, Triple, SDKInfo); llvm::sys::path::append(LibPath, "usr", "lib", swiftDir); if (!Triple.isOSDarwin()) { // Use the non-architecture suffixed form with directory-layout // swiftmodules. llvm::sys::path::append(LibPath, getPlatformNameForTriple(Triple)); RuntimeLibraryImportPaths.push_back(std::string(LibPath.str())); // Compatibility with older releases - use the architecture suffixed form // for pre-directory-layout multi-architecture layout. Note that some // platforms (e.g. Windows) will use this even with directory layout in // older releases. llvm::sys::path::append(LibPath, swift::getMajorArchitectureName(Triple)); } RuntimeLibraryImportPaths.push_back(std::string(LibPath.str())); } SearchPathOpts.setRuntimeLibraryImportPaths(RuntimeLibraryImportPaths); } static void updateImplicitFrameworkSearchPaths( SearchPathOptions &SearchPathOpts, const LangOptions &LangOpts, const std::optional<clang::DarwinSDKInfo> &SDKInfo) { if (SearchPathOpts.SkipAllImplicitImportPaths) { SearchPathOpts.setImplicitFrameworkSearchPaths({}); return; } std::vector<std::string> ImplicitFrameworkSearchPaths; if (LangOpts.Target.isOSDarwin()) { if (!SearchPathOpts.SkipSDKImportPaths && !SearchPathOpts.getSDKPath().empty()) { SmallString<128> SDKPath(SearchPathOpts.getSDKPath()); appendPlatformIncludePrefix(SDKPath, LangOpts.Target, SDKInfo); SmallString<128> systemFrameworksScratch(SDKPath); llvm::sys::path::append(systemFrameworksScratch, "System", "Library", "Frameworks"); SmallString<128> systemSubFrameworksScratch(SDKPath); llvm::sys::path::append(systemSubFrameworksScratch, "System", "Library", "SubFrameworks"); 
SmallString<128> frameworksScratch(SDKPath); llvm::sys::path::append(frameworksScratch, "Library", "Frameworks"); ImplicitFrameworkSearchPaths = {systemFrameworksScratch.str().str(), systemSubFrameworksScratch.str().str(), frameworksScratch.str().str()}; } } SearchPathOpts.setImplicitFrameworkSearchPaths(ImplicitFrameworkSearchPaths); } static void setIRGenOutputOptsFromFrontendOptions(IRGenOptions &IRGenOpts, const FrontendOptions &FrontendOpts) { // Set the OutputKind for the given Action. IRGenOpts.OutputKind = [](FrontendOptions::ActionType Action) { switch (Action) { case FrontendOptions::ActionType::EmitIRGen: return IRGenOutputKind::LLVMAssemblyBeforeOptimization; case FrontendOptions::ActionType::EmitIR: return IRGenOutputKind::LLVMAssemblyAfterOptimization; case FrontendOptions::ActionType::EmitBC: return IRGenOutputKind::LLVMBitcode; case FrontendOptions::ActionType::EmitAssembly: return IRGenOutputKind::NativeAssembly; case FrontendOptions::ActionType::Immediate: return IRGenOutputKind::Module; case FrontendOptions::ActionType::EmitObject: default: // Just fall back to emitting an object file. If we aren't going to run // IRGen, it doesn't really matter what we put here anyways. return IRGenOutputKind::ObjectFile; } }(FrontendOpts.RequestedAction); // If we're in JIT mode, set the requisite flags. if (FrontendOpts.RequestedAction == FrontendOptions::ActionType::Immediate) { IRGenOpts.UseJIT = true; IRGenOpts.DebugInfoLevel = IRGenDebugInfoLevel::Normal; IRGenOpts.DebugInfoFormat = IRGenDebugInfoFormat::DWARF; } } static void setBridgingHeaderFromFrontendOptions(ClangImporterOptions &ImporterOpts, const FrontendOptions &FrontendOpts) { if (FrontendOpts.RequestedAction != FrontendOptions::ActionType::EmitPCH) return; // If there aren't any inputs, there's nothing to do. 
if (!FrontendOpts.InputsAndOutputs.hasInputs()) return; ImporterOpts.BridgingHeaderIsInternal = FrontendOpts.ImportHeaderAsInternal; // If we aren't asked to output a bridging header, we don't need to set this. if (ImporterOpts.PrecompiledHeaderOutputDir.empty()) return; ImporterOpts.BridgingHeader = FrontendOpts.InputsAndOutputs.getFilenameOfFirstInput(); } void CompilerInvocation::computeAArch64TBIOptions() { auto &LLVMArgs = getFrontendOptions().LLVMArgs; auto aarch64_use_tbi = std::find(LLVMArgs.begin(), LLVMArgs.end(), "-aarch64-use-tbi"); IRGenOpts.HasAArch64TBI = aarch64_use_tbi != LLVMArgs.end(); } void CompilerInvocation::computeCXXStdlibOptions() { // The MSVC driver in Clang is not aware of the C++ stdlib, and currently // always assumes libstdc++, which is incorrect: the Microsoft stdlib is // normally used. if (LangOpts.Target.isOSWindows()) { // In the future, we should support libc++ on Windows. That would require // the MSVC driver to support it first // (see https://reviews.llvm.org/D101479). 
LangOpts.CXXStdlib = CXXStdlibKind::Msvcprt; LangOpts.PlatformDefaultCXXStdlib = CXXStdlibKind::Msvcprt; } else if (LangOpts.Target.isOSLinux() || LangOpts.Target.isOSDarwin() || LangOpts.Target.isOSFreeBSD()) { auto [clangDriver, clangDiagEngine, clangDiagOpts] = ClangImporter::createClangDriver(LangOpts, ClangImporterOpts); auto clangDriverArgs = ClangImporter::createClangArgs( ClangImporterOpts, SearchPathOpts, clangDriver); auto &clangToolchain = clangDriver.getToolChain(clangDriverArgs, LangOpts.Target); auto cxxStdlibKind = clangToolchain.GetCXXStdlibType(clangDriverArgs); auto cxxDefaultStdlibKind = clangToolchain.GetDefaultCXXStdlibType(); auto toCXXStdlibKind = [](clang::driver::ToolChain::CXXStdlibType clangCXXStdlibType) -> CXXStdlibKind { switch (clangCXXStdlibType) { case clang::driver::ToolChain::CST_Libcxx: return CXXStdlibKind::Libcxx; case clang::driver::ToolChain::CST_Libstdcxx: return CXXStdlibKind::Libstdcxx; } }; LangOpts.CXXStdlib = toCXXStdlibKind(cxxStdlibKind); LangOpts.PlatformDefaultCXXStdlib = toCXXStdlibKind(cxxDefaultStdlibKind); } if (!LangOpts.isUsingPlatformDefaultCXXStdlib()) { // The CxxStdlib overlay was built for the platform default C++ stdlib, and // its .swiftmodule file refers to implementation-specific symbols (such as // namespace __1 in libc++, or namespace __cxx11 in libstdc++). Let's // proactively rebuild the CxxStdlib module from its .swiftinterface if a // non-default C++ stdlib is used. 
FrontendOpts.PreferInterfaceForModules.push_back("CxxStdlib"); } } void CompilerInvocation::setRuntimeResourcePath(StringRef Path) { SearchPathOpts.RuntimeResourcePath = Path.str(); updateRuntimeLibraryPaths(SearchPathOpts, FrontendOpts, LangOpts, SDKInfo); } void CompilerInvocation::setTargetTriple(StringRef Triple) { setTargetTriple(llvm::Triple(Triple)); } void CompilerInvocation::setTargetTriple(const llvm::Triple &Triple) { LangOpts.setTarget(Triple); updateRuntimeLibraryPaths(SearchPathOpts, FrontendOpts, LangOpts, SDKInfo); updateImplicitFrameworkSearchPaths(SearchPathOpts, LangOpts, SDKInfo); } void CompilerInvocation::setSDKPath(const std::string &Path) { SearchPathOpts.setSDKPath(Path); updateRuntimeLibraryPaths(SearchPathOpts, FrontendOpts, LangOpts, SDKInfo); updateImplicitFrameworkSearchPaths(SearchPathOpts, LangOpts, SDKInfo); } bool CompilerInvocation::setModuleAliasMap(std::vector<std::string> args, DiagnosticEngine &diags) { return ModuleAliasesConverter::computeModuleAliases(args, FrontendOpts, diags); } static void ParseAssertionArgs(ArgList &args) { using namespace options; if (args.hasArg(OPT_compiler_assertions)) { CONDITIONAL_ASSERT_Global_enable_flag = 1; } } static bool ParseFrontendArgs( FrontendOptions &opts, ArgList &args, DiagnosticEngine &diags, SmallVectorImpl<std::unique_ptr<llvm::MemoryBuffer>> *buffers) { ArgsToFrontendOptionsConverter converter(diags, args, opts); return converter.convert(buffers); } static void diagnoseSwiftVersion(std::optional<version::Version> &vers, Arg *verArg, ArgList &Args, DiagnosticEngine &diags) { // General invalid version error diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, verArg->getAsString(Args), verArg->getValue()); // Enumerate the valid language modes. 
std::string modesStr; { llvm::raw_string_ostream os(modesStr); llvm::interleaveComma( LanguageMode::allSupportedModes(), os, [&](LanguageMode mode) { os << "'" << mode.versionString() << "'"; }); } diags.diagnose(SourceLoc(), diag::note_valid_swift_versions, modesStr); } /// Create a new Regex instance out of the string value in \p RpassArg. /// It returns a pointer to the newly generated Regex instance. static std::shared_ptr<llvm::Regex> generateOptimizationRemarkRegex(DiagnosticEngine &Diags, ArgList &Args, Arg *RpassArg) { StringRef Val = RpassArg->getValue(); std::string RegexError; std::shared_ptr<llvm::Regex> Pattern = std::make_shared<llvm::Regex>(Val); if (!Pattern->isValid(RegexError)) { Diags.diagnose(SourceLoc(), diag::error_optimization_remark_pattern, RegexError, RpassArg->getAsString(Args)); Pattern.reset(); } return Pattern; } // Lifted from the clang driver. static void PrintArg(raw_ostream &OS, const char *Arg, StringRef TempDir) { const bool Escape = std::strpbrk(Arg, "\"\\$ "); if (!TempDir.empty()) { llvm::SmallString<256> ArgPath{Arg}; llvm::sys::fs::make_absolute(ArgPath); llvm::sys::path::native(ArgPath); llvm::SmallString<256> TempPath{TempDir}; llvm::sys::fs::make_absolute(TempPath); llvm::sys::path::native(TempPath); if (StringRef(ArgPath).starts_with(TempPath)) { // Don't write temporary file names in the debug info. This would prevent // incremental llvm compilation because we would generate different IR on // every compiler invocation. Arg = "<temporary-file>"; } } if (!Escape) { OS << Arg; return; } // Quote and escape. This isn't really complete, but good enough. 
OS << '"'; while (const char c = *Arg++) { if (c == '"' || c == '\\' || c == '$') OS << '\\'; OS << c; } OS << '"'; } static void ParseModuleInterfaceArgs(ModuleInterfaceOptions &Opts, ArgList &Args, DiagnosticEngine &diags) { using namespace options; Opts.PreserveTypesAsWritten |= Args.hasArg(OPT_module_interface_preserve_types_as_written); Opts.AliasModuleNames |= Args.hasFlag(OPT_alias_module_names_in_module_interface, OPT_disable_alias_module_names_in_module_interface, ::getenv("SWIFT_ALIAS_MODULE_NAMES_IN_INTERFACES")); Opts.PrintFullConvention |= Args.hasArg(OPT_experimental_print_full_convention); Opts.DebugPrintInvalidSyntax |= Args.hasArg(OPT_debug_emit_invalid_swiftinterface_syntax); Opts.PrintMissingImports = !Args.hasArg(OPT_disable_print_missing_imports_in_module_interface); if (const Arg *A = Args.getLastArg(OPT_library_level)) { StringRef contents = A->getValue(); if (contents == "spi") { Opts.setInterfaceMode(PrintOptions::InterfaceMode::Private); } } if (Args.hasArgNoClaim(OPT_enable_module_selectors_in_module_interface) || Args.hasArgNoClaim(OPT_disable_module_selectors_in_module_interface)) { Opts.UseModuleSelectors = Args.hasFlag(OPT_enable_module_selectors_in_module_interface, OPT_disable_module_selectors_in_module_interface, false); } else if (auto envValue = ::getenv("SWIFT_MODULE_SELECTORS_IN_INTERFACES")) { Opts.UseModuleSelectors = llvm::StringSwitch<bool>(envValue) .CasesLower({"false", "no", "off", "0"}, false) .Default(true); } else { // Any heuristics we might add would go here. 
Opts.UseModuleSelectors = false; } if (Opts.PreserveTypesAsWritten && Opts.UseModuleSelectors) { Opts.PreserveTypesAsWritten = false; diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by, "-module-interface-preserve-types-as-written", "-enable-module-selectors-in-module-interface"); } if (Opts.AliasModuleNames && Opts.UseModuleSelectors) { Opts.AliasModuleNames = false; diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by, "-alias-module-names-in-module-interface", "-enable-module-selectors-in-module-interface"); } } /// Checks if an arg is generally allowed to be included /// in a module interface static bool ShouldIncludeModuleInterfaceArg(const Arg *A) { if (!A->getOption().hasFlag(options::ModuleInterfaceOption) && !A->getOption().hasFlag(options::ModuleInterfaceOptionIgnorable)) return false; if (!A->getOption().matches(options::OPT_enable_experimental_feature)) return true; if (auto feature = Feature::getExperimentalFeature(A->getValue())) { return feature->includeInModuleInterface(); } return true; } static bool IsPackageInterfaceFlag(const Arg *A, ArgList &Args) { return false; } static bool IsPrivateInterfaceFlag(const Arg *A, ArgList &Args) { return A->getOption().matches(options::OPT_project_name); } /// Save a copy of any flags marked as ModuleInterfaceOption, if running /// in a mode that is going to emit a .swiftinterface file. static void SaveModuleInterfaceArgs(ModuleInterfaceOptions &Opts, FrontendOptions &FOpts, ArgList &Args, DiagnosticEngine &Diags) { if (!FOpts.InputsAndOutputs.hasModuleInterfaceOutputPath()) return; struct RenderedInterfaceArgs { ArgStringList Standard = {}; ArgStringList Ignorable = {}; }; RenderedInterfaceArgs PublicArgs{}; RenderedInterfaceArgs PrivateArgs{}; RenderedInterfaceArgs PackageArgs{}; auto interfaceArgListForArg = [&](Arg *A) -> ArgStringList & { bool ignorable = A->getOption().hasFlag(options::ModuleInterfaceOptionIgnorable); if (IsPackageInterfaceFlag(A, Args)) return ignorable ? 
PackageArgs.Ignorable : PackageArgs.Standard; if (IsPrivateInterfaceFlag(A, Args)) return ignorable ? PrivateArgs.Ignorable : PrivateArgs.Standard; return ignorable ? PublicArgs.Ignorable : PublicArgs.Standard; }; for (auto A : Args) { if (!ShouldIncludeModuleInterfaceArg(A)) continue; ArgStringList &ArgList = interfaceArgListForArg(A); A->render(Args, ArgList); } auto updateInterfaceOpts = [](ModuleInterfaceOptions::InterfaceFlags &Flags, RenderedInterfaceArgs &RenderedArgs) { auto printFlags = [](std::string &str, ArgStringList argList) { llvm::raw_string_ostream OS(str); interleave( argList, [&](const char *Argument) { PrintArg(OS, Argument, StringRef()); }, [&] { OS << " "; }); }; printFlags(Flags.Flags, RenderedArgs.Standard); printFlags(Flags.IgnorableFlags, RenderedArgs.Ignorable); }; updateInterfaceOpts(Opts.PublicFlags, PublicArgs); updateInterfaceOpts(Opts.PrivateFlags, PrivateArgs); updateInterfaceOpts(Opts.PackageFlags, PackageArgs); } enum class CxxCompatMode { invalid, enabled, off }; static std::pair<CxxCompatMode, version::Version> validateCxxInteropCompatibilityMode(StringRef mode) { if (mode == "off") return {CxxCompatMode::off, {}}; if (mode == "default" || mode == "upcoming-swift" || mode == "swift-6" || mode == "swift-5.9") return {CxxCompatMode::enabled, {}}; // Note: If this is updated, corresponding code in // InterfaceSubContextDelegateImpl::InterfaceSubContextDelegateImpl needs // to be updated also. return {CxxCompatMode::invalid, {}}; } static void diagnoseCxxInteropCompatMode(Arg *verArg, ArgList &Args, DiagnosticEngine &diags) { // General invalid argument error diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, verArg->getAsString(Args), verArg->getValue()); // Note valid C++ interoperability modes. 
  // NOTE(review): "upcoming-swift" is accepted by
  // validateCxxInteropCompatibilityMode but deliberately not advertised here.
  auto validVers = {llvm::StringRef("off"), llvm::StringRef("default"),
                    llvm::StringRef("swift-6"), llvm::StringRef("swift-5.9")};
  auto versStr = "'" + llvm::join(validVers, "', '") + "'";
  diags.diagnose(SourceLoc(), diag::valid_cxx_interop_modes,
                 verArg->getSpelling(), versStr);
}

/// Derive EnableCXXInterop and FormalCxxInteropMode from the command line,
/// diagnosing conflicting or deprecated spellings along the way.
void LangOptions::setCxxInteropFromArgs(ArgList &Args,
                                        swift::DiagnosticEngine &Diags,
                                        const FrontendOptions &FrontendOpts) {
  if (Arg *A = Args.getLastArg(options::OPT_cxx_interoperability_mode)) {
    // Mixing the mode flag with the legacy experimental flag is an error.
    if (Args.hasArg(options::OPT_enable_experimental_cxx_interop)) {
      Diags.diagnose(SourceLoc(), diag::dont_enable_interop_and_compat);
    }

    auto interopCompatMode = validateCxxInteropCompatibilityMode(A->getValue());
    EnableCXXInterop |= (interopCompatMode.first == CxxCompatMode::enabled);

    if (interopCompatMode.first == CxxCompatMode::invalid)
      diagnoseCxxInteropCompatMode(A, Args, Diags);
  }

  // The legacy flag still enables interop, but is deprecated.
  if (Args.hasArg(options::OPT_enable_experimental_cxx_interop)) {
    Diags.diagnose(SourceLoc(), diag::enable_interop_flag_deprecated);
    Diags.diagnose(SourceLoc(), diag::swift_will_maintain_compat);
    EnableCXXInterop |= true;
  }

  if (Arg *A = Args.getLastArg(options::OPT_formal_cxx_interoperability_mode)) {
    // Take formal version from explicitly specified formal version flag
    StringRef version = A->getValue();

    // FIXME: the only valid modes are 'off' and 'swift-6'; see below.
    if (version == "off") {
      FormalCxxInteropMode = std::nullopt;
    } else if (version == "swift-6") {
      FormalCxxInteropMode = {6};
    } else {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      Diags.diagnose(SourceLoc(), diag::valid_cxx_interop_modes,
                     A->getSpelling(), "'off', 'swift-6'");
    }
  } else {
    // In the absence of a formal mode flag, we capture it from the current
    // C++ compat version (if C++ interop is enabled).
    //
    // FIXME: cxxInteropCompatVersion is computed based on the Swift language
    // version, and is either 4, 5, 6, or 7 (even though only 5.9 and 6.* make
    // any sense). For now, we don't actually care about the version, so we'll
    // just use version 6 (i.e., 'swift-6') to mean that C++ interop mode is on.
    //
    // FIXME: We always declare the 'Darwin' module as formally having been built
    // without C++Interop, for compatibility with prior versions. Once we are certain
    // that we are only building against modules built with support of
    // '-formal-cxx-interoperability-mode', this hard-coded check should be removed.
    if (EnableCXXInterop && (FrontendOpts.ModuleName.compare("Darwin") != 0))
      FormalCxxInteropMode = {6};
    else
      FormalCxxInteropMode = std::nullopt;
  }
}

/// Render the formal C++ interop mode as a stable
/// "-formal-cxx-interoperability-mode=..." flag string, suitable for
/// embedding in a .swiftinterface file.
static std::string printFormalCxxInteropVersion(const LangOptions &Opts) {
  std::string str;
  llvm::raw_string_ostream OS(str);
  OS << "-formal-cxx-interoperability-mode=";

  // We must print a 'stable' C++ interop version here, which cannot be
  // 'default' and 'upcoming-swift' (since those are relative to the current
  // version, which may change in the future).
  if (!Opts.FormalCxxInteropMode) {
    OS << "off";
  } else {
    // FIXME: FormalCxxInteropMode will always be 6 (or nullopt); see above
    OS << "swift-6";
  }

  return str;
}

/// Map a -strict-concurrency= value to its enum; nullopt if unrecognized.
static std::optional<swift::StrictConcurrency>
parseStrictConcurrency(StringRef value) {
  return llvm::StringSwitch<std::optional<swift::StrictConcurrency>>(value)
      .Case("minimal", swift::StrictConcurrency::Minimal)
      .Case("targeted", swift::StrictConcurrency::Targeted)
      .Case("complete", swift::StrictConcurrency::Complete)
      .Default(std::nullopt);
}

/// Parse compilation-caching (CAS) related frontend arguments into \p Opts.
/// Always returns false; no hard errors are produced here.
static bool ParseCASArgs(CASOptions &Opts, ArgList &Args,
                         DiagnosticEngine &Diags,
                         const FrontendOptions &FrontendOpts) {
  using namespace options;
  Opts.EnableCaching |= Args.hasFlag(
      OPT_cache_compile_job, OPT_no_cache_compile_job, /*Default=*/false);
  Opts.EnableCachingRemarks |= Args.hasArg(OPT_cache_remarks);
  Opts.CacheSkipReplay |= Args.hasArg(OPT_cache_disable_replay);
  if (const Arg *A = Args.getLastArg(OPT_cas_path))
    Opts.CASOpts.CASPath = A->getValue();
  if (const Arg *A = Args.getLastArg(OPT_cas_plugin_path))
    Opts.CASOpts.PluginPath = A->getValue();

  // Plugin options are "name=value" pairs; a missing '=' yields an empty value.
  for (StringRef Opt : Args.getAllArgValues(OPT_cas_plugin_option)) {
    StringRef Name, Value;
    std::tie(Name, Value) = Opt.split('=');
    Opts.CASOpts.PluginOptions.emplace_back(std::string(Name),
                                            std::string(Value));
  }

  Opts.ImportModuleFromCAS |= Args.hasArg(OPT_module_import_from_cas);
  if (auto *A = Args.getLastArg(OPT_clang_include_tree_root))
    Opts.ClangIncludeTree = A->getValue();
  if (auto *A = Args.getLastArg(OPT_clang_include_tree_filelist))
    Opts.ClangIncludeTreeFileList = A->getValue();

  if (const Arg *A = Args.getLastArg(OPT_input_file_key))
    Opts.InputFileKey = A->getValue();
  if (const Arg *A = Args.getLastArg(OPT_bridging_header_pch_key))
    Opts.BridgingHeaderPCHCacheKey = A->getValue();

  // Include trees imply the underlying file system cannot change underneath us.
  if (!Opts.ClangIncludeTree.empty() || !Opts.ClangIncludeTreeFileList.empty())
    Opts.HasImmutableFileSystem = true;

  return false;
}

/// Process -enable/-disable-{experimental,upcoming}-feature flags into
/// \p Opts, diagnosing unknown names, invalid modes, and unsupported uses.
/// Returns true if a hard error was emitted.
static bool ParseEnabledFeatureArgs(LangOptions &Opts, ArgList &Args,
                                    DiagnosticEngine &Diags,
                                    const FrontendOptions &FrontendOpts) {
  using namespace options;

  bool HadError = false;

  // Enable feature upcoming/experimental features if requested. However, leave
  // a feature disabled if an -enable-upcoming-feature flag is superseded by a
  // -disable-upcoming-feature flag. Since only the last flag specified is
  // honored, we iterate over them in reverse order.
  std::vector<StringRef> psuedoFeatures; // [sic] pseudo-features; see below
  llvm::SmallSet<Feature, 8> seenFeatures;
  bool shouldEnableEmbeddedExistentialsPerDefault = false;
  for (const Arg *A : Args.filtered_reverse(
           OPT_enable_experimental_feature, OPT_disable_experimental_feature,
           OPT_enable_upcoming_feature, OPT_disable_upcoming_feature)) {
    auto &option = A->getOption();
    const StringRef argValue = A->getValue();
    bool isEnableUpcomingFeatureFlag =
        option.matches(OPT_enable_upcoming_feature);
    bool isUpcomingFeatureFlag =
        isEnableUpcomingFeatureFlag || option.matches(OPT_disable_upcoming_feature);
    bool isEnableFeatureFlag =
        isEnableUpcomingFeatureFlag || option.matches(OPT_enable_experimental_feature);

    // Collect some special case pseudo-features which should be processed
    // separately (they carry values rather than a plain on/off state).
    if (argValue.starts_with("StrictConcurrency") ||
        argValue.starts_with("AvailabilityMacro=") ||
        argValue.starts_with("RequiresObjC=")) {
      if (isEnableFeatureFlag)
        psuedoFeatures.push_back(argValue);
      continue;
    }

    // ApproachableConcurrency is an umbrella pseudo-feature; note that unlike
    // the cases above it is queued for both enable and disable flags.
    if (isUpcomingFeatureFlag &&
        argValue.compare("ApproachableConcurrency") == 0) {
      psuedoFeatures.push_back(argValue);
      continue;
    }

    // For all other features, the argument format is `<name>[:migrate]`.
    StringRef featureName;
    std::optional<StringRef> featureMode;
    std::tie(featureName, featureMode) = argValue.rsplit(':');
    // rsplit yields an empty second half when there is no ':'; treat that as
    // "no mode given".
    if (featureMode.value().empty()) {
      featureMode = std::nullopt;
    }

    auto feature = Feature::getUpcomingFeature(featureName);
    if (feature) {
      // Diagnose upcoming features enabled with -enable-experimental-feature.
      if (!isUpcomingFeatureFlag)
        Diags.diagnose(SourceLoc(), diag::feature_not_experimental, featureName,
                       isEnableFeatureFlag);
    } else {
      // If -enable-upcoming-feature was used and an upcoming feature was not
      // found, diagnose and continue.
      if (isUpcomingFeatureFlag) {
        Diags.diagnose(SourceLoc(), diag::unrecognized_feature, featureName,
                       /*upcoming=*/true);
        continue;
      }

      // If the feature is also not a recognized experimental feature, skip it.
      feature = Feature::getExperimentalFeature(featureName);
      if (!feature) {
        Diags.diagnose(SourceLoc(), diag::unrecognized_feature, featureName,
                       /*upcoming=*/false);
        continue;
      }
    }

    // If the current language mode enables the feature by default then
    // diagnose and skip it.
    if (auto languageMode = feature->getLanguageMode()) {
      if (Opts.isLanguageModeAtLeast(languageMode.value())) {
        Diags.diagnose(SourceLoc(),
                       diag::warning_upcoming_feature_on_by_default,
                       feature->getName(), languageMode->versionString());
        continue;
      }
    }

    // If this is a known experimental feature, allow it in +Asserts
    // (non-release) builds for testing purposes.
    if (Opts.RestrictNonProductionExperimentalFeatures &&
        !feature->isAvailableInProduction()) {
      Diags.diagnose(SourceLoc(),
                     diag::experimental_not_supported_in_production,
                     featureName);
      HadError = true;
      continue;
    }

    if (featureMode) {
      if (isEnableFeatureFlag) {
        const auto isMigratable = feature->isMigratable();

        // Diagnose an invalid mode; "migrate" is the only accepted spelling.
        StringRef validModeName = "migrate";
        if (*featureMode != validModeName) {
          Diags.diagnose(SourceLoc(), diag::invalid_feature_mode, *featureMode,
                         featureName,
                         /*didYouMean=*/validModeName,
                         /*showDidYouMean=*/isMigratable);
          continue;
        }

        if (!isMigratable) {
          Diags.diagnose(SourceLoc(),
                         diag::feature_does_not_support_migration_mode,
                         featureName);
          continue;
        }
      } else {
        // `-disable-*-feature` flags do not support a mode specifier.
        Diags.diagnose(SourceLoc(), diag::cannot_disable_feature_with_mode,
                       option.getPrefixedName(), argValue);
        continue;
      }
    }

    // Skip features that are already enabled or disabled.
    if (!seenFeatures.insert(*feature).second)
      continue;

    // "Embedded" enables "EmbeddedExistentials" per default except if
    // EmbeddedExistentials is explicitly disabled.
    // "Embedded" enables "EmbeddedExistentials" if we have not yet seen
    // explicit feature handling of "EmbeddedExistentials".
    // Because we can see "Embedded" before (parsing in reverse) we see
    // explicit disabling of "EmbeddedExistentials", we delay default
    // enablement so that a later (when viewed in reverse as this loop's logic
    // does) explicit disablement can take place.
    if (*feature == Feature::Embedded && isEnableFeatureFlag &&
        !seenFeatures.contains(Feature::EmbeddedExistentials))
      shouldEnableEmbeddedExistentialsPerDefault = true;
    else if (*feature == Feature::EmbeddedExistentials && !isEnableFeatureFlag)
      shouldEnableEmbeddedExistentialsPerDefault = false;

    bool forMigration = featureMode.has_value();
    // Enable the feature if requested.
    if (isEnableFeatureFlag)
      Opts.enableFeature(*feature, forMigration);

    // 'StandaloneSwiftAvailability' implies 'SwiftRuntimeAvailability'
    if (*feature == Feature::StandaloneSwiftAvailability)
      Opts.enableFeature(Feature::SwiftRuntimeAvailability, forMigration);
  }

  // Since pseudo-features don't have a boolean on/off state, process them in
  // the order they were specified on the command line.
  // (Iterating the reverse-collected vector backwards restores command-line
  // order.)
  for (auto featureName = psuedoFeatures.rbegin(), end = psuedoFeatures.rend();
       featureName != end; ++featureName) {
    // Allow StrictConcurrency to have a value that corresponds to the
    // -strict-concurrency=<blah> settings.
    if (featureName->starts_with("StrictConcurrency")) {
      auto decomposed = featureName->split("=");
      if (decomposed.first == "StrictConcurrency") {
        // Bare "StrictConcurrency" means "complete"; otherwise parse the
        // level, silently ignoring unknown values.
        if (decomposed.second == "") {
          Opts.StrictConcurrencyLevel = StrictConcurrency::Complete;
        } else if (auto level = parseStrictConcurrency(decomposed.second)) {
          Opts.StrictConcurrencyLevel = *level;
        }
      }
      continue;
    }

    // ApproachableConcurrency expands to its constituent features.
    if (featureName->compare("ApproachableConcurrency") == 0) {
      Opts.enableFeature(Feature::DisableOutwardActorInference);
      Opts.enableFeature(Feature::GlobalActorIsolatedTypesUsability);
      Opts.enableFeature(Feature::InferIsolatedConformances);
      Opts.enableFeature(Feature::InferSendableFromCaptures);
      Opts.enableFeature(Feature::NonisolatedNonsendingByDefault);
      continue;
    }

    // Hack: In order to support using availability macros in SPM packages, we
    // need to be able to use:
    //    .enableExperimentalFeature("AvailabilityMacro='...'")
    // within the package manifest and the feature recognizer can't recognize
    // this form of feature, so specially handle it here until features can
    // maybe have extra arguments in the future.
    if (featureName->starts_with("AvailabilityMacro=")) {
      auto availability = featureName->split("=").second;
      Opts.AvailabilityMacros.push_back(availability.str());
      continue;
    }

    // RequiresObjC=<m1>,<m2>,... records a comma-separated module list.
    if (featureName->starts_with("RequiresObjC")) {
      auto modules = featureName->split("=").second;
      modules.split(Opts.ModulesRequiringObjC, ",");
    }
  }

  // Apply the deferred Embedded -> EmbeddedExistentials default (see loop).
  if (shouldEnableEmbeddedExistentialsPerDefault) {
    Opts.enableFeature(Feature::EmbeddedExistentials);
  }

  // Map historical flags over to experimental features. We do this for all
  // compilers because that's how existing experimental feature flags work.
  if (Args.hasArg(OPT_enable_experimental_static_assert))
    Opts.enableFeature(Feature::StaticAssert);
  if (Args.hasArg(OPT_enable_experimental_named_opaque_types))
    Opts.enableFeature(Feature::NamedOpaqueTypes);
  if (Args.hasArg(OPT_enable_experimental_flow_sensitive_concurrent_captures))
    Opts.enableFeature(Feature::FlowSensitiveConcurrencyCaptures);
  if (Args.hasArg(OPT_enable_experimental_move_only)) {
    // FIXME: drop addition of Feature::MoveOnly once its queries are gone.
    Opts.enableFeature(Feature::MoveOnly);
    Opts.enableFeature(Feature::NoImplicitCopy);
    Opts.enableFeature(Feature::OldOwnershipOperatorSpellings);
  }
  if (Args.hasArg(OPT_enable_experimental_forward_mode_differentiation))
    Opts.enableFeature(Feature::ForwardModeDifferentiation);
  if (Args.hasArg(OPT_enable_experimental_additive_arithmetic_derivation))
    Opts.enableFeature(Feature::AdditiveArithmeticDerivedConformances);
  if (Args.hasArg(OPT_enable_experimental_opaque_type_erasure))
    Opts.enableFeature(Feature::OpaqueTypeErasure);
  if (Args.hasArg(OPT_enable_builtin_module))
    Opts.enableFeature(Feature::BuiltinModule);

  if (Args.hasArg(OPT_strict_memory_safety))
    Opts.enableFeature(Feature::StrictMemorySafety);
  else if (Args.hasArg(OPT_strict_memory_safety_migrate))
    Opts.enableFeature(Feature::StrictMemorySafety, /*forMigration=*/true);

  if (Args.hasArg(OPT_enable_library_evolution, OPT_enable_resilience))
    Opts.enableFeature(Feature::LibraryEvolution);

  return HadError;
}

static bool ParseLangArgs(LangOptions &Opts, ArgList &Args,
                          DiagnosticEngine &Diags,
                          ModuleInterfaceOptions &ModuleInterfaceOpts,
                          const FrontendOptions &FrontendOpts) {
  using namespace options;
  bool buildingFromInterface =
      FrontendOptions::doesActionBuildModuleFromInterface(
          FrontendOpts.RequestedAction);

  bool HadError = false;

  if (auto A = Args.getLastArg(OPT_swift_version)) {
    auto vers =
        VersionParser::parseVersionString(A->getValue(), SourceLoc(), &Diags);
    bool isValid = false;
    if (vers.has_value()) {
      if (auto effectiveVers =
vers.value().getEffectiveLanguageVersion()) { Opts.EffectiveLanguageVersion = effectiveVers.value(); isValid = true; } } if (!isValid) diagnoseSwiftVersion(vers, A, Args, Diags); } else if (FrontendOpts.InputsAndOutputs.hasModuleInterfaceOutputPath()) { Diags.diagnose({}, diag::error_module_interface_requires_language_mode) .limitBehavior(DiagnosticBehavior::Warning); // FIXME: Make this an error again (rdar://145168219) // HadError = true; } if (auto A = Args.getLastArg(OPT_min_swift_runtime_version)) { if (auto vers = VersionParser::parseVersionString(A->getValue(), SourceLoc(), &Diags)) { Opts.MinSwiftRuntimeVersion = *vers; } else { return true; } } if (auto A = Args.getLastArg(OPT_package_description_version)) { auto vers = VersionParser::parseVersionString(A->getValue(), SourceLoc(), &Diags); if (vers.has_value()) { Opts.PackageDescriptionVersion = vers.value(); } else { return true; } } Opts.AttachCommentsToDecls |= Args.hasArg(OPT_dump_api_path); Opts.UseMalloc |= Args.hasArg(OPT_use_malloc); Opts.EnableExperimentalConcurrency |= Args.hasArg(OPT_enable_experimental_concurrency); Opts.DisableExperimentalClangImporterDiagnostics |= Args.hasArg(OPT_disable_experimental_clang_importer_diagnostics); Opts.EnableExperimentalEagerClangModuleDiagnostics |= !Args.hasArg(OPT_disable_experimental_clang_importer_diagnostics) && Args.hasArg(OPT_enable_experimental_eager_clang_module_diagnostics); Opts.DisableNamedLazyImportAsMemberLoading |= Args.hasArg(OPT_disable_named_lazy_import_as_member_loading); Opts.DisableImplicitConcurrencyModuleImport |= Args.hasArg(OPT_disable_implicit_concurrency_module_import); Opts.DisableImplicitStringProcessingModuleImport |= Args.hasArg(OPT_disable_implicit_string_processing_module_import); Opts.DisableImplicitCxxModuleImport |= Args.hasArg(OPT_disable_implicit_cxx_module_import); if (Args.hasArg(OPT_enable_experimental_async_top_level)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-enable-experimental-async-top-level"); 
Opts.DiagnoseInvalidEphemeralnessAsError |= Args.hasArg(OPT_enable_invalid_ephemeralness_as_error); if (auto A = Args.getLastArg(OPT_enable_deserialization_recovery, OPT_disable_deserialization_recovery)) { Opts.EnableDeserializationRecovery = A->getOption().matches(OPT_enable_deserialization_recovery); } if (auto A = Args.getLastArg(OPT_enable_deserialization_safety, OPT_disable_deserialization_safety)) { Opts.EnableDeserializationSafety = A->getOption().matches(OPT_enable_deserialization_safety); } else if (auto A = Args.getLastArg(OPT_enable_access_control, OPT_disable_access_control)) { // Disable deserialization safety along with access control. Opts.EnableDeserializationSafety = A->getOption().matches(OPT_enable_access_control); } if (auto A = Args.getLastArg(OPT_enable_access_control, OPT_disable_access_control)) { Opts.EnableAccessControl = A->getOption().matches(OPT_enable_access_control); } Opts.EnableWorkaroundBrokenModules &= !Args.hasArg(OPT_disable_workaround_broken_modules); // Either the env var and the flag has to be set to enable package interface load Opts.EnablePackageInterfaceLoad = Args.hasArg(OPT_experimental_package_interface_load) || ::getenv("SWIFT_ENABLE_PACKAGE_INTERFACE_LOAD"); Opts.DisableAvailabilityChecking |= Args.hasArg(OPT_disable_availability_checking); if (Args.hasArg(OPT_check_api_availability_only)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-check-api-availability-only"); if (Args.hasArg(OPT_warn_on_potentially_unavailable_enum_case)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-warn-on-potentially-unavailable-enum-case"); if (const Arg *A = Args.getLastArg(OPT_unavailable_decl_optimization_EQ)) { auto value = llvm::StringSwitch<std::optional<UnavailableDeclOptimization>>( A->getValue()) .Case("none", UnavailableDeclOptimization::None) .Case("stub", UnavailableDeclOptimization::Stub) .Case("complete", UnavailableDeclOptimization::Complete) .Default(std::nullopt); if (value) 
Opts.UnavailableDeclOptimizationMode = *value; else Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); } Opts.WeakLinkAtTarget |= Args.hasArg(OPT_weak_link_at_target); Opts.WarnOnEditorPlaceholder |= Args.hasArg(OPT_warn_on_editor_placeholder); auto setUnsignedIntegerArgument = [&Args, &Diags, &HadError](options::ID optionID, unsigned &valueToSet) { if (const Arg *A = Args.getLastArg(optionID)) { unsigned attempt; if (StringRef(A->getValue()).getAsInteger(/*radix*/ 10, attempt)) { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); HadError = true; } else { valueToSet = attempt; } } }; setUnsignedIntegerArgument(OPT_typo_correction_limit, Opts.TypoCorrectionLimit); if (Args.hasArg(OPT_disable_typo_correction)) Opts.TypoCorrectionLimit = 0; setUnsignedIntegerArgument(OPT_value_recursion_threshold, Opts.MaxCircularityDepth); if (auto A = Args.getLastArg(OPT_enable_target_os_checking, OPT_disable_target_os_checking)) { Opts.EnableTargetOSChecking = A->getOption().matches(OPT_enable_target_os_checking); } Opts.EnableNewOperatorLookup = Args.hasFlag(OPT_enable_new_operator_lookup, OPT_disable_new_operator_lookup, /*default*/ false); Opts.UseClangFunctionTypes |= Args.hasArg(OPT_use_clang_function_types); if (Args.hasArg(OPT_emit_fine_grained_dependency_sourcefile_dot_files)) Opts.EmitFineGrainedDependencySourcefileDotFiles = true; Opts.DebuggerSupport |= Args.hasArg(OPT_debugger_support); if (Opts.DebuggerSupport) Opts.EnableDollarIdentifiers = true; Opts.DebuggerTestingTransform = Args.hasArg(OPT_debugger_testing_transform); Opts.Playground |= Args.hasArg(OPT_playground); Opts.PlaygroundTransform |= Args.hasArg(OPT_playground); if (Args.hasArg(OPT_disable_playground_transform)) Opts.PlaygroundTransform = false; if (Args.hasArg(OPT_playground_high_performance)) { // Disable any playground options that are marked as not being enabled in // high performance mode. 
#define PLAYGROUND_OPTION(OptionName, Description, DefaultOn, HighPerfOn) \ if (!HighPerfOn) \ Opts.PlaygroundOptions.erase(PlaygroundOption::OptionName); #include "swift/Basic/PlaygroundOptions.def" } for (const Arg *A : Args.filtered(OPT_playground_option)) { // Enable the option (or disable if it has a "No" prefix). Any unknown // options are ignored. StringRef optionName = A->getValue(); const bool disableOption = optionName.consume_front("No"); if (auto option = getPlaygroundOption(optionName)) { if (disableOption) Opts.PlaygroundOptions.erase(*option); else Opts.PlaygroundOptions.insert(*option); } } // This can be enabled independently of the playground transform. Opts.PCMacro |= Args.hasArg(OPT_pc_macro); Opts.EnableThrowWithoutTry |= Args.hasArg(OPT_enable_throw_without_try); Opts.ThrowsAsTraps |= Args.hasArg(OPT_throws_as_traps); if (auto A = Args.getLastArg(OPT_enable_objc_attr_requires_foundation_module, OPT_disable_objc_attr_requires_foundation_module)) { Opts.EnableObjCAttrRequiresFoundation = A->getOption().matches(OPT_enable_objc_attr_requires_foundation_module); } if (auto A = Args.getLastArg(OPT_enable_testable_attr_requires_testable_module, OPT_disable_testable_attr_requires_testable_module)) { Opts.EnableTestableAttrRequiresTestableModule = A->getOption().matches(OPT_enable_testable_attr_requires_testable_module); } else if (buildingFromInterface) { Opts.EnableObjCAttrRequiresFoundation = false; } if (Args.getLastArg(OPT_debug_cycles)) Opts.DebugDumpCycles = true; for (const Arg *A : Args.filtered(OPT_define_availability)) { Opts.AvailabilityMacros.push_back(A->getValue()); } for (const Arg *A : Args.filtered(OPT_D)) { Opts.addCustomConditionalCompilationFlag(A->getValue()); } // Add a future feature if it is not already implied by the language version. auto addFutureFeatureIfNotImplied = [&](Feature feature) { // Check if this feature was introduced already in this language version. 
if (auto languageMode = feature.getLanguageMode()) { if (Opts.isLanguageModeAtLeast(languageMode.value())) return; } Opts.enableFeature(feature); }; // Map historical flags over to future features. if (Args.hasArg(OPT_enable_experimental_concise_pound_file)) addFutureFeatureIfNotImplied(Feature::ConciseMagicFile); if (Args.hasArg(OPT_enable_bare_slash_regex)) addFutureFeatureIfNotImplied(Feature::BareSlashRegexLiterals); // Experimental string processing. If explicitly enabled/disabled, use that. // Otherwise if bare slash regex literals were enabled, also enable string // processing. if (auto A = Args.getLastArg(OPT_enable_experimental_string_processing, OPT_disable_experimental_string_processing)) { Opts.EnableExperimentalStringProcessing = A->getOption().matches(OPT_enable_experimental_string_processing); // When experimental string processing is explicitly disabled, also disable // forward slash regex `/.../`. if (!Opts.EnableExperimentalStringProcessing) Opts.disableFeature(Feature::BareSlashRegexLiterals); } else if (Opts.hasFeature(Feature::BareSlashRegexLiterals)) { Opts.EnableExperimentalStringProcessing = true; } if (ParseEnabledFeatureArgs(Opts, Args, Diags, FrontendOpts)) HadError = true; // Do not allow both versions of SuppressedAssociatedTypes at the same time. // Pick the version with defaults if both are specified. 
if (Opts.hasFeature(SuppressedAssociatedTypes) && Opts.hasFeature(SuppressedAssociatedTypesWithDefaults)) { Opts.disableFeature(SuppressedAssociatedTypes); } Opts.EnableAppExtensionLibraryRestrictions |= Args.hasArg(OPT_enable_app_extension_library); Opts.EnableAppExtensionRestrictions |= Args.hasArg(OPT_enable_app_extension); Opts.EnableAppExtensionRestrictions |= Opts.EnableAppExtensionLibraryRestrictions; if (Args.hasArg(OPT_enable_swift3_objc_inference)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-enable-swift3-objc-inference"); if (Args.hasArg(OPT_disable_swift3_objc_inference)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-disable-swift3-objc-inference"); if (const Arg *A = Args.getLastArg(OPT_library_level)) { StringRef contents = A->getValue(); if (contents == "api") { Opts.LibraryLevel = LibraryLevel::API; } else if (contents == "spi") { Opts.LibraryLevel = LibraryLevel::SPI; } else if (contents == "ipi") { Opts.LibraryLevel = LibraryLevel::IPI; } else { Opts.LibraryLevel = LibraryLevel::Other; if (contents != "other") { // Error on unknown library levels. Diags.diagnose(SourceLoc(), diag::error_unknown_library_level, contents); } } } if (const Arg *A = Args.getLastArg(OPT_package_name)) { auto pkgName = A->getValue(); if (StringRef(pkgName).empty()) Diags.diagnose(SourceLoc(), diag::error_empty_package_name); else { Opts.PackageName = pkgName; // Unless the input type is public or private swift interface, do not // allow non package interface imports for dependencies in the same // package. 
Opts.AllowNonPackageInterfaceImportFromSamePackage = FrontendOpts.InputsAndOutputs .shouldTreatAsNonPackageModuleInterface(); } } if (const Arg *A = Args.getLastArg(OPT_require_explicit_availability_EQ)) { StringRef diagLevel = A->getValue(); if (diagLevel == "warn") { Opts.RequireExplicitAvailabilityBehavior = LangOptions::RequireExplicitAvailabilityDiagnosticBehavior::Warning; } else if (diagLevel == "error") { Opts.RequireExplicitAvailabilityBehavior = LangOptions::RequireExplicitAvailabilityDiagnosticBehavior::Error; } else if (diagLevel == "ignore") { Opts.RequireExplicitAvailabilityBehavior = LangOptions::RequireExplicitAvailabilityDiagnosticBehavior::Ignore; } else { Diags.diagnose(SourceLoc(), diag::error_unknown_require_explicit_availability, diagLevel); } } else if (Args.getLastArg(OPT_require_explicit_availability, OPT_require_explicit_availability_target) || Opts.LibraryLevel == LibraryLevel::API) { Opts.RequireExplicitAvailabilityBehavior = LangOptions::RequireExplicitAvailabilityDiagnosticBehavior::Warning; } if (const Arg *A = Args.getLastArg(OPT_require_explicit_availability_target)) { Opts.RequireExplicitAvailabilityTarget = A->getValue(); } Opts.EnableSPIOnlyImports = Args.hasArg(OPT_experimental_spi_only_imports); if (Args.hasArg(OPT_experimental_spi_imports)) { if (Opts.EffectiveLanguageVersion.isVersionAtLeast(6)) { Diags.diagnose(SourceLoc(), diag::flag_unsuppored, "-experimental-spi-imports"); HadError = true; } else { Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-experimental-spi-imports"); } } if (Args.hasArg(OPT_warn_swift3_objc_inference_minimal)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-warn-swift3-objc-inference-minimal"); if (Args.hasArg(OPT_warn_swift3_objc_inference_complete)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-warn-swift3-objc-inference-complete"); // Swift 6+ uses the strictest concurrency level. 
if (Opts.hasFeature(Feature::StrictConcurrency)) { Opts.StrictConcurrencyLevel = StrictConcurrency::Complete; } else if (const Arg *A = Args.getLastArg(OPT_strict_concurrency)) { if (auto value = parseStrictConcurrency(A->getValue())) Opts.StrictConcurrencyLevel = *value; else Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); } else if (Args.hasArg(OPT_warn_concurrency)) { Opts.StrictConcurrencyLevel = StrictConcurrency::Complete; } else { // Default to minimal checking in Swift 5.x. } // Make sure StrictConcurrency, StrictConcurrency=complete and // -strict-concurrency=complete all mean the same thing. // // The compiler implementation should standardize on StrictConcurrencyLevel, // but if there is any check for `Feature::StrictConcurrency`, the result // should be the same regardless of which flag was used to enable it. if (Opts.StrictConcurrencyLevel == StrictConcurrency::Complete) { Opts.enableFeature(Feature::StrictConcurrency); } // StrictConcurrency::Complete enables all data-race safety features. 
if (Opts.StrictConcurrencyLevel == StrictConcurrency::Complete) { Opts.enableFeature(Feature::IsolatedDefaultValues); Opts.enableFeature(Feature::GlobalConcurrency); Opts.enableFeature(Feature::RegionBasedIsolation); } Opts.WarnImplicitOverrides = Args.hasArg(OPT_warn_implicit_overrides); Opts.WarnSoftDeprecated = Args.hasArg(OPT_warn_soft_deprecated); Opts.EnableNSKeyedArchiverDiagnostics = Args.hasFlag(OPT_enable_nskeyedarchiver_diagnostics, OPT_disable_nskeyedarchiver_diagnostics, Opts.EnableNSKeyedArchiverDiagnostics); if (Args.hasFlag(OPT_enable_nonfrozen_enum_exhaustivity_diagnostics, OPT_disable_nonfrozen_enum_exhaustivity_diagnostics, Opts.isLanguageModeAtLeast(LanguageMode::v5))) { Opts.enableFeature(Feature::NonfrozenEnumExhaustivity); } if (Arg *A = Args.getLastArg(OPT_Rpass_EQ)) Opts.OptimizationRemarkPassedPattern = generateOptimizationRemarkRegex(Diags, Args, A); if (Arg *A = Args.getLastArg(OPT_Rpass_missed_EQ)) Opts.OptimizationRemarkMissedPattern = generateOptimizationRemarkRegex(Diags, Args, A); if (const Arg *A = Args.getLastArg(OPT_access_notes_path)) Opts.AccessNotesPath = A->getValue(); if (Arg *A = Args.getLastArg(OPT_Raccess_note)) { auto value = llvm::StringSwitch<std::optional<AccessNoteDiagnosticBehavior>>( A->getValue()) .Case("none", AccessNoteDiagnosticBehavior::Ignore) .Case("failures", AccessNoteDiagnosticBehavior::RemarkOnFailure) .Case("all", AccessNoteDiagnosticBehavior::RemarkOnFailureOrSuccess) .Case("all-validate", AccessNoteDiagnosticBehavior::ErrorOnFailureRemarkOnSuccess) .Default(std::nullopt); if (value) Opts.AccessNoteBehavior = *value; else Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); } Opts.EnableCrossImportOverlays = Args.hasFlag(OPT_enable_cross_import_overlays, OPT_disable_cross_import_overlays, Opts.EnableCrossImportOverlays); Opts.EnableCrossImportRemarks = Args.hasArg(OPT_emit_cross_import_remarks); Opts.EnableModuleLoadingRemarks = 
Args.hasArg(OPT_remark_loading_module); Opts.EnableModuleRecoveryRemarks = Args.hasArg(OPT_remark_module_recovery); Opts.EnableModuleSerializationRemarks = Args.hasArg(OPT_remark_module_serialization); Opts.EnableModuleApiImportRemarks = Args.hasArg(OPT_remark_module_api_import); Opts.EnableMacroLoadingRemarks = Args.hasArg(OPT_remark_macro_loading); Opts.EnableIndexingSystemModuleRemarks = Args.hasArg(OPT_remark_indexing_system_module); if (Args.hasArg(OPT_experimental_skip_non_exportable_decls)) { // Only allow -experimental-skip-non-exportable-decls if either library // evolution is enabled (in which case the module's ABI is independent of // internal declarations) or when -experimental-skip-all-function-bodies is // present. The latter implies the module will not be used for code // generation, so omitting details needed for ABI should be safe. if (Args.hasArg(OPT_enable_library_evolution) || Args.hasArg(OPT_experimental_skip_all_function_bodies)) { Opts.SkipNonExportableDecls |= true; } else { Diags.diagnose(SourceLoc(), diag::ignoring_option_requires_option, "-experimental-skip-non-exportable-decls", "-enable-library-evolution"); } } Opts.AbortOnDeserializationFailForPackageCMO = Args.hasArg(OPT_ExperimentalPackageCMOAbortOnDeserializationFail); Opts.AllowNonResilientAccess = Args.hasArg(OPT_experimental_allow_non_resilient_access) || Args.hasArg(OPT_allow_non_resilient_access) || Opts.hasFeature(Feature::AllowNonResilientAccessInPackage); if (Opts.AllowNonResilientAccess) { // Override the option to skip non-exportable decls. if (Opts.SkipNonExportableDecls) { Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by, "-experimental-skip-non-exportable-decls", "-allow-non-resilient-access"); Opts.SkipNonExportableDecls = false; } // If built from interface, non-resilient access should not be allowed. 
if (Opts.AllowNonResilientAccess && FrontendOptions::doesActionBuildModuleFromInterface( FrontendOpts.RequestedAction)) { if (FrontendOpts.RequestedAction != FrontendOptions::ActionType::TypecheckModuleFromInterface) Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by, "-allow-non-resilient-access", "-compile-module-from-interface"); Opts.AllowNonResilientAccess = false; } } // HACK: The driver currently erroneously passes all flags to module interface // verification jobs. -experimental-skip-non-exportable-decls is not // appropriate for verification tasks and should be ignored, though. if (FrontendOpts.RequestedAction == FrontendOptions::ActionType::TypecheckModuleFromInterface) Opts.SkipNonExportableDecls = false; llvm::Triple Target = Opts.Target; StringRef TargetArg; std::string TargetArgScratch; if (const Arg *A = Args.getLastArg(OPT_target)) { Target = llvm::Triple(A->getValue()); TargetArg = A->getValue(); const bool targetNeedsRemapping = Target.isXROS(); if (targetNeedsRemapping && Target.getOSMajorVersion() == 0) { // FIXME(xrOS): Work around an LLVM-ism until we have something // akin to Target::get*Version for this platform. The Clang driver // also has to pull version numbers up to 1.0.0 when a triple for an // unknown platform with no explicit version number is passed. if (Target.getEnvironmentName().empty()) { Target = llvm::Triple(Target.getArchName(), Target.getVendorName(), Target.getOSName() + "1.0"); } else { Target = llvm::Triple(Target.getArchName(), Target.getVendorName(), Target.getOSName() + "1.0", Target.getEnvironmentName()); } } // Backward compatibility hack: infer "simulator" environment for x86 // iOS/tvOS/watchOS. The driver takes care of this for the frontend // most of the time, but loading of old .swiftinterface files goes // directly to the frontend. if (tripleInfersSimulatorEnvironment(Target)) { // Set the simulator environment. 
Target.setEnvironment(llvm::Triple::EnvironmentType::Simulator); TargetArgScratch = Target.str(); TargetArg = TargetArgScratch; } } if (const Arg *A = Args.getLastArg(OPT_target_variant)) { Opts.TargetVariant = llvm::Triple(A->getValue()); } // Collect -clang-target value if specified in the front-end invocation. // Usually, the driver will pass down a clang target with the // exactly same value as the main target, so we could diagnose the usage of // unavailable APIs. // The reason we cannot infer clang target from -target is that not all // front-end invocation will include a -target to start with. For instance, // when compiling a Swift module from a textual interface, -target isn't // necessary because the textual interface hardcoded the proper target triple // to use. Inferring -clang-target there will always give us the default // target triple. if (const Arg *A = Args.getLastArg(OPT_clang_target)) Opts.ClangTarget = llvm::Triple(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_clang_target_variant)) Opts.ClangTargetVariant = llvm::Triple(A->getValue()); Opts.setCxxInteropFromArgs(Args, Diags, FrontendOpts); if (!Args.hasArg(options::OPT_formal_cxx_interoperability_mode)) ModuleInterfaceOpts.PublicFlags.IgnorableFlags += " " + printFormalCxxInteropVersion(Opts); Opts.UseStaticStandardLibrary = Args.hasArg(OPT_use_static_resource_dir); Opts.EnableObjCInterop = Args.hasFlag(OPT_enable_objc_interop, OPT_disable_objc_interop, Target.isOSDarwin() && !Opts.hasFeature(Feature::Embedded)); if (Args.hasArg(OPT_experimental_c_foreign_reference_types)) Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated, "-experimental-c-foreign-reference-types"); Opts.CxxInteropGettersSettersAsProperties = Args.hasArg(OPT_cxx_interop_getters_setters_as_properties); Opts.RequireCxxInteropToImportCxxInteropModule = !Args.hasArg(OPT_cxx_interop_disable_requirement_at_import); Opts.CxxInteropUseOpaquePointerForMoveOnly = 
Args.hasArg(OPT_cxx_interop_use_opaque_pointer_for_moveonly); Opts.VerifyAllSubstitutionMaps |= Args.hasArg(OPT_verify_all_substitution_maps); Opts.EnableVolatileModules |= Args.hasArg(OPT_enable_volatile_modules); Opts.HermeticSealAtLink |= Args.hasArg(OPT_experimental_hermetic_seal_at_link); Opts.UseDarwinPreStableABIBit = (Target.isMacOSX() && Target.isMacOSXVersionLT(10, 14, 4)) || (Target.isiOS() && Target.isOSVersionLT(12, 2)) || (Target.isTvOS() && Target.isOSVersionLT(12, 2)) || (Target.isWatchOS() && Target.isOSVersionLT(5, 2)); // Must be processed after any other language options that could affect // platform conditions. bool UnsupportedOS, UnsupportedArch; std::tie(UnsupportedOS, UnsupportedArch) = Opts.setTarget(Target); SmallVector<StringRef, 3> TargetComponents; TargetArg.split(TargetComponents, "-"); if (UnsupportedArch) { auto TargetArgArch = TargetComponents.size() ? TargetComponents[0] : ""; Diags.diagnose(SourceLoc(), diag::error_unsupported_target_arch, TargetArgArch); } if (UnsupportedOS) { auto TargetArgOS = TargetComponents.size() > 2 ? TargetComponents[2] : ""; Diags.diagnose(SourceLoc(), diag::error_unsupported_target_os, TargetArgOS); } // First, set up default minimum inlining target versions. auto getDefaultMinimumInliningTargetVersion = [&](const llvm::Triple &triple) -> llvm::VersionTuple { const auto targetVersion = getVersionForTriple(triple); // In API modules, default to the version when Swift first became available. if (Opts.LibraryLevel == LibraryLevel::API) { if (auto minVersion = minimumAvailableOSVersionForTriple(triple)) return *minVersion; } // In other modules, assume that availability is used less consistently // and that library clients will generally raise deployment targets as the // library evolves so the min inlining version should be the deployment // target by default. 
return targetVersion; }; Opts.MinimumInliningTargetVersion = getDefaultMinimumInliningTargetVersion(Opts.Target); // Parse OS version number arguments. auto parseVersionArg = [&](OptSpecifier opt) -> std::optional<llvm::VersionTuple> { Arg *A = Args.getLastArg(opt); if (!A) return std::nullopt; if (StringRef(A->getValue()) == "min") return minimumAvailableOSVersionForTriple(Opts.Target); if (StringRef(A->getValue()) == "target") return Opts.getMinPlatformVersion(); if (auto vers = VersionParser::parseVersionString(A->getValue(), SourceLoc(), &Diags)) return (llvm::VersionTuple)*vers; Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); return std::nullopt; }; if (auto vers = parseVersionArg(OPT_min_inlining_target_version)) // FIXME: Should we diagnose if it's below the default? Opts.MinimumInliningTargetVersion = *vers; if (auto vers = parseVersionArg(OPT_min_runtime_version)) Opts.RuntimeVersion = version::Version(*vers); if (auto vers = parseVersionArg(OPT_target_sdk_version)) Opts.SDKVersion = *vers; if (auto vers = parseVersionArg(OPT_target_variant_sdk_version)) Opts.VariantSDKVersion = *vers; // Get the SDK name. if (Arg *A = Args.getLastArg(options::OPT_target_sdk_name)) { Opts.SDKName = A->getValue(); } if (const Arg *A = Args.getLastArg(OPT_entry_point_function_name)) { Opts.entryPointFunctionName = A->getValue(); } // Configure lexing to parse and remember comments if: // - Emitting a swiftdoc/swiftsourceinfo // - Performing index-while-building // - Emitting a symbol graph file // If we are asked to emit a module documentation file, configure lexing and // parsing to remember comments. if (FrontendOpts.InputsAndOutputs.hasModuleDocOutputPath() || FrontendOpts.InputsAndOutputs.hasModuleSourceInfoOutputPath() || !FrontendOpts.IndexStorePath.empty() || FrontendOpts.EmitSymbolGraph) { Opts.AttachCommentsToDecls = true; } // If we're parsing SIL, access control doesn't make sense to enforce. 
if (Args.hasArg(OPT_parse_sil) || FrontendOpts.InputsAndOutputs.shouldTreatAsSIL()) { Opts.EnableAccessControl = false; Opts.DisableAvailabilityChecking = true; } if (FrontendOpts.AllowModuleWithCompilerErrors) { Opts.AllowModuleWithCompilerErrors = true; } if (auto A = Args.getLastArg(OPT_enable_ast_verifier, OPT_disable_ast_verifier)) { using ASTVerifierOverrideKind = LangOptions::ASTVerifierOverrideKind; if (A->getOption().matches(OPT_enable_ast_verifier)) { Opts.ASTVerifierOverride = ASTVerifierOverrideKind::EnableVerifier; } else if (A->getOption().matches(OPT_disable_ast_verifier)) { Opts.ASTVerifierOverride = ASTVerifierOverrideKind::DisableVerifier; } else { // This is an assert since getLastArg should not have let us get here if // we did not have one of enable/disable specified. llvm_unreachable( "Should have found one of enable/disable ast verifier?!"); } } Opts.DisableSubstSILFunctionTypes = Args.hasArg(OPT_disable_subst_sil_function_types); Opts.AnalyzeRequestEvaluator = Args.hasArg( OPT_analyze_request_evaluator); Opts.DumpRequirementMachine = Args.hasArg( OPT_dump_requirement_machine); Opts.AnalyzeRequirementMachine = Args.hasArg( OPT_analyze_requirement_machine); Opts.DumpMacroExpansions = Args.hasArg( OPT_dump_macro_expansions); Opts.RemarkMacroExpansions = Args.hasArg( OPT_expansion_remarks); Opts.DumpSourceFileImports = Args.hasArg( OPT_dump_source_file_imports); if (const Arg *A = Args.getLastArg(OPT_debug_requirement_machine)) Opts.DebugRequirementMachine = A->getValue(); setUnsignedIntegerArgument(OPT_requirement_machine_max_rule_count, Opts.RequirementMachineMaxRuleCount); setUnsignedIntegerArgument(OPT_requirement_machine_max_rule_length, Opts.RequirementMachineMaxRuleLength); setUnsignedIntegerArgument(OPT_requirement_machine_max_concrete_nesting, Opts.RequirementMachineMaxConcreteNesting); setUnsignedIntegerArgument(OPT_requirement_machine_max_concrete_size, Opts.RequirementMachineMaxConcreteSize); 
setUnsignedIntegerArgument(OPT_requirement_machine_max_type_differences, Opts.RequirementMachineMaxTypeDifferences); setUnsignedIntegerArgument(OPT_requirement_machine_max_split_concrete_equiv_class_attempts, Opts.RequirementMachineMaxSplitConcreteEquivClassAttempts); if (Args.hasArg(OPT_disable_requirement_machine_concrete_contraction)) Opts.EnableRequirementMachineConcreteContraction = false; if (Args.hasArg(OPT_disable_requirement_machine_loop_normalization)) Opts.EnableRequirementMachineLoopNormalization = false; if (Args.hasArg(OPT_disable_requirement_machine_reuse)) Opts.EnableRequirementMachineReuse = false; if (Args.hasArg(OPT_enable_requirement_machine_opaque_archetypes)) Opts.EnableRequirementMachineOpaqueArchetypes = true; setUnsignedIntegerArgument(OPT_max_substitution_depth, Opts.MaxSubstitutionDepth); setUnsignedIntegerArgument(OPT_max_substitution_count, Opts.MaxSubstitutionCount); if (Args.hasArg(OPT_enable_experimental_lifetime_dependence_inference)) Opts.EnableExperimentalLifetimeDependenceInference = true; if (Args.hasArg(OPT_disable_experimental_lifetime_dependence_inference)) Opts.EnableExperimentalLifetimeDependenceInference = false; Opts.DumpTypeWitnessSystems = Args.hasArg(OPT_dump_type_witness_systems); for (auto &block: FrontendOpts.BlocklistConfigFilePaths) Opts.BlocklistConfigFilePaths.push_back(block); if (const Arg *A = Args.getLastArg(options::OPT_concurrency_model)) { Opts.ActiveConcurrencyModel = llvm::StringSwitch<ConcurrencyModel>(A->getValue()) .Case("standard", ConcurrencyModel::Standard) .Case("task-to-thread", ConcurrencyModel::TaskToThread) .Default(ConcurrencyModel::Standard); } Opts.BypassResilienceChecks |= Args.hasArg(OPT_bypass_resilience); if (Opts.hasFeature(Feature::EmbeddedExistentials) && !Opts.hasFeature(Feature::Embedded)) { Diags.diagnose(SourceLoc(), diag::embedded_existentials_without_embedded); HadError = true; } if (Opts.hasFeature(Feature::Embedded)) { Opts.UnavailableDeclOptimizationMode = 
UnavailableDeclOptimization::Complete; Opts.DisableImplicitStringProcessingModuleImport = true; Opts.DisableImplicitConcurrencyModuleImport = true; if (!swiftModulesInitialized()) { Diags.diagnose(SourceLoc(), diag::no_swift_sources_with_embedded); HadError = true; } if (Opts.hasFeature(Feature::LibraryEvolution)) { Diags.diagnose(SourceLoc(), diag::evolution_with_embedded); HadError = true; } if (!FrontendOpts.InputsAndOutputs.isWholeModule() && FrontendOptions::doesActionGenerateSIL(FrontendOpts.RequestedAction)) { Diags.diagnose(SourceLoc(), diag::wmo_with_embedded); HadError = true; } if (Opts.EnableObjCInterop) { Diags.diagnose(SourceLoc(), diag::objc_with_embedded); HadError = true; } } if (auto A = Args.getLastArg(OPT_checked_async_objc_bridging)) { auto value = llvm::StringSwitch<std::optional<bool>>(A->getValue()) .Case("off", false) .Case("on", true) .Default(std::nullopt); if (value) { Opts.UseCheckedAsyncObjCBridging = *value; } else { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); HadError = true; } } else if (Opts.isLanguageModeAtLeast(LanguageMode::v6)) { Opts.UseCheckedAsyncObjCBridging = true; } Opts.DisableDynamicActorIsolation |= Args.hasArg(OPT_disable_dynamic_actor_isolation); if (const Arg *A = Args.getLastArg(options::OPT_default_isolation)) { auto behavior = llvm::StringSwitch<std::optional<DefaultIsolation>>(A->getValue()) .Case("MainActor", DefaultIsolation::MainActor) .Case("nonisolated", DefaultIsolation::Nonisolated) .Default(std::nullopt); if (behavior) { Opts.DefaultIsolationBehavior = *behavior; } else { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); HadError = true; } } else { Opts.DefaultIsolationBehavior = DefaultIsolation::Nonisolated; } if (Opts.DefaultIsolationBehavior == DefaultIsolation::MainActor) { Opts.enableFeature(Feature::InferIsolatedConformances); Opts.enableFeature(Feature::NoExplicitNonIsolated); } if 
(FrontendOpts.ImportHeaderAsInternal)
    Opts.enableFeature(Feature::CheckImplementationOnly);

  // The environment variable acts as an escape hatch for the "strict" feature
  // implying the base CheckImplementationOnly feature.
  if (Opts.hasFeature(Feature::CheckImplementationOnlyStrict) &&
      !::getenv("SWIFT_DISABLE_IMPLICIT_CHECK_IMPLEMENTATION_ONLY"))
    Opts.enableFeature(Feature::CheckImplementationOnly);

#if !defined(NDEBUG) && SWIFT_ENABLE_EXPERIMENTAL_PARSER_VALIDATION
  /// Enable round trip parsing via the new swift parser unless it is disabled
  /// explicitly. The new Swift parser can have mismatches with C++ parser -
  /// rdar://118013482 Use this flag to disable round trip through the new
  /// Swift parser for such cases.
  if (!Args.hasArg(OPT_disable_experimental_parser_round_trip)) {
    Opts.enableFeature(Feature::ParserRoundTrip);
    Opts.enableFeature(Feature::ParserValidation);
  }
#endif

  if (Args.hasArg(OPT_disable_safe_interop_wrappers))
    Opts.DisableSafeInteropWrappers = true;

  // Any diagnosed error above, or an unsupported OS/arch in the target
  // triple, fails the whole language-option parse.
  return HadError || UnsupportedOS || UnsupportedArch;
}

/// Parse type-checker-related frontend arguments into \p Opts.
///
/// \param Opts the TypeCheckerOptions to populate.
/// \param Args the parsed argument list.
/// \param Diags sink for invalid-value and conflict diagnostics.
/// \param LangOpts already-parsed language options (consulted for
///        -allow-non-resilient-access interactions).
/// \param FrontendOpts already-parsed frontend options (action, module name).
/// \returns true if any argument was invalid.
static bool ParseTypeCheckerArgs(TypeCheckerOptions &Opts, ArgList &Args,
                                 DiagnosticEngine &Diags,
                                 const LangOptions &LangOpts,
                                 const FrontendOptions &FrontendOpts) {
  using namespace options;

  bool HadError = false;
  // Shared helper: parse the last occurrence of \p optionID as an unsigned
  // decimal integer into \p valueToSet, diagnosing malformed values.
  auto setUnsignedIntegerArgument = [&Args, &Diags,
                                     &HadError](options::ID optionID,
                                                unsigned &valueToSet) {
    if (const Arg *A = Args.getLastArg(optionID)) {
      unsigned attempt;
      if (StringRef(A->getValue()).getAsInteger(/*radix*/ 10, attempt)) {
        Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                       A->getAsString(Args), A->getValue());
        HadError = true;
      } else {
        valueToSet = attempt;
      }
    }
  };

  setUnsignedIntegerArgument(OPT_warn_long_function_bodies,
                             Opts.WarnLongFunctionBodies);
  setUnsignedIntegerArgument(OPT_warn_long_expression_type_checking,
                             Opts.WarnLongExpressionTypeChecking);
  setUnsignedIntegerArgument(OPT_solver_expression_time_threshold_EQ,
                             Opts.ExpressionTimeoutThreshold);
  setUnsignedIntegerArgument(OPT_dynamic_member_lookup_depth_limit_EQ,
                             Opts.DynamicMemberLookupDepthLimit);
  setUnsignedIntegerArgument(OPT_switch_checking_invocation_threshold_EQ,
                             Opts.SwitchCheckingInvocationThreshold);
  setUnsignedIntegerArgument(OPT_debug_constraints_attempt,
                             Opts.DebugConstraintSolverAttempt);
  setUnsignedIntegerArgument(OPT_solver_memory_threshold_EQ,
                             Opts.SolverMemoryThreshold);
  setUnsignedIntegerArgument(OPT_solver_scope_threshold_EQ,
                             Opts.SolverScopeThreshold);
  setUnsignedIntegerArgument(OPT_solver_trail_threshold_EQ,
                             Opts.SolverTrailThreshold);
  setUnsignedIntegerArgument(OPT_solver_shuffle_disjunctions_EQ,
                             Opts.ShuffleDisjunctionSeed);
  setUnsignedIntegerArgument(OPT_solver_shuffle_choices_EQ,
                             Opts.ShuffleDisjunctionChoicesSeed);
  Opts.DebugTimeFunctionBodies |= Args.hasArg(OPT_debug_time_function_bodies);
  Opts.DebugTimeExpressions |=
      Args.hasArg(OPT_debug_time_expression_type_checking);

  // Check for SkipFunctionBodies arguments in order from skipping less to
  // skipping more. Each skipping mode is ignored (with a warning) when
  // -allow-non-resilient-access is in effect, since that mode requires
  // full bodies.
  if (Args.hasArg(
        OPT_experimental_skip_non_inlinable_function_bodies_without_types)) {
    if (LangOpts.AllowNonResilientAccess)
      Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by,
                     "-experimental-skip-non-inlinable-function-bodies-without-types",
                     "-allow-non-resilient-access");
    else
      Opts.SkipFunctionBodies = FunctionBodySkipping::NonInlinableWithoutTypes;
  }

  // If asked to perform InstallAPI, go ahead and enable non-inlinable function
  // body skipping.
  if (Args.hasArg(OPT_experimental_skip_non_inlinable_function_bodies)) {
    if (LangOpts.AllowNonResilientAccess)
      Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by,
                     "-experimental-skip-non-inlinable-function-bodies",
                     "-allow-non-resilient-access");
    else
      Opts.SkipFunctionBodies = FunctionBodySkipping::NonInlinable;
  }

  if (Args.hasArg(OPT_tbd_is_installapi)) {
    if (LangOpts.AllowNonResilientAccess)
      Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by,
                     "-tbd-is-installapi", "-allow-non-resilient-access");
    else
      Opts.SkipFunctionBodies = FunctionBodySkipping::NonInlinable;
  }

  if (Args.hasArg(OPT_experimental_skip_all_function_bodies)) {
    if (LangOpts.AllowNonResilientAccess)
      Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by,
                     "-experimental-skip-all-function-bodies",
                     "-allow-non-resilient-access");
    else
      Opts.SkipFunctionBodies = FunctionBodySkipping::All;
  }

  if (Opts.SkipFunctionBodies != FunctionBodySkipping::None &&
      FrontendOpts.ModuleName == SWIFT_ONONE_SUPPORT) {
    // Disable these optimizations if we're compiling SwiftOnoneSupport,
    // because we _definitely_ need to look inside every declaration to figure
    // out what gets prespecialized.
    Opts.SkipFunctionBodies = FunctionBodySkipping::None;
    Diags.diagnose(
        SourceLoc(), diag::module_incompatible_with_skip_function_bodies,
        SWIFT_ONONE_SUPPORT);
  }

  Opts.PrintFullConvention |=
      Args.hasArg(OPT_experimental_print_full_convention);

  Opts.DebugConstraintSolver |= Args.hasArg(OPT_debug_constraints);

  // Each -debug-constraints-on-line value must be an unsigned line number;
  // collect them all and sort for later lookup.
  for (const Arg *A : Args.filtered(OPT_debug_constraints_on_line)) {
    unsigned line;
    if (StringRef(A->getValue()).getAsInteger(/*radix*/ 10, line)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      HadError = true;
    } else {
      Opts.DebugConstraintSolverOnLines.push_back(line);
    }
  }
  llvm::sort(Opts.DebugConstraintSolverOnLines);

  for (auto A : Args.getAllArgValues(OPT_debug_forbid_typecheck_prefix)) {
    Opts.DebugForbidTypecheckPrefixes.push_back(A);
  }

  if (Args.getLastArg(OPT_solver_disable_splitter))
    Opts.SolverDisableSplitter = true;

  // For each paired enable/disable solver flag, only update the option when
  // one of the pair is present; the enable spelling wins if both are given.
  if (Args.hasArg(OPT_solver_enable_prepared_overloads) ||
      Args.hasArg(OPT_solver_disable_prepared_overloads))
    Opts.SolverEnablePreparedOverloads =
        Args.hasArg(OPT_solver_enable_prepared_overloads);

  if (Args.hasArg(OPT_solver_enable_prune_disjunctions) ||
      Args.hasArg(OPT_solver_disable_prune_disjunctions))
    Opts.SolverPruneDisjunctions =
        Args.hasArg(OPT_solver_enable_prune_disjunctions);

  if (Args.hasArg(OPT_solver_enable_optimize_operator_defaults) ||
      Args.hasArg(OPT_solver_disable_optimize_operator_defaults))
    Opts.SolverOptimizeOperatorDefaults =
        Args.hasArg(OPT_solver_enable_optimize_operator_defaults);

  if (Args.hasArg(OPT_solver_enable_performance_hacks) ||
      Args.hasArg(OPT_solver_disable_performance_hacks))
    Opts.SolverEnablePerformanceHacks =
        Args.hasArg(OPT_solver_enable_performance_hacks);

  if (FrontendOpts.RequestedAction == FrontendOptions::ActionType::Immediate)
    Opts.DeferToRuntime = true;

  Opts.DebugGenericSignatures |= Args.hasArg(OPT_debug_generic_signatures);
  Opts.DebugInverseRequirements |= Args.hasArg(OPT_debug_inverse_requirements);

  if (Args.hasArg(OPT_experimental_lazy_typecheck)) {
// Same restrictions as -experimental-skip-non-exportable-decls. These // could be relaxed in the future, since lazy typechecking is probably not // inherently unsafe without these options. if (Args.hasArg(OPT_enable_library_evolution) || Args.hasArg(OPT_experimental_skip_all_function_bodies)) { Opts.EnableLazyTypecheck |= Args.hasArg(OPT_experimental_lazy_typecheck); } else { Diags.diagnose(SourceLoc(), diag::ignoring_option_requires_option, "-experimental-lazy-typecheck", "-enable-library-evolution"); } } if (LangOpts.AllowNonResilientAccess && Opts.EnableLazyTypecheck) { Diags.diagnose(SourceLoc(), diag::warn_ignore_option_overridden_by, "-experimental-lazy-typecheck", "-allow-non-resilient-access"); Opts.EnableLazyTypecheck = false; } // HACK: The driver currently erroneously passes all flags to module interface // verification jobs. -experimental-skip-non-exportable-decls is not // appropriate for verification tasks and should be ignored, though. if (FrontendOpts.RequestedAction == FrontendOptions::ActionType::TypecheckModuleFromInterface) Opts.EnableLazyTypecheck = false; return HadError; } static bool ValidateModulesOnceOptions(const ClangImporterOptions &Opts, DiagnosticEngine &Diags) { if (Opts.ValidateModulesOnce && Opts.BuildSessionFilePath.empty()) { Diags.diagnose(SourceLoc(), diag::error_clang_validate_once_requires_session_file); return true; } return false; } static bool ParseClangImporterArgs(ClangImporterOptions &Opts, ArgList &Args, DiagnosticEngine &Diags, StringRef workingDirectory, const LangOptions &LangOpts, const FrontendOptions &FrontendOpts, const CASOptions &CASOpts) { using namespace options; if (const Arg *a = Args.getLastArg(OPT_tools_directory)) { // If a custom tools directory is specified, try to find Clang there. // This is useful when the Swift executable is located in a different // directory than the Clang/LLVM executables, for example, when building // the Swift project itself. 
llvm::SmallString<128> clangPath(a->getValue()); llvm::sys::path::append(clangPath, "clang"); if (llvm::sys::fs::exists(clangPath)) { Opts.clangPath = std::string(clangPath); } } if (const Arg *A = Args.getLastArg(OPT_module_cache_path)) { Opts.ModuleCachePath = A->getValue(); } if (const Arg *A = Args.getLastArg(OPT_clang_scanner_module_cache_path)) { Opts.ClangScannerModuleCachePath = A->getValue(); } if (const Arg *A = Args.getLastArg(OPT_target_cpu)) Opts.TargetCPU = A->getValue(); if (const Arg *A = Args.getLastArg(OPT_index_store_path)) Opts.IndexStorePath = A->getValue(); for (const Arg *A : Args.filtered(OPT_Xcc)) { StringRef clangArg = A->getValue(); if (clangArg.consume_front("-working-directory")) { if (!clangArg.empty() && clangArg.front() != '=') { // Have an old -working-directory<path> argument. Convert it into // two separate arguments as Clang no longer supports that format. Opts.ExtraArgs.push_back("-working-directory"); Opts.ExtraArgs.push_back(clangArg.str()); continue; } } Opts.ExtraArgs.push_back(A->getValue()); } Opts.DumpClangDiagnostics |= Args.hasArg(OPT_dump_clang_diagnostics); // When the repl is invoked directly (ie. `lldb --repl="..."`) the action // type seems to be NoneAction. 
if (FrontendOpts.RequestedAction != FrontendOptions::ActionType::REPL && FrontendOpts.RequestedAction != FrontendOptions::ActionType::NoneAction && LangOpts.hasFeature(Feature::ImportObjcForwardDeclarations)) { Opts.ImportForwardDeclarations = true; } if (Args.hasArg(OPT_embed_bitcode)) Opts.Mode = ClangImporterOptions::Modes::EmbedBitcode; else if (Args.hasArg(OPT_emit_pcm) || Args.hasArg(OPT_dump_pcm)) Opts.Mode = ClangImporterOptions::Modes::PrecompiledModule; bool hadNormalBridgingHeader = false; if (auto *A = Args.getLastArg(OPT_import_bridging_header, OPT_internal_import_bridging_header)) { Opts.BridgingHeader = A->getValue(); Opts.BridgingHeaderIsInternal = A->getOption().getID() == OPT_internal_import_bridging_header; } if (auto *A = Args.getLastArg(OPT_import_pch, OPT_internal_import_pch)) { Opts.BridgingHeaderPCH = A->getValue(); bool importAsInternal = A->getOption().getID() == OPT_internal_import_pch; if (hadNormalBridgingHeader && importAsInternal != Opts.BridgingHeaderIsInternal) { Diags.diagnose(SourceLoc(), diag::bridging_header_and_pch_internal_mismatch); } Opts.BridgingHeaderIsInternal = importAsInternal; } Opts.DisableSwiftBridgeAttr |= Args.hasArg(OPT_disable_swift_bridge_attr); Opts.DisableOverlayModules |= Args.hasArg(OPT_emit_imported_modules); if (Args.hasArg(OPT_disable_clang_spi)) { Opts.EnableClangSPI = false; } Opts.DirectClangCC1ModuleBuild |= Args.hasArg(OPT_direct_clang_cc1_module_build); if (const Arg *A = Args.getLastArg(OPT_pch_output_dir)) { Opts.PrecompiledHeaderOutputDir = A->getValue(); Opts.PCHDisableValidation |= Args.hasArg(OPT_pch_disable_validation); } Opts.LoadVersionIndependentAPINotes |= Args.hasArg(OPT_version_independent_apinotes); if (FrontendOpts.DisableImplicitModules) Opts.DisableImplicitClangModules = true; Opts.ValidateModulesOnce |= Args.hasArg(OPT_validate_clang_modules_once); if (auto *A = Args.getLastArg(OPT_clang_build_session_file)) Opts.BuildSessionFilePath = A->getValue(); if 
(ValidateModulesOnceOptions(Opts, Diags)) return true; if (Args.hasFlag(options::OPT_warnings_as_errors, options::OPT_no_warnings_as_errors, false)) Opts.ExtraArgs.push_back("-Werror"); Opts.DebuggerSupport |= Args.hasArg(OPT_debugger_support); Opts.DisableSourceImport |= Args.hasArg(OPT_disable_clangimporter_source_import); if (Args.hasArg(OPT_disable_const_value_importing)) Opts.EnableConstValueImporting = false; Opts.ClangImporterDirectCC1Scan |= Args.hasArg(OPT_experimental_clang_importer_direct_cc1_scan); // Forward the FrontendOptions to clang importer option so it can be // accessed when creating clang module compilation invocation. if (CASOpts.EnableCaching) { // Caching requires direct clang import cc1 scanning. Opts.ClangImporterDirectCC1Scan = true; } // If in direct clang cc1 module build mode, return early. if (Opts.DirectClangCC1ModuleBuild) return false; // Only amend the following path option when not in direct cc1 mode. for (const Arg *A : Args.filtered(OPT_file_prefix_map, OPT_debug_prefix_map)) { std::string Val(A->getValue()); // Forward -debug-prefix-map arguments from Swift to Clang as // -fdebug-prefix-map= and -file-prefix-map as -ffile-prefix-map=. // // This is required to ensure DIFiles created there, like /// "<swift-imported-modules>", as well as index data, have their paths // remapped properly. // // (Note, however, that Clang's usage of std::map means that the remapping // may not be applied in the same order, which can matter if one mapping is // a prefix of another.) if (A->getOption().matches(OPT_file_prefix_map)) Opts.ExtraArgs.push_back("-ffile-prefix-map=" + Val); else Opts.ExtraArgs.push_back("-fdebug-prefix-map=" + Val); } if (auto *A = Args.getLastArg(OPT_file_compilation_dir)) { // Forward the -file-compilation-dir flag to correctly set the // debug compilation directory. 
    std::string Val(A->getValue());
    Opts.ExtraArgs.push_back("-ffile-compilation-dir=" + Val);
  }

  if (!workingDirectory.empty()) {
    // Provide a working directory to Clang as well if there are any -Xcc
    // options, in case some of them are search-related. But do it at the
    // beginning, so that an explicit -Xcc -working-directory will win.
    Opts.ExtraArgs.insert(Opts.ExtraArgs.begin(),
                          {"-working-directory", workingDirectory.str()});
  }

  return false;
}

/// Parse symbol-graph-generation frontend arguments into \p Opts.
/// The symbol-graph target always mirrors the already-parsed language target.
static void ParseSymbolGraphArgs(symbolgraphgen::SymbolGraphOptions &Opts,
                                 ArgList &Args, DiagnosticEngine &Diags,
                                 LangOptions &LangOpts) {
  using namespace options;

  if (const Arg *A = Args.getLastArg(OPT_emit_symbol_graph_dir)) {
    Opts.OutputDir = A->getValue();
  }

  Opts.Target = LangOpts.Target;

  // NOTE(review): SkipInheritedDocs is overwritten unconditionally further
  // below from OPT_symbol_graph_skip_inherited_docs, making this assignment
  // dead — confirm which of the two flags is intended to win.
  Opts.SkipInheritedDocs = Args.hasArg(OPT_skip_inherited_docs);
  Opts.SkipProtocolImplementations =
      Args.hasArg(OPT_skip_protocol_implementations);
  Opts.IncludeSPISymbols = Args.hasArg(OPT_include_spi_symbols);
  Opts.ShortenOutputNames = Args.hasArg(OPT_symbol_graph_shorten_output_names);
  Opts.EmitExtensionBlockSymbols =
      Args.hasFlag(OPT_emit_extension_block_symbols,
                   OPT_omit_extension_block_symbols, /*default=*/false);

  // Unrecognized access-level spellings quietly fall back to "public".
  if (auto *A = Args.getLastArg(OPT_symbol_graph_minimum_access_level)) {
    Opts.MinimumAccessLevel =
        llvm::StringSwitch<AccessLevel>(A->getValue())
            .Case("open", AccessLevel::Open)
            .Case("public", AccessLevel::Public)
            .Case("package", AccessLevel::Package)
            .Case("internal", AccessLevel::Internal)
            .Case("fileprivate", AccessLevel::FilePrivate)
            .Case("private", AccessLevel::Private)
            .Default(AccessLevel::Public);
  } else {
    Opts.MinimumAccessLevel = AccessLevel::Public;
  }

  // The allow/block platform lists share one comma-separated value; which
  // option spelling was used decides allow-list vs block-list semantics.
  if (auto *A = Args.getLastArg(OPT_symbol_graph_allow_availability_platforms,
                                OPT_symbol_graph_block_availability_platforms)) {
    llvm::SmallVector<StringRef> AvailabilityPlatforms;
    StringRef(A->getValue())
        .split(AvailabilityPlatforms, ',', /*MaxSplits*/ -1,
               /*KeepEmpty*/ false);
    Opts.AvailabilityPlatforms = llvm::DenseSet<StringRef>(
        AvailabilityPlatforms.begin(), AvailabilityPlatforms.end());
    Opts.AvailabilityIsBlockList =
        A->getOption().matches(OPT_symbol_graph_block_availability_platforms);
  }

  // default values for generating symbol graphs during a build
  Opts.PrettyPrint = Args.hasArg(OPT_symbol_graph_pretty_print);
  Opts.EmitSynthesizedMembers =
      !Args.hasArg(OPT_symbol_graph_skip_synthesized_members);
  Opts.SkipInheritedDocs = !Args.hasArg(OPT_symbol_graph_skip_inherited_docs);
  Opts.PrintMessages = false;
  Opts.IncludeClangDocs = false;
}

/// Validate one -swift-module-file argument of the form
/// "<module-name>=<path>" and record it in \p ExplicitSwiftModuleInputs.
///
/// Diagnoses a missing '=' delimiter or an invalid module name. A repeated
/// module name warns and the later path replaces the earlier one.
///
/// \returns true on a diagnosed error.
static bool
validateSwiftModuleFileArgumentAndAdd(
    const std::string &swiftModuleArgument, DiagnosticEngine &Diags,
    llvm::StringMap<std::string> &ExplicitSwiftModuleInputs) {
  std::size_t foundDelimeterPos = swiftModuleArgument.find_first_of("=");
  if (foundDelimeterPos == std::string::npos) {
    Diags.diagnose(SourceLoc(),
                   diag::error_swift_module_file_requires_delimeter,
                   swiftModuleArgument);
    return true;
  }
  std::string moduleName = swiftModuleArgument.substr(0, foundDelimeterPos),
              modulePath = swiftModuleArgument.substr(foundDelimeterPos+1);
  if (!Lexer::isIdentifier(moduleName)) {
    Diags.diagnose(SourceLoc(), diag::error_bad_module_name, moduleName,
                   false);
    return true;
  }
  auto priorEntryIt = ExplicitSwiftModuleInputs.find(moduleName);
  if (priorEntryIt != ExplicitSwiftModuleInputs.end()) {
    // Last writer wins for duplicate module names, after warning.
    Diags.diagnose(SourceLoc(), diag::warn_multiple_module_inputs_same_name,
                   moduleName, priorEntryIt->getValue(), modulePath);
    ExplicitSwiftModuleInputs[moduleName] = modulePath;
  } else
    ExplicitSwiftModuleInputs.insert(std::make_pair(moduleName, modulePath));
  return false;
}

/// Parse search-path-related frontend arguments into \p Opts.
/// Relative search paths are resolved against \p workingDirectory.
static bool ParseSearchPathArgs(SearchPathOptions &Opts, ArgList &Args,
                                DiagnosticEngine &Diags,
                                const CASOptions &CASOpts,
                                const FrontendOptions &FrontendOpts,
                                StringRef workingDirectory) {
  using namespace options;
  namespace path = llvm::sys::path;

  // Make a relative search path absolute by prepending the working
  // directory; absolute paths (or an empty working directory) pass through.
  auto resolveSearchPath =
      [workingDirectory](StringRef searchPath) -> std::string {
    if (workingDirectory.empty() || path::is_absolute(searchPath))
      return searchPath.str();
    SmallString<64>
fullPath{workingDirectory}; path::append(fullPath, searchPath); return std::string(fullPath.str()); }; std::vector<SearchPathOptions::SearchPath> ImportSearchPaths( Opts.getImportSearchPaths()); for (const Arg *A : Args.filtered(OPT_I, OPT_Isystem)) { ImportSearchPaths.push_back( {resolveSearchPath(A->getValue()), /*isSystem=*/A->getOption().getID() == OPT_Isystem}); } Opts.setImportSearchPaths(ImportSearchPaths); std::vector<SearchPathOptions::SearchPath> FrameworkSearchPaths( Opts.getFrameworkSearchPaths()); for (const Arg *A : Args.filtered(OPT_F, OPT_Fsystem)) { FrameworkSearchPaths.push_back( {resolveSearchPath(A->getValue()), /*isSystem=*/A->getOption().getID() == OPT_Fsystem}); } Opts.setFrameworkSearchPaths(FrameworkSearchPaths); if (const Arg *A = Args.getLastArg(OPT_in_process_plugin_server_path)) Opts.InProcessPluginServerPath = A->getValue(); // All plugin search options, i.e. '-load-plugin-library', // '-load-plugin-executable', '-plugin-path', and '-external-plugin-path' // are grouped, and plugins are searched by the order of these options. // e.g. For '-plugin-path A -load-plugin-library B/libModule.dylib', if // 'A/libModule.dylib' exists, it's used. for (const Arg *A : Args.filtered(OPT_plugin_search_Group)) { switch (A->getOption().getID()) { case OPT_load_plugin_library: { Opts.PluginSearchOpts.emplace_back(PluginSearchOption::LoadPluginLibrary{ resolveSearchPath(A->getValue())}); break; } case OPT_load_plugin_executable: { // '<path to executable>#<module names>' where the module names are // comma separated. 
StringRef path; StringRef modulesStr; std::tie(path, modulesStr) = StringRef(A->getValue()).rsplit('#'); std::vector<std::string> moduleNames; for (auto name : llvm::split(modulesStr, ',')) { moduleNames.emplace_back(name); } if (path.empty() || moduleNames.empty()) { Diags.diagnose(SourceLoc(), diag::error_load_plugin_executable, A->getValue()); } else { Opts.PluginSearchOpts.emplace_back( PluginSearchOption::LoadPluginExecutable{resolveSearchPath(path), std::move(moduleNames)}); } break; } case OPT_plugin_path: { Opts.PluginSearchOpts.emplace_back( PluginSearchOption::PluginPath{resolveSearchPath(A->getValue())}); break; } case OPT_external_plugin_path: { // '<plugin directory>#<plugin server executable path>'. // FIXME: '#' can be used in the paths. StringRef dylibPath; StringRef serverPath; std::tie(dylibPath, serverPath) = StringRef(A->getValue()).split('#'); Opts.PluginSearchOpts.emplace_back(PluginSearchOption::ExternalPluginPath{ resolveSearchPath(dylibPath), resolveSearchPath(serverPath)}); break; } case OPT_load_resolved_plugin: { StringRef libraryPath; StringRef executablePath; StringRef modulesStr; std::tie(libraryPath, executablePath) = StringRef(A->getValue()).split('#'); std::tie(executablePath, modulesStr) = executablePath.split('#'); if (modulesStr.empty() || (libraryPath.empty() && executablePath.empty())) { Diags.diagnose(SourceLoc(), diag::error_load_resolved_plugin, A->getValue()); } std::vector<std::string> moduleNames; for (auto name : llvm::split(modulesStr, ',')) { moduleNames.emplace_back(name); } Opts.PluginSearchOpts.emplace_back( PluginSearchOption::ResolvedPluginConfig{ libraryPath.str(), executablePath.str(), std::move(moduleNames)}); break; } default: llvm_unreachable("unhandled plugin search option"); } } for (const Arg *A : Args.filtered(OPT_L)) { Opts.LibrarySearchPaths.push_back(resolveSearchPath(A->getValue())); } for (const Arg *A : Args.filtered(OPT_vfsoverlay)) { 
Opts.VFSOverlayFiles.push_back(resolveSearchPath(A->getValue())); } if (const Arg *A = Args.getLastArg(OPT_sdk)) Opts.setSDKPath(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_windows_sdk_root)) Opts.setWinSDKRoot(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_windows_sdk_version)) Opts.setWinSDKVersion(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_visualc_tools_root)) Opts.setVCToolsRoot(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_visualc_tools_version)) Opts.setVCToolsVersion(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_sysroot)) Opts.setSysRoot(A->getValue()); if (const Arg *A = Args.getLastArg(OPT_resource_dir)) Opts.RuntimeResourcePath = A->getValue(); Opts.SkipAllImplicitImportPaths |= Args.hasArg(OPT_nostdimport); Opts.SkipSDKImportPaths |= Args.hasArg(OPT_nostdlibimport); Opts.DisableModulesValidateSystemDependencies |= Args.hasArg(OPT_disable_modules_validate_system_headers); if (const Arg *A = Args.getLastArg(OPT_explicit_swift_module_map)) Opts.ExplicitSwiftModuleMapPath = A->getValue(); for (auto A : Args.getAllArgValues(options::OPT_swift_module_file)) { if (validateSwiftModuleFileArgumentAndAdd(A, Diags, Opts.ExplicitSwiftModuleInputs)) return true; } for (auto A: Args.filtered(OPT_candidate_module_file)) { Opts.CandidateCompiledModules.push_back(resolveSearchPath(A->getValue())); } if (const Arg *A = Args.getLastArg(OPT_const_gather_protocols_file)) Opts.ConstGatherProtocolListFilePath = A->getValue(); for (auto A : Args.getAllArgValues(options::OPT_serialized_path_obfuscate)) { auto SplitMap = StringRef(A).split('='); Opts.DeserializedPathRecoverer.addMapping(SplitMap.first, SplitMap.second); } for (const Arg *A : Args.filtered(OPT_scanner_prefix_map_paths)) { Opts.ScannerPrefixMapper.push_back({A->getValue(0), A->getValue(1)}); } // Handle legacy prefix map option. 
for (StringRef Opt : Args.getAllArgValues(OPT_scanner_prefix_map)) { if (auto Mapping = llvm::MappedPrefix::getFromJoined(Opt)) Opts.ScannerPrefixMapper.push_back({Mapping->Old, Mapping->New}); } Opts.ResolvedPluginVerification |= Args.hasArg(OPT_resolved_plugin_verification); // rdar://132340493 disable scanner-side validation for non-caching builds Opts.ScannerModuleValidation |= Args.hasFlag(OPT_scanner_module_validation, OPT_no_scanner_module_validation, CASOpts.EnableCaching); Opts.BridgingHeaderChaining |= Args.hasFlag(OPT_auto_bridging_header_chaining, OPT_no_auto_bridging_header_chaining, false); bool buildingFromInterface = FrontendOpts.InputMode == FrontendOptions::ParseInputMode::SwiftModuleInterface; auto firstInputPath = FrontendOpts.InputsAndOutputs.hasInputs() ? FrontendOpts.InputsAndOutputs.getFilenameOfFirstInput() : ""; Opts.ResolveInPackageModuleDependencies |= !buildingFromInterface || StringRef(firstInputPath).ends_with(".package.swiftinterface"); std::optional<std::string> forceModuleLoadingMode; if (auto *A = Args.getLastArg(OPT_module_load_mode)) forceModuleLoadingMode = A->getValue(); else if (auto Env = llvm::sys::Process::GetEnv("SWIFT_FORCE_MODULE_LOADING")) forceModuleLoadingMode = Env; if (forceModuleLoadingMode) { if (*forceModuleLoadingMode == "prefer-interface" || *forceModuleLoadingMode == "prefer-parseable") Opts.ModuleLoadMode = ModuleLoadingMode::PreferInterface; else if (*forceModuleLoadingMode == "prefer-serialized") Opts.ModuleLoadMode = ModuleLoadingMode::PreferSerialized; else if (*forceModuleLoadingMode == "only-interface" || *forceModuleLoadingMode == "only-parseable") Opts.ModuleLoadMode = ModuleLoadingMode::OnlyInterface; else if (*forceModuleLoadingMode == "only-serialized") Opts.ModuleLoadMode = ModuleLoadingMode::OnlySerialized; else Diags.diagnose(SourceLoc(), diag::unknown_forced_module_loading_mode, *forceModuleLoadingMode); } for (auto *A : Args.filtered(OPT_swift_module_cross_import)) 
// Record an explicit cross-import overlay declaration:
// '-swift-module-cross-import <module> <overlay-file>'.
Opts.CrossImportInfo[A->getValue(0)].push_back(A->getValue(1));

// Modules that should be reported as importable (e.g. by '#if canImport')
// without any attached version information.
for (auto &Name : Args.getAllArgValues(OPT_module_can_import))
  Opts.CanImportModuleInfo.push_back({Name, {}, {}});

// '-module-can-import-version <module> <version> <underlying-version>':
// like the above, but carrying Swift and underlying Clang module versions.
// Unparsable versions are diagnosed but parsing continues.
for (auto *A: Args.filtered(OPT_module_can_import_version)) {
  llvm::VersionTuple Version, UnderlyingVersion;
  if (Version.tryParse(A->getValue(1)))
    Diags.diagnose(SourceLoc(), diag::invalid_can_import_module_version,
                   A->getValue(1));
  if (UnderlyingVersion.tryParse(A->getValue(2)))
    Diags.diagnose(SourceLoc(), diag::invalid_can_import_module_version,
                   A->getValue(2));
  Opts.CanImportModuleInfo.push_back(
      {A->getValue(0), Version, UnderlyingVersion});
}

Opts.DisableCrossImportOverlaySearch |=
    Args.hasArg(OPT_disable_cross_import_overlay_search);

// Opts.RuntimeIncludePath is set by calls to
// setRuntimeIncludePath() or setMainExecutablePath().
// Opts.RuntimeImportPath is set by calls to
// setRuntimeIncludePath() or setMainExecutablePath() and
// updated by calls to setTargetTriple() or parseArgs().
// Assumes exactly one of setMainExecutablePath() or setRuntimeIncludePath()
// is called before setTargetTriple() and parseArgs().
// TODO: improve the handling of RuntimeIncludePath.

// No fatal parse errors encountered for the search-path options.
return false;
}

/// Determine whether the given argument list enables Embedded Swift.
///
/// Scans the '-enable/-disable-experimental-feature' occurrences in reverse
/// so that the last mention of "Embedded" on the command line wins.
static bool isEmbedded(ArgList &args) {
  using namespace swift::options;

  for (const Arg *arg : args.filtered_reverse(
           OPT_enable_experimental_feature, OPT_disable_experimental_feature)) {
    if (llvm::StringRef(arg->getValue()) != "Embedded")
      continue;

    // The most recent occurrence of the feature flag decides.
    return arg->getOption().matches(OPT_enable_experimental_feature);
  }

  return false;
}

/// Identifies modules for which we should temporarily suppress the diagnostics
/// for Embedded restrictions despite building in Embedded Swift.
/// Currently only the core stdlib modules "Swift" and "_Concurrency" are
/// exempted; the module name is read with getLastArgNoClaim so the argument
/// is not marked as claimed here.
static bool temporarilySuppressEmbeddedRestrictionDiagnostics(ArgList &args) {
  using namespace swift::options;

  if (const Arg *arg = args.getLastArgNoClaim(OPT_module_name)) {
    StringRef moduleName(arg->getValue());
    if (moduleName == "Swift" || moduleName == "_Concurrency")
      return true;
  }

  return false;
}

/// Parse the diagnostic-related frontend options from \p Args into \p Opts.
///
/// \returns true if an unsupported option argument was diagnosed,
/// false otherwise.
static bool ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
                                DiagnosticEngine &Diags) {
  // NOTE: This executes at the beginning of parsing the command line and cannot
  // depend on the results of parsing other options.
  using namespace options;

  // Diagnostic-verification modes (-verify / -verify-apply-fixes).
  if (Args.hasArg(OPT_verify))
    Opts.VerifyMode = DiagnosticOptions::Verify;
  if (Args.hasArg(OPT_verify_apply_fixes))
    Opts.VerifyMode = DiagnosticOptions::VerifyAndApplyFixes;
  Opts.VerifyIgnoreUnknown |= Args.hasArg(OPT_verify_ignore_unknown);
  Opts.VerifyIgnoreUnrelated |= Args.hasArg(OPT_verify_ignore_unrelated);
  Opts.VerifyIgnoreMacroLocationNote |=
      Args.hasArg(OPT_verify_ignore_macro_note);
  Opts.SkipDiagnosticPasses |= Args.hasArg(OPT_disable_diagnostic_passes);
  Opts.ShowDiagnosticsAfterFatalError |=
      Args.hasArg(OPT_show_diagnostics_after_fatal);

  // Extra files and expectation prefixes consulted by the diagnostic verifier.
  for (Arg *A : Args.filtered(OPT_verify_additional_file))
    Opts.AdditionalVerifierFiles.push_back(A->getValue());
  for (Arg *A : Args.filtered(OPT_verify_additional_prefix))
    Opts.AdditionalDiagnosticVerifierPrefixes.push_back(A->getValue());

  // Colored output defaults to whether stderr supports it.
  Opts.UseColor |=
      Args.hasFlag(OPT_color_diagnostics, OPT_no_color_diagnostics,
                   /*Default=*/llvm::sys::Process::StandardErrHasColors());

  // If no style options are specified, default to Swift style, unless it is
  // under Swift caching, where LLVM style is preferred because LLVM style
  // replays a lot faster.
  Opts.PrintedFormattingStyle =
      Args.hasFlag(OPT_cache_compile_job, OPT_no_cache_compile_job,
                   /*Default=*/false)
          ? DiagnosticOptions::FormattingStyle::LLVM
          : DiagnosticOptions::FormattingStyle::Swift;
  // An explicit '-diagnostic-style' overrides the default chosen above.
  if (const Arg *arg = Args.getLastArg(OPT_diagnostic_style)) {
    StringRef contents = arg->getValue();
    if (contents == "llvm") {
      Opts.PrintedFormattingStyle = DiagnosticOptions::FormattingStyle::LLVM;
    } else if (contents == "swift") {
      Opts.PrintedFormattingStyle = DiagnosticOptions::FormattingStyle::Swift;
    } else {
      Diags.diagnose(SourceLoc(), diag::error_unsupported_option_argument,
                     arg->getOption().getPrefixedName(), arg->getValue());
      return true;
    }
  }

  // '-emit-macro-expansion-files <value>': only the "diagnostics" value (with
  // an optional "no-" prefix for negation) is acted upon here.
  for (const Arg *arg: Args.filtered(OPT_emit_macro_expansion_files)) {
    StringRef contents = arg->getValue();
    bool negated = contents.starts_with("no-");
    if (negated)
      contents = contents.drop_front(3);
    if (contents == "diagnostics")
      Opts.EmitMacroExpansionFiles = !negated;
  }

  // Warn about (but otherwise ignore) obsolete fix-it options.
  {
    OptSpecifier obsoleteOpts[] = {
        OPT_fixit_all,
        OPT_emit_fixits_path,
    };
    for (auto option: obsoleteOpts) {
      if (auto *arg = Args.getLastArg(option)) {
        Diags.diagnose(SourceLoc(), diag::ignoring_option_obsolete,
                       arg->getOption().getPrefixedName());
      }
    }
  }

  // If the "embedded" flag was provided, enable the EmbeddedRestrictions
  // warning group. This group is opt-in in non-Embedded builds.
  if (isEmbedded(Args) && !Args.hasArg(OPT_suppress_warnings) &&
      !temporarilySuppressEmbeddedRestrictionDiagnostics(Args))
    Opts.WarningGroupControlRules.emplace_back(
        WarningGroupBehavior::AsWarning, DiagGroupID::EmbeddedRestrictions);

  Opts.SuppressWarnings |= Args.hasArg(OPT_suppress_warnings);
  Opts.SuppressNotes |= Args.hasArg(OPT_suppress_notes);
  Opts.SuppressRemarks |= Args.hasArg(OPT_suppress_remarks);

  // Collect warning-control rules in command-line order.
  for (const Arg *arg : Args.filtered(OPT_warning_treating_Group)) {
    switch (arg->getOption().getID()) {
    case OPT_warnings_as_errors:
      Opts.WarningGroupControlRules.emplace_back(WarningGroupBehavior::AsError);
      break;
    case OPT_no_warnings_as_errors:
      Opts.WarningGroupControlRules.emplace_back(
          WarningGroupBehavior::AsWarning);
      break;
    case OPT_Werror: {
      // '-Werror <group>': escalate one warning group to an error. Unknown
      // group names are collected here and diagnosed later
      // (see setUpDiagnosticEngine).
      auto groupID = getDiagGroupIDByName(arg->getValue());
      if (groupID && *groupID != DiagGroupID::no_group) {
        Opts.WarningGroupControlRules.emplace_back(
            WarningGroupBehavior::AsError, *groupID);
      } else {
        Opts.UnknownWarningGroups.push_back(arg->getValue());
      }
      break;
    }
    case OPT_Wwarning: {
      // '-Wwarning <group>': keep (or demote) one group as a warning.
      auto groupID = getDiagGroupIDByName(arg->getValue());
      if (groupID && *groupID != DiagGroupID::no_group) {
        Opts.WarningGroupControlRules.emplace_back(
            WarningGroupBehavior::AsWarning, *groupID);
      } else {
        Opts.UnknownWarningGroups.push_back(arg->getValue());
      }
      break;
    }
    default:
      llvm_unreachable("unhandled warning as error option");
    };
  }

  // `-require-explicit-sendable` is an alias to `-Wwarning ExplicitSendable`.
  if (Args.hasArg(OPT_require_explicit_sendable) &&
      !Args.hasArg(OPT_suppress_warnings)) {
    Opts.WarningGroupControlRules.emplace_back(
        WarningGroupBehavior::AsWarning, DiagGroupID::ExplicitSendable);
  }

  if (Args.hasArg(OPT_debug_diagnostic_names)) {
    Opts.PrintDiagnosticNames = PrintDiagnosticNamesMode::Identifier;
  }

  if (Arg *A = Args.getLastArg(OPT_diagnostic_documentation_path)) {
    Opts.DiagnosticDocumentationPath = A->getValue();
  }

  if (Arg *A = Args.getLastArg(OPT_locale)) {
    std::string localeCode = A->getValue();

    // Check if the locale code is available.
    if (llvm::none_of(localeCodes, [&](const char *locale) {
          return localeCode == locale;
        })) {
      // Unknown locale: warn, listing every supported locale code.
      std::string availableLocaleCodes = "";
      llvm::interleave(
          std::begin(localeCodes), std::end(localeCodes),
          [&](std::string locale) { availableLocaleCodes += locale; },
          [&] { availableLocaleCodes += ", "; });

      Diags.diagnose(SourceLoc(), diag::warning_invalid_locale_code,
                     availableLocaleCodes);
    } else {
      Opts.LocalizationCode = localeCode;
    }
  }

  if (Arg *A = Args.getLastArg(OPT_localization_path)) {
    if (!llvm::sys::fs::exists(A->getValue())) {
      Diags.diagnose(SourceLoc(), diag::warning_locale_path_not_found,
                     A->getValue());
    } else if (!Opts.LocalizationCode.empty()) {
      // Check if the localization path exists but it doesn't have a file
      // for the specified locale code.
      llvm::SmallString<128> localizationPath(A->getValue());
      llvm::sys::path::append(localizationPath, Opts.LocalizationCode);
      llvm::sys::path::replace_extension(localizationPath, ".strings");
      if (!llvm::sys::fs::exists(localizationPath)) {
        Diags.diagnose(SourceLoc(), diag::warning_cannot_find_locale_file,
                       Opts.LocalizationCode, localizationPath);
      }

      Opts.LocalizationPath = A->getValue();
    }
  }
  return false;
}

/// Apply already-parsed diagnostic \p Options to \p Diagnostics.
///
/// \p mainExecutablePath is used to derive the default localization directory
/// (../share/swift/diagnostics relative to the executable) when no explicit
/// localization path was provided.
static void configureDiagnosticEngine(
    const DiagnosticOptions &Options,
    std::optional<version::Version> effectiveLanguageVersion,
    StringRef mainExecutablePath, DiagnosticEngine &Diagnostics) {
  if (Options.ShowDiagnosticsAfterFatalError) {
    Diagnostics.setShowDiagnosticsAfterFatalError();
  }
  if (Options.SuppressWarnings) {
    Diagnostics.setSuppressWarnings(true);
  }
  if (Options.SuppressNotes) {
    Diagnostics.setSuppressNotes(true);
  }
  if (Options.SuppressRemarks) {
    Diagnostics.setSuppressRemarks(true);
  }
  Diagnostics.setWarningGroupControlRules(Options.WarningGroupControlRules);
  Diagnostics.setPrintDiagnosticNamesMode(Options.PrintDiagnosticNames);

  std::string docsPath = Options.DiagnosticDocumentationPath;
  if (docsPath.empty()) {
    // Point at the latest Markdown documentation on GitHub.
    docsPath = "https://docs.swift.org/compiler/documentation/diagnostics";
  }
  Diagnostics.setDiagnosticDocumentationPath(docsPath);

  if (!Options.LocalizationCode.empty()) {
    std::string locPath = Options.LocalizationPath;
    if (locPath.empty()) {
      // Derive the default localization directory from the executable path.
      llvm::SmallString<128> locPathBuffer(mainExecutablePath);
      llvm::sys::path::remove_filename(locPathBuffer); // Remove /swift
      llvm::sys::path::remove_filename(locPathBuffer); // Remove /bin
      llvm::sys::path::append(locPathBuffer, "share", "swift", "diagnostics");
      locPath = locPathBuffer.str();
    }
    Diagnostics.setLocalization(Options.LocalizationCode, locPath);
  }

  if (effectiveLanguageVersion)
    Diagnostics.setLanguageVersion(*effectiveLanguageVersion);
}

/// Configures the diagnostic engine for the invocation's options.
void CompilerInvocation::setUpDiagnosticEngine(DiagnosticEngine &diags) {
  configureDiagnosticEngine(DiagnosticOpts, LangOpts.EffectiveLanguageVersion,
                            FrontendOpts.MainExecutablePath, diags);

  // Once configured, immediately diagnose any unknown warning groups that were
  // encountered while parsing the diagnostic options.
  for (const auto &unknownGroup : DiagnosticOpts.UnknownWarningGroups) {
    diags.diagnose(SourceLoc(), diag::unknown_warning_group, unknownGroup);
  }
}

/// Parse -enforce-exclusivity=... options.
///
/// Sets the static/dynamic exclusivity-enforcement flags in \p Opts;
/// an unrecognized argument is diagnosed but does not abort parsing.
void parseExclusivityEnforcementOptions(const llvm::opt::Arg *A,
                                        SILOptions &Opts,
                                        DiagnosticEngine &Diags) {
  StringRef Argument = A->getValue();
  if (Argument == "unchecked") {
    // This option is analogous to the -Ounchecked optimization setting.
    // It will disable dynamic checking but still diagnose statically.
    Opts.EnforceExclusivityStatic = true;
    Opts.EnforceExclusivityDynamic = false;
  } else if (Argument == "checked") {
    Opts.EnforceExclusivityStatic = true;
    Opts.EnforceExclusivityDynamic = true;
  } else if (Argument == "dynamic-only") {
    // This option is intended for staging purposes. The intent is that
    // it will eventually be removed.
    Opts.EnforceExclusivityStatic = false;
    Opts.EnforceExclusivityDynamic = true;
  } else if (Argument == "none") {
    // This option is for staging purposes.
    Opts.EnforceExclusivityStatic = false;
    Opts.EnforceExclusivityDynamic = false;
  } else {
    Diags.diagnose(SourceLoc(), diag::error_unsupported_option_argument,
                   A->getOption().getPrefixedName(), A->getValue());
  }
}

/// Map '-lto=llvm-thin' / '-lto=llvm-full' to the corresponding IRGen LTO
/// kind.
///
/// \returns std::nullopt when '-lto' was not passed, or (after diagnosing)
/// when its value is unrecognized.
static std::optional<IRGenLLVMLTOKind> ParseLLVMLTOKind(const ArgList &Args,
                                                        DiagnosticEngine &Diags) {
  std::optional<IRGenLLVMLTOKind> LLVMLTOKind;
  if (const Arg *A = Args.getLastArg(options::OPT_lto)) {
    LLVMLTOKind =
        llvm::StringSwitch<std::optional<IRGenLLVMLTOKind>>(A->getValue())
            .Case("llvm-thin", IRGenLLVMLTOKind::Thin)
            .Case("llvm-full", IRGenLLVMLTOKind::Full)
            .Default(std::nullopt);
    if (!LLVMLTOKind)
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
  }
  return LLVMLTOKind;
}

/// Parse SIL-related options from \p Args into \p Opts, also updating the
/// IRGen and Clang-importer settings that must stay in sync with them.
static bool ParseSILArgs(SILOptions &Opts, ArgList &Args,
                         IRGenOptions &IRGenOpts,
                         const FrontendOptions &FEOpts,
                         const TypeCheckerOptions &TCOpts,
                         DiagnosticEngine &Diags,
                         LangOptions &LangOpts,
                         ClangImporterOptions &ClangOpts) {
  using namespace options;

  // Integer-valued SIL tuning knobs; a malformed value is a hard error.
  if (const Arg *A = Args.getLastArg(OPT_sil_inline_threshold)) {
    if (StringRef(A->getValue()).getAsInteger(10, Opts.InlineThreshold)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return true;
    }
  }
  if (const Arg *A =
          Args.getLastArg(OPT_sil_inline_caller_benefit_reduction_factor)) {
    if (StringRef(A->getValue())
            .getAsInteger(10, Opts.CallerBaseBenefitReductionFactor)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return true;
    }
  }
  if (const Arg *A = Args.getLastArg(OPT_sil_unroll_threshold)) {
    if (StringRef(A->getValue()).getAsInteger(10, Opts.UnrollThreshold)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return true;
    }
  }

  // If we're only emitting a module, stop optimizations once we've serialized
  // the SIL for the module.
if (FEOpts.RequestedAction == FrontendOptions::ActionType::EmitModuleOnly || FEOpts.RequestedAction == FrontendOptions::ActionType::CompileModuleFromInterface || FEOpts.RequestedAction == FrontendOptions::ActionType::EmitSIB) Opts.StopOptimizationAfterSerialization = true; if (Args.getLastArg(OPT_emit_empty_object_file)) { Opts.StopOptimizationAfterSerialization = true; } // Propagate the typechecker's understanding of // -experimental-skip-*-function-bodies to SIL. Opts.SkipFunctionBodies = TCOpts.SkipFunctionBodies; // Propagate -experimental-skip-non-exportable-decls to SIL. Opts.SkipNonExportableDecls = LangOpts.SkipNonExportableDecls; // Parse the optimization level. // Default to Onone settings if no option is passed. Opts.OptMode = OptimizationMode::NoOptimization; if (const Arg *A = Args.getLastArg(OPT_O_Group)) { if (A->getOption().matches(OPT_Onone)) { // Already set. } else if (A->getOption().matches(OPT_Ounchecked)) { // Turn on optimizations and remove all runtime checks. Opts.OptMode = OptimizationMode::ForSpeed; // Removal of cond_fail (overflow on binary operations). Opts.RemoveRuntimeAsserts = true; Opts.AssertConfig = SILOptions::Unchecked; } else if (A->getOption().matches(OPT_Oplayground)) { // For now -Oplayground is equivalent to -Onone. Opts.OptMode = OptimizationMode::NoOptimization; } else if (A->getOption().matches(OPT_Osize)) { Opts.OptMode = OptimizationMode::ForSize; } else { assert(A->getOption().matches(OPT_O)); Opts.OptMode = OptimizationMode::ForSpeed; } if (Opts.shouldOptimize()) { ClangOpts.Optimization = "-Os"; } } IRGenOpts.OptMode = Opts.OptMode; if (Args.getLastArg(OPT_AssumeSingleThreaded)) { Opts.AssumeSingleThreaded = true; } Opts.IgnoreAlwaysInline |= Args.hasArg(OPT_ignore_always_inline); // Parse the assert configuration identifier. 
if (const Arg *A = Args.getLastArg(OPT_AssertConfig)) { StringRef Configuration = A->getValue(); if (Configuration == "DisableReplacement") { Opts.AssertConfig = SILOptions::DisableReplacement; } else if (Configuration == "Debug") { Opts.AssertConfig = SILOptions::Debug; } else if (Configuration == "Release") { Opts.AssertConfig = SILOptions::Release; } else if (Configuration == "Unchecked") { Opts.AssertConfig = SILOptions::Unchecked; } else { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, A->getAsString(Args), A->getValue()); return true; } } else if (FEOpts.ParseStdlib) { // Disable assertion configuration replacement when we build the standard // library. Opts.AssertConfig = SILOptions::DisableReplacement; } else if (Opts.AssertConfig == SILOptions::Debug) { // Set the assert configuration according to the optimization level if it // has not been set by the -Ounchecked flag. Opts.AssertConfig = (IRGenOpts.shouldOptimize() ? SILOptions::Release : SILOptions::Debug); } // -Ounchecked might also set removal of runtime asserts (cond_fail). 
Opts.RemoveRuntimeAsserts |= Args.hasArg(OPT_RemoveRuntimeAsserts); std::optional<DestroyHoistingOption> specifiedDestroyHoistingOption; if (Arg *A = Args.getLastArg(OPT_enable_destroy_hoisting)) { specifiedDestroyHoistingOption = llvm::StringSwitch<std::optional<DestroyHoistingOption>>(A->getValue()) .Case("true", DestroyHoistingOption::On) .Case("false", DestroyHoistingOption::Off) .Default(std::nullopt); } std::optional<CopyPropagationOption> specifiedCopyPropagationOption; if (Arg *A = Args.getLastArg(OPT_copy_propagation_state_EQ)) { specifiedCopyPropagationOption = llvm::StringSwitch<std::optional<CopyPropagationOption>>(A->getValue()) .Case("always", CopyPropagationOption::Always) .Case("optimizing", CopyPropagationOption::Optimizing) .Case("false", CopyPropagationOption::Off) .Case("requested-passes-only", CopyPropagationOption::RequestedPassesOnly) .Default(std::nullopt); // Error if copy propagation has been set via the flag at the same time. if (auto *Flag = Args.getLastArg(OPT_enable_copy_propagation)) { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_combination, Flag->getAsString(Args), A->getAsString(Args)); } } // Have ManualOwnership imply MandatoryCopyPropagation. // Once that pass becomes enabled by default, we don't need this. if (LangOpts.hasFeature(ManualOwnership)) { specifiedCopyPropagationOption = CopyPropagationOption::Always; if (auto *Flag = Args.getLastArg(OPT_copy_propagation_state_EQ)) { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_combination, Flag->getAsString(Args), "-enable-experimental-feature ManualOwnership"); } } if (Args.hasArg(OPT_enable_copy_propagation)) { specifiedCopyPropagationOption = CopyPropagationOption::Always; } if (specifiedCopyPropagationOption) { Opts.CopyPropagation = *specifiedCopyPropagationOption; } // Allow command line flags to override the default value of // Opts.LexicalLifetimes. If no explicit flags are passed, then // Opts.LexicalLifetimes retains its initial value. 
std::optional<bool> enableLexicalLifetimesFlag; if (Arg *A = Args.getLastArg(OPT_enable_lexical_lifetimes)) { enableLexicalLifetimesFlag = llvm::StringSwitch<std::optional<bool>>(A->getValue()) .Case("true", true) .Case("false", false) .Default(std::nullopt); } if (Args.getLastArg(OPT_enable_lexical_lifetimes_noArg)) { if (!enableLexicalLifetimesFlag.value_or(true)) { // Error if lexical lifetimes have been disabled via the meta-var form // and enabled via the flag. Diags.diagnose(SourceLoc(), diag::error_invalid_arg_combination, "enable-lexical-lifetimes", "enable-lexical-lifetimes=false"); return true; } else { enableLexicalLifetimesFlag = true; } } // Unless overridden below, enabling copy propagation means enabling lexical // lifetimes. if (Opts.CopyPropagation >= CopyPropagationOption::Optimizing) { Opts.LexicalLifetimes = LexicalLifetimesOption::On; Opts.DestroyHoisting = DestroyHoistingOption::On; } // Unless overridden below, disable copy propagation means disabling lexical // lifetimes. if (Opts.CopyPropagation == CopyPropagationOption::Off) { Opts.LexicalLifetimes = LexicalLifetimesOption::DiagnosticMarkersOnly; Opts.DestroyHoisting = DestroyHoistingOption::Off; } // If move-only is enabled, always enable lexical lifetime as well. Move-only // depends on lexical lifetimes. 
if (Args.hasArg(OPT_enable_experimental_move_only)) Opts.LexicalLifetimes = LexicalLifetimesOption::On; if (enableLexicalLifetimesFlag) { if (*enableLexicalLifetimesFlag) { Opts.LexicalLifetimes = LexicalLifetimesOption::On; } else { Opts.LexicalLifetimes = LexicalLifetimesOption::DiagnosticMarkersOnly; } } if (specifiedDestroyHoistingOption) Opts.DestroyHoisting = *specifiedDestroyHoistingOption; std::optional<bool> enablePackMetadataStackPromotionFlag; if (Arg *A = Args.getLastArg(OPT_enable_pack_metadata_stack_promotion)) { enablePackMetadataStackPromotionFlag = llvm::StringSwitch<std::optional<bool>>(A->getValue()) .Case("true", true) .Case("false", false) .Default(std::nullopt); } if (Args.getLastArg(OPT_enable_pack_metadata_stack_promotion_noArg)) { if (!enablePackMetadataStackPromotionFlag.value_or(true)) { // Error if pack metadata stack promotion has been disabled via the // meta-var form and enabled via the flag. Diags.diagnose(SourceLoc(), diag::error_invalid_arg_combination, "enable-pack-metadata-stack-promotion", "enable-pack-metadata-stack-promotion=false"); return true; } else { enablePackMetadataStackPromotionFlag = true; } } if (enablePackMetadataStackPromotionFlag) Opts.EnablePackMetadataStackPromotion = enablePackMetadataStackPromotionFlag.value(); Opts.EnableARCOptimizations &= !Args.hasArg(OPT_disable_arc_opts); Opts.EnableRecompilationToOSSAModule |= Args.hasArg(OPT_enable_recompilation_to_ossa_module); Opts.EnableOSSAOptimizations &= !Args.hasArg(OPT_disable_ossa_opts); Opts.EnableSILOpaqueValues = Args.hasFlag( OPT_enable_sil_opaque_values, OPT_disable_sil_opaque_values, false); Opts.EnableAsyncDemotion |= Args.hasArg(OPT_enable_async_demotion); Opts.EnableThrowsPrediction = Args.hasFlag( OPT_enable_throws_prediction, OPT_disable_throws_prediction, Opts.EnableThrowsPrediction); Opts.EnableNoReturnCold = Args.hasFlag( OPT_enable_noreturn_prediction, OPT_disable_noreturn_prediction, Opts.EnableNoReturnCold); Opts.EnableActorDataRaceChecks |= 
      Args.hasFlag(
          OPT_enable_actor_data_race_checks,
          OPT_disable_actor_data_race_checks, /*default=*/false);
  Opts.DisableSILPerfOptimizations |= Args.hasArg(OPT_disable_sil_perf_optzns);

  // Select the cross-module-optimization mode; the first matching flag wins.
  if (Args.hasArg(OPT_CrossModuleOptimization)) {
    Opts.CMOMode = CrossModuleOptimizationMode::Aggressive;
  } else if (Args.hasArg(OPT_EnableDefaultCMO)) {
    Opts.CMOMode = CrossModuleOptimizationMode::Default;
  } else if (Args.hasArg(OPT_EnableCMOEverything)) {
    Opts.CMOMode = CrossModuleOptimizationMode::Everything;
  }

  // Package CMO additionally requires non-resilient access within the
  // package and library evolution; otherwise diagnose and leave CMO alone.
  if (Args.hasArg(OPT_ExperimentalPackageCMO) ||
      Args.hasArg(OPT_PackageCMO) ||
      LangOpts.hasFeature(Feature::PackageCMO)) {
    if (!LangOpts.AllowNonResilientAccess) {
      // Suppress the diagnostic when merely typechecking a module interface.
      if (FEOpts.RequestedAction !=
          FrontendOptions::ActionType::TypecheckModuleFromInterface)
        Diags.diagnose(SourceLoc(), diag::ignoring_option_requires_option,
                       "-package-cmo", "-allow-non-resilient-access");
    } else if (!LangOpts.hasFeature(Feature::LibraryEvolution)) {
      Diags.diagnose(SourceLoc(),
                     diag::package_cmo_requires_library_evolution);
    } else {
      Opts.EnableSerializePackage = true;
      Opts.CMOMode = CrossModuleOptimizationMode::Default;
    }
  }

  Opts.EnableStackProtection =
      Args.hasFlag(OPT_enable_stack_protector, OPT_disable_stack_protector,
                   Opts.EnableStackProtection);
  // NOTE(review): this uses hasArg while the sibling option above uses
  // hasFlag(enable, disable, default), and the third argument here is a
  // bool rather than an option ID (it compiles via the implicit
  // OptSpecifier-from-unsigned conversion). As written, the presence of
  // either flag — including -disable-stack-protector — yields true. This
  // looks like it was meant to be hasFlag; confirm before changing.
  Opts.EnableMoveInoutStackProtection = Args.hasArg(
      OPT_enable_move_inout_stack_protector, OPT_disable_stack_protector,
      Opts.EnableMoveInoutStackProtection);
  Opts.EnableImportPtrauthFieldFunctionPointers =
      !Args.hasArg(OPT_disable_import_ptrauth_field_function_pointers);
  Opts.EnableLifetimeDependenceDiagnostics =
      Args.hasFlag(OPT_enable_lifetime_dependence_diagnostics,
                   OPT_disable_lifetime_dependence_diagnostics,
                   Opts.EnableLifetimeDependenceDiagnostics);

  // SIL verification and SIL-printing knobs.
  Opts.VerifyAll |= Args.hasArg(OPT_sil_verify_all);
  Opts.VerifyNone |= Args.hasArg(OPT_sil_verify_none);
  Opts.VerifyOwnershipAll |= Args.hasArg(OPT_sil_ownership_verify_all);
  Opts.DebugSerialization |= Args.hasArg(OPT_sil_debug_serialization);
  Opts.EmitVerboseSIL |= Args.hasArg(OPT_emit_verbose_sil);
  Opts.EmitSortedSIL |= Args.hasArg(OPT_emit_sorted_sil);
  Opts.PrintFullConvention |=
      Args.hasArg(OPT_experimental_print_full_convention);
  Opts.PrintInstCounts |= Args.hasArg(OPT_print_inst_counts);
  Opts.StopOptimizationBeforeLoweringOwnership |=
      Args.hasArg(OPT_sil_stop_optzns_before_lowering_ownership);
  if (const Arg *A = Args.getLastArg(OPT_external_pass_pipeline_filename))
    Opts.ExternalPassPipelineFilename = A->getValue();

  // Profiling: instrumentation generation and profile-guided optimization.
  Opts.GenerateProfile |= Args.hasArg(OPT_profile_generate);
  const Arg *ProfileUse = Args.getLastArg(OPT_profile_use);
  Opts.UseProfile = ProfileUse ? ProfileUse->getValue() : "";
  Opts.EmitProfileCoverageMapping |= Args.hasArg(OPT_profile_coverage_mapping);

  Opts.DisableSILPartialApply |= Args.hasArg(OPT_disable_sil_partial_apply);
  Opts.VerifySILOwnership &= !Args.hasArg(OPT_disable_sil_ownership_verifier);
  Opts.EnableDynamicReplacementCanCallPreviousImplementation = !Args.hasArg(
      OPT_disable_previous_implementation_calls_in_dynamic_replacements);
  Opts.ParseStdlib = FEOpts.ParseStdlib;
  Opts.emitTBD = FEOpts.InputsAndOutputs.hasTBDPath();

  // Optimization-remark serialization format; error out on an unknown value.
  if (const Arg *A = Args.getLastArg(OPT_save_optimization_record_EQ)) {
    llvm::Expected<llvm::remarks::Format> formatOrErr =
        llvm::remarks::parseFormat(A->getValue());
    if (llvm::Error err = formatOrErr.takeError()) {
      Diags.diagnose(SourceLoc(), diag::error_creating_remark_serializer,
                     toString(std::move(err)));
      return true;
    }
    Opts.OptRecordFormat = *formatOrErr;
  }

  Opts.EnableGlobalAssemblyVision = Args.hasFlag(
      OPT_enable_assembly_vision_all, OPT_disable_assembly_vision_all,
      Opts.EnableGlobalAssemblyVision);

  if (const Arg *A = Args.getLastArg(OPT_save_optimization_record_passes))
    Opts.OptRecordPasses = A->getValue();

  // Only use getLastArg for single -save-optimization-record-path.
  // With multiple paths (multi-threaded WMO), FrontendTool will populate
  // OptRecordFile and AuxOptRecordFiles from command-line arguments.
  auto allOptRecordPaths =
      Args.getAllArgValues(OPT_save_optimization_record_path);
  if (allOptRecordPaths.size() == 1)
    Opts.OptRecordFile = allOptRecordPaths[0];

  // If any of the '-g<kind>', except '-gnone', is given,
  // tell the SILPrinter to print debug info as well
  if (const Arg *A = Args.getLastArg(OPT_g_Group)) {
    if (!A->getOption().matches(options::OPT_gnone))
      Opts.PrintDebugInfo = true;
  }

  if (Args.hasArg(OPT_legacy_gsil))
    llvm::WithColor::warning() << "'-gsil' is deprecated, "
                               << "use '-sil-based-debuginfo' instead\n";
  if (Args.hasArg(OPT_debug_on_sil)) {
    // Derive the name of the SIL file for debugging from
    // the regular outputfile.
    std::string BaseName = FEOpts.InputsAndOutputs.getSingleOutputFilename();
    // If there are no or multiple outputfiles, derive the name
    // from the module name.
    if (BaseName.empty())
      BaseName = FEOpts.ModuleName;
    Opts.SILOutputFileNameForDebugging = BaseName;
  }

  // Sanitizer configuration is parsed once here and mirrored into IRGenOpts.
  if (const Arg *A = Args.getLastArg(options::OPT_sanitize_EQ)) {
    Opts.Sanitizers = parseSanitizerArgValues(
        Args, A, LangOpts.Target, Diags,
        /* sanitizerRuntimeLibExists= */ [](StringRef libName, bool shared) {
          // The driver has checked the existence of the library
          // already.
          return true;
        });
    IRGenOpts.Sanitizers = Opts.Sanitizers;
  }

  if (const Arg *A = Args.getLastArg(options::OPT_sanitize_recover_EQ)) {
    IRGenOpts.SanitizersWithRecoveryInstrumentation =
        parseSanitizerRecoverArgValues(A, Opts.Sanitizers, Diags,
                                       /*emitWarnings=*/true);
  }

  if (const Arg *A =
          Args.getLastArg(options::OPT_sanitize_address_use_odr_indicator)) {
    IRGenOpts.SanitizeAddressUseODRIndicator =
        parseSanitizerAddressUseODRIndicator(A, Opts.Sanitizers, Diags);
  }

  if (const Arg *A = Args.getLastArg(options::OPT_sanitize_stable_abi_EQ)) {
    IRGenOpts.SanitizerUseStableABI =
        parseSanitizerUseStableABI(A, Opts.Sanitizers, Diags);
  }

  if (auto A = Args.getLastArg(OPT_enable_verify_exclusivity,
                               OPT_disable_verify_exclusivity)) {
    Opts.VerifyExclusivity =
        A->getOption().matches(OPT_enable_verify_exclusivity);
  }
  // If runtime asserts are disabled in general, also disable runtime
  // exclusivity checks unless explicitly requested.
  if (Opts.RemoveRuntimeAsserts)
    Opts.EnforceExclusivityDynamic = false;
  if (const Arg *A = Args.getLastArg(options::OPT_enforce_exclusivity_EQ)) {
    parseExclusivityEnforcementOptions(A, Opts, Diags);
    // In the short term, we ignore -enforce-exclusivity=checked unless
    // in Embedded Swift unless EmbeddedDynamicExclusivity is also enabled.
    if (LangOpts.hasFeature(Feature::Embedded) &&
        Opts.EnforceExclusivityDynamic &&
        !LangOpts.hasFeature(Feature::EmbeddedDynamicExclusivity)) {
      Diags.diagnose(SourceLoc(), diag::embedded_dynamic_exclusivity_staging);
      Opts.EnforceExclusivityDynamic = false;
    }
  } else if (!Opts.RemoveRuntimeAsserts &&
             LangOpts.hasFeature(Feature::Embedded)) {
    // Embedded Swift defaults to -enforce-exclusivity=unchecked for now.
    Opts.EnforceExclusivityStatic = true;
    Opts.EnforceExclusivityDynamic = false;
  }

  Opts.NoAllocations = Args.hasArg(OPT_no_allocations);

  // If these optimizations are enabled never preserve functions for the
  // debugger.
  Opts.ShouldFunctionsBePreservedToDebugger =
      !Args.hasArg(OPT_enable_llvm_wme);
  Opts.ShouldFunctionsBePreservedToDebugger &=
      !Args.hasArg(OPT_enable_llvm_vfe);
  if (auto LTOKind = ParseLLVMLTOKind(Args, Diags))
    Opts.ShouldFunctionsBePreservedToDebugger &=
        LTOKind.value() == IRGenLLVMLTOKind::None;

  Opts.EnableAddressDependencies = Args.hasFlag(
      OPT_enable_address_dependencies, OPT_disable_address_dependencies,
      Opts.EnableAddressDependencies);

  if (LangOpts.Target.isOSDarwin() || LangOpts.Target.isOSLinux()) {
    // On Darwin and Linux, use yield_once_2 by default.
    Opts.CoroutineAccessorsUseYieldOnce2 = true;
  }
  // Explicit enable/disable flags override the platform default set above.
  Opts.CoroutineAccessorsUseYieldOnce2 =
      Args.hasFlag(OPT_enable_callee_allocated_coro_abi,
                   OPT_disable_callee_allocated_coro_abi,
                   Opts.CoroutineAccessorsUseYieldOnce2);

  Opts.MergeableTraps = Args.hasArg(OPT_mergeable_traps);

  return false;
}

/// Render the frontend invocation into \p Output as one shell-style string
/// (used to record the invocation in debug info; see the
/// -debug-info-store-invocation handling in ParseIRGenArgs).
///
/// Cache-invariant options are dropped, file-list paths are replaced by a
/// "<filelist>" placeholder, and the SDK path / resource directory are
/// appended when nonempty and not already present on the command line.
void CompilerInvocation::buildDebugFlags(std::string &Output,
                                         const ArgList &Args,
                                         StringRef SDKPath,
                                         StringRef ResourceDir) {
  ArgStringList ReducedArgs;
  for (auto *A : Args) {
    // Do not encode cache invariant options, even for non-caching build.
    // Those options do not affect compilation task thus do not need to be
    // tracked.
    if (A->getOption().hasFlag(options::CacheInvariant))
      continue;

    A->render(Args, ReducedArgs);
    // If the argument is file list, the path itself is irrelevant.
    if (A->getOption().hasFlag(options::ArgumentIsFileList)) {
      assert(A->getValues().size() == 1 &&
             A->getOption().getRenderStyle() == Option::RenderSeparateStyle &&
             "filelist options all have one argument and are all Separate<>");
      ReducedArgs.pop_back();
      ReducedArgs.push_back("<filelist>");
    }
  }

  // This isn't guaranteed to be the same temp directory as what the driver
  // uses, but it's highly likely.
  llvm::SmallString<128> TDir;
  llvm::sys::path::system_temp_directory(true, TDir);

  llvm::raw_string_ostream OS(Output);
  interleave(ReducedArgs,
             [&](const char *Argument) { PrintArg(OS, Argument, TDir.str()); },
             [&] { OS << " "; });

  // Inject the SDK path and resource dir if they are nonempty and missing.
  bool haveSDKPath = SDKPath.empty();
  bool haveResourceDir = ResourceDir.empty();
  for (auto A : ReducedArgs) {
    StringRef Arg(A);
    // FIXME: this should distinguish between key and value.
    if (!haveSDKPath && Arg == "-sdk")
      haveSDKPath = true;
    if (!haveResourceDir && Arg == "-resource-dir")
      haveResourceDir = true;
  }
  if (!haveSDKPath) {
    OS << " -sdk ";
    PrintArg(OS, SDKPath.data(), TDir.str());
  }
  if (!haveResourceDir) {
    OS << " -resource-dir ";
    PrintArg(OS, ResourceDir.data(), TDir.str());
  }
}

/// Populate the TBD-generation options \p Opts from the parsed arguments.
/// Always returns false: this parser emits no fatal errors.
static bool ParseTBDGenArgs(TBDGenOptions &Opts, ArgList &Args,
                            DiagnosticEngine &Diags,
                            CompilerInvocation &Invocation) {
  using namespace options;

  Opts.HasMultipleIGMs = Invocation.getIRGenOptions().hasMultipleIGMs();

  if (const Arg *A = Args.getLastArg(OPT_module_link_name)) {
    Opts.ModuleLinkName = A->getValue();
  }
  if (const Arg *A = Args.getLastArg(OPT_tbd_install_name)) {
    Opts.InstallName = A->getValue();
  }
  Opts.IsInstallAPI = Args.hasArg(OPT_tbd_is_installapi);
  // TBD contents must agree with the symbols IRGen will actually emit, so
  // the LLVM-level elimination flags are mirrored here.
  Opts.VirtualFunctionElimination = Args.hasArg(OPT_enable_llvm_vfe);
  Opts.WitnessMethodElimination = Args.hasArg(OPT_enable_llvm_wme);
  Opts.FragileResilientProtocols =
      Args.hasArg(OPT_enable_fragile_resilient_protocol_witnesses);
  if (const Arg *A = Args.getLastArg(OPT_tbd_compatibility_version)) {
    Opts.CompatibilityVersion = A->getValue();
  }
  if (const Arg *A = Args.getLastArg(OPT_tbd_current_version)) {
    Opts.CurrentVersion = A->getValue();
  }
  if (const Arg *A = Args.getLastArg(OPT_previous_module_installname_map_file)) {
    Opts.ModuleInstallNameMapPath = A->getValue();
  }
  for (auto A : Args.getAllArgValues(OPT_embed_tbd_for_module)) {
    Opts.embedSymbolsFromModules.push_back(StringRef(A).str());
  }
  return false;
}

static bool
// Populate the IR-generation options \p Opts from the parsed frontend
// arguments. Returns true (after diagnosing) on malformed input.
ParseIRGenArgs(IRGenOptions &Opts, ArgList &Args, DiagnosticEngine &Diags,
               const FrontendOptions &FrontendOpts, const SILOptions &SILOpts,
               const LangOptions &LangOpts, const CASOptions &CASOpts,
               StringRef SDKPath, StringRef ResourceDir,
               const llvm::Triple &Triple) {
  using namespace options;

  // SIL-based debug info forces line-tables; otherwise map the -g<kind>
  // option group onto the debug-info level.
  if (!SILOpts.SILOutputFileNameForDebugging.empty()) {
    Opts.DebugInfoLevel = IRGenDebugInfoLevel::LineTables;
  } else if (const Arg *A = Args.getLastArg(OPT_g_Group)) {
    if (A->getOption().matches(OPT_g))
      Opts.DebugInfoLevel = IRGenDebugInfoLevel::Normal;
    else if (A->getOption().matches(options::OPT_gline_tables_only))
      Opts.DebugInfoLevel = IRGenDebugInfoLevel::LineTables;
    else if (A->getOption().matches(options::OPT_gdwarf_types))
      Opts.DebugInfoLevel = IRGenDebugInfoLevel::DwarfTypes;
    else
      assert(A->getOption().matches(options::OPT_gnone) &&
             "unknown -g<kind> option");
  }

  if (Opts.DebugInfoLevel >= IRGenDebugInfoLevel::LineTables) {
    if (Args.hasArg(options::OPT_debug_info_store_invocation))
      CompilerInvocation::buildDebugFlags(Opts.DebugFlags, Args, SDKPath,
                                          ResourceDir);
    if (const Arg *A = Args.getLastArg(OPT_file_compilation_dir))
      Opts.DebugCompilationDir = A->getValue();
    else {
      // Fall back to the current working directory.
      llvm::SmallString<256> cwd;
      llvm::sys::fs::current_path(cwd);
      Opts.DebugCompilationDir = std::string(cwd.str());
    }
  }

  if (const Arg *A = Args.getLastArg(options::OPT_debug_info_format)) {
    if (A->containsValue("dwarf"))
      Opts.DebugInfoFormat = IRGenDebugInfoFormat::DWARF;
    else if (A->containsValue("codeview"))
      Opts.DebugInfoFormat = IRGenDebugInfoFormat::CodeView;
    else
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
  } else if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::None) {
    // If -g was specified but not -debug-info-format, DWARF is assumed.
    Opts.DebugInfoFormat = IRGenDebugInfoFormat::DWARF;
  }

  // -debug-info-format is meaningless without some -g<kind>.
  if (Args.hasArg(options::OPT_debug_info_format) &&
      !Args.hasArg(options::OPT_g_Group)) {
    const Arg *debugFormatArg = Args.getLastArg(options::OPT_debug_info_format);
    Diags.diagnose(SourceLoc(), diag::error_option_missing_required_argument,
                   debugFormatArg->getAsString(Args), "-g");
  }

  // CodeView is only supported at the full (-g) debug-info level.
  if (Opts.DebugInfoFormat == IRGenDebugInfoFormat::CodeView &&
      (Opts.DebugInfoLevel == IRGenDebugInfoLevel::LineTables ||
       Opts.DebugInfoLevel == IRGenDebugInfoLevel::DwarfTypes)) {
    const Arg *debugFormatArg = Args.getLastArg(options::OPT_debug_info_format);
    Diags.diagnose(SourceLoc(), diag::error_argument_not_allowed_with,
                   debugFormatArg->getAsString(Args),
                   Opts.DebugInfoLevel == IRGenDebugInfoLevel::LineTables
                       ? "-gline-tables-only"
                       : "-gdwarf_types");
  }

  // Accept DWARF versions 2 through 5 only.
  if (auto A = Args.getLastArg(OPT_dwarf_version)) {
    unsigned vers;
    if (!StringRef(A->getValue()).getAsInteger(10, vers) && vers >= 2 &&
        vers <= 5)
      Opts.DWARFVersion = vers;
    else
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
  }

  if (const Arg *A = Args.getLastArg(options::OPT_debug_module_path))
    Opts.DebugModulePath = A->getValue();

  // -file-prefix-map feeds all three remapping tables; the more specific
  // -debug-prefix-map / -coverage-prefix-map options layer on top.
  for (auto A : Args.getAllArgValues(options::OPT_file_prefix_map)) {
    auto SplitMap = StringRef(A).split('=');
    Opts.FilePrefixMap.addMapping(SplitMap.first, SplitMap.second);
    Opts.DebugPrefixMap.addMapping(SplitMap.first, SplitMap.second);
    Opts.CoveragePrefixMap.addMapping(SplitMap.first, SplitMap.second);
  }

  for (auto A : Args.getAllArgValues(options::OPT_debug_prefix_map)) {
    auto SplitMap = StringRef(A).split('=');
    Opts.DebugPrefixMap.addMapping(SplitMap.first, SplitMap.second);
  }

  for (auto A : Args.getAllArgValues(options::OPT_coverage_prefix_map)) {
    auto SplitMap = StringRef(A).split('=');
    Opts.CoveragePrefixMap.addMapping(SplitMap.first, SplitMap.second);
  }

  // Forward only -D/-U macro definitions from -Xcc to IRGen.
  for (const Arg *A : Args.filtered(OPT_Xcc)) {
    StringRef Opt = A->getValue();
    if (Opt.starts_with("-D") || Opt.starts_with("-U"))
      Opts.ClangDefines.push_back(Opt.str());
  }

  for (const Arg *A : Args.filtered(OPT_l, OPT_framework)) {
    LibraryKind Kind;
    if (A->getOption().matches(OPT_l)) {
      Kind = LibraryKind::Library;
    } else if (A->getOption().matches(OPT_framework)) {
      Kind = LibraryKind::Framework;
    } else {
      llvm_unreachable("Unknown LinkLibrary option kind");
    }
    Opts.LinkLibraries.emplace_back(A->getValue(), Kind, /*static=*/false);
  }

  if (auto valueNames = Args.getLastArg(OPT_disable_llvm_value_names,
                                        OPT_enable_llvm_value_names)) {
    Opts.HasValueNamesSetting = true;
    Opts.ValueNames =
        valueNames->getOption().matches(OPT_enable_llvm_value_names);
  }

  Opts.DisableLLVMOptzns |= Args.hasArg(OPT_disable_llvm_optzns);
  Opts.DisableSwiftSpecificLLVMOptzns |=
      Args.hasArg(OPT_disable_swift_specific_llvm_optzns);
  if (Args.hasArg(OPT_disable_llvm_verify))
    Opts.Verify = false;
  Opts.VerifyEach = Args.hasFlag(OPT_enable_llvm_verify_each,
                                 OPT_disable_llvm_verify_each, Opts.VerifyEach);

  Opts.EmitStackPromotionChecks |= Args.hasArg(OPT_stack_promotion_checks);
  if (const Arg *A = Args.getLastArg(OPT_stack_promotion_limit)) {
    unsigned limit;
    if (StringRef(A->getValue()).getAsInteger(10, limit)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return true;
    }
    Opts.StackPromotionSizeLimit = limit;
  }

  if (Args.hasArg(OPT_trap_function))
    Opts.TrapFuncName = Args.getLastArgValue(OPT_trap_function).str();

  Opts.FunctionSections = Args.hasArg(OPT_function_sections);

  Opts.VerboseAsm = Args.hasFlag(OPT_verbose_asm, OPT_no_verbose_asm,
                                 /*default*/ true);

  // -autolink-force-load takes its symbol name from -module-link-name.
  if (Args.hasArg(OPT_autolink_force_load))
    Opts.ForceLoadSymbolName = Args.getLastArgValue(OPT_module_link_name).str();

  Opts.ModuleName = FrontendOpts.ModuleName;

  if (Args.hasArg(OPT_no_clang_module_breadcrumbs))
    Opts.DisableClangModuleSkeletonCUs = true;

  if (auto A = Args.getLastArg(OPT_enable_round_trip_debug_types,
                               OPT_disable_round_trip_debug_types)) {
    Opts.DisableRoundTripDebugTypes =
        A->getOption().matches(OPT_disable_round_trip_debug_types);
  }

  if (Args.hasArg(OPT_disable_debugger_shadow_copies))
    Opts.DisableDebuggerShadowCopies = true;

  if (Args.hasArg(OPT_disable_concrete_type_metadata_mangled_name_accessors))
    Opts.DisableConcreteTypeMetadataMangledNameAccessors = true;

  if (Args.hasArg(OPT_disable_standard_substitutions_in_reflection_mangling))
    Opts.DisableStandardSubstitutionsInReflectionMangling = true;

  if (Args.hasArg(OPT_use_jit)) {
    Opts.UseJIT = true;
    // -dump-jit selects which JIT debug artifact to dump.
    if (const Arg *A = Args.getLastArg(OPT_dump_jit)) {
      std::optional<swift::JITDebugArtifact> artifact =
          llvm::StringSwitch<std::optional<swift::JITDebugArtifact>>(
              A->getValue())
              .Case("llvm-ir", JITDebugArtifact::LLVMIR)
              .Case("object", JITDebugArtifact::Object)
              .Default(std::nullopt);
      if (!artifact) {
        Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                       A->getOption().getName(), A->getValue());
        return true;
      }
      Opts.DumpJIT = *artifact;
    }
  }

  for (const Arg *A : Args.filtered(OPT_load_pass_plugin_EQ)) {
    Opts.LLVMPassPlugins.push_back(A->getValue());
  }

  for (const Arg *A : Args.filtered(OPT_verify_type_layout)) {
    Opts.VerifyTypeLayoutNames.push_back(A->getValue());
  }

  for (const Arg *A : Args.filtered(OPT_disable_autolink_framework)) {
    Opts.DisableAutolinkFrameworks.push_back(A->getValue());
  }

  for (const Arg *A : Args.filtered(OPT_disable_autolink_library)) {
    Opts.DisableAutolinkLibraries.push_back(A->getValue());
  }

  Opts.DisableFrameworkAutolinking =
      Args.hasArg(OPT_disable_autolink_frameworks);
  Opts.DisableAllAutolinking = Args.hasArg(OPT_disable_all_autolinking);
  Opts.DisableForceLoadSymbols = Args.hasArg(OPT_disable_force_load_symbols);

  // Profiling instrumentation / profile-use options.
  Opts.GenerateProfile |= Args.hasArg(OPT_profile_generate);
  const Arg *ProfileUse = Args.getLastArg(OPT_profile_use);
  Opts.UseProfile = ProfileUse ? ProfileUse->getValue() : "";
  const Arg *ProfileSampleUse = Args.getLastArg(OPT_profile_sample_use);
  Opts.UseSampleProfile = ProfileSampleUse ? ProfileSampleUse->getValue() : "";

  Opts.EnableIRProfileGen = Args.hasArg(OPT_ir_profile_generate) ||
                            Args.hasArg(OPT_ir_profile_generate_EQ);
  if (auto V = Args.getLastArgValue(OPT_ir_profile_generate_EQ); !V.empty())
    Opts.InstrProfileOutput = V.str();
  Opts.EnableCSIRProfileGen = Args.hasArg(OPT_cs_profile_generate) ||
                              Args.hasArg(OPT_cs_profile_generate_EQ);
  if (auto V = Args.getLastArgValue(OPT_cs_profile_generate_EQ); !V.empty())
    Opts.InstrProfileOutput = V.str();
  const Arg *IRProfileUse = Args.getLastArg(OPT_ir_profile_use);
  Opts.UseIRProfile = IRProfileUse ? IRProfileUse->getValue() : "";

  Opts.DebugInfoForProfiling |= Args.hasArg(OPT_debug_info_for_profiling);

  if (const Arg *A = Args.getLastArg(OPT_min_valid_pointer_value)) {
    // The LeastValidPointerValue is hard-coded in the runtime. Therefore it
    // can only safely customized in embedded swift - which doesn't have a runtime.
    if (!LangOpts.hasFeature(Feature::Embedded)) {
      Diags.diagnose(SourceLoc(), diag::min_ptr_value_without_embedded);
      return true;
    }
    if (StringRef(A->getValue())
            .getAsInteger(0, Opts.CustomLeastValidPointerValue)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return true;
    }
  }

  Opts.PrintInlineTree |= Args.hasArg(OPT_print_llvm_inline_tree);
  // Always producing all outputs when caching is enabled.
  Opts.AlwaysCompile |= Args.hasArg(OPT_always_compile_output_files) ||
                        CASOpts.EnableCaching;

  Opts.EnableDynamicReplacementChaining |=
      Args.hasArg(OPT_enable_dynamic_replacement_chaining);

  if (auto A = Args.getLastArg(OPT_enable_type_layouts,
                               OPT_disable_type_layouts)) {
    Opts.UseTypeLayoutValueHandling =
        A->getOption().matches(OPT_enable_type_layouts);
  } else if (Opts.OptMode == OptimizationMode::NoOptimization) {
    // Disable type layouts at Onone except if explicitly requested.
    Opts.UseTypeLayoutValueHandling = false;
  }

  Opts.ForceStructTypeLayouts = Args.hasArg(OPT_force_struct_type_layouts) &&
                                Opts.UseTypeLayoutValueHandling;

  // This is set to true by default.
  Opts.UseIncrementalLLVMCodeGen &=
      !Args.hasArg(OPT_disable_incremental_llvm_codegeneration);

  if (Args.hasArg(OPT_embed_bitcode))
    Opts.EmbedMode = IRGenEmbedMode::EmbedBitcode;
  else if (Args.hasArg(OPT_embed_bitcode_marker))
    Opts.EmbedMode = IRGenEmbedMode::EmbedMarker;

  if (Opts.EmbedMode == IRGenEmbedMode::EmbedBitcode) {
    // Keep track of backend options so we can embed them in a separate data
    // section and use them when building from the bitcode. This can be removed
    // when all the backend options are recorded in the IR.
    for (const Arg *A : Args) {
      // Do not encode output and input.
      if (A->getOption().getID() == options::OPT_o ||
          A->getOption().getID() == options::OPT_INPUT ||
          A->getOption().getID() == options::OPT_primary_file ||
          A->getOption().getID() == options::OPT_embed_bitcode)
        continue;
      ArgStringList ASL;
      A->render(Args, ASL);
      for (ArgStringList::iterator it = ASL.begin(), ie = ASL.end(); it != ie;
           ++it) {
        StringRef ArgStr(*it);
        Opts.CmdArgs.insert(Opts.CmdArgs.end(), ArgStr.begin(), ArgStr.end());
        // using \00 to terminate to avoid problem decoding.
        Opts.CmdArgs.push_back('\0');
      }
    }
  }

  if (auto LTOKind = ParseLLVMLTOKind(Args, Diags))
    Opts.LLVMLTOKind = LTOKind.value();

  if (const Arg *A = Args.getLastArg(options::OPT_sanitize_coverage_EQ)) {
    Opts.SanitizeCoverage =
        parseSanitizerCoverageArgValue(A, Triple, Diags, Opts.Sanitizers);
  } else if (Opts.Sanitizers & SanitizerKind::Fuzzer) {
    // Automatically set coverage flags, unless coverage type was explicitly
    // requested.
    // Updated to match clang at Jul 2019.
    Opts.SanitizeCoverage.IndirectCalls = true;
    Opts.SanitizeCoverage.TraceCmp = true;
    Opts.SanitizeCoverage.PCTable = true;
    if (Triple.isOSLinux()) {
      Opts.SanitizeCoverage.StackDepth = true;
    }
    Opts.SanitizeCoverage.Inline8bitCounters = true;
    Opts.SanitizeCoverage.CoverageType =
        llvm::SanitizerCoverageOptions::SCK_Edge;
  }

  // Reflection metadata emission modes.
  if (Args.hasArg(OPT_disable_reflection_metadata)) {
    Opts.ReflectionMetadata = ReflectionMetadataMode::None;
    Opts.EnableReflectionNames = false;
  }

  if (Args.hasArg(OPT_reflection_metadata_for_debugger_only)) {
    Opts.ReflectionMetadata = ReflectionMetadataMode::DebuggerOnly;
    Opts.EnableReflectionNames = true;
  }

  if (Args.hasArg(OPT_enable_anonymous_context_mangled_names))
    Opts.EnableAnonymousContextMangledNames = true;

  if (Args.hasArg(OPT_disable_reflection_names)) {
    Opts.EnableReflectionNames = false;
  }

  if (Args.hasArg(OPT_disable_llvm_merge_functions_pass)) {
    Opts.DisableLLVMMergeFunctions = true;
  }

  if (Args.hasArg(OPT_force_public_linkage)) {
    Opts.ForcePublicLinkage = true;
  }

  // PE/COFF cannot deal with the cross-module reference to the metadata parent
  // (e.g. NativeObject). Force the lazy initialization of the VWT always.
  Opts.LazyInitializeClassMetadata = Triple.isOSBinFormatCOFF();

  // PE/COFF cannot deal with cross-module reference to the protocol conformance
  // witness. Use a runtime initialized value for the protocol conformance
  // witness.
  Opts.LazyInitializeProtocolConformances = Triple.isOSBinFormatCOFF();

  // PE/COFF cannot deal with the cross-module reference to the
  // AsyncFunctionPointer data block. Force the use of indirect
  // AsyncFunctionPointer access.
  Opts.IndirectAsyncFunctionPointer = Triple.isOSBinFormatCOFF();

  // PE/COFF cannot deal with the cross-module reference to the
  // CoroFunctionPointer data block. Force the use of indirect
  // CoroFunctionPointer access.
  Opts.IndirectCoroFunctionPointer = Triple.isOSBinFormatCOFF();

  // On some Harvard architectures that allow sliding code and data address space
  // offsets independently, it's impossible to make direct relative reference to
  // code from data because the relative offset between them is not representable.
  // Use absolute function references instead of relative ones on such targets.
  // TODO(katei): This is a short-term solution until the WebAssembly target stabilizes
  // PIC and 64-bit specifications and toolchain support.
  Opts.CompactAbsoluteFunctionPointer = Triple.isOSBinFormatWasm();

  if (Args.hasArg(OPT_disable_legacy_type_info)) {
    Opts.DisableLegacyTypeInfo = true;
  }

  if (Args.hasArg(OPT_prespecialize_generic_metadata) &&
      !Args.hasArg(OPT_disable_generic_metadata_prespecialization)) {
    Opts.PrespecializeGenericMetadata = true;
  }

  if (Args.hasArg(OPT_emit_singleton_metadata_pointer)) {
    Opts.EmitSingletonMetadataPointers = true;
  }

  if (const Arg *A = Args.getLastArg(OPT_read_legacy_type_info_path_EQ)) {
    Opts.ReadLegacyTypeInfoPath = A->getValue();
  }

  for (const auto &Lib : Args.getAllArgValues(options::OPT_autolink_library))
    Opts.LinkLibraries.emplace_back(Lib, LibraryKind::Library,
                                    /*static=*/false);

  for (const auto &Lib :
       Args.getAllArgValues(options::OPT_public_autolink_library))
    Opts.PublicLinkLibraries.push_back(std::make_tuple(Lib, /*static=*/false));

  if (const Arg *A = Args.getLastArg(OPT_type_info_dump_filter_EQ)) {
    StringRef mode(A->getValue());
    if (mode == "all")
      Opts.TypeInfoFilter = IRGenOptions::TypeInfoDumpFilter::All;
    else if (mode == "resilient")
      Opts.TypeInfoFilter = IRGenOptions::TypeInfoDumpFilter::Resilient;
    else if (mode == "fragile")
      Opts.TypeInfoFilter = IRGenOptions::TypeInfoDumpFilter::Fragile;
    else {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
    }
  }

  // Resolve the runtime-compatibility version to autolink against: an
  // explicit -runtime-compatibility-version wins ("none" disables it),
  // otherwise it is derived from the deployment target.
  auto getRuntimeCompatVersion = [&]() -> std::optional<llvm::VersionTuple> {
    std::optional<llvm::VersionTuple> runtimeCompatibilityVersion;
    if (auto versionArg =
            Args.getLastArg(options::OPT_runtime_compatibility_version)) {
      auto version = StringRef(versionArg->getValue());
      if (version == "none") {
        runtimeCompatibilityVersion = std::nullopt;
      } else if (version == "5.0") {
        runtimeCompatibilityVersion = llvm::VersionTuple(5, 0);
      } else if (version == "5.1") {
        runtimeCompatibilityVersion = llvm::VersionTuple(5, 1);
      } else if (version == "5.5") {
        runtimeCompatibilityVersion = llvm::VersionTuple(5, 5);
      } else if (version == "5.6") {
        runtimeCompatibilityVersion = llvm::VersionTuple(5, 6);
      } else if (version == "5.8") {
        runtimeCompatibilityVersion = llvm::VersionTuple(5, 8);
      } else if (version == "6.0") {
        runtimeCompatibilityVersion = llvm::VersionTuple(6, 0);
      } else if (version == "6.2") {
        runtimeCompatibilityVersion = llvm::VersionTuple(6, 2);
      } else {
        Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                       versionArg->getAsString(Args), version);
      }
    } else {
      runtimeCompatibilityVersion =
          getSwiftRuntimeCompatibilityVersionForTarget(Triple);
    }
    return runtimeCompatibilityVersion;
  };

  // Autolink runtime compatibility libraries, if asked to.
  if (!Args.hasArg(options::OPT_disable_autolinking_runtime_compatibility)) {
    Opts.AutolinkRuntimeCompatibilityLibraryVersion = getRuntimeCompatVersion();
  }

  if (!Args.hasArg(
          options::OPT_disable_autolinking_runtime_compatibility_dynamic_replacements)) {
    Opts.AutolinkRuntimeCompatibilityDynamicReplacementLibraryVersion =
        getRuntimeCompatVersion();
  }

  if (!Args.hasArg(
          options::OPT_disable_autolinking_runtime_compatibility_concurrency)) {
    Opts.AutolinkRuntimeCompatibilityConcurrencyLibraryVersion =
        getRuntimeCompatVersion();
  }

  Opts.AutolinkRuntimeCompatibilityBytecodeLayoutsLibrary = Args.hasArg(
      options::OPT_enable_autolinking_runtime_compatibility_bytecode_layouts);

  if (const Arg *A = Args.getLastArg(OPT_num_threads)) {
    if (StringRef(A->getValue()).getAsInteger(10, Opts.NumThreads)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return true;
    }
    // Deterministic-build mode forces single-threaded IRGen.
    if (environmentVariableRequestedMaximumDeterminism()) {
      Opts.NumThreads = 1;
      Diags.diagnose(SourceLoc(), diag::remark_max_determinism_overriding,
                     "-num-threads");
    }
  }
  Opts.UseSingleModuleLLVMEmission =
      Opts.NumThreads != 0 &&
      Args.hasArg(OPT_enable_single_module_llvm_emission);

  if (SWIFT_ENABLE_GLOBAL_ISEL_ARM64 &&
      Triple.getArch() == llvm::Triple::aarch64 &&
      Triple.getArchName() != "arm64e") {
    Opts.EnableGlobalISel = true;
  }

  if (Args.hasArg(OPT_enable_llvm_vfe)) {
    Opts.VirtualFunctionElimination = true;
  }

  if (Args.hasArg(OPT_enable_llvm_wme)) {
    Opts.WitnessMethodElimination = true;
  }

  if (Args.hasArg(OPT_conditional_runtime_records)) {
    Opts.ConditionalRuntimeRecords = true;
  }

  if (Args.hasArg(OPT_internalize_at_link)) {
    Opts.InternalizeAtLink = true;
  }

  Opts.InternalizeSymbols = FrontendOpts.Static;

  if (Args.hasArg(OPT_disable_preallocated_instantiation_caches)) {
    Opts.NoPreallocatedInstantiationCaches = true;
  }

  if (Args.hasArg(OPT_disable_readonly_static_objects)) {
    Opts.DisableReadonlyStaticObjects = true;
  }

  // Default to disabling swift async extended frame info on anything but
  // darwin. Other platforms are unlikely to have support for extended frame
  // pointer information.
  if (!Triple.isOSDarwin()) {
    Opts.SwiftAsyncFramePointer = SwiftAsyncFramePointerKind::Never;
  }
  if (const Arg *A = Args.getLastArg(OPT_swift_async_frame_pointer_EQ)) {
    StringRef mode(A->getValue());
    if (mode == "auto")
      Opts.SwiftAsyncFramePointer = SwiftAsyncFramePointerKind::Auto;
    else if (mode == "always")
      Opts.SwiftAsyncFramePointer = SwiftAsyncFramePointerKind::Always;
    else if (mode == "never")
      Opts.SwiftAsyncFramePointer = SwiftAsyncFramePointerKind::Never;
    else {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
    }
  } else if (Triple.isWatchOS() && !Triple.isSimulatorEnvironment()) {
    // watchOS does not support auto async frame pointers due to bitcode, so
    // silently override "auto" to "never" when back-deploying. This approach
    // sacrifices async backtraces when back-deploying but prevents crashes in
    // older tools that cannot handle the async frame bit in the frame pointer.
    llvm::VersionTuple OSVersion = Triple.getWatchOSVersion();
    if (OSVersion.getMajor() < 8)
      Opts.SwiftAsyncFramePointer = SwiftAsyncFramePointerKind::Never;
  }

  // The remaining enable/disable pairs use hasFlag with the current option
  // value as the default, so explicit flags always win over built-in defaults.
  Opts.EmitGenericRODatas =
      Args.hasFlag(OPT_enable_emit_generic_class_ro_t_list,
                   OPT_disable_emit_generic_class_ro_t_list,
                   Opts.EmitGenericRODatas);

  Opts.ColocateTypeDescriptors = Args.hasFlag(
      OPT_enable_colocate_type_descriptors,
      OPT_disable_colocate_type_descriptors, Opts.ColocateTypeDescriptors);

  Opts.CollocatedMetadataFunctions =
      Args.hasFlag(OPT_enable_collocate_metadata_functions,
                   OPT_disable_collocate_metadata_functions,
                   Opts.CollocatedMetadataFunctions);

  Opts.UseRelativeProtocolWitnessTables =
      Args.hasFlag(OPT_enable_relative_protocol_witness_tables,
                   OPT_disable_relative_protocol_witness_tables,
                   Opts.UseRelativeProtocolWitnessTables);

  Opts.UseFragileResilientProtocolWitnesses =
      Args.hasFlag(OPT_enable_fragile_resilient_protocol_witnesses,
                   OPT_disable_fragile_resilient_protocol_witnesses,
                   Opts.UseFragileResilientProtocolWitnesses);

  Opts.UseProfilingMarkerThunks = Args.hasFlag(
      OPT_enable_profiling_marker_thunks, OPT_disable_profiling_marker_thunks,
      Opts.UseProfilingMarkerThunks);

  Opts.EnableHotColdSplit =
      Args.hasFlag(OPT_enable_split_cold_code, OPT_disable_split_cold_code,
                   Opts.EnableHotColdSplit);

  Opts.EmitAsyncFramePushPopMetadata =
      Args.hasFlag(OPT_enable_async_frame_push_pop_metadata,
                   OPT_disable_async_frame_push_pop_metadata,
                   Opts.EmitAsyncFramePushPopMetadata);

  bool platformSupportsTypedMalloc = !llvm::Triple(Triple).isOSLinux() &&
                                     !llvm::Triple(Triple).isOSWindows();

  Opts.EmitTypeMallocForCoroFrame =
      Args.hasFlag(OPT_enable_emit_type_malloc_for_coro_frame,
                   OPT_disable_emit_type_malloc_for_coro_frame,
                   (Opts.EmitTypeMallocForCoroFrame &&
                    platformSupportsTypedMalloc));

  Opts.AsyncFramePointerAll = Args.hasFlag(OPT_enable_async_frame_pointer_all,
                                           OPT_disable_async_frame_pointer_all,
                                           Opts.AsyncFramePointerAll);

  Opts.EnableLargeLoadableTypesReg2Mem =
      Args.hasFlag(OPT_enable_large_loadable_types_reg2mem,
                   OPT_disable_large_loadable_types_reg2mem,
                   Opts.EnableLargeLoadableTypesReg2Mem);

  Opts.UseCoroCCX8664 = Args.hasFlag(
      OPT_enable_x86_64_corocc, OPT_disable_x86_64_corocc, Opts.UseCoroCCX8664);
  Opts.UseCoroCCArm64 = Args.hasFlag(
      OPT_enable_arm64_corocc, OPT_disable_arm64_corocc, Opts.UseCoroCCArm64);

  Opts.EnableLayoutStringValueWitnesses =
      Args.hasFlag(OPT_enable_layout_string_value_witnesses,
                   OPT_disable_layout_string_value_witnesses,
                   Opts.EnableLayoutStringValueWitnesses);

  Opts.EnableLayoutStringValueWitnessesInstantiation =
      Args.hasFlag(OPT_enable_layout_string_value_witnesses_instantiation,
                   OPT_disable_layout_string_value_witnesses_instantiation,
                   Opts.EnableLayoutStringValueWitnessesInstantiation);

  Opts.AnnotateCondFailMessage =
      Args.hasFlag(OPT_enable_cond_fail_message_annotation,
                   OPT_disable_cond_fail_message_annotation,
                   Opts.AnnotateCondFailMessage);

  // Instantiation of layout-string witnesses requires the witnesses
  // themselves to be enabled.
  if (Opts.EnableLayoutStringValueWitnessesInstantiation &&
      !Opts.EnableLayoutStringValueWitnesses) {
    Diags.diagnose(SourceLoc(),
                   diag::layout_string_instantiation_without_layout_strings);
    return true;
  }

  Opts.MergeableTraps = Args.hasArg(OPT_mergeable_traps);

  Opts.EnableSwiftDirectRetainRelease =
      Args.hasFlag(OPT_enable_direct_retain_release,
                   OPT_disable_direct_retain_release,
                   Opts.EnableSwiftDirectRetainRelease);

  Opts.EnableObjectiveCProtocolSymbolicReferences =
      Args.hasFlag(OPT_enable_objective_c_protocol_symbolic_references,
                   OPT_disable_objective_c_protocol_symbolic_references,
                   Opts.EnableObjectiveCProtocolSymbolicReferences);

  // Unknown values fall back to the C calling convention.
  if (const Arg *A =
          Args.getLastArg(options::OPT_platform_c_calling_convention)) {
    Opts.PlatformCCallingConvention =
        llvm::StringSwitch<llvm::CallingConv::ID>(A->getValue())
            .Case("c", llvm::CallingConv::C)
            .Case("arm_apcs", llvm::CallingConv::ARM_APCS)
            .Case("arm_aapcs", llvm::CallingConv::ARM_AAPCS)
            .Case("arm_aapcs_vfp", llvm::CallingConv::ARM_AAPCS_VFP)
            .Default(llvm::CallingConv::C);
  }

  if (Arg *A = Args.getLastArg(OPT_cas_backend_mode)) {
    Opts.CASObjMode = llvm::StringSwitch<llvm::CASBackendMode>(A->getValue())
                          .Case("native", llvm::CASBackendMode::Native)
                          .Case("casid", llvm::CASBackendMode::CASID)
                          .Case("verify", llvm::CASBackendMode::Verify)
                          .Default(llvm::CASBackendMode::Native);
  }

  Opts.UseCASBackend |= Args.hasArg(OPT_cas_backend);
  Opts.EmitCASIDFile |= Args.hasArg(OPT_cas_emit_casid_file);

  Opts.DebugCallsiteInfo |= Args.hasArg(OPT_debug_callsite_info);

  if (Args.hasArg(OPT_mergeable_symbols))
    Diags.diagnose(SourceLoc(), diag::warn_flag_deprecated,
                   "-mergeable-symbols");

  return false;
}

/// Return the migrator API-diff data-file name for \p name, with a "42"
/// suffix when the effective language version is at least 4.2 and a "4"
/// suffix otherwise.
static std::string getScriptFileName(StringRef name, version::Version &ver) {
  if (ver.isVersionAtLeast(4, 2))
    return (Twine(name) + "42" + ".json").str();
  else
    return (Twine(name) + "4" + ".json").str();
}

/// Populate the migrator options \p Opts from the parsed arguments, locating
/// the per-platform API-diff data files under the resource path unless an
/// explicit data file or directory was supplied.
static bool ParseMigratorArgs(MigratorOptions &Opts, LangOptions &LangOpts,
                              const FrontendOptions &FrontendOpts,
                              StringRef ResourcePath, const ArgList &Args,
                              DiagnosticEngine &Diags) {
  using namespace options;

  Opts.KeepObjcVisibility |= Args.hasArg(OPT_migrate_keep_objc_visibility);
  Opts.DumpUsr = Args.hasArg(OPT_dump_usr);
  if (Args.hasArg(OPT_disable_migrator_fixits)) {
    Opts.EnableMigratorFixits = false;
  }

  if (auto RemapFilePath = Args.getLastArg(OPT_emit_remap_file_path)) {
    Opts.EmitRemapFilePath = RemapFilePath->getValue();
  }

  if (auto MigratedFilePath = Args.getLastArg(OPT_emit_migrated_file_path)) {
    Opts.EmitMigratedFilePath = MigratedFilePath->getValue();
  }

  if (auto Dumpster = Args.getLastArg(OPT_dump_migration_states_dir)) {
    Opts.DumpMigrationStatesDir = Dumpster->getValue();
  }

  if (auto DataPath = Args.getLastArg(OPT_api_diff_data_file)) {
    Opts.APIDigesterDataStorePaths.push_back(DataPath->getValue());
  } else {
    // No explicit data file: derive the path from -api-diff-data-dir or the
    // "migrator" subdirectory of the resource path.
    auto &Triple = LangOpts.Target;
    llvm::SmallString<128> basePath;
    if (auto DataDir = Args.getLastArg(OPT_api_diff_data_dir)) {
      basePath = DataDir->getValue();
    } else {
      basePath = ResourcePath;
      llvm::sys::path::append(basePath, "migrator");
    }

    bool Supported = true;
    llvm::SmallString<128> dataPath(basePath);
    auto
        &langVer = LangOpts.EffectiveLanguageVersion;
    // Choose the API-diff data file matching the target OS family.
    if (Triple.isMacOSX())
      llvm::sys::path::append(dataPath, getScriptFileName("macos", langVer));
    else if (Triple.isiOS())
      llvm::sys::path::append(dataPath, getScriptFileName("ios", langVer));
    else if (Triple.isTvOS())
      llvm::sys::path::append(dataPath, getScriptFileName("tvos", langVer));
    else if (Triple.isWatchOS())
      llvm::sys::path::append(dataPath, getScriptFileName("watchos", langVer));
    else
      Supported = false;
    if (Supported) {
      llvm::SmallString<128> authoredDataPath(basePath);
      llvm::sys::path::append(authoredDataPath,
                              getScriptFileName("overlay", langVer));
      // Add authored list first to take higher priority.
      Opts.APIDigesterDataStorePaths.push_back(
          std::string(authoredDataPath.str()));
      Opts.APIDigesterDataStorePaths.push_back(std::string(dataPath.str()));
    }
  }

  if (Opts.shouldRunMigrator()) {
    assert(!FrontendOpts.InputsAndOutputs.isWholeModule());
    // FIXME: In order to support batch mode properly, the migrator would have
    // to support having one remap file path and one migrated file path per
    // primary input. The easiest way to do this would be to move processing of
    // these paths into FrontendOptions, like other supplementary outputs, and
    // to call migrator::updateCodeAndEmitRemapIfNeeded once for each primary
    // file.
    //
    // Supporting WMO would be similar, but WMO is set up to only produce one
    // supplementary output for the whole compilation instead of one per input,
    // so it's probably not worth it.
    FrontendOpts.InputsAndOutputs.assertMustNotBeMoreThanOnePrimaryInput();

    // Always disable typo-correction in the migrator.
    LangOpts.TypoCorrectionLimit = 0;
  }

  return false;
}

/// Top-level entry point: parse the complete frontend argument list into
/// this CompilerInvocation, delegating to the per-category Parse*Args
/// helpers above. Returns true on error.
bool CompilerInvocation::parseArgs(
    ArrayRef<const char *> Args, DiagnosticEngine &Diags,
    SmallVectorImpl<std::unique_ptr<llvm::MemoryBuffer>>
        *ConfigurationFileBuffers,
    StringRef workingDirectory, StringRef mainExecutablePath) {
  using namespace options;

  if (Args.empty())
    return false;

  // Parse frontend command line options using Swift's option table.
  unsigned MissingIndex;
  unsigned MissingCount;
  std::unique_ptr<llvm::opt::OptTable> Table = createSwiftOptTable();
  llvm::opt::InputArgList ParsedArgs =
      Table->ParseArgs(Args, MissingIndex, MissingCount, FrontendOption);

  // An option is present but its required value is missing.
  if (MissingCount) {
    Diags.diagnose(SourceLoc(), diag::error_missing_arg_value,
                   ParsedArgs.getArgString(MissingIndex), MissingCount);
    return true;
  }

  // Report every unrecognized option before failing.
  if (ParsedArgs.hasArg(OPT_UNKNOWN)) {
    for (const Arg *A : ParsedArgs.filtered(OPT_UNKNOWN)) {
      Diags.diagnose(SourceLoc(), diag::error_unknown_arg,
                     A->getAsString(ParsedArgs));
    }
    return true;
  }

  // Parse options that control diagnostic behavior as early as possible, so
  // that they can influence the behavior of diagnostics emitted during the
  // rest of parsing.
if (ParseDiagnosticArgs(DiagnosticOpts, ParsedArgs, Diags)) { return true; } configureDiagnosticEngine(DiagnosticOpts, /*effectiveLanguageVersion=*/std::nullopt, mainExecutablePath, Diags); ParseAssertionArgs(ParsedArgs); if (ParseFrontendArgs(FrontendOpts, ParsedArgs, Diags, ConfigurationFileBuffers)) { return true; } if (!mainExecutablePath.empty()) { setMainExecutablePath(mainExecutablePath); } ParseModuleInterfaceArgs(ModuleInterfaceOpts, ParsedArgs, Diags); SaveModuleInterfaceArgs(ModuleInterfaceOpts, FrontendOpts, ParsedArgs, Diags); if (ParseCASArgs(CASOpts, ParsedArgs, Diags, FrontendOpts)) { return true; } if (ParseLangArgs(LangOpts, ParsedArgs, Diags, ModuleInterfaceOpts, FrontendOpts)) { return true; } if (ParseTypeCheckerArgs(TypeCheckerOpts, ParsedArgs, Diags, LangOpts, FrontendOpts)) { return true; } if (ParseClangImporterArgs(ClangImporterOpts, ParsedArgs, Diags, workingDirectory, LangOpts, FrontendOpts, CASOpts)) { return true; } ParseSymbolGraphArgs(SymbolGraphOpts, ParsedArgs, Diags, LangOpts); if (ParseSearchPathArgs(SearchPathOpts, ParsedArgs, Diags, CASOpts, FrontendOpts, workingDirectory)) { return true; } if (ParseSILArgs(SILOpts, ParsedArgs, IRGenOpts, FrontendOpts, TypeCheckerOpts, Diags, LangOpts, ClangImporterOpts)) { return true; } if (ParseIRGenArgs(IRGenOpts, ParsedArgs, Diags, FrontendOpts, SILOpts, LangOpts, CASOpts, getSDKPath(), SearchPathOpts.RuntimeResourcePath, LangOpts.Target)) { return true; } if (ParseTBDGenArgs(TBDGenOpts, ParsedArgs, Diags, *this)) { return true; } if (ParseMigratorArgs(MigratorOpts, LangOpts, FrontendOpts, SearchPathOpts.RuntimeResourcePath, ParsedArgs, Diags)) { return true; } SDKInfo = parseSDKSettings(*llvm::vfs::createPhysicalFileSystem(), LangOpts, SearchPathOpts, Diags); updateRuntimeLibraryPaths(SearchPathOpts, FrontendOpts, LangOpts, SDKInfo); updateImplicitFrameworkSearchPaths(SearchPathOpts, LangOpts, SDKInfo); setDefaultPrebuiltCacheIfNecessary(); setDefaultBlocklistsIfNecessary(); 
setDefaultInProcessPluginServerPathIfNecessary(); // Now that we've parsed everything, setup some inter-option-dependent state. setIRGenOutputOptsFromFrontendOptions(IRGenOpts, FrontendOpts); setBridgingHeaderFromFrontendOptions(ClangImporterOpts, FrontendOpts); computeCXXStdlibOptions(); computeAArch64TBIOptions(); if (LangOpts.hasFeature(Feature::Embedded)) { IRGenOpts.InternalizeAtLink = true; IRGenOpts.DisableLegacyTypeInfo = true; IRGenOpts.ReflectionMetadata = ReflectionMetadataMode::None; IRGenOpts.EnableReflectionNames = false; FrontendOpts.DisableBuildingInterface = true; SearchPathOpts.ModuleLoadMode = ModuleLoadingMode::OnlySerialized; TypeCheckerOpts.SkipFunctionBodies = FunctionBodySkipping::None; SILOpts.SkipFunctionBodies = FunctionBodySkipping::None; SILOpts.CMOMode = CrossModuleOptimizationMode::Everything; SILOpts.EmbeddedSwift = true; SILOpts.UseAggressiveReg2MemForCodeSize = true; // -g is promoted to -gdwarf-types in embedded Swift if (IRGenOpts.DebugInfoLevel == IRGenDebugInfoLevel::ASTTypes) { IRGenOpts.DebugInfoLevel = IRGenDebugInfoLevel::DwarfTypes; } } else { if (SILOpts.NoAllocations) { Diags.diagnose(SourceLoc(), diag::no_allocations_without_embedded); return true; } } if (LangOpts.hasFeature(Feature::StrictMemorySafety)) { if (SILOpts.RemoveRuntimeAsserts || SILOpts.AssertConfig == SILOptions::Unchecked) { Diags.diagnose(SourceLoc(), diag::command_line_conflicts_with_strict_safety, "-Ounchecked"); } if (!LangOpts.EnableAccessControl && FrontendOpts.ModuleName != SWIFT_ONONE_SUPPORT) { Diags.diagnose(SourceLoc(), diag::command_line_conflicts_with_strict_safety, "-disable-access-control"); } } SILOpts.UseAggressiveReg2MemForCodeSize = ParsedArgs.hasFlag(OPT_enable_aggressive_reg2mem, OPT_disable_aggressive_reg2mem, SILOpts.UseAggressiveReg2MemForCodeSize); // With Swift 6, enable @_spiOnly by default. This also enables proper error // reporting of ioi references from spi decls. 
if (LangOpts.EffectiveLanguageVersion.isVersionAtLeast(6)) { LangOpts.EnableSPIOnlyImports = true; } return false; } serialization::Status CompilerInvocation::loadFromSerializedAST(StringRef data) { serialization::ExtendedValidationInfo extendedInfo; serialization::ValidationInfo info = serialization::validateSerializedAST( data, LangOpts.SDKName, &extendedInfo); if (info.status != serialization::Status::Valid) return info.status; LangOpts.EffectiveLanguageVersion = info.compatibilityVersion; setTargetTriple(info.targetTriple); if (!extendedInfo.getSDKPath().empty()) setSDKPath(extendedInfo.getSDKPath().str()); auto &extraClangArgs = getClangImporterOptions().ExtraArgs; for (StringRef Arg : extendedInfo.getExtraClangImporterOptions()) extraClangArgs.push_back(Arg.str()); return info.status; } llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CompilerInvocation::setUpInputForSILTool( StringRef inputFilename, StringRef moduleNameArg, bool alwaysSetModuleToMain, bool bePrimary, serialization::ExtendedValidationInfo &extendedInfo) { // Load the input file. llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBufOrErr = llvm::MemoryBuffer::getFileOrSTDIN(inputFilename); if (!fileBufOrErr) { return fileBufOrErr; } // If it looks like we have an AST, set the source file kind to SIL and the // name of the module to the file's name. getFrontendOptions().InputsAndOutputs.addInput( InputFile(inputFilename, bePrimary, fileBufOrErr.get().get(), file_types::TY_SIL)); auto result = serialization::validateSerializedAST( fileBufOrErr.get()->getBuffer(), LangOpts.SDKName, &extendedInfo); bool hasSerializedAST = result.status == serialization::Status::Valid; if (hasSerializedAST) { const StringRef stem = !moduleNameArg.empty() ? moduleNameArg : llvm::sys::path::stem(inputFilename); setModuleName(stem); getFrontendOptions().InputMode = FrontendOptions::ParseInputMode::SwiftLibrary; } else { const StringRef name = (alwaysSetModuleToMain || moduleNameArg.empty()) ? 
"main" : moduleNameArg; setModuleName(name); getFrontendOptions().InputMode = FrontendOptions::ParseInputMode::SIL; } return fileBufOrErr; }
cpp
github
https://github.com/apple/swift
lib/Frontend/CompilerInvocation.cpp
#ifndef COROUTINE_WIN32_CONTEXT_H #define COROUTINE_WIN32_CONTEXT_H 1 /* * This file is part of the "Coroutine" project and released under the MIT License. * * Created by Samuel Williams on 10/5/2018. * Copyright, 2018, by Samuel Williams. */ #pragma once #include <assert.h> #include <stddef.h> #include <stdint.h> #include <string.h> #define COROUTINE __declspec(noreturn) void __fastcall #define COROUTINE_DECL void __fastcall #define COROUTINE_LIMITED_ADDRESS_SPACE /* This doesn't include thread information block */ enum {COROUTINE_REGISTERS = 4}; struct coroutine_context { void **stack_pointer; void *argument; }; typedef void(__fastcall * coroutine_start)(struct coroutine_context *from, struct coroutine_context *self); static inline void coroutine_initialize_main(struct coroutine_context * context) { context->stack_pointer = NULL; } static inline void coroutine_initialize( struct coroutine_context *context, coroutine_start start, void *stack, size_t size ) { assert(start && stack && size >= 1024); // Stack grows down. Force 16-byte alignment. char * top = (char*)stack + size; context->stack_pointer = (void**)((uintptr_t)top & ~0xF); *--context->stack_pointer = (void*)(uintptr_t)start; /* Windows Thread Information Block */ *--context->stack_pointer = (void*)0xFFFFFFFF; /* fs:[0] */ *--context->stack_pointer = (void*)top; /* fs:[4] */ *--context->stack_pointer = (void*)stack; /* fs:[8] */ context->stack_pointer -= COROUTINE_REGISTERS; memset(context->stack_pointer, 0, sizeof(void*) * COROUTINE_REGISTERS); } struct coroutine_context * __fastcall coroutine_transfer(struct coroutine_context * current, struct coroutine_context * target); static inline void coroutine_destroy(struct coroutine_context * context) { } #endif /* COROUTINE_WIN32_CONTEXT_H */
c
github
https://github.com/ruby/ruby
coroutine/win32/Context.h
from encodings.aliases import aliases import datetime from lxml import etree import urllib2 import re from django.core.urlresolvers import reverse from django.db import models from django.dispatch import receiver from django.utils.feedgenerator import Atom1Feed import feedparser PATTERN_CDATA = re.compile('<!\[cdata\[(.*)\]\]>', re.DOTALL) PATTERN_COMMENT = re.compile('<!--(.*)-->', re.DOTALL) PATTERN_HTTP_URI_BASE = re.compile('^http[s]?://\w[\.\w]+') def get_available_charsets(): charsets = set() for i in aliases.values(): i = i.replace('_', '-') charsets.add(i) charsets = list(charsets) charsets.sort() return [(i,i) for i in charsets] class Feed(models.Model): # Purge articles from the database, which are more than 7 days old and are missing # from the source feed. PURGE_AGE = 7 name = models.CharField(db_index=True, help_text="This name is also used for addressing the fulltext feed through the URL.", max_length=50, verbose_name="Feed name") source = models.URLField( help_text="The source (remote) RSS feed which should be parsed and handled.", verbose_name="Source URL" ) xpath_expression = models.CharField( help_text="The XPath query which will be used to extract the text from the site where the RSS feed points to.", max_length=254, verbose_name="XPath expression" ) article_charset = models.CharField( choices=get_available_charsets(), default="utf-8", help_text="When loading the full-text articles we'll need to know their character set, so you can specifiy it here.", max_length=32, verbose_name="Source article charset" ) def __unicode__(self): return self.name def get_absolute_url(self): return reverse("fulltextfeed_show", args=[self.name]) def get_fulltext_feed(self): source = feedparser.parse(self.source) articles = {} for i in self.article_set.all(): articles[i.link] = i pks = [] feed = Atom1Feed( title=source.feed.title, link=source.feed.link, description=source.feed.description ) for entry in source.entries: if entry.link not in articles: a = Article() 
a.feed = self a.link = entry.link a.save() else: a = articles[entry.link] pks.append(a.id) feed.add_item( title=entry.title, link=entry.link, description=a.text, pubdate=datetime.datetime(*entry.published_parsed[:6]) ) min_delete_datetime = datetime.datetime.now() - datetime.timedelta(days=self.PURGE_AGE) self.article_set.filter( added__lt=min_delete_datetime ).exclude( id__in=pks ).delete() return feed.writeString('utf-8') class Article(models.Model): feed = models.ForeignKey(Feed) link = models.URLField(blank=False) text = models.TextField(blank=True, editable=False) added = models.DateTimeField(auto_now_add=True) def _refresh_text(self): """ Reloads the article from the web, applies the appropriate XPath query and stores the result in the text field. """ print "loading %s..." %self.link uri_base = PATTERN_HTTP_URI_BASE.findall(self.link)[0] uri_base += '/' response = urllib2.urlopen(self.link).read() response = response.decode(self.feed.article_charset) tree = etree.HTML(response) div = tree.xpath(self.feed.xpath_expression) full_text = "" for i in div: full_text += etree.tostring(i) full_text += "\n" #@TODO: strip out comments full_text = re.sub(PATTERN_CDATA, '', full_text) #full_text = re.sub(PATTERN_COMMENT, '', full_text) full_text = full_text.replace('src="/', 'src="'+uri_base) full_text = full_text.replace('href="/', 'href="'+uri_base) self.text = full_text @receiver(models.signals.pre_save, sender=Article) def _handle_new_articles(instance, **kwargs): """ Sets the information for the following fields before adding the article to the database: - text """ instance._refresh_text()
unknown
codeparrot/codeparrot-clean
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """A database of Python protocol buffer generated symbols. SymbolDatabase is the MessageFactory for messages generated at compile time, and makes it easy to create new instances of a registered type, given only the type's protocol buffer symbol name. Example usage: db = symbol_database.SymbolDatabase() # Register symbols of interest, from one or multiple files. 
db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR) db.RegisterMessage(my_proto_pb2.MyMessage) db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR) # The database can be used as a MessageFactory, to generate types based on # their name: types = db.GetMessages(['my_proto.proto']) my_message_instance = types['MyMessage']() # The database's underlying descriptor pool can be queried, so it's not # necessary to know a type's filename to be able to generate it: filename = db.pool.FindFileContainingSymbol('MyMessage') my_message_instance = db.GetMessages([filename])['MyMessage']() # This functionality is also provided directly via a convenience method: my_message_instance = db.GetSymbol('MyMessage')() """ from google.protobuf import descriptor_pool from google.protobuf import message_factory class SymbolDatabase(message_factory.MessageFactory): """A database of Python generated symbols.""" def RegisterMessage(self, message): """Registers the given message type in the local database. Calls to GetSymbol() and GetMessages() will return messages registered here. Args: message: a message.Message, to be registered. Returns: The provided message. """ desc = message.DESCRIPTOR self._classes[desc.full_name] = message self.pool.AddDescriptor(desc) return message def RegisterEnumDescriptor(self, enum_descriptor): """Registers the given enum descriptor in the local database. Args: enum_descriptor: a descriptor.EnumDescriptor. Returns: The provided descriptor. """ self.pool.AddEnumDescriptor(enum_descriptor) return enum_descriptor def RegisterFileDescriptor(self, file_descriptor): """Registers the given file descriptor in the local database. Args: file_descriptor: a descriptor.FileDescriptor. Returns: The provided descriptor. """ self.pool.AddFileDescriptor(file_descriptor) def GetSymbol(self, symbol): """Tries to find a symbol in the local database. Currently, this method only returns message.Message instances, however, if may be extended in future to support other symbol types. 
Args: symbol: A str, a protocol buffer symbol. Returns: A Python class corresponding to the symbol. Raises: KeyError: if the symbol could not be found. """ return self._classes[symbol] def GetMessages(self, files): # TODO(amauryfa): Fix the differences with MessageFactory. """Gets all registered messages from a specified file. Only messages already created and registered will be returned; (this is the case for imported _pb2 modules) But unlike MessageFactory, this version also returns already defined nested messages, but does not register any message extensions. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. Raises: KeyError: if a file could not be found. """ def _GetAllMessageNames(desc): """Walk a message Descriptor and recursively yields all message names.""" yield desc.full_name for msg_desc in desc.nested_types: for full_name in _GetAllMessageNames(msg_desc): yield full_name result = {} for file_name in files: file_desc = self.pool.FindFileByName(file_name) for msg_desc in file_desc.message_types_by_name.values(): for full_name in _GetAllMessageNames(msg_desc): try: result[full_name] = self._classes[full_name] except KeyError: # This descriptor has no registered class, skip it. pass return result _DEFAULT = SymbolDatabase(pool=descriptor_pool.Default()) def Default(): """Returns the default SymbolDatabase.""" return _DEFAULT
unknown
codeparrot/codeparrot-clean
from helpdesk.models import Queue, CustomField, Ticket from django.test import TestCase from django.core import mail from django.test.client import Client from django.core.urlresolvers import reverse class PublicActionsTestCase(TestCase): """ Tests for public actions: - View a ticket - Add a followup - Close resolved case """ def setUp(self): """ Create a queue & ticket we can use for later tests. """ self.queue = Queue.objects.create(title='Queue 1', slug='q', allow_public_submission=True, new_ticket_cc='new.public@example.com', updated_ticket_cc='update.public@example.com') self.ticket = Ticket.objects.create(title='Test Ticket', queue=self.queue, submitter_email='test.submitter@example.com', description='This is a test ticket.') self.client = Client() def test_public_view_ticket(self): response = self.client.get('%s' % (reverse('helpdesk_ticket_url_view', args=[self.ticket.encode()]))) self.assertEqual(response.status_code, 200) self.assertTemplateNotUsed(response, 'helpdesk/public_view_form.html') def test_public_close(self): old_status = self.ticket.status old_resolution = self.ticket.resolution resolution_text = 'Resolved by test script' ticket = Ticket.objects.get(id=self.ticket.id) ticket.status = Ticket.RESOLVED_STATUS ticket.resolution = resolution_text ticket.save() current_followups = ticket.followup_set.all().count() response = self.client.get('%s' % (reverse('helpdesk_ticket_url_view', args=[ticket.encode()]))) ticket = Ticket.objects.get(id=self.ticket.id) """ ToDo Why do we need redirect here ? self.assertEqual(response.status_code, 302) self.assertTemplateNotUsed(response, 'helpdesk/public_view_form.html') self.assertEqual(ticket.status, Ticket.CLOSED_STATUS) self.assertEqual(ticket.resolution, resolution_text) self.assertEqual(current_followups+1, ticket.followup_set.all().count()) ticket.resolution = old_resolution ticket.status = old_status ticket.save() """
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true require "delegate" module ActionMailer class << self # Enqueue many emails at once to be delivered through Active Job. # When the individual job runs, it will send the email using +deliver_now+. def deliver_all_later(*deliveries, **options) _deliver_all_later("deliver_now", *deliveries, **options) end # Enqueue many emails at once to be delivered through Active Job. # When the individual job runs, it will send the email using +deliver_now!+. # That means that the message will be sent bypassing checking +perform_deliveries+ # and +raise_delivery_errors+, so use with caution. def deliver_all_later!(*deliveries, **options) _deliver_all_later("deliver_now!", *deliveries, **options) end private def _deliver_all_later(delivery_method, *deliveries, **options) deliveries = deliveries.first if deliveries.first.is_a?(Array) jobs = deliveries.map do |delivery| mailer_class = delivery.mailer_class delivery_job = mailer_class.delivery_job delivery_job .new(mailer_class.name, delivery.action.to_s, delivery_method, params: delivery.params, args: delivery.args) .set(options) end ActiveJob.perform_all_later(jobs) end end # = Action Mailer \MessageDelivery # # The +ActionMailer::MessageDelivery+ class is used by # ActionMailer::Base when creating a new mailer. # <tt>MessageDelivery</tt> is a wrapper (+Delegator+ subclass) around a lazy # created +Mail::Message+. You can get direct access to the # +Mail::Message+, deliver the email or schedule the email to be sent # through Active Job. 
# # Notifier.welcome(User.first) # an ActionMailer::MessageDelivery object # Notifier.welcome(User.first).deliver_now # sends the email # Notifier.welcome(User.first).deliver_later # enqueue email delivery as a job through Active Job # Notifier.welcome(User.first).message # a Mail::Message object class MessageDelivery < Delegator attr_reader :mailer_class, :action, :params, :args # :nodoc: def initialize(mailer_class, action, *args) # :nodoc: @mailer_class, @action, @args = mailer_class, action, args # The mail is only processed if we try to call any methods on it. # Typical usage will leave it unloaded and call deliver_later. @processed_mailer = nil @mail_message = nil end ruby2_keywords(:initialize) # Method calls are delegated to the Mail::Message that's ready to deliver. def __getobj__ # :nodoc: @mail_message ||= processed_mailer.message end # Unused except for delegator internals (dup, marshalling). def __setobj__(mail_message) # :nodoc: @mail_message = mail_message end # Returns the resulting Mail::Message def message __getobj__ end # Was the delegate loaded, causing the mailer action to be processed? def processed? @processed_mailer || @mail_message end # Enqueues the email to be delivered through Active Job. When the # job runs it will send the email using +deliver_now!+. That means # that the message will be sent bypassing checking +perform_deliveries+ # and +raise_delivery_errors+, so use with caution. # # Notifier.welcome(User.first).deliver_later! 
# Notifier.welcome(User.first).deliver_later!(wait: 1.hour) # Notifier.welcome(User.first).deliver_later!(wait_until: 10.hours.from_now) # Notifier.welcome(User.first).deliver_later!(priority: 10) # # Options: # # * <tt>:wait</tt> - Enqueue the email to be delivered with a delay # * <tt>:wait_until</tt> - Enqueue the email to be delivered at (after) a specific date / time # * <tt>:queue</tt> - Enqueue the email on the specified queue # * <tt>:priority</tt> - Enqueues the email with the specified priority # # By default, the email will be enqueued using ActionMailer::MailDeliveryJob on # the default queue. Mailer classes can customize the queue name used for the default # job by assigning a +deliver_later_queue_name+ class variable, or provide a custom job # by assigning a +delivery_job+. When a custom job is used, it controls the queue name. # # class AccountRegistrationMailer < ApplicationMailer # self.delivery_job = RegistrationDeliveryJob # end def deliver_later!(options = {}) enqueue_delivery :deliver_now!, options end # Enqueues the email to be delivered through Active Job. When the # job runs it will send the email using +deliver_now+. # # Notifier.welcome(User.first).deliver_later # Notifier.welcome(User.first).deliver_later(wait: 1.hour) # Notifier.welcome(User.first).deliver_later(wait_until: 10.hours.from_now) # Notifier.welcome(User.first).deliver_later(priority: 10) # # Options: # # * <tt>:wait</tt> - Enqueue the email to be delivered with a delay. # * <tt>:wait_until</tt> - Enqueue the email to be delivered at (after) a specific date / time. # * <tt>:queue</tt> - Enqueue the email on the specified queue. # * <tt>:priority</tt> - Enqueues the email with the specified priority # # By default, the email will be enqueued using ActionMailer::MailDeliveryJob on # the default queue. 
Mailer classes can customize the queue name used for the default # job by assigning a +deliver_later_queue_name+ class variable, or provide a custom job # by assigning a +delivery_job+. When a custom job is used, it controls the queue name. # # class AccountRegistrationMailer < ApplicationMailer # self.delivery_job = RegistrationDeliveryJob # end def deliver_later(options = {}) enqueue_delivery :deliver_now, options end # Delivers an email without checking +perform_deliveries+ and +raise_delivery_errors+, # so use with caution. # # Notifier.welcome(User.first).deliver_now! # def deliver_now! processed_mailer.handle_exceptions do processed_mailer.run_callbacks(:deliver) do message.deliver! end end end # Delivers an email: # # Notifier.welcome(User.first).deliver_now # def deliver_now processed_mailer.handle_exceptions do processed_mailer.run_callbacks(:deliver) do message.deliver end end end private # Returns the processed Mailer instance. We keep this instance # on hand so we can run callbacks and delegate exception handling to it. def processed_mailer @processed_mailer ||= @mailer_class.new.tap do |mailer| mailer.process @action, *@args end end def enqueue_delivery(delivery_method, options = {}) if processed? ::Kernel.raise "You've accessed the message before asking to " \ "deliver it later, so you may have made local changes that would " \ "be silently lost if we enqueued a job to deliver it. Why? Only " \ "the mailer method *arguments* are passed with the delivery job! " \ "Do not access the message in any way if you mean to deliver it " \ "later. Workarounds: 1. don't touch the message before calling " \ "#deliver_later, 2. only touch the message *within your mailer " \ "method*, or 3. use a custom Active Job instead of #deliver_later." else @mailer_class.delivery_job.set(options).perform_later( @mailer_class.name, @action.to_s, delivery_method.to_s, args: @args) end end end end
ruby
github
https://github.com/rails/rails
actionmailer/lib/action_mailer/message_delivery.rb
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import openerp def login(db, login, password): res_users = openerp.registry(db)['res.users'] return res_users.login(db, login, password) def check_super(passwd): if passwd == openerp.tools.config['admin_passwd']: return True else: raise openerp.exceptions.AccessDenied() def check(db, uid, passwd): res_users = openerp.registry(db)['res.users'] return res_users.check(db, uid, passwd) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Dec 22 22:03:23 2017 @author: alek """ import sys sys.path.append('../../') from pyDist import Nodes import logging import sys from pyDist.TaskManager import TaskManager #change these up for use in other cases taskManager = TaskManager() #taskManager.executor = concurrent.futures.ThreadPoolExecutor(4) #logging utility logging.basicConfig(format='%(filename)-20s:%(lineno)-43s | %(levelname)-8s | %(message)s' , stream=sys.stdout, level=logging.DEBUG) logger = logging.getLogger() def run_this(i): logger.debug('run_this() was called at index %d' % i) return True def callback(a): logger.debug('callback(%s)' % a) def start_node(): logger.debug('starting the node (PROCESS MAIN)') node = Nodes.ClusterNode() for i in range(0,3): task = node.taskManager.executor.submit(run_this,i) task.add_done_callback(node.work_item_finished_callback) node.taskManager.submit(task) #node.start_updating() #node.boot('0.0.0.0', 9000) #logger.debug('node stopped') return node if __name__ == '__main__': logger.debug('basic task running test') node = start_node()
unknown
codeparrot/codeparrot-clean
A pattern for a struct fails to specify a sub-pattern for every one of the struct's fields. Erroneous code example: ```compile_fail,E0027 struct Dog { name: String, age: u32, } let d = Dog { name: "Rusty".to_string(), age: 8 }; // This is incorrect. match d { Dog { age: x } => {} } ``` To fix this error, ensure that each field from the struct's definition is mentioned in the pattern, or use `..` to ignore unwanted fields. Example: ``` struct Dog { name: String, age: u32, } let d = Dog { name: "Rusty".to_string(), age: 8 }; match d { Dog { name: ref n, age: x } => {} } // This is also correct (ignore unused fields). match d { Dog { age: x, .. } => {} } ```
unknown
github
https://github.com/rust-lang/rust
compiler/rustc_error_codes/src/error_codes/E0027.md
/* Copyright 2017 - 2025 R. Thomas * Copyright 2017 - 2025 Quarkslab * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "LIEF/Visitor.hpp" #include "LIEF/MachO/SubFramework.hpp" #include "MachO/Structures.hpp" namespace LIEF { namespace MachO { SubFramework::SubFramework(const details::sub_framework_command& cmd) : LoadCommand::LoadCommand{LoadCommand::TYPE(cmd.cmd), cmd.cmdsize} {} void SubFramework::accept(Visitor& visitor) const { visitor.visit(*this); } std::ostream& SubFramework::print(std::ostream& os) const { LoadCommand::print(os) << '\n'; os << "Umbrella:" << umbrella(); return os; } } }
cpp
github
https://github.com/nodejs/node
deps/LIEF/src/MachO/SubFramework.cpp
#!/usr/bin/env python
"""Opacity and equilibrium-abundance utilities for brown-dwarf/exoplanet
atmosphere layers.

Workflow: ``getPT`` brackets a layer's (P, T) on the 736/1060-point opacity
grid, ``interpolateOpacityFiles`` bilinearly interpolates the four bracketing
opacity tables (in log10 space), ``getAbundances`` interpolates equilibrium
mixing ratios, and the ``make*Plot`` helpers visualize the results.

Ported to Python 3 (print statements, integer division) and fixed so that
``getPT`` actually loads its grid (the load was commented out, leaving
``opacityPTlist`` undefined) and ``sys`` is imported for ``sys.exit``.
"""

import sys

import numpy as np
import scipy as sp  # kept for compatibility; not used directly here
from scipy import interpolate

# Floor used to replace NaN / zero opacities and abundances so later log10
# interpolation never sees -inf.
_TINY = 1e-220

# Display name (with Unicode subscripts, as shown in the GUI) -> table name.
_MOLECULE_NAMES = {
    u'H\u2082O': 'h2o',
    u'CH\u2084': 'ch4',
    u'CO': 'co',
    u'CO\u2082': 'co2',
    u'NH\u2083': 'nh3',
    u'PH\u2083': 'ph3',
    u'FeH': 'feh',
    u'TiO': 'tio',
    u'VO': 'vo',
    u'H\u2082S': 'h2s',
}

# Table name -> LaTeX legend label, shared by both plotting helpers.
_LEG_DICT = {
    'h2o': 'H$_2$O', 'ch4': 'CH$_4$', 'co': 'CO', 'nh3': 'NH$_3$',
    'n2': 'N$_2$', 'ph3': 'PH$_3$', 'h2s': 'H$_2$S', 'tio': 'TiO',
    'vo': 'VO', 'feh': 'FeH', 'crh': 'CrH', 'na': 'Na', 'k': 'K',
    'rb': 'Rb', 'cs': 'Cs', 'co2': 'CO$_2$'}


def getOpacity(filenumber=200, molname='ch4'):
    """Fetch one opacity table for ``molname`` and return (wavelength, opacity).

    Parameters
    ----------
    filenumber : int
        Index of the ``fort.<n>`` table (one per (P, T) grid point).
    molname : str
        Molecule directory name, e.g. ``'ch4'``.

    Returns
    -------
    (wl, op) : two 1-D numpy arrays (wavelength, opacity), with NaNs and
        (near-)zeros clamped to a tiny floor so log10 stays finite.
    """
    # NOTE(review): fetched over HTTP; a local 'opacs/' copy was used before.
    table = np.genfromtxt('http://www.ucolick.org/~cmorley/data/opacities/'
                          + molname + '/fort.' + str(int(filenumber)))
    wl = table[:, 0]
    op = table[:, 1]
    # Vectorized cleanup (replaces the original per-element loop): NaNs and
    # values below 1e-300 (which includes exact zeros) become the floor.
    op[np.isnan(op)] = _TINY
    op[op < 1e-300] = _TINY
    return wl, op


def getPT(log_pressure_layer=1.5, temp_layer=1600):
    """Return ZERO-indexed grid indices of the 4 (P, T) points bracketing a layer.

    Returned order is ``[upperP upperT, lowerP upperT, upperP lowerT,
    lowerP lowerT]``; an index may repeat when the layer sits exactly on a
    grid value.  Points off the (non-rectangular) grid yield ``[0, 0, 0, 0]``.
    """
    # BUG FIX: this load was commented out, leaving `opacityPTlist` undefined
    # and making every call raise NameError.
    opacityPTlist = np.genfromtxt('opacs/PTgrid1060.txt', skip_header=1)
    pressure_layer = 10.0 ** log_pressure_layer
    index_opac = opacityPTlist[:, 0]
    p_opac = opacityPTlist[:, 1]
    t_opac = opacityPTlist[:, 2]

    # 1. Bracket in temperature (grid is sorted by increasing T, then P).
    for i in range(len(index_opac)):
        if t_opac[i] == temp_layer:
            upperT = t_opac[i]
            lowerT = t_opac[i]
            break
        elif t_opac[i] > temp_layer:
            upperT = t_opac[i]
            lowerT = t_opac[i - 1]
            break
        elif t_opac[len(index_opac) - 1] < temp_layer:
            # Layer is hotter than the whole grid: extrapolate from the edge.
            upperT = t_opac[len(index_opac) - 1]
            lowerT = t_opac[len(index_opac) - 20]
            break

    if pressure_layer > np.max(p_opac):
        # Pressure above the grid: extrapolate from the largest-P point at
        # each bracketing temperature (no break: keep the LAST match).
        for i in range(len(index_opac)):
            if t_opac[i] == upperT:
                upperPupperTindex = i
                lowerPupperTindex = i - 1
            elif t_opac[i] == lowerT:
                upperPlowerTindex = i
                lowerPlowerTindex = i - 1
        return [upperPupperTindex, lowerPupperTindex,
                upperPlowerTindex, lowerPlowerTindex]

    # 2. Bracket in pressure at the upper temperature.
    for i in range(len(index_opac)):
        if t_opac[i] == upperT:
            if p_opac[i] == pressure_layer:
                upperPupperTindex = i
                lowerPupperTindex = i
                break
            elif p_opac[i] > pressure_layer:
                upperPupperTindex = i
                lowerPupperTindex = i - 1
                break
    # 3. ... and at the lower temperature.
    for i in range(len(index_opac)):
        if t_opac[i] == lowerT:
            if p_opac[i] == pressure_layer:
                upperPlowerTindex = i
                lowerPlowerTindex = i
                break
            elif p_opac[i] > pressure_layer:
                upperPlowerTindex = i
                lowerPlowerTindex = i - 1
                break

    try:
        # Non-square grid corners can leave some names unbound; catch that.
        return [upperPupperTindex, lowerPupperTindex,
                upperPlowerTindex, lowerPlowerTindex]
    except UnboundLocalError:
        return [0, 0, 0, 0]


def getMoleculeList(ch4=True, h2o=True, nh3=True, co=True, co2=True, feh=False,
                    h2s=True, ph3=True, tio=False, vo=False, **args):
    """Translate GUI checkbox kwargs (Unicode display names) to table names.

    Only keyword arguments in ``**args`` are consulted (the named defaults are
    kept for interface compatibility); kwargs order is preserved.
    """
    molList = []
    for name in args:
        # == True deliberately: original only accepted an explicit True value.
        if name in _MOLECULE_NAMES and args[name] == True:  # noqa: E712
            molList.append(_MOLECULE_NAMES[name])
    return molList


def getMetallicity(metallicity):
    """Identity passthrough kept for interface compatibility with the GUI."""
    return metallicity


def interpolateOpacityFiles(pressure_layer, temp_layer, indices, opacity_array):
    """Bilinearly interpolate opacities (in log10 P, log10 T, log10 kappa).

    Parameters
    ----------
    pressure_layer, temp_layer : float
        Target point (bar, K) — NOT in log10.
    indices : ZERO-indexed grid indices from ``getPT`` in the order
        [upperP upperT, lowerP upperT, upperP lowerT, lowerP lowerT].
    opacity_array : (4, nfreq) array of opacities at the four indexed points.

    Returns
    -------
    1-D array of interpolated opacities (linear, not log).
    """
    opacityPTlist = np.genfromtxt('opacs/PTgrid1060.txt', skip_header=1)
    p1 = opacityPTlist[indices[1]][1]
    p2 = opacityPTlist[indices[0]][1]
    t1 = opacityPTlist[indices[2]][2]
    t2 = opacityPTlist[indices[0]][2]

    # The four points must form a rectangle (x1,y1),(x2,y1),(x1,y2),(x2,y2).
    if opacityPTlist[indices[3]][1] != p1:
        sys.exit("bilinear interpolation doesn't have a square grid in interpolateOpacityFiles!")
    if opacityPTlist[indices[2]][1] != p2:
        sys.exit("bilinear interpolation doesn't have a square grid in interpolateOpacityFiles!")
    if opacityPTlist[indices[3]][2] != t1:
        sys.exit("bilinear interpolation doesn't have a square grid in interpolateOpacityFiles!")
    if opacityPTlist[indices[1]][2] != t2:
        sys.exit("bilinear interpolation doesn't have a square grid in interpolateOpacityFiles!")

    # Interpolate entirely in log10 space.
    p1 = np.log10(p1)
    p2 = np.log10(p2)
    t1 = np.log10(t1)
    t2 = np.log10(t2)
    opacity_array = np.log10(opacity_array)
    temp_layer = np.log10(temp_layer)
    pressure_layer = np.log10(pressure_layer)

    # Guard against extrapolating negative-infinity opacities.
    opacity_array[opacity_array < -500] = -500

    if (p1 == p2) and (t1 == t2):
        # Exactly on a grid point: no interpolation.
        return 10.0 ** opacity_array[0]
    elif p1 == p2:
        # Degenerate in pressure: linear interpolation in T only.
        opacities_interp = (opacity_array[2] + (opacity_array[0] - opacity_array[2])
                            * (temp_layer - t1) / (t2 - t1))
        return 10.0 ** opacities_interp
    elif t1 == t2:
        # Degenerate in temperature: linear interpolation in P only.
        opacities_interp = (opacity_array[1] + (opacity_array[0] - opacity_array[1])
                            * (pressure_layer - p1) / (p2 - p1))
        return 10.0 ** opacities_interp

    # Full bilinear interpolation (http://en.wikipedia.org/wiki/Bilinear_interpolation).
    R1 = (((p2 - pressure_layer) / (p2 - p1)) * opacity_array[3]
          + ((pressure_layer - p1) / (p2 - p1)) * opacity_array[2])
    R2 = (((p2 - pressure_layer) / (p2 - p1)) * opacity_array[1]
          + ((pressure_layer - p1) / (p2 - p1)) * opacity_array[0])
    opacities_interp = (((t2 - temp_layer) / (t2 - t1)) * R1
                        + ((temp_layer - t1) / (t2 - t1)) * R2)
    return 10.0 ** opacities_interp


def getAbundances(pressures, temps, moleculelist, metallicity='0.0'):
    """Interpolate equilibrium mixing ratios for ``moleculelist`` at (P, T).

    Parameters
    ----------
    pressures, temps : scalars or arrays of layer pressure (bar) / temp (K).
    moleculelist : list of table names, e.g. ``['h2o', 'ch4']``.
    metallicity : string suffix selecting ``abunds/abunds.<metallicity>``.

    Returns
    -------
    (len(moleculelist), n_layer) array of number mixing ratios.
    """
    abundsfile = 'abunds/abunds.' + metallicity
    ptfile = 'abunds/PTpoints.txt'

    abundsGrid = np.genfromtxt(abundsfile, skip_header=1)
    # PTgrid rows line up 1:1 with abundsGrid rows: col 0 is the file line
    # number, col 1 pressure [bar], col 2 temperature [K].
    PTgrid = np.genfromtxt(ptfile, skip_header=1)
    nlines_ptgrid = len(PTgrid)

    pressures = np.array([pressures])
    temps = np.array([temps])

    if nlines_ptgrid != len(abundsGrid):
        # py3 fix: print was a Python-2 statement here.
        print("ERROR! Abundance file has diff. line length than PTpoints file!")

    pressArr = PTgrid[:, 1]
    tempArr = PTgrid[:, 2]
    # Vectorized cleanup: clamp NaNs and exact zeros so log10 stays finite.
    abundsGrid[np.isnan(abundsGrid)] = _TINY
    abundsGrid[abundsGrid == 0.0] = _TINY

    # Column index of each species in the abundance file.
    columns = {'e-': 2, 'h2': 3, 'h': 4, 'h+': 5, 'h-': 6, 'h2-': 7,
               'h2+': 8, 'h3+': 9, 'he': 10, 'h2o': 11, 'ch4': 12, 'co': 13,
               'nh3': 14, 'n2': 15, 'ph3': 16, 'h2s': 17, 'tio': 18,
               'vo': 19, 'fe': 20, 'feh': 21, 'crh': 22, 'na': 23, 'k': 24,
               'rb': 25, 'cs': 26, 'co2': 27}

    molecule_abunds = np.zeros((len(moleculelist), len(pressures)))
    for i in range(len(moleculelist)):
        moleculename = moleculelist[i]
        try:
            columnnum = columns[moleculename]
        except KeyError:
            print('molecule name: ', moleculename, 'not found in abunds dictionary!')
        abunds_molecule_grid = abundsGrid[:, columnnum]
        # Mesh of all requested (T, log10 P) pairs, transposed to our layout.
        mT, mP = np.meshgrid(temps, np.log10(pressures))
        mT = np.transpose(mT)
        mP = np.transpose(mP)
        # Interpolate in log10(abundance); the diagonal keeps only the paired
        # (P_k, T_k) points, e.g. (1,1) is the top of the atmosphere.
        molecule_abunds[i] = 10.0 ** np.diag(
            interpolate.griddata((tempArr, np.log10(pressArr)),
                                 np.log10(abunds_molecule_grid),
                                 (mT, mP), method='linear'))
        # Backfill NaNs (points outside the hull) with the previous layer.
        for j in range(len(molecule_abunds[i])):
            if np.isnan(molecule_abunds[i][j]):
                molecule_abunds[i][j] = molecule_abunds[i][j - 1]
    return molecule_abunds


def makeOpacityPlot(opac_kwargs, opac_result, abund_result, met_result):
    """Plot opacity * mixing ratio vs wavelength for the selected molecules.

    ``opac_kwargs`` holds 'log_pressure_layer' and 'temp_layer';
    ``opac_result`` is the index list from ``getPT``; ``abund_result`` the
    molecule names; ``met_result`` the metallicity string.  Returns the figure.
    """
    # Deferred import: matplotlib/pylab is heavy and GUI-bound, and the rest
    # of this module is usable without it.
    import pylab as plt

    wl, op = getOpacity(filenumber=1)
    op_array = np.zeros((4, len(op)))
    color_list = plt.cm.rainbow(np.linspace(0, 1, len(abund_result)))
    fig1 = plt.figure(num=1, figsize=(12, 8))
    for j in range(len(abund_result)):
        for i in range(len(opac_result)):
            # Tables are 1-indexed on disk; getPT indices are 0-indexed.
            ii = opac_result[i] + 1
            wl, op_array[i] = getOpacity(filenumber=ii, molname=abund_result[j])
        op = interpolateOpacityFiles(10.0 ** opac_kwargs['log_pressure_layer'],
                                     opac_kwargs['temp_layer'],
                                     opac_result, op_array)
        abunds = getAbundances(10.0 ** opac_kwargs['log_pressure_layer'],
                               opac_kwargs['temp_layer'], [abund_result[j]],
                               metallicity=met_result)
        plt.loglog(wl, op * abunds[0], color=color_list[j],
                   label=_LEG_DICT[abund_result[j]])
    plt.xlim(0.8, 20)
    plt.ylim(1e-35, 1e-18)
    plt.xlabel('wavelength ($\mu$m)', size='xx-large')
    plt.ylabel('opacity $\\times$ mixing ratio (cm$^2$/molecule) ', size='xx-large')
    plt.minorticks_on()
    plt.tick_params(length=10, width=1, labelsize='x-large', which='major')
    plt.tick_params(length=5, width=1, which='minor')
    # py3 fix: '/' became float division; legend ncol must be a positive int.
    plt.legend(loc='upper left', frameon=False,
               ncol=max(1, len(abund_result) // 3))
    plt.annotate('[M/H]: ' + met_result, xy=(0.7, 0.92),
                 xycoords='axes fraction', size='x-large', color='Black')
    plt.annotate('pressure: ' + str(round(10.0 ** opac_kwargs['log_pressure_layer'], 1)) + ' bar',
                 xy=(0.7, 0.87), xycoords='axes fraction', size='x-large', color='Black')
    plt.annotate('temperature: ' + str(opac_kwargs['temp_layer']) + ' K',
                 xy=(0.7, 0.82), xycoords='axes fraction', size='x-large', color='Black')
    return fig1


def makeAbundsPlot(opac_kwargs, opac_result, abund_result, met_result):
    """Bar chart of the equilibrium mixing ratio of each selected molecule.

    Same argument conventions as ``makeOpacityPlot``; returns the figure.
    """
    import pylab as plt  # deferred: see makeOpacityPlot

    abunds = getAbundances(10.0 ** opac_kwargs['log_pressure_layer'],
                           opac_kwargs['temp_layer'], abund_result,
                           metallicity=met_result)
    color_list = plt.cm.rainbow(np.linspace(0, 1, len(abund_result)))
    xaxis = np.arange(0, len(abunds), 1)
    molnames = []
    fig = plt.figure(num=1, figsize=(11, 7))
    ax = plt.subplot(111)
    for i in range(len(abunds)):
        rect1 = ax.bar(0.5 + i, abunds[i], width=0.8,
                       color=color_list[i], edgecolor=color_list[i])
        molnames.append(_LEG_DICT[abund_result[i]])
        rect = rect1[0]
        height = rect.get_height()
        plt.ylim(0, max(abunds) + 0.15 * max(abunds))
        # Print the numeric value just above each bar.
        ax.text(rect.get_x() + rect.get_width() / 2., height + 0.025 * max(abunds),
                '%.2e' % height, ha='center', va='bottom',
                color=color_list[i], size='large')
    for spine in ('top', 'right'):
        ax.spines[spine].set_visible(False)
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.yaxis.grid(color='white', linestyle='solid', lw=1.5)
    plt.xticks(xaxis + 0.9, molnames, size='xx-large')
    ax.tick_params(axis='y', which='major', labelsize='x-large')
    plt.ylabel('number mixing ratio', size='xx-large')
    plt.annotate('[M/H]: ' + met_result, xy=(0.7, 0.95),
                 xycoords='axes fraction', size='xx-large', color='Black')
    plt.annotate('pressure: ' + str(round(10.0 ** opac_kwargs['log_pressure_layer'], 1)) + ' bar',
                 xy=(0.7, 0.9), xycoords='axes fraction', size='xx-large', color='Black')
    plt.annotate('temperature: ' + str(opac_kwargs['temp_layer']) + ' K',
                 xy=(0.7, 0.85), xycoords='axes fraction', size='xx-large', color='Black')
    return fig
unknown
codeparrot/codeparrot-clean
"""wxPython GUI front-end ("MaM") for the maggregator flow-aggregation tool.

Collects aggregation parameters from the user, builds an OptionParser with
those values and hands it to ``main_aggregator``, then displays the produced
tree images.  NOTE(review): this file is Python 2 only (``except Exception, e``,
``print`` statements, ``xrange``).
"""
import wx
import os
import static
import threading
import inspect
import image_viewer
from optparse import OptionParser
import sys

# Import maggregator either as an installed package or from the local
# checkout; on failure, extend sys.path and retry the local layout.
try:
    from maggregator.src.multiAggregator import main_aggregator
    import features
    from features import *
except:
    sys.path.append(".")
    sys.path.append('./maggregator')
    import maggregator
    from maggregator.multiAggregator import main_aggregator
    import features
    from features import *

'''
app = wx.App(False)  # Create a new app, don't redirect stdout/stderr to a window.
frame = wx.Frame(None, wx.ID_ANY, "Hello World",size=(200,100)) # A Frame is a top-level window.
frame.control = wx.TextCtrl(frame, style=wx.TE_MULTILINE)
frame.Show(True)     # Show the frame.
app.MainLoop()
'''

class MainWindow(wx.Frame):
    """Main configuration window: menus, parameter fields and the Run button."""

    def __init__(self, parent, title):
        """Build the whole UI: discover feature classes, menus, form sizer."""
        wx.Frame.__init__(self, parent, title=title, size=(850,600))
        #0s elf.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
        self.CreateStatusBar() # A Statusbar in the bottom of the window
        # Discover every Feature subclass exported by the `features` package;
        # self.features holds (class_name, class) pairs shown in the listbox.
        self.features = []
        for mod in inspect.getmembers(features,inspect.ismodule):
            for cla in inspect.getmembers(mod[1],inspect.isclass):
                if issubclass(cla[1],features.feature.Feature):
                    self.features.append((cla[0],cla[1]))
        #self.features = inspect.getmembers(feature, inspect.isclass)

        # Setting up the menu.
        filemenu= wx.Menu()
        helpmenu= wx.Menu()
        # wx.ID_ABOUT and wx.ID_EXIT are standard ids provided by wxWidgets.
        menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
        menuAbout = helpmenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
        menuConfigLoad = filemenu.Append(wx.ID_ANY,"&Load Configuration File", "Load a configuration file for prefined values")
        menuConfigSave = filemenu.Append(wx.ID_ANY,"&Save Configuration File", "Save the configuration to a file")

        # Creating the menubar.
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
        menuBar.Append(helpmenu,"&Help") # Adding the "filemenu" to the MenuBar
        self.SetMenuBar(menuBar)  # Adding the MenuBar to the Frame content.

        # Set events.  Buttons use hard-coded ids 10 (Browse) and 11 (Run).
        self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
        self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
        self.Bind(wx.EVT_MENU, self.onLoadConfig,menuConfigLoad)
        self.Bind(wx.EVT_MENU, self.onSaveConfig,menuConfigSave)
        self.Bind(wx.EVT_BUTTON, self.OnBrowse, id=10)
        self.Bind(wx.EVT_BUTTON, self.OnRun, id=11)
        self.Bind(wx.EVT_CHAR, self.onKeyDown)

        sizer = wx.GridBagSizer(12, 4)
        #Setting up the panels.
        #Input
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Input', (20,20)),(0,0))
        self.input_file = wx.TextCtrl(self,size=(400,25))
        sizer.Add(self.input_file,(0,1))
        sizer.Add(wx.Button(self, 10, 'Browse', (80, 220)),(0,2))
        #Alpha (aggregation threshold, %)
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Aggregation %', (20,20)),(1,0))
        #self.alpha = wx.SpinCtrl(self, -1, '2', (150, 75), (60, -1))
        self.alpha = wx.TextCtrl(self,size=(100,25))
        sizer.Add(self.alpha,(1,1))
        #Window (seconds)
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Window', (20,20)),(2,0))
        self.window = wx.TextCtrl(self,size=(400,25))
        sizer.Add(self.window,(2,1))
        #RegExp used to extract flow fields from the input file
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'RegExp', (20,20)),(3,0))
        self.regexp = wx.TextCtrl(self,size=(400,25))
        sizer.Add(self.regexp,(3,1))
        #Fields
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Fields (prefix by \'d_\' to specify fields to use)', (20,20)),(4,0))
        self.fields =wx.TextCtrl(self,size=(400,25))
        sizer.Add(self.fields,(4,1))
        #Dimensions
        #sizer.Add(wx.StaticText(self,wx.ID_ANY,'Dimentions', (20,20)),(5,0))
        #self.dimensions = wx.TextCtrl(self,size=(400,25))
        #sizer.Add(self.dimensions,(5,1))
        #Types: available features on the left, chosen ones on the right,
        #moved between the two listboxes with the </> buttons.
        tsizer = wx.GridBagSizer(1, 3)
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Types', (20,20)),(5,0))
        # NOTE(review): map() returns an iterator under Python 3 — this relies
        # on Python 2 list semantics for the ListBox choices argument.
        self.types = wx.ListBox(self, wx.ID_ANY, wx.DefaultPosition, (170, 130),map(lambda x:x[0],self.features) , wx.LB_MULTIPLE)
        self.types.Bind(wx.EVT_LISTBOX, self.onSelectFeature)
        self.selected_types = wx.ListBox(self, wx.ID_ANY, wx.DefaultPosition, (170, 130), [] , wx.LB_MULTIPLE)
        self.selected_types.Bind(wx.EVT_LISTBOX, self.onDeselectFeature)
        tsizer.Add(self.types,(0,1))
        btsizer = wx.GridBagSizer(2, 1)
        addb = wx.Button(self, 1, '>', (50, 130))
        addb.Bind(wx.EVT_BUTTON,self.addSelected)
        btsizer.Add(addb, (0,0))
        remb=wx.Button(self, 1, '<', (50, 130))
        remb.Bind(wx.EVT_BUTTON,self.removeSelected)
        btsizer.Add(remb,(1,0))
        tsizer.Add(btsizer,(0,2))
        tsizer.Add(self.selected_types,(0,3))
        sizer.Add(tsizer,(5,1))
        #self.types =wx.TextCtrl(self,size=(400,25))
        #sizer.Add(self.types,(6,1))
        #Strategy
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Strategy', (20,20)),(6,0))
        self.strategy =wx.ComboBox(self, choices=['Root','LRU'], style=wx.CB_READONLY)
        sizer.Add(self.strategy,(6,1))
        #Max Nodes
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Max Nodes', (20,20)),(7,0))
        self.nodes = wx.TextCtrl(self,size=(400,25))
        sizer.Add(self.nodes,(7,1))
        #Node Size highlighting
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Aggregation Highlighting', (20,20)),(8,0))
        self.ahighlight = wx.ComboBox(self, choices=['Accumulated','Node'], style=wx.CB_READONLY)
        sizer.Add(self.ahighlight,(8,1))
        #Stability threshold highlighting
        sizer.Add(wx.StaticText(self,wx.ID_ANY,'Stability', (20,20)),(9,0))
        self.shighlight = wx.ComboBox(self, choices=['Average','Min'], style=wx.CB_READONLY)
        sizer.Add(self.shighlight,(9,1))
        sizer.Add(wx.Button(self, 11, 'Run', (80, 220)),(10,1))
        self.SetSizer(sizer)
        self.Show(True)

    def removeSelected(self,event):
        """Remove highlighted entries from the selected-features listbox.

        NOTE(review): deleting by index while iterating GetSelections() shifts
        the remaining indices — with several items selected this likely removes
        the wrong entries; verify.
        """
        for i in self.selected_types.GetSelections():
            self.selected_types.Delete(i)

    def addSelected(self,event):
        """Copy highlighted available features to the top of the selected list."""
        pos = 0
        for i in self.types.GetSelections():
            self.selected_types.Insert(self.features[i][0],pos)
            pos+=1

    def onDeselectFeature(self,event):
        # Intentionally a no-op (body is a bare empty string).
        ''

    def onSelectFeature(self,event):
        # Intentionally a no-op; items are added via the '>' button instead.
        #self.selected_types.Insert(self.features[event.GetSelection()][0],0)
        pass

    def OnAbout(self,e):
        """Show the About dialog (text comes from the `static` module)."""
        # A message dialog box with an OK button. wx.OK is a standard ID in wxWidgets.
        dlg = wx.MessageDialog( self, static.about, "About MaM", wx.OK)
        dlg.ShowModal() # Show it
        dlg.Destroy() # finally destroy it when finished.

    def OnExit(self,e):
        self.Close(True)  # Close the frame.

    def OnBrowse(self,e):
        """ Open a file and put its path into the Input text field."""
        self.dirname = ''
        dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            self.filename = dlg.GetFilename()
            self.dirname = dlg.GetDirectory()
            self.input_file.SetValue("%s/%s"%(self.dirname,self.filename))

    def onLoadConfig(self,e):
        """ Open a `field = value` configuration file and fill the widgets.

        The 'types' field re-selects saved features; any other field name is
        applied via eval on the matching widget.
        NOTE(review): eval of file content is a code-injection risk if the
        config file is not trusted.
        """
        self.dirname = '.'
        dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*", wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            self.filename = dlg.GetFilename()
            self.dirname = dlg.GetDirectory()
            f = open(os.path.join(self.dirname, self.filename), 'r')
            for line in (lines for lines in f):
                try:
                    if '=' in line:
                        field,value = line.split('=')[0].strip(),line.split('=')[1].strip()
                        if field == 'types':
                            features_indexes = map(lambda x: map(lambda x: x[0], self.features).index(x), value.split())
                            for fi in features_indexes:
                                self.types.SetSelection(fi)
                                self.selected_types.Insert(self.features[fi][0],len(self.selected_types.GetItems()))
                        else:
                            eval('''self.%s.SetValue("%s")'''%(field,value))
                except Exception, e:
                    print e
            f.close()
        dlg.Destroy()

    def onSaveConfig(self,e):
        """ Save the current widget values as `field = value` lines."""
        self.dirname = ''
        dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*", wx.SAVE)
        if dlg.ShowModal() == wx.ID_OK:
            f = open(dlg.GetPath(), 'w')
            for fie in ["input_file","alpha","window","regexp","fields","nodes","shighlight","ahighlight","strategy"]:
                f.write("%s = %s\n" % (fie,eval("str(self.%s.GetValue())" % fie)))
            f.close()
        dlg.Destroy()

    def onKeyDown(self,event):
        # Intentionally a no-op.
        #print "keydown"
        pass

    def OnRun(self,e):
        """Collect widget values into an OptionParser, run the aggregator,
        and display the resulting tree images in an ImgViewer frame."""
        # Set the panel
        types = " ".join(self.selected_types.GetItems())
        fields = self.fields.GetValue().split(" ")
        # Fields prefixed with 'd_' are also radix-tree dimensions.
        dimensions =filter(lambda x: "d_" in x,fields)
        # NOTE(review): this condition looks inverted — it yields "" when a
        # strategy IS chosen and the widget value when it is None; confirm.
        strategy = "" if self.strategy.GetValue() != None else self.strategy.GetValue()
        # Strip the 'd_' prefix before passing the names downstream.
        for i in xrange(0,len(fields)):
            if fields[i][0:2] == 'd_':
                fields[i] = fields[i][2:]
        for i in xrange(0,len(dimensions)):
            if dimensions[i][0:2] == 'd_':
                dimensions[i] = dimensions[i][2:]
        fields = " ".join(fields)
        dimensions = " ".join(dimensions)
        #types = " ".join(map(lambda x: x[0], map(lambda x: self.features[x],[i for i in self.types.GetSelections()])))
        #print ' '.join(map(lambda x: self.features[x],list(self.types.GetSelections())))
        # The aggregator consumes an OptionParser whose defaults carry the
        # GUI values (no command line is actually parsed).
        lineparser = OptionParser("")
        lineparser.add_option('-i','--input', dest='input', default=self.input_file.GetValue().encode('utf8'),type='string',help="input file (txt flow file)", metavar="FILE")
        lineparser.add_option('-w','--window-size', dest='window', default=int(self.window.GetValue()),type='int',help="window size in seconds")
        lineparser.add_option('-r','--reg-exp', dest='reg_exp', default=self.regexp.GetValue(),type='string',help="regular expression to extract flow information")
        lineparser.add_option('-f','--fields', dest='fields', default=fields,type='string',help="fields naming corresponding to the regular expression, have to be split by a space character and HAS TO INCLUDE value and timestamp")
        lineparser.add_option('-d','--dimensions', dest='dim', default=dimensions,type='string',help="dimension to use for the radix tree, have to be split by a space character and correspond to the field naming")
        lineparser.add_option('-t','--type-dimension', dest='types', default=types,type='string',help="types of dimension")
        lineparser.add_option('-c','--cut', dest='cut', default=0.02,type='float',help="threshold (%) under which removing a node is not allowed during the construction(it's include the parents values)")
        lineparser.add_option('-a','--aggregate', dest='aggregate', default=float(self.alpha.GetValue()),type='float',help="threshold (%) for the aggregation")
        lineparser.add_option('-l','--log-file', dest='log',default="log.att",type='string',help="log file containing the attacks", metavar="FILE")
        lineparser.add_option('-s','--split', dest='split', default=20,type='float',help="percentage of data used for training")
        lineparser.add_option('-g','--type-aggregation', dest='type_aggr', default="NumericalValueNode",type='string',help="type of the aggregation for nodes")
        lineparser.add_option('-n','--name', dest='namefile', default="bytes",type='string',help="suffix for name of file results")
        lineparser.add_option('-S','--strategy', dest='strategy', default='',type='string',help="stratrgy for selecting nodes to aggregate")
        lineparser.add_option('-m','--max-nodes', dest='max_nodes', default=int(self.nodes.GetValue()),type='int',help="max size of tree")
        lineparser.add_option('-A','--aggregation-highlight', dest='ahighlight', default=self.ahighlight.GetValue(),type='string',help="Highligh nodes on behalf aggregation")
        # NOTE(review): shighlight defaults to self.ahighlight, not
        # self.shighlight — looks like a copy/paste bug; verify.
        lineparser.add_option('-T','--stability-highlight', dest='shighlight', default=self.ahighlight.GetValue(),type='string',help="Highligh nodes on behalf node stability")
        #main_aggregator(lineparser)

        # Thin Thread wrapper; note run() is CALLED directly below (not
        # start()), so the aggregation actually runs synchronously.
        class FuncThread(threading.Thread):
            def __init__(self, target, *args):
                self._target = target
                self._args = args
                threading.Thread.__init__(self)
            def run(self):
                return self._target(*self._args)
        t1 = FuncThread(main_aggregator, lineparser)
        list_res = t1.run()
        print list_res
        # list_res[1] is expected to be the list of generated image files
        # (NOTE(review): assumed from usage — confirm in main_aggregator).
        files = list_res[1]
        frame = image_viewer.ImgViewer(None, -1, 'Tree',files)
        frame.Show(True)


class MyFrame(wx.Frame):
    """Simple frame that displays one bitmap scaled to 600x800."""

    def __init__(self, parent, id, title,bitmapfile):
        wx.Frame.__init__(self, parent, id, title, size = (1024, 800))
        self.panel = wx.Panel(self, -1)
        self.panel.SetScrollbar(wx.VERTICAL, 0, 6, 50);
        self.panel.SetScrollbar(wx.HORIZONTAL, 0, 6, 50);
        bitmap = wx.Bitmap(bitmapfile)
        self.bitmap = self.scale_bitmap(bitmap, 600,800)
        self.imageCtrl = wx.StaticBitmap(self.panel, wx.ID_ANY, self.bitmap)
        self.panel.Refresh()
        #wx.EVT_PAINT(self, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Centre()

    def OnSize(self, event):
        # NOTE(review): computes the new size but discards it — effectively a
        # no-op resize handler.
        event.GetSize()[0], event.GetSize()[1]

    def scale_bitmap(self,bitmap, w,h):
        """Round-trip the bitmap through wx.Image (the actual scaling code is
        commented out, so the bitmap is currently returned unscaled)."""
        image = wx.ImageFromBitmap(bitmap)
        '''if image.GetWidth() > image.GetHeight():
            neww = w
            newh = h * h / float(w)
        else:
            newh = h
            neww = w * w / float(h)
        image = image.Scale(neww, newh, wx.IMAGE_QUALITY_HIGH)'''
        result = wx.BitmapFromImage(image)
        return result

    def OnPaint(self, event):
        self.imageCtrl.SetBitmap(self.bitmap)
        self.panel.Refresh()

app = wx.App(False)
frame = MainWindow(None, "MaM")
app.MainLoop()
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true module ActionView module Helpers module Tags # :nodoc: class PasswordField < TextField # :nodoc: def render @options = { value: nil }.merge!(@options) super end end end end end
ruby
github
https://github.com/rails/rails
actionview/lib/action_view/helpers/tags/password_field.rb
"""Tests for making sure experimental imports work as expected."""

import textwrap

import pytest

from sklearn.utils._testing import assert_run_python_script_without_output
from sklearn.utils.fixes import _IS_WASM


@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_imports_strategies():
    """Check that each import strategy works or fails as expected.

    Python caches imported modules, so every scenario runs in its own child
    process; otherwise the test cases would not be independent (manually
    evicting entries from sys.modules is discouraged and error-prone).
    """
    pattern = "IterativeImputer is experimental"

    # Enabling the experimental flag first makes the import succeed.
    good_import = """
    from sklearn.experimental import enable_iterative_imputer
    from sklearn.impute import IterativeImputer
    """

    # Importing an unrelated sklearn module beforehand must not interfere.
    good_import_with_ensemble_first = """
    import sklearn.ensemble
    from sklearn.experimental import enable_iterative_imputer
    from sklearn.impute import IterativeImputer
    """

    # Without the enable_* import the import must raise, even after importing
    # the sklearn.experimental package itself.
    bad_imports = f"""
    import pytest

    with pytest.raises(ImportError, match={pattern!r}):
        from sklearn.impute import IterativeImputer

    import sklearn.experimental
    with pytest.raises(ImportError, match={pattern!r}):
        from sklearn.impute import IterativeImputer
    """

    for scenario in (good_import, good_import_with_ensemble_first, bad_imports):
        assert_run_python_script_without_output(
            textwrap.dedent(scenario), pattern=pattern
        )
python
github
https://github.com/scikit-learn/scikit-learn
sklearn/experimental/tests/test_enable_iterative_imputer.py
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Adapted from http://code.google.com/p/django-mako/source/browse/trunk/djangomako/shortcuts.py
#
# Bridges Django's view layer to Mako templates: a per-app TemplateCollection
# plus render_to_string/render_to_response replacements and a few template
# helper functions (url, csrf_token, static).
# NOTE(review): Python 2 only (`unicode`, old django.core.* import paths).

import os
import tempfile

import django.template
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.http import HttpResponse
from mako.lookup import TemplateLookup, TemplateCollection

from desktop.lib import apputil, i18n

register = django.template.Library()

# How to handle characters that fail to encode in template output.
ENCODING_ERRORS = 'replace'

# Things to automatically import into all template namespaces
IMPORTS=[
  "from django.utils.html import escape",
  "from desktop.lib.django_mako import url",
  "from desktop.lib.django_mako import csrf_token",
  "from desktop.lib.django_mako import static",
]

class DesktopLookup(TemplateCollection):
  """
  Template loader for Mako which uses the app-specific template
  directories, and sets up our default options.

  The core desktop template dir is automatically searched for templates.
  """
  def __init__(self):
    # One TemplateLookup per app, created lazily by _get_loader().
    self.loaders = {}
    # Scratch directory for Mako's compiled template modules (lazy, see below).
    self.module_dir = None
    self.desktop_template_dir = os.path.join(os.path.dirname(__file__), '../templates')

  def _get_loader(self, app):
    """Return (creating and caching if needed) the TemplateLookup for `app`."""
    if app in self.loaders:
      return self.loaders[app]

    # Lazily find a temp dir for module_dir.
    # This laziness is important because at initialization time
    # we might still be running as root during desktop startup
    # and thus the temp dir would be owned as root, not the
    # unpriveleged user!
    if self.module_dir is None:
      self.module_dir = tempfile.mkdtemp()
      # TODO(todd) configurable?

    # Resolve the app's on-disk 'templates' directory from its module path.
    app_module = __import__(app)
    app_dir = os.path.dirname(app_module.__file__)
    app_template_dir = os.path.join(app_dir, 'templates')
    # App templates take priority; the shared desktop templates are fallback.
    loader = TemplateLookup(directories=[app_template_dir, self.desktop_template_dir],
                            module_directory=os.path.join(self.module_dir, app),
                            output_encoding=i18n.get_site_encoding(),
                            input_encoding=i18n.get_site_encoding(),
                            encoding_errors=ENCODING_ERRORS,
                            default_filters=['unicode', 'escape'],
                            imports=IMPORTS)
    # TODO(philip): Make a django_aware default filter, that understands
    # django safe strings.  See http://www.makotemplates.org/docs/filtering.html.
    self.loaders[app] = loader
    return loader

  def get_template(self, uri):
    """Look up `uri` in the loader of the currently-active app."""
    app = apputil.get_current_app()
    if not app:
      raise Exception("no app!")
    real_loader = self._get_loader(app)
    return real_loader.get_template(uri)

# Module-level singleton used by the render_* functions below.
lookup = DesktopLookup()

def render_to_string_test(template_name, django_context):
  """
  In tests, send a template rendered signal.  This puts the template
  context into HttpResponse.context when you use Client.get().
  Django's templating libraries do similar work (search for
  template_rendered).
  """
  from django.test import signals
  signals.template_rendered.send(sender=None, template=template_name, context=django_context)
  return render_to_string_normal(template_name, django_context)

def render_to_string_normal(template_name, django_context):
  """Render `template_name` via Mako with data taken from a Django Context
  (its dicts are flattened, later layers winning) or a plain dict."""
  data_dict = dict()
  if isinstance(django_context, django.template.Context):
    for d in reversed(django_context.dicts):
      data_dict.update(d)
  else:
    data_dict = django_context
  template = lookup.get_template(template_name)
  # Mako passes context as **kwargs, so keys must be plain strings.
  data_dict = dict(map(lambda k: (str(k), data_dict.get(k)), data_dict.keys()))
  result = template.render(**data_dict)
  return i18n.smart_unicode(result)

# This variable is overridden in test code.
render_to_string = render_to_string_normal

def render_to_response(template_name, data_dictionary, **kwargs):
  """
  Returns a HttpResponse whose content is filled with the result of calling
  lookup.get_template(args[0]).render with the passed arguments.
  """
  return HttpResponse(render_to_string(template_name, data_dictionary), **kwargs)

def url(view_name, *args, **view_args):
  """URL tag for use in templates - like {% url ... %} in django"""
  from django.core.urlresolvers import reverse
  return reverse(view_name, args=args, kwargs=view_args)

from django.core.context_processors import csrf

def csrf_token(request):
  """Returns a hidden CSRF-token input for inclusion in Mako-rendered forms."""
  csrf_token = unicode(csrf(request)["csrf_token"])
  return str.format("<input type='hidden' name='csrfmiddlewaretoken' value='{0}' />", csrf_token)

def static(path):
  """
  Returns the URL to a file using the staticfiles's storage engine
  """
  try:
    return staticfiles_storage.url(path)
  except ValueError:
    # django.contrib.staticfiles raises a ValueError if the file we are looking
    # for is not in the staticfiles directory.  This will result in a 500 error
    # in a mako script, which is a little unfriendly.  Instead we'll return a
    # path to a non-existing file so the template renders and we can see the
    # missing file in the logs.
    return settings.STATIC_URL + path
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals import datetime from collections import OrderedDict from django.contrib.auth.models import User from django.test import TestCase from .models import Order, RevisionableModel, TestObject class ExtraRegressTests(TestCase): def setUp(self): self.u = User.objects.create_user( username="fred", password="secret", email="fred@example.com" ) def test_regression_7314_7372(self): """ Regression tests for #7314 and #7372 """ rm = RevisionableModel.objects.create( title='First Revision', when=datetime.datetime(2008, 9, 28, 10, 30, 0) ) self.assertEqual(rm.pk, rm.base.pk) rm2 = rm.new_revision() rm2.title = "Second Revision" rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0) rm2.save() self.assertEqual(rm2.title, 'Second Revision') self.assertEqual(rm2.base.title, 'First Revision') self.assertNotEqual(rm2.pk, rm.pk) self.assertEqual(rm2.base.pk, rm.pk) # Queryset to match most recent revision: qs = RevisionableModel.objects.extra( where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % { 'table': RevisionableModel._meta.db_table, }] ) self.assertQuerysetEqual(qs, [('Second Revision', 'First Revision')], transform=lambda r: (r.title, r.base.title) ) # Queryset to search for string in title: qs2 = RevisionableModel.objects.filter(title__contains="Revision") self.assertQuerysetEqual(qs2, [ ('First Revision', 'First Revision'), ('Second Revision', 'First Revision'), ], transform=lambda r: (r.title, r.base.title), ordered=False ) # Following queryset should return the most recent revision: self.assertQuerysetEqual(qs & qs2, [('Second Revision', 'First Revision')], transform=lambda r: (r.title, r.base.title), ordered=False ) def test_extra_stay_tied(self): # Extra select parameters should stay tied to their corresponding # select portions. Applies when portions are updated or otherwise # moved around. 
qs = User.objects.extra( select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))), select_params=(1, 3) ) qs = qs.extra(select={"beta": 4}) qs = qs.extra(select={"alpha": "%s"}, select_params=[5]) self.assertEqual( list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')), [{'alpha': 5, 'beta': 4, 'gamma': 3}] ) def test_regression_7957(self): """ Regression test for #7957: Combining extra() calls should leave the corresponding parameters associated with the right extra() bit. I.e. internal dictionary must remain sorted. """ self.assertEqual( (User.objects .extra(select={"alpha": "%s"}, select_params=(1,)) .extra(select={"beta": "%s"}, select_params=(2,))[0].alpha), 1 ) self.assertEqual( (User.objects .extra(select={"beta": "%s"}, select_params=(1,)) .extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha), 2 ) def test_regression_7961(self): """ Regression test for #7961: When not using a portion of an extra(...) in a query, remove any corresponding parameters from the query as well. """ self.assertEqual( list(User.objects .extra(select={"alpha": "%s"}, select_params=(-6,)) .filter(id=self.u.id) .values_list('id', flat=True)), [self.u.id] ) def test_regression_8063(self): """ Regression test for #8063: limiting a query shouldn't discard any extra() bits. """ qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id]) self.assertQuerysetEqual(qs, ['<User: fred>']) self.assertQuerysetEqual(qs[:1], ['<User: fred>']) def test_regression_8039(self): """ Regression test for #8039: Ordering sometimes removed relevant tables from extra(). This test is the critical case: ordering uses a table, but then removes the reference because of an optimization. The table should still be present because of the extra() call. 
""" self.assertQuerysetEqual( (Order.objects .extra(where=["username=%s"], params=["fred"], tables=["auth_user"]) .order_by('created_by')), [] ) def test_regression_8819(self): """ Regression test for #8819: Fields in the extra(select=...) list should be available to extra(order_by=...). """ self.assertQuerysetEqual( User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(), ['<User: fred>'] ) self.assertQuerysetEqual( User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']), ['<User: fred>'] ) self.assertQuerysetEqual( User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(), ['<User: fred>'] ) def test_dates_query(self): """ When calling the dates() method on a queryset with extra selection columns, we can (and should) ignore those columns. They don't change the result and cause incorrect SQL to be produced otherwise. """ RevisionableModel.objects.create( title='First Revision', when=datetime.datetime(2008, 9, 28, 10, 30, 0) ) self.assertQuerysetEqual( RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'), [datetime.datetime(2008, 9, 1, 0, 0)], transform=lambda d: d, ) def test_values_with_extra(self): """ Regression test for #10256... If there is a values() clause, Extra columns are only returned if they are explicitly mentioned. 
""" obj = TestObject(first='first', second='second', third='third') obj.save() self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values()), [{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}] ) # Extra clauses after an empty values clause are still included self.assertEqual( list(TestObject.objects.values().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))), [{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}] ) # Extra columns are ignored if not mentioned in the values() clause self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second')), [{'second': 'second', 'first': 'first'}] ) # Extra columns after a non-empty values() clause are ignored self.assertEqual( list(TestObject.objects.values('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))), [{'second': 'second', 'first': 'first'}] ) # Extra columns can be partially returned self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second', 'foo')), [{'second': 'second', 'foo': 'first', 'first': 'first'}] ) # Also works if only extra columns are included self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('foo', 'whiz')), [{'foo': 'first', 'whiz': 'third'}] ) # Values list works the same way # All columns are returned for an empty values_list() self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list()), [('first', 'second', 'third', obj.pk, 'first', 'second', 'third')] ) # Extra 
columns after an empty values_list() are still included self.assertEqual( list(TestObject.objects.values_list().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))), [('first', 'second', 'third', obj.pk, 'first', 'second', 'third')] ) # Extra columns ignored completely if not mentioned in values_list() self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second')), [('first', 'second')] ) # Extra columns after a non-empty values_list() clause are ignored completely self.assertEqual( list(TestObject.objects.values_list('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))), [('first', 'second')] ) self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('second', flat=True)), ['second'] ) # Only the extra columns specified in the values_list() are returned self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second', 'whiz')), [('first', 'second', 'third')] ) # ...also works if only extra columns are included self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('foo', 'whiz')), [('first', 'third')] ) self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', flat=True)), ['third'] ) # ... 
and values are returned in the order they are specified self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'foo')), [('third', 'first')] ) self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'id')), [('first', obj.pk)] ) self.assertEqual( list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'first', 'bar', 'id')), [('third', 'first', 'second', obj.pk)] ) def test_regression_10847(self): """ Regression for #10847: the list of extra columns can always be accurately evaluated. Using an inner query ensures that as_sql() is producing correct output without requiring full evaluation and execution of the inner query. """ obj = TestObject(first='first', second='second', third='third') obj.save() self.assertEqual( list(TestObject.objects.extra(select={'extra': 1}).values('pk')), [{'pk': obj.pk}] ) self.assertQuerysetEqual( TestObject.objects.filter( pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk') ), ['<TestObject: TestObject: first,second,third>'] ) self.assertEqual( list(TestObject.objects.values('pk').extra(select={'extra': 1})), [{'pk': obj.pk}] ) self.assertQuerysetEqual( TestObject.objects.filter( pk__in=TestObject.objects.values('pk').extra(select={'extra': 1}) ), ['<TestObject: TestObject: first,second,third>'] ) self.assertQuerysetEqual( TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]), ['<TestObject: TestObject: first,second,third>'] ) def test_regression_17877(self): """ Ensure that extra WHERE clauses get correctly ANDed, even when they contain OR operations. """ # Test Case 1: should appear in queryset. t = TestObject(first='a', second='a', third='a') t.save() # Test Case 2: should appear in queryset. 
t = TestObject(first='b', second='a', third='a') t.save() # Test Case 3: should not appear in queryset, bug case. t = TestObject(first='a', second='a', third='b') t.save() # Test Case 4: should not appear in queryset. t = TestObject(first='b', second='a', third='b') t.save() # Test Case 5: should not appear in queryset. t = TestObject(first='b', second='b', third='a') t.save() # Test Case 6: should not appear in queryset, bug case. t = TestObject(first='a', second='b', third='b') t.save() self.assertQuerysetEqual( TestObject.objects.extra( where=["first = 'a' OR second = 'a'", "third = 'a'"], ), ['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'], ordered=False ) def test_extra_values_distinct_ordering(self): t1 = TestObject.objects.create(first='a', second='a', third='a') t2 = TestObject.objects.create(first='a', second='b', third='b') qs = TestObject.objects.extra( select={'second_extra': 'second'} ).values_list('id', flat=True).distinct() self.assertQuerysetEqual( qs.order_by('second_extra'), [t1.pk, t2.pk], lambda x: x) self.assertQuerysetEqual( qs.order_by('-second_extra'), [t2.pk, t1.pk], lambda x: x) # Note: the extra ordering must appear in select clause, so we get two # non-distinct results here (this is on purpose, see #7070). self.assertQuerysetEqual( qs.order_by('-second_extra').values_list('first', flat=True), ['a', 'a'], lambda x: x)
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.serialization; import java.io.Closeable; import java.util.Map; /** * The interface for wrapping a serializer and deserializer for the given data type. * * @param <T> Type to be serialized from and deserialized into. * * A class that implements this interface is expected to have a constructor with no parameter. */ public interface Serde<T> extends Closeable { /** * Configure this class, which will configure the underlying serializer and deserializer. * * @param configs configs in key/value pairs * @param isKey whether is for key or value */ default void configure(Map<String, ?> configs, boolean isKey) { // intentionally left blank } /** * Close this serde class, which will close the underlying serializer and deserializer. * <p> * This method has to be idempotent because it might be called multiple times. */ @Override default void close() { // intentionally left blank } Serializer<T> serializer(); Deserializer<T> deserializer(); }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/serialization/Serde.java
initialized = True def main(): print("Hello world!") if __name__ == '__main__': main()
python
github
https://github.com/python/cpython
Lib/__phello__/__init__.py
/* * Copyright (c) 2007 Mockito contributors * This program is made available under the terms of the MIT License. */ package org.mockito.exceptions.misusing; import org.mockito.exceptions.base.MockitoException; public class UnfinishedStubbingException extends MockitoException { private static final long serialVersionUID = 1L; public UnfinishedStubbingException(String message) { super(message); } }
java
github
https://github.com/mockito/mockito
mockito-core/src/main/java/org/mockito/exceptions/misusing/UnfinishedStubbingException.java
# -*- coding: utf-8 -*- """ /*************************************************************************** Name : Virtual layers plugin for DB Manager Date : December 2015 copyright : (C) 2015 by Hugo Mercier email : hugo dot mercier at oslandia dot com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from qgis.PyQt.QtWidgets import QApplication from ..info_model import DatabaseInfo from ..html_elems import HtmlTable class LDatabaseInfo(DatabaseInfo): def __init__(self, db): self.db = db def connectionDetails(self): tbl = [ ] return HtmlTable(tbl) def generalInfo(self): self.db.connector.getInfo() tbl = [ (QApplication.translate("DBManagerPlugin", "SQLite version:"), "3") ] return HtmlTable(tbl) def privilegesDetails(self): return None
unknown
codeparrot/codeparrot-clean
#!/bin/sh test_description='session ID in capabilities' . ./test-lib.sh REPO="$(pwd)/repo" LOCAL_PRISTINE="$(pwd)/local_pristine" test_expect_success 'setup repos for session ID capability tests' ' git init "$REPO" && test_commit -C "$REPO" a && git clone "file://$REPO" "$LOCAL_PRISTINE" && test_commit -C "$REPO" b ' for PROTO in 0 1 2 do test_expect_success "session IDs not advertised by default (fetch v${PROTO})" ' test_when_finished "rm -rf local tr2-client-events tr2-server-events" && cp -r "$LOCAL_PRISTINE" local && GIT_TRACE2_EVENT="$(pwd)/tr2-client-events" \ git -c protocol.version=$PROTO -C local fetch \ --upload-pack "GIT_TRACE2_EVENT=\"$(pwd)/tr2-server-events\" git-upload-pack" \ origin && test -z "$(grep \"key\":\"server-sid\" tr2-client-events)" && test -z "$(grep \"key\":\"client-sid\" tr2-server-events)" ' test_expect_success "session IDs not advertised by default (push v${PROTO})" ' test_when_finished "rm -rf local tr2-client-events tr2-server-events" && test_when_finished "git -C local push --delete origin new-branch" && cp -r "$LOCAL_PRISTINE" local && git -C local pull --no-rebase origin && GIT_TRACE2_EVENT="$(pwd)/tr2-client-events" \ git -c protocol.version=$PROTO -C local push \ --receive-pack "GIT_TRACE2_EVENT=\"$(pwd)/tr2-server-events\" git-receive-pack" \ origin HEAD:new-branch && test -z "$(grep \"key\":\"server-sid\" tr2-client-events)" && test -z "$(grep \"key\":\"client-sid\" tr2-server-events)" ' done test_expect_success 'enable SID advertisement' ' git -C "$REPO" config transfer.advertiseSID true && git -C "$LOCAL_PRISTINE" config transfer.advertiseSID true ' for PROTO in 0 1 2 do test_expect_success "session IDs advertised (fetch v${PROTO})" ' test_when_finished "rm -rf local tr2-client-events tr2-server-events" && cp -r "$LOCAL_PRISTINE" local && GIT_TRACE2_EVENT="$(pwd)/tr2-client-events" \ git -c protocol.version=$PROTO -C local fetch \ --upload-pack "GIT_TRACE2_EVENT=\"$(pwd)/tr2-server-events\" git-upload-pack" \ origin && 
grep \"key\":\"server-sid\" tr2-client-events && grep \"key\":\"client-sid\" tr2-server-events ' test_expect_success "session IDs advertised (push v${PROTO})" ' test_when_finished "rm -rf local tr2-client-events tr2-server-events" && test_when_finished "git -C local push --delete origin new-branch" && cp -r "$LOCAL_PRISTINE" local && git -C local pull --no-rebase origin && GIT_TRACE2_EVENT="$(pwd)/tr2-client-events" \ git -c protocol.version=$PROTO -C local push \ --receive-pack "GIT_TRACE2_EVENT=\"$(pwd)/tr2-server-events\" git-receive-pack" \ origin HEAD:new-branch && grep \"key\":\"server-sid\" tr2-client-events && grep \"key\":\"client-sid\" tr2-server-events ' test_expect_success "client & server log negotiated version (v${PROTO})" ' test_when_finished "rm -rf local tr2-client-events tr2-server-events" && cp -r "$LOCAL_PRISTINE" local && GIT_TRACE2_EVENT="$(pwd)/tr2-client-events" \ git -c protocol.version=$PROTO -C local fetch \ --upload-pack "GIT_TRACE2_EVENT=\"$(pwd)/tr2-server-events\" git-upload-pack" \ origin && grep \"key\":\"negotiated-version\",\"value\":\"$PROTO\" tr2-client-events && grep \"key\":\"negotiated-version\",\"value\":\"$PROTO\" tr2-server-events ' done test_done
unknown
github
https://github.com/git/git
t/t5705-session-id-in-capabilities.sh
# conftest.py # Copyright (c) 2013-2019 Pablo Acosta-Serafini # See LICENSE for details # pylint: disable=C0111,C0411,C0413,E0012,E0611,E1101,E1103,F0401,W0212 # Standard library imports import os import pickle import sys import warnings if sys.hexversion < 0x03000000: import __builtin__ else: import builtins as __builtin__ # PyPI imports with warnings.catch_warnings(): from _pytest.warning_types import PytestWarning warnings.filterwarnings("ignore", category=PytestWarning) import pytest import pexdoc.exh ### # Functions ### def log(line, append=True): """Debug xdist.""" with open( os.path.join(os.environ["HOME"], "xdist-debug.log"), "a" if append else "w" ) as fobj: fobj.write("{0}\n".format(line)) def pytest_configure(config): """Configure Pytest, both slave and master.""" if not hasattr(config, "slaveinput"): # Master configuration pass def pytest_configure_node(node): """Configure node.""" # pylint: disable=W0613 if hasattr(__builtin__, "_EXDOC_EXCLUDE"): node.slaveinput["exclude"] = pickle.dumps(__builtin__._EXDOC_EXCLUDE) if hasattr(__builtin__, "_EXDOC_FULL_CNAME"): node.slaveinput["full_cname"] = pickle.dumps(__builtin__._EXDOC_FULL_CNAME) if hasattr(__builtin__, "_EXDOC_CALLABLES_FNAME"): node.slaveinput["callables_fname"] = pickle.dumps( __builtin__._EXDOC_CALLABLES_FNAME ) def pytest_testnodedown(node, error): """Integrate received exception handler form sub-process into main one.""" if error: raise RuntimeError("Slave node reported an error") if "msg" in node.slaveoutput: obj = pickle.loads(node.slaveoutput["msg"]) if not hasattr(__builtin__, "_EXH_LIST"): setattr(__builtin__, "_EXH_LIST", [obj]) else: getattr(__builtin__, "_EXH_LIST").append(obj) @pytest.fixture(autouse=True, scope="module") def exhobj(request): """ Get global exception handler in sub-process and send it after tests done. 
This fixture runs in the slave session with NO connection to master except through slaveinput/slaveoutput """ xdist_run = hasattr(request.config, "slaveinput") def fin(): """Tear down function.""" if hasattr(request.config, "slaveoutput") and hasattr( request.module.__builtin__, "_EXH" ): request.config.slaveoutput["msg"] = pickle.dumps( getattr(request.module.__builtin__, "_EXH") ) request.addfinalizer(fin) if xdist_run: # sub-process modname = "__builtin__" if sys.hexversion < 0x03000000 else "builtins" if not hasattr(request.module, "__builtin__"): setattr(request.module, "__builtin__", __import__(modname)) exclude = ( pickle.loads(request.config.slaveinput["exclude"]) if "exclude" in request.config.slaveinput else None ) full_cname = ( pickle.loads(request.config.slaveinput["full_cname"]) if "full_cname" in request.config.slaveinput else False ) callables_fname = ( pickle.loads(request.config.slaveinput["callables_fname"]) if "callables_fname" in request.config.slaveinput else None ) setattr( request.module.__builtin__, "_EXH", pexdoc.exh.ExHandle( full_cname=full_cname, exclude=exclude, callables_fname=callables_fname ), )
unknown
codeparrot/codeparrot-clean
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class AIXNetwork(GenericBsdIfconfigNetwork): """ This is the AIX Network Class. It uses the GenericBsdIfconfigNetwork unchanged. """ platform = 'AIX' def get_default_interfaces(self, route_path): interface = dict(v4={}, v6={}) netstat_path = self.module.get_bin_path('netstat') if netstat_path: rc, out, err = self.module.run_command([netstat_path, '-nr']) lines = out.splitlines() for line in lines: words = line.split() if len(words) > 1 and words[0] == 'default': if '.' 
in words[1]: interface['v4']['gateway'] = words[1] interface['v4']['interface'] = words[5] elif ':' in words[1]: interface['v6']['gateway'] = words[1] interface['v6']['interface'] = words[5] return interface['v4'], interface['v6'] # AIX 'ifconfig -a' does not have three words in the interface line def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'): interfaces = {} current_if = {} ips = dict( all_ipv4_addresses=[], all_ipv6_addresses=[], ) uname_rc = None uname_out = None uname_err = None uname_path = self.module.get_bin_path('uname') if uname_path: uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W']) rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options]) for line in out.splitlines(): if line: words = line.split() # only this condition differs from GenericBsdIfconfigNetwork if re.match(r'^\w*\d*:', line): current_if = self.parse_interface_line(words) interfaces[current_if['device']] = current_if elif words[0].startswith('options='): self.parse_options_line(words, current_if, ips) elif words[0] == 'nd6': self.parse_nd6_line(words, current_if, ips) elif words[0] == 'ether': self.parse_ether_line(words, current_if, ips) elif words[0] == 'media:': self.parse_media_line(words, current_if, ips) elif words[0] == 'status:': self.parse_status_line(words, current_if, ips) elif words[0] == 'lladdr': self.parse_lladdr_line(words, current_if, ips) elif words[0] == 'inet': self.parse_inet_line(words, current_if, ips) elif words[0] == 'inet6': self.parse_inet6_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) # don't bother with wpars it does not work # zero means not in wpar if not uname_rc and uname_out.split()[0] == '0': if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']): entstat_path = self.module.get_bin_path('entstat') if entstat_path: rc, out, err = self.module.run_command([entstat_path, current_if['device']]) if rc != 0: break for line in 
out.splitlines(): if not line: pass buff = re.match('^Hardware Address: (.*)', line) if buff: current_if['macaddress'] = buff.group(1) buff = re.match('^Device Type:', line) if buff and re.match('.*Ethernet', line): current_if['type'] = 'ether' # device must have mtu attribute in ODM if 'mtu' not in current_if: lsattr_path = self.module.get_bin_path('lsattr') if lsattr_path: rc, out, err = self.module.run_command([lsattr_path, '-El', current_if['device']]) if rc != 0: break for line in out.splitlines(): if line: words = line.split() if words[0] == 'mtu': current_if['mtu'] = words[1] return interfaces, ips # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here def parse_interface_line(self, words): device = words[0][0:-1] current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} current_if['flags'] = self.get_options(words[1]) current_if['macaddress'] = 'unknown' # will be overwritten later return current_if class AIXNetworkCollector(NetworkCollector): _fact_class = AIXNetwork _platform = 'AIX'
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Database\Eloquent\Casts; use BackedEnum; use Illuminate\Contracts\Database\Eloquent\Castable; use Illuminate\Contracts\Database\Eloquent\CastsAttributes; use Illuminate\Support\Collection; use function Illuminate\Support\enum_value; class AsEnumArrayObject implements Castable { /** * Get the caster class to use when casting from / to this cast target. * * @template TEnum of \UnitEnum * * @param array{class-string<TEnum>} $arguments * @return \Illuminate\Contracts\Database\Eloquent\CastsAttributes<\Illuminate\Database\Eloquent\Casts\ArrayObject<array-key, TEnum>, iterable<TEnum>> */ public static function castUsing(array $arguments) { return new class($arguments) implements CastsAttributes { protected $arguments; public function __construct(array $arguments) { $this->arguments = $arguments; } public function get($model, $key, $value, $attributes) { if (! isset($attributes[$key])) { return; } $data = Json::decode($attributes[$key]); if (! is_array($data)) { return; } $enumClass = $this->arguments[0]; return new ArrayObject((new Collection($data))->map(function ($value) use ($enumClass) { return is_subclass_of($enumClass, BackedEnum::class) ? $enumClass::from($value) : constant($enumClass.'::'.$value); })->toArray()); } public function set($model, $key, $value, $attributes) { if ($value === null) { return [$key => null]; } $storable = []; foreach ($value as $enum) { $storable[] = $this->getStorableEnumValue($enum); } return [$key => Json::encode($storable)]; } public function serialize($model, string $key, $value, array $attributes) { return (new Collection($value->getArrayCopy())) ->map(fn ($enum) => $this->getStorableEnumValue($enum)) ->toArray(); } protected function getStorableEnumValue($enum) { if (is_string($enum) || is_int($enum)) { return $enum; } return enum_value($enum); } }; } /** * Specify the Enum for the cast. 
* * @param class-string $class * @return string */ public static function of($class) { return static::class.':'.$class; } }
php
github
https://github.com/laravel/framework
src/Illuminate/Database/Eloquent/Casts/AsEnumArrayObject.php
''' Created on May 2, 2012 @package: superdesk source @copyright: 2012 Sourcefabric o.p.s. @license: http://www.gnu.org/licenses/gpl-3.0.txt @author: Gabriel Nistor Contains the SQL alchemy meta for source API. ''' from ..api.source import Source from sqlalchemy.dialects.mysql.base import INTEGER from sqlalchemy.orm import relationship from sqlalchemy.schema import Column, ForeignKey from sqlalchemy.types import String, Boolean from superdesk.meta.metadata_superdesk import Base from superdesk.source.meta.type import SourceTypeMapped from sqlalchemy.ext.associationproxy import association_proxy from ally.support.sqlalchemy.mapper import validate # -------------------------------------------------------------------- @validate(exclude=['Type']) class SourceMapped(Base, Source): ''' Provides the mapping for Source. ''' __tablename__ = 'source' __table_args__ = (dict(mysql_engine='InnoDB', mysql_charset='utf8'), ) Id = Column('id', INTEGER(unsigned=True), primary_key=True) Type = association_proxy('type', 'Key') Name = Column('name', String(255), nullable=False) URI = Column('uri', String(255), nullable=False) Key = Column('key', String(1024), nullable=True) IsModifiable = Column('modifiable', Boolean, nullable=False) OriginName = Column('origin_name', String(255), nullable=True) OriginURI = Column('origin_uri', String(255), nullable=True) # Non REST model attribute -------------------------------------- typeId = Column('fk_type_id', ForeignKey(SourceTypeMapped.id, ondelete='RESTRICT'), nullable=False) type = relationship(SourceTypeMapped, uselist=False, lazy='joined')
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import sys from typing import ( # NOQA Dict, Set, Type, ) import six from django.test import TestCase from django_extensions.management.commands import shell_plus class AutomaticShellPlusImportsTestCase(TestCase): def setUp(self): super(AutomaticShellPlusImportsTestCase, self).setUp() sys.stdout = six.StringIO() sys.stderr = six.StringIO() self.imported_objects = {} # type: Dict[str, Type] self.output = "" def get_all_names_for_class(self, model_to_find_occurrences): # type: (Type) -> Set[str] """ Returns all names under current class is imported. :param model_to_find_occurrences: class to find names :return: set of names under class is imported. """ result = set() for name, model_class in self.imported_objects.items(): if model_class == model_to_find_occurrences: result.add(name) return result def assert_imported_under_names(self, model_class, names_under_model_is_available): # type: (Type, Set[str]) -> () """ Function which asserts that class is available under given names and not available under any other name. :param model_class: class to assert availability. :param names_under_model_is_available: names under which class should be available. """ self.assertSetEqual(self.get_all_names_for_class(model_class), names_under_model_is_available) imports_output = self.output.split("from ") for line in imports_output: if line.startswith(model_class.__module__): for name in names_under_model_is_available: # assert that in print imports this model occurs only under names from parameter if name == model_class.__name__: expected_output = name else: expected_output = "%s (as %s)" % (model_class.__name__, name) line = line.replace(expected_output, '', 1) self.assertNotIn(line, model_class.__name__) def run_shell_plus(self): command = shell_plus.Command() self.imported_objects = command.get_imported_objects({}) self.output = sys.stdout.getvalue()
unknown
codeparrot/codeparrot-clean
# Integration checks for the --yaml output mode of the ansible-inventory CLI.
- vars:
    # Inventory parsed from the stdout of the most recent registered command.
    inv: '{{limited.stdout|from_yaml}}'
    output_dir: '{{ lookup("env", "OUTPUT_DIR", default=undef()) }}'
  delegate_to: localhost
  block:
    # Dump a known-good inventory to YAML as a baseline artifact.
    - name: check baseline
      shell: ansible-inventory -i '{{ role_path }}/files/valid_sample.yml' --list --yaml > '{{ output_dir }}/yaml_inv.yml'

    # The dumped YAML must itself be loadable as an inventory.
    - name: run validation playbook
      command: ansible-playbook -i '{{ output_dir }}/yaml_inv.yml' post_inventory.yml

    # Excluding every host via --limit '!something' should empty the output.
    - name: check that limit removes host
      command: ansible-inventory -i '{{ role_path }}/files/valid_sample.yml' --limit '!something' --list --yaml
      register: limited

    - name: ensure empty host list
      assert:
        that:
          - not inv

    # A host reachable through several parent groups must be listed only
    # under the group it is directly assigned to.
    - name: check dupes
      command: ansible-inventory -i '{{ role_path }}/files/complex.ini' --list --yaml
      register: limited

    - name: ensure host only appears on directly assigned
      assert:
        that:
          - "'hosts' not in inv['all']['children']['parent_1']"
          - "'hosts' not in inv['all']['children']['parent_2']"
          - "'hosts' in inv['all']['children']['parent_3']"
          - "'test1' in inv['all']['children']['parent_1']['children']['test_group1']['hosts']"
          - "'hosts' not in inv['all']['children']['parent_2']['children']['test_group1']"
unknown
github
https://github.com/ansible/ansible
test/integration/targets/ansible-inventory/tasks/yaml_output.yml
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "precomp.hpp"

#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/logger.hpp>

#include "utils/logtagmanager.hpp"
#include "utils/logtagconfigparser.hpp"

#include <sstream>
#include <iostream>
#include <fstream>
#include <atomic>

#ifdef __ANDROID__
# include <android/log.h>
#endif

namespace cv {
namespace utils {
namespace logging {

namespace internal {

// Combining several things that require static dynamic initialization in a
// well-defined order into a struct.
//
struct GlobalLoggingInitStruct
{
public:
#if defined NDEBUG
    static const bool m_isDebugBuild = false;
#else
    static const bool m_isDebugBuild = true;
#endif

public:
    static LogLevel m_defaultUnconfiguredGlobalLevel;

public:
    LogTagManager logTagManager;

    GlobalLoggingInitStruct()
        : logTagManager(m_defaultUnconfiguredGlobalLevel)
    {
        (void)getInitializationMutex();  // ensure initialization of global objects

        applyConfigString();
        handleMalformed();
    }

private:
    // Apply the OPENCV_LOG_LEVEL environment/configuration string, if any.
    void applyConfigString()
    {
        logTagManager.setConfigString(utils::getConfigurationParameterString("OPENCV_LOG_LEVEL", ""));
    }

    void handleMalformed()
    {
        // need to print warning for malformed log tag config strings?
        if (m_isDebugBuild)
        {
            const auto& parser = logTagManager.getConfigParser();
            if (parser.hasMalformed())
            {
                const auto& malformedList = parser.getMalformed();
                for (const auto& malformed : malformedList)
                {
                    std::cout << "Malformed log level config: \"" << malformed << "\"\n";
                }
                std::cout.flush();
            }
        }
    }
};

// Default global level when nothing is configured: INFO in debug builds,
// WARNING in release builds.
LogLevel GlobalLoggingInitStruct::m_defaultUnconfiguredGlobalLevel = GlobalLoggingInitStruct::m_isDebugBuild
    ? LOG_LEVEL_INFO
    : LOG_LEVEL_WARNING;

// Static dynamic initialization guard function for the combined struct
// just defined above
//
// An initialization guard function guarantees that outside code cannot
// accidentally see not-yet-dynamically-initialized data, by routing
// all outside access request to this function, so that this function
// has a chance to run the initialization code if necessary.
//
// An initialization guard function only guarantees initialization upon
// the first call to this function.
//
static GlobalLoggingInitStruct& getGlobalLoggingInitStruct()
{
    CV_SINGLETON_LAZY_INIT_REF(GlobalLoggingInitStruct, new GlobalLoggingInitStruct());
}

// To ensure that the combined struct defined above is initialized even
// if the initialization guard function wasn't called, a dummy static
// instance of a struct is defined below, which will call the
// initialization guard function.
//
struct GlobalLoggingInitCall
{
    GlobalLoggingInitCall()
    {
        getGlobalLoggingInitStruct();
        (void)getGlobalLogTag();  // complete initialization of logger structures
    }
};

static GlobalLoggingInitCall globalLoggingInitCall;

// Accessor for the singleton tag manager (guarded initialization).
static LogTagManager& getLogTagManager()
{
    static LogTagManager& logTagManagerInstance = getGlobalLoggingInitStruct().logTagManager;
    return logTagManagerInstance;
}

// Reference to the mutable level of the "global" log tag.
static LogLevel& getLogLevelVariable()
{
    static LogLevel& refGlobalLogLevel = getGlobalLogTag()->level;
    return refGlobalLogLevel;
}

// Returns the process-wide "global" log tag (never null after init).
LogTag* getGlobalLogTag()
{
    static LogTag* globalLogTagPtr = getGlobalLoggingInitStruct().logTagManager.get("global");
    return globalLogTagPtr;
}

} // namespace

// Register a user-defined log tag; silently ignores null tag or null name.
void registerLogTag(LogTag* plogtag)
{
    if (!plogtag || !plogtag->name)
    {
        return;
    }
    internal::getLogTagManager().assign(plogtag->name, plogtag);
}

// Set the level of a tag addressed by its full name; no-op on null tag.
void setLogTagLevel(const char* tag, LogLevel level)
{
    if (!tag)
    {
        return;
    }
    internal::getLogTagManager().setLevelByFullName(std::string(tag), level);
}

// Query a tag's level; unknown or null tags fall back to the global level.
LogLevel getLogTagLevel(const char* tag)
{
    if (!tag)
    {
        return getLogLevel();
    }
    const LogTag* ptr = internal::getLogTagManager().get(std::string(tag));
    if (!ptr)
    {
        return getLogLevel();
    }
    return ptr->level;
}

LogLevel setLogLevel(LogLevel logLevel)
{
    // note: not thread safe, use sparingly and do not critically depend on outcome
    LogLevel& refGlobalLevel = internal::getLogLevelVariable();
    const LogLevel old = refGlobalLevel;
    refGlobalLevel = logLevel;
    return old;
}

LogLevel getLogLevel()
{
    return internal::getLogLevelVariable();
}

namespace internal {

namespace //unnamed
{
    // User-installed replacements for the default writers (null when unset).
    std::atomic<WriteLogMessageFuncType> stc_userWriteLogMessageFunc{};
    std::atomic<WriteLogMessageExFuncType> stc_userWriteLogMessageExFunc{};
} //unnamed

// Bitmask of timestamp options: bit 0 = seconds timestamp enabled,
// bit 1 = nanosecond timestamp enabled (both read once from configuration).
static int getShowTimestampMode()
{
    static bool param_timestamp_enable = utils::getConfigurationParameterBool("OPENCV_LOG_TIMESTAMP", true);
    static bool param_timestamp_ns_enable = utils::getConfigurationParameterBool("OPENCV_LOG_TIMESTAMP_NS", false);
    return (param_timestamp_enable ? 1 : 0) + (param_timestamp_ns_enable ? 2 : 0);
}

// Default message writer: formats "[LEVEL:thread@timestamp] message" and
// sends it to stderr (WARNING and worse, flushed) or stdout (INFO and below).
// Delegates to the user-installed writer when one is registered.
void writeLogMessage(LogLevel logLevel, const char* message)
{
    WriteLogMessageFuncType userFunc = stc_userWriteLogMessageFunc.load();
    if (userFunc && userFunc != writeLogMessage)
    {
        (*userFunc)(logLevel, message);
        return;
    }

    const int threadID = cv::utils::getThreadID();

    std::string message_id;
    switch (getShowTimestampMode())
    {
        case 1: message_id = cv::format("%d@%0.3f", threadID, getTimestampNS() * 1e-9); break;
        case 1+2: message_id = cv::format("%d@%llu", threadID, (long long unsigned int)getTimestampNS()); break;
        default: message_id = cv::format("%d", threadID); break;
    }

    std::ostringstream ss;
    switch (logLevel)
    {
    case LOG_LEVEL_FATAL:   ss << "[FATAL:" << message_id << "] " << message << std::endl; break;
    case LOG_LEVEL_ERROR:   ss << "[ERROR:" << message_id << "] " << message << std::endl; break;
    case LOG_LEVEL_WARNING: ss << "[ WARN:" << message_id << "] " << message << std::endl; break;
    case LOG_LEVEL_INFO:    ss << "[ INFO:" << message_id << "] " << message << std::endl; break;
    case LOG_LEVEL_DEBUG:   ss << "[DEBUG:" << message_id << "] " << message << std::endl; break;
    case LOG_LEVEL_VERBOSE: ss << message << std::endl; break;
    case LOG_LEVEL_SILENT: return;  // avoid compiler warning about incomplete switch
    case ENUM_LOG_LEVEL_FORCE_INT: return;  // avoid compiler warning about incomplete switch
    }
#ifdef __ANDROID__
    int android_logLevel = ANDROID_LOG_INFO;
    switch (logLevel)
    {
    case LOG_LEVEL_FATAL:   android_logLevel = ANDROID_LOG_FATAL; break;
    case LOG_LEVEL_ERROR:   android_logLevel = ANDROID_LOG_ERROR; break;
    case LOG_LEVEL_WARNING: android_logLevel = ANDROID_LOG_WARN; break;
    case LOG_LEVEL_INFO:    android_logLevel = ANDROID_LOG_INFO; break;
    case LOG_LEVEL_DEBUG:   android_logLevel = ANDROID_LOG_DEBUG; break;
    case LOG_LEVEL_VERBOSE: android_logLevel = ANDROID_LOG_VERBOSE; break;
    default: break;
    }
    __android_log_print(android_logLevel, "OpenCV/" CV_VERSION, "%s", ss.str().c_str());
#endif
    std::ostream* out = (logLevel <= LOG_LEVEL_WARNING) ? &std::cerr : &std::cout;
    (*out) << ss.str();
    if (logLevel <= LOG_LEVEL_WARNING)
    {
        (*out) << std::flush;
    }
}

// Returns a pointer to the character after the last path separator in
// 'file' (i.e. the bare file name), or 'file' itself when no separator
// exists or the separator is the last character.
static const char* stripSourceFilePathPrefix(const char* file)
{
    CV_Assert(file);
    const char* pos = file;
    const char* strip_pos = NULL;
    char ch = 0;
    while ((ch = pos[0]) != 0)
    {
        ++pos;
        if (ch == '/' || ch == '\\')
            strip_pos = pos;
    }
    if (strip_pos == NULL || strip_pos == pos/*eos*/)
        return file;
    return strip_pos;
}

// Extended writer: prepends tag, "file:line" (path prefix stripped) and
// function name to the message, then delegates to writeLogMessage.
// A user-installed extended writer takes over the whole call when present.
void writeLogMessageEx(LogLevel logLevel, const char* tag, const char* file, int line, const char* func, const char* message)
{
    WriteLogMessageExFuncType userFunc = stc_userWriteLogMessageExFunc.load();
    if (userFunc && userFunc != writeLogMessageEx)
    {
        (*userFunc)(logLevel, tag, file, line, func, message);
        return;
    }

    std::ostringstream strm;
    if (tag)
    {
        strm << tag << ' ';
    }
    if (file)
    {
        strm << stripSourceFilePathPrefix(file);
        if (line > 0)
        {
            strm << ':' << line;
        }
        strm << ' ';
    }
    if (func)
    {
        strm << func << ' ';
    }
    strm << message;
    writeLogMessage(logLevel, strm.str().c_str());
}

// Install a replacement basic writer; passing the default writer itself
// (or null) restores default behavior.
void replaceWriteLogMessage(WriteLogMessageFuncType f)
{
    if (f == writeLogMessage)
    {
        f = nullptr;
    }
    stc_userWriteLogMessageFunc.store(f);
}

// Install a replacement extended writer; passing the default writer itself
// (or null) restores default behavior.
void replaceWriteLogMessageEx(WriteLogMessageExFuncType f)
{
    if (f == writeLogMessageEx)
    {
        f = nullptr;
    }
    stc_userWriteLogMessageExFunc.store(f);
}

} // namespace

}}} // namespace
cpp
github
https://github.com/opencv/opencv
modules/core/src/logger.cpp
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>). # All Rights Reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## # Many thanks to our contributors # # Kaspars Vilkens (KNdati): lenghty discussions, bugreports and bugfixes # Stefan Rijnhart (Therp): bugreport and bugfix # CLEARCORP S.A: Customization and migration to OpenERP 7.0 ''' This module contains the business logic of the wizard account_banking_import. The parsing is done in the parser modules. Every parser module is required to use parser.models as a mean of communication with the business logic. ''' from openerp.osv import osv, fields import time import netsvc import base64 import datetime from tools import config from tools.translate import _ from account_banking_ccorp.parsers import models from account_banking_ccorp.parsers.convert import * from account_banking_ccorp.struct import struct from account_banking_ccorp import sepa from banktools import * import decimal_precision as dp import re bt = models.mem_bank_transaction # This variable is used to match supplier invoices with an invoice date after # the real payment date. This can occur with online transactions (web shops). 
payment_window = datetime.timedelta(days=10) def parser_types(*args, **kwargs): '''Delay evaluation of parser types until start of wizard, to allow depending modules to initialize and add their parsers to the list ''' return models.parser_type.get_parser_types() class bankImportLine(osv.TransientModel): _name = 'account.banking.ccorp.bank.import.line' _description = 'Bank import lines' _columns = { 'name': fields.char('Name', size=64), 'date': fields.date('Date', readonly=True), 'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')), 'statement_line_id': fields.many2one('account.bank.statement.line', 'Resulting statement line', readonly=True), 'type': fields.selection([('supplier','Supplier'), ('customer','Customer'), ('general','General')], 'Type', required=True), 'partner_id': fields.many2one('res.partner', 'Partner'), 'statement_id': fields.many2one('account.bank.statement', 'Statement', select=True, required=True, ondelete='cascade'), 'ref': fields.char('Reference', size=32), 'note': fields.text('Notes'), 'period_id': fields.many2one('account.period', 'Period'), 'currency': fields.many2one('res.currency', 'Currency'), 'banking_import_id': fields.many2one('account.banking.ccorp.bank.import.wizard', 'Bank import', readonly=True, ondelete='cascade'), 'reconcile_id': fields.many2one('account.move.reconcile', 'Reconciliaton'), 'account_id': fields.many2one('account.account', 'Account'), 'invoice_ids': fields.many2many('account.invoice', 'banking_import_line_invoice_rel', 'line_id', 'invoice_id'), 'payment_order_id': fields.many2one('payment.order', 'Payment order'), 'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account'), 'transaction_type': fields.selection([ # TODO: payment terminal etc... 
('invoice', 'Invoice payment'), ('payment_order_line', 'Payment from a payment order'), ('payment_order', 'Aggregate payment order'), ('storno', 'Canceled debit order'), ('bank_costs', 'Bank costs'), ('unknown', 'Unknown'), ], 'Transaction type'), 'duplicate': fields.boolean('Duplicate'), } class bankImportWizard(osv.TransientModel): _name = 'account.banking.ccorp.bank.import.wizard' def import_statements_file(self, cr, uid, ids, context): ''' Import bank statements / bank transactions file. This method is a wrapper for the business logic on the transaction. The parser modules represent the decoding logic. ''' #Get the wizard record and binary file bank_import_wizard = self.browse(cr, uid, ids, context)[0] #Get the binary file without encoding statements_file = bank_import_wizard.file user_obj = self.pool.get('res.users') company_obj = self.pool.get('res.company') statement_obj = self.pool.get('account.bank.statement') statement_file_obj = self.pool.get('account.banking.ccorp.imported.file') import_transaction_obj = self.pool.get('account.banking.ccorp.bank.import.transaction') period_obj = self.pool.get('account.period') # Get the parser to parse the file according to #the selected account parser_code = bank_import_wizard.parser parser = models.create_parser(parser_code) if not parser: raise osv.except_osv( _('ERROR!'), _('Unable to import parser %(parser)s. Parser class not found.') % {'parser': parser_code} ) # Get the user and company #Fix the problem with permission of the user user = user_obj.browse(cr, uid, uid, context) company = bank_import_wizard.company ''' PASS THE PARAMETERS TO PARSER. IT'S MORE EASY AND CLEAR PASS FROM HERE. The object bank_import_wizard have all the fields in the wizard. For BAC and BCR parsers it's not needed. ''' #account_number used if the file haven't one. #Really needed? 
account_number = self.extract_number(bank_import_wizard.account_bank.acc_number) #local_currency extracted from account_bank if bank_import_wizard.account_bank.currency_id: local_currency = bank_import_wizard.account_bank.currency_id.name else: local_currency = 'CRC' #TODO: Fields used only by the davivienda's parser, must be removed and added #in the custom parser's module date_from_str = bank_import_wizard.date_from date_to_str = bank_import_wizard.date_to #TODO: Fields used only by the BNCR's parser, must be removed and added #in the custom parser's module #In BNCR's parser the file does not have the initial balance, #the extract must have a initial or ending balance. #With ending_balance compute the initial_balance ending_balance = float(bank_import_wizard.ending_balance) ''' For BCR and BAC parsers. (this parsers don't need account_number, local_currency, date_from_str, date_to_str parameters, because the file have this information. The file for Davivienda don't have that information. To prevent other parsers not work with this specific parameter passing, ** kwargs is used, which allows dynamically pass parameters to functions. In all parsers must specify this parameter, if required pass parameters, passed through this dictionary, but the method is unknown and there is no need to change the original format parsers. account_number=account_number, local_currency=local_currency, date_from_str=date_from_str, date_to_str=date_to_str = pass through **kwargs and extract kwargs['parameter_name]. parse is a generic name method that is used by all the parsers. When is selected the parser, call the method that corresponds to parser and execute the other methods. Not recommended change the name for this method, because should create a specific wizard for each parser that is created. ''' ''' It changes the way file encoding. Now it's parser was chosen and not the wizard. 
The parser has a field (type_file) which is a list of file types supported by each of the types of parser The statements is the file without encondig. Change parameter data for statements_file and the parser encoding the file.''' #Call the parser's parse method. statements = parser.parse(cr, statements_file, account_number = account_number, local_currency = local_currency, date_from_str = date_from_str, date_to_str = date_to_str,ending_balance = ending_balance, real_account= bank_import_wizard.account_bank.acc_number) if any([x for x in statements if not x.is_valid()]): raise osv.except_osv( _('ERROR!'), _('The imported statements appear to be invalid! Check your file.')) # Create the file now, as the statements need to be linked to it import_id = statement_file_obj.create(cr, uid, dict(company_id = company.id, file = statements_file, state = 'unfinished', format = parser.name, bank_id = bank_import_wizard.account_bank.id)) #Extract country_code from parser if available bank_country_code = False if hasattr(parser, 'country_code'): bank_country_code = parser.country_code # Caching info = {} imported_statement_ids = [] transaction_ids = [] for statement in statements: # Create fallback currency code currency_code = statement.local_currency or company.currency_id.name # Obtain account_info # Check cache for account info/currency if statement.local_account in info and \ currency_code in info[statement.local_account]: account_info = info[statement.local_account][currency_code] else: # Pull account info/currency account_info = None try: account_info = get_company_bank_account( self.pool, cr, uid, statement.local_account, statement.local_currency, company, bank_import_wizard.account_bank) except Exception as exception_obj: msg = exception_obj.args raise osv.except_osv(_('ERROR!'),msg) if not account_info: raise osv.except_osv( _('ERROR!'), _('Statements found for unknown account %(bank_account)s') % {'bank_account': statement.local_account}) #Check for journal in 
account_info if 'journal_id' not in account_info.keys(): raise osv.except_osv( _('ERROR!'), _('Statements found for account %(bank_account)s, ' 'but no default journal was defined.') % {'bank_account': statement.local_account}) # Get required currency code overwriting fallback currency_code = account_info.currency_id.name # Cache results if not statement.local_account in info: info[statement.local_account] = { currency_code: account_info } else: info[statement.local_account][currency_code] = account_info # Account_info obtained and cached # Final check: no coercion of currencies! if statement.local_currency \ and account_info.currency_id.name != statement.local_currency: # TODO: convert currencies? raise osv.except_osv( _('ERROR!'), _('Statement %(statement_id)s for account %(bank_account)s' ' uses different currency than the defined bank journal.') % { 'bank_account': statement.local_account, 'statement_id': statement.id }) # Check existence of previous statement # Less well defined formats can resort to a # dynamically generated statement identification # (e.g. 
a datetime string of the moment of import) # and have potential duplicates flagged by the # matching procedure statement_ids = statement_obj.search(cr, uid, [('name', '=', statement.id), ('date', '=', date2str(statement.date)),]) if statement_ids: raise osv.except_osv( _('ERROR!'), _('Statement %(id)s known - skipped') % {'id': statement.id}) # Get the period for the statement (as bank statement object checks this) period_ids = period_obj.search(cr, uid, [('company_id','=',company.id), ('date_start','<=',statement.date), ('date_stop','>=',statement.date), ('special', '=', False)]) if not period_ids: raise osv.except_osv( _('ERROR!'), _('No period found covering statement date %s, ' 'statement %s') % (statement.date.strftime('%Y-%m-%d'), statement.id)) # Create the bank statement record statement_id = statement_obj.create(cr, uid, dict( name = statement.id, journal_id = account_info.journal_id.id, date = date2str(statement.date), balance_start = statement.start_balance, balance_end_real = statement.end_balance, balance_end = statement.end_balance, state = 'draft', user_id = uid, banking_id = import_id, company_id = company.id, period_id = period_ids[0],)) imported_statement_ids.append(statement_id) # Process Transactions subno = 0 for transaction in statement.transactions: subno += 1 #Assign Transaction number if not available if not transaction.id: transaction.id = str(subno) values = {} for attr in transaction.__slots__ + ['type']: if attr in import_transaction_obj.column_map: values[import_transaction_obj.column_map[attr]] = eval('transaction.%s' % attr) elif attr in import_transaction_obj._columns: values[attr] = eval('transaction.%s' % attr) values['statement_id'] = statement_id values['bank_country_code'] = bank_country_code values['local_account'] = statement.local_account values['local_currency'] = statement.local_currency transaction_id = import_transaction_obj.create(cr, uid, values, context=context) if transaction_id: 
transaction_ids.append(transaction_id) else: raise osv.except_osv( _('ERROR!'), _('Failed to create an import transaction resource')) import_transaction_obj.match(cr, uid, transaction_ids, bank_import_wizard.account_bank, context=context) #recompute statement end_balance for validation statement_obj.button_dummy(cr, uid, imported_statement_ids, context=context) # Update Imported File state = 'ready' statement_file_obj.write(cr, uid, import_id, dict(state = state), context) if not imported_statement_ids:# or not results.trans_loaded_cnt: # file state can be 'ready' while import state is 'error' state = 'error' self.write(cr, uid, [ids[0]], dict( import_id = import_id, state = state, statement_ids = [(6, 0, imported_statement_ids)], ), context) return { 'name': (state == 'ready' and _('Review Bank Statements') or _('Error')), 'view_type': 'form', 'view_mode': 'form', 'view_id': False, 'res_model': self._name, 'domain': [], 'context': dict(context, active_ids=ids), 'type': 'ir.actions.act_window', 'target': 'new', 'res_id': ids[0] or False, } _columns = { 'company': fields.many2one('res.company', 'Company', required=True, states = { 'ready': [('readonly', True)], 'error': [('readonly', True)], }), 'file': fields.binary('Statements File', states = { 'ready': [('readonly', True)], 'error': [('readonly', True)], }), 'parser': fields.selection(parser_types, 'File Format', states = { 'ready': [('readonly', True)], 'error': [('readonly', True)], }), 'state': fields.selection([('init', 'init'), ('ready', 'ready'), ('error', 'error')], 'State', readonly=True), 'import_id': fields.many2one('account.banking.ccorp.imported.file', 'Import File'), 'statement_ids': fields.many2many('account.bank.statement', 'rel_wiz_statements', 'wizard_id','statement_id', 'Imported Bank Statements'), 'line_ids': fields.one2many('account.banking.ccorp.bank.import.line', 'banking_import_id', 'Transactions'), 'account_bank': fields.many2one('res.partner.bank', 'Account', states = { 'ready': 
[('readonly', True)], 'error': [('readonly', True)], }), 'date_from':fields.date('Date from', states = { 'ready': [('readonly', True)], 'error': [('readonly', True)] }), 'date_to':fields.date('Date to', states = { 'ready': [('readonly', True)], 'error': [('readonly', True)] }), 'ending_balance': fields.float('Ending balance', digits=(16, 2), states = { 'ready': [('readonly', True)], 'error': [('readonly', True)], }), } #Used on view onchange event def onchange_account_parser(self, cr, uid, ids, account_bank=False, context=None): if account_bank: account_parser = self.pool.get('res.partner.bank').browse(cr, uid, account_bank, context=context).parser_types if account_parser: return {'value':{'parser': account_parser}} return {'value':{'parser': False}} def extract_number( self, account_number ): ''' Extracts symbols from account_number using regular expression r'[0-9]+' ''' cad = '' result = re.findall(r'[0-9]+', account_number) for character in result: cad = cad + character return cad _defaults = { 'state': 'init', 'ending_balance': 0.0, 'company': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'bank.import.transaction', context=c), }
unknown
codeparrot/codeparrot-clean
// Copyright 2017 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package integration import ( "fmt" "testing" "time" "github.com/stretchr/testify/require" pb "go.etcd.io/etcd/api/v3/etcdserverpb" epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" "go.etcd.io/etcd/tests/v3/framework/integration" ) // TestV3ElectionCampaign checks that Campaign will not give // simultaneous leadership to multiple campaigners. func TestV3ElectionCampaign(t *testing.T) { integration.BeforeTest(t) clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(t.Context(), &pb.LeaseGrantRequest{TTL: 30}) require.NoError(t, err1) lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(t.Context(), &pb.LeaseGrantRequest{TTL: 30}) require.NoError(t, err2) lc := integration.ToGRPC(clus.Client(0)).Election req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")} l1, lerr1 := lc.Campaign(t.Context(), req1) require.NoError(t, lerr1) campaignc := make(chan struct{}) go func() { defer close(campaignc) req2 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease2.ID, Value: []byte("def")} l2, lerr2 := lc.Campaign(t.Context(), req2) if lerr2 != nil { t.Error(lerr2) } if l1.Header.Revision >= l2.Header.Revision { t.Errorf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision) } 
}() select { case <-time.After(200 * time.Millisecond): case <-campaignc: t.Fatalf("got leadership before resign") } _, uerr := lc.Resign(t.Context(), &epb.ResignRequest{Leader: l1.Leader}) require.NoError(t, uerr) select { case <-time.After(200 * time.Millisecond): t.Fatalf("campaigner unelected after resign") case <-campaignc: } lval, lverr := lc.Leader(t.Context(), &epb.LeaderRequest{Name: []byte("foo")}) require.NoError(t, lverr) if string(lval.Kv.Value) != "def" { t.Fatalf("got election value %q, expected %q", string(lval.Kv.Value), "def") } } // TestV3ElectionObserve checks that an Observe stream receives // proclamations from different leaders uninterrupted. func TestV3ElectionObserve(t *testing.T) { integration.BeforeTest(t) clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) lc := integration.ToGRPC(clus.Client(0)).Election // observe leadership events observec := make(chan struct{}, 1) go func() { defer close(observec) s, err := lc.Observe(t.Context(), &epb.LeaderRequest{Name: []byte("foo")}) observec <- struct{}{} if err != nil { t.Error(err) } for i := 0; i < 10; i++ { resp, rerr := s.Recv() if rerr != nil { t.Error(rerr) } respV := 0 fmt.Sscanf(string(resp.Kv.Value), "%d", &respV) // leader transitions should not go backwards if respV < i { t.Errorf(`got observe value %q, expected >= "%d"`, string(resp.Kv.Value), i) } i = respV } }() select { case <-observec: case <-time.After(time.Second): t.Fatalf("observe stream took too long to start") } lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(t.Context(), &pb.LeaseGrantRequest{TTL: 30}) require.NoError(t, err1) c1, cerr1 := lc.Campaign(t.Context(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("0")}) require.NoError(t, cerr1) // overlap other leader so it waits on resign leader2c := make(chan struct{}) go func() { defer close(leader2c) lease2, err2 := 
integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(t.Context(), &pb.LeaseGrantRequest{TTL: 30}) if err2 != nil { t.Error(err2) } c2, cerr2 := lc.Campaign(t.Context(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease2.ID, Value: []byte("5")}) if cerr2 != nil { t.Error(cerr2) } for i := 6; i < 10; i++ { v := []byte(fmt.Sprintf("%d", i)) req := &epb.ProclaimRequest{Leader: c2.Leader, Value: v} if _, err := lc.Proclaim(t.Context(), req); err != nil { t.Error(err) } } }() for i := 1; i < 5; i++ { v := []byte(fmt.Sprintf("%d", i)) req := &epb.ProclaimRequest{Leader: c1.Leader, Value: v} _, err := lc.Proclaim(t.Context(), req) require.NoError(t, err) } // start second leader lc.Resign(t.Context(), &epb.ResignRequest{Leader: c1.Leader}) select { case <-observec: case <-time.After(time.Second): t.Fatalf("observe did not observe all events in time") } <-leader2c }
go
github
https://github.com/etcd-io/etcd
tests/integration/v3election_grpc_test.go
#!/bin/env python

# Automatically translated python version of
# OpenSceneGraph example program "osgwidgetshader"
# !!! This program will need manual tuning before it will work. !!!

import sys

# FIX: the original translation imported only osgDB and osgWidget, but the
# code below also uses osg (Program/Shader) and osgViewer (Viewer), which
# raised NameError at runtime. Import them as well.
# NOTE(review): assumes the osgpypp package exposes 'osg' and 'osgViewer'
# submodules like it does 'osgDB'/'osgWidget' — confirm against osgpypp.
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgViewer
from osgpypp import osgWidget

# Translated from file 'osgwidgetshader.cpp'

# -*-c++-*- osgWidget - Code by: Jeremy Moles (cubicool) 2007-2008
# $Id: osgwidgetshader.cpp 28 2008-03-26 15:26:48Z cubicool $

#include <osgDB/FileUtils>
#include <osgWidget/Util>
#include <osgWidget/WindowManager>
#include <osgWidget/Canvas>

# Event mask under which the 2D widget layer receives input.
MASK_2D = 0xF0000000


def createWidget(name, col, layer):
    """Create a 200x200 grey widget with the given name, shade and layer."""
    widget = osgWidget.Widget(name, 200.0, 200.0)
    widget.setColor(col, col, col, 0.2)
    widget.setLayer(layer)
    return widget


def main(argv):
    """Build a canvas of three widgets, attach the shader program and run."""
    viewer = osgViewer.Viewer()

    wm = osgWidget.WindowManager(
        viewer,
        1280.0,
        1024.0,
        MASK_2D
    )

    canvas = osgWidget.Canvas("canvas")

    canvas.attachMoveCallback()
    canvas.attachScaleCallback()

    canvas.addWidget(
        createWidget("w1", 0.2, osgWidget.Widget.LAYER_LOW),
        0.0,
        0.0
    )

    canvas.addWidget(
        createWidget("w2", 0.4, osgWidget.Widget.LAYER_MIDDLE),
        200.0,
        0.0
    )

    canvas.addWidget(
        createWidget("w3", 0.6, osgWidget.Widget.LAYER_HIGH),
        400.0,
        0.0
    )

    wm.addChild(canvas)

    # Vertex + fragment shaders located via the OSG data file search path.
    program = osg.Program()

    program.addShader(osg.Shader.readShaderFile(
        osg.Shader.VERTEX,
        osgDB.findDataFile("osgWidget/osgwidgetshader-vert.glsl")
    ))

    program.addShader(osg.Shader.readShaderFile(
        osg.Shader.FRAGMENT,
        osgDB.findDataFile("osgWidget/osgwidgetshader-frag.glsl")
    ))

    canvas.getGeode().getOrCreateStateSet().setAttribute(program)

    return osgWidget.createExample(viewer, wm)


if __name__ == "__main__":
    # Propagate the example's return code as the process exit status.
    sys.exit(main(sys.argv))
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true Before do FileUtils.rm_rf(Paths.test_dir) if Paths.test_dir.exist? FileUtils.mkdir_p(Paths.test_dir) unless Paths.test_dir.directory? Dir.chdir(Paths.test_dir) @timezone_before_scenario = ENV["TZ"] end # After do FileUtils.rm_rf(Paths.test_dir) if Paths.test_dir.exist? Paths.output_file.delete if Paths.output_file.exist? Paths.status_file.delete if Paths.status_file.exist? Dir.chdir(Paths.test_dir.parent) ENV["TZ"] = @timezone_before_scenario end # Given(%r!^I have a blank site in "(.*)"$!) do |path| unless File.exist?(path) then FileUtils.mkdir_p(path) end end # Given(%r!^I do not have a "(.*)" directory$!) do |path| Paths.test_dir.join(path).directory? end # Given(%r!^I have an? "(.*)" page(?: with (.*) "(.*)")? that contains "(.*)"$!) do |file, key, value, text| File.write(file, <<~DATA) --- #{key || "layout"}: #{value || "none"} --- #{text} DATA end # Given(%r!^I have an? "(.*)" file that contains "(.*)"$!) do |file, text| File.write(file, text) end # Given(%r!^I have an? (.*) (layout|theme) that contains "(.*)"$!) do |name, type, text| folder = type == "layout" ? "_layouts" : "_theme" destination_file = Pathname.new(File.join(folder, "#{name}.html")) FileUtils.mkdir_p(destination_file.parent) unless destination_file.parent.directory? File.write(destination_file, text) end # Given(%r!^I have an? "(.*)" file with content:$!) do |file, text| File.write(file, text) end # Given(%r!^I have an? "(.*)" page with content:$!) do |file, text| File.write(file, <<~DATA) --- --- #{text} DATA end # Given(%r!^I have an? (.*) directory$!) do |dir| unless File.directory?(dir) then FileUtils.mkdir_p(dir) end end # Given(%r!^I have the following (draft|page|post)s?(?: (in|under) "([^"]+)")?:$!) 
do |status, direction, folder, table| table.hashes.each do |input_hash| title = slug(input_hash["title"]) ext = input_hash["type"] || "markdown" filename = "#{title}.#{ext}" if %w(draft page).include?(status) before, after = location(folder, direction) dest_folder = "_drafts" if status == "draft" dest_folder = "_posts" if status == "post" dest_folder = "" if status == "page" if status == "post" parsed_date = Time.xmlschema(input_hash["date"]) rescue Time.parse(input_hash["date"]) input_hash["date"] = parsed_date filename = "#{parsed_date.strftime("%Y-%m-%d")}-#{title}.#{ext}" end path = File.join(before, dest_folder, after, filename) File.write(path, file_content_from_hash(input_hash)) end end # Given(%r!^I have the following (draft|post)s? within the "(.*)" directory:$!) do |type, folder, table| table.hashes.each do |input_hash| title = slug(input_hash["title"]) parsed_date = Time.xmlschema(input_hash["date"]) rescue Time.parse(input_hash["date"]) filename = type == "draft" ? "#{title}.markdown" : "#{parsed_date.strftime("%Y-%m-%d")}-#{title}.markdown" path = File.join(folder, "_#{type}s", filename) File.write(path, file_content_from_hash(input_hash)) end end # Given(%r!^I have the following documents? under the (.*) collection:$!) do |folder, table| table.hashes.each do |input_hash| title = slug(input_hash["title"]) filename = "#{title}.md" dest_folder = "_#{folder}" path = File.join(dest_folder, filename) File.write(path, file_content_from_hash(input_hash)) end end # Given(%r!^I have the following documents? under the "(.*)" collection within the "(.*)" directory:$!) do |label, dir, table| table.hashes.each do |input_hash| title = slug(input_hash["title"]) path = File.join(dir, "_#{label}", "#{title}.md") File.write(path, file_content_from_hash(input_hash)) end end # Given(%r!^I have the following documents? nested inside "(.*)" directory under the "(.*)" collection within the "(.*)" directory:$!) 
do |subdir, label, dir, table| table.hashes.each do |input_hash| title = slug(input_hash["title"]) path = File.join(dir, "_#{label}", subdir, "#{title}.md") File.write(path, file_content_from_hash(input_hash)) end end # Given(%r!^I have a configuration file with "(.*)" set to "(.*)"$!) do |key, value| config = \ if source_dir.join("_config.yml").exist? SafeYAML.load_file(source_dir.join("_config.yml")) else {} end config[key] = SafeYAML.load(value) Jekyll.set_timezone(value) if key == "timezone" File.write("_config.yml", YAML.dump(config)) end # Given(%r!^I have a configuration file with:$!) do |table| table.hashes.each do |row| step %(I have a configuration file with "#{row["key"]}" set to "#{row["value"]}") end end # Given(%r!^I have a configuration file with "([^\"]*)" set to:$!) do |key, table| File.open("_config.yml", "w") do |f| f.write("#{key}:\n") table.hashes.each do |row| f.write("- #{row["value"]}\n") end end end # Given(%r!^I have fixture collections(?: in "(.*)" directory)?$!) do |directory| collections_dir = File.join(source_dir, directory.to_s) FileUtils.cp_r Paths.source_dir.join("test", "source", "_methods"), collections_dir FileUtils.cp_r Paths.source_dir.join("test", "source", "_thanksgiving"), collections_dir FileUtils.cp_r Paths.source_dir.join("test", "source", "_tutorials"), collections_dir end # Given(%r!^I wait (\d+) second(s?)$!) do |time, _| sleep(time.to_f) end # When(%r!^I run jekyll(.*)$!) do |args| run_jekyll(args) if args.include?("--verbose") || ENV["DEBUG"] warn "\n#{jekyll_run_output}\n" end end # When(%r!^I run bundle(.*)$!) do |args| run_bundle(args) if args.include?("--verbose") || ENV["DEBUG"] warn "\n#{jekyll_run_output}\n" end end # When(%r!^I run gem(.*)$!) do |args| run_rubygem(args) if args.include?("--verbose") || ENV["DEBUG"] warn "\n#{jekyll_run_output}\n" end end # When(%r!^I run git add .$!) do run_in_shell("git", "add", ".", "--verbose") end # When(%r!^I decide to build the theme gem$!) 
do Dir.chdir(Paths.theme_gem_dir) [ "_includes/blank.html", "_sass/blank.scss", "assets/blank.scss", "_config.yml" ].each do |filename| File.new(filename, "w") end end # When(%r!^I change "(.*)" to contain "(.*)"$!) do |file, text| File.open(file, "a") do |f| f.write(text) end end # When(%r!^I delete the file "(.*)"$!) do |file| File.delete(file) end # Then(%r!^the (.*) directory should +(not )?exist$!) do |dir, negative| if negative.nil? expect(Pathname.new(dir)).to exist else expect(Pathname.new(dir)).to_not exist end end # Then(%r!^I should (not )?see "(.*)" in "(.*)"$!) do |negative, text, file| step %(the "#{file}" file should exist) regexp = Regexp.new(text, Regexp::MULTILINE) if negative.nil? || negative.empty? expect(file_contents(file)).to match regexp else expect(file_contents(file)).not_to match regexp end end # Then(%r!^I should (not )?see "(.*)" in "(.*)" if platform does not support symlinks$!) do |negative, text, file| step %(the "#{file}" file should exist) regexp = Regexp.new(text, Regexp::MULTILINE) if negative.nil? || negative.empty? if Platform.supports_symlink? expect(file_contents(file)).not_to match regexp else expect(file_contents(file)).to match regexp end end end # Then(%r!^I should (not )?see "(.*)" in "(.*)" if platform supports symlinks$!) do |negative, text, file| step %(the "#{file}" file should exist) regexp = Regexp.new(text, Regexp::MULTILINE) if negative.nil? || negative.empty? if Platform.supports_symlink? expect(file_contents(file)).to match regexp else expect(file_contents(file)).not_to match regexp end end end # Then(%r!^I should see date "(.*)" in "(.*)" unless Windows$!) do |text, file| step %(the "#{file}" file should exist) regexp = Regexp.new(text) if Jekyll::Utils::Platforms.really_windows? && !dst_active? expect(file_contents(file)).not_to match regexp else expect(file_contents(file)).to match regexp end end # Then(%r!^I should see date "(.*)" in "(.*)" if on Windows$!) 
do |text, file| step %(the "#{file}" file should exist) regexp = Regexp.new(text) if Jekyll::Utils::Platforms.really_windows? && !dst_active? expect(file_contents(file)).to match regexp else expect(file_contents(file)).not_to match regexp end end # Then(%r!^I should see exactly "(.*)" in "(.*)"$!) do |text, file| step %(the "#{file}" file should exist) expect(file_contents(file).strip).to eq text end # Then(%r!^I should see escaped "(.*)" in "(.*)"$!) do |text, file| step %(I should see "#{Regexp.escape(text)}" in "#{file}") end # Then(%r!^the "(.*)" file should +(not )?exist$!) do |file, negative| if negative.nil? expect(Pathname.new(file)).to exist else expect(Pathname.new(file)).to_not exist end end # Then(%r!^I should see today's time in "(.*)"$!) do |file| seconds = 3 build_time = Time.now content = file_contents(file) date_time_pattern = /(\d{4}-\d{2}-\d{2}\s\d+:\d{2}:\d{2})/ match_data = content.match(date_time_pattern) expect(match_data).not_to be_nil, "No date-time pattern found in #{file}" date_time_str = match_data.captures file_time = Time.parse("#{date_time_str}") time_difference = (build_time - file_time).abs expect(time_difference).to be <= seconds, <<~MSG Expected time in #{file} to be within #{seconds} seconds of build time. Build time: #{build_time} File time: #{file_time} Difference: #{time_difference} seconds MSG end # Then(%r!^I should see today's date in "(.*)"$!) do |file| step %(I should see "#{Date.today}" in "#{file}") end # Then(%r!^I should (not )?see "(.*)" in the build output$!) do |negative, text| if negative.nil? || negative.empty? expect(jekyll_run_output).to match Regexp.new(text) else expect(jekyll_run_output).not_to match Regexp.new(text) end end # Then(%r!^I should get an updated git index$!) 
do index = %w( .gitignore Gemfile LICENSE.txt README.md _config.yml _includes/blank.html _layouts/default.html _layouts/page.html _layouts/post.html _sass/blank.scss assets/blank.scss my-cool-theme.gemspec ) index.each do |file| expect(jekyll_run_output).to match file end end # Then(%r!^I should get a zero exit(?:\-| )status$!) do step %(I should see "EXIT STATUS: 0" in the build output) end # Then(%r!^I should get a non-zero exit(?:\-| )status$!) do step %(I should not see "EXIT STATUS: 0" in the build output) end
ruby
github
https://github.com/jekyll/jekyll
features/step_definitions.rb
'''OpenGL extension SGIX.async This module customises the behaviour of the OpenGL.raw.GL.SGIX.async to provide a more Python-friendly API Overview (from the spec) This extension provides a framework for asynchronous OpenGL commands. It also provides commands allowing a program to wait for the completion of asynchronous commands. Asynchronous commands have two properties: 1) Asynchronous commands are non-blocking. For example, an asynchronous ReadPixels command returns control to the program immediately rather than blocking until the command completes. This property allows the program to issue other OpenGL commands in parallel with the execution of commands that normally block. 2) Asynchronous commands may complete out-of-order with respect to other OpenGL commands. For example, an asynchronous TexImage command may complete after subsequent OpenGL commands issued by the program rather than maintaining the normal serial order of the OpenGL command stream. This property allows the graphics accelerator to execute asynchronous commands in parallel with the normal command stream, for instance using a secondary path to transfer data from or to the host, without doing any dependency checking. Programs that issue asynchronous commands must also be able to determine when the commands have completed. The completion status may be needed so that results can be retrieved (e.g. the image data from a ReadPixels command) or so that dependent commands can be issued (e.g. drawing commands that use texture data downloaded by an earlier asynchronous command). This extension provides fine-grain control over asynchronous commands by introducing a mechanism for determining the status of individual commands. Each invocation of an asynchronous command is associated with an integer called a "marker." A program specifies a marker before it issues an asynchronous command. The program may later issue a command to query if any asynchronous commands have completed. 
The query commands return a marker to identify the command that completed. This extension provides both blocking and non-blocking query commands. This extension does not define any asynchronous commands. See SGIX_async_pixel for the asynchronous pixel commands. The official definition of this extension is available here: http://www.opengl.org/registry/specs/SGIX/async.txt ''' from OpenGL import platform, constants, constant, arrays from OpenGL import extensions, wrapper from OpenGL.GL import glget import ctypes from OpenGL.raw.GL.SGIX.async import * ### END AUTOGENERATED SECTION
unknown
codeparrot/codeparrot-clean
/* * "git clean" builtin command * * Copyright (C) 2007 Shawn Bohrer * * Based on git-clean.sh by Pavel Roskin */ #define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "builtin.h" #include "abspath.h" #include "config.h" #include "dir.h" #include "environment.h" #include "gettext.h" #include "parse-options.h" #include "path.h" #include "read-cache-ll.h" #include "setup.h" #include "string-list.h" #include "quote.h" #include "column.h" #include "color.h" #include "pathspec.h" #include "help.h" #include "prompt.h" static int require_force = -1; /* unset */ static int interactive; static struct string_list del_list = STRING_LIST_INIT_DUP; static unsigned int colopts; static const char *const builtin_clean_usage[] = { N_("git clean [-d] [-f] [-i] [-n] [-q] [-e <pattern>] [-x | -X] [--] [<pathspec>...]"), NULL }; static const char *msg_remove = N_("Removing %s\n"); static const char *msg_would_remove = N_("Would remove %s\n"); static const char *msg_skip_git_dir = N_("Skipping repository %s\n"); static const char *msg_would_skip_git_dir = N_("Would skip repository %s\n"); static const char *msg_warn_remove_failed = N_("failed to remove %s"); static const char *msg_warn_lstat_failed = N_("could not lstat %s\n"); static const char *msg_skip_cwd = N_("Refusing to remove current working directory\n"); static const char *msg_would_skip_cwd = N_("Would refuse to remove current working directory\n"); enum color_clean { CLEAN_COLOR_RESET = 0, CLEAN_COLOR_PLAIN = 1, CLEAN_COLOR_PROMPT = 2, CLEAN_COLOR_HEADER = 3, CLEAN_COLOR_HELP = 4, CLEAN_COLOR_ERROR = 5 }; static const char *color_interactive_slots[] = { [CLEAN_COLOR_ERROR] = "error", [CLEAN_COLOR_HEADER] = "header", [CLEAN_COLOR_HELP] = "help", [CLEAN_COLOR_PLAIN] = "plain", [CLEAN_COLOR_PROMPT] = "prompt", [CLEAN_COLOR_RESET] = "reset", }; static enum git_colorbool clean_use_color = GIT_COLOR_UNKNOWN; static char clean_colors[][COLOR_MAXLEN] = { [CLEAN_COLOR_ERROR] = GIT_COLOR_BOLD_RED, 
[CLEAN_COLOR_HEADER] = GIT_COLOR_BOLD, [CLEAN_COLOR_HELP] = GIT_COLOR_BOLD_RED, [CLEAN_COLOR_PLAIN] = GIT_COLOR_NORMAL, [CLEAN_COLOR_PROMPT] = GIT_COLOR_BOLD_BLUE, [CLEAN_COLOR_RESET] = GIT_COLOR_RESET, }; #define MENU_OPTS_SINGLETON 01 #define MENU_OPTS_IMMEDIATE 02 #define MENU_OPTS_LIST_ONLY 04 struct menu_opts { const char *header; const char *prompt; int flags; }; #define MENU_RETURN_NO_LOOP 10 struct menu_item { char hotkey; const char *title; int selected; int (*fn)(void); }; enum menu_stuff_type { MENU_STUFF_TYPE_STRING_LIST = 1, MENU_STUFF_TYPE_MENU_ITEM }; struct menu_stuff { enum menu_stuff_type type; int nr; void *stuff; }; define_list_config_array(color_interactive_slots); static int git_clean_config(const char *var, const char *value, const struct config_context *ctx, void *cb) { const char *slot_name; if (starts_with(var, "column.")) return git_column_config(var, value, "clean", &colopts); /* honors the color.interactive* config variables which also applied in git-add--interactive and git-stash */ if (!strcmp(var, "color.interactive")) { clean_use_color = git_config_colorbool(var, value); return 0; } if (skip_prefix(var, "color.interactive.", &slot_name)) { int slot = LOOKUP_CONFIG(color_interactive_slots, slot_name); if (slot < 0) return 0; if (!value) return config_error_nonbool(var); return color_parse(value, clean_colors[slot]); } if (!strcmp(var, "clean.requireforce")) { require_force = git_config_bool(var, value); return 0; } if (git_color_config(var, value, cb) < 0) return -1; return git_default_config(var, value, ctx, cb); } static const char *clean_get_color(enum color_clean ix) { if (want_color(clean_use_color)) return clean_colors[ix]; return ""; } static void clean_print_color(enum color_clean ix) { printf("%s", clean_get_color(ix)); } static int exclude_cb(const struct option *opt, const char *arg, int unset) { struct string_list *exclude_list = opt->value; BUG_ON_OPT_NEG(unset); string_list_append(exclude_list, arg); return 0; } static 
int remove_dirs(struct strbuf *path, const char *prefix, int force_flag, int dry_run, int quiet, int *dir_gone) { DIR *dir; struct strbuf quoted = STRBUF_INIT; struct strbuf realpath = STRBUF_INIT; struct strbuf real_ocwd = STRBUF_INIT; struct dirent *e; int res = 0, ret = 0, gone = 1, original_len = path->len, len; struct string_list dels = STRING_LIST_INIT_DUP; *dir_gone = 1; if ((force_flag & REMOVE_DIR_KEEP_NESTED_GIT) && is_nonbare_repository_dir(path)) { if (!quiet) { quote_path(path->buf, prefix, &quoted, 0); printf(dry_run ? _(msg_would_skip_git_dir) : _(msg_skip_git_dir), quoted.buf); } *dir_gone = 0; goto out; } dir = opendir(path->buf); if (!dir) { /* an empty dir could be removed even if it is unreadble */ res = dry_run ? 0 : rmdir(path->buf); if (res) { int saved_errno = errno; quote_path(path->buf, prefix, &quoted, 0); errno = saved_errno; warning_errno(_(msg_warn_remove_failed), quoted.buf); *dir_gone = 0; } ret = res; goto out; } strbuf_complete(path, '/'); len = path->len; while ((e = readdir_skip_dot_and_dotdot(dir)) != NULL) { struct stat st; strbuf_setlen(path, len); strbuf_addstr(path, e->d_name); if (lstat(path->buf, &st)) warning_errno(_(msg_warn_lstat_failed), path->buf); else if (S_ISDIR(st.st_mode)) { if (remove_dirs(path, prefix, force_flag, dry_run, quiet, &gone)) ret = 1; if (gone) { quote_path(path->buf, prefix, &quoted, 0); string_list_append(&dels, quoted.buf); } else *dir_gone = 0; continue; } else { res = dry_run ? 0 : unlink(path->buf); if (!res) { quote_path(path->buf, prefix, &quoted, 0); string_list_append(&dels, quoted.buf); } else { int saved_errno = errno; quote_path(path->buf, prefix, &quoted, 0); errno = saved_errno; warning_errno(_(msg_warn_remove_failed), quoted.buf); *dir_gone = 0; ret = 1; } continue; } /* path too long, stat fails, or non-directory still exists */ *dir_gone = 0; ret = 1; break; } closedir(dir); strbuf_setlen(path, original_len); if (*dir_gone) { /* * Normalize path components in path->buf, e.g. 
change '\' to * '/' on Windows. */ strbuf_realpath(&realpath, path->buf, 1); /* * path and realpath are absolute; for comparison, we would * like to transform startup_info->original_cwd to an absolute * path too. */ if (startup_info->original_cwd) strbuf_realpath(&real_ocwd, startup_info->original_cwd, 1); if (!strbuf_cmp(&realpath, &real_ocwd)) { printf("%s", dry_run ? _(msg_would_skip_cwd) : _(msg_skip_cwd)); *dir_gone = 0; } else { res = dry_run ? 0 : rmdir(path->buf); if (!res) *dir_gone = 1; else { int saved_errno = errno; quote_path(path->buf, prefix, &quoted, 0); errno = saved_errno; warning_errno(_(msg_warn_remove_failed), quoted.buf); *dir_gone = 0; ret = 1; } } } if (!*dir_gone && !quiet) { int i; for (i = 0; i < dels.nr; i++) printf(dry_run ? _(msg_would_remove) : _(msg_remove), dels.items[i].string); } out: strbuf_release(&realpath); strbuf_release(&real_ocwd); strbuf_release(&quoted); string_list_clear(&dels, 0); return ret; } static void pretty_print_dels(void) { struct string_list list = STRING_LIST_INIT_DUP; struct string_list_item *item; struct strbuf buf = STRBUF_INIT; const char *qname; struct column_options copts; for_each_string_list_item(item, &del_list) { qname = quote_path(item->string, NULL, &buf, 0); string_list_append(&list, qname); } /* * always enable column display, we only consult column.* * about layout strategy and stuff */ colopts = (colopts & ~COL_ENABLE_MASK) | COL_ENABLED; memset(&copts, 0, sizeof(copts)); copts.indent = " "; copts.padding = 2; print_columns(&list, colopts, &copts); strbuf_release(&buf); string_list_clear(&list, 0); } static void pretty_print_menus(struct string_list *menu_list) { unsigned int local_colopts = 0; struct column_options copts; local_colopts = COL_ENABLED | COL_ROW; memset(&copts, 0, sizeof(copts)); copts.indent = " "; copts.padding = 2; print_columns(menu_list, local_colopts, &copts); } static void prompt_help_cmd(int singleton) { clean_print_color(CLEAN_COLOR_HELP); printf(singleton ? 
_("Prompt help:\n" "1 - select a numbered item\n" "foo - select item based on unique prefix\n" " - (empty) select nothing\n") : _("Prompt help:\n" "1 - select a single item\n" "3-5 - select a range of items\n" "2-3,6-9 - select multiple ranges\n" "foo - select item based on unique prefix\n" "-... - unselect specified items\n" "* - choose all items\n" " - (empty) finish selecting\n")); clean_print_color(CLEAN_COLOR_RESET); } /* * display menu stuff with number prefix and hotkey highlight */ static void print_highlight_menu_stuff(struct menu_stuff *stuff, int **chosen) { struct string_list menu_list = STRING_LIST_INIT_DUP; struct strbuf menu = STRBUF_INIT; struct menu_item *menu_item; struct string_list_item *string_list_item; int i; switch (stuff->type) { default: die("Bad type of menu_stuff when print menu"); case MENU_STUFF_TYPE_MENU_ITEM: menu_item = (struct menu_item *)stuff->stuff; for (i = 0; i < stuff->nr; i++, menu_item++) { const char *p; int highlighted = 0; p = menu_item->title; if ((*chosen)[i] < 0) (*chosen)[i] = menu_item->selected ? 1 : 0; strbuf_addf(&menu, "%s%2d: ", (*chosen)[i] ? "*" : " ", i+1); for (; *p; p++) { if (!highlighted && *p == menu_item->hotkey) { strbuf_addstr(&menu, clean_get_color(CLEAN_COLOR_PROMPT)); strbuf_addch(&menu, *p); strbuf_addstr(&menu, clean_get_color(CLEAN_COLOR_RESET)); highlighted = 1; } else { strbuf_addch(&menu, *p); } } string_list_append(&menu_list, menu.buf); strbuf_reset(&menu); } break; case MENU_STUFF_TYPE_STRING_LIST: i = 0; for_each_string_list_item(string_list_item, (struct string_list *)stuff->stuff) { if ((*chosen)[i] < 0) (*chosen)[i] = 0; strbuf_addf(&menu, "%s%2d: %s", (*chosen)[i] ? 
"*" : " ", i+1, string_list_item->string); string_list_append(&menu_list, menu.buf); strbuf_reset(&menu); i++; } break; } pretty_print_menus(&menu_list); strbuf_release(&menu); string_list_clear(&menu_list, 0); } static int find_unique(const char *choice, struct menu_stuff *menu_stuff) { struct menu_item *menu_item; struct string_list_item *string_list_item; int i, len, found = 0; len = strlen(choice); switch (menu_stuff->type) { default: die("Bad type of menu_stuff when parse choice"); case MENU_STUFF_TYPE_MENU_ITEM: menu_item = (struct menu_item *)menu_stuff->stuff; for (i = 0; i < menu_stuff->nr; i++, menu_item++) { if (len == 1 && *choice == menu_item->hotkey) { found = i + 1; break; } if (!strncasecmp(choice, menu_item->title, len)) { if (found) { if (len == 1) { /* continue for hotkey matching */ found = -1; } else { found = 0; break; } } else { found = i + 1; } } } break; case MENU_STUFF_TYPE_STRING_LIST: string_list_item = ((struct string_list *)menu_stuff->stuff)->items; for (i = 0; i < menu_stuff->nr; i++, string_list_item++) { if (!strncasecmp(choice, string_list_item->string, len)) { if (found) { found = 0; break; } found = i + 1; } } break; } return found; } /* * Parse user input, and return choice(s) for menu (menu_stuff). * * Input * (for single choice) * 1 - select a numbered item * foo - select item based on menu title * - (empty) select nothing * * (for multiple choice) * 1 - select a single item * 3-5 - select a range of items * 2-3,6-9 - select multiple ranges * foo - select item based on menu title * -... - unselect specified items * * - choose all items * - (empty) finish selecting * * The parse result will be saved in array **chosen, and * return number of total selections. */ static int parse_choice(struct menu_stuff *menu_stuff, int is_single, char *input, int **chosen) { struct string_list choice = STRING_LIST_INIT_NODUP; struct string_list_item *item; int nr = 0; int i; string_list_split_in_place_f(&choice, input, is_single ? 
"\n" : ", ", -1, STRING_LIST_SPLIT_TRIM); for_each_string_list_item(item, &choice) { const char *string; int choose; int bottom = 0, top = 0; int is_range, is_number; string = item->string; if (!*string) continue; /* Input that begins with '-'; unchoose */ if (string[0] == '-') { choose = 0; string++; } else { choose = 1; } is_range = 0; is_number = 1; for (const char *p = string; *p; p++) { if ('-' == *p) { if (!is_range) { is_range = 1; is_number = 0; } else { is_number = 0; is_range = 0; break; } } else if (!isdigit(*p)) { is_number = 0; is_range = 0; break; } } if (is_number) { bottom = atoi(string); top = bottom; } else if (is_range) { bottom = atoi(string); /* a range can be specified like 5-7 or 5- */ if (!*(strchr(string, '-') + 1)) top = menu_stuff->nr; else top = atoi(strchr(string, '-') + 1); } else if (!strcmp(string, "*")) { bottom = 1; top = menu_stuff->nr; } else { bottom = find_unique(string, menu_stuff); top = bottom; } if (top <= 0 || bottom <= 0 || top > menu_stuff->nr || bottom > top || (is_single && bottom != top)) { clean_print_color(CLEAN_COLOR_ERROR); printf(_("Huh (%s)?\n"), string); clean_print_color(CLEAN_COLOR_RESET); continue; } for (i = bottom; i <= top; i++) (*chosen)[i-1] = choose; } string_list_clear(&choice, 0); for (i = 0; i < menu_stuff->nr; i++) nr += (*chosen)[i]; return nr; } /* * Implement a git-add-interactive compatible UI, which is borrowed * from add-interactive.c. * * Return value: * * - Return an array of integers * - , and it is up to you to free the allocated memory. * - The array ends with EOF. * - If user pressed CTRL-D (i.e. EOF), no selection returned. 
*/ static int *list_and_choose(struct menu_opts *opts, struct menu_stuff *stuff) { struct strbuf choice = STRBUF_INIT; int *chosen, *result; int nr = 0; int eof = 0; int i; ALLOC_ARRAY(chosen, stuff->nr); /* set chosen as uninitialized */ for (i = 0; i < stuff->nr; i++) chosen[i] = -1; for (;;) { if (opts->header) { printf_ln("%s%s%s", clean_get_color(CLEAN_COLOR_HEADER), _(opts->header), clean_get_color(CLEAN_COLOR_RESET)); } /* chosen will be initialized by print_highlight_menu_stuff */ print_highlight_menu_stuff(stuff, &chosen); if (opts->flags & MENU_OPTS_LIST_ONLY) break; if (opts->prompt) { printf("%s%s%s%s", clean_get_color(CLEAN_COLOR_PROMPT), _(opts->prompt), opts->flags & MENU_OPTS_SINGLETON ? "> " : ">> ", clean_get_color(CLEAN_COLOR_RESET)); } if (git_read_line_interactively(&choice) == EOF) { eof = 1; break; } /* help for prompt */ if (!strcmp(choice.buf, "?")) { prompt_help_cmd(opts->flags & MENU_OPTS_SINGLETON); continue; } /* for a multiple-choice menu, press ENTER (empty) will return back */ if (!(opts->flags & MENU_OPTS_SINGLETON) && !choice.len) break; nr = parse_choice(stuff, opts->flags & MENU_OPTS_SINGLETON, choice.buf, &chosen); if (opts->flags & MENU_OPTS_SINGLETON) { if (nr) break; } else if (opts->flags & MENU_OPTS_IMMEDIATE) { break; } } if (eof) { result = xmalloc(sizeof(int)); *result = EOF; } else { int j = 0; /* * recalculate nr, if return back from menu directly with * default selections. 
*/ if (!nr) { for (i = 0; i < stuff->nr; i++) nr += chosen[i]; } CALLOC_ARRAY(result, st_add(nr, 1)); for (i = 0; i < stuff->nr && j < nr; i++) { if (chosen[i]) result[j++] = i; } result[j] = EOF; } free(chosen); strbuf_release(&choice); return result; } static int clean_cmd(void) { return MENU_RETURN_NO_LOOP; } static int filter_by_patterns_cmd(void) { struct dir_struct dir = DIR_INIT; struct strbuf confirm = STRBUF_INIT; struct pattern_list *pl; int changed = -1, i; for (;;) { struct string_list ignore_list = STRING_LIST_INIT_NODUP; struct string_list_item *item; if (!del_list.nr) break; if (changed) pretty_print_dels(); clean_print_color(CLEAN_COLOR_PROMPT); printf(_("Input ignore patterns>> ")); clean_print_color(CLEAN_COLOR_RESET); if (git_read_line_interactively(&confirm) == EOF) putchar('\n'); /* quit filter_by_pattern mode if press ENTER or Ctrl-D */ if (!confirm.len) break; pl = add_pattern_list(&dir, EXC_CMDL, "manual exclude"); string_list_split_in_place_f(&ignore_list, confirm.buf, " ", -1, STRING_LIST_SPLIT_TRIM); for (i = 0; i < ignore_list.nr; i++) { item = &ignore_list.items[i]; if (!*item->string) continue; add_pattern(item->string, "", 0, pl, -(i+1)); } changed = 0; for_each_string_list_item(item, &del_list) { int dtype = DT_UNKNOWN; if (is_excluded(&dir, the_repository->index, item->string, &dtype)) { *item->string = '\0'; changed++; } } if (changed) { string_list_remove_empty_items(&del_list, 0); } else { clean_print_color(CLEAN_COLOR_ERROR); printf_ln(_("WARNING: Cannot find items matched by: %s"), confirm.buf); clean_print_color(CLEAN_COLOR_RESET); } string_list_clear(&ignore_list, 0); dir_clear(&dir); } strbuf_release(&confirm); return 0; } static int select_by_numbers_cmd(void) { struct menu_opts menu_opts; struct menu_stuff menu_stuff; struct string_list_item *items; int *chosen; int i, j; menu_opts.header = NULL; menu_opts.prompt = N_("Select items to delete"); menu_opts.flags = 0; menu_stuff.type = MENU_STUFF_TYPE_STRING_LIST; 
menu_stuff.stuff = &del_list; menu_stuff.nr = del_list.nr; chosen = list_and_choose(&menu_opts, &menu_stuff); items = del_list.items; for (i = 0, j = 0; i < del_list.nr; i++) { if (i < chosen[j]) { *(items[i].string) = '\0'; } else if (i == chosen[j]) { /* delete selected item */ j++; continue; } else { /* end of chosen (chosen[j] == EOF), won't delete */ *(items[i].string) = '\0'; } } string_list_remove_empty_items(&del_list, 0); free(chosen); return 0; } static int ask_each_cmd(void) { struct strbuf confirm = STRBUF_INIT; struct strbuf buf = STRBUF_INIT; struct string_list_item *item; const char *qname; int changed = 0, eof = 0; for_each_string_list_item(item, &del_list) { /* Ctrl-D should stop removing files */ if (!eof) { qname = quote_path(item->string, NULL, &buf, 0); /* TRANSLATORS: Make sure to keep [y/N] as is */ printf(_("Remove %s [y/N]? "), qname); if (git_read_line_interactively(&confirm) == EOF) { putchar('\n'); eof = 1; } } if (!confirm.len || strncasecmp(confirm.buf, "yes", confirm.len)) { *item->string = '\0'; changed++; } } if (changed) string_list_remove_empty_items(&del_list, 0); strbuf_release(&buf); strbuf_release(&confirm); return MENU_RETURN_NO_LOOP; } static int quit_cmd(void) { string_list_clear(&del_list, 0); printf(_("Bye.\n")); return MENU_RETURN_NO_LOOP; } static int help_cmd(void) { clean_print_color(CLEAN_COLOR_HELP); printf_ln(_( "clean - start cleaning\n" "filter by pattern - exclude items from deletion\n" "select by numbers - select items to be deleted by numbers\n" "ask each - confirm each deletion (like \"rm -i\")\n" "quit - stop cleaning\n" "help - this screen\n" "? 
- help for prompt selection" )); clean_print_color(CLEAN_COLOR_RESET); return 0; } static void interactive_main_loop(void) { while (del_list.nr) { struct menu_opts menu_opts; struct menu_stuff menu_stuff; struct menu_item menus[] = { {'c', "clean", 0, clean_cmd}, {'f', "filter by pattern", 0, filter_by_patterns_cmd}, {'s', "select by numbers", 0, select_by_numbers_cmd}, {'a', "ask each", 0, ask_each_cmd}, {'q', "quit", 0, quit_cmd}, {'h', "help", 0, help_cmd}, }; int *chosen; menu_opts.header = N_("*** Commands ***"); menu_opts.prompt = N_("What now"); menu_opts.flags = MENU_OPTS_SINGLETON; menu_stuff.type = MENU_STUFF_TYPE_MENU_ITEM; menu_stuff.stuff = menus; menu_stuff.nr = sizeof(menus) / sizeof(struct menu_item); clean_print_color(CLEAN_COLOR_HEADER); printf_ln(Q_("Would remove the following item:", "Would remove the following items:", del_list.nr)); clean_print_color(CLEAN_COLOR_RESET); pretty_print_dels(); chosen = list_and_choose(&menu_opts, &menu_stuff); if (*chosen != EOF) { int ret; ret = menus[*chosen].fn(); if (ret != MENU_RETURN_NO_LOOP) { FREE_AND_NULL(chosen); if (!del_list.nr) { clean_print_color(CLEAN_COLOR_ERROR); printf_ln(_("No more files to clean, exiting.")); clean_print_color(CLEAN_COLOR_RESET); break; } continue; } } else { quit_cmd(); } FREE_AND_NULL(chosen); break; } } static void correct_untracked_entries(struct dir_struct *dir) { int src, dst, ign; for (src = dst = ign = 0; src < dir->nr; src++) { /* skip paths in ignored[] that cannot be inside entries[src] */ while (ign < dir->ignored_nr && 0 <= cmp_dir_entry(&dir->entries[src], &dir->ignored[ign])) ign++; if (ign < dir->ignored_nr && check_dir_entry_contains(dir->entries[src], dir->ignored[ign])) { /* entries[src] contains an ignored path, so we drop it */ free(dir->entries[src]); } else { struct dir_entry *ent = dir->entries[src++]; /* entries[src] does not contain an ignored path, so we keep it */ dir->entries[dst++] = ent; /* then discard paths in entries[] contained inside 
entries[src] */ while (src < dir->nr && check_dir_entry_contains(ent, dir->entries[src])) free(dir->entries[src++]); /* compensate for the outer loop's loop control */ src--; } } dir->nr = dst; } int cmd_clean(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { int i, res; int dry_run = 0, remove_directories = 0, quiet = 0, ignored = 0; int ignored_only = 0, force = 0, errors = 0, gone = 1; int rm_flags = REMOVE_DIR_KEEP_NESTED_GIT; struct strbuf abs_path = STRBUF_INIT; struct dir_struct dir = DIR_INIT; struct pathspec pathspec; struct strbuf buf = STRBUF_INIT; struct string_list exclude_list = STRING_LIST_INIT_NODUP; struct pattern_list *pl; struct string_list_item *item; const char *qname; struct option options[] = { OPT__QUIET(&quiet, N_("do not print names of files removed")), OPT__DRY_RUN(&dry_run, N_("dry run")), OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE), OPT_BOOL('i', "interactive", &interactive, N_("interactive cleaning")), OPT_BOOL('d', NULL, &remove_directories, N_("remove whole directories")), OPT_CALLBACK_F('e', "exclude", &exclude_list, N_("pattern"), N_("add <pattern> to ignore rules"), PARSE_OPT_NONEG, exclude_cb), OPT_BOOL('x', NULL, &ignored, N_("remove ignored files, too")), OPT_BOOL('X', NULL, &ignored_only, N_("remove only ignored files")), OPT_END() }; repo_config(the_repository, git_clean_config, NULL); argc = parse_options(argc, argv, prefix, options, builtin_clean_usage, 0); if (require_force != 0 && !force && !interactive && !dry_run) die(_("clean.requireForce is true and -f not given: refusing to clean")); if (force > 1) rm_flags = 0; else dir.flags |= DIR_SKIP_NESTED_GIT; dir.flags |= DIR_SHOW_OTHER_DIRECTORIES; if (ignored && ignored_only) die(_("options '%s' and '%s' cannot be used together"), "-x", "-X"); if (!ignored) setup_standard_excludes(&dir); if (ignored_only) dir.flags |= DIR_SHOW_IGNORED; if (argc) { /* * Remaining args implies pathspecs specified, and we should * recurse within 
those. */ remove_directories = 1; } if (remove_directories && !ignored_only) { /* * We need to know about ignored files too: * * If (ignored), then we will delete ignored files as well. * * If (!ignored), then even though we not are doing * anything with ignored files, we need to know about them * so that we can avoid deleting a directory of untracked * files that also contains an ignored file within it. * * For the (!ignored) case, since we only need to avoid * deleting ignored files, we can set * DIR_SHOW_IGNORED_TOO_MODE_MATCHING in order to avoid * recursing into a directory which is itself ignored. */ dir.flags |= DIR_SHOW_IGNORED_TOO; if (!ignored) dir.flags |= DIR_SHOW_IGNORED_TOO_MODE_MATCHING; /* * Let the fill_directory() machinery know that we aren't * just recursing to collect the ignored files; we want all * the untracked ones so that we can delete them. (Note: * we could also set DIR_KEEP_UNTRACKED_CONTENTS when * ignored_only is true, since DIR_KEEP_UNTRACKED_CONTENTS * only has effect in combination with DIR_SHOW_IGNORED_TOO. It makes * the code clearer to exclude it, though. 
*/ dir.flags |= DIR_KEEP_UNTRACKED_CONTENTS; } prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; if (repo_read_index(the_repository) < 0) die(_("index file corrupt")); pl = add_pattern_list(&dir, EXC_CMDL, "--exclude option"); for (i = 0; i < exclude_list.nr; i++) add_pattern(exclude_list.items[i].string, "", 0, pl, -(i+1)); parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_CWD, prefix, argv); fill_directory(&dir, the_repository->index, &pathspec); correct_untracked_entries(&dir); for (i = 0; i < dir.nr; i++) { struct dir_entry *ent = dir.entries[i]; struct stat st; const char *rel; if (!index_name_is_other(the_repository->index, ent->name, ent->len)) continue; if (lstat(ent->name, &st)) die_errno("Cannot lstat '%s'", ent->name); if (S_ISDIR(st.st_mode) && !remove_directories) continue; rel = relative_path(ent->name, prefix, &buf); string_list_append(&del_list, rel); } dir_clear(&dir); if (interactive && del_list.nr > 0) interactive_main_loop(); for_each_string_list_item(item, &del_list) { struct stat st; strbuf_reset(&abs_path); if (prefix) strbuf_addstr(&abs_path, prefix); strbuf_addstr(&abs_path, item->string); /* * we might have removed this as part of earlier * recursive directory removal, so lstat() here could * fail with ENOENT. */ if (lstat(abs_path.buf, &st)) continue; if (S_ISDIR(st.st_mode)) { if (remove_dirs(&abs_path, prefix, rm_flags, dry_run, quiet, &gone)) errors++; if (gone && !quiet) { qname = quote_path(item->string, NULL, &buf, 0); printf(dry_run ? _(msg_would_remove) : _(msg_remove), qname); } } else { res = dry_run ? 0 : unlink(abs_path.buf); if (res) { int saved_errno = errno; qname = quote_path(item->string, NULL, &buf, 0); errno = saved_errno; warning_errno(_(msg_warn_remove_failed), qname); errors++; } else if (!quiet) { qname = quote_path(item->string, NULL, &buf, 0); printf(dry_run ? 
_(msg_would_remove) : _(msg_remove), qname); } } } strbuf_release(&abs_path); strbuf_release(&buf); string_list_clear(&del_list, 0); string_list_clear(&exclude_list, 0); clear_pathspec(&pathspec); return (errors != 0); }
c
github
https://github.com/git/git
builtin/clean.c
from __future__ import print_function import os import os.path as op from nose.tools import assert_true, assert_raises from nose.plugins.skip import SkipTest import numpy as np from numpy.testing import assert_array_equal, assert_allclose, assert_equal import warnings from mne.datasets import testing from mne import (read_source_spaces, vertex_to_mni, write_source_spaces, setup_source_space, setup_volume_source_space, add_source_space_distances, read_bem_surfaces) from mne.utils import (_TempDir, requires_fs_or_nibabel, requires_nibabel, requires_freesurfer, run_subprocess, requires_mne, requires_scipy_version, run_tests_if_main, slow_test) from mne.surface import _accumulate_normals, _triangle_neighbors from mne.source_space import _get_mgz_header from mne.externals.six.moves import zip from mne.source_space import (get_volume_labels_from_aseg, SourceSpaces, _compare_source_spaces) from mne.io.constants import FIFF warnings.simplefilter('always') data_path = testing.data_path(download=False) subjects_dir = op.join(data_path, 'subjects') fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz') fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif') fname_vol = op.join(subjects_dir, 'sample', 'bem', 'sample-volume-7mm-src.fif') fname_bem = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-1280-bem.fif') base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') fname_small = op.join(base_dir, 'small-src.fif.gz') @testing.requires_testing_data @requires_nibabel(vox2ras_tkr=True) def test_mgz_header(): """Test MGZ header reading""" import nibabel as nib header = _get_mgz_header(fname_mri) mri_hdr = nib.load(fname_mri).get_header() assert_allclose(mri_hdr.get_data_shape(), header['dims']) assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr']) assert_allclose(mri_hdr.get_ras2vox(), header['ras2vox']) @requires_scipy_version('0.11') def test_add_patch_info(): """Test adding patch info to source space""" # let's 
setup a small source space src = read_source_spaces(fname_small) src_new = read_source_spaces(fname_small) for s in src_new: s['nearest'] = None s['nearest_dist'] = None s['pinfo'] = None # test that no patch info is added for small dist_limit try: add_source_space_distances(src_new, dist_limit=0.00001) except RuntimeError: # what we throw when scipy version is wrong pass else: assert_true(all(s['nearest'] is None for s in src_new)) assert_true(all(s['nearest_dist'] is None for s in src_new)) assert_true(all(s['pinfo'] is None for s in src_new)) # now let's use one that works add_source_space_distances(src_new) for s1, s2 in zip(src, src_new): assert_array_equal(s1['nearest'], s2['nearest']) assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7) assert_equal(len(s1['pinfo']), len(s2['pinfo'])) for p1, p2 in zip(s1['pinfo'], s2['pinfo']): assert_array_equal(p1, p2) @testing.requires_testing_data @requires_scipy_version('0.11') def test_add_source_space_distances_limited(): """Test adding distances to source space with a dist_limit""" tempdir = _TempDir() src = read_source_spaces(fname) src_new = read_source_spaces(fname) del src_new[0]['dist'] del src_new[1]['dist'] n_do = 200 # limit this for speed src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy() src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy() out_name = op.join(tempdir, 'temp-src.fif') try: add_source_space_distances(src_new, dist_limit=0.007) except RuntimeError: # what we throw when scipy version is wrong raise SkipTest('dist_limit requires scipy > 0.13') write_source_spaces(out_name, src_new) src_new = read_source_spaces(out_name) for so, sn in zip(src, src_new): assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32)) assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32)) do = so['dist'] dn = sn['dist'] # clean out distances > 0.007 in C code do.data[do.data > 0.007] = 0 do.eliminate_zeros() # make sure we have some comparable distances 
assert_true(np.sum(do.data < 0.007) > 400) # do comparison over the region computed d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]] assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6) @slow_test @testing.requires_testing_data @requires_scipy_version('0.11') def test_add_source_space_distances(): """Test adding distances to source space""" tempdir = _TempDir() src = read_source_spaces(fname) src_new = read_source_spaces(fname) del src_new[0]['dist'] del src_new[1]['dist'] n_do = 20 # limit this for speed src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy() src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy() out_name = op.join(tempdir, 'temp-src.fif') add_source_space_distances(src_new) write_source_spaces(out_name, src_new) src_new = read_source_spaces(out_name) # iterate over both hemispheres for so, sn in zip(src, src_new): v = so['vertno'][:n_do] assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32)) assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32)) do = so['dist'] dn = sn['dist'] # clean out distances > 0.007 in C code (some residual), and Python ds = list() for d in [do, dn]: d.data[d.data > 0.007] = 0 d = d[v][:, v] d.eliminate_zeros() ds.append(d) # make sure we actually calculated some comparable distances assert_true(np.sum(ds[0].data < 0.007) > 10) # do comparison d = ds[0] - ds[1] assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9) @testing.requires_testing_data @requires_mne def test_discrete_source_space(): """Test setting up (and reading/writing) discrete source spaces """ tempdir = _TempDir() src = read_source_spaces(fname) v = src[0]['vertno'] # let's make a discrete version with the C code, and with ours temp_name = op.join(tempdir, 'temp-src.fif') try: # save temp_pos = op.join(tempdir, 'temp-pos.txt') np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]]) # let's try the spherical one (no bem or surf supplied) 
run_subprocess(['mne_volume_source_space', '--meters', '--pos', temp_pos, '--src', temp_name]) src_c = read_source_spaces(temp_name) pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v]) src_new = setup_volume_source_space('sample', None, pos=pos_dict, subjects_dir=subjects_dir) _compare_source_spaces(src_c, src_new, mode='approx') assert_allclose(src[0]['rr'][v], src_new[0]['rr'], rtol=1e-3, atol=1e-6) assert_allclose(src[0]['nn'][v], src_new[0]['nn'], rtol=1e-3, atol=1e-6) # now do writing write_source_spaces(temp_name, src_c) src_c2 = read_source_spaces(temp_name) _compare_source_spaces(src_c, src_c2) # now do MRI assert_raises(ValueError, setup_volume_source_space, 'sample', pos=pos_dict, mri=fname_mri) finally: if op.isfile(temp_name): os.remove(temp_name) @slow_test @testing.requires_testing_data def test_volume_source_space(): """Test setting up volume source spaces """ tempdir = _TempDir() src = read_source_spaces(fname_vol) temp_name = op.join(tempdir, 'temp-src.fif') surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN) surf['rr'] *= 1e3 # convert to mm # The one in the testing dataset (uses bem as bounds) for bem, surf in zip((fname_bem, None), (None, surf)): src_new = setup_volume_source_space('sample', temp_name, pos=7.0, bem=bem, surface=surf, mri=fname_mri, subjects_dir=subjects_dir) _compare_source_spaces(src, src_new, mode='approx') del src_new src_new = read_source_spaces(temp_name) _compare_source_spaces(src, src_new, mode='approx') assert_raises(IOError, setup_volume_source_space, 'sample', temp_name, pos=7.0, bem=None, surface='foo', # bad surf mri=fname_mri, subjects_dir=subjects_dir) @testing.requires_testing_data @requires_mne def test_other_volume_source_spaces(): """Test setting up other volume source spaces""" # these are split off because they require the MNE tools, and # Travis doesn't seem to like them # let's try the spherical one (no bem or surf supplied) tempdir = _TempDir() temp_name = op.join(tempdir, 
'temp-src.fif') run_subprocess(['mne_volume_source_space', '--grid', '7.0', '--src', temp_name, '--mri', fname_mri]) src = read_source_spaces(temp_name) src_new = setup_volume_source_space('sample', temp_name, pos=7.0, mri=fname_mri, subjects_dir=subjects_dir) _compare_source_spaces(src, src_new, mode='approx') del src del src_new assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name, pos=7.0, sphere=[1., 1.], mri=fname_mri, # bad sphere subjects_dir=subjects_dir) # now without MRI argument, it should give an error when we try # to read it run_subprocess(['mne_volume_source_space', '--grid', '7.0', '--src', temp_name]) assert_raises(ValueError, read_source_spaces, temp_name) @testing.requires_testing_data def test_triangle_neighbors(): """Test efficient vertex neighboring triangles for surfaces""" this = read_source_spaces(fname)[0] this['neighbor_tri'] = [list() for _ in range(this['np'])] for p in range(this['ntri']): verts = this['tris'][p] this['neighbor_tri'][verts[0]].append(p) this['neighbor_tri'][verts[1]].append(p) this['neighbor_tri'][verts[2]].append(p) this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']] neighbor_tri = _triangle_neighbors(this['tris'], this['np']) assert_true(np.array_equal(nt1, nt2) for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri'])) def test_accumulate_normals(): """Test efficient normal accumulation for surfaces""" # set up comparison rng = np.random.RandomState(0) n_pts = int(1.6e5) # approx number in sample source space n_tris = int(3.2e5) # use all positive to make a worst-case for cumulative summation # (real "nn" vectors will have both positive and negative values) tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int) tris = np.c_[tris, tris + 1, tris + 2] tri_nn = rng.rand(n_tris, 3) this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn) # cut-and-paste from original code in surface.py: # Find neighboring triangles and accumulate vertex normals this['nn'] = 
np.zeros((this['np'], 3)) for p in range(this['ntri']): # vertex normals verts = this['tris'][p] this['nn'][verts, :] += this['tri_nn'][p, :] nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np']) # the moment of truth (or reckoning) assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7) @slow_test @testing.requires_testing_data def test_setup_source_space(): """Test setting up ico, oct, and all source spaces """ tempdir = _TempDir() fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif') # first lets test some input params assert_raises(ValueError, setup_source_space, 'sample', spacing='oct', add_dist=False) assert_raises(ValueError, setup_source_space, 'sample', spacing='octo', add_dist=False) assert_raises(ValueError, setup_source_space, 'sample', spacing='oct6e', add_dist=False) assert_raises(ValueError, setup_source_space, 'sample', spacing='7emm', add_dist=False) assert_raises(ValueError, setup_source_space, 'sample', spacing='alls', add_dist=False) assert_raises(IOError, setup_source_space, 'sample', spacing='oct6', subjects_dir=subjects_dir, add_dist=False) # ico 5 (fsaverage) - write to temp file src = read_source_spaces(fname_ico) temp_name = op.join(tempdir, 'temp-src.fif') with warnings.catch_warnings(record=True): # sklearn equiv neighbors warnings.simplefilter('always') src_new = setup_source_space('fsaverage', temp_name, spacing='ico5', subjects_dir=subjects_dir, add_dist=False, overwrite=True) _compare_source_spaces(src, src_new, mode='approx') assert_array_equal(src[0]['vertno'], np.arange(10242)) assert_array_equal(src[1]['vertno'], np.arange(10242)) # oct-6 (sample) - auto filename + IO src = read_source_spaces(fname) temp_name = op.join(tempdir, 'temp-src.fif') with warnings.catch_warnings(record=True): # sklearn equiv neighbors warnings.simplefilter('always') src_new = setup_source_space('sample', temp_name, spacing='oct6', subjects_dir=subjects_dir, overwrite=True, add_dist=False) 
_compare_source_spaces(src, src_new, mode='approx') src_new = read_source_spaces(temp_name) _compare_source_spaces(src, src_new, mode='approx') # all source points - no file writing src_new = setup_source_space('sample', None, spacing='all', subjects_dir=subjects_dir, add_dist=False) assert_true(src_new[0]['nuse'] == len(src_new[0]['rr'])) assert_true(src_new[1]['nuse'] == len(src_new[1]['rr'])) # dense source space to hit surf['inuse'] lines of _create_surf_spacing assert_raises(RuntimeError, setup_source_space, 'sample', None, spacing='ico6', subjects_dir=subjects_dir, add_dist=False) @testing.requires_testing_data def test_read_source_spaces(): """Test reading of source space meshes """ src = read_source_spaces(fname, patch_stats=True) # 3D source space lh_points = src[0]['rr'] lh_faces = src[0]['tris'] lh_use_faces = src[0]['use_tris'] rh_points = src[1]['rr'] rh_faces = src[1]['tris'] rh_use_faces = src[1]['use_tris'] assert_true(lh_faces.min() == 0) assert_true(lh_faces.max() == lh_points.shape[0] - 1) assert_true(lh_use_faces.min() >= 0) assert_true(lh_use_faces.max() <= lh_points.shape[0] - 1) assert_true(rh_faces.min() == 0) assert_true(rh_faces.max() == rh_points.shape[0] - 1) assert_true(rh_use_faces.min() >= 0) assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1) @slow_test @testing.requires_testing_data def test_write_source_space(): """Test reading and writing of source spaces """ tempdir = _TempDir() src0 = read_source_spaces(fname, patch_stats=False) write_source_spaces(op.join(tempdir, 'tmp-src.fif'), src0) src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'), patch_stats=False) _compare_source_spaces(src0, src1) # test warnings on bad filenames with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') src_badname = op.join(tempdir, 'test-bad-name.fif.gz') write_source_spaces(src_badname, src0) read_source_spaces(src_badname) assert_equal(len(w), 2) @testing.requires_testing_data @requires_fs_or_nibabel def 
test_vertex_to_mni(): """Test conversion of vertices to MNI coordinates """ # obtained using "tksurfer (sample) (l/r)h white" vertices = [100960, 7620, 150549, 96761] coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36], [-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]]) hemis = [0, 0, 0, 1] coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir) # less than 1mm error assert_allclose(coords, coords_2, atol=1.0) @testing.requires_testing_data @requires_freesurfer @requires_nibabel() def test_vertex_to_mni_fs_nibabel(): """Test equivalence of vert_to_mni for nibabel and freesurfer """ n_check = 1000 subject = 'sample' vertices = np.random.randint(0, 100000, n_check) hemis = np.random.randint(0, 1, n_check) coords = vertex_to_mni(vertices, hemis, subject, subjects_dir, 'nibabel') coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir, 'freesurfer') # less than 0.1 mm error assert_allclose(coords, coords_2, atol=0.1) @testing.requires_testing_data @requires_freesurfer @requires_nibabel() def test_get_volume_label_names(): """Test reading volume label names """ aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') label_names = get_volume_labels_from_aseg(aseg_fname) assert_equal(label_names.count('Brain-Stem'), 1) @testing.requires_testing_data @requires_freesurfer @requires_nibabel() def test_source_space_from_label(): """Test generating a source space from volume label """ tempdir = _TempDir() aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') label_names = get_volume_labels_from_aseg(aseg_fname) volume_label = label_names[int(np.random.rand() * len(label_names))] # Test pos as dict pos = dict() assert_raises(ValueError, setup_volume_source_space, 'sample', pos=pos, volume_label=volume_label, mri=aseg_fname) # Test no mri provided assert_raises(RuntimeError, setup_volume_source_space, 'sample', mri=None, volume_label=volume_label) # Test invalid volume label assert_raises(ValueError, setup_volume_source_space, 
'sample', volume_label='Hello World!', mri=aseg_fname) src = setup_volume_source_space('sample', subjects_dir=subjects_dir, volume_label=volume_label, mri=aseg_fname, add_interpolator=False) assert_equal(volume_label, src[0]['seg_name']) # test reading and writing out_name = op.join(tempdir, 'temp-src.fif') write_source_spaces(out_name, src) src_from_file = read_source_spaces(out_name) _compare_source_spaces(src, src_from_file, mode='approx') @testing.requires_testing_data @requires_freesurfer @requires_nibabel() def test_combine_source_spaces(): """Test combining source spaces """ tempdir = _TempDir() aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') label_names = get_volume_labels_from_aseg(aseg_fname) volume_labels = [label_names[int(np.random.rand() * len(label_names))] for ii in range(2)] # get a surface source space (no need to test creation here) srf = read_source_spaces(fname, patch_stats=False) # setup 2 volume source spaces vol = setup_volume_source_space('sample', subjects_dir=subjects_dir, volume_label=volume_labels[0], mri=aseg_fname, add_interpolator=False) # setup a discrete source space rr = np.random.randint(0, 20, (100, 3)) * 1e-3 nn = np.zeros(rr.shape) nn[:, -1] = 1 pos = {'rr': rr, 'nn': nn} disc = setup_volume_source_space('sample', subjects_dir=subjects_dir, pos=pos, verbose='error') # combine source spaces src = srf + vol + disc # test addition of source spaces assert_equal(type(src), SourceSpaces) assert_equal(len(src), 4) # test reading and writing src_out_name = op.join(tempdir, 'temp-src.fif') src.save(src_out_name) src_from_file = read_source_spaces(src_out_name) _compare_source_spaces(src, src_from_file, mode='approx') # test that all source spaces are in MRI coordinates coord_frames = np.array([s['coord_frame'] for s in src]) assert_true((coord_frames == FIFF.FIFFV_COORD_MRI).all()) # test errors for export_volume image_fname = op.join(tempdir, 'temp-image.mgz') # source spaces with no volume assert_raises(ValueError, 
srf.export_volume, image_fname, verbose='error') # unrecognized source type disc2 = disc.copy() disc2[0]['type'] = 'kitty' src_unrecognized = src + disc2 assert_raises(ValueError, src_unrecognized.export_volume, image_fname, verbose='error') # unrecognized file type bad_image_fname = op.join(tempdir, 'temp-image.png') assert_raises(ValueError, src.export_volume, bad_image_fname, verbose='error') # mixed coordinate frames disc3 = disc.copy() disc3[0]['coord_frame'] = 10 src_mixed_coord = src + disc3 assert_raises(ValueError, src_mixed_coord.export_volume, image_fname, verbose='error') run_tests_if_main() # The following code was used to generate small-src.fif.gz. # Unfortunately the C code bombs when trying to add source space distances, # possibly due to incomplete "faking" of a smaller surface on our part here. """ # -*- coding: utf-8 -*- import os import numpy as np import mne data_path = mne.datasets.sample.data_path() src = mne.setup_source_space('sample', fname=None, spacing='oct5') hemis = ['lh', 'rh'] fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis] vs = list() for s, fname in zip(src, fnames): coords = s['rr'][s['vertno']] vs.append(s['vertno']) idx = -1 * np.ones(len(s['rr'])) idx[s['vertno']] = np.arange(s['nuse']) faces = s['use_tris'] faces = idx[faces] mne.write_surface(fname, coords, faces) # we need to move sphere surfaces spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis] for s in spheres: os.rename(s, s + '.bak') try: for s, v in zip(spheres, vs): coords, faces = mne.read_surface(s + '.bak') coords = coords[v] mne.write_surface(s, coords, faces) src = mne.setup_source_space('sample', fname=None, spacing='oct4', surface='decimated') finally: for s in spheres: os.rename(s + '.bak', s) fname = 'small-src.fif' fname_gz = fname + '.gz' mne.write_source_spaces(fname, src) mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname, '--srcp', fname]) mne.write_source_spaces(fname_gz, 
mne.read_source_spaces(fname)) """
unknown
codeparrot/codeparrot-clean
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_KERNELS_DATA_TEXT_LINE_DATASET_OP_H_ #define TENSORFLOW_CORE_KERNELS_DATA_TEXT_LINE_DATASET_OP_H_ #include "tensorflow/core/framework/dataset.h" namespace tensorflow { namespace data { class TextLineDatasetOp : public DatasetOpKernel { public: static constexpr const char* const kDatasetType = "TextLine"; static constexpr const char* const kFileNames = "filenames"; static constexpr const char* const kCompressionType = "compression_type"; static constexpr const char* const kBufferSize = "buffer_size"; explicit TextLineDatasetOp(OpKernelConstruction* ctx); protected: void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override; private: class Dataset; }; } // namespace data } // namespace tensorflow #endif // TENSORFLOW_CORE_KERNELS_DATA_TEXT_LINE_DATASET_OP_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/kernels/data/text_line_dataset_op.h
# -*- coding: utf-8 -*- import pytest from cookiecutter import exceptions, vcs @pytest.mark.parametrize('repo_url, exp_repo_type, exp_repo_url', [ ( 'git+https://github.com/pytest-dev/cookiecutter-pytest-plugin.git', 'git', 'https://github.com/pytest-dev/cookiecutter-pytest-plugin.git' ), ( 'hg+https://bitbucket.org/foo/bar.hg', 'hg', 'https://bitbucket.org/foo/bar.hg' ), ( 'https://github.com/pytest-dev/cookiecutter-pytest-plugin.git', 'git', 'https://github.com/pytest-dev/cookiecutter-pytest-plugin.git' ), ( 'https://bitbucket.org/foo/bar.hg', 'hg', 'https://bitbucket.org/foo/bar.hg' ), ( 'https://github.com/audreyr/cookiecutter-pypackage.git', 'git', 'https://github.com/audreyr/cookiecutter-pypackage.git', ), ( 'https://github.com/audreyr/cookiecutter-pypackage', 'git', 'https://github.com/audreyr/cookiecutter-pypackage', ), ( 'git@gitorious.org:cookiecutter-gitorious/cookiecutter-gitorious.git', 'git', 'git@gitorious.org:cookiecutter-gitorious/cookiecutter-gitorious.git', ), ( 'https://audreyr@bitbucket.org/audreyr/cookiecutter-bitbucket', 'hg', 'https://audreyr@bitbucket.org/audreyr/cookiecutter-bitbucket', ) ]) def test_identify_known_repo(repo_url, exp_repo_type, exp_repo_url): assert vcs.identify_repo(repo_url) == (exp_repo_type, exp_repo_url) @pytest.fixture(params=[ 'foo+git', # uses explicit identifier with 'git' in the wrong place 'foo+hg', # uses explicit identifier with 'hg' in the wrong place 'foo+bar', # uses explicit identifier with neither 'git' nor 'hg' 'foobar', # no identifier but neither 'git' nor 'bitbucket' in url 'http://norepotypespecified.com' ]) def unknown_repo_type_url(request): return request.param def test_identify_raise_on_unknown_repo(unknown_repo_type_url): with pytest.raises(exceptions.UnknownRepoType): vcs.identify_repo(unknown_repo_type_url)
unknown
codeparrot/codeparrot-clean
from cornice import Service from daybed.backends.exceptions import CredentialsAlreadyExist from daybed.tokens import get_hawk_credentials, hmac_digest from daybed.views.errors import forbidden_view tokens = Service(name='tokens', path='/tokens', description='Tokens') token = Service(name='token', path='/token', description='Token') @tokens.post(permission='post_token') def post_tokens(request): """Creates a new token and store it""" # If we have an authorization header with the Basic or Token realm # Use it to derive the key session_token = None if request.authorization and \ request.authorization[0] in ["Basic", "Token"]: session_token = hmac_digest(request.registry.tokenHmacKey, "%s %s" % request.authorization[:2]) token, credentials = get_hawk_credentials(session_token) try: request.db.store_credentials(token, credentials) except CredentialsAlreadyExist: request.response.status = "200 OK" else: request.response.status = "201 Created" return { 'token': token, 'credentials': credentials } @token.get() def get_token(request): if request.credentials_id: token = request.db.get_token(request.credentials_id) _, credentials = get_hawk_credentials(token) return { 'token': token, 'credentials': credentials } else: return forbidden_view(request)
unknown
codeparrot/codeparrot-clean
#!/bin/sh -eu # SPDX-License-Identifier: GPL-2.0 [ ! -x "$(command -v "$1")" ] && exit 1 tmp_file=$(mktemp) trap "rm -f $tmp_file" EXIT cat << EOF >$tmp_file static inline int u(const int *q) { __typeof_unqual__(*q) v = *q; return v; } EOF # sparse happily exits with 0 on error so validate # there is none on stderr. Use awk as grep is a pain with sh -e $@ $tmp_file 2>&1 | awk -v c=1 '/error/{c=0}END{print c}'
unknown
github
https://github.com/torvalds/linux
scripts/checker-valid.sh
#ifndef HAVE_DES_TABLES /* Initial key schedule permutation */ static const C_block PC1ROT[64/CHUNKBITS][1<<CHUNKBITS] = { { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 1, 0, 0, 0, 0, 0,}}, {{ 0, 0, 1, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 1, 1, 0, 0, 0, 0,}}, {{ 0, 0, 1, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 1, 0, 0, 16, 0, 0,}}, {{ 0, 0, 1, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 1, 0, 16, 0, 0,}}, {{ 0, 0, 0, 1, 0, 16, 0, 0,}}, {{ 0, 0, 1, 1, 0, 16, 0, 0,}}, {{ 0, 0, 1, 1, 0, 16, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0,128, 0, 0, 0,}}, {{ 0, 0, 4, 0, 0, 0, 0, 0,}}, {{ 0, 0, 4, 0,128, 0, 0, 0,}}, {{ 0, 16, 0, 0, 0, 0, 0, 0,}}, {{ 0, 16, 0, 0,128, 0, 0, 0,}}, {{ 0, 16, 4, 0, 0, 0, 0, 0,}}, {{ 0, 16, 4, 0,128, 0, 0, 0,}}, {{ 0, 0, 0, 8, 0, 0, 0, 0,}}, {{ 0, 0, 0, 8,128, 0, 0, 0,}}, {{ 0, 0, 4, 8, 0, 0, 0, 0,}}, {{ 0, 0, 4, 8,128, 0, 0, 0,}}, {{ 0, 16, 0, 8, 0, 0, 0, 0,}}, {{ 0, 16, 0, 8,128, 0, 0, 0,}}, {{ 0, 16, 4, 8, 0, 0, 0, 0,}}, {{ 0, 16, 4, 8,128, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 64, 0,}}, {{ 0, 0, 0, 0, 0, 0, 64, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0, 64, 8,}}, {{ 0, 0, 0, 0, 0, 0, 64, 8,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 64, 16,}}, {{ 0, 0, 0, 0, 0, 0, 64, 16,}}, {{ 0, 0, 0, 0, 0, 0, 0, 24,}}, {{ 0, 0, 0, 0, 0, 0, 0, 24,}}, {{ 0, 0, 0, 0, 0, 0, 64, 24,}}, {{ 0, 0, 0, 0, 0, 0, 64, 24,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 2, 0, 0, 0, 0,}}, {{ 0, 1, 0, 0, 0, 0, 0, 0,}}, {{ 0, 1, 0, 2, 0, 0, 0, 0,}}, {{ 4, 0, 0, 0, 0, 0, 0, 0,}}, {{ 4, 0, 0, 2, 0, 0, 0, 0,}}, {{ 4, 1, 0, 0, 0, 0, 0, 0,}}, {{ 4, 1, 0, 2, 0, 0, 0, 0,}}, {{ 0, 32, 0, 0, 0, 0, 0, 0,}}, {{ 0, 32, 0, 2, 0, 0, 0, 0,}}, {{ 0, 33, 0, 0, 0, 0, 0, 0,}}, {{ 0, 33, 0, 2, 0, 0, 0, 0,}}, {{ 4, 32, 0, 0, 
0, 0, 0, 0,}}, {{ 4, 32, 0, 2, 0, 0, 0, 0,}}, {{ 4, 33, 0, 0, 0, 0, 0, 0,}}, {{ 4, 33, 0, 2, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 64, 0, 0,}}, {{ 0, 0, 0, 0, 0, 64, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 64, 0, 0,}}, {{ 0, 0, 0, 0, 4, 64, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 8, 0,}}, {{ 0, 0, 0, 0, 0, 0, 8, 0,}}, {{ 0, 0, 0, 0, 0, 64, 8, 0,}}, {{ 0, 0, 0, 0, 0, 64, 8, 0,}}, {{ 0, 0, 0, 0, 4, 0, 8, 0,}}, {{ 0, 0, 0, 0, 4, 0, 8, 0,}}, {{ 0, 0, 0, 0, 4, 64, 8, 0,}}, {{ 0, 0, 0, 0, 4, 64, 8, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0,128, 0,}}, {{ 0, 64, 0, 0, 0, 0, 0, 0,}}, {{ 0, 64, 0, 0, 0, 0,128, 0,}}, {{ 0, 0, 0, 64, 0, 0, 0, 0,}}, {{ 0, 0, 0, 64, 0, 0,128, 0,}}, {{ 0, 64, 0, 64, 0, 0, 0, 0,}}, {{ 0, 64, 0, 64, 0, 0,128, 0,}}, {{128, 0, 0, 0, 0, 0, 0, 0,}}, {{128, 0, 0, 0, 0, 0,128, 0,}}, {{128, 64, 0, 0, 0, 0, 0, 0,}}, {{128, 64, 0, 0, 0, 0,128, 0,}}, {{128, 0, 0, 64, 0, 0, 0, 0,}}, {{128, 0, 0, 64, 0, 0,128, 0,}}, {{128, 64, 0, 64, 0, 0, 0, 0,}}, {{128, 64, 0, 64, 0, 0,128, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0,128,}}, {{ 0, 0, 0, 0, 0, 0, 0,128,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0,128,}}, {{ 0, 0, 0, 0, 0, 8, 0,128,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0,128,}}, {{ 0, 0, 0, 0, 0,128, 0,128,}}, {{ 0, 0, 0, 0, 0,136, 0, 0,}}, {{ 0, 0, 0, 0, 0,136, 0, 0,}}, {{ 0, 0, 0, 0, 0,136, 0,128,}}, {{ 0, 0, 0, 0, 0,136, 0,128,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 8, 0, 0, 0,}}, {{ 0, 0, 0, 32, 0, 0, 0, 0,}}, {{ 0, 0, 0, 32, 8, 0, 0, 0,}}, {{ 0, 0, 16, 0, 0, 0, 0, 0,}}, {{ 0, 0, 16, 0, 8, 0, 0, 0,}}, {{ 0, 0, 16, 32, 0, 0, 0, 0,}}, {{ 0, 0, 16, 32, 8, 0, 0, 0,}}, {{ 0, 0, 32, 0, 0, 0, 0, 0,}}, {{ 0, 0, 32, 0, 8, 0, 0, 0,}}, {{ 0, 0, 32, 32, 0, 0, 0, 0,}}, {{ 0, 0, 32, 32, 8, 0, 0, 0,}}, 
{{ 0, 0, 48, 0, 0, 0, 0, 0,}}, {{ 0, 0, 48, 0, 8, 0, 0, 0,}}, {{ 0, 0, 48, 32, 0, 0, 0, 0,}}, {{ 0, 0, 48, 32, 8, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 16, 0, 0, 0,}}, {{ 0, 0, 0, 0, 16, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 16, 0, 16, 0,}}, {{ 0, 0, 0, 0, 16, 0, 16, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 80, 0, 0, 0,}}, {{ 0, 0, 0, 0, 80, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 16, 0,}}, {{ 0, 0, 0, 0, 64, 0, 16, 0,}}, {{ 0, 0, 0, 0, 80, 0, 16, 0,}}, {{ 0, 0, 0, 0, 80, 0, 16, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 16, 0, 0, 0, 0,}}, {{ 0, 0, 8, 0, 0, 0, 0, 0,}}, {{ 0, 0, 8, 16, 0, 0, 0, 0,}}, {{ 16, 0, 0, 0, 0, 0, 0, 0,}}, {{ 16, 0, 0, 16, 0, 0, 0, 0,}}, {{ 16, 0, 8, 0, 0, 0, 0, 0,}}, {{ 16, 0, 8, 16, 0, 0, 0, 0,}}, {{ 0, 4, 0, 0, 0, 0, 0, 0,}}, {{ 0, 4, 0, 16, 0, 0, 0, 0,}}, {{ 0, 4, 8, 0, 0, 0, 0, 0,}}, {{ 0, 4, 8, 16, 0, 0, 0, 0,}}, {{ 16, 4, 0, 0, 0, 0, 0, 0,}}, {{ 16, 4, 0, 16, 0, 0, 0, 0,}}, {{ 16, 4, 8, 0, 0, 0, 0, 0,}}, {{ 16, 4, 8, 16, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 4, 0, 0,}}, {{ 0, 0, 0, 0, 0, 4, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 4, 0, 0,}}, {{ 0, 0, 2, 0, 0, 4, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 4,}}, {{ 0, 0, 0, 0, 0, 0, 0, 4,}}, {{ 0, 0, 0, 0, 0, 4, 0, 4,}}, {{ 0, 0, 0, 0, 0, 4, 0, 4,}}, {{ 0, 0, 2, 0, 0, 0, 0, 4,}}, {{ 0, 0, 2, 0, 0, 0, 0, 4,}}, {{ 0, 0, 2, 0, 0, 4, 0, 4,}}, {{ 0, 0, 2, 0, 0, 4, 0, 4,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 64, 0, 0, 0, 0, 0,}}, {{ 2, 0, 0, 0, 0, 0, 0, 0,}}, {{ 2, 0, 64, 0, 0, 0, 0, 0,}}, {{ 0,128, 0, 0, 0, 0, 0, 0,}}, {{ 0,128, 64, 0, 0, 0, 0, 0,}}, {{ 2,128, 0, 0, 0, 0, 0, 0,}}, {{ 2,128, 64, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0,128, 0, 0, 0, 0,}}, {{ 0, 0, 64,128, 0, 0, 0, 0,}}, {{ 2, 0, 0,128, 0, 0, 0, 0,}}, {{ 2, 0, 64,128, 0, 0, 0, 0,}}, {{ 
0,128, 0,128, 0, 0, 0, 0,}}, {{ 0,128, 64,128, 0, 0, 0, 0,}}, {{ 2,128, 0,128, 0, 0, 0, 0,}}, {{ 2,128, 64,128, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 32, 0, 0, 0,}}, {{ 0, 0, 0, 0, 32, 0, 0, 0,}}, {{ 0, 0, 0, 0, 32, 0, 0, 64,}}, {{ 0, 0, 0, 0, 32, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 64,}}, {{ 0, 0, 0, 0, 0, 32, 0, 64,}}, {{ 0, 0, 0, 0, 32, 32, 0, 0,}}, {{ 0, 0, 0, 0, 32, 32, 0, 0,}}, {{ 0, 0, 0, 0, 32, 32, 0, 64,}}, {{ 0, 0, 0, 0, 32, 32, 0, 64,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 2, 0, 0, 0, 0, 0, 0,}}, {{ 8, 0, 0, 0, 0, 0, 0, 0,}}, {{ 8, 2, 0, 0, 0, 0, 0, 0,}}, {{ 1, 0, 0, 0, 0, 0, 0, 0,}}, {{ 1, 2, 0, 0, 0, 0, 0, 0,}}, {{ 9, 0, 0, 0, 0, 0, 0, 0,}}, {{ 9, 2, 0, 0, 0, 0, 0, 0,}}, {{ 64, 0, 0, 0, 0, 0, 0, 0,}}, {{ 64, 2, 0, 0, 0, 0, 0, 0,}}, {{ 72, 0, 0, 0, 0, 0, 0, 0,}}, {{ 72, 2, 0, 0, 0, 0, 0, 0,}}, {{ 65, 0, 0, 0, 0, 0, 0, 0,}}, {{ 65, 2, 0, 0, 0, 0, 0, 0,}}, {{ 73, 0, 0, 0, 0, 0, 0, 0,}}, {{ 73, 2, 0, 0, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 32,}}, {{ 0, 0, 0, 0, 0, 0, 0, 32,}}, {{ 0, 0, 0, 0, 0, 0, 32, 32,}}, {{ 0, 0, 0, 0, 0, 0, 32, 32,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 36, 0,}}, {{ 0, 0, 0, 0, 0, 0, 36, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 32,}}, {{ 0, 0, 0, 0, 0, 0, 4, 32,}}, {{ 0, 0, 0, 0, 0, 0, 36, 32,}}, {{ 0, 0, 0, 0, 0, 0, 36, 32,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 32, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 4, 0, 0, 0, 0,}}, {{ 32, 0, 0, 4, 0, 0, 0, 0,}}, {{ 0, 0,128, 0, 0, 0, 0, 0,}}, {{ 32, 0,128, 0, 0, 0, 0, 0,}}, {{ 0, 0,128, 4, 0, 0, 0, 0,}}, {{ 32, 0,128, 4, 0, 0, 0, 0,}}, {{ 0, 8, 0, 0, 0, 0, 0, 0,}}, {{ 32, 8, 0, 0, 0, 0, 0, 0,}}, {{ 0, 8, 0, 4, 0, 0, 0, 0,}}, {{ 32, 8, 0, 4, 0, 0, 0, 
0,}}, {{ 0, 8,128, 0, 0, 0, 0, 0,}}, {{ 32, 8,128, 0, 0, 0, 0, 0,}}, {{ 0, 8,128, 4, 0, 0, 0, 0,}}, {{ 32, 8,128, 4, 0, 0, 0, 0,}}, }, }; /* Subsequent key schedule rotation permutations */ static const C_block PC2ROT[2][64/CHUNKBITS][1<<CHUNKBITS] = { { { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0,128, 0, 0, 0, 0, 0,}}, {{ 8, 0, 0, 0, 0, 0, 0, 0,}}, {{ 8, 0,128, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 64, 0, 0, 0, 0,}}, {{ 0, 0,128, 64, 0, 0, 0, 0,}}, {{ 8, 0, 0, 64, 0, 0, 0, 0,}}, {{ 8, 0,128, 64, 0, 0, 0, 0,}}, {{ 0, 0, 0, 4, 0, 0, 0, 0,}}, {{ 0, 0,128, 4, 0, 0, 0, 0,}}, {{ 8, 0, 0, 4, 0, 0, 0, 0,}}, {{ 8, 0,128, 4, 0, 0, 0, 0,}}, {{ 0, 0, 0, 68, 0, 0, 0, 0,}}, {{ 0, 0,128, 68, 0, 0, 0, 0,}}, {{ 8, 0, 0, 68, 0, 0, 0, 0,}}, {{ 8, 0,128, 68, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0,128, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 4, 0, 0, 0, 0, 0,}}, {{ 0,128, 4, 0, 0, 0, 0, 0,}}, {{ 0, 8, 0, 0, 0, 0, 0, 0,}}, {{ 0,136, 0, 0, 0, 0, 0, 0,}}, {{ 0, 8, 4, 0, 0, 0, 0, 0,}}, {{ 0,136, 4, 0, 0, 0, 0, 0,}}, {{ 0, 0, 32, 0, 0, 0, 0, 0,}}, {{ 0,128, 32, 0, 0, 0, 0, 0,}}, {{ 0, 0, 36, 0, 0, 0, 0, 0,}}, {{ 0,128, 36, 0, 0, 0, 0, 0,}}, {{ 0, 8, 32, 0, 0, 0, 0, 0,}}, {{ 0,136, 32, 0, 0, 0, 0, 0,}}, {{ 0, 8, 36, 0, 0, 0, 0, 0,}}, {{ 0,136, 36, 0, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 64, 0, 0, 0, 0, 0, 0,}}, {{ 32, 0, 0, 0, 0, 0, 0, 0,}}, {{ 32, 64, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0,128, 0, 0, 0, 0,}}, {{ 0, 64, 0,128, 0, 0, 0, 0,}}, {{ 32, 0, 0,128, 0, 0, 0, 0,}}, {{ 32, 64, 0,128, 0, 0, 0, 0,}}, {{ 0, 0, 0, 16, 0, 0, 0, 0,}}, {{ 0, 64, 0, 16, 0, 0, 0, 0,}}, {{ 32, 0, 0, 16, 0, 0, 0, 0,}}, {{ 32, 64, 0, 16, 0, 0, 0, 0,}}, {{ 0, 0, 0,144, 0, 0, 0, 0,}}, {{ 0, 64, 0,144, 0, 0, 0, 0,}}, {{ 32, 0, 0,144, 0, 0, 0, 0,}}, {{ 32, 64, 0,144, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 4, 0, 0, 0, 0, 0, 0, 0,}}, {{128, 0, 0, 0, 0, 0, 0, 0,}}, {{132, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 32, 0, 0, 0, 0,}}, {{ 4, 0, 0, 32, 0, 0, 0, 0,}}, {{128, 0, 0, 32, 0, 0, 0, 0,}}, {{132, 0, 
0, 32, 0, 0, 0, 0,}}, {{ 1, 0, 0, 0, 0, 0, 0, 0,}}, {{ 5, 0, 0, 0, 0, 0, 0, 0,}}, {{129, 0, 0, 0, 0, 0, 0, 0,}}, {{133, 0, 0, 0, 0, 0, 0, 0,}}, {{ 1, 0, 0, 32, 0, 0, 0, 0,}}, {{ 5, 0, 0, 32, 0, 0, 0, 0,}}, {{129, 0, 0, 32, 0, 0, 0, 0,}}, {{133, 0, 0, 32, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 64, 0,}}, {{ 0, 0, 0, 0, 32, 0, 0, 0,}}, {{ 0, 0, 0, 0, 32, 0, 64, 0,}}, {{ 0, 1, 0, 0, 0, 0, 0, 0,}}, {{ 0, 1, 0, 0, 0, 0, 64, 0,}}, {{ 0, 1, 0, 0, 32, 0, 0, 0,}}, {{ 0, 1, 0, 0, 32, 0, 64, 0,}}, {{ 2, 0, 0, 0, 0, 0, 0, 0,}}, {{ 2, 0, 0, 0, 0, 0, 64, 0,}}, {{ 2, 0, 0, 0, 32, 0, 0, 0,}}, {{ 2, 0, 0, 0, 32, 0, 64, 0,}}, {{ 2, 1, 0, 0, 0, 0, 0, 0,}}, {{ 2, 1, 0, 0, 0, 0, 64, 0,}}, {{ 2, 1, 0, 0, 32, 0, 0, 0,}}, {{ 2, 1, 0, 0, 32, 0, 64, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 16, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 4, 0, 0, 0, 0, 0, 0,}}, {{ 16, 4, 0, 0, 0, 0, 0, 0,}}, {{ 0, 2, 0, 0, 0, 0, 0, 0,}}, {{ 16, 2, 0, 0, 0, 0, 0, 0,}}, {{ 0, 6, 0, 0, 0, 0, 0, 0,}}, {{ 16, 6, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 8, 0, 0, 0, 0,}}, {{ 16, 0, 0, 8, 0, 0, 0, 0,}}, {{ 0, 4, 0, 8, 0, 0, 0, 0,}}, {{ 16, 4, 0, 8, 0, 0, 0, 0,}}, {{ 0, 2, 0, 8, 0, 0, 0, 0,}}, {{ 16, 2, 0, 8, 0, 0, 0, 0,}}, {{ 0, 6, 0, 8, 0, 0, 0, 0,}}, {{ 16, 6, 0, 8, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0,128, 0,}}, {{ 0, 0, 0, 0, 0, 0,128, 8,}}, {{ 0, 16, 0, 0, 0, 0, 0, 0,}}, {{ 0, 16, 0, 0, 0, 0, 0, 8,}}, {{ 0, 16, 0, 0, 0, 0,128, 0,}}, {{ 0, 16, 0, 0, 0, 0,128, 8,}}, {{ 0, 32, 0, 0, 0, 0, 0, 0,}}, {{ 0, 32, 0, 0, 0, 0, 0, 8,}}, {{ 0, 32, 0, 0, 0, 0,128, 0,}}, {{ 0, 32, 0, 0, 0, 0,128, 8,}}, {{ 0, 48, 0, 0, 0, 0, 0, 0,}}, {{ 0, 48, 0, 0, 0, 0, 0, 8,}}, {{ 0, 48, 0, 0, 0, 0,128, 0,}}, {{ 0, 48, 0, 0, 0, 0,128, 8,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 64, 0, 0, 0, 0, 0,}}, {{ 0, 0, 8, 0, 0, 0, 0, 0,}}, {{ 0, 0, 72, 0, 0, 0, 0, 0,}}, {{ 0, 0, 16, 0, 0, 0, 0, 0,}}, {{ 0, 0, 80, 0, 0, 0, 0, 0,}}, {{ 0, 0, 24, 0, 0, 0, 0, 0,}}, {{ 0, 0, 
88, 0, 0, 0, 0, 0,}}, {{ 64, 0, 0, 0, 0, 0, 0, 0,}}, {{ 64, 0, 64, 0, 0, 0, 0, 0,}}, {{ 64, 0, 8, 0, 0, 0, 0, 0,}}, {{ 64, 0, 72, 0, 0, 0, 0, 0,}}, {{ 64, 0, 16, 0, 0, 0, 0, 0,}}, {{ 64, 0, 80, 0, 0, 0, 0, 0,}}, {{ 64, 0, 24, 0, 0, 0, 0, 0,}}, {{ 64, 0, 88, 0, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 24, 0, 0,}}, {{ 0, 0, 0, 0, 0, 24, 0, 0,}}, {{ 0, 0, 0, 0, 0, 24, 0, 0,}}, {{ 0, 0, 0, 0, 0, 24, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 4, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 32,}}, {{ 0, 0, 0, 0, 0, 4, 0, 32,}}, {{ 0, 0, 0, 0, 0, 0, 0, 4,}}, {{ 0, 0, 0, 0, 0, 4, 0, 4,}}, {{ 0, 0, 0, 0, 0, 0, 0, 36,}}, {{ 0, 0, 0, 0, 0, 4, 0, 36,}}, {{ 0, 0, 0, 2, 0, 0, 0, 0,}}, {{ 0, 0, 0, 2, 0, 4, 0, 0,}}, {{ 0, 0, 0, 2, 0, 0, 0, 32,}}, {{ 0, 0, 0, 2, 0, 4, 0, 32,}}, {{ 0, 0, 0, 2, 0, 0, 0, 4,}}, {{ 0, 0, 0, 2, 0, 4, 0, 4,}}, {{ 0, 0, 0, 2, 0, 0, 0, 36,}}, {{ 0, 0, 0, 2, 0, 4, 0, 36,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 64,}}, {{ 0, 0, 0, 0, 0, 0, 16, 64,}}, {{ 0, 0, 0, 0, 0, 0, 16, 64,}}, {{ 0, 0, 0, 0, 0, 0, 16, 64,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 16,}}, {{ 0, 0, 0, 0, 0, 0, 0,128,}}, {{ 0, 0, 0, 0, 0, 0, 0,144,}}, {{ 0, 0, 0, 0, 0, 0, 4,128,}}, {{ 0, 0, 0, 0, 
0, 0, 4,144,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 16,}}, {{ 0, 0, 0, 0, 64, 0, 4, 0,}}, {{ 0, 0, 0, 0, 64, 0, 4, 16,}}, {{ 0, 0, 0, 0, 64, 0, 0,128,}}, {{ 0, 0, 0, 0, 64, 0, 0,144,}}, {{ 0, 0, 0, 0, 64, 0, 4,128,}}, {{ 0, 0, 0, 0, 64, 0, 4,144,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 1, 0,128, 0, 0,}}, {{ 0, 0, 0, 1, 0,128, 0, 0,}}, {{ 0, 0, 0, 1, 0,128, 0, 0,}}, {{ 0, 0, 0, 1, 0,128, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0,128, 0, 0, 0,}}, {{ 0, 0, 2, 0,128, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 64, 0, 0,}}, {{ 0, 0, 2, 0, 0, 64, 0, 0,}}, {{ 0, 0, 0, 0,128, 64, 0, 0,}}, {{ 0, 0, 2, 0,128, 64, 0, 0,}}, {{ 0, 0, 0, 0, 8, 0, 0, 0,}}, {{ 0, 0, 2, 0, 8, 0, 0, 0,}}, {{ 0, 0, 0, 0,136, 0, 0, 0,}}, {{ 0, 0, 2, 0,136, 0, 0, 0,}}, {{ 0, 0, 0, 0, 8, 64, 0, 0,}}, {{ 0, 0, 2, 0, 8, 64, 0, 0,}}, {{ 0, 0, 0, 0,136, 64, 0, 0,}}, {{ 0, 0, 2, 0,136, 64, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 32, 0, 0,}}, {{ 0, 0, 0, 0, 4, 32, 0, 0,}}, {{ 0, 0, 0, 0, 4, 32, 0, 0,}}, {{ 0, 0, 0, 0, 4, 32, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 8, 0,}}, {{ 0, 0, 1, 0, 0, 0, 0, 0,}}, {{ 0, 0, 1, 0, 0, 0, 8, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 40, 0,}}, {{ 0, 0, 1, 0, 0, 0, 32, 0,}}, {{ 0, 0, 1, 0, 0, 0, 40, 
0,}}, {{ 0, 0, 0, 0, 16, 0, 0, 0,}}, {{ 0, 0, 0, 0, 16, 0, 8, 0,}}, {{ 0, 0, 1, 0, 16, 0, 0, 0,}}, {{ 0, 0, 1, 0, 16, 0, 8, 0,}}, {{ 0, 0, 0, 0, 16, 0, 32, 0,}}, {{ 0, 0, 0, 0, 16, 0, 40, 0,}}, {{ 0, 0, 1, 0, 16, 0, 32, 0,}}, {{ 0, 0, 1, 0, 16, 0, 40, 0,}}, }, }, { { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 8, 0, 0, 0, 0,}}, {{ 0, 0, 0, 4, 0, 0, 0, 0,}}, {{ 0, 0, 0, 12, 0, 0, 0, 0,}}, {{ 0, 0, 16, 0, 0, 0, 0, 0,}}, {{ 0, 0, 16, 8, 0, 0, 0, 0,}}, {{ 0, 0, 16, 4, 0, 0, 0, 0,}}, {{ 0, 0, 16, 12, 0, 0, 0, 0,}}, {{ 0, 16, 0, 0, 0, 0, 0, 0,}}, {{ 0, 16, 0, 8, 0, 0, 0, 0,}}, {{ 0, 16, 0, 4, 0, 0, 0, 0,}}, {{ 0, 16, 0, 12, 0, 0, 0, 0,}}, {{ 0, 16, 16, 0, 0, 0, 0, 0,}}, {{ 0, 16, 16, 8, 0, 0, 0, 0,}}, {{ 0, 16, 16, 4, 0, 0, 0, 0,}}, {{ 0, 16, 16, 12, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 1, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 1, 0, 0, 0, 0, 0, 0,}}, {{ 1, 1, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 16, 0, 0, 0, 0,}}, {{ 1, 0, 0, 16, 0, 0, 0, 0,}}, {{ 0, 1, 0, 16, 0, 0, 0, 0,}}, {{ 1, 1, 0, 16, 0, 0, 0, 0,}}, {{ 0, 4, 0, 0, 0, 0, 0, 0,}}, {{ 1, 4, 0, 0, 0, 0, 0, 0,}}, {{ 0, 5, 0, 0, 0, 0, 0, 0,}}, {{ 1, 5, 0, 0, 0, 0, 0, 0,}}, {{ 0, 4, 0, 16, 0, 0, 0, 0,}}, {{ 1, 4, 0, 16, 0, 0, 0, 0,}}, {{ 0, 5, 0, 16, 0, 0, 0, 0,}}, {{ 1, 5, 0, 16, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 32, 0, 0, 0, 0,}}, {{ 0, 0, 4, 0, 0, 0, 0, 0,}}, {{ 0, 0, 4, 32, 0, 0, 0, 0,}}, {{ 64, 0, 0, 0, 0, 0, 0, 0,}}, {{ 64, 0, 0, 32, 0, 0, 0, 0,}}, {{ 64, 0, 4, 0, 0, 0, 0, 0,}}, {{ 64, 0, 4, 32, 0, 0, 0, 0,}}, {{ 0, 0, 64, 0, 0, 0, 0, 0,}}, {{ 0, 0, 64, 32, 0, 0, 0, 0,}}, {{ 0, 0, 68, 0, 0, 0, 0, 0,}}, {{ 0, 0, 68, 32, 0, 0, 0, 0,}}, {{ 64, 0, 64, 0, 0, 0, 0, 0,}}, {{ 64, 0, 64, 32, 0, 0, 0, 0,}}, {{ 64, 0, 68, 0, 0, 0, 0, 0,}}, {{ 64, 0, 68, 32, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 64, 0, 0, 0, 0,}}, {{ 0, 0, 32, 0, 0, 0, 0, 0,}}, {{ 0, 0, 32, 64, 0, 0, 0, 0,}}, {{ 0, 0, 8, 0, 0, 0, 0, 0,}}, {{ 0, 0, 8, 64, 0, 0, 0, 0,}}, {{ 0, 0, 40, 0, 0, 0, 0, 0,}}, {{ 0, 0, 
40, 64, 0, 0, 0, 0,}}, {{ 0, 0,128, 0, 0, 0, 0, 0,}}, {{ 0, 0,128, 64, 0, 0, 0, 0,}}, {{ 0, 0,160, 0, 0, 0, 0, 0,}}, {{ 0, 0,160, 64, 0, 0, 0, 0,}}, {{ 0, 0,136, 0, 0, 0, 0, 0,}}, {{ 0, 0,136, 64, 0, 0, 0, 0,}}, {{ 0, 0,168, 0, 0, 0, 0, 0,}}, {{ 0, 0,168, 64, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 64, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 32,}}, {{ 0, 0, 0, 0, 0, 64, 0, 32,}}, {{ 0, 64, 0, 0, 0, 0, 0, 0,}}, {{ 0, 64, 0, 0, 0, 64, 0, 0,}}, {{ 0, 64, 0, 0, 0, 0, 0, 32,}}, {{ 0, 64, 0, 0, 0, 64, 0, 32,}}, {{ 8, 0, 0, 0, 0, 0, 0, 0,}}, {{ 8, 0, 0, 0, 0, 64, 0, 0,}}, {{ 8, 0, 0, 0, 0, 0, 0, 32,}}, {{ 8, 0, 0, 0, 0, 64, 0, 32,}}, {{ 8, 64, 0, 0, 0, 0, 0, 0,}}, {{ 8, 64, 0, 0, 0, 64, 0, 0,}}, {{ 8, 64, 0, 0, 0, 0, 0, 32,}}, {{ 8, 64, 0, 0, 0, 64, 0, 32,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0,128, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0,128, 0, 0, 0, 0,}}, {{ 0,128, 0,128, 0, 0, 0, 0,}}, {{ 32, 0, 0, 0, 0, 0, 0, 0,}}, {{ 32,128, 0, 0, 0, 0, 0, 0,}}, {{ 32, 0, 0,128, 0, 0, 0, 0,}}, {{ 32,128, 0,128, 0, 0, 0, 0,}}, {{ 0, 32, 0, 0, 0, 0, 0, 0,}}, {{ 0,160, 0, 0, 0, 0, 0, 0,}}, {{ 0, 32, 0,128, 0, 0, 0, 0,}}, {{ 0,160, 0,128, 0, 0, 0, 0,}}, {{ 32, 32, 0, 0, 0, 0, 0, 0,}}, {{ 32,160, 0, 0, 0, 0, 0, 0,}}, {{ 32, 32, 0,128, 0, 0, 0, 0,}}, {{ 32,160, 0,128, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 8, 0, 0, 0,}}, {{ 0, 0, 0, 0, 12, 0, 0, 0,}}, {{ 4, 0, 0, 0, 0, 0, 0, 0,}}, {{ 4, 0, 0, 0, 4, 0, 0, 0,}}, {{ 4, 0, 0, 0, 8, 0, 0, 0,}}, {{ 4, 0, 0, 0, 12, 0, 0, 0,}}, {{128, 0, 0, 0, 0, 0, 0, 0,}}, {{128, 0, 0, 0, 4, 0, 0, 0,}}, {{128, 0, 0, 0, 8, 0, 0, 0,}}, {{128, 0, 0, 0, 12, 0, 0, 0,}}, {{132, 0, 0, 0, 0, 0, 0, 0,}}, {{132, 0, 0, 0, 4, 0, 0, 0,}}, {{132, 0, 0, 0, 8, 0, 0, 0,}}, {{132, 0, 0, 0, 12, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 2, 0, 0, 0, 0, 0, 0,}}, {{ 2, 0, 0, 0, 0, 0, 0, 0,}}, {{ 2, 2, 0, 0, 0, 0, 0, 0,}}, {{ 16, 0, 0, 0, 0, 0, 0, 0,}}, {{ 16, 2, 0, 0, 0, 0, 0, 0,}}, {{ 18, 0, 0, 0, 0, 
0, 0, 0,}}, {{ 18, 2, 0, 0, 0, 0, 0, 0,}}, {{ 0, 8, 0, 0, 0, 0, 0, 0,}}, {{ 0, 10, 0, 0, 0, 0, 0, 0,}}, {{ 2, 8, 0, 0, 0, 0, 0, 0,}}, {{ 2, 10, 0, 0, 0, 0, 0, 0,}}, {{ 16, 8, 0, 0, 0, 0, 0, 0,}}, {{ 16, 10, 0, 0, 0, 0, 0, 0,}}, {{ 18, 8, 0, 0, 0, 0, 0, 0,}}, {{ 18, 10, 0, 0, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 16, 16,}}, {{ 0, 0, 0, 0, 0, 0, 16, 16,}}, {{ 0, 0, 0, 0, 0, 0, 16, 16,}}, {{ 0, 0, 0, 0, 0, 0, 16, 16,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 1, 0, 0, 0, 0, 0,}}, {{ 0, 0, 1, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 64,}}, {{ 0, 0, 1, 0, 0, 32, 0, 0,}}, {{ 0, 0, 1, 0, 0, 32, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0,128, 0,}}, {{ 0, 0, 0, 0, 0, 0,128, 64,}}, {{ 0, 0, 1, 0, 0, 0,128, 0,}}, {{ 0, 0, 1, 0, 0, 0,128, 64,}}, {{ 0, 0, 0, 0, 0, 32,128, 0,}}, {{ 0, 0, 0, 0, 0, 32,128, 64,}}, {{ 0, 0, 1, 0, 0, 32,128, 0,}}, {{ 0, 0, 1, 0, 0, 32,128, 64,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 32, 0,}}, {{ 0, 0, 2, 0, 0, 0, 32, 0,}}, {{ 0, 0, 2, 0, 0, 0, 32, 0,}}, {{ 0, 0, 2, 0, 0, 0, 32, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 8, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 8, 0,}}, {{ 0, 0, 0, 0, 16, 0, 0, 0,}}, {{ 0, 0, 0, 0, 16, 0, 8, 0,}}, {{ 0, 0, 0, 1, 16, 
0, 0, 0,}}, {{ 0, 0, 0, 1, 16, 0, 8, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 4,}}, {{ 0, 0, 0, 0, 0, 0, 8, 4,}}, {{ 0, 0, 0, 1, 0, 0, 0, 4,}}, {{ 0, 0, 0, 1, 0, 0, 8, 4,}}, {{ 0, 0, 0, 0, 16, 0, 0, 4,}}, {{ 0, 0, 0, 0, 16, 0, 8, 4,}}, {{ 0, 0, 0, 1, 16, 0, 0, 4,}}, {{ 0, 0, 0, 1, 16, 0, 8, 4,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 8,}}, {{ 0, 0, 0, 0, 64, 0, 0, 8,}}, {{ 0, 0, 0, 0, 64, 0, 0, 8,}}, {{ 0, 0, 0, 0, 64, 0, 0, 8,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 32, 0, 0, 0,}}, {{ 0, 0, 0, 2, 0, 0, 0, 0,}}, {{ 0, 0, 0, 2, 32, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0,128,}}, {{ 0, 0, 0, 0, 32, 0, 0,128,}}, {{ 0, 0, 0, 2, 0, 0, 0,128,}}, {{ 0, 0, 0, 2, 32, 0, 0,128,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 32, 16, 0, 0,}}, {{ 0, 0, 0, 2, 0, 16, 0, 0,}}, {{ 0, 0, 0, 2, 32, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0,128,}}, {{ 0, 0, 0, 0, 32, 16, 0,128,}}, {{ 0, 0, 0, 2, 0, 16, 0,128,}}, {{ 0, 0, 0, 2, 32, 16, 0,128,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 4, 0,}}, {{ 0, 0, 0, 0, 0, 8, 4, 0,}}, {{ 0, 0, 0, 0, 0, 8, 4, 0,}}, {{ 0, 0, 0, 0, 0, 8, 4, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 64, 0,}}, {{ 0, 0, 0, 0, 0,128, 64, 0,}}, {{ 0, 0, 0, 0,128, 0, 0, 0,}}, {{ 0, 0, 0, 0,128,128, 0, 0,}}, {{ 0, 0, 0, 0,128, 0, 64, 0,}}, 
{{ 0, 0, 0, 0,128,128, 64, 0,}}, {{ 0, 0, 0, 0, 0, 4, 0, 0,}}, {{ 0, 0, 0, 0, 0,132, 0, 0,}}, {{ 0, 0, 0, 0, 0, 4, 64, 0,}}, {{ 0, 0, 0, 0, 0,132, 64, 0,}}, {{ 0, 0, 0, 0,128, 4, 0, 0,}}, {{ 0, 0, 0, 0,128,132, 0, 0,}}, {{ 0, 0, 0, 0,128, 4, 64, 0,}}, {{ 0, 0, 0, 0,128,132, 64, 0,}}, }, }, }; /* Initial permutation/expansion table */ static const C_block IE3264[32/CHUNKBITS][1<<CHUNKBITS] = { { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 64, 4,}}, {{ 4, 0, 0, 0, 0, 0, 0, 64,}}, {{ 4, 0, 0, 0, 0, 0, 64, 68,}}, {{ 0, 0, 0, 0, 64, 4, 0, 0,}}, {{ 0, 0, 0, 0, 64, 4, 64, 4,}}, {{ 4, 0, 0, 0, 64, 4, 0, 64,}}, {{ 4, 0, 0, 0, 64, 4, 64, 68,}}, {{ 0, 0, 0, 0, 0, 64, 4, 0,}}, {{ 0, 0, 0, 0, 0, 64, 68, 4,}}, {{ 4, 0, 0, 0, 0, 64, 4, 64,}}, {{ 4, 0, 0, 0, 0, 64, 68, 68,}}, {{ 0, 0, 0, 0, 64, 68, 4, 0,}}, {{ 0, 0, 0, 0, 64, 68, 68, 4,}}, {{ 4, 0, 0, 0, 64, 68, 4, 64,}}, {{ 4, 0, 0, 0, 64, 68, 68, 68,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 64, 4, 0, 0, 0, 0,}}, {{ 0, 0, 0, 64, 4, 0, 0, 0,}}, {{ 0, 0, 64, 68, 4, 0, 0, 0,}}, {{ 64, 4, 0, 0, 0, 0, 0, 0,}}, {{ 64, 4, 64, 4, 0, 0, 0, 0,}}, {{ 64, 4, 0, 64, 4, 0, 0, 0,}}, {{ 64, 4, 64, 68, 4, 0, 0, 0,}}, {{ 0, 64, 4, 0, 0, 0, 0, 0,}}, {{ 0, 64, 68, 4, 0, 0, 0, 0,}}, {{ 0, 64, 4, 64, 4, 0, 0, 0,}}, {{ 0, 64, 68, 68, 4, 0, 0, 0,}}, {{ 64, 68, 4, 0, 0, 0, 0, 0,}}, {{ 64, 68, 68, 4, 0, 0, 0, 0,}}, {{ 64, 68, 4, 64, 4, 0, 0, 0,}}, {{ 64, 68, 68, 68, 4, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 32,}}, {{ 0, 0, 0, 0, 0, 0, 32, 32,}}, {{ 0, 0, 0, 0, 32, 0, 0, 0,}}, {{ 0, 0, 0, 0, 32, 0, 32, 0,}}, {{ 0, 0, 0, 0, 32, 0, 0, 32,}}, {{ 0, 0, 0, 0, 32, 0, 32, 32,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 32, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 32,}}, {{ 0, 0, 0, 0, 0, 32, 32, 32,}}, {{ 0, 0, 0, 0, 32, 32, 0, 0,}}, {{ 0, 0, 0, 0, 32, 32, 32, 0,}}, {{ 0, 0, 0, 0, 32, 32, 0, 32,}}, {{ 0, 0, 0, 0, 32, 32, 32, 32,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 32, 0, 0, 
0, 0, 0,}}, {{ 0, 0, 0, 32, 0, 0, 0, 0,}}, {{ 0, 0, 32, 32, 0, 0, 0, 0,}}, {{ 32, 0, 0, 0, 0, 0, 0, 0,}}, {{ 32, 0, 32, 0, 0, 0, 0, 0,}}, {{ 32, 0, 0, 32, 0, 0, 0, 0,}}, {{ 32, 0, 32, 32, 0, 0, 0, 0,}}, {{ 0, 32, 0, 0, 0, 0, 0, 0,}}, {{ 0, 32, 32, 0, 0, 0, 0, 0,}}, {{ 0, 32, 0, 32, 0, 0, 0, 0,}}, {{ 0, 32, 32, 32, 0, 0, 0, 0,}}, {{ 32, 32, 0, 0, 0, 0, 0, 0,}}, {{ 32, 32, 32, 0, 0, 0, 0, 0,}}, {{ 32, 32, 0, 32, 0, 0, 0, 0,}}, {{ 32, 32, 32, 32, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 16, 16,}}, {{ 0, 0, 0, 0, 16, 0, 0, 0,}}, {{ 0, 0, 0, 0, 16, 0, 16, 0,}}, {{ 0, 0, 0, 0, 16, 0, 0, 16,}}, {{ 0, 0, 0, 0, 16, 0, 16, 16,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 16, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 16,}}, {{ 0, 0, 0, 0, 0, 16, 16, 16,}}, {{ 0, 0, 0, 0, 16, 16, 0, 0,}}, {{ 0, 0, 0, 0, 16, 16, 16, 0,}}, {{ 0, 0, 0, 0, 16, 16, 0, 16,}}, {{ 0, 0, 0, 0, 16, 16, 16, 16,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 16, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 16, 0, 0, 0, 0,}}, {{ 0, 0, 16, 16, 0, 0, 0, 0,}}, {{ 16, 0, 0, 0, 0, 0, 0, 0,}}, {{ 16, 0, 16, 0, 0, 0, 0, 0,}}, {{ 16, 0, 0, 16, 0, 0, 0, 0,}}, {{ 16, 0, 16, 16, 0, 0, 0, 0,}}, {{ 0, 16, 0, 0, 0, 0, 0, 0,}}, {{ 0, 16, 16, 0, 0, 0, 0, 0,}}, {{ 0, 16, 0, 16, 0, 0, 0, 0,}}, {{ 0, 16, 16, 16, 0, 0, 0, 0,}}, {{ 16, 16, 0, 0, 0, 0, 0, 0,}}, {{ 16, 16, 16, 0, 0, 0, 0, 0,}}, {{ 16, 16, 0, 16, 0, 0, 0, 0,}}, {{ 16, 16, 16, 16, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 8, 0,}}, {{ 0, 0, 0, 0, 0, 0,128, 8,}}, {{ 0, 0, 0, 0, 0,128,136, 8,}}, {{ 0, 0, 0,128, 8, 0, 0, 0,}}, {{ 0, 0, 0,128, 8,128, 8, 0,}}, {{ 0, 0, 0,128, 8, 0,128, 8,}}, {{ 0, 0, 0,128, 8,128,136, 8,}}, {{ 0, 0, 0, 0,128, 8, 0, 0,}}, {{ 0, 0, 0, 0,128,136, 8, 0,}}, {{ 0, 0, 0, 0,128, 8,128, 8,}}, {{ 0, 0, 0, 0,128,136,136, 8,}}, {{ 0, 0, 0,128,136, 8, 0, 0,}}, {{ 0, 0, 0,128,136,136, 8, 0,}}, {{ 0, 0, 0,128,136, 8,128, 8,}}, {{ 0, 0, 
0,128,136,136,136, 8,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0,128, 8, 0, 0, 0, 0, 0,}}, {{ 0, 0,128, 8, 0, 0, 0, 0,}}, {{ 0,128,136, 8, 0, 0, 0, 0,}}, {{ 8, 0, 0, 0, 0, 0, 0,128,}}, {{ 8,128, 8, 0, 0, 0, 0,128,}}, {{ 8, 0,128, 8, 0, 0, 0,128,}}, {{ 8,128,136, 8, 0, 0, 0,128,}}, {{128, 8, 0, 0, 0, 0, 0, 0,}}, {{128,136, 8, 0, 0, 0, 0, 0,}}, {{128, 8,128, 8, 0, 0, 0, 0,}}, {{128,136,136, 8, 0, 0, 0, 0,}}, {{136, 8, 0, 0, 0, 0, 0,128,}}, {{136,136, 8, 0, 0, 0, 0,128,}}, {{136, 8,128, 8, 0, 0, 0,128,}}, {{136,136,136, 8, 0, 0, 0,128,}}, }, }; /* Table that combines the S, P, and E operations. */ static const unsigned long SPE[2][8][64] = { { { 0x80088000,0x80000000, 0,0x80088000, 0,0x80088000,0x80000000, 0, 0x80088000,0x80088000,0x80000000, 0x88000, 0x88000, 0, 0,0x80000000, 0x80000000, 0, 0x88000,0x80088000, 0x80088000,0x80000000, 0x88000, 0x88000, 0, 0x88000,0x80088000,0x80000000, 0x88000, 0x88000,0x80000000, 0, 0,0x80088000, 0x88000,0x80000000, 0x80088000,0x80000000, 0x88000, 0x88000, 0x80000000, 0x88000,0x80088000, 0, 0x80088000, 0, 0,0x80000000, 0x80088000,0x80088000,0x80000000, 0x88000, 0, 0x88000,0x80000000, 0, 0x80000000, 0, 0x88000,0x80088000, 0,0x80000000, 0x88000,0x80088000, }, { 0x8800010, 0, 0x8800000, 0, 0x10, 0x8800010, 0x8800000, 0x8800000, 0x8800000, 0x10, 0x10, 0x8800000, 0x10, 0x8800000, 0, 0x10, 0, 0x8800010, 0x10, 0x8800000, 0x8800010, 0, 0, 0x10, 0x8800010, 0x8800010, 0x8800000, 0x10, 0, 0, 0x8800010, 0x8800010, 0x10, 0x8800000, 0x8800000, 0x8800010, 0x8800010, 0x10, 0x10, 0, 0, 0x8800010, 0, 0x10, 0x8800000, 0, 0x8800010, 0x8800010, 0x8800000, 0x8800000, 0, 0x10, 0x10, 0x8800010, 0x8800000, 0, 0x10, 0, 0x8800010, 0x8800000, 0x8800010, 0x10, 0, 0x8800000, }, { 0,0x40001000, 0x1000, 0x1000, 0x40000000, 0, 0x1000,0x40001000, 0x1000,0x40000000,0x40000000, 0, 0x40001000, 0x1000, 0,0x40000000, 0,0x40000000,0x40001000, 0x1000, 0x1000,0x40001000,0x40000000, 0, 0x40000000, 0x1000,0x40001000,0x40000000, 0x40001000, 0, 0,0x40001000, 0x40001000, 0x1000, 
0,0x40000000, 0x1000,0x40000000,0x40000000, 0x1000, 0,0x40001000,0x40001000,0x40000000, 0x40000000, 0,0x40001000, 0, 0x40001000, 0, 0,0x40001000, 0x40000000, 0x1000, 0x1000,0x40001000, 0x1000, 0,0x40000000, 0x1000, 0,0x40001000, 0x1000,0x40000000, }, { 0x100008, 0x100000, 0x8, 0x100008, 0, 0, 0x100008, 0x8, 0x100000, 0x8, 0, 0x100008, 0x8, 0x100008, 0, 0, 0x8, 0x100000, 0x100000, 0x8, 0x100000, 0x100008, 0, 0x100000, 0x100008, 0, 0x8, 0x100000, 0x100000, 0x8, 0x100008, 0, 0x8, 0x100008, 0, 0x8, 0x100000, 0x100000, 0x8, 0, 0x100008, 0, 0x100000, 0x8, 0, 0x8, 0x100000, 0x100000, 0, 0x100008, 0x100008, 0, 0x100008, 0x8, 0x100000, 0x100008, 0x8, 0x100000, 0, 0x100008, 0x100008, 0, 0x8, 0x100000, }, { 0,0x10000000, 0x44000,0x10044020, 0x10000020, 0x44000,0x10044020,0x10000000, 0x10000000, 0x20, 0x20,0x10044000, 0x44020,0x10000020,0x10044000, 0, 0x10044000, 0,0x10000020, 0x44020, 0x44000,0x10044020, 0, 0x20, 0x20, 0x44020,0x10044020,0x10000020, 0x10000000, 0x44000, 0x44020,0x10044000, 0x10044000, 0x44020,0x10000020,0x10000000, 0x10000000, 0x20, 0x20, 0x44000, 0,0x10044000,0x10044020, 0, 0x10044020, 0, 0x44000,0x10000020, 0x44020, 0x44000, 0,0x10044020, 0x10000020,0x10044000, 0x44020,0x10000000, 0x10044000,0x10000020, 0x44000, 0x44020, 0x20,0x10044020,0x10000000, 0x20, }, { 0x440, 0x440, 0, 0x200000, 0x440, 0x200000, 0x200440, 0, 0x200440, 0x200440, 0x200000, 0, 0x200000, 0x440, 0, 0x200440, 0, 0x200440, 0x440, 0, 0x200000, 0x440, 0x200000, 0x440, 0x200440, 0, 0, 0x200440, 0x440, 0x200000, 0x200440, 0x200000, 0x200440, 0, 0x200000, 0x200440, 0x200000, 0x440, 0, 0x200000, 0, 0x200000, 0x440, 0, 0x440, 0x200440, 0x200000, 0x440, 0x200440, 0x200000, 0, 0x200440, 0x440, 0, 0x200440, 0, 0x200000, 0x440, 0x200440, 0x200000, 0, 0x200440, 0x440, 0x440, }, { 0x4400000, 0x2000, 0x2000, 0x4, 0x4402004, 0x4400004, 0x4402000, 0, 0, 0x2004, 0x2004, 0x4400000, 0x4, 0x4402000, 0x4400000, 0x2004, 0x2004, 0x4400000, 0x4400004, 0x4402004, 0, 0x2000, 0x4, 0x4402000, 0x4400004, 0x4402004, 
0x4402000, 0x4, 0x4402004, 0x4400004, 0x2000, 0, 0x4402004, 0x4400000, 0x4400004, 0x2004, 0x4400000, 0x2000, 0, 0x4400004, 0x2004, 0x4402004, 0x4402000, 0, 0x2000, 0x4, 0x4, 0x2000, 0, 0x2004, 0x2000, 0x4402000, 0x2004, 0x4400000, 0x4402004, 0, 0x4402000, 0x4, 0x4400004, 0x4402004, 0x4, 0x4402000, 0x4400000, 0x4400004, }, { 0x880,0x20000000,0x20000880, 0, 0x20000000, 0x880, 0,0x20000880, 0x880, 0,0x20000000,0x20000880, 0x20000880,0x20000880, 0x880, 0, 0x20000000,0x20000880, 0x880,0x20000000, 0x20000880, 0x880, 0,0x20000000, 0, 0,0x20000880, 0x880, 0,0x20000000,0x20000000, 0x880, 0,0x20000000, 0x880,0x20000880, 0x20000880, 0, 0,0x20000000, 0x880,0x20000880,0x20000000, 0x880, 0x20000000, 0x880, 0x880,0x20000000, 0x20000880, 0, 0, 0x880, 0x20000000,0x20000880,0x20000880, 0, 0x880,0x20000000,0x20000880, 0, 0, 0x880,0x20000000,0x20000880, }, }, { { 0x2008, 0x8,0x20002000,0x20002008, 0x2000,0x20000008,0x20000008,0x20002000, 0x20000008, 0x2008, 0x2008,0x20000000, 0x20002000, 0x2000, 0,0x20000008, 0x8,0x20000000, 0x2000, 0x8, 0x20002008, 0x2008,0x20000000, 0x2000, 0x20000000, 0, 0x8,0x20002008, 0,0x20002000,0x20002008, 0, 0,0x20002008, 0x2000,0x20000008, 0x2008, 0x8,0x20000000, 0x2000, 0x20002008, 0, 0x8,0x20002000, 0x20000008,0x20000000,0x20002000, 0x2008, 0x20002008, 0x8, 0x2008,0x20002000, 0x2000,0x20000000,0x20000008, 0, 0x8, 0x2000,0x20002000, 0x2008, 0x20000000,0x20002008, 0,0x20000008, }, { 0x4400010, 0, 0x10, 0x4400010, 0x4400000, 0, 0x4400000, 0x10, 0, 0x4400010, 0, 0x4400000, 0x10, 0x4400010, 0x4400010, 0, 0x10, 0x4400000, 0x4400010, 0, 0x10, 0x4400000, 0, 0x10, 0x4400000, 0x10, 0x4400010, 0x4400000, 0x4400000, 0x10, 0, 0x4400010, 0x10, 0x4400010, 0x4400000, 0x10, 0x4400010, 0x10, 0x4400000, 0, 0x4400000, 0, 0x10, 0x4400010, 0, 0x4400000, 0x10, 0x4400000, 0x4400010, 0, 0, 0x4400000, 0, 0x4400010, 0x10, 0x4400010, 0x4400010, 0x10, 0, 0x4400000, 0x4400000, 0, 0x4400010, 0x10, }, { 0x10044000, 0x44004, 0,0x10044000, 0x10000004, 0x44000,0x10044000, 0x4, 0x44000, 0x4, 
0x44004,0x10000000, 0x10044004,0x10000000,0x10000000,0x10044004, 0,0x10000004, 0x44004, 0, 0x10000000,0x10044004, 0x4,0x10044000, 0x10044004, 0x44000,0x10000004, 0x44004, 0x4, 0, 0x44000,0x10000004, 0x44004, 0,0x10000000, 0x4, 0x10000000,0x10000004, 0x44004,0x10044000, 0, 0x44004, 0x4,0x10044004, 0x10000004, 0x44000,0x10044004,0x10000000, 0x10000004,0x10044000, 0x44000,0x10044004, 0x4, 0x44000,0x10044000, 0x4, 0x44000, 0,0x10044004,0x10000000, 0x10044000,0x10000004, 0, 0x44004, }, { 0x80000440, 0x100000,0x80000000,0x80100440, 0, 0x100440,0x80100000,0x80000440, 0x100440,0x80100000, 0x100000,0x80000000, 0x80100000,0x80000440, 0x440, 0x100000, 0x80100440, 0x440, 0,0x80000000, 0x440,0x80100000, 0x100440, 0, 0x80000000, 0,0x80000440, 0x100440, 0x100000,0x80100440,0x80100440, 0x440, 0x80100440,0x80000000, 0x440,0x80100000, 0x440, 0x100000,0x80000000, 0x100440, 0x80100000, 0, 0,0x80000440, 0,0x80100440, 0x100440, 0, 0x100000,0x80100440,0x80000440, 0x440, 0x80100440,0x80000000, 0x100000,0x80000440, 0x80000440, 0x440, 0x100440,0x80100000, 0x80000000, 0x100000,0x80100000, 0x100440, }, { 0x88000, 0, 0, 0x88000, 0x88000, 0x88000, 0, 0x88000, 0, 0, 0x88000, 0, 0x88000, 0x88000, 0x88000, 0, 0, 0x88000, 0, 0, 0x88000, 0, 0, 0x88000, 0, 0x88000, 0x88000, 0, 0x88000, 0, 0, 0x88000, 0x88000, 0x88000, 0, 0x88000, 0, 0, 0x88000, 0x88000, 0x88000, 0, 0x88000, 0, 0, 0x88000, 0, 0, 0x88000, 0, 0, 0x88000, 0x88000, 0x88000, 0, 0, 0, 0x88000, 0x88000, 0, 0, 0, 0x88000, 0x88000, }, { 0x8800000, 0x20, 0, 0x8800020, 0x20, 0, 0x8800000, 0x20, 0, 0x8800020, 0x20, 0x8800000, 0x8800000, 0x8800000, 0x8800020, 0x20, 0x20, 0x8800000, 0x8800020, 0, 0, 0, 0x8800020, 0x8800020, 0x8800020, 0x8800020, 0x8800000, 0, 0, 0x20, 0x20, 0x8800000, 0, 0x8800000, 0x8800000, 0x20, 0x8800020, 0x20, 0, 0x8800000, 0x8800000, 0, 0x8800020, 0x20, 0x20, 0x8800020, 0x20, 0, 0x8800020, 0x20, 0x20, 0x8800000, 0x8800000, 0x8800020, 0x20, 0, 0, 0x8800000, 0x8800000, 0x8800020, 0x8800020, 0, 0, 0x8800020, }, { 0, 0, 
0x1000,0x40001000, 0x40001000,0x40000000, 0, 0, 0x1000,0x40001000,0x40000000, 0x1000, 0x40000000, 0x1000, 0x1000,0x40000000, 0x40001000, 0,0x40000000,0x40001000, 0, 0x1000,0x40001000, 0, 0x40001000,0x40000000, 0x1000,0x40000000, 0x40000000,0x40001000, 0, 0x1000, 0x40000000, 0x1000,0x40001000,0x40000000, 0, 0, 0x1000,0x40001000, 0x40001000,0x40000000, 0, 0, 0,0x40001000,0x40000000, 0x1000, 0,0x40001000, 0x1000, 0, 0x40000000, 0,0x40001000, 0x1000, 0x1000,0x40000000,0x40000000,0x40001000, 0x40001000, 0x1000, 0x1000,0x40000000, }, { 0x200880, 0x200880, 0, 0, 0x200000, 0x880, 0x200880, 0x200880, 0, 0x200000, 0x880, 0, 0x880, 0x200000, 0x200000, 0x200880, 0, 0x880, 0x880, 0x200000, 0x200880, 0x200000, 0, 0x880, 0x200000, 0x880, 0x200000, 0x200880, 0x880, 0, 0x200880, 0, 0x880, 0, 0x200000, 0x200880, 0, 0x200000, 0, 0x880, 0x200880, 0x200000, 0x200000, 0x880, 0x200880, 0, 0x880, 0x200000, 0x200880, 0x880, 0x200880, 0x200000, 0x880, 0, 0x200000, 0x200880, 0, 0x200880, 0x880, 0, 0x200000, 0x200880, 0, 0x880, }, }, }; /* compressed/interleaved => final permutation table */ static const C_block CF6464[64/CHUNKBITS][1<<CHUNKBITS] = { { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 64,}}, {{ 0, 0, 0, 0, 0, 0, 64, 0,}}, {{ 0, 0, 0, 0, 0, 0, 64, 64,}}, {{ 0, 0, 0, 0, 0, 64, 0, 0,}}, {{ 0, 0, 0, 0, 0, 64, 0, 64,}}, {{ 0, 0, 0, 0, 0, 64, 64, 0,}}, {{ 0, 0, 0, 0, 0, 64, 64, 64,}}, {{ 0, 0, 0, 0, 64, 0, 0, 0,}}, {{ 0, 0, 0, 0, 64, 0, 0, 64,}}, {{ 0, 0, 0, 0, 64, 0, 64, 0,}}, {{ 0, 0, 0, 0, 64, 0, 64, 64,}}, {{ 0, 0, 0, 0, 64, 64, 0, 0,}}, {{ 0, 0, 0, 0, 64, 64, 0, 64,}}, {{ 0, 0, 0, 0, 64, 64, 64, 0,}}, {{ 0, 0, 0, 0, 64, 64, 64, 64,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 4,}}, {{ 0, 0, 0, 0, 0, 0, 4, 0,}}, {{ 0, 0, 0, 0, 0, 0, 4, 4,}}, {{ 0, 0, 0, 0, 0, 4, 0, 0,}}, {{ 0, 0, 0, 0, 0, 4, 0, 4,}}, {{ 0, 0, 0, 0, 0, 4, 4, 0,}}, {{ 0, 0, 0, 0, 0, 4, 4, 4,}}, {{ 0, 0, 0, 0, 4, 0, 0, 0,}}, {{ 0, 0, 0, 0, 4, 0, 0, 4,}}, {{ 0, 0, 0, 0, 4, 0, 4, 0,}}, {{ 0, 0, 
0, 0, 4, 0, 4, 4,}}, {{ 0, 0, 0, 0, 4, 4, 0, 0,}}, {{ 0, 0, 0, 0, 4, 4, 0, 4,}}, {{ 0, 0, 0, 0, 4, 4, 4, 0,}}, {{ 0, 0, 0, 0, 4, 4, 4, 4,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 64, 0, 0, 0, 0,}}, {{ 0, 0, 64, 0, 0, 0, 0, 0,}}, {{ 0, 0, 64, 64, 0, 0, 0, 0,}}, {{ 0, 64, 0, 0, 0, 0, 0, 0,}}, {{ 0, 64, 0, 64, 0, 0, 0, 0,}}, {{ 0, 64, 64, 0, 0, 0, 0, 0,}}, {{ 0, 64, 64, 64, 0, 0, 0, 0,}}, {{ 64, 0, 0, 0, 0, 0, 0, 0,}}, {{ 64, 0, 0, 64, 0, 0, 0, 0,}}, {{ 64, 0, 64, 0, 0, 0, 0, 0,}}, {{ 64, 0, 64, 64, 0, 0, 0, 0,}}, {{ 64, 64, 0, 0, 0, 0, 0, 0,}}, {{ 64, 64, 0, 64, 0, 0, 0, 0,}}, {{ 64, 64, 64, 0, 0, 0, 0, 0,}}, {{ 64, 64, 64, 64, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 4, 0, 0, 0, 0,}}, {{ 0, 0, 4, 0, 0, 0, 0, 0,}}, {{ 0, 0, 4, 4, 0, 0, 0, 0,}}, {{ 0, 4, 0, 0, 0, 0, 0, 0,}}, {{ 0, 4, 0, 4, 0, 0, 0, 0,}}, {{ 0, 4, 4, 0, 0, 0, 0, 0,}}, {{ 0, 4, 4, 4, 0, 0, 0, 0,}}, {{ 4, 0, 0, 0, 0, 0, 0, 0,}}, {{ 4, 0, 0, 4, 0, 0, 0, 0,}}, {{ 4, 0, 4, 0, 0, 0, 0, 0,}}, {{ 4, 0, 4, 4, 0, 0, 0, 0,}}, {{ 4, 4, 0, 0, 0, 0, 0, 0,}}, {{ 4, 4, 0, 4, 0, 0, 0, 0,}}, {{ 4, 4, 4, 0, 0, 0, 0, 0,}}, {{ 4, 4, 4, 4, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 16,}}, {{ 0, 0, 0, 0, 0, 0, 16, 0,}}, {{ 0, 0, 0, 0, 0, 0, 16, 16,}}, {{ 0, 0, 0, 0, 0, 16, 0, 0,}}, {{ 0, 0, 0, 0, 0, 16, 0, 16,}}, {{ 0, 0, 0, 0, 0, 16, 16, 0,}}, {{ 0, 0, 0, 0, 0, 16, 16, 16,}}, {{ 0, 0, 0, 0, 16, 0, 0, 0,}}, {{ 0, 0, 0, 0, 16, 0, 0, 16,}}, {{ 0, 0, 0, 0, 16, 0, 16, 0,}}, {{ 0, 0, 0, 0, 16, 0, 16, 16,}}, {{ 0, 0, 0, 0, 16, 16, 0, 0,}}, {{ 0, 0, 0, 0, 16, 16, 0, 16,}}, {{ 0, 0, 0, 0, 16, 16, 16, 0,}}, {{ 0, 0, 0, 0, 16, 16, 16, 16,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 1,}}, {{ 0, 0, 0, 0, 0, 0, 1, 0,}}, {{ 0, 0, 0, 0, 0, 0, 1, 1,}}, {{ 0, 0, 0, 0, 0, 1, 0, 0,}}, {{ 0, 0, 0, 0, 0, 1, 0, 1,}}, {{ 0, 0, 0, 0, 0, 1, 1, 0,}}, {{ 0, 0, 0, 0, 0, 1, 1, 1,}}, {{ 0, 0, 0, 0, 1, 0, 0, 0,}}, {{ 0, 0, 0, 0, 1, 0, 0, 1,}}, {{ 0, 0, 0, 0, 1, 0, 1, 0,}}, {{ 
0, 0, 0, 0, 1, 0, 1, 1,}}, {{ 0, 0, 0, 0, 1, 1, 0, 0,}}, {{ 0, 0, 0, 0, 1, 1, 0, 1,}}, {{ 0, 0, 0, 0, 1, 1, 1, 0,}}, {{ 0, 0, 0, 0, 1, 1, 1, 1,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 16, 0, 0, 0, 0,}}, {{ 0, 0, 16, 0, 0, 0, 0, 0,}}, {{ 0, 0, 16, 16, 0, 0, 0, 0,}}, {{ 0, 16, 0, 0, 0, 0, 0, 0,}}, {{ 0, 16, 0, 16, 0, 0, 0, 0,}}, {{ 0, 16, 16, 0, 0, 0, 0, 0,}}, {{ 0, 16, 16, 16, 0, 0, 0, 0,}}, {{ 16, 0, 0, 0, 0, 0, 0, 0,}}, {{ 16, 0, 0, 16, 0, 0, 0, 0,}}, {{ 16, 0, 16, 0, 0, 0, 0, 0,}}, {{ 16, 0, 16, 16, 0, 0, 0, 0,}}, {{ 16, 16, 0, 0, 0, 0, 0, 0,}}, {{ 16, 16, 0, 16, 0, 0, 0, 0,}}, {{ 16, 16, 16, 0, 0, 0, 0, 0,}}, {{ 16, 16, 16, 16, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 1, 0, 0, 0, 0,}}, {{ 0, 0, 1, 0, 0, 0, 0, 0,}}, {{ 0, 0, 1, 1, 0, 0, 0, 0,}}, {{ 0, 1, 0, 0, 0, 0, 0, 0,}}, {{ 0, 1, 0, 1, 0, 0, 0, 0,}}, {{ 0, 1, 1, 0, 0, 0, 0, 0,}}, {{ 0, 1, 1, 1, 0, 0, 0, 0,}}, {{ 1, 0, 0, 0, 0, 0, 0, 0,}}, {{ 1, 0, 0, 1, 0, 0, 0, 0,}}, {{ 1, 0, 1, 0, 0, 0, 0, 0,}}, {{ 1, 0, 1, 1, 0, 0, 0, 0,}}, {{ 1, 1, 0, 0, 0, 0, 0, 0,}}, {{ 1, 1, 0, 1, 0, 0, 0, 0,}}, {{ 1, 1, 1, 0, 0, 0, 0, 0,}}, {{ 1, 1, 1, 1, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0,128,}}, {{ 0, 0, 0, 0, 0, 0,128, 0,}}, {{ 0, 0, 0, 0, 0, 0,128,128,}}, {{ 0, 0, 0, 0, 0,128, 0, 0,}}, {{ 0, 0, 0, 0, 0,128, 0,128,}}, {{ 0, 0, 0, 0, 0,128,128, 0,}}, {{ 0, 0, 0, 0, 0,128,128,128,}}, {{ 0, 0, 0, 0,128, 0, 0, 0,}}, {{ 0, 0, 0, 0,128, 0, 0,128,}}, {{ 0, 0, 0, 0,128, 0,128, 0,}}, {{ 0, 0, 0, 0,128, 0,128,128,}}, {{ 0, 0, 0, 0,128,128, 0, 0,}}, {{ 0, 0, 0, 0,128,128, 0,128,}}, {{ 0, 0, 0, 0,128,128,128, 0,}}, {{ 0, 0, 0, 0,128,128,128,128,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 8,}}, {{ 0, 0, 0, 0, 0, 0, 8, 0,}}, {{ 0, 0, 0, 0, 0, 0, 8, 8,}}, {{ 0, 0, 0, 0, 0, 8, 0, 0,}}, {{ 0, 0, 0, 0, 0, 8, 0, 8,}}, {{ 0, 0, 0, 0, 0, 8, 8, 0,}}, {{ 0, 0, 0, 0, 0, 8, 8, 8,}}, {{ 0, 0, 0, 0, 8, 0, 0, 0,}}, {{ 0, 0, 0, 0, 8, 0, 0, 8,}}, {{ 0, 0, 0, 0, 8, 0, 8, 
0,}}, {{ 0, 0, 0, 0, 8, 0, 8, 8,}}, {{ 0, 0, 0, 0, 8, 8, 0, 0,}}, {{ 0, 0, 0, 0, 8, 8, 0, 8,}}, {{ 0, 0, 0, 0, 8, 8, 8, 0,}}, {{ 0, 0, 0, 0, 8, 8, 8, 8,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0,128, 0, 0, 0, 0,}}, {{ 0, 0,128, 0, 0, 0, 0, 0,}}, {{ 0, 0,128,128, 0, 0, 0, 0,}}, {{ 0,128, 0, 0, 0, 0, 0, 0,}}, {{ 0,128, 0,128, 0, 0, 0, 0,}}, {{ 0,128,128, 0, 0, 0, 0, 0,}}, {{ 0,128,128,128, 0, 0, 0, 0,}}, {{128, 0, 0, 0, 0, 0, 0, 0,}}, {{128, 0, 0,128, 0, 0, 0, 0,}}, {{128, 0,128, 0, 0, 0, 0, 0,}}, {{128, 0,128,128, 0, 0, 0, 0,}}, {{128,128, 0, 0, 0, 0, 0, 0,}}, {{128,128, 0,128, 0, 0, 0, 0,}}, {{128,128,128, 0, 0, 0, 0, 0,}}, {{128,128,128,128, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 8, 0, 0, 0, 0,}}, {{ 0, 0, 8, 0, 0, 0, 0, 0,}}, {{ 0, 0, 8, 8, 0, 0, 0, 0,}}, {{ 0, 8, 0, 0, 0, 0, 0, 0,}}, {{ 0, 8, 0, 8, 0, 0, 0, 0,}}, {{ 0, 8, 8, 0, 0, 0, 0, 0,}}, {{ 0, 8, 8, 8, 0, 0, 0, 0,}}, {{ 8, 0, 0, 0, 0, 0, 0, 0,}}, {{ 8, 0, 0, 8, 0, 0, 0, 0,}}, {{ 8, 0, 8, 0, 0, 0, 0, 0,}}, {{ 8, 0, 8, 8, 0, 0, 0, 0,}}, {{ 8, 8, 0, 0, 0, 0, 0, 0,}}, {{ 8, 8, 0, 8, 0, 0, 0, 0,}}, {{ 8, 8, 8, 0, 0, 0, 0, 0,}}, {{ 8, 8, 8, 8, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 32,}}, {{ 0, 0, 0, 0, 0, 0, 32, 0,}}, {{ 0, 0, 0, 0, 0, 0, 32, 32,}}, {{ 0, 0, 0, 0, 0, 32, 0, 0,}}, {{ 0, 0, 0, 0, 0, 32, 0, 32,}}, {{ 0, 0, 0, 0, 0, 32, 32, 0,}}, {{ 0, 0, 0, 0, 0, 32, 32, 32,}}, {{ 0, 0, 0, 0, 32, 0, 0, 0,}}, {{ 0, 0, 0, 0, 32, 0, 0, 32,}}, {{ 0, 0, 0, 0, 32, 0, 32, 0,}}, {{ 0, 0, 0, 0, 32, 0, 32, 32,}}, {{ 0, 0, 0, 0, 32, 32, 0, 0,}}, {{ 0, 0, 0, 0, 32, 32, 0, 32,}}, {{ 0, 0, 0, 0, 32, 32, 32, 0,}}, {{ 0, 0, 0, 0, 32, 32, 32, 32,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 0, 0, 0, 0, 2,}}, {{ 0, 0, 0, 0, 0, 0, 2, 0,}}, {{ 0, 0, 0, 0, 0, 0, 2, 2,}}, {{ 0, 0, 0, 0, 0, 2, 0, 0,}}, {{ 0, 0, 0, 0, 0, 2, 0, 2,}}, {{ 0, 0, 0, 0, 0, 2, 2, 0,}}, {{ 0, 0, 0, 0, 0, 2, 2, 2,}}, {{ 0, 0, 0, 0, 2, 0, 0, 0,}}, {{ 0, 0, 0, 0, 2, 0, 0, 2,}}, {{ 0, 0, 0, 0, 2, 
0, 2, 0,}}, {{ 0, 0, 0, 0, 2, 0, 2, 2,}}, {{ 0, 0, 0, 0, 2, 2, 0, 0,}}, {{ 0, 0, 0, 0, 2, 2, 0, 2,}}, {{ 0, 0, 0, 0, 2, 2, 2, 0,}}, {{ 0, 0, 0, 0, 2, 2, 2, 2,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 32, 0, 0, 0, 0,}}, {{ 0, 0, 32, 0, 0, 0, 0, 0,}}, {{ 0, 0, 32, 32, 0, 0, 0, 0,}}, {{ 0, 32, 0, 0, 0, 0, 0, 0,}}, {{ 0, 32, 0, 32, 0, 0, 0, 0,}}, {{ 0, 32, 32, 0, 0, 0, 0, 0,}}, {{ 0, 32, 32, 32, 0, 0, 0, 0,}}, {{ 32, 0, 0, 0, 0, 0, 0, 0,}}, {{ 32, 0, 0, 32, 0, 0, 0, 0,}}, {{ 32, 0, 32, 0, 0, 0, 0, 0,}}, {{ 32, 0, 32, 32, 0, 0, 0, 0,}}, {{ 32, 32, 0, 0, 0, 0, 0, 0,}}, {{ 32, 32, 0, 32, 0, 0, 0, 0,}}, {{ 32, 32, 32, 0, 0, 0, 0, 0,}}, {{ 32, 32, 32, 32, 0, 0, 0, 0,}}, }, { {{ 0, 0, 0, 0, 0, 0, 0, 0,}}, {{ 0, 0, 0, 2, 0, 0, 0, 0,}}, {{ 0, 0, 2, 0, 0, 0, 0, 0,}}, {{ 0, 0, 2, 2, 0, 0, 0, 0,}}, {{ 0, 2, 0, 0, 0, 0, 0, 0,}}, {{ 0, 2, 0, 2, 0, 0, 0, 0,}}, {{ 0, 2, 2, 0, 0, 0, 0, 0,}}, {{ 0, 2, 2, 2, 0, 0, 0, 0,}}, {{ 2, 0, 0, 0, 0, 0, 0, 0,}}, {{ 2, 0, 0, 2, 0, 0, 0, 0,}}, {{ 2, 0, 2, 0, 0, 0, 0, 0,}}, {{ 2, 0, 2, 2, 0, 0, 0, 0,}}, {{ 2, 2, 0, 0, 0, 0, 0, 0,}}, {{ 2, 2, 0, 2, 0, 0, 0, 0,}}, {{ 2, 2, 2, 0, 0, 0, 0, 0,}}, {{ 2, 2, 2, 2, 0, 0, 0, 0,}}, }, }; #define HAVE_DES_TABLES 1 #endif
c
github
https://github.com/ruby/ruby
missing/des_tables.c
#!/usr/bin/env python import sys import string import os import os.path import glob import commands gnu_warnings = { # ( warning_string, warno, src_excluded ) 3 : ( 'Unused variable', ['12_hide_mpi','65_psp','68_dmft'] ), 4 : ( 'Unused dummy argument', ['65_psp'] ), 5 : ( 'Nonstandard type declaration', ['interfaces','28_numeric_noabirule','01_macroavnew_ext','01_linalg_ext'] ), 6 : ( 'Same actual argument associated with INTENT', []), 7 : ( 'CHARACTER expression will be truncated in assignment', ["57_iopsp_parser",] ), 8 : ( 'Limit of 39 continuations exceeded', [] ), 9 : ( 'DOUBLE COMPLEX at (1) does not conform to the Fortran 95 standard', ['interfaces','01_linalg_ext'] ), 10 : ( 'at (1) defined but not used', [] ), 11 : ( 'Character length of actual argument shorter than of dummy argument', [] ), #12 : ( 'may be used uninitialized', [] ), FIXME Disabled cause it sigfaults 13 : ( 'Obsolescent', [] ), 14 : ( 'Type specified for intrinsic function', [] ), 15 : ( 'Nonconforming tab character', [] ), } def abinit_suite_generator(): def make_callable(wno): def test_func(abenv): try: return main(wno, home_dir=abenv.home_dir) except Exception: import sys raise sys.exc_info()[1] # Reraise current exception (py2.4 compliant) test_func.__doc__ = gnu_warnings[wno][0] return test_func warnos = gnu_warnings.keys() warnos.sort() for wno in warnos: yield {"test_func" : make_callable(wno)} def usage(): print "\n Usage: warningschk test_number \n " def main(warno, home_dir=""): from os.path import join as pj debug = 0 if not home_dir: cwd_dir = commands.getoutput('pwd') if os.path.isabs(sys.argv[0]): home_dir = os.path.normpath(os.path.join(os.path.dirname(sys.argv[0]), "../..")) inp_dir = os.path.join(home_dir, "tests/abirules/Input") else: inp_dir = os.path.join("..", "Input") home_dir = os.path.join(cwd_dir,"../../..") else: inp_dir = pj(home_dir, "tests", "abirules", "Input") #print "home_dir", home_dir warno = int(warno) Warning = gnu_warnings[warno][0] src_excluded = 
gnu_warnings[warno][1] # read variable from file : warnings and src_excluded #try: # test_number = pj(inp_dir, "warnings_"+str(warno)+".in") #except IndexError: # usage() # sys.exit(2) #except: # print "unknown error : ", sys.exc_info()[0] # raise #src_excluded = [] #Warning = "" #try: # f=open(test_number, 'r') # exec f # f.close() #except IOError: # print "%s: no such file" % test_number # sys.exit(4) #if Warning == "": # print "Pattern not defined..." # sys.exit(3) # header print "**********************************************************************" print "Warning pattern : '"+Warning+"'" print "**********************************************************************" #if len(src_excluded) > 0: print src_excluded makelog = pj(home_dir, "make.log") try: logfile = open(makelog) except IOError: raise words = [] Buffer = [] linec = 0 warning_count = 0 start = False for line in logfile: linec = linec + 1 #print 'linec : %d' % linec if linec > 5 : Buffer.pop(0) Buffer.append(line) if start == False : # Examine the make.log file, starting with the section where the directory 10_defs was treated. 
if line.find("Making all in 10_defs") == -1 : continue else: #print linec start = True if line.find(Warning) != -1 : if debug: print "[DEBUG] Buffer[0] : " + string.strip(Buffer[0]) # source.F90:line.pos: print "[DEBUG] Buffer[2] : " + string.strip(Buffer[2]) # instruction print "[DEBUG] Buffer[4] : " + string.strip(Buffer[4]) # Warning: msg if True: if debug: print "[DEBUG] len of Buffer[0] : " + str(len(string.strip(Buffer[0]))) if len(string.strip(Buffer[0])) != 0: source = Buffer[0].split(":")[0] if source.find('Included at'): source = source.split(" ")[-1] sourceline = Buffer[0].split(":")[1] try: sourceline = sourceline.split(".")[0] except IndexError: pass pattern = pj(home_dir, "src") + "/*/"+source #pattern = '../../../src/*/'+source #print pattern path = glob.glob(pattern) assert len(path) < 2 try: source_dir = path[0].split('/') if debug: print "[DEBUG] source_dir :" + source_dir[-2] if src_excluded.index(source_dir[-2]) : pass except IndexError: pass except ValueError: warning_count += 1 try: print source + ' : var = ' + Buffer[4].split("'")[1] +' ['+source_dir[-2]+']' except IndexError: print source + ' : line = ' + sourceline +' ['+source_dir[-2]+']' else: print " ***** Can't determine source but warning exists..." 
else: source = Buffer[4].split(":")[0] sourceline = Buffer[4].split(":")[1] pattern = pj(home_dir, "src") + "/*/"+source #pattern = '../../../src/*/'+source path = glob.glob(pattern) source_dir = path[0].split('/') if debug: print "[DEBUG] source_dir :" + source_dir[-2] try: if src_excluded.index(source_dir[-2]) : warning_count += 1 print string.strip(Buffer[4]) +' ['+source_dir[-2]+']' except ValueError: pass logfile.close() # footer print "**********************************************************************" print "Warning count = " + str(warning_count) print "**********************************************************************" return warning_count # --------------------------------------------------------------------------- if __name__ == "__main__": warno = sys.argv[1] try: home_dir = os.path.abspath(sys.argv[2]) except IndexError: home_dir = "" exit_status = main(warno, home_dir=home_dir) sys.exit(exit_status)
unknown
codeparrot/codeparrot-clean
# SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- $id: http://devicetree.org/schemas/mtd/allwinner,sun4i-a10-nand.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Allwinner A10 NAND Controller maintainers: - Chen-Yu Tsai <wens@csie.org> - Maxime Ripard <mripard@kernel.org> properties: compatible: enum: - allwinner,sun4i-a10-nand - allwinner,sun8i-a23-nand-controller - allwinner,sun50i-h616-nand-controller reg: maxItems: 1 interrupts: maxItems: 1 clocks: minItems: 2 items: - description: Bus Clock - description: Module Clock - description: ECC Clock - description: MBus Clock clock-names: minItems: 2 items: - const: ahb - const: mod - const: ecc - const: mbus resets: maxItems: 1 reset-names: const: ahb dmas: maxItems: 1 dma-names: const: rxtx patternProperties: "^nand@[a-f0-9]$": type: object $ref: raw-nand-chip.yaml properties: reg: minimum: 0 maximum: 7 nand-ecc-algo: const: bch nand-ecc-step-size: enum: [ 512, 1024 ] nand-ecc-strength: maximum: 80 allwinner,rb: description: Contains the native Ready/Busy IDs. 
$ref: /schemas/types.yaml#/definitions/uint32-array minItems: 1 maxItems: 2 items: minimum: 0 maximum: 1 unevaluatedProperties: false required: - compatible - reg - interrupts - clocks - clock-names unevaluatedProperties: false allOf: - $ref: nand-controller.yaml - if: properties: compatible: contains: enum: - allwinner,sun4i-a10-nand - allwinner,sun8i-a23-nand-controller then: properties: clocks: maxItems: 2 clock-names: maxItems: 2 - if: properties: compatible: contains: enum: - allwinner,sun50i-h616-nand-controller then: properties: clocks: minItems: 4 clock-names: minItems: 4 examples: - | #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/clock/sun6i-rtc.h> #include <dt-bindings/clock/sun8i-a23-a33-ccu.h> #include <dt-bindings/reset/sun8i-a23-a33-ccu.h> nand-controller@1c03000 { compatible = "allwinner,sun8i-a23-nand-controller"; reg = <0x01c03000 0x1000>; interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>; clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>; clock-names = "ahb", "mod"; resets = <&ccu RST_BUS_NAND>; reset-names = "ahb"; dmas = <&dma 5>; dma-names = "rxtx"; pinctrl-names = "default"; pinctrl-0 = <&nand_pins &nand_cs0_pin &nand_rb0_pin>; #address-cells = <1>; #size-cells = <0>; }; ...
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
#pragma once #include <typeindex> #include <memory> #include <c10/macros/Export.h> #include <c10/macros/Macros.h> #include <c10/util/Exception.h> namespace c10 { struct ClassType; using ClassTypePtr = std::shared_ptr<ClassType>; TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex); template <typename T> const c10::ClassTypePtr& getCustomClassType() { // Classes are never unregistered from getCustomClassTypeMap and the // hash lookup can be a hot path, so just cache. // For the same reason, it's fine If this ends up getting duplicated across // DSO boundaries for whatever reason. static c10::ClassTypePtr cache = getCustomClassTypeImpl( std::type_index(typeid(T))); return cache; } }
c
github
https://github.com/pytorch/pytorch
aten/src/ATen/core/custom_class.h
from django.db import migrations, models

# (value, label) pairs recorded as choices on LogEntry.action_flag.
_ACTION_FLAG_CHOICES = [
    (1, "Addition"),
    (2, "Change"),
    (3, "Deletion"),
]


class Migration(migrations.Migration):
    """Attach human-readable choices to ``LogEntry.action_flag``.

    State-only change: an ``AlterField`` that only adds choices does not
    alter the database schema.
    """

    dependencies = [
        ("admin", "0002_logentry_remove_auto_add"),
    ]

    operations = [
        migrations.AlterField(
            model_name="logentry",
            name="action_flag",
            field=models.PositiveSmallIntegerField(
                verbose_name="action flag",
                choices=_ACTION_FLAG_CHOICES,
            ),
        ),
    ]
python
github
https://github.com/django/django
django/contrib/admin/migrations/0003_logentry_add_action_flag_choices.py
# -*- coding: utf-8 -*-
#
# Copyright 2003, 2004 Norwegian University of Science and Technology
#
# This file is part of Network Administration Visualized (NAV)
#
# NAV is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NAV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAV; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Abstraction for the various config files used by servicemon and pinger.
Implements the singleton pattern ensuring only one instance created.
"""

import os
import sys
import re

try:
    # this module exists in a properly installed environment
    import nav.path
    CONFIGFILEPATH = [nav.path.sysconfdir]
except ImportError:
    # fallback to current dir++
    CONFIGFILEPATH = ['/usr/local/nav/local/etc/conf/', '.']


class Conf(dict):
    """Base class: a dict of key=value options read from a config file.

    Subclasses must set ``self._file`` (the config file's base name) and
    ``self.validoptions`` (allowed keys; empty list means accept all keys)
    BEFORE calling ``Conf.__init__``.
    """

    def __init__(self, *_args, **_kwargs):
        # Open the first readable candidate among CONFIGFILEPATH/<_file>.
        dict.__init__(self)
        self._configfile = None
        for path in CONFIGFILEPATH:
            afile = os.path.join(os.path.abspath(path), self._file)
            try:
                self._configfile = open(afile, "r")
                break
            except IOError:
                pass
        if self._configfile is None:
            # NOTE(review): exits the whole process with SUCCESS status when
            # no config file is found -- silent and arguably wrong for a
            # library class; confirm callers rely on this before changing.
            sys.exit(0)

        # Matches "key = value" lines; '#' starts a comment and is excluded
        # from both key and value.
        self._regexp = re.compile(r"^([^#=]+)\s*=\s*([^#\n]+)", re.M)
        self.parsefile()
        self._configfile.close()

    def parsefile(self):
        """Populate self with stripped key/value pairs from the open file.

        When ``validoptions`` is non-empty, keys not listed there are
        silently dropped.
        """
        for (key, value) in self._regexp.findall(self._configfile.read()):
            if self.validoptions:
                if key.strip() in self.validoptions:
                    self[key.strip()] = value.strip()
            else:
                self[key.strip()] = value.strip()


def dbconf(*args, **kwargs):
    """Return the shared _dbconf singleton, creating it on first use."""
    if _dbconf._instance is None:
        _dbconf._instance = _dbconf(*args, **kwargs)
    return _dbconf._instance


class _dbconf(Conf):
    """Options from db.conf; use the ``dbconf()`` accessor, not this class."""
    _instance = None

    def __init__(self, *args, **kwargs):
        self._file = kwargs.get('configfile', 'db.conf')
        # Valid configoptions must be specified in this list
        self.validoptions = []
        Conf.__init__(self, *args, **kwargs)


class _serviceconf(Conf):
    """Options from servicemon.conf; use the ``serviceconf()`` accessor."""
    _instance = None

    def __init__(self, *args, **kwargs):
        self._file = kwargs.get('configfile', 'servicemon.conf')
        self.validoptions = []
        Conf.__init__(self, *args, **kwargs)


def serviceconf(*args, **kwargs):
    """Return the shared _serviceconf singleton, creating it on first use."""
    if _serviceconf._instance is None:
        _serviceconf._instance = _serviceconf(*args, **kwargs)
    return _serviceconf._instance


class _pingconf(Conf):
    """Options from pping.conf; use the ``pingconf()`` accessor."""
    _instance = None

    def __init__(self, *args, **kwargs):
        self._file = kwargs.get('configfile', 'pping.conf')
        self.validoptions = []
        Conf.__init__(self, *args, **kwargs)


def pingconf(*args, **kwargs):
    """Return the shared _pingconf singleton, creating it on first use."""
    if _pingconf._instance is None:
        _pingconf._instance = _pingconf(*args, **kwargs)
    return _pingconf._instance
unknown
codeparrot/codeparrot-clean
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: BUSL-1.1 package command import ( "fmt" "strings" "github.com/hashicorp/cli" "github.com/posener/complete" ) var ( _ cli.Command = (*OperatorRotateCommand)(nil) _ cli.CommandAutocomplete = (*OperatorRotateCommand)(nil) ) type OperatorRotateCommand struct { *BaseCommand } func (c *OperatorRotateCommand) Synopsis() string { return "Rotates the underlying encryption key" } func (c *OperatorRotateCommand) Help() string { helpText := ` Usage: vault operator rotate [options] Rotates the underlying encryption key which is used to secure data written to the storage backend. This installs a new key in the key ring. This new key is used to encrypted new data, while older keys in the ring are used to decrypt older data. This is an online operation and does not cause downtime. This command is run per-cluster (not per-server), since Vault servers in HA mode share the same storage backend. Rotate Vault's encryption key: $ vault operator rotate For a full list of examples, please see the documentation. 
` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *OperatorRotateCommand) Flags() *FlagSets { return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) } func (c *OperatorRotateCommand) AutocompleteArgs() complete.Predictor { return nil } func (c *OperatorRotateCommand) AutocompleteFlags() complete.Flags { return c.Flags().Completions() } func (c *OperatorRotateCommand) Run(args []string) int { f := c.Flags() if err := f.Parse(args); err != nil { c.UI.Error(err.Error()) return 1 } args = f.Args() if len(args) > 0 { c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) return 1 } client, err := c.Client() if err != nil { c.UI.Error(err.Error()) return 2 } // Rotate the key err = client.Sys().Rotate() if err != nil { c.UI.Error(fmt.Sprintf("Error rotating key: %s", err)) return 2 } // Print the key status status, err := client.Sys().KeyStatus() if err != nil { c.UI.Error(fmt.Sprintf("Error reading key status: %s", err)) return 2 } switch Format(c.UI) { case "table": c.UI.Output("Success! Rotated key") c.UI.Output("") c.UI.Output(printKeyStatus(status)) return 0 default: return OutputData(c.UI, status) } }
go
github
https://github.com/hashicorp/vault
command/rotate.go
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: aci_static_binding_to_epg short_description: Bind static paths to EPGs (fv:RsPathAtt) description: - Bind static paths to EPGs on Cisco ACI fabrics. notes: - The C(tenant), C(ap), C(epg) used must exist before using this module in your playbook. The M(aci_tenant), M(aci_ap), M(aci_epg) modules can be used for this. - More information about the internal APIC classes B(fv:RsPathAtt) from L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/). author: - Bruno Calogero (@brunocalogero) version_added: '2.5' options: tenant: description: - Name of an existing tenant. aliases: [ tenant_name ] ap: description: - Name of an existing application network profile, that will contain the EPGs. aliases: [ app_profile, app_profile_name ] epg: description: - The name of the end point group. aliases: [ epg_name ] encap_id: description: - The encapsulation ID associating the C(epg) with the interface path. - This acts as the secondary C(encap_id) when using micro-segmentation. aliases: [ vlan, vlan_id ] choices: [ Valid encap IDs for specified encap, currently 1 to 4096 ] primary_encap_id: description: - Determines the primary encapsulation ID associating the C(epg) with the interface path when using micro-segmentation. aliases: [ primary_vlan, primary_vlan_id ] choices: [ Valid encap IDs for specified encap, currently 1 to 4096 ] deploy_immediacy: description: - The Deployement Immediacy of Static EPG on PC, VPC or Interface. - The APIC defaults to C(lazy) when unset during creation. 
choices: [ immediate, lazy ] interface_mode: description: - Determines how layer 2 tags will be read from and added to frames. - Values C(802.1p) and C(native) are identical. - Values C(access) and C(untagged) are identical. - Values C(regular), C(tagged) and C(trunk) are identical. - The APIC defaults to C(trunk) when unset during creation. choices: [ 802.1p, access, native, regular, tagged, trunk, untagged ] aliases: [ interface_mode_name, mode ] interface_type: description: - The type of interface for the static EPG deployement. choices: [ fex, port_channel, switch_port, vpc ] default: switch_port pod_id: description: - The pod number part of the tDn. - C(pod_id) is usually an integer below 10. aliases: [ pod, pod_number ] leafs: description: - The switch ID(s) that the C(interface) belongs to. - When C(interface_type) is C(switch_port), C(port_channel), or C(fex), then C(leafs) is a string of the leaf ID. - When C(interface_type) is C(vpc), then C(leafs) is a list with both leaf IDs. - The C(leafs) value is usually something like '101' or '101-102' depending on C(connection_type). aliases: [ leaves, nodes, paths, switches ] interface: description: - The C(interface) string value part of the tDn. - Usually a policy group like "test-IntPolGrp" or an interface of the following format "1/7" depending on C(interface_type). extpaths: description: - The C(extpaths) integer value part of the tDn. - C(extpaths) is only used if C(interface_type) is C(fex). - Usually something like '1011'. state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. 
choices: [ absent, present, query ] default: present extends_documentation_fragment: aci ''' EXAMPLES = r''' - name: Deploy Static Path binding for given EPG aci_static_binding_to_epg: host: apic username: admin password: SomeSecretPassword tenant: accessport-code-cert ap: accessport_code_app epg: accessport_epg1 encap_id: 222 deploy_immediacy: lazy interface_mode: untagged interface_type: switch_port pod_id: 1 leafs: 101 interface: '1/7' state: present - name: Remove Static Path binding for given EPG aci_static_binding_to_epg: host: apic username: admin password: SomeSecretPassword tenant: accessport-code-cert ap: accessport_code_app epg: accessport_epg1 interface_type: switch_port pod: 1 leafs: 101 interface: '1/7' state: absent - name: Get specific Static Path binding for given EPG aci_static_binding_to_epg: host: apic username: admin password: SomeSecretPassword tenant: accessport-code-cert ap: accessport_code_app epg: accessport_epg1 interface_type: switch_port pod: 1 leafs: 101 interface: '1/7' state: query ''' RETURN = r''' current: description: The existing configuration from the APIC after the module has finished returned: success type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production environment", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: string sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' sent: description: The actual/minimal configuration pushed to the APIC returned: info type: list sample: { "fvTenant": { "attributes": { "descr": "Production environment" } } } previous: description: The original configuration 
from the APIC before the module has started returned: info type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] proposed: description: The assembled configuration from the user-provided parameters returned: info type: dict sample: { "fvTenant": { "attributes": { "descr": "Production environment", "name": "production" } } } filter_string: description: The filter string used for the request returned: failure or debug type: string sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: failure or debug type: string sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: string sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: string sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec from ansible.module_utils.basic import AnsibleModule # TODO: change 'deploy_immediacy' to 'resolution_immediacy' (as seen in aci_epg_to_domain)? 
def main(): argument_spec = aci_argument_spec() argument_spec.update( tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects ap=dict(type='str', aliases=['app_profile', 'app_profile_name']), # Not required for querying all objects epg=dict(type='str', aliases=['epg_name']), # Not required for querying all objects encap_id=dict(type='int', aliases=['vlan', 'vlan_id']), primary_encap_id=dict(type='int', aliases=['primary_vlan', 'primary_vlan_id']), deploy_immediacy=dict(type='str', choices=['immediate', 'lazy']), interface_mode=dict(type='str', choices=['802.1p', 'access', 'native', 'regular', 'tagged', 'trunk', 'untagged'], aliases=['interface_mode_name', 'mode']), interface_type=dict(type='str', default='switch_port', choices=['fex', 'port_channel', 'switch_port', 'vpc']), pod_id=dict(type='int', aliases=['pod', 'pod_number']), # Not required for querying all objects leafs=dict(type='list', aliases=['leaves', 'nodes', 'paths', 'switches']), interface=dict(type='str'), extpaths=dict(type='int'), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['interface_type', 'fex', ['extpaths']], ['state', 'absent', ['ap', 'epg', 'interface', 'leafs', 'pod_id', 'tenant']], ['state', 'present', ['ap', 'encap_id', 'epg', 'interface', 'leafs', 'pod_id', 'tenant']], ], ) tenant = module.params['tenant'] ap = module.params['ap'] epg = module.params['epg'] encap_id = module.params['encap_id'] primary_encap_id = module.params['primary_encap_id'] deploy_immediacy = module.params['deploy_immediacy'] interface_mode = module.params['interface_mode'] interface_type = module.params['interface_type'] pod_id = module.params['pod_id'] # Users are likely to use integers for leaf IDs, which would raise an exception when using the join method leafs = [str(leaf) for leaf in module.params['leafs']] if leafs is not None: if len(leafs) == 
1: if interface_type != 'vpc': leafs = leafs[0] else: module.fail_json(msg='A interface_type of "vpc" requires 2 leafs') elif len(leafs) == 2: if interface_type == 'vpc': leafs = "-".join(leafs) else: module.fail_json(msg='The interface_types "switch_port", "port_channel", and "fex" \ do not support using multiple leafs for a single binding') else: module.fail_json(msg='The "leafs" parameter must not have more than 2 entries') interface = module.params['interface'] extpaths = module.params['extpaths'] state = module.params['state'] static_path = '' if encap_id is not None: if encap_id in range(1, 4097): encap_id = 'vlan-{0}'.format(encap_id) else: module.fail_json(msg='Valid VLAN assigments are from 1 to 4096') if primary_encap_id is not None: if primary_encap_id in range(1, 4097): primary_encap_id = 'vlan-{0}'.format(primary_encap_id) else: module.fail_json(msg='Valid VLAN assigments are from 1 to 4096') INTERFACE_MODE_MAPPING = { '802.1p': 'native', 'access': 'untagged', 'native': 'native', 'regular': 'regular', 'tagged': 'regular', 'trunk': 'regular', 'untagged': 'untagged', } INTERFACE_TYPE_MAPPING = dict( fex='topology/pod-{0}/paths-{1}/extpaths-{2}/pathep-[eth{3}]'.format(pod_id, leafs, extpaths, interface), port_channel='topology/pod-{0}/paths-{1}/pathep-[eth{2}]'.format(pod_id, leafs, interface), switch_port='topology/pod-{0}/paths-{1}/pathep-[eth{2}]'.format(pod_id, leafs, interface), vpc='topology/pod-{0}/protpaths-{1}/pathep-[{2}]'.format(pod_id, leafs, interface), ) static_path = INTERFACE_TYPE_MAPPING[interface_type] if interface_mode is not None: interface_mode = INTERFACE_MODE_MAPPING[interface_mode] aci = ACIModule(module) aci.construct_url( root_class=dict( aci_class='fvTenant', aci_rn='tn-{0}'.format(tenant), filter_target='eq(fvTenant.name, "{0}")'.format(tenant), module_object=tenant, ), subclass_1=dict( aci_class='fvAp', aci_rn='ap-{0}'.format(ap), filter_target='eq(fvAp.name, "{0}")'.format(ap), module_object=ap, ), subclass_2=dict( 
aci_class='fvAEPg', aci_rn='epg-{0}'.format(epg), filter_target='eq(fvAEPg.name, "{0}")'.format(epg), module_object=epg, ), subclass_3=dict( aci_class='fvRsPathAtt', aci_rn='rspathAtt-[{0}]'.format(static_path), filter_target='eq(fvRsPathAtt.tDn, "{0}"'.format(static_path), module_object=static_path, ), ) aci.get_existing() if state == 'present': aci.payload( aci_class='fvRsPathAtt', class_config=dict( encap=encap_id, primaryEncap=primary_encap_id, instrImedcy=deploy_immediacy, mode=interface_mode, tDn=static_path, ), ) aci.get_diff(aci_class='fvRsPathAtt') aci.post_config() elif state == 'absent': aci.delete_config() aci.exit_json() if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
from django.template import TemplateSyntaxError from django.test import SimpleTestCase from ..utils import setup class NamedEndblockTests(SimpleTestCase): @setup({'namedendblocks01': '1{% block first %}_{% block second %}' '2{% endblock second %}_{% endblock first %}3'}) def test_namedendblocks01(self): output = self.engine.render_to_string('namedendblocks01') self.assertEqual(output, '1_2_3') # Unbalanced blocks @setup({'namedendblocks02': '1{% block first %}_{% block second %}' '2{% endblock first %}_{% endblock second %}3'}) def test_namedendblocks02(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('namedendblocks02') @setup({'namedendblocks03': '1{% block first %}_{% block second %}' '2{% endblock %}_{% endblock second %}3'}) def test_namedendblocks03(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('namedendblocks03') @setup({'namedendblocks04': '1{% block first %}_{% block second %}' '2{% endblock second %}_{% endblock third %}3'}) def test_namedendblocks04(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('namedendblocks04') @setup({'namedendblocks05': '1{% block first %}_{% block second %}2{% endblock first %}'}) def test_namedendblocks05(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('namedendblocks05') # Mixed named and unnamed endblocks @setup({'namedendblocks06': '1{% block first %}_{% block second %}' '2{% endblock %}_{% endblock first %}3'}) def test_namedendblocks06(self): """ Mixed named and unnamed endblocks """ output = self.engine.render_to_string('namedendblocks06') self.assertEqual(output, '1_2_3') @setup({'namedendblocks07': '1{% block first %}_{% block second %}' '2{% endblock second %}_{% endblock %}3'}) def test_namedendblocks07(self): output = self.engine.render_to_string('namedendblocks07') self.assertEqual(output, '1_2_3')
unknown
codeparrot/codeparrot-clean
function Component() { const renderItem = item => { // Normally we assume that it's safe to mutate globals in a function passed // as a prop, because the prop could be used as an event handler or effect. // But if the function returns JSX we can assume it's a render helper, ie // called during render, and thus it's unsafe to mutate globals or call // other impure code. global.property = true; return <Item item={item} value={rand} />; }; return <ItemList renderItem={renderItem} />; }
javascript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.invalid-mutate-global-in-render-helper-prop.js
""" Various exceptions that are specific to the SES module. """ from boto.exception import BotoServerError class SESError(BotoServerError): """ Sub-class all SES-related errors from here. Don't raise this error directly from anywhere. The only thing this gets us is the ability to catch SESErrors separately from the more generic, top-level BotoServerError exception. """ pass class SESAddressNotVerifiedError(SESError): """ Raised when a "Reply-To" address has not been validated in SES yet. """ pass class SESIdentityNotVerifiedError(SESError): """ Raised when an identity (domain or address) has not been verified in SES yet. """ pass class SESDomainNotConfirmedError(SESError): """ """ pass class SESAddressBlacklistedError(SESError): """ After you attempt to send mail to an address, and delivery repeatedly fails, said address is blacklisted for at least 24 hours. The blacklisting eventually expires, and you are able to attempt delivery again. If you attempt to send mail to a blacklisted email, this is raised. """ pass class SESDailyQuotaExceededError(SESError): """ Your account's daily (rolling 24 hour total) allotment of outbound emails has been exceeded. """ pass class SESMaxSendingRateExceededError(SESError): """ Your account's requests/second limit has been exceeded. """ pass class SESDomainEndsWithDotError(SESError): """ Recipient's email address' domain ends with a period/dot. """ pass class SESLocalAddressCharacterError(SESError): """ An address contained a control or whitespace character. """ pass class SESIllegalAddressError(SESError): """ Raised when an illegal address is encountered. """ pass
unknown
codeparrot/codeparrot-clean
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import socket import urllib2 import mock from oslo.config import cfg import testtools import webob import webob.exc from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as exception from neutron.tests import base from neutron import wsgi CONF = cfg.CONF TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'var')) class TestWSGIServer(base.BaseTestCase): """WSGI server tests.""" def test_start_random_port(self): server = wsgi.Server("test_random_port") server.start(None, 0, host="127.0.0.1") self.assertNotEqual(0, server.port) server.stop() server.wait() @mock.patch('neutron.wsgi.ProcessLauncher') def test_start_multiple_workers(self, ProcessLauncher): launcher = ProcessLauncher.return_value server = wsgi.Server("test_multiple_processes") server.start(None, 0, host="127.0.0.1", workers=2) launcher.running = True launcher.launch_service.assert_called_once_with(server._server, workers=2) server.stop() self.assertFalse(launcher.running) server.wait() launcher.wait.assert_called_once_with() def test_start_random_port_with_ipv6(self): server = wsgi.Server("test_random_port") server.start(None, 0, host="::1") self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() def 
test_ipv6_listen_called_with_scope(self): server = wsgi.Server("test_app") with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen: with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr: mock_get_addr.return_value = [ (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2)) ] with mock.patch.object(server, 'pool') as mock_pool: server.start(None, 1234, host="fe80::204:acff:fe96:da87%eth0") mock_get_addr.assert_called_once_with( "fe80::204:acff:fe96:da87%eth0", 1234, socket.AF_UNSPEC, socket.SOCK_STREAM ) mock_listen.assert_called_once_with( ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2), family=socket.AF_INET6, backlog=cfg.CONF.backlog ) mock_pool.spawn.assert_has_calls([ mock.call( server._run, None, mock_listen.return_value) ]) def test_app(self): greetings = 'Hello, World!!!' def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server("test_app") server.start(hello_world, 0, host="127.0.0.1") response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port) self.assertEqual(greetings, response.read()) server.stop() class SerializerTest(base.BaseTestCase): def test_serialize_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" input_dict = {'servers': {'test': 'pass'}} content_type = 'application/unknown' serializer = wsgi.Serializer() self.assertRaises( exception.InvalidContentType, serializer.serialize, input_dict, content_type) def test_get_deserialize_handler_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" content_type = 'application/unknown' serializer = wsgi.Serializer() self.assertRaises( exception.InvalidContentType, serializer.get_deserialize_handler, content_type) def test_serialize_content_type_json(self): """Test serialize 
with content type json.""" input_data = {'servers': ['test=pass']} content_type = 'application/json' serializer = wsgi.Serializer(default_xmlns="fake") result = serializer.serialize(input_data, content_type) self.assertEqual('{"servers": ["test=pass"]}', result) def test_serialize_content_type_xml(self): """Test serialize with content type xml.""" input_data = {'servers': ['test=pass']} content_type = 'application/xml' serializer = wsgi.Serializer(default_xmlns="fake") result = serializer.serialize(input_data, content_type) expected = ( '<?xml version=\'1.0\'' ' encoding=\'UTF-8\'?>\n' '<servers xmlns="http://openstack.org/quantum/api/v2.0" ' 'xmlns:quantum="http://openstack.org/quantum/api/v2.0" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">' '<server>test=pass</server></servers>' ) self.assertEqual(expected, result) def test_deserialize_raise_bad_request(self): """Test serialize verifies that exception is raises.""" content_type = 'application/unknown' data_string = 'test' serializer = wsgi.Serializer(default_xmlns="fake") self.assertRaises( webob.exc.HTTPBadRequest, serializer.deserialize, data_string, content_type) def test_deserialize_json_content_type(self): """Test Serializer.deserialize with content type json.""" content_type = 'application/json' data_string = '{"servers": ["test=pass"]}' serializer = wsgi.Serializer(default_xmlns="fake") result = serializer.deserialize(data_string, content_type) self.assertEqual({'body': {u'servers': [u'test=pass']}}, result) def test_deserialize_xml_content_type(self): """Test deserialize with content type xml.""" content_type = 'application/xml' data_string = ( '<servers xmlns="fake">' '<server>test=pass</server>' '</servers>' ) serializer = wsgi.Serializer( default_xmlns="fake", metadata={'xmlns': 'fake'}) result = serializer.deserialize(data_string, content_type) expected = {'body': {'servers': {'server': 'test=pass'}}} self.assertEqual(expected, result) def test_deserialize_xml_content_type_with_meta(self): 
"""Test deserialize with content type xml with meta.""" content_type = 'application/xml' data_string = ( '<servers>' '<server name="s1">' '<test test="a">passed</test>' '</server>' '</servers>' ) metadata = {'plurals': {'servers': 'server'}, 'xmlns': 'fake'} serializer = wsgi.Serializer( default_xmlns="fake", metadata=metadata) result = serializer.deserialize(data_string, content_type) expected = {'body': {'servers': [{'name': 's1', 'test': 'passed'}]}} self.assertEqual(expected, result) def test_serialize_xml_root_key_is_dict(self): """Test Serializer.serialize with content type xml with meta dict.""" content_type = 'application/xml' data = {'servers': {'network': (2, 3)}} metadata = {'xmlns': 'fake'} serializer = wsgi.Serializer(default_xmlns="fake", metadata=metadata) result = serializer.serialize(data, content_type) result = result.replace('\n', '') expected = ( '<?xml version=\'1.0\' encoding=\'UTF-8\'?>' '<servers xmlns="fake" xmlns:quantum="fake" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">' '<network>(2, 3)</network></servers>' ) self.assertEqual(result, expected) def test_serialize_xml_root_key_is_list(self): """Test serialize with content type xml with meta list.""" input_dict = {'servers': ['test=pass']} content_type = 'application/xml' metadata = {'application/xml': { 'xmlns': 'fake'}} serializer = wsgi.Serializer(default_xmlns="fake", metadata=metadata) result = serializer.serialize(input_dict, content_type) result = result.replace('\n', '').replace(' ', '') expected = ( '<?xmlversion=\'1.0\'' 'encoding=\'UTF-8\'?>' '<serversxmlns="http://openstack.org/quantum/api/v2.0"' 'xmlns:quantum="http://openstack.org/quantum/api/v2.0"' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">' '<server>test=pass</server></servers>' ) self.assertEqual(result, expected) def test_serialize_xml_root_is_None(self): input_dict = {'test': 'pass'} content_type = 'application/xml' serializer = wsgi.Serializer(default_xmlns="fake") result = 
serializer.serialize(input_dict, content_type) result = result.replace('\n', '').replace(' ', '') expected = ( '<?xmlversion=\'1.0\'' 'encoding=\'UTF-8\'?>' '<testxmlns="http://openstack.org/quantum/api/v2.0"' 'xmlns:quantum="http://openstack.org/quantum/api/v2.0"' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">' 'pass</test>' ) self.assertEqual(result, expected) class RequestDeserializerTest(testtools.TestCase): def setUp(self): super(RequestDeserializerTest, self).setUp() class JSONDeserializer(object): def deserialize(self, data, action='default'): return 'pew_json' class XMLDeserializer(object): def deserialize(self, data, action='default'): return 'pew_xml' self.body_deserializers = { 'application/json': JSONDeserializer(), 'application/xml': XMLDeserializer()} self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) def test_get_deserializer(self): """Test RequestDeserializer.get_body_deserializer.""" expected_json_serializer = self.deserializer.get_body_deserializer( 'application/json') expected_xml_serializer = self.deserializer.get_body_deserializer( 'application/xml') self.assertEqual( expected_json_serializer, self.body_deserializers['application/json']) self.assertEqual( expected_xml_serializer, self.body_deserializers['application/xml']) def test_get_expected_content_type(self): """Test RequestDeserializer.get_expected_content_type.""" request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' self.assertEqual( self.deserializer.get_expected_content_type(request), 'application/json') def test_get_action_args(self): """Test RequestDeserializer.get_action_args.""" env = { 'wsgiorg.routing_args': [None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12}]} expected = {'action': 'update', 'id': 12} self.assertEqual( self.deserializer.get_action_args(env), expected) def test_deserialize(self): """Test RequestDeserializer.deserialize.""" with mock.patch.object( self.deserializer, 'get_action_args') 
as mock_method: mock_method.return_value = {'action': 'create'} request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/xml' deserialized = self.deserializer.deserialize(request) expected = ('create', {}, 'application/xml') self.assertEqual(expected, deserialized) def test_get_body_deserializer_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" content_type = 'application/unknown' deserializer = wsgi.RequestDeserializer() self.assertRaises( exception.InvalidContentType, deserializer.get_body_deserializer, content_type) class ResponseSerializerTest(testtools.TestCase): def setUp(self): super(ResponseSerializerTest, self).setUp() class JSONSerializer(object): def serialize(self, data, action='default'): return 'pew_json' class XMLSerializer(object): def serialize(self, data, action='default'): return 'pew_xml' class HeadersSerializer(object): def serialize(self, response, data, action): response.status_int = 404 self.body_serializers = { 'application/json': JSONSerializer(), 'application/xml': XMLSerializer()} self.serializer = wsgi.ResponseSerializer( self.body_serializers, HeadersSerializer()) def test_serialize_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" self.assertRaises( exception.InvalidContentType, self.serializer.serialize, {}, 'application/unknown') def test_get_body_serializer(self): """Verify that exception InvalidContentType is raised.""" self.assertRaises( exception.InvalidContentType, self.serializer.get_body_serializer, 'application/unknown') def test_get_serializer(self): """Test ResponseSerializer.get_body_serializer.""" content_type = 'application/json' self.assertEqual( self.serializer.get_body_serializer(content_type), self.body_serializers[content_type]) def test_serialize_json_response(self): response = self.serializer.serialize({}, 'application/json') self.assertEqual(response.headers['Content-Type'], 'application/json') 
self.assertEqual(response.body, 'pew_json') self.assertEqual(response.status_int, 404) def test_serialize_xml_response(self): response = self.serializer.serialize({}, 'application/xml') self.assertEqual(response.headers['Content-Type'], 'application/xml') self.assertEqual(response.body, 'pew_xml') self.assertEqual(response.status_int, 404) def test_serialize_response_None(self): response = self.serializer.serialize( None, 'application/json') self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, '') self.assertEqual(response.status_int, 404) class RequestTest(base.BaseTestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = "<body />" self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = "fake<br />" self.assertIsNone(request.get_content_type()) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual(result, "application/json") def test_content_type_with_given_content_types(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/new-type;" self.assertIsNone(request.get_content_type()) def test_content_type_from_accept(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" result = request.best_match_content_type() self.assertEqual(result, "application/xml") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() 
self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual(result, "application/xml") def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') result = request.best_match_content_type() self.assertEqual(result, "application/xml") request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual(result, "application/json") def test_content_type_accept_and_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual(result, "application/xml") def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual(result, "application/json") def test_content_type_accept_with_given_content_types(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/new_type" result = request.best_match_content_type() self.assertEqual(result, 'application/json') class ActionDispatcherTest(base.BaseTestCase): def test_dispatch(self): """Test ActionDispatcher.dispatch.""" serializer = wsgi.ActionDispatcher() serializer.create = lambda x: x self.assertEqual( serializer.dispatch('pants', action='create'), 'pants') def test_dispatch_action_None(self): """Test ActionDispatcher.dispatch with none action.""" serializer = wsgi.ActionDispatcher() serializer.create = lambda x: x + ' pants' serializer.default = lambda x: x + ' trousers' self.assertEqual( serializer.dispatch('Two', action=None), 'Two trousers') def 
test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: x + ' pants' serializer.default = lambda x: x + ' trousers' self.assertEqual( serializer.dispatch('Two', action='update'), 'Two trousers') class ResponseHeadersSerializerTest(base.BaseTestCase): def test_default(self): serializer = wsgi.ResponseHeaderSerializer() response = webob.Response() serializer.serialize(response, {'v': '123'}, 'fake') self.assertEqual(response.status_int, 200) def test_custom(self): class Serializer(wsgi.ResponseHeaderSerializer): def update(self, response, data): response.status_int = 404 response.headers['X-Custom-Header'] = data['v'] serializer = Serializer() response = webob.Response() serializer.serialize(response, {'v': '123'}, 'update') self.assertEqual(response.status_int, 404) self.assertEqual(response.headers['X-Custom-Header'], '123') class DictSerializerTest(base.BaseTestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual( serializer.serialize({}, 'NonExistentAction'), '') class JSONDictSerializerTest(base.BaseTestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_json) def test_json_with_utf8(self): input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c'))) expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_json) def test_json_with_unicode(self): input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc'))) expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') 
self.assertEqual(result, expected_json) class TextDeserializerTest(base.BaseTestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual( deserializer.deserialize({}, 'update'), {}) class JSONDeserializerTest(base.BaseTestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1'}}} deserializer = wsgi.JSONDeserializer() self.assertEqual( deserializer.deserialize(data), as_dict) def test_default_raise_Malformed_Exception(self): """Test JsonDeserializer.default. Test verifies JsonDeserializer.default raises exception MalformedRequestBody correctly. """ data_string = "" deserializer = wsgi.JSONDeserializer() self.assertRaises( exception.MalformedRequestBody, deserializer.default, data_string) def test_json_with_utf8(self): data = '{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}' as_dict = {'body': {'a': u'\u7f51\u7edc'}} deserializer = wsgi.JSONDeserializer() self.assertEqual( deserializer.deserialize(data), as_dict) def test_json_with_unicode(self): data = '{"a": "\u7f51\u7edc"}' as_dict = {'body': {'a': u'\u7f51\u7edc'}} deserializer = wsgi.JSONDeserializer() self.assertEqual( deserializer.deserialize(data), as_dict) class XMLDeserializerTest(base.BaseTestCase): def test_xml_empty(self): xml = '<a></a>' as_dict = {'body': {'a': ''}} deserializer = wsgi.XMLDeserializer() self.assertEqual( deserializer.deserialize(xml), as_dict) def test_initialization(self): xml = '<a><b>test</b></a>' deserializer = wsgi.XMLDeserializer() self.assertEqual( {'body': {u'a': {u'b': u'test'}}}, deserializer(xml)) def test_default_raise_Malformed_Exception(self): """Verify that exception MalformedRequestBody is raised.""" data_string = "" deserializer = wsgi.XMLDeserializer() self.assertRaises( exception.MalformedRequestBody, deserializer.default, data_string) def 
    # NOTE(review): the "def" keyword and the enclosing test-class header for
    # this first method are cut off at the top of this chunk; only the obvious
    # "def " prefix is restored here.  Verifies that UTF-8 encoded XML byte
    # strings deserialize into unicode values.
    def test_xml_with_utf8(self):
        xml = '<a>\xe7\xbd\x91\xe7\xbb\x9c</a>'
        as_dict = {'body': {'a': u'\u7f51\u7edc'}}
        deserializer = wsgi.XMLDeserializer()
        self.assertEqual(
            deserializer.deserialize(xml),
            as_dict)


class RequestHeadersDeserializerTest(base.BaseTestCase):
    # Tests for wsgi.RequestHeadersDeserializer: mapping an action name to a
    # method that extracts values from request headers.

    def test_default(self):
        # An action with no matching method deserializes to an empty dict.
        deserializer = wsgi.RequestHeadersDeserializer()
        req = wsgi.Request.blank('/')
        self.assertEqual(
            deserializer.deserialize(req, 'nonExistant'), {})

    def test_custom(self):
        # A subclass method named after the action receives the request and
        # returns the deserialized header values.
        class Deserializer(wsgi.RequestHeadersDeserializer):
            def update(self, request):
                return {'a': request.headers['X-Custom-Header']}

        deserializer = Deserializer()
        req = wsgi.Request.blank('/')
        req.headers['X-Custom-Header'] = 'b'
        self.assertEqual(
            deserializer.deserialize(req, 'update'), {'a': 'b'})


class ResourceTest(base.BaseTestCase):
    # Tests for wsgi.Resource: controller dispatch and translation of
    # deserialization failures into HTTP error responses.

    def test_dispatch(self):
        class Controller(object):
            def index(self, request, index=None):
                return index

        def my_fault_body_function():
            return 'off'

        resource = wsgi.Resource(Controller(), my_fault_body_function)
        actual = resource.dispatch(
            resource.controller, 'index',
            action_args={'index': 'off'})
        expected = 'off'
        self.assertEqual(actual, expected)

    def test_dispatch_unknown_controller_action(self):
        # Dispatching an action the controller does not define raises
        # AttributeError (getattr on the controller fails).
        class Controller(object):
            def index(self, request, pants=None):
                return pants

        def my_fault_body_function():
            return 'off'

        resource = wsgi.Resource(Controller(), my_fault_body_function)
        self.assertRaises(
            AttributeError, resource.dispatch,
            resource.controller, 'create', {})

    def test_malformed_request_body_throws_bad_request(self):
        # Unparseable JSON in the body yields HTTP 400.
        def my_fault_body_function():
            return 'off'

        resource = wsgi.Resource(None, my_fault_body_function)
        request = wsgi.Request.blank(
            "/", body="{mal:formed", method='POST',
            headers={'Content-Type': "application/json"})
        response = resource(request)
        self.assertEqual(response.status_int, 400)

    def test_wrong_content_type_throws_unsupported_media_type_error(self):
        # A body with an unrecognized Content-Type yields HTTP 400.
        def my_fault_body_function():
            return 'off'

        resource = wsgi.Resource(None, my_fault_body_function)
        request = wsgi.Request.blank(
            "/", body="{some:json}", method='POST',
            headers={'Content-Type': "xxx"})
        response = resource(request)
        self.assertEqual(response.status_int, 400)

    def test_wrong_content_type_server_error(self):
        # An unknown Content-Type with no body yields HTTP 500.
        def my_fault_body_function():
            return 'off'

        resource = wsgi.Resource(None, my_fault_body_function)
        request = wsgi.Request.blank(
            "/", method='POST', headers={'Content-Type': "unknow"})
        response = resource(request)
        self.assertEqual(response.status_int, 500)

    def test_call_resource_class_bad_request(self):
        class Controller(object):
            def index(self, request, index=None):
                return index

        def my_fault_body_function():
            return 'off'

        # Fake request whose best-match content type is not a real
        # deserializer, driving the 400 path.
        class FakeRequest():
            def __init__(self):
                self.url = 'http://where.no'
                self.environ = 'environ'
                self.body = 'body'

            def method(self):
                pass

            def best_match_content_type(self):
                return 'best_match_content_type'

        resource = wsgi.Resource(Controller(), my_fault_body_function)
        request = FakeRequest()
        result = resource(request)
        self.assertEqual(400, result.status_int)

    def test_type_error(self):
        # Invalid action_args cause dispatch to fail with a 400 response.
        class Controller(object):
            def index(self, request, index=None):
                return index

        def my_fault_body_function():
            return 'off'

        resource = wsgi.Resource(Controller(), my_fault_body_function)
        request = wsgi.Request.blank(
            "/", method='POST', headers={'Content-Type': "xml"})
        response = resource.dispatch(
            request, action='index', action_args='test')
        self.assertEqual(400, response.status_int)

    def test_call_resource_class_internal_error(self):
        class Controller(object):
            def index(self, request, index=None):
                return index

        def my_fault_body_function():
            return 'off'

        # Fake request that passes content-type negotiation but fails
        # later, driving the 500 path.
        class FakeRequest():
            def __init__(self):
                self.url = 'http://where.no'
                self.environ = 'environ'
                self.body = '{"Content-Type": "xml"}'

            def method(self):
                pass

            def best_match_content_type(self):
                return 'application/json'

        resource = wsgi.Resource(Controller(), my_fault_body_function)
        request = FakeRequest()
        result = resource(request)
        self.assertEqual(500, result.status_int)


class ServerTest(base.BaseTestCase):

    def test_run_server(self):
        # run_server should plug the eventlet listener socket into the
        # eventlet wsgi server for the given application/port.
        with mock.patch('eventlet.listen') as listen:
            with mock.patch('eventlet.wsgi.server') as server:
                wsgi.run_server(mock.sentinel.application,
                                mock.sentinel.port)
                server.assert_called_once_with(
                    listen.return_value,
                    mock.sentinel.application
                )
                listen.assert_called_once_with(('0.0.0.0', mock.sentinel.port))


class MiddlewareTest(base.BaseTestCase):
    def test_process_response(self):
        # The default Middleware.process_response is a pass-through.
        def application(environ, start_response):
            response = 'Sucess'
            return response

        response = application('test', 'fake')

        result = wsgi.Middleware(application).process_response(response)
        self.assertEqual('Sucess', result)


class FaultTest(base.BaseTestCase):
    def test_call_fault(self):
        # Fault renders the wrapped exception's status_int as the response.
        class MyException(object):
            status_int = 415
            explanation = 'test'

        my_exceptions = MyException()
        my_fault = wsgi.Fault(exception=my_exceptions)
        request = wsgi.Request.blank(
            "/", method='POST', headers={'Content-Type': "unknow"})
        response = my_fault(request)
        self.assertEqual(415, response.status_int)


class XMLDictSerializerTest(base.BaseTestCase):
    # Round-trip tests: serialize a dict to XML, deserialize it back, and
    # check the result equals the original.

    def test_xml(self):
        NETWORK = {'network': {'test': None,
                               'tenant_id': 'test-tenant',
                               'name': 'net1',
                               'admin_state_up': True,
                               'subnets': [],
                               'dict': {},
                               'int': 3,
                               'long': 4L,
                               'float': 5.0,
                               'prefix:external': True,
                               'tests': [{'test1': 'value1'},
                                         {'test2': 2, 'test3': 3}]}}
        # XML is:
        # <network xmlns="http://openstack.org/quantum/api/v2.0"
        #    xmlns:prefix="http://xxxx.yy.com"
        #    xmlns:quantum="http://openstack.org/quantum/api/v2.0"
        #    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
        #    <subnets quantum:type="list" />  # Empty List
        #    <int quantum:type="int">3</int>  # Integer text
        #    <int quantum:type="long">4</int>  # Long text
        #    <int quantum:type="float">5.0</int>  # Float text
        #    <dict quantum:type="dict" />  # Empty Dict
        #    <name>net1</name>
        #    <admin_state_up quantum:type="bool">True</admin_state_up>  # Bool
        #    <test xsi:nil="true" />  # None
        #    <tenant_id>test-tenant</tenant_id>
        #    # We must have a namespace defined in root for prefix:external
        #    <prefix:external quantum:type="bool">True</prefix:external>
        #    <tests>  # List
        #       <test><test1>value1</test1></test>
        #       <test><test3 quantum:type="int">3</test3>
        #             <test2 quantum:type="int">2</test2>
        #       </test></tests>
        # </network>
        metadata = attributes.get_attr_metadata()
        ns = {'prefix': 'http://xxxx.yy.com'}
        metadata[constants.EXT_NS] = ns
        metadata['plurals'] = {'tests': 'test'}
        serializer = wsgi.XMLDictSerializer(metadata)
        result = serializer.serialize(NETWORK)
        deserializer = wsgi.XMLDeserializer(metadata)
        new_net = deserializer.deserialize(result)['body']
        self.assertEqual(NETWORK, new_net)

    def test_None(self):
        data = None
        # Since it is None, we use xsi:nil='true'.
        # In addition, we use an
        # virtual XML root _v_root to wrap the XML doc.
        # XML is:
        # <_v_root xsi:nil="true"
        #          xmlns="http://openstack.org/quantum/api/v2.0"
        #          xmlns:quantum="http://openstack.org/quantum/api/v2.0"
        #          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" />
        serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
        result = serializer.serialize(data)
        deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
        new_data = deserializer.deserialize(result)['body']
        self.assertIsNone(new_data)

    def test_empty_dic_xml(self):
        data = {}
        # Since it is an empty dict, we use quantum:type='dict' and
        # an empty XML element to represent it. In addition, we use an
        # virtual XML root _v_root to wrap the XML doc.
        # XML is:
        # <_v_root quantum:type="dict"
        #          xmlns="http://openstack.org/quantum/api/v2.0"
        #          xmlns:quantum="http://openstack.org/quantum/api/v2.0"
        #          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" />
        serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
        result = serializer.serialize(data)
        deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
        new_data = deserializer.deserialize(result)['body']
        self.assertEqual(data, new_data)

    def test_non_root_one_item_dic_xml(self):
        data = {'test1': 1}
        # We have a key in this dict, and its value is an integer.
        # XML is:
        # <test1 quantum:type="int"
        #        xmlns="http://openstack.org/quantum/api/v2.0"
        #        xmlns:quantum="http://openstack.org/quantum/api/v2.0"
        #        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
        #        1</test1>
        serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
        result = serializer.serialize(data)
        deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
        new_data = deserializer.deserialize(result)['body']
        self.assertEqual(data, new_data)

    def test_non_root_two_items_dic_xml(self):
        data = {'test1': 1, 'test2': '2'}
        # We have no root element in this data, We will use a virtual
        # root element _v_root to wrap the doct.
        # The XML is:
        # <_v_root xmlns="http://openstack.org/quantum/api/v2.0"
        #          xmlns:quantum="http://openstack.org/quantum/api/v2.0"
        #          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
        #          <test1 quantum:type="int">1</test1><test2>2</test2>
        # </_v_root>
        serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata())
        result = serializer.serialize(data)
        deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata())
        new_data = deserializer.deserialize(result)['body']
        self.assertEqual(data, new_data)

    def test_xml_root_key_is_list(self):
        # A list value is serialized with the singular item tag.
        input_dict = {'servers': ['test-pass']}
        serializer = wsgi.XMLDictSerializer(xmlns="fake")
        result = serializer.default(input_dict)
        # Strip whitespace so the comparison is layout-independent.
        result = result.replace('\n', '').replace(' ', '')
        expected = (
            '<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
            '<serversxmlns="fake"xmlns:quantum="fake"'
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            '<server>test-pass</server></servers>'
        )
        self.assertEqual(result, expected)

    def test_xml_meta_contains_node_name_list(self):
        input_dict = {'servers': ['test-pass']}
        servers = {'nodename': 'test',
                   'item_name': 'test',
                   'item_key': 'test'}
        metadata = {'list_collections': {'servers': servers}}
        serializer = wsgi.XMLDictSerializer(xmlns="fake", metadata=metadata)
        result = serializer.default(input_dict)
        result = result.replace('\n', '').replace(' ', '')
        expected = (
            '<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
            '<serversxmlns="fake"xmlns:quantum="fake"'
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            '<server>test-pass</server></servers>'
        )
        self.assertEqual(result, expected)

    def test_xml_meta_contains_node_name_dict(self):
        input_dict = {'servers': {'a': {'2': '3'}}}
        servers = {'servers': {
            'nodename': 'test',
            'item_name': 'test',
            'item_key': 'test'}}
        metadata = {'dict_collections': servers}
        serializer = wsgi.XMLDictSerializer(xmlns="fake", metadata=metadata)
        result = serializer.default(input_dict)
        result = result.replace('\n', '').replace(' ', '')
        expected = (
            '<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
            '<serversxmlns="fake"xmlns:quantum="fake"'
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            '<a><2>3</2></a></servers>'
        )
        self.assertEqual(result, expected)

    def test_call(self):
        # Calling the serializer directly uses the default quantum xmlns.
        data = {'servers': {'a': {'2': '3'}}}
        serializer = wsgi.XMLDictSerializer()
        expected = (
            '<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
            '<serversxmlns="http://openstack.org/quantum/api/v2.0"'
            'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            '<a><2>3</2></a></servers>'
        )
        result = serializer(data)
        result = result.replace('\n', '').replace(' ', '')
        self.assertEqual(expected, result)

    def test_xml_with_utf8(self):
        # UTF-8 byte-string payloads pass through unchanged.
        data = {'servers': '\xe7\xbd\x91\xe7\xbb\x9c'}
        serializer = wsgi.XMLDictSerializer()
        expected = (
            '<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
            '<serversxmlns="http://openstack.org/quantum/api/v2.0"'
            'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            '\xe7\xbd\x91\xe7\xbb\x9c</servers>'
        )
        result = serializer(data)
        result = result.replace('\n', '').replace(' ', '')
        self.assertEqual(expected, result)

    def test_xml_with_unicode(self):
        # Unicode payloads are encoded to UTF-8 bytes in the output.
        data = {'servers': u'\u7f51\u7edc'}
        serializer = wsgi.XMLDictSerializer()
        expected = (
            '<?xmlversion=\'1.0\'encoding=\'UTF-8\'?>'
            '<serversxmlns="http://openstack.org/quantum/api/v2.0"'
            'xmlns:quantum="http://openstack.org/quantum/api/v2.0"'
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
            '\xe7\xbd\x91\xe7\xbb\x9c</servers>'
        )
        result = serializer(data)
        result = result.replace('\n', '').replace(' ', '')
        self.assertEqual(expected, result)


class TestWSGIServerWithSSL(base.BaseTestCase):
    """WSGI server tests."""

    def test_app_using_ssl(self):
        # Serve a trivial app over HTTPS (IPv4) and fetch it back.
        CONF.set_default('use_ssl', True)
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))

        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = wsgi.Server("test_app")
        # Port 0 lets the OS pick a free port; read it back via server.port.
        server.start(hello_world, 0, host="127.0.0.1")

        response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port)
        self.assertEqual(greetings, response.read())

        server.stop()

    def test_app_using_ipv6_and_ssl(self):
        # Same as above but bound to the IPv6 loopback address.
        CONF.set_default('use_ssl', True)
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))

        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = wsgi.Server("test_app")
        server.start(hello_world, 0, host="::1")

        response = urllib2.urlopen('https://[::1]:%d/' % server.port)
        self.assertEqual(greetings, response.read())

        server.stop()
unknown
codeparrot/codeparrot-clean
import logging

import requests

from sentry.utils import json
from sentry.utils.cache import cache
from simplejson.decoder import JSONDecodeError
from BeautifulSoup import BeautifulStoneSoup
from django.utils.datastructures import SortedDict

log = logging.getLogger(__name__)

# Cache key is parameterized by (request URL, JIRA instance URL) so that
# multiple configured JIRA instances never share cached responses.
CACHE_KEY = "SENTRY-JIRA-%s-%s"


class JIRAClient(object):
    """
    The JIRA API Client, so you don't have to.

    Thin wrapper over the JIRA REST API v2, authenticating every request
    with HTTP basic auth and wrapping responses in JIRAResponse.
    """
    PROJECT_URL = '/rest/api/2/project'
    META_URL = '/rest/api/2/issue/createmeta'
    CREATE_URL = '/rest/api/2/issue'
    PRIORITIES_URL = '/rest/api/2/priority'
    VERSIONS_URL = '/rest/api/2/project/%s/versions'

    def __init__(self, instance_uri, username, password):
        self.instance_url = instance_uri
        self.username = username
        self.password = password

    def get_projects_list(self):
        """Return the (cached) list of projects on this instance."""
        return self.get_cached(self.PROJECT_URL)

    def get_create_meta(self, project):
        """Fetch issue-creation metadata (fields per issue type) for *project*.

        Not cached: get_cached() keys only on URL, and the project is passed
        as a query parameter here (see the TODO on get_cached).
        """
        return self.make_request('get', self.META_URL, {
            'projectKeys': project,
            'expand': 'projects.issuetypes.fields',
        })

    def get_versions(self, project):
        """Return the (cached) versions defined for *project*."""
        return self.get_cached(self.VERSIONS_URL % project)

    def get_priorities(self):
        """Return the (cached) list of issue priorities on this instance."""
        return self.get_cached(self.PRIORITIES_URL)

    def create_issue(self, raw_form_data):
        """Create an issue from already-validated form data."""
        data = {'fields': raw_form_data}
        return self.make_request('post', self.CREATE_URL, payload=data)

    def make_request(self, method, url, payload=None):
        """Issue an authenticated request and wrap the result.

        :param method: 'get' (payload sent as query params) or anything
            else (treated as POST with a JSON body).
        :param url: absolute URL, or a path relative to the instance URL.
        :param payload: query params (GET) or JSON-serializable body (POST).
        :returns: a JIRAResponse; network errors are reported as a
            synthetic 500 response instead of being raised.
        """
        if url[:4] != "http":
            url = self.instance_url + url
        auth = self.username, self.password
        headers = {'content-type': 'application/json'}
        try:
            # NOTE(review): verify=False disables TLS certificate
            # verification. Left in place (self-hosted JIRA instances with
            # self-signed certs may depend on it) but flagged for follow-up.
            if method == 'get':
                r = requests.get(url, params=payload, auth=auth,
                                 headers=headers, verify=False)
            else:
                r = requests.post(url, data=json.dumps(payload), auth=auth,
                                  headers=headers, verify=False)
            return JIRAResponse(r.text, r.status_code)
        except Exception as e:
            # Fix: log through the module logger (was the root ``logging``
            # module, bypassing this module's logger config) with lazy
            # %-args, and use str(e) -- ``e.message`` is deprecated and
            # missing on many exception types.
            log.error('Error in request to %s: %s', url, e)
            return JIRAResponse(
                "There was a problem reaching %s: %s" % (url, e), 500)

    def get_cached(self, full_url):
        """
        Basic Caching mechanism for requests and responses. It only caches
        responses based on URL.
        TODO: Implement GET attr in cache as well.
        (see self.create_meta for example)
        """
        key = CACHE_KEY % (full_url, self.instance_url)
        cached_result = cache.get(key)
        if not cached_result:
            cached_result = self.make_request('get', full_url)
            # Only cache successful responses, for 60 seconds.
            if cached_result.status_code == 200:
                cache.set(key, cached_result, 60)
        return cached_result


class JIRAResponse(object):
    """
    A Slimy little wrapper around a python-requests response object that
    renders JSON from JIRA's ordered dicts (fields come back in order, but
    python obv. doesn't care)
    """
    def __init__(self, response_text, status_code):
        self.text = response_text
        self.xml = None
        try:
            # Preserve JIRA's field ordering for rendering.
            self.json = json.loads(response_text, object_pairs_hook=SortedDict)
        except (JSONDecodeError, ValueError):
            if self.text.startswith("<?xml"):
                # perhaps it's XML?
                self.xml = BeautifulStoneSoup(self.text)
            # must be an awful code.
            self.json = None
        self.status_code = status_code

    def __repr__(self):
        return "<JIRAResponse<%s> %s>" % (self.status_code, self.text[:120])
unknown
codeparrot/codeparrot-clean
#include "test/jemalloc_test.h"

/*
 * Unit test for the experimental.batch_alloc mallctl: allocate many
 * same-sized objects in one call and verify size, zeroing, arena
 * attachment, and slab locality of the returned pointers.
 */

/* Largest batch requested below ((1 << 16) + 1, rounded up with slack). */
#define BATCH_MAX ((1U << 16) + 1024)
static void *global_ptrs[BATCH_MAX];

#define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)

/* Check every pointer has the expected usable size and, if requested,
 * that its entire contents are zeroed. */
static void
verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
    bool zero) {
	for (size_t i = 0; i < batch; ++i) {
		void *p = ptrs[i];
		expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, "");
		if (zero) {
			for (size_t k = 0; k < usize; ++k) {
				expect_true(*((unsigned char *)p + k) == 0, "");
			}
		}
	}
}

/* Check that pointers come from the expected arena and that each full
 * group of nregs regions is laid out contiguously within one slab
 * (page-aligned slab start, consecutive regions usize apart). */
static void
verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
    arena_t *arena, unsigned nregs) {
	if (config_prof && opt_prof) {
		/*
		 * Checking batch locality when prof is on is feasible but
		 * complicated, while checking the non-prof case suffices for
		 * unit-test purpose.
		 */
		return;
	}
	for (size_t i = 0, j = 0; i < batch; ++i, ++j) {
		if (j == nregs) {
			/* j indexes the region within the current slab. */
			j = 0;
		}
		if (j == 0 && batch - i < nregs) {
			/* Skip the final partial slab group. */
			break;
		}
		void *p = ptrs[i];
		expect_ptr_eq(iaalloc(tsd_tsdn(tsd), p), arena, "");
		if (j == 0) {
			expect_true(PAGE_ALIGNED(p), "");
			continue;
		}
		assert(i > 0);
		void *q = ptrs[i - 1];
		expect_true((uintptr_t)p > (uintptr_t)q
		    && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, "");
	}
}

/* Free all pointers in the batch via sdallocx with the original size. */
static void
release_batch(void **ptrs, size_t batch, size_t size) {
	for (size_t i = 0; i < batch; ++i) {
		sdallocx(ptrs[i], size, 0);
	}
}

/* Argument record passed through the experimental.batch_alloc mallctl. */
typedef struct batch_alloc_packet_s batch_alloc_packet_t;
struct batch_alloc_packet_s {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
};

/* Invoke batch allocation through the mallctl interface and return the
 * number of slots actually filled. */
static size_t
batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) {
	batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags};
	size_t filled;
	size_t len = sizeof(size_t);
	assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
	    &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
	return filled;
}

/* Core driver: exercise batches sized around 0, nregs, 2*nregs and 2^16
 * (each +/- 1) for the given size/alignment/zero/arena configuration. */
static void
test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
	tsd_t *tsd = tsd_fetch();
	assert(tsd != NULL);
	const size_t usize =
	    (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size));
	const szind_t ind = sz_size2index(usize);
	const bin_info_t *bin_info = &bin_infos[ind];
	const unsigned nregs = bin_info->nregs;
	assert(nregs > 0);
	arena_t *arena;
	if (arena_flag != 0) {
		arena = arena_get(tsd_tsdn(tsd),
		    MALLOCX_ARENA_GET(arena_flag), false);
	} else {
		arena = arena_choose(tsd, NULL);
	}
	assert(arena != NULL);
	int flags = arena_flag;
	if (alignment != 0) {
		flags |= MALLOCX_ALIGN(alignment);
	}
	if (zero) {
		flags |= MALLOCX_ZERO;
	}

	/*
	 * Allocate for the purpose of bootstrapping arena_tdata, so that the
	 * change in bin stats won't contaminate the stats to be verified below.
	 */
	void *p = mallocx(size, flags | MALLOCX_TCACHE_NONE);

	for (size_t i = 0; i < 4; ++i) {
		size_t base = 0;
		if (i == 1) {
			base = nregs;
		} else if (i == 2) {
			base = nregs * 2;
		} else if (i == 3) {
			base = (1 << 16);
		}
		for (int j = -1; j <= 1; ++j) {
			if (base == 0 && j == -1) {
				/* A batch size of (size_t)-1 is meaningless. */
				continue;
			}
			size_t batch = base + (size_t)j;
			assert(batch < BATCH_MAX);
			size_t filled = batch_alloc_wrapper(global_ptrs, batch,
			    size, flags);
			assert_zu_eq(filled, batch, "");
			verify_batch_basic(tsd, global_ptrs, batch, usize,
			    zero);
			verify_batch_locality(tsd, global_ptrs, batch, usize,
			    arena, nregs);
			release_batch(global_ptrs, batch, usize);
		}
	}

	free(p);
}

TEST_BEGIN(test_batch_alloc) {
	test_wrapper(11, 0, false, 0);
}
TEST_END

TEST_BEGIN(test_batch_alloc_zero) {
	test_wrapper(11, 0, true, 0);
}
TEST_END

TEST_BEGIN(test_batch_alloc_aligned) {
	test_wrapper(7, 16, false, 0);
}
TEST_END

TEST_BEGIN(test_batch_alloc_manual_arena) {
	/* Same as the basic case, but pinned to a freshly created arena. */
	unsigned arena_ind;
	size_t len_unsigned = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL,
	    0), 0, "");
	test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind));
}
TEST_END

TEST_BEGIN(test_batch_alloc_large) {
	/* Large sizes bypass bins; only check fill counts, via the direct
	 * batch_alloc() entry point. */
	size_t size = SC_LARGE_MINCLASS;
	for (size_t batch = 0; batch < 4; ++batch) {
		assert(batch < BATCH_MAX);
		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
		assert_zu_eq(filled, batch, "");
		release_batch(global_ptrs, batch, size);
	}
	size = tcache_maxclass + 1;
	for (size_t batch = 0; batch < 4; ++batch) {
		assert(batch < BATCH_MAX);
		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
		assert_zu_eq(filled, batch, "");
		release_batch(global_ptrs, batch, size);
	}
}
TEST_END

int
main(void) {
	return test(
	    test_batch_alloc,
	    test_batch_alloc_zero,
	    test_batch_alloc_aligned,
	    test_batch_alloc_manual_arena,
	    test_batch_alloc_large);
}
c
github
https://github.com/redis/redis
deps/jemalloc/test/unit/batch_alloc.c
//===--- MacroDiscriminatorContext.h - Macro Discriminators -----*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #ifndef SWIFT_AST_MACRO_DISCRIMINATOR_CONTEXT_H #define SWIFT_AST_MACRO_DISCRIMINATOR_CONTEXT_H #include "swift/AST/Decl.h" #include "swift/AST/Expr.h" #include "llvm/ADT/PointerUnion.h" namespace swift { /// Describes the context of a macro expansion for the purpose of /// computing macro expansion discriminators. struct MacroDiscriminatorContext : public llvm::PointerUnion<DeclContext *, FreestandingMacroExpansion *> { using PointerUnion::PointerUnion; static MacroDiscriminatorContext getParentOf(FreestandingMacroExpansion *expansion); static MacroDiscriminatorContext getParentOf( SourceLoc loc, DeclContext *origDC ); /// Return the innermost declaration context that is suitable for /// use in identifying a macro. static DeclContext *getInnermostMacroContext(DeclContext *dc); }; } #endif // SWIFT_AST_MACRO_DISCRIMINATOR_CONTEXT_H
c
github
https://github.com/apple/swift
include/swift/AST/MacroDiscriminatorContext.h
import copy
import datetime
import json
import uuid

from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import MaxValueValidator, RegexValidator
from django.forms import (
    BooleanField,
    BoundField,
    CharField,
    CheckboxSelectMultiple,
    ChoiceField,
    DateField,
    DateTimeField,
    EmailField,
    Field,
    FileField,
    FileInput,
    FloatField,
    Form,
    HiddenInput,
    ImageField,
    IntegerField,
    MultipleChoiceField,
    MultipleHiddenInput,
    MultiValueField,
    MultiWidget,
    NullBooleanField,
    PasswordInput,
    RadioSelect,
    Select,
    SplitDateTimeField,
    SplitHiddenDateTimeWidget,
    Textarea,
    TextInput,
    TimeField,
    ValidationError,
)
from django.forms.renderers import DjangoTemplates, get_default_renderer
from django.forms.utils import ErrorDict, ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.datastructures import MultiValueDict
from django.utils.safestring import mark_safe

from . import jinja2_tests


class FrameworkForm(Form):
    # Small fixture form for tests that need a RadioSelect choice field.
    name = CharField()
    language = ChoiceField(choices=[("P", "Python"), ("J", "Java")], widget=RadioSelect)


class Person(Form):
    # Canonical three-field fixture form used throughout FormsTestCase.
    first_name = CharField()
    last_name = CharField()
    birthday = DateField()


class PersonNew(Form):
    # Variant of Person with an explicit widget id on the first field.
    first_name = CharField(widget=TextInput(attrs={"id": "first_name_id"}))
    last_name = CharField()
    birthday = DateField()


class SongForm(Form):
    # Fixture form exercising MultipleChoiceField with checkboxes.
    name = CharField()
    composers = MultipleChoiceField(
        choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
        widget=CheckboxSelectMultiple,
    )


class MultiValueDictLike(dict):
    # Minimal stand-in implementing only the getlist() part of the
    # MultiValueDict interface.
    def getlist(self, key):
        return [self[key]]


class FormsTestCase(SimpleTestCase):
    # A Form is a collection of Fields. It knows how to validate a set of data
    # and it knows how to render itself in a couple of default ways (e.g., an
    # HTML table). You can pass it data in __init__(), as a dictionary.
def test_form(self): # Pass a dictionary to a Form's __init__(). p = Person( {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"} ) self.assertTrue(p.is_bound) self.assertEqual(p.errors, {}) self.assertIsInstance(p.errors, dict) self.assertTrue(p.is_valid()) self.assertHTMLEqual(p.errors.as_ul(), "") self.assertEqual(p.errors.as_text(), "") self.assertEqual(p.cleaned_data["first_name"], "John") self.assertEqual(p.cleaned_data["last_name"], "Lennon") self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9)) self.assertHTMLEqual( str(p["first_name"]), '<input type="text" name="first_name" value="John" id="id_first_name" ' "required>", ) self.assertHTMLEqual( str(p["last_name"]), '<input type="text" name="last_name" value="Lennon" id="id_last_name" ' "required>", ) self.assertHTMLEqual( str(p["birthday"]), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" ' "required>", ) msg = ( "Key 'nonexistentfield' not found in 'Person'. Choices are: birthday, " "first_name, last_name." 
) with self.assertRaisesMessage(KeyError, msg): p["nonexistentfield"] form_output = [] for boundfield in p: form_output.append(str(boundfield)) self.assertHTMLEqual( "\n".join(form_output), '<input type="text" name="first_name" value="John" id="id_first_name" ' "required>" '<input type="text" name="last_name" value="Lennon" id="id_last_name" ' "required>" '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" ' "required>", ) form_output = [] for boundfield in p: form_output.append([boundfield.label, boundfield.data]) self.assertEqual( form_output, [ ["First name", "John"], ["Last name", "Lennon"], ["Birthday", "1940-10-9"], ], ) self.assertHTMLEqual( str(p), '<div><label for="id_first_name">First name:</label><input type="text" ' 'name="first_name" value="John" required id="id_first_name"></div><div>' '<label for="id_last_name">Last name:</label><input type="text" ' 'name="last_name" value="Lennon" required id="id_last_name"></div><div>' '<label for="id_birthday">Birthday:</label><input type="text" ' 'name="birthday" value="1940-10-9" required id="id_birthday"></div>', ) self.assertHTMLEqual( p.as_div(), '<div><label for="id_first_name">First name:</label><input type="text" ' 'name="first_name" value="John" required id="id_first_name"></div><div>' '<label for="id_last_name">Last name:</label><input type="text" ' 'name="last_name" value="Lennon" required id="id_last_name"></div><div>' '<label for="id_birthday">Birthday:</label><input type="text" ' 'name="birthday" value="1940-10-9" required id="id_birthday"></div>', ) def test_empty_dict(self): # Empty dictionaries are valid, too. 
p = Person({}) self.assertTrue(p.is_bound) self.assertEqual(p.errors["first_name"], ["This field is required."]) self.assertEqual(p.errors["last_name"], ["This field is required."]) self.assertEqual(p.errors["birthday"], ["This field is required."]) self.assertFalse(p.is_valid()) self.assertEqual(p.cleaned_data, {}) self.assertHTMLEqual( str(p), '<div><label for="id_first_name">First name:</label>' '<ul class="errorlist" id="id_first_name_error"><li>This field is required.' '</li></ul><input type="text" name="first_name" aria-invalid="true" ' 'required id="id_first_name" aria-describedby="id_first_name_error"></div>' '<div><label for="id_last_name">Last name:</label>' '<ul class="errorlist" id="id_last_name_error"><li>This field is required.' '</li></ul><input type="text" name="last_name" aria-invalid="true" ' 'required id="id_last_name" aria-describedby="id_last_name_error"></div>' '<div><label for="id_birthday">Birthday:</label>' '<ul class="errorlist" id="id_birthday_error"><li>This field is required.' 
'</li></ul><input type="text" name="birthday" aria-invalid="true" required ' 'id="id_birthday" aria-describedby="id_birthday_error"></div>', ) self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <ul class="errorlist" id="id_first_name_error"><li>This field is required.</li></ul> <input type="text" name="first_name" id="id_first_name" aria-invalid="true" required aria-describedby="id_first_name_error"> </td></tr><tr><th><label for="id_last_name">Last name:</label></th> <td><ul class="errorlist" id="id_last_name_error"><li>This field is required.</li></ul> <input type="text" name="last_name" id="id_last_name" aria-invalid="true" required aria-describedby="id_last_name_error"> </td></tr><tr><th><label for="id_birthday">Birthday:</label></th> <td><ul class="errorlist" id="id_birthday_error"><li>This field is required.</li></ul> <input type="text" name="birthday" id="id_birthday" aria-invalid="true" required aria-describedby="id_birthday_error"> </td></tr>""", ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist" id="id_first_name_error"> <li>This field is required.</li></ul> <label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" aria-invalid="true" required aria-describedby="id_first_name_error"> </li><li><ul class="errorlist" id="id_last_name_error"><li>This field is required.</li> </ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" aria-invalid="true" required aria-describedby="id_last_name_error"> </li><li><ul class="errorlist" id="id_birthday_error"><li>This field is required.</li> </ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" aria-invalid="true" required aria-describedby="id_birthday_error"> </li>""", ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist" id="id_first_name_error"><li> This field is required.</li></ul> <p><label 
for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" aria-invalid="true" required aria-describedby="id_first_name_error"> </p><ul class="errorlist" id="id_last_name_error"><li>This field is required.</li></ul> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" aria-invalid="true" required aria-describedby="id_last_name_error"> </p><ul class="errorlist" id="id_birthday_error"><li>This field is required.</li></ul> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" aria-invalid="true" required aria-describedby="id_birthday_error"> </p>""", ) self.assertHTMLEqual( p.as_div(), '<div><label for="id_first_name">First name:</label>' '<ul class="errorlist" id="id_first_name_error"><li>This field is required.' '</li></ul><input type="text" name="first_name" aria-invalid="true" ' 'required id="id_first_name" aria-describedby="id_first_name_error"></div>' '<div><label for="id_last_name">Last name:</label>' '<ul class="errorlist" id="id_last_name_error"><li>This field is required.' '</li></ul><input type="text" name="last_name" aria-invalid="true" ' 'required id="id_last_name" aria-describedby="id_last_name_error"></div>' '<div><label for="id_birthday">Birthday:</label>' '<ul class="errorlist" id="id_birthday_error"><li>This field is required.' '</li></ul><input type="text" name="birthday" aria-invalid="true" required ' 'id="id_birthday" aria-describedby="id_birthday_error"></div>', ) def test_empty_querydict_args(self): data = QueryDict() files = QueryDict() p = Person(data, files) self.assertIs(p.data, data) self.assertIs(p.files, files) def test_unbound_form(self): # If you don't pass any values to the Form's __init__(), or if you pass # None, the Form will be considered unbound and won't do any # validation. Form.errors will be an empty dictionary *but* # Form.is_valid() will return False. 
p = Person() self.assertFalse(p.is_bound) self.assertEqual(p.errors, {}) self.assertFalse(p.is_valid()) with self.assertRaises(AttributeError): p.cleaned_data self.assertHTMLEqual( str(p), '<div><label for="id_first_name">First name:</label><input type="text" ' 'name="first_name" id="id_first_name" required></div><div><label ' 'for="id_last_name">Last name:</label><input type="text" name="last_name" ' 'id="id_last_name" required></div><div><label for="id_birthday">' 'Birthday:</label><input type="text" name="birthday" id="id_birthday" ' "required></div>", ) self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" id="id_birthday" required></td></tr>""", ) self.assertHTMLEqual( p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required></li>""", ) self.assertHTMLEqual( p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required></p>""", ) self.assertHTMLEqual( p.as_div(), '<div><label for="id_first_name">First name:</label><input type="text" ' 'name="first_name" id="id_first_name" required></div><div><label ' 
'for="id_last_name">Last name:</label><input type="text" name="last_name" ' 'id="id_last_name" required></div><div><label for="id_birthday">' 'Birthday:</label><input type="text" name="birthday" id="id_birthday" ' "required></div>", ) def test_unicode_values(self): # Unicode values are handled properly. p = Person( { "first_name": "John", "last_name": "\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111", "birthday": "1940-10-9", } ) self.assertHTMLEqual( p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td>' '<input type="text" name="first_name" value="John" id="id_first_name" ' "required></td></tr>\n" '<tr><th><label for="id_last_name">Last name:</label>' '</th><td><input type="text" name="last_name" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"' 'id="id_last_name" required></td></tr>\n' '<tr><th><label for="id_birthday">Birthday:</label></th><td>' '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" ' "required></td></tr>", ) self.assertHTMLEqual( p.as_ul(), '<li><label for="id_first_name">First name:</label> ' '<input type="text" name="first_name" value="John" id="id_first_name" ' "required></li>\n" '<li><label for="id_last_name">Last name:</label> ' '<input type="text" name="last_name" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" ' 'id="id_last_name" required></li>\n' '<li><label for="id_birthday">Birthday:</label> ' '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" ' "required></li>", ) self.assertHTMLEqual( p.as_p(), '<p><label for="id_first_name">First name:</label> ' '<input type="text" name="first_name" value="John" id="id_first_name" ' "required></p>\n" '<p><label for="id_last_name">Last name:</label> ' '<input type="text" name="last_name" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" ' 'id="id_last_name" required></p>\n' '<p><label for="id_birthday">Birthday:</label> ' '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" ' 
"required></p>", ) self.assertHTMLEqual( p.as_div(), '<div><label for="id_first_name">First name:</label>' '<input type="text" name="first_name" value="John" id="id_first_name" ' 'required></div><div><label for="id_last_name">Last name:</label>' '<input type="text" name="last_name"' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" ' 'id="id_last_name" required></div><div><label for="id_birthday">' 'Birthday:</label><input type="text" name="birthday" value="1940-10-9" ' 'id="id_birthday" required></div>', ) p = Person({"last_name": "Lennon"}) self.assertEqual(p.errors["first_name"], ["This field is required."]) self.assertEqual(p.errors["birthday"], ["This field is required."]) self.assertFalse(p.is_valid()) self.assertEqual( p.errors, { "birthday": ["This field is required."], "first_name": ["This field is required."], }, ) self.assertEqual(p.cleaned_data, {"last_name": "Lennon"}) self.assertEqual(p["first_name"].errors, ["This field is required."]) self.assertHTMLEqual( p["first_name"].errors.as_ul(), '<ul class="errorlist" id="id_first_name_error">' "<li>This field is required.</li></ul>", ) self.assertEqual(p["first_name"].errors.as_text(), "* This field is required.") p = Person() self.assertHTMLEqual( str(p["first_name"]), '<input type="text" name="first_name" id="id_first_name" required>', ) self.assertHTMLEqual( str(p["last_name"]), '<input type="text" name="last_name" id="id_last_name" required>', ) self.assertHTMLEqual( str(p["birthday"]), '<input type="text" name="birthday" id="id_birthday" required>', ) def test_cleaned_data_only_fields(self): # cleaned_data will always *only* contain a key for fields defined in # the Form, even if you pass extra data when you define the Form. In # this example, we pass a bunch of extra fields to the form # constructor, but cleaned_data contains only the form's fields. 
        data = {
            "first_name": "John",
            "last_name": "Lennon",
            "birthday": "1940-10-9",
            # Keys with no matching Form field; they must not leak into
            # cleaned_data (asserted implicitly by the lookups below).
            "extra1": "hello",
            "extra2": "hello",
        }
        p = Person(data)
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data["first_name"], "John")
        self.assertEqual(p.cleaned_data["last_name"], "Lennon")
        # DateField normalizes the raw "1940-10-9" string into a datetime.date.
        self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))

    def test_optional_data(self):
        # cleaned_data will include a key and value for *all* fields defined in
        # the Form, even if the Form's data didn't include a value for fields
        # that are not required. In this example, the data dictionary doesn't
        # include a value for the "nick_name" field, but cleaned_data includes
        # it. For CharFields, it's set to the empty string.
        class OptionalPersonForm(Form):
            first_name = CharField()
            last_name = CharField()
            nick_name = CharField(required=False)

        data = {"first_name": "John", "last_name": "Lennon"}
        f = OptionalPersonForm(data)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data["nick_name"], "")
        self.assertEqual(f.cleaned_data["first_name"], "John")
        self.assertEqual(f.cleaned_data["last_name"], "Lennon")

        # For DateFields, it's set to None.
        # (This redefinition shadows the CharField-based class above; only
        # this version is used from here on.)
        class OptionalPersonForm(Form):
            first_name = CharField()
            last_name = CharField()
            birth_date = DateField(required=False)

        data = {"first_name": "John", "last_name": "Lennon"}
        f = OptionalPersonForm(data)
        self.assertTrue(f.is_valid())
        self.assertIsNone(f.cleaned_data["birth_date"])
        self.assertEqual(f.cleaned_data["first_name"], "John")
        self.assertEqual(f.cleaned_data["last_name"], "Lennon")

    def test_auto_id(self):
        # "auto_id" tells the Form to add an "id" attribute to each form
        # element. If it's a string that contains '%s', Django will use that as
        # a format string into which the field's name will be inserted. It will
        # also put a <label> around the human-readable labels for a field.
p = Person(auto_id="%s_id") self.assertHTMLEqual( p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td> <input type="text" name="first_name" id="first_name_id" required></td></tr> <tr><th><label for="last_name_id">Last name:</label></th><td> <input type="text" name="last_name" id="last_name_id" required></td></tr> <tr><th><label for="birthday_id">Birthday:</label></th><td> <input type="text" name="birthday" id="birthday_id" required></td></tr>""", ) self.assertHTMLEqual( p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" required></li> <li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" required></li> <li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" required></li>""", ) self.assertHTMLEqual( p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" required></p> <p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" required></p> <p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" required></p>""", ) self.assertHTMLEqual( p.as_div(), '<div><label for="first_name_id">First name:</label><input type="text" ' 'name="first_name" id="first_name_id" required></div><div><label ' 'for="last_name_id">Last name:</label><input type="text" ' 'name="last_name" id="last_name_id" required></div><div><label ' 'for="birthday_id">Birthday:</label><input type="text" name="birthday" ' 'id="birthday_id" required></div>', ) def test_auto_id_true(self): # If auto_id is any True value whose str() does not contain '%s', the # "id" attribute will be the name of the field. 
        p = Person(auto_id=True)
        # str(True) contains no "%s", so each widget's id is simply the field
        # name, and each <label for="..."> points at that id.
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="first_name">First name:</label>
<input type="text" name="first_name" id="first_name" required></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" required></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" required></li>""",
        )

    def test_auto_id_false(self):
        # If auto_id is any False value, an "id" attribute won't be output
        # unless it was manually entered.
        p = Person(auto_id=False)
        # With no ids there is nothing for <label for="..."> to target, so no
        # <label> wrappers are rendered either.
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>""",
        )

    def test_id_on_field(self):
        # In this example, auto_id is False, but the "id" attribute for the
        # "first_name" field is given. Also note that field gets a <label>,
        # while the others don't.
        # NOTE(review): PersonNew is defined elsewhere in this module;
        # presumably it sets an explicit widget id ("first_name_id") on
        # first_name only -- confirm against its definition.
        p = PersonNew(auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>""",
        )

    def test_auto_id_on_form_and_field(self):
        # If the "id" attribute is specified in the Form and auto_id is True,
        # the "id" attribute in the Form gets precedence.
p = PersonNew(auto_id=True) self.assertHTMLEqual( p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" required></li> <li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" required></li> <li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" required></li>""", ) def test_various_boolean_values(self): class SignupForm(Form): email = EmailField() get_spam = BooleanField() f = SignupForm(auto_id=False) self.assertHTMLEqual( str(f["email"]), '<input type="email" name="email" maxlength="320" required>', ) self.assertHTMLEqual( str(f["get_spam"]), '<input type="checkbox" name="get_spam" required>' ) f = SignupForm({"email": "test@example.com", "get_spam": True}, auto_id=False) self.assertHTMLEqual( str(f["email"]), '<input type="email" name="email" maxlength="320" value="test@example.com" ' "required>", ) self.assertHTMLEqual( str(f["get_spam"]), '<input checked type="checkbox" name="get_spam" required>', ) # 'True' or 'true' should be rendered without a value attribute f = SignupForm({"email": "test@example.com", "get_spam": "True"}, auto_id=False) self.assertHTMLEqual( str(f["get_spam"]), '<input checked type="checkbox" name="get_spam" required>', ) f = SignupForm({"email": "test@example.com", "get_spam": "true"}, auto_id=False) self.assertHTMLEqual( str(f["get_spam"]), '<input checked type="checkbox" name="get_spam" required>', ) # A value of 'False' or 'false' should be rendered unchecked f = SignupForm( {"email": "test@example.com", "get_spam": "False"}, auto_id=False ) self.assertHTMLEqual( str(f["get_spam"]), '<input type="checkbox" name="get_spam" aria-invalid="true" required>', ) f = SignupForm( {"email": "test@example.com", "get_spam": "false"}, auto_id=False ) self.assertHTMLEqual( str(f["get_spam"]), '<input type="checkbox" name="get_spam" aria-invalid="true" required>', ) # A value of '0' should be interpreted 
as a True value (#16820) f = SignupForm({"email": "test@example.com", "get_spam": "0"}) self.assertTrue(f.is_valid()) self.assertTrue(f.cleaned_data.get("get_spam")) def test_widget_output(self): # Any Field can have a Widget class passed to its constructor: class ContactForm(Form): subject = CharField() message = CharField(widget=Textarea) f = ContactForm(auto_id=False) self.assertHTMLEqual( str(f["subject"]), '<input type="text" name="subject" required>' ) self.assertHTMLEqual( str(f["message"]), '<textarea name="message" rows="10" cols="40" required></textarea>', ) # as_textarea(), as_text() and as_hidden() are shortcuts for changing # the output widget type: self.assertHTMLEqual( f["subject"].as_textarea(), '<textarea name="subject" rows="10" cols="40" required></textarea>', ) self.assertHTMLEqual( f["message"].as_text(), '<input type="text" name="message" required>' ) self.assertHTMLEqual( f["message"].as_hidden(), '<input type="hidden" name="message">' ) # The 'widget' parameter to a Field can also be an instance: class ContactForm(Form): subject = CharField() message = CharField(widget=Textarea(attrs={"rows": 80, "cols": 20})) f = ContactForm(auto_id=False) self.assertHTMLEqual( str(f["message"]), '<textarea name="message" rows="80" cols="20" required></textarea>', ) # Instance-level attrs are *not* carried over to as_textarea(), # as_text() and as_hidden(): self.assertHTMLEqual( f["message"].as_text(), '<input type="text" name="message" required>' ) f = ContactForm({"subject": "Hello", "message": "I love you."}, auto_id=False) self.assertHTMLEqual( f["subject"].as_textarea(), '<textarea rows="10" cols="40" name="subject" required>Hello</textarea>', ) self.assertHTMLEqual( f["message"].as_text(), '<input type="text" name="message" value="I love you." 
required>', ) self.assertHTMLEqual( f["message"].as_hidden(), '<input type="hidden" name="message" value="I love you.">', ) def test_forms_with_choices(self): # For a form with a <select>, use ChoiceField: class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[("P", "Python"), ("J", "Java")]) f = FrameworkForm(auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""", ) f = FrameworkForm({"name": "Django", "language": "P"}, auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select name="language"> <option value="P" selected>Python</option> <option value="J">Java</option> </select>""", ) # A subtlety: If one of the choices' value is the empty string and the # form is unbound, then the <option> for the empty-string choice will # get selected. class FrameworkForm(Form): name = CharField() language = ChoiceField( choices=[("", "------"), ("P", "Python"), ("J", "Java")] ) f = FrameworkForm(auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select name="language" required> <option value="" selected>------</option> <option value="P">Python</option> <option value="J">Java</option> </select>""", ) # You can specify widget attributes in the Widget constructor. 
class FrameworkForm(Form): name = CharField() language = ChoiceField( choices=[("P", "Python"), ("J", "Java")], widget=Select(attrs={"class": "foo"}), ) f = FrameworkForm(auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select class="foo" name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""", ) f = FrameworkForm({"name": "Django", "language": "P"}, auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select class="foo" name="language"> <option value="P" selected>Python</option> <option value="J">Java</option> </select>""", ) # When passing a custom widget instance to ChoiceField, note that # setting 'choices' on the widget is meaningless. The widget will use # the choices defined on the Field, not the ones defined on the Widget. class FrameworkForm(Form): name = CharField() language = ChoiceField( choices=[("P", "Python"), ("J", "Java")], widget=Select( choices=[("R", "Ruby"), ("P", "Perl")], attrs={"class": "foo"} ), ) f = FrameworkForm(auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select class="foo" name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""", ) f = FrameworkForm({"name": "Django", "language": "P"}, auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select class="foo" name="language"> <option value="P" selected>Python</option> <option value="J">Java</option> </select>""", ) # You can set a ChoiceField's choices after the fact. class FrameworkForm(Form): name = CharField() language = ChoiceField() f = FrameworkForm(auto_id=False) self.assertHTMLEqual( str(f["language"]), """<select name="language"> </select>""", ) f.fields["language"].choices = [("P", "Python"), ("J", "Java")] self.assertHTMLEqual( str(f["language"]), """<select name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""", ) def test_forms_with_radio(self): # Add widget=RadioSelect to use that widget with a ChoiceField. 
f = FrameworkForm(auto_id=False) self.assertHTMLEqual( str(f["language"]), """<div> <div><label><input type="radio" name="language" value="P" required> Python</label></div> <div><label><input type="radio" name="language" value="J" required> Java</label></div> </div>""", ) self.assertHTMLEqual( f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" required></td></tr> <tr><th>Language:</th><td><div> <div><label><input type="radio" name="language" value="P" required> Python</label></div> <div><label><input type="radio" name="language" value="J" required> Java</label></div> </div></td></tr>""", ) self.assertHTMLEqual( f.as_ul(), """<li>Name: <input type="text" name="name" required></li> <li>Language: <div> <div><label><input type="radio" name="language" value="P" required> Python</label></div> <div><label><input type="radio" name="language" value="J" required> Java</label></div> </div></li>""", ) # Need an auto_id to generate legend. self.assertHTMLEqual( f.render(f.template_name_div), '<div> Name: <input type="text" name="name" required></div><div><fieldset>' 'Language:<div><div><label><input type="radio" name="language" value="P" ' 'required> Python</label></div><div><label><input type="radio" ' 'name="language" value="J" required> Java</label></div></div></fieldset>' "</div>", ) # Regarding auto_id and <label>, RadioSelect is a special case. Each # radio button gets a distinct ID, formed by appending an underscore # plus the button's zero-based index. 
f = FrameworkForm(auto_id="id_%s") self.assertHTMLEqual( str(f["language"]), """ <div id="id_language"> <div><label for="id_language_0"> <input type="radio" id="id_language_0" value="P" name="language" required> Python</label></div> <div><label for="id_language_1"> <input type="radio" id="id_language_1" value="J" name="language" required> Java</label></div> </div>""", ) # When RadioSelect is used with auto_id, and the whole form is printed # using either as_table() or as_ul(), the label for the RadioSelect # will **not** point to the ID of the *first* radio button to improve # accessibility for screen reader users. self.assertHTMLEqual( f.as_table(), """ <tr><th><label for="id_name">Name:</label></th><td> <input type="text" name="name" id="id_name" required></td></tr> <tr><th><label>Language:</label></th><td><div id="id_language"> <div><label for="id_language_0"> <input type="radio" id="id_language_0" value="P" name="language" required> Python</label></div> <div><label for="id_language_1"> <input type="radio" id="id_language_1" value="J" name="language" required> Java</label></div> </div></td></tr>""", ) self.assertHTMLEqual( f.as_ul(), """ <li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></li> <li><label>Language:</label> <div id="id_language"> <div><label for="id_language_0"> <input type="radio" id="id_language_0" value="P" name="language" required> Python</label></div> <div><label for="id_language_1"> <input type="radio" id="id_language_1" value="J" name="language" required> Java</label></div> </div></li> """, ) self.assertHTMLEqual( f.as_p(), """ <p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" required></p> <p><label>Language:</label> <div id="id_language"> <div><label for="id_language_0"> <input type="radio" id="id_language_0" value="P" name="language" required> Python</label></div> <div><label for="id_language_1"> <input type="radio" id="id_language_1" value="J" name="language" 
required> Java</label></div> </div></p> """, ) self.assertHTMLEqual( f.render(f.template_name_div), '<div><label for="id_name">Name:</label><input type="text" name="name" ' 'required id="id_name"></div><div><fieldset><legend>Language:</legend>' '<div id="id_language"><div><label for="id_language_0"><input ' 'type="radio" name="language" value="P" required id="id_language_0">' 'Python</label></div><div><label for="id_language_1"><input type="radio" ' 'name="language" value="J" required id="id_language_1">Java</label></div>' "</div></fieldset></div>", ) def test_form_with_iterable_boundfield(self): class BeatleForm(Form): name = ChoiceField( choices=[ ("john", "John"), ("paul", "Paul"), ("george", "George"), ("ringo", "Ringo"), ], widget=RadioSelect, ) f = BeatleForm(auto_id=False) self.assertHTMLEqual( "\n".join(str(bf) for bf in f["name"]), '<label><input type="radio" name="name" value="john" required> John</label>' '<label><input type="radio" name="name" value="paul" required> Paul</label>' '<label><input type="radio" name="name" value="george" required> George' "</label>" '<label><input type="radio" name="name" value="ringo" required> Ringo' "</label>", ) self.assertHTMLEqual( "\n".join("<div>%s</div>" % bf for bf in f["name"]), """ <div><label> <input type="radio" name="name" value="john" required> John</label></div> <div><label> <input type="radio" name="name" value="paul" required> Paul</label></div> <div><label> <input type="radio" name="name" value="george" required> George </label></div> <div><label> <input type="radio" name="name" value="ringo" required> Ringo</label></div> """, ) def test_form_with_iterable_boundfield_id(self): class BeatleForm(Form): name = ChoiceField( choices=[ ("john", "John"), ("paul", "Paul"), ("george", "George"), ("ringo", "Ringo"), ], widget=RadioSelect, ) fields = list(BeatleForm()["name"]) self.assertEqual(len(fields), 4) self.assertEqual(fields[0].id_for_label, "id_name_0") self.assertEqual(fields[0].choice_label, "John") 
self.assertHTMLEqual( fields[0].tag(), '<input type="radio" name="name" value="john" id="id_name_0" required>', ) self.assertHTMLEqual( str(fields[0]), '<label for="id_name_0"><input type="radio" name="name" ' 'value="john" id="id_name_0" required> John</label>', ) self.assertEqual(fields[1].id_for_label, "id_name_1") self.assertEqual(fields[1].choice_label, "Paul") self.assertHTMLEqual( fields[1].tag(), '<input type="radio" name="name" value="paul" id="id_name_1" required>', ) self.assertHTMLEqual( str(fields[1]), '<label for="id_name_1"><input type="radio" name="name" ' 'value="paul" id="id_name_1" required> Paul</label>', ) def test_iterable_boundfield_select(self): class BeatleForm(Form): name = ChoiceField( choices=[ ("john", "John"), ("paul", "Paul"), ("george", "George"), ("ringo", "Ringo"), ] ) fields = list(BeatleForm(auto_id=False)["name"]) self.assertEqual(len(fields), 4) self.assertIsNone(fields[0].id_for_label) self.assertEqual(fields[0].choice_label, "John") self.assertHTMLEqual(fields[0].tag(), '<option value="john">John</option>') self.assertHTMLEqual(str(fields[0]), '<option value="john">John</option>') def test_form_with_noniterable_boundfield(self): # You can iterate over any BoundField, not just those with # widget=RadioSelect. class BeatleForm(Form): name = CharField() f = BeatleForm(auto_id=False) self.assertHTMLEqual( "\n".join(str(bf) for bf in f["name"]), '<input type="text" name="name" required>', ) def test_boundfield_slice(self): class BeatleForm(Form): name = ChoiceField( choices=[ ("john", "John"), ("paul", "Paul"), ("george", "George"), ("ringo", "Ringo"), ], widget=RadioSelect, ) f = BeatleForm() bf = f["name"] self.assertEqual( [str(item) for item in bf[1:]], [str(bf[1]), str(bf[2]), str(bf[3])], ) def test_boundfield_invalid_index(self): class TestForm(Form): name = ChoiceField(choices=[]) field = TestForm()["name"] msg = "BoundField indices must be integers or slices, not str." 
with self.assertRaisesMessage(TypeError, msg): field["foo"] def test_boundfield_bool(self): """BoundField without any choices (subwidgets) evaluates to True.""" class TestForm(Form): name = ChoiceField(choices=[]) self.assertIs(bool(TestForm()["name"]), True) def test_forms_with_multiple_choice(self): # MultipleChoiceField is a special case, as its data is required to be # a list: class SongForm(Form): name = CharField() composers = MultipleChoiceField() f = SongForm(auto_id=False) self.assertHTMLEqual( str(f["composers"]), """<select multiple name="composers" required> </select>""", ) class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[("J", "John Lennon"), ("P", "Paul McCartney")] ) f = SongForm(auto_id=False) self.assertHTMLEqual( str(f["composers"]), """<select multiple name="composers" required> <option value="J">John Lennon</option> <option value="P">Paul McCartney</option> </select>""", ) f = SongForm({"name": "Yesterday", "composers": ["P"]}, auto_id=False) self.assertHTMLEqual( str(f["name"]), '<input type="text" name="name" value="Yesterday" required>' ) self.assertHTMLEqual( str(f["composers"]), """<select multiple name="composers" required> <option value="J">John Lennon</option> <option value="P" selected>Paul McCartney</option> </select>""", ) f = SongForm() self.assertHTMLEqual( f.as_table(), '<tr><th><label for="id_name">Name:</label></th>' '<td><input type="text" name="name" required id="id_name"></td>' '</tr><tr><th><label for="id_composers">Composers:</label></th>' '<td><select name="composers" required id="id_composers" multiple>' '<option value="J">John Lennon</option>' '<option value="P">Paul McCartney</option>' "</select></td></tr>", ) self.assertHTMLEqual( f.as_ul(), '<li><label for="id_name">Name:</label>' '<input type="text" name="name" required id="id_name"></li>' '<li><label for="id_composers">Composers:</label>' '<select name="composers" required id="id_composers" multiple>' '<option value="J">John 
Lennon</option>' '<option value="P">Paul McCartney</option>' "</select></li>", ) self.assertHTMLEqual( f.as_p(), '<p><label for="id_name">Name:</label>' '<input type="text" name="name" required id="id_name"></p>' '<p><label for="id_composers">Composers:</label>' '<select name="composers" required id="id_composers" multiple>' '<option value="J">John Lennon</option>' '<option value="P">Paul McCartney</option>' "</select></p>", ) self.assertHTMLEqual( f.render(f.template_name_div), '<div><label for="id_name">Name:</label><input type="text" name="name" ' 'required id="id_name"></div><div><label for="id_composers">Composers:' '</label><select name="composers" required id="id_composers" multiple>' '<option value="J">John Lennon</option><option value="P">Paul McCartney' "</option></select></div>", ) def test_multiple_checkbox_render(self): f = SongForm() self.assertHTMLEqual( f.as_table(), '<tr><th><label for="id_name">Name:</label></th><td>' '<input type="text" name="name" required id="id_name"></td></tr>' '<tr><th><label>Composers:</label></th><td><div id="id_composers">' '<div><label for="id_composers_0">' '<input type="checkbox" name="composers" value="J" ' 'id="id_composers_0">John Lennon</label></div>' '<div><label for="id_composers_1">' '<input type="checkbox" name="composers" value="P" ' 'id="id_composers_1">Paul McCartney</label></div>' "</div></td></tr>", ) self.assertHTMLEqual( f.as_ul(), '<li><label for="id_name">Name:</label>' '<input type="text" name="name" required id="id_name"></li>' '<li><label>Composers:</label><div id="id_composers">' '<div><label for="id_composers_0">' '<input type="checkbox" name="composers" value="J" ' 'id="id_composers_0">John Lennon</label></div>' '<div><label for="id_composers_1">' '<input type="checkbox" name="composers" value="P" ' 'id="id_composers_1">Paul McCartney</label></div>' "</div></li>", ) self.assertHTMLEqual( f.as_p(), '<p><label for="id_name">Name:</label>' '<input type="text" name="name" required id="id_name"></p>' 
'<p><label>Composers:</label><div id="id_composers">' '<div><label for="id_composers_0">' '<input type="checkbox" name="composers" value="J" ' 'id="id_composers_0">John Lennon</label></div>' '<div><label for="id_composers_1">' '<input type="checkbox" name="composers" value="P" ' 'id="id_composers_1">Paul McCartney</label></div>' "</div></p>", ) self.assertHTMLEqual( f.render(f.template_name_div), '<div><label for="id_name">Name:</label><input type="text" name="name" ' 'required id="id_name"></div><div><fieldset><legend>Composers:</legend>' '<div id="id_composers"><div><label for="id_composers_0"><input ' 'type="checkbox" name="composers" value="J" id="id_composers_0">' 'John Lennon</label></div><div><label for="id_composers_1"><input ' 'type="checkbox" name="composers" value="P" id="id_composers_1">' "Paul McCartney</label></div></div></fieldset></div>", ) def test_form_with_disabled_fields(self): class PersonForm(Form): name = CharField() birthday = DateField(disabled=True) class PersonFormFieldInitial(Form): name = CharField() birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16)) # Disabled fields are generally not transmitted by user agents. # The value from the form's initial data is used. f1 = PersonForm( {"name": "John Doe"}, initial={"birthday": datetime.date(1974, 8, 16)} ) f2 = PersonFormFieldInitial({"name": "John Doe"}) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {"birthday": datetime.date(1974, 8, 16), "name": "John Doe"}, ) # Values provided in the form's data are ignored. data = {"name": "John Doe", "birthday": "1984-11-10"} f1 = PersonForm(data, initial={"birthday": datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {"birthday": datetime.date(1974, 8, 16), "name": "John Doe"}, ) # Initial data remains present on invalid forms. 
data = {} f1 = PersonForm(data, initial={"birthday": datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertFalse(form.is_valid()) self.assertEqual(form["birthday"].value(), datetime.date(1974, 8, 16)) def test_hidden_data(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[("J", "John Lennon"), ("P", "Paul McCartney")] ) # MultipleChoiceField rendered as_hidden() is a special case. Because # it can have multiple values, its as_hidden() renders multiple <input # type="hidden"> tags. f = SongForm({"name": "Yesterday", "composers": ["P"]}, auto_id=False) self.assertHTMLEqual( f["composers"].as_hidden(), '<input type="hidden" name="composers" value="P">', ) f = SongForm({"name": "From Me To You", "composers": ["P", "J"]}, auto_id=False) self.assertHTMLEqual( f["composers"].as_hidden(), """<input type="hidden" name="composers" value="P"> <input type="hidden" name="composers" value="J">""", ) # DateTimeField rendered as_hidden() is special too class MessageForm(Form): when = SplitDateTimeField() f = MessageForm({"when_0": "1992-01-01", "when_1": "01:01"}) self.assertTrue(f.is_valid()) self.assertHTMLEqual( str(f["when"]), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" ' "required>" '<input type="text" name="when_1" value="01:01" id="id_when_1" required>', ) self.assertHTMLEqual( f["when"].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0">' '<input type="hidden" name="when_1" value="01:01" id="id_when_1">', ) def test_multiple_choice_checkbox(self): # MultipleChoiceField can also be used with the CheckboxSelectMultiple # widget. 
f = SongForm(auto_id=False) self.assertHTMLEqual( str(f["composers"]), """ <div> <div><label><input type="checkbox" name="composers" value="J"> John Lennon</label></div> <div><label><input type="checkbox" name="composers" value="P"> Paul McCartney</label></div> </div> """, ) f = SongForm({"composers": ["J"]}, auto_id=False) self.assertHTMLEqual( str(f["composers"]), """ <div> <div><label><input checked type="checkbox" name="composers" value="J"> John Lennon</label></div> <div><label><input type="checkbox" name="composers" value="P"> Paul McCartney</label></div> </div> """, ) f = SongForm({"composers": ["J", "P"]}, auto_id=False) self.assertHTMLEqual( str(f["composers"]), """ <div> <div><label><input checked type="checkbox" name="composers" value="J"> John Lennon</label></div> <div><label><input checked type="checkbox" name="composers" value="P"> Paul McCartney</label></div> </div> """, ) def test_checkbox_auto_id(self): # Regarding auto_id, CheckboxSelectMultiple is a special case. Each # checkbox gets a distinct ID, formed by appending an underscore plus # the checkbox's zero-based index. class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[("J", "John Lennon"), ("P", "Paul McCartney")], widget=CheckboxSelectMultiple, ) f = SongForm(auto_id="%s_id") self.assertHTMLEqual( str(f["composers"]), """ <div id="composers_id"> <div><label for="composers_id_0"> <input type="checkbox" name="composers" value="J" id="composers_id_0"> John Lennon</label></div> <div><label for="composers_id_1"> <input type="checkbox" name="composers" value="P" id="composers_id_1"> Paul McCartney</label></div> </div> """, ) def test_multiple_choice_list_data(self): # Data for a MultipleChoiceField should be a list. QueryDict and # MultiValueDict conveniently work with this. 
class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[("J", "John Lennon"), ("P", "Paul McCartney")], widget=CheckboxSelectMultiple, ) data = {"name": "Yesterday", "composers": ["J", "P"]} f = SongForm(data) self.assertEqual(f.errors, {}) data = QueryDict("name=Yesterday&composers=J&composers=P") f = SongForm(data) self.assertEqual(f.errors, {}) data = MultiValueDict({"name": ["Yesterday"], "composers": ["J", "P"]}) f = SongForm(data) self.assertEqual(f.errors, {}) # SelectMultiple uses ducktyping so that MultiValueDictLike.getlist() # is called. f = SongForm(MultiValueDictLike({"name": "Yesterday", "composers": "J"})) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data["composers"], ["J"]) def test_multiple_hidden(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField( choices=[("J", "John Lennon"), ("P", "Paul McCartney")], widget=CheckboxSelectMultiple, ) # The MultipleHiddenInput widget renders multiple values as hidden # fields. class SongFormHidden(Form): name = CharField() composers = MultipleChoiceField( choices=[("J", "John Lennon"), ("P", "Paul McCartney")], widget=MultipleHiddenInput, ) f = SongFormHidden( MultiValueDict({"name": ["Yesterday"], "composers": ["J", "P"]}), auto_id=False, ) self.assertHTMLEqual( f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" required> <input type="hidden" name="composers" value="J"> <input type="hidden" name="composers" value="P"></li>""", ) # When using CheckboxSelectMultiple, the framework expects a list of # input and returns a list of input. 
f = SongForm({"name": "Yesterday"}, auto_id=False) self.assertEqual(f.errors["composers"], ["This field is required."]) f = SongForm({"name": "Yesterday", "composers": ["J"]}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data["composers"], ["J"]) self.assertEqual(f.cleaned_data["name"], "Yesterday") f = SongForm({"name": "Yesterday", "composers": ["J", "P"]}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data["composers"], ["J", "P"]) self.assertEqual(f.cleaned_data["name"], "Yesterday") # MultipleHiddenInput uses ducktyping so that # MultiValueDictLike.getlist() is called. f = SongForm(MultiValueDictLike({"name": "Yesterday", "composers": "J"})) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data["composers"], ["J"]) def test_escaping(self): # Validation errors are HTML-escaped when output as HTML. class EscapingForm(Form): special_name = CharField(label="<em>Special</em> Field") special_safe_name = CharField(label=mark_safe("<em>Special</em> Field")) def clean_special_name(self): raise ValidationError( "Something's wrong with '%s'" % self.cleaned_data["special_name"] ) def clean_special_safe_name(self): raise ValidationError( mark_safe( "'<b>%s</b>' is a safe string" % self.cleaned_data["special_safe_name"] ) ) f = EscapingForm( { "special_name": "Nothing to escape", "special_safe_name": "Nothing to escape", }, auto_id=False, ) self.assertHTMLEqual( f.as_table(), """ <tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td> <ul class="errorlist"> <li>Something&#x27;s wrong with &#x27;Nothing to escape&#x27;</li></ul> <input type="text" name="special_name" value="Nothing to escape" aria-invalid="true" required></td></tr> <tr><th><em>Special</em> Field:</th><td> <ul class="errorlist"> <li>'<b>Nothing to escape</b>' is a safe string</li></ul> <input type="text" name="special_safe_name" value="Nothing to escape" aria-invalid="true" required></td></tr> """, ) f = EscapingForm( { "special_name": "Should 
escape < & > and <script>alert('xss')</script>", "special_safe_name": "<i>Do not escape</i>", }, auto_id=False, ) self.assertHTMLEqual( f.as_table(), "<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td>" '<ul class="errorlist"><li>' "Something&#x27;s wrong with &#x27;Should escape &lt; &amp; &gt; and " "&lt;script&gt;alert(&#x27;xss&#x27;)&lt;/script&gt;&#x27;</li></ul>" '<input type="text" name="special_name" value="Should escape &lt; &amp; ' '&gt; and &lt;script&gt;alert(&#x27;xss&#x27;)&lt;/script&gt;" ' 'aria-invalid="true" required></td></tr>' "<tr><th><em>Special</em> Field:</th><td>" '<ul class="errorlist">' "<li>'<b><i>Do not escape</i></b>' is a safe string</li></ul>" '<input type="text" name="special_safe_name" ' 'value="&lt;i&gt;Do not escape&lt;/i&gt;" aria-invalid="true" required>' "</td></tr>", ) def test_validating_multiple_fields(self): # There are a couple of ways to do multiple-field validation. If you # want the validation message to be associated with a particular field, # implement the clean_XXX() method on the Form, where XXX is the field # name. As in Field.clean(), the clean_XXX() method should return the # cleaned value. In the clean_XXX() method, you have access to # self.cleaned_data, which is a dictionary of all the data that has # been cleaned *so far*, in order by the fields, including the current # field (e.g., the field XXX if you're in clean_XXX()). 
# One way to validate across fields: a clean_<fieldname>() method on the
# Form. It runs after the field's own clean(), must return the cleaned
# value, and may consult self.cleaned_data, which at that point holds
# every field cleaned so far (in field-declaration order, including the
# current field).
class UserRegistration(Form):
    username = CharField(max_length=10)
    password1 = CharField(widget=PasswordInput)
    password2 = CharField(widget=PasswordInput)

    def clean_password2(self):
        pw1 = self.cleaned_data.get("password1")
        pw2 = self.cleaned_data.get("password2")
        if pw1 and pw2 and pw1 != pw2:
            raise ValidationError("Please make sure your passwords match.")
        return self.cleaned_data["password2"]

# Unbound form: no validation runs, so no errors.
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})

# Bound to empty data: every field reports "required".
f = UserRegistration({}, auto_id=False)
for required_field in ("username", "password1", "password2"):
    self.assertEqual(f.errors[required_field], ["This field is required."])

# Mismatched passwords: the error is attached to password2.
f = UserRegistration(
    {"username": "adrian", "password1": "foo", "password2": "bar"},
    auto_id=False,
)
self.assertEqual(
    f.errors["password2"], ["Please make sure your passwords match."]
)

# Matching passwords validate and populate cleaned_data.
f = UserRegistration(
    {"username": "adrian", "password1": "foo", "password2": "foo"},
    auto_id=False,
)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["username"], "adrian")
self.assertEqual(f.cleaned_data["password1"], "foo")
self.assertEqual(f.cleaned_data["password2"], "foo")

# Another way of doing multiple-field validation is by implementing the
# Form's clean() method. Usually ValidationError raised by that method
# will not be associated with a particular field and will have a
# special-case association with the field named '__all__'. It's
# possible to associate the errors to particular field with the
# Form.add_error() method or by passing a dictionary that maps each
# field to one or more errors.
#
# Note that in Form.clean(), you have access to self.cleaned_data, a
# dictionary of all the fields/values that have *not* raised a
# ValidationError. Also note Form.clean() is required to return a
# dictionary of all clean data.
class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): # Test raising a ValidationError as NON_FIELD_ERRORS. if ( self.cleaned_data.get("password1") and self.cleaned_data.get("password2") and self.cleaned_data["password1"] != self.cleaned_data["password2"] ): raise ValidationError("Please make sure your passwords match.") # Test raising ValidationError that targets multiple fields. errors = {} if self.cleaned_data.get("password1") == "FORBIDDEN_VALUE": errors["password1"] = "Forbidden value." if self.cleaned_data.get("password2") == "FORBIDDEN_VALUE": errors["password2"] = ["Forbidden value."] if errors: raise ValidationError(errors) # Test Form.add_error() if self.cleaned_data.get("password1") == "FORBIDDEN_VALUE2": self.add_error(None, "Non-field error 1.") self.add_error("password1", "Forbidden value 2.") if self.cleaned_data.get("password2") == "FORBIDDEN_VALUE2": self.add_error("password2", "Forbidden value 2.") raise ValidationError("Non-field error 2.") return self.cleaned_data f = UserRegistration(auto_id=False) self.assertEqual(f.errors, {}) f = UserRegistration({}, auto_id=False) self.assertHTMLEqual( f.as_table(), """<tr><th>Username:</th><td> <ul class="errorlist"><li>This field is required.</li></ul> <input type="text" name="username" maxlength="10" aria-invalid="true" required> </td></tr> <tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul> <input type="password" name="password1" aria-invalid="true" required></td></tr> <tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul> <input type="password" name="password2" aria-invalid="true" required></td></tr>""", ) self.assertEqual(f.errors["username"], ["This field is required."]) self.assertEqual(f.errors["password1"], ["This field is required."]) self.assertEqual(f.errors["password2"], ["This field is required."]) f = 
UserRegistration( {"username": "adrian", "password1": "foo", "password2": "bar"}, auto_id=False, ) self.assertEqual( f.errors["__all__"], ["Please make sure your passwords match."] ) self.assertHTMLEqual( f.as_table(), """ <tr><td colspan="2"> <ul class="errorlist nonfield"> <li>Please make sure your passwords match.</li></ul></td></tr> <tr><th>Username:</th><td> <input type="text" name="username" value="adrian" maxlength="10" required> </td></tr> <tr><th>Password1:</th><td> <input type="password" name="password1" required></td></tr> <tr><th>Password2:</th><td> <input type="password" name="password2" required></td></tr> """, ) self.assertHTMLEqual( f.as_ul(), """ <li><ul class="errorlist nonfield"> <li>Please make sure your passwords match.</li></ul></li> <li>Username: <input type="text" name="username" value="adrian" maxlength="10" required> </li> <li>Password1: <input type="password" name="password1" required></li> <li>Password2: <input type="password" name="password2" required></li> """, ) self.assertHTMLEqual( f.render(f.template_name_div), '<ul class="errorlist nonfield"><li>Please make sure your passwords match.' 
'</li></ul><div>Username: <input type="text" name="username" ' 'value="adrian" maxlength="10" required></div><div>Password1: <input ' 'type="password" name="password1" required></div><div>Password2: <input ' 'type="password" name="password2" required></div>', ) f = UserRegistration( {"username": "adrian", "password1": "foo", "password2": "foo"}, auto_id=False, ) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data["username"], "adrian") self.assertEqual(f.cleaned_data["password1"], "foo") self.assertEqual(f.cleaned_data["password2"], "foo") f = UserRegistration( { "username": "adrian", "password1": "FORBIDDEN_VALUE", "password2": "FORBIDDEN_VALUE", }, auto_id=False, ) self.assertEqual(f.errors["password1"], ["Forbidden value."]) self.assertEqual(f.errors["password2"], ["Forbidden value."]) f = UserRegistration( { "username": "adrian", "password1": "FORBIDDEN_VALUE2", "password2": "FORBIDDEN_VALUE2", }, auto_id=False, ) self.assertEqual( f.errors["__all__"], ["Non-field error 1.", "Non-field error 2."] ) self.assertEqual(f.errors["password1"], ["Forbidden value 2."]) self.assertEqual(f.errors["password2"], ["Forbidden value 2."]) with self.assertRaisesMessage(ValueError, "has no field named"): f.add_error("missing_field", "Some error.") with self.assertRaisesMessage( TypeError, "The argument `field` must be `None` when the `error` argument is a " "dictionary.", ): f.add_error("password1", ValidationError({"password1": "Some error."})) def test_update_error_dict(self): class CodeForm(Form): code = CharField(max_length=10) def clean(self): try: raise ValidationError({"code": [ValidationError("Code error 1.")]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError({"code": [ValidationError("Code error 2.")]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError({"code": ErrorList(["Code error 3."])}) except ValidationError as e: self._errors = 
e.update_error_dict(self._errors) try: raise ValidationError("Non-field error 1.") except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError([ValidationError("Non-field error 2.")]) except ValidationError as e: self._errors = e.update_error_dict(self._errors) # The newly added list of errors is an instance of ErrorList. for field, error_list in self._errors.items(): if not isinstance(error_list, self.error_class): self._errors[field] = self.error_class(error_list) form = CodeForm({"code": "hello"}) # Trigger validation. self.assertFalse(form.is_valid()) # update_error_dict didn't lose track of the ErrorDict type. self.assertIsInstance(form._errors, ErrorDict) self.assertEqual( dict(form.errors), { "code": ["Code error 1.", "Code error 2.", "Code error 3."], NON_FIELD_ERRORS: ["Non-field error 1.", "Non-field error 2."], }, ) def test_has_error(self): class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput, min_length=5) password2 = CharField(widget=PasswordInput) def clean(self): if ( self.cleaned_data.get("password1") and self.cleaned_data.get("password2") and self.cleaned_data["password1"] != self.cleaned_data["password2"] ): raise ValidationError( "Please make sure your passwords match.", code="password_mismatch", ) f = UserRegistration(data={}) self.assertTrue(f.has_error("password1")) self.assertTrue(f.has_error("password1", "required")) self.assertFalse(f.has_error("password1", "anything")) f = UserRegistration(data={"password1": "Hi", "password2": "Hi"}) self.assertTrue(f.has_error("password1")) self.assertTrue(f.has_error("password1", "min_length")) self.assertFalse(f.has_error("password1", "anything")) self.assertFalse(f.has_error("password2")) self.assertFalse(f.has_error("password2", "anything")) f = UserRegistration(data={"password1": "Bonjour", "password2": "Hello"}) self.assertFalse(f.has_error("password1")) self.assertFalse(f.has_error("password1", 
"required")) self.assertTrue(f.has_error(NON_FIELD_ERRORS)) self.assertTrue(f.has_error(NON_FIELD_ERRORS, "password_mismatch")) self.assertFalse(f.has_error(NON_FIELD_ERRORS, "anything")) def test_html_output_with_hidden_input_field_errors(self): class TestForm(Form): hidden_input = CharField(widget=HiddenInput) def clean(self): self.add_error(None, "Form error") f = TestForm(data={}) error_dict = { "hidden_input": ["This field is required."], "__all__": ["Form error"], } self.assertEqual(f.errors, error_dict) f.as_table() self.assertEqual(f.errors, error_dict) self.assertHTMLEqual( f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield"><li>Form error</li>' "<li>(Hidden field hidden_input) This field is required.</li></ul>" '<input type="hidden" name="hidden_input" id="id_hidden_input"></td></tr>', ) self.assertHTMLEqual( f.as_ul(), '<li><ul class="errorlist nonfield"><li>Form error</li>' "<li>(Hidden field hidden_input) This field is required.</li></ul>" '<input type="hidden" name="hidden_input" id="id_hidden_input"></li>', ) self.assertHTMLEqual( f.as_p(), '<ul class="errorlist nonfield"><li>Form error</li>' "<li>(Hidden field hidden_input) This field is required.</li></ul>" '<p><input type="hidden" name="hidden_input" id="id_hidden_input"></p>', ) self.assertHTMLEqual( f.render(f.template_name_div), '<ul class="errorlist nonfield"><li>Form error</li>' "<li>(Hidden field hidden_input) This field is required.</li></ul>" '<div><input type="hidden" name="hidden_input" id="id_hidden_input"></div>', ) def test_dynamic_construction(self): # It's possible to construct a Form dynamically by adding to the # self.fields dictionary in __init__(). Don't forget to call # Form.__init__() within the subclass' __init__(). 
# Fields added in __init__() (after calling the parent __init__) appear
# after the declaratively-defined fields.
class Person(Form):
    first_name = CharField()
    last_name = CharField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["birthday"] = DateField()

p = Person(auto_id=False)
self.assertHTMLEqual(
    p.as_table(),
    """
    <tr><th>First name:</th><td>
    <input type="text" name="first_name" required></td></tr>
    <tr><th>Last name:</th><td>
    <input type="text" name="last_name" required></td></tr>
    <tr><th>Birthday:</th><td>
    <input type="text" name="birthday" required></td></tr>
    """,
)

# Instances of a dynamic Form do not persist fields from one Form
# instance to the next.
class MyForm(Form):
    # field_list defaults to an immutable empty tuple (not a mutable []),
    # so instances can never share and mutate a common default.
    def __init__(self, data=None, auto_id=False, field_list=()):
        Form.__init__(self, data, auto_id=auto_id)
        for field in field_list:
            self.fields[field[0]] = field[1]

field_list = [("field1", CharField()), ("field2", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
    my_form.as_table(),
    """
    <tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr>
    <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>
    """,
)
# A second instance built from a different field_list must not see the
# first instance's fields.
field_list = [("field3", CharField()), ("field4", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
    my_form.as_table(),
    """
    <tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr>
    <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>
    """,
)

class MyForm(Form):
    default_field_1 = CharField()
    default_field_2 = CharField()

    def __init__(self, data=None, auto_id=False, field_list=()):
        Form.__init__(self, data, auto_id=auto_id)
        for field in field_list:
            self.fields[field[0]] = field[1]

field_list = [("field1", CharField()), ("field2", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
    my_form.as_table(),
    """
    <tr><th>Default field 1:</th><td>
    <input type="text" name="default_field_1" required></td></tr>
    <tr><th>Default field 2:</th><td>
    <input type="text" name="default_field_2" required></td></tr>
    <tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr>
    <tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>
    """,
)
field_list = [("field3", CharField()), ("field4", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
    my_form.as_table(),
    """
    <tr><th>Default field 1:</th><td>
    <input type="text" name="default_field_1" required></td></tr>
    <tr><th>Default field 2:</th><td>
    <input type="text" name="default_field_2" required></td></tr>
    <tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr>
    <tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>
    """,
)

# Similarly, changes to field attributes do not persist from one Form
# instance to the next.
class Person(Form):
    first_name = CharField(required=False)
    last_name = CharField(required=False)

    def __init__(self, names_required=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if names_required:
            self.fields["first_name"].required = True
            self.fields["first_name"].widget.attrs["class"] = "required"
            self.fields["last_name"].required = True
            self.fields["last_name"].widget.attrs["class"] = "required"

# NOTE: the previous form of these assertions,
#     self.assertEqual(a, b, (expected, expected))
# passed the expected tuple as assertEqual's *msg* argument, so it only
# checked that the two fields agreed with each other — it would pass even
# if both held the wrong value. Compare the pair against the expected
# tuple instead.
f = Person(names_required=False)
self.assertEqual(
    (f["first_name"].field.required, f["last_name"].field.required),
    (False, False),
)
self.assertEqual(
    (f["first_name"].field.widget.attrs, f["last_name"].field.widget.attrs),
    ({}, {}),
)
f = Person(names_required=True)
self.assertEqual(
    (f["first_name"].field.required, f["last_name"].field.required),
    (True, True),
)
self.assertEqual(
    (f["first_name"].field.widget.attrs, f["last_name"].field.widget.attrs),
    ({"class": "required"}, {"class": "required"}),
)
# A fresh instance is back to the declared defaults.
f = Person(names_required=False)
self.assertEqual(
    (f["first_name"].field.required, f["last_name"].field.required),
    (False, False),
)
self.assertEqual(
    (f["first_name"].field.widget.attrs, f["last_name"].field.widget.attrs),
    ({}, {}),
)

class Person(Form):
    first_name = CharField(max_length=30)
    last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs): super().__init__(*args, **kwargs) if name_max_length: self.fields["first_name"].max_length = name_max_length self.fields["last_name"].max_length = name_max_length f = Person(name_max_length=None) self.assertEqual( f["first_name"].field.max_length, f["last_name"].field.max_length, (30, 30) ) f = Person(name_max_length=20) self.assertEqual( f["first_name"].field.max_length, f["last_name"].field.max_length, (20, 20) ) f = Person(name_max_length=None) self.assertEqual( f["first_name"].field.max_length, f["last_name"].field.max_length, (30, 30) ) # Similarly, choices do not persist from one Form instance to the next. # Refs #15127. class Person(Form): first_name = CharField(required=False) last_name = CharField(required=False) gender = ChoiceField(choices=(("f", "Female"), ("m", "Male"))) def __init__(self, allow_unspec_gender=False, *args, **kwargs): super().__init__(*args, **kwargs) if allow_unspec_gender: self.fields["gender"].choices += (("u", "Unspecified"),) f = Person() self.assertEqual(f["gender"].field.choices, [("f", "Female"), ("m", "Male")]) f = Person(allow_unspec_gender=True) self.assertEqual( f["gender"].field.choices, [("f", "Female"), ("m", "Male"), ("u", "Unspecified")], ) f = Person() self.assertEqual(f["gender"].field.choices, [("f", "Female"), ("m", "Male")]) def test_validators_independence(self): """ The list of form field validators can be modified without polluting other forms. """ class MyForm(Form): myfield = CharField(max_length=25) f1 = MyForm() f2 = MyForm() f1.fields["myfield"].validators[0] = MaxValueValidator(12) self.assertNotEqual( f1.fields["myfield"].validators[0], f2.fields["myfield"].validators[0] ) def test_hidden_widget(self): # HiddenInput widgets are displayed differently in the as_table(), # as_ul()) and as_p() output of a Form -- their verbose names are not # displayed, and a separate row is not displayed. 
They're displayed in # the last row of the form, directly after that row's form element. class Person(Form): first_name = CharField() last_name = CharField() hidden_text = CharField(widget=HiddenInput) birthday = DateField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_table(), """ <tr><th>First name:</th><td><input type="text" name="first_name" required> </td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" required> </td></tr> <tr><th>Birthday:</th> <td><input type="text" name="birthday" required> <input type="hidden" name="hidden_text"></td></tr> """, ) self.assertHTMLEqual( p.as_ul(), """ <li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required> <input type="hidden" name="hidden_text"></li> """, ) self.assertHTMLEqual( p.as_p(), """ <p>First name: <input type="text" name="first_name" required></p> <p>Last name: <input type="text" name="last_name" required></p> <p>Birthday: <input type="text" name="birthday" required> <input type="hidden" name="hidden_text"></p> """, ) self.assertHTMLEqual( p.as_div(), '<div>First name: <input type="text" name="first_name" required></div>' '<div>Last name: <input type="text" name="last_name" required></div><div>' 'Birthday: <input type="text" name="birthday" required><input ' 'type="hidden" name="hidden_text"></div>', ) # With auto_id set, a HiddenInput still gets an ID, but it doesn't get # a label. 
p = Person(auto_id="id_%s") self.assertHTMLEqual( p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td> <input type="text" name="first_name" id="id_first_name" required></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" id="id_last_name" required></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></td></tr>""", ) self.assertHTMLEqual( p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></li>""", ) self.assertHTMLEqual( p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" required></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" required></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" required> <input type="hidden" name="hidden_text" id="id_hidden_text"></p>""", ) self.assertHTMLEqual( p.as_div(), '<div><label for="id_first_name">First name:</label><input type="text" ' 'name="first_name" id="id_first_name" required></div><div><label ' 'for="id_last_name">Last name:</label><input type="text" name="last_name" ' 'id="id_last_name" required></div><div><label for="id_birthday">Birthday:' '</label><input type="text" name="birthday" id="id_birthday" required>' '<input type="hidden" name="hidden_text" id="id_hidden_text"></div>', ) # If a field with a HiddenInput has errors, the as_table() and as_ul() # output 
will include the error message(s) with the text "(Hidden field # [fieldname]) " prepended. This message is displayed at the top of the # output, regardless of its field's order in the form. p = Person( {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"}, auto_id=False, ) self.assertHTMLEqual( p.as_table(), """ <tr><td colspan="2"> <ul class="errorlist nonfield"><li> (Hidden field hidden_text) This field is required.</li></ul></td></tr> <tr><th>First name:</th><td> <input type="text" name="first_name" value="John" required></td></tr> <tr><th>Last name:</th><td> <input type="text" name="last_name" value="Lennon" required></td></tr> <tr><th>Birthday:</th><td> <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></td></tr> """, ) self.assertHTMLEqual( p.as_ul(), """ <li><ul class="errorlist nonfield"><li> (Hidden field hidden_text) This field is required.</li></ul></li> <li>First name: <input type="text" name="first_name" value="John" required> </li> <li>Last name: <input type="text" name="last_name" value="Lennon" required> </li> <li>Birthday: <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></li> """, ) self.assertHTMLEqual( p.as_p(), """ <ul class="errorlist nonfield"><li> (Hidden field hidden_text) This field is required.</li></ul> <p>First name: <input type="text" name="first_name" value="John" required> </p> <p>Last name: <input type="text" name="last_name" value="Lennon" required> </p> <p>Birthday: <input type="text" name="birthday" value="1940-10-9" required> <input type="hidden" name="hidden_text"></p> """, ) self.assertHTMLEqual( p.as_div(), '<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field ' 'is required.</li></ul><div>First name: <input type="text" ' 'name="first_name" value="John" required></div><div>Last name: <input ' 'type="text" name="last_name" value="Lennon" required></div><div>' 'Birthday: <input 
type="text" name="birthday" value="1940-10-9" required>' '<input type="hidden" name="hidden_text"></div>', ) # A corner case: It's possible for a form to have only HiddenInputs. class TestForm(Form): foo = CharField(widget=HiddenInput) bar = CharField(widget=HiddenInput) p = TestForm(auto_id=False) self.assertHTMLEqual( p.as_table(), '<input type="hidden" name="foo"><input type="hidden" name="bar">', ) self.assertHTMLEqual( p.as_ul(), '<input type="hidden" name="foo"><input type="hidden" name="bar">', ) self.assertHTMLEqual( p.as_p(), '<input type="hidden" name="foo"><input type="hidden" name="bar">' ) def test_hidden_widget_does_not_have_aria_describedby(self): class TestForm(Form): hidden_text = CharField(widget=HiddenInput, help_text="Help Text") f = TestForm() self.assertEqual( str(f), '<input type="hidden" name="hidden_text" id="id_hidden_text">' ) def test_field_order(self): # A Form's fields are displayed in the same order in which they were # defined. class TestForm(Form): field1 = CharField() field2 = CharField() field3 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field7 = CharField() field8 = CharField() field9 = CharField() field10 = CharField() field11 = CharField() field12 = CharField() field13 = CharField() field14 = CharField() p = TestForm(auto_id=False) self.assertHTMLEqual( p.as_table(), "".join( f"<tr><th>Field{i}:</th><td>" f'<input type="text" name="field{i}" required></td></tr>' for i in range(1, 15) ), ) def test_explicit_field_order(self): class TestFormParent(Form): field1 = CharField() field2 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field_order = ["field6", "field5", "field4", "field2", "field1"] class TestForm(TestFormParent): field3 = CharField() field_order = ["field2", "field4", "field3", "field5", "field6"] class TestFormRemove(TestForm): field1 = None class TestFormMissing(TestForm): field_order = ["field2", "field4", "field3", "field5", "field6", "field1"] field1 = 
None class TestFormInit(TestFormParent): field3 = CharField() field_order = None def __init__(self, **kwargs): super().__init__(**kwargs) self.order_fields(field_order=TestForm.field_order) p = TestFormParent() self.assertEqual(list(p.fields), TestFormParent.field_order) p = TestFormRemove() self.assertEqual(list(p.fields), TestForm.field_order) p = TestFormMissing() self.assertEqual(list(p.fields), TestForm.field_order) p = TestForm() self.assertEqual(list(p.fields), TestFormMissing.field_order) p = TestFormInit() order = [*TestForm.field_order, "field1"] self.assertEqual(list(p.fields), order) TestForm.field_order = ["unknown"] p = TestForm() self.assertEqual( list(p.fields), ["field1", "field2", "field4", "field5", "field6", "field3"] ) def test_form_html_attributes(self): # Some Field classes have an effect on the HTML attributes of their # associated Widget. If you set max_length in a CharField and its # associated widget is either a TextInput or PasswordInput, then the # widget's rendered HTML will include the "maxlength" attribute. class UserRegistration(Form): username = CharField(max_length=10) # uses TextInput by default password = CharField(max_length=10, widget=PasswordInput) realname = CharField( max_length=10, widget=TextInput ) # redundantly define widget, just to test address = CharField() # no max_length defined here p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" maxlength="10" required> </li> <li>Password: <input type="password" name="password" maxlength="10" required></li> <li>Realname: <input type="text" name="realname" maxlength="10" required> </li> <li>Address: <input type="text" name="address" required></li> """, ) # If you specify a custom "attrs" that includes the "maxlength" # attribute, the Field's max_length attribute will override whatever # "maxlength" you specify in "attrs". 
class UserRegistration(Form): username = CharField( max_length=10, widget=TextInput(attrs={"maxlength": 20}) ) password = CharField(max_length=10, widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), '<li>Username: <input type="text" name="username" maxlength="10" required>' "</li>" '<li>Password: <input type="password" name="password" maxlength="10" ' "required></li>", ) def test_specifying_labels(self): # You can specify the label for a field by using the 'label' argument # to a Field class. If you don't specify 'label', Django will use the # field name with underscores converted to spaces, and the initial # letter capitalized. class UserRegistration(Form): username = CharField(max_length=10, label="Your username") password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput, label="Contraseña (de nuevo)") p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """ <li>Your username: <input type="text" name="username" maxlength="10" required></li> <li>Password1: <input type="password" name="password1" required></li> <li>Contraseña (de nuevo): <input type="password" name="password2" required></li> """, ) # Labels for as_* methods will only end in a colon if they don't end in # other punctuation already. class Questions(Form): q1 = CharField(label="The first question") q2 = CharField(label="What is your name?") q3 = CharField(label="The answer to life is:") q4 = CharField(label="Answer this question!") q5 = CharField(label="The last question. Period.") self.assertHTMLEqual( Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" required></p> <p>What is your name? <input type="text" name="q2" required></p> <p>The answer to life is: <input type="text" name="q3" required></p> <p>Answer this question! <input type="text" name="q4" required></p> <p>The last question. Period. 
<input type="text" name="q5" required></p>""", ) self.assertHTMLEqual( Questions().as_p(), """ <p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" required></p> <p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" required></p> <p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" required></p> <p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" required></p> <p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" required></p> """, ) # If a label is set to the empty string for a field, that field won't # get a label. class UserRegistration(Form): username = CharField(max_length=10, label="") password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li> <input type="text" name="username" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li>""", ) p = UserRegistration(auto_id="id_%s") self.assertHTMLEqual( p.as_ul(), """ <li> <input id="id_username" type="text" name="username" maxlength="10" required> </li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required></li> """, ) # If label is None, Django will auto-create the label from the field # name. This is default behavior. 
# label=None is the default: the label is auto-derived from the field name.
class UserRegistration(Form):
    username = CharField(max_length=10, label=None)
    password = CharField(widget=PasswordInput)

p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
    p.as_ul(),
    '<li>Username: <input type="text" name="username" maxlength="10" required>'
    "</li>"
    '<li>Password: <input type="password" name="password" required></li>',
)
p = UserRegistration(auto_id="id_%s")
self.assertHTMLEqual(
    p.as_ul(),
    """<li><label for="id_username">Username:</label>
<input id="id_username" type="text" name="username" maxlength="10"
required></li>
<li><label for="id_password">Password:</label>
<input type="password" name="password" id="id_password" required></li>""",
)

def test_label_suffix(self):
    # The 'label_suffix' argument (on a Field or on the Form constructor)
    # controls the punctuation appended to labels. The default suffix is a
    # colon, which is only added when the label does not already end in
    # punctuation (., !, ? or :). An explicitly supplied suffix is always
    # appended, regardless of the label's last character.
    class FavoriteForm(Form):
        color = CharField(label="Favorite color?")
        animal = CharField(label="Favorite animal")
        answer = CharField(label="Secret answer", label_suffix=" =")

    # Default suffix: colon added only where no punctuation already ends
    # the label; the per-field suffix on 'answer' always wins.
    f = FavoriteForm(auto_id=False)
    self.assertHTMLEqual(
        f.as_ul(),
        """<li>Favorite color? <input type="text" name="color" required></li>
<li>Favorite animal: <input type="text" name="animal" required></li>
<li>Secret answer = <input type="text" name="answer" required></li>""",
    )
    # Form-level suffix override.
    f = FavoriteForm(auto_id=False, label_suffix="?")
    self.assertHTMLEqual(
        f.as_ul(),
        """<li>Favorite color? <input type="text" name="color" required></li>
<li>Favorite animal? <input type="text" name="animal" required></li>
<li>Secret answer = <input type="text" name="answer" required></li>""",
    )
    # Empty suffix suppresses the punctuation entirely.
    f = FavoriteForm(auto_id=False, label_suffix="")
    self.assertHTMLEqual(
        f.as_ul(),
        """<li>Favorite color? <input type="text" name="color" required></li>
<li>Favorite animal <input type="text" name="animal" required></li>
<li>Secret answer = <input type="text" name="answer" required></li>""",
    )
    # Non-ASCII suffixes work too.
    f = FavoriteForm(auto_id=False, label_suffix="\u2192")
    self.assertHTMLEqual(
        f.as_ul(),
        '<li>Favorite color? <input type="text" name="color" required></li>\n'
        "<li>Favorite animal\u2192 "
        '<input type="text" name="animal" required></li>\n'
        '<li>Secret answer = <input type="text" name="answer" required></li>',
    )

def test_initial_data(self):
    # A Field's 'initial' argument supplies a value shown only when the
    # Form is rendered unbound. It is never used once data is bound (even
    # an empty dict counts as data), and it is *not* a fallback when a
    # required field is missing from the bound data.
    class UserRegistration(Form):
        username = CharField(max_length=10, initial="django")
        password = CharField(widget=PasswordInput)

    # Unbound: the initial value is rendered.
    p = UserRegistration(auto_id=False)
    self.assertHTMLEqual(
        p.as_ul(),
        """
        <li>Username: <input type="text" name="username" value="django"
            maxlength="10" required></li>
        <li>Password: <input type="password" name="password" required></li>
        """,
    )
    # Bound: the initial value is *not* rendered.
p = UserRegistration({}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li>""", ) p = UserRegistration({"username": ""}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li>""", ) p = UserRegistration({"username": "foo"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li> """, ) # An 'initial' value is *not* used as a fallback if data is not # provided. In this example, we don't provide a value for 'username', # and the form raises a validation error rather than using the initial # value for 'username'. p = UserRegistration({"password": "secret"}) self.assertEqual(p.errors["username"], ["This field is required."]) self.assertFalse(p.is_valid()) def test_dynamic_initial_data(self): # The previous technique dealt with "hard-coded" initial data, but it's # also possible to specify initial data after you've already created # the Form class (i.e., at runtime). Use the 'initial' parameter to the # Form constructor. This should be a dictionary containing initial # values for one or more fields in the form, keyed by field name. 
class UserRegistration(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) # Here, we're not submitting any data, so the initial value will be # displayed.) p = UserRegistration(initial={"username": "django"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> """, ) p = UserRegistration(initial={"username": "stephane"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="stephane" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> """, ) # The 'initial' parameter is meaningless if you pass data. p = UserRegistration({}, initial={"username": "django"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li>""", ) p = UserRegistration( {"username": ""}, initial={"username": "django"}, auto_id=False ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li>""", ) p = UserRegistration( {"username": "foo"}, initial={"username": "django"}, auto_id=False ) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" 
            aria-invalid="true" required></li>
            """,
        )

        # A dynamic 'initial' value is *not* used as a fallback if data is not
        # provided. In this example, we don't provide a value for 'username',
        # and the form raises a validation error rather than using the initial
        # value for 'username'.
        p = UserRegistration({"password": "secret"}, initial={"username": "django"})
        self.assertEqual(p.errors["username"], ["This field is required."])
        self.assertFalse(p.is_valid())

        # If a Form defines 'initial' *and* 'initial' is passed as a parameter
        # to Form(), then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial="django")
            password = CharField(widget=PasswordInput)

        p = UserRegistration(initial={"username": "babik"}, auto_id=False)
        self.assertHTMLEqual(
            p.as_ul(),
            """
            <li>Username:
            <input type="text" name="username" value="babik" maxlength="10"
                required></li>
            <li>Password: <input type="password" name="password" required></li>
            """,
        )

    def test_callable_initial_data(self):
        # The previous technique dealt with raw values as initial data, but
        # it's also possible to specify callable data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(
                choices=[("f", "foo"), ("b", "bar"), ("w", "whiz")]
            )

        # We need to define functions that get called later.
        def initial_django():
            return "django"

        def initial_stephane():
            return "stephane"

        def initial_options():
            return ["f", "b"]

        def initial_other_options():
            return ["b", "w"]

        # Here, we're not submitting any data, so the initial value will be
        # displayed.
p = UserRegistration( initial={"username": initial_django, "options": initial_options}, auto_id=False, ) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li> """, ) # The 'initial' parameter is meaningless if you pass data. p = UserRegistration( {}, initial={"username": initial_django, "options": initial_options}, auto_id=False, ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Options: <select multiple name="options" aria-invalid="true" required> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""", ) p = UserRegistration( {"username": ""}, initial={"username": initial_django}, auto_id=False ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul> Username: <input type="text" name="username" maxlength="10" aria-invalid="true" required></li><li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Options: <select multiple name="options" aria-invalid="true" required> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""", ) p = UserRegistration( {"username": "foo", "options": ["f", 
"b"]}, initial={"username": initial_django}, auto_id=False, ) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="foo" maxlength="10" required></li> <li><ul class="errorlist"><li>This field is required.</li></ul> Password: <input type="password" name="password" aria-invalid="true" required></li><li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li> """, ) # A callable 'initial' value is *not* used as a fallback if data is not # provided. In this example, we don't provide a value for 'username', # and the form raises a validation error rather than using the initial # value for 'username'. p = UserRegistration( {"password": "secret"}, initial={"username": initial_django, "options": initial_options}, ) self.assertEqual(p.errors["username"], ["This field is required."]) self.assertFalse(p.is_valid()) # If a Form defines 'initial' *and* 'initial' is passed as a parameter # to Form(), then the latter will get precedence. 
class UserRegistration(Form): username = CharField(max_length=10, initial=initial_django) password = CharField(widget=PasswordInput) options = MultipleChoiceField( choices=[("f", "foo"), ("b", "bar"), ("w", "whiz")], initial=initial_other_options, ) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="django" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f">foo</option> <option value="b" selected>bar</option> <option value="w" selected>whiz</option> </select></li> """, ) p = UserRegistration( initial={"username": initial_stephane, "options": initial_options}, auto_id=False, ) self.assertHTMLEqual( p.as_ul(), """ <li>Username: <input type="text" name="username" value="stephane" maxlength="10" required></li> <li>Password: <input type="password" name="password" required></li> <li>Options: <select multiple name="options" required> <option value="f" selected>foo</option> <option value="b" selected>bar</option> <option value="w">whiz</option> </select></li> """, ) def test_get_initial_for_field(self): now = datetime.datetime(2006, 10, 25, 14, 30, 45, 123456) class PersonForm(Form): first_name = CharField(initial="John") last_name = CharField(initial="Doe") age = IntegerField() occupation = CharField(initial=lambda: "Unknown") dt_fixed = DateTimeField(initial=now) dt_callable = DateTimeField(initial=lambda: now) form = PersonForm(initial={"first_name": "Jane"}) cases = [ ("age", None), ("last_name", "Doe"), # Form.initial overrides Field.initial. ("first_name", "Jane"), # Callables are evaluated. ("occupation", "Unknown"), # Microseconds are removed from datetimes. 
("dt_fixed", datetime.datetime(2006, 10, 25, 14, 30, 45)), ("dt_callable", datetime.datetime(2006, 10, 25, 14, 30, 45)), ] for field_name, expected in cases: with self.subTest(field_name=field_name): field = form.fields[field_name] actual = form.get_initial_for_field(field, field_name) self.assertEqual(actual, expected) def test_changed_data(self): class Person(Form): first_name = CharField(initial="Hans") last_name = CharField(initial="Greatel") birthday = DateField(initial=datetime.date(1974, 8, 16)) p = Person( data={"first_name": "Hans", "last_name": "Scrmbl", "birthday": "1974-08-16"} ) self.assertTrue(p.is_valid()) self.assertNotIn("first_name", p.changed_data) self.assertIn("last_name", p.changed_data) self.assertNotIn("birthday", p.changed_data) # A field raising ValidationError is always in changed_data class PedanticField(Field): def to_python(self, value): raise ValidationError("Whatever") class Person2(Person): pedantic = PedanticField(initial="whatever", show_hidden_initial=True) p = Person2( data={ "first_name": "Hans", "last_name": "Scrmbl", "birthday": "1974-08-16", "initial-pedantic": "whatever", } ) self.assertFalse(p.is_valid()) self.assertIn("pedantic", p.changed_data) def test_boundfield_values(self): # It's possible to get to the value which would be used for rendering # the widget for a field by using the BoundField's value method. class UserRegistration(Form): username = CharField(max_length=10, initial="djangonaut") password = CharField(widget=PasswordInput) unbound = UserRegistration() bound = UserRegistration({"password": "foo"}) self.assertIsNone(bound["username"].value()) self.assertEqual(unbound["username"].value(), "djangonaut") self.assertEqual(bound["password"].value(), "foo") self.assertIsNone(unbound["password"].value()) def test_boundfield_initial_called_once(self): """ Multiple calls to BoundField().value() in an unbound form should return the same result each time (#24391). 
        """

        class MyForm(Form):
            name = CharField(max_length=10, initial=uuid.uuid4)

        form = MyForm()
        name = form["name"]
        # uuid.uuid4 would return a different value on every call; equal
        # values here prove the callable initial is computed once and cached.
        self.assertEqual(name.value(), name.value())
        # BoundField is also cached
        self.assertIs(form["name"], name)

    def test_boundfield_value_disabled_callable_initial(self):
        class PersonForm(Form):
            name = CharField(initial=lambda: "John Doe", disabled=True)

        # Without form data.
        form = PersonForm()
        self.assertEqual(form["name"].value(), "John Doe")

        # With form data. As the field is disabled, the value should not be
        # affected by the form data.
        form = PersonForm({})
        self.assertEqual(form["name"].value(), "John Doe")

    def test_custom_boundfield(self):
        # Field.get_bound_field() lets a custom field control what a form
        # subscript (form["name"]) returns.
        class CustomField(CharField):
            def get_bound_field(self, form, name):
                return (form, name)

        class SampleForm(Form):
            name = CustomField()

        f = SampleForm()
        self.assertEqual(f["name"], (f, "name"))

    def test_initial_datetime_values(self):
        now = datetime.datetime.now()
        # Nix microseconds (since they should be ignored). #22502
        now_no_ms = now.replace(microsecond=0)
        if now == now_no_ms:
            # Guarantee a non-zero microsecond component so the test can tell
            # whether microseconds were actually stripped.
            now = now.replace(microsecond=1)

        def delayed_now():
            return now

        def delayed_now_time():
            return now.time()

        class HiddenInputWithoutMicrosec(HiddenInput):
            supports_microseconds = False

        class TextInputWithoutMicrosec(TextInput):
            supports_microseconds = False

        class DateTimeForm(Form):
            # Test a non-callable.
fixed = DateTimeField(initial=now) auto_timestamp = DateTimeField(initial=delayed_now) auto_time_only = TimeField(initial=delayed_now_time) supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput) hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput) hi_without_microsec = DateTimeField( initial=delayed_now, widget=HiddenInputWithoutMicrosec ) ti_without_microsec = DateTimeField( initial=delayed_now, widget=TextInputWithoutMicrosec ) unbound = DateTimeForm() cases = [ ("fixed", now_no_ms), ("auto_timestamp", now_no_ms), ("auto_time_only", now_no_ms.time()), ("supports_microseconds", now), ("hi_default_microsec", now), ("hi_without_microsec", now_no_ms), ("ti_without_microsec", now_no_ms), ] for field_name, expected in cases: with self.subTest(field_name=field_name): actual = unbound[field_name].value() self.assertEqual(actual, expected) # Also check get_initial_for_field(). field = unbound.fields[field_name] actual = unbound.get_initial_for_field(field, field_name) self.assertEqual(actual, expected) def get_datetime_form_with_callable_initial(self, disabled, microseconds=0): class FakeTime: def __init__(self): self.elapsed_seconds = 0 def now(self): self.elapsed_seconds += 1 return datetime.datetime( 2006, 10, 25, 14, 30, 45 + self.elapsed_seconds, microseconds, ) class DateTimeForm(Form): dt = DateTimeField(initial=FakeTime().now, disabled=disabled) return DateTimeForm({}) def test_datetime_clean_disabled_callable_initial_microseconds(self): """ Cleaning a form with a disabled DateTimeField and callable initial removes microseconds. 
        """
        form = self.get_datetime_form_with_callable_initial(
            disabled=True,
            microseconds=123456,
        )
        self.assertEqual(form.errors, {})
        # The cleaned datetime keeps FakeTime's one-second offset but drops
        # the 123456 microseconds.
        self.assertEqual(
            form.cleaned_data,
            {
                "dt": datetime.datetime(2006, 10, 25, 14, 30, 46),
            },
        )

    def test_datetime_clean_disabled_callable_initial_bound_field(self):
        """
        The cleaned value for a form with a disabled DateTimeField and
        callable initial matches the bound field's cached initial value.
        """
        form = self.get_datetime_form_with_callable_initial(disabled=True)
        self.assertEqual(form.errors, {})
        cleaned = form.cleaned_data["dt"]
        self.assertEqual(cleaned, datetime.datetime(2006, 10, 25, 14, 30, 46))
        # The bound field's cached initial agrees with cleaned_data, i.e. the
        # callable was evaluated only once.
        bf = form["dt"]
        self.assertEqual(cleaned, bf.initial)

    def test_datetime_changed_data_callable_with_microseconds(self):
        # A disabled field whose callable initial differs from the submitted
        # value only in microseconds isn't reported as changed.
        class DateTimeForm(Form):
            dt = DateTimeField(
                initial=lambda: datetime.datetime(2006, 10, 25, 14, 30, 45, 123456),
                disabled=True,
            )

        form = DateTimeForm({"dt": "2006-10-25 14:30:45"})
        self.assertEqual(form.changed_data, [])

    def test_help_text(self):
        # You can specify descriptive text for a field by using the 'help_text'
        # argument.
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text="e.g., user@example.com")
            password = CharField(
                widget=PasswordInput, help_text="Wählen Sie mit Bedacht."
) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., user@example.com</span></li> <li>Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""", ) self.assertHTMLEqual( p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., user@example.com</span></p> <p>Password: <input type="password" name="password" required> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""", ) self.assertHTMLEqual( p.as_table(), """ <tr><th>Username:</th><td> <input type="text" name="username" maxlength="10" required><br> <span class="helptext">e.g., user@example.com</span></td></tr> <tr><th>Password:</th><td><input type="password" name="password" required> <br> <span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""", ) self.assertHTMLEqual( p.as_div(), '<div>Username: <div class="helptext">e.g., user@example.com</div>' '<input type="text" name="username" maxlength="10" required></div>' '<div>Password: <div class="helptext">Wählen Sie mit Bedacht.</div>' '<input type="password" name="password" required></div>', ) # The help text is displayed whether or not data is provided for the # form. p = UserRegistration({"username": "foo"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), '<li>Username: <input type="text" name="username" value="foo" ' 'maxlength="10" required>' '<span class="helptext">e.g., user@example.com</span></li>' '<li><ul class="errorlist"><li>This field is required.</li></ul>' 'Password: <input type="password" name="password" aria-invalid="true" ' 'required><span class="helptext">Wählen Sie mit Bedacht.</span></li>', ) # help_text is not displayed for hidden fields. It can be used for # documentation purposes, though. 
class UserRegistration(Form): username = CharField(max_length=10, help_text="e.g., user@example.com") password = CharField(widget=PasswordInput) next = CharField( widget=HiddenInput, initial="/", help_text="Redirect destination" ) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" required> <span class="helptext">e.g., user@example.com</span></li> <li>Password: <input type="password" name="password" required> <input type="hidden" name="next" value="/"></li>""", ) def test_help_text_html_safe(self): """help_text should not be escaped.""" class UserRegistration(Form): username = CharField(max_length=10, help_text="e.g., user@example.com") password = CharField( widget=PasswordInput, help_text="Help text is <strong>escaped</strong>.", ) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), '<li>Username: <input type="text" name="username" maxlength="10" required>' '<span class="helptext">e.g., user@example.com</span></li>' '<li>Password: <input type="password" name="password" required>' '<span class="helptext">Help text is <strong>escaped</strong>.</span></li>', ) self.assertHTMLEqual( p.as_p(), '<p>Username: <input type="text" name="username" maxlength="10" required>' '<span class="helptext">e.g., user@example.com</span></p>' '<p>Password: <input type="password" name="password" required>' '<span class="helptext">Help text is <strong>escaped</strong>.</span></p>', ) self.assertHTMLEqual( p.as_table(), "<tr><th>Username:</th><td>" '<input type="text" name="username" maxlength="10" required><br>' '<span class="helptext">e.g., user@example.com</span></td></tr>' "<tr><th>Password:</th><td>" '<input type="password" name="password" required><br>' '<span class="helptext">Help text is <strong>escaped</strong>.</span>' "</td></tr>", ) def test_widget_attrs_custom_aria_describedby(self): # aria-describedby provided to the widget overrides the default. 
class UserRegistration(Form): username = CharField( max_length=255, help_text="e.g., user@example.com", widget=TextInput(attrs={"aria-describedby": "custom-description"}), ) password = CharField( widget=PasswordInput, help_text="Wählen Sie mit Bedacht." ) p = UserRegistration() self.assertHTMLEqual( p.as_div(), '<div><label for="id_username">Username:</label>' '<div class="helptext" id="id_username_helptext">e.g., user@example.com' '</div><input type="text" name="username" maxlength="255" required ' 'aria-describedby="custom-description" id="id_username">' "</div><div>" '<label for="id_password">Password:</label>' '<div class="helptext" id="id_password_helptext">Wählen Sie mit Bedacht.' '</div><input type="password" name="password" required ' 'aria-describedby="id_password_helptext" id="id_password"></div>', ) self.assertHTMLEqual( p.as_ul(), '<li><label for="id_username">Username:</label><input type="text" ' 'name="username" maxlength="255" required ' 'aria-describedby="custom-description" id="id_username">' '<span class="helptext" id="id_username_helptext">e.g., user@example.com' "</span></li><li>" '<label for="id_password">Password:</label>' '<input type="password" name="password" required ' 'aria-describedby="id_password_helptext" id="id_password">' '<span class="helptext" id="id_password_helptext">Wählen Sie mit Bedacht.' "</span></li>", ) self.assertHTMLEqual( p.as_p(), '<p><label for="id_username">Username:</label><input type="text" ' 'name="username" maxlength="255" required ' 'aria-describedby="custom-description" id="id_username">' '<span class="helptext" id="id_username_helptext">e.g., user@example.com' "</span></p><p>" '<label for="id_password">Password:</label>' '<input type="password" name="password" required ' 'aria-describedby="id_password_helptext" id="id_password">' '<span class="helptext" id="id_password_helptext">Wählen Sie mit Bedacht.' 
"</span></p>", ) self.assertHTMLEqual( p.as_table(), '<tr><th><label for="id_username">Username:</label></th><td>' '<input type="text" name="username" maxlength="255" required ' 'aria-describedby="custom-description" id="id_username"><br>' '<span class="helptext" id="id_username_helptext">e.g., user@example.com' "</span></td></tr><tr><th>" '<label for="id_password">Password:</label></th><td>' '<input type="password" name="password" required ' 'aria-describedby="id_password_helptext" id="id_password"><br>' '<span class="helptext" id="id_password_helptext">Wählen Sie mit Bedacht.' "</span></td></tr>", ) def test_aria_describedby_custom_widget_id(self): class UserRegistration(Form): username = CharField( max_length=255, help_text="e.g., user@example.com", widget=TextInput(attrs={"id": "custom-id"}), ) f = UserRegistration() self.assertHTMLEqual( str(f), '<div><label for="custom-id">Username:</label>' '<div class="helptext" id="id_username_helptext">e.g., user@example.com' '</div><input type="text" name="username" id="custom-id" maxlength="255" ' 'required aria-describedby="id_username_helptext"></div>', ) def test_select_aria_describedby(self): class TestForm(Form): color = MultipleChoiceField( choices=[("red", "Red"), ("green", "Green")], help_text="Select Color", ) f = TestForm({"color": "Blue"}) self.assertHTMLEqual( str(f), '<div><label for="id_color">Color:</label><div class="helptext" ' 'id="id_color_helptext">Select Color</div>' '<ul class="errorlist" id="id_color_error"><li>Enter a list of values.' 
'</li></ul><select name="color" required aria-invalid="true" ' 'aria-describedby="id_color_helptext id_color_error" id="id_color" ' 'multiple><option value="red">Red</option>' '<option value="green">Green</option></select></div>', ) def test_textarea_aria_describedby(self): class TestForm(Form): color = CharField(widget=Textarea, max_length=5, help_text="Enter Color") f = TestForm({"color": "Purple"}) self.assertHTMLEqual( str(f), '<div><label for="id_color">Color:</label>' '<div class="helptext" id="id_color_helptext">Enter Color</div>' '<ul class="errorlist" id="id_color_error">' "<li>Ensure this value has at most 5 characters (it has 6).</li></ul>" '<textarea name="color" cols="40" rows="10" maxlength="5" required ' 'aria-invalid="true" aria-describedby="id_color_helptext id_color_error" ' 'id="id_color">Purple</textarea></div>', ) def test_aria_describedby_called_multiple_times(self): class TestForm(Form): color = CharField(widget=Textarea, help_text="Enter Color") f = TestForm({"color": "Purple"}) self.assertEqual(f["color"].aria_describedby, "id_color_helptext") f.add_error("color", "An error about Purple.") self.assertEqual( f["color"].aria_describedby, "id_color_helptext id_color_error" ) def test_fieldset_aria_describedby(self): class FieldsetForm(Form): checkbox = MultipleChoiceField( choices=[("a", "A"), ("b", "B")], widget=CheckboxSelectMultiple, help_text="Checkbox help text", ) radio = MultipleChoiceField( choices=[("a", "A"), ("b", "B")], widget=RadioSelect, help_text="Radio help text", ) datetime = SplitDateTimeField(help_text="Enter Date and Time") f = FieldsetForm() self.assertHTMLEqual( str(f), '<div><fieldset aria-describedby="id_checkbox_helptext">' "<legend>Checkbox:</legend>" '<div class="helptext" id="id_checkbox_helptext">Checkbox help text</div>' '<div id="id_checkbox"><div>' '<label for="id_checkbox_0"><input type="checkbox" name="checkbox" ' 'value="a" id="id_checkbox_0" /> A</label>' "</div><div>" '<label for="id_checkbox_1"><input 
type="checkbox" name="checkbox" ' 'value="b" id="id_checkbox_1" /> B</label>' "</div></div></fieldset></div>" '<div><fieldset aria-describedby="id_radio_helptext">' "<legend>Radio:</legend>" '<div class="helptext" id="id_radio_helptext">Radio help text</div>' '<div id="id_radio"><div>' '<label for="id_radio_0"><input type="radio" name="radio" value="a" ' 'required id="id_radio_0" />A</label>' "</div><div>" '<label for="id_radio_1"><input type="radio" name="radio" value="b" ' 'required id="id_radio_1" /> B</label>' "</div></div></fieldset></div>" '<div><fieldset aria-describedby="id_datetime_helptext">' "<legend>Datetime:</legend>" '<div class="helptext" id="id_datetime_helptext">Enter Date and Time</div>' '<input type="text" name="datetime_0" required id="id_datetime_0" />' '<input type="text" name="datetime_1" required id="id_datetime_1" />' "</fieldset></div>", ) f = FieldsetForm({}) self.assertHTMLEqual( '<div><fieldset aria-describedby="id_checkbox_helptext ' 'id_checkbox_error"> <legend>Checkbox:</legend> <div class="helptext" ' 'id="id_checkbox_helptext">Checkbox help text</div> <ul class="errorlist" ' 'id="id_checkbox_error"> <li>This field is required.</li> </ul> ' '<div id="id_checkbox"> <div> <label for="id_checkbox_0"><input ' 'type="checkbox" name="checkbox" value="a" aria-invalid="true" ' 'id="id_checkbox_0" /> A</label> </div> <div> <label for="id_checkbox_1">' '<input type="checkbox" name="checkbox" value="b" aria-invalid="true" ' 'id="id_checkbox_1" /> B</label> </div> </div> </fieldset> </div> <div> ' '<fieldset aria-describedby="id_radio_helptext id_radio_error"> ' '<legend>Radio:</legend> <div class="helptext" id="id_radio_helptext">' 'Radio help text</div> <ul class="errorlist" id="id_radio_error"><li>' 'This field is required.</li> </ul> <div id="id_radio"><div><label ' 'for="id_radio_0"><input type="radio" name="radio" value="a" required ' 'aria-invalid="true" id="id_radio_0" />A</label></div><div><label ' 'for="id_radio_1"><input type="radio" 
name="radio" value="b" required ' 'aria-invalid="true" id="id_radio_1" />B</label></div></div></fieldset>' '</div><div><fieldset aria-describedby="id_datetime_helptext ' 'id_datetime_error"><legend>Datetime:</legend><div class="helptext" ' 'id="id_datetime_helptext">Enter Date and Time</div><ul class="errorlist" ' 'id="id_datetime_error"><li>This field is required.</li></ul><input ' 'type="text" name="datetime_0" required aria-invalid="true" ' 'id="id_datetime_0" /><input type="text" name="datetime_1" required ' 'aria-invalid="true" id="id_datetime_1" /></fieldset></div>', str(f), ) f = FieldsetForm(auto_id=False) # aria-describedby is not included. self.assertIn("<fieldset>", str(f)) self.assertIn('<div class="helptext">', str(f)) f = FieldsetForm(auto_id="custom_%s") # aria-describedby uses custom auto_id. self.assertIn('fieldset aria-describedby="custom_checkbox_helptext"', str(f)) self.assertIn('<div class="helptext" id="custom_checkbox_helptext">', str(f)) def test_fieldset_custom_aria_describedby(self): # aria-describedby set on widget results in aria-describedby being # added to widget and not the <fieldset>. 
class FieldsetForm(Form): checkbox = MultipleChoiceField( choices=[("a", "A"), ("b", "B")], widget=CheckboxSelectMultiple(attrs={"aria-describedby": "custom-id"}), help_text="Checkbox help text", ) f = FieldsetForm() self.assertHTMLEqual( str(f), "<div><fieldset><legend>Checkbox:</legend>" '<div class="helptext" id="id_checkbox_helptext">Checkbox help text</div>' '<div id="id_checkbox"><div>' '<label for="id_checkbox_0"><input type="checkbox" name="checkbox" ' 'value="a" aria-describedby="custom-id" id="id_checkbox_0" />A</label>' "</div><div>" '<label for="id_checkbox_1"><input type="checkbox" name="checkbox" ' 'value="b" aria-describedby="custom-id" id="id_checkbox_1" />B</label>' "</div></div></fieldset></div>", ) def test_as_widget_custom_aria_describedby(self): class FoodForm(Form): intl_name = CharField(help_text="The food's international name.") form = FoodForm({"intl_name": "Rendang"}) self.assertHTMLEqual( form["intl_name"].as_widget(attrs={"aria-describedby": "some_custom_id"}), '<input type="text" name="intl_name" value="Rendang"' 'aria-describedby="some_custom_id" required id="id_intl_name">', ) def test_subclassing_forms(self): # You can subclass a Form to add fields. The resulting form subclass # will have all of the fields of the parent Form, plus whichever fields # you define in the subclass. 
class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Musician(Person): instrument = CharField() p = Person(auto_id=False) self.assertHTMLEqual( p.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li>""", ) m = Musician(auto_id=False) self.assertHTMLEqual( m.as_ul(), """<li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li> <li>Instrument: <input type="text" name="instrument" required></li>""", ) # Yes, you can subclass multiple forms. The fields are added in the # order in which the parent classes are listed. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Instrument(Form): instrument = CharField() class Beatle(Person, Instrument): haircut_type = CharField() b = Beatle(auto_id=False) self.assertHTMLEqual( b.as_ul(), """<li>Instrument: <input type="text" name="instrument" required></li> <li>First name: <input type="text" name="first_name" required></li> <li>Last name: <input type="text" name="last_name" required></li> <li>Birthday: <input type="text" name="birthday" required></li> <li>Haircut type: <input type="text" name="haircut_type" required></li>""", ) def test_forms_with_prefixes(self): # Sometimes it's necessary to have multiple forms display on the same # HTML page, or multiple copies of the same form. We can accomplish # this with form prefixes. Pass the keyword argument 'prefix' to the # Form constructor to use this feature. This value will be prepended to # each HTML form field name. One way to think about this is "namespaces # for HTML forms". 
Notice that in the data argument, each field's key # has the prefix, in this case 'person1', prepended to the actual field # name. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() data = { "person1-first_name": "John", "person1-last_name": "Lennon", "person1-birthday": "1940-10-9", } p = Person(data, prefix="person1") self.assertHTMLEqual( p.as_ul(), """ <li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" required></li> <li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" required></li> <li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" required></li> """, ) self.assertHTMLEqual( str(p["first_name"]), '<input type="text" name="person1-first_name" value="John" ' 'id="id_person1-first_name" required>', ) self.assertHTMLEqual( str(p["last_name"]), '<input type="text" name="person1-last_name" value="Lennon" ' 'id="id_person1-last_name" required>', ) self.assertHTMLEqual( str(p["birthday"]), '<input type="text" name="person1-birthday" value="1940-10-9" ' 'id="id_person1-birthday" required>', ) self.assertEqual(p.errors, {}) self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data["first_name"], "John") self.assertEqual(p.cleaned_data["last_name"], "Lennon") self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9)) # Let's try submitting some bad data to make sure form.errors and # field.errors work as expected. 
data = { "person1-first_name": "", "person1-last_name": "", "person1-birthday": "", } p = Person(data, prefix="person1") self.assertEqual(p.errors["first_name"], ["This field is required."]) self.assertEqual(p.errors["last_name"], ["This field is required."]) self.assertEqual(p.errors["birthday"], ["This field is required."]) self.assertEqual(p["first_name"].errors, ["This field is required."]) # Accessing a nonexistent field. with self.assertRaises(KeyError): p["person1-first_name"].errors # In this example, the data doesn't have a prefix, but the form # requires it, so the form doesn't "see" the fields. data = {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"} p = Person(data, prefix="person1") self.assertEqual(p.errors["first_name"], ["This field is required."]) self.assertEqual(p.errors["last_name"], ["This field is required."]) self.assertEqual(p.errors["birthday"], ["This field is required."]) # With prefixes, a single data dictionary can hold data for multiple # instances of the same form. data = { "person1-first_name": "John", "person1-last_name": "Lennon", "person1-birthday": "1940-10-9", "person2-first_name": "Jim", "person2-last_name": "Morrison", "person2-birthday": "1943-12-8", } p1 = Person(data, prefix="person1") self.assertTrue(p1.is_valid()) self.assertEqual(p1.cleaned_data["first_name"], "John") self.assertEqual(p1.cleaned_data["last_name"], "Lennon") self.assertEqual(p1.cleaned_data["birthday"], datetime.date(1940, 10, 9)) p2 = Person(data, prefix="person2") self.assertTrue(p2.is_valid()) self.assertEqual(p2.cleaned_data["first_name"], "Jim") self.assertEqual(p2.cleaned_data["last_name"], "Morrison") self.assertEqual(p2.cleaned_data["birthday"], datetime.date(1943, 12, 8)) # By default, forms append a hyphen between the prefix and the field # name, but a form can alter that behavior by implementing the # add_prefix() method. This method takes a field name and returns the # prefixed field, according to self.prefix. 
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()

            def add_prefix(self, field_name):
                # Custom joiner: "<prefix>-prefix-<field>" instead of the
                # default "<prefix>-<field>".
                return (
                    "%s-prefix-%s" % (self.prefix, field_name)
                    if self.prefix
                    else field_name
                )

        p = Person(prefix="foo")
        self.assertHTMLEqual(
            p.as_ul(),
            """
            <li><label for="id_foo-prefix-first_name">First name:</label>
            <input type="text" name="foo-prefix-first_name"
                id="id_foo-prefix-first_name" required></li>
            <li><label for="id_foo-prefix-last_name">Last name:</label>
            <input type="text" name="foo-prefix-last_name"
                id="id_foo-prefix-last_name" required></li>
            <li><label for="id_foo-prefix-birthday">Birthday:</label>
            <input type="text" name="foo-prefix-birthday"
                id="id_foo-prefix-birthday" required></li>
            """,
        )
        data = {
            "foo-prefix-first_name": "John",
            "foo-prefix-last_name": "Lennon",
            "foo-prefix-birthday": "1940-10-9",
        }
        p = Person(data, prefix="foo")
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data["first_name"], "John")
        self.assertEqual(p.cleaned_data["last_name"], "Lennon")
        self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))

    def test_class_prefix(self):
        # Prefix can also be specified at the class level.
        class Person(Form):
            first_name = CharField()
            prefix = "foo"

        p = Person()
        self.assertEqual(p.prefix, "foo")

        # A constructor-level prefix overrides the class attribute.
        p = Person(prefix="bar")
        self.assertEqual(p.prefix, "bar")

    def test_forms_with_null_boolean(self):
        # NullBooleanField is a bit of a special case because its presentation
        # (widget) is different than its data. This is handled transparently,
        # though.
        class Person(Form):
            name = CharField()
            is_cool = NullBooleanField()

        # Omitted value renders with "unknown" selected.
        p = Person({"name": "Joe"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>""",
        )
        # Legacy numeric submissions: "1" = unknown, "2" = yes, "3" = no.
        p = Person({"name": "Joe", "is_cool": "1"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>""",
        )
        p = Person({"name": "Joe", "is_cool": "2"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true" selected>Yes</option>
<option value="false">No</option>
</select>""",
        )
        p = Person({"name": "Joe", "is_cool": "3"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true">Yes</option>
<option value="false" selected>No</option>
</select>""",
        )
        # Python booleans map to the "true"/"false" options.
        p = Person({"name": "Joe", "is_cool": True}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true" selected>Yes</option>
<option value="false">No</option>
</select>""",
        )
        p = Person({"name": "Joe", "is_cool": False}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true">Yes</option>
<option value="false" selected>No</option>
</select>""",
        )
        # String submissions matching the option values also round-trip.
        p = Person({"name": "Joe", "is_cool": "unknown"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>""",
        )
        p = Person({"name": "Joe", "is_cool": "true"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true" selected>Yes</option>
<option value="false">No</option>
</select>""",
        )
        p = Person({"name": "Joe", "is_cool": "false"}, auto_id=False)
        self.assertHTMLEqual(
            str(p["is_cool"]),
            """<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true">Yes</option>
<option value="false" selected>No</option>
</select>""",
        )

    def test_forms_with_file_fields(self):
        # FileFields are a special case because they take their data from the
        # request.FILES, not request.POST.
        class FileForm(Form):
            file1 = FileField()

        f = FileForm(auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<input type="file" name="file1" required></td></tr>',
        )

        # Bound with no file: a "required" error.
        f = FileForm(data={}, files={}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<ul class="errorlist"><li>This field is required.</li></ul>'
            '<input type="file" name="file1" aria-invalid="true" required></td></tr>',
        )

        # Zero-byte upload: an "empty file" error.
        f = FileForm(
            data={}, files={"file1": SimpleUploadedFile("name", b"")}, auto_id=False
        )
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<ul class="errorlist"><li>The submitted file is empty.</li></ul>'
            '<input type="file" name="file1" aria-invalid="true" required></td></tr>',
        )

        # Non-file value in FILES: an "invalid" error (bad form enctype).
        f = FileForm(
            data={}, files={"file1": "something that is not a file"}, auto_id=False
        )
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<ul class="errorlist"><li>No file was submitted. Check the '
            "encoding type on the form.</li></ul>"
            '<input type="file" name="file1" aria-invalid="true" required></td></tr>',
        )

        f = FileForm(
            data={},
            files={"file1": SimpleUploadedFile("name", b"some content")},
            auto_id=False,
        )
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<input type="file" name="file1" required></td></tr>',
        )
        self.assertTrue(f.is_valid())

        # Non-ASCII file names and contents are accepted.
        file1 = SimpleUploadedFile(
            "我隻氣墊船裝滿晒鱔.txt",
            "मेरी मँडराने वाली नाव सर्पमीनों से भरी ह".encode(),
        )
        f = FileForm(data={}, files={"file1": file1}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<input type="file" name="file1" required></td></tr>',
        )

        # A required file field with initial data should not contain the
        # required HTML attribute. The file input is left blank by the user to
        # keep the existing, initial value.
        f = FileForm(initial={"file1": "resume.txt"}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>',
        )

    def test_filefield_initial_callable(self):
        # A callable initial is resolved when cleaning an unsubmitted field.
        class FileForm(Form):
            file1 = FileField(initial=lambda: "resume.txt")

        f = FileForm({})
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data["file1"], "resume.txt")

    def test_filefield_with_fileinput_required(self):
        class FileForm(Form):
            file1 = FileField(widget=FileInput)

        f = FileForm(auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            "<tr><th>File1:</th><td>"
            '<input type="file" name="file1" required></td></tr>',
        )
        # A required file field with initial data doesn't contain the required
        # HTML attribute. The file input is left blank by the user to keep the
        # existing, initial value.
        f = FileForm(initial={"file1": "resume.txt"}, auto_id=False)
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>',
        )

    def test_empty_permitted(self):
        # Sometimes (pretty much in formsets) we want to allow a form to pass
        # validation if it is completely empty.
        # We can accomplish this by using
        # the empty_permitted argument to a form constructor.
        class SongForm(Form):
            artist = CharField()
            name = CharField()

        # First let's show what happens if empty_permitted=False (the default):
        # NOTE(review): the data key is "song" but the field is "name", so the
        # "name" field is always treated as missing here — verify intent.
        data = {"artist": "", "song": ""}
        form = SongForm(data, empty_permitted=False)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                "name": ["This field is required."],
                "artist": ["This field is required."],
            },
        )
        self.assertEqual(form.cleaned_data, {})

        # Now let's show what happens when empty_permitted=True and the form is
        # empty.
        form = SongForm(data, empty_permitted=True, use_required_attribute=False)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.errors, {})
        self.assertEqual(form.cleaned_data, {})

        # But if we fill in data for one of the fields, the form is no longer
        # empty and the whole thing must pass validation.
        data = {"artist": "The Doors", "song": ""}
        form = SongForm(data, empty_permitted=False)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {"name": ["This field is required."]})
        self.assertEqual(form.cleaned_data, {"artist": "The Doors"})

        # If a field is not given in the data then None is returned for its
        # data. Let's make sure that when checking for empty_permitted that
        # None is treated accordingly.
        data = {"artist": None, "song": ""}
        form = SongForm(data, empty_permitted=True, use_required_attribute=False)
        self.assertTrue(form.is_valid())

        # However, we *really* need to be sure we are checking for None as any
        # data in initial that returns False on a boolean call needs to be
        # treated literally.
        class PriceForm(Form):
            amount = FloatField()
            qty = IntegerField()

        data = {"amount": "0.0", "qty": ""}
        form = PriceForm(
            data,
            initial={"amount": 0.0},
            empty_permitted=True,
            use_required_attribute=False,
        )
        self.assertTrue(form.is_valid())

    def test_empty_permitted_and_use_required_attribute(self):
        # The two options are mutually exclusive at construction time.
        msg = (
            "The empty_permitted and use_required_attribute arguments may not "
            "both be True."
        )
        with self.assertRaisesMessage(ValueError, msg):
            Person(empty_permitted=True, use_required_attribute=True)

    def test_extracting_hidden_and_visible(self):
        class SongForm(Form):
            token = CharField(widget=HiddenInput)
            artist = CharField()
            name = CharField()

        form = SongForm()
        self.assertEqual([f.name for f in form.hidden_fields()], ["token"])
        self.assertEqual([f.name for f in form.visible_fields()], ["artist", "name"])

    def test_hidden_initial_gets_id(self):
        # show_hidden_initial renders an extra hidden input with an
        # "initial-" prefixed name/id.
        class MyForm(Form):
            field1 = CharField(max_length=50, show_hidden_initial=True)

        self.assertHTMLEqual(
            MyForm().as_table(),
            '<tr><th><label for="id_field1">Field1:</label></th><td>'
            '<input id="id_field1" type="text" name="field1" maxlength="50" required>'
            '<input type="hidden" name="initial-field1" id="initial-id_field1">'
            "</td></tr>",
        )

    def test_error_html_required_html_classes(self):
        class Person(Form):
            name = CharField()
            is_cool = NullBooleanField()
            email = EmailField(required=False)
            age = IntegerField()

        p = Person({})
        p.error_css_class = "error"
        p.required_css_class = "required"
        self.assertHTMLEqual(
            p.as_ul(),
            """
            <li class="required error"><ul class="errorlist" id="id_name_error">
            <li>This field is required.</li></ul>
            <label class="required" for="id_name">Name:</label>
            <input type="text" name="name" id="id_name" aria-invalid="true"
                required aria-describedby="id_name_error">
            </li><li class="required">
            <label class="required" for="id_is_cool">Is cool:</label>
            <select name="is_cool" id="id_is_cool">
            <option value="unknown" selected>Unknown</option>
            <option value="true">Yes</option>
            <option value="false">No</option>
            </select></li>
            <li><label for="id_email">Email:</label>
            <input type="email" name="email" id="id_email" maxlength="320"></li>
            <li class="required error"><ul class="errorlist" id="id_age_error">
            <li>This field is required.</li></ul>
            <label class="required" for="id_age">Age:</label>
            <input type="number" name="age" id="id_age" aria-invalid="true"
                required aria-describedby="id_age_error">
            </li>""",
        )
        self.assertHTMLEqual(
            p.as_p(),
            """
            <ul class="errorlist" id="id_name_error"><li>This field is required.</li>
            </ul><p class="required error">
            <label class="required" for="id_name">Name:</label>
            <input type="text" name="name" id="id_name" aria-invalid="true"
                required aria-describedby="id_name_error">
            </p><p class="required">
            <label class="required" for="id_is_cool">Is cool:</label>
            <select name="is_cool" id="id_is_cool">
            <option value="unknown" selected>Unknown</option>
            <option value="true">Yes</option>
            <option value="false">No</option>
            </select></p>
            <p><label for="id_email">Email:</label>
            <input type="email" name="email" id="id_email" maxlength="320"></p>
            <ul class="errorlist" id="id_age_error"><li>This field is required.</li>
            </ul><p class="required error"><label class="required" for="id_age">
            Age:</label><input type="number" name="age" id="id_age"
                aria-invalid="true" required aria-describedby="id_age_error"></p>""",
        )
        self.assertHTMLEqual(
            p.as_table(),
            """<tr class="required error">
<th><label class="required" for="id_name">Name:</label></th>
<td><ul class="errorlist" id="id_name_error"><li>This field is required.</li></ul>
<input type="text" name="name" id="id_name" aria-invalid="true" required
    aria-describedby="id_name_error"></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th>
<td><select name="is_cool" id="id_is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td>
<input type="email" name="email" id="id_email" maxlength="320"></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th>
<td><ul class="errorlist" id="id_age_error"><li>This field is required.</li></ul>
<input type="number" name="age" id="id_age" aria-invalid="true" required
    aria-describedby="id_age_error"></td></tr>""",
        )
        self.assertHTMLEqual(
            p.as_div(),
            '<div class="required error"><label for="id_name" class="required">Name:'
            '</label><ul class="errorlist" id="id_name_error"><li>This field is '
            'required.</li></ul><input type="text" name="name" required id="id_name" '
            'aria-invalid="true" aria-describedby="id_name_error" /></div>'
            '<div class="required"><label for="id_is_cool" class="required">Is cool:'
            '</label><select name="is_cool" id="id_is_cool">'
            '<option value="unknown" selected>Unknown</option>'
            '<option value="true">Yes</option><option value="false">No</option>'
            '</select></div><div><label for="id_email">Email:</label>'
            '<input type="email" name="email" id="id_email" maxlength="320"/></div>'
            '<div class="required error"><label for="id_age" class="required">Age:'
            '</label><ul class="errorlist" id="id_age_error"><li>This field is '
            'required.</li></ul><input type="number" name="age" required id="id_age" '
            'aria-invalid="true" aria-describedby="id_age_error" /></div>',
        )

    def test_label_has_required_css_class(self):
        """
        required_css_class is added to label_tag() and legend_tag() of required
        fields.
        """

        class SomeForm(Form):
            required_css_class = "required"
            field = CharField(max_length=10)
            field2 = IntegerField(required=False)

        f = SomeForm({"field": "test"})
        self.assertHTMLEqual(
            f["field"].label_tag(),
            '<label for="id_field" class="required">Field:</label>',
        )
        self.assertHTMLEqual(
            f["field"].legend_tag(),
            '<legend class="required">Field:</legend>',
        )
        # Extra classes passed via attrs are merged with the required class.
        self.assertHTMLEqual(
            f["field"].label_tag(attrs={"class": "foo"}),
            '<label for="id_field" class="foo required">Field:</label>',
        )
        self.assertHTMLEqual(
            f["field"].legend_tag(attrs={"class": "foo"}),
            '<legend class="foo required">Field:</legend>',
        )
        # Optional fields get no required class.
        self.assertHTMLEqual(
            f["field2"].label_tag(), '<label for="id_field2">Field2:</label>'
        )
        self.assertHTMLEqual(
            f["field2"].legend_tag(),
            "<legend>Field2:</legend>",
        )

    def test_label_split_datetime_not_displayed(self):
        # Hidden widgets render no <label> at all.
        class EventForm(Form):
            happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)

        form = EventForm()
        self.assertHTMLEqual(
            form.as_ul(),
            '<input type="hidden" name="happened_at_0" id="id_happened_at_0">'
            '<input type="hidden" name="happened_at_1" id="id_happened_at_1">',
        )

    def test_multivalue_field_validation(self):
        def bad_names(value):
            # Field-level validator applied to the compressed value.
            if value == "bad value":
                raise ValidationError("bad value not allowed")

        class NameField(MultiValueField):
            def __init__(self, fields=(), *args, **kwargs):
                fields = (
                    CharField(label="First name", max_length=10),
                    CharField(label="Last name", max_length=10),
                )
                super().__init__(fields=fields, *args, **kwargs)

            def compress(self, data_list):
                return " ".join(data_list)

        class NameForm(Form):
            name = NameField(validators=[bad_names])

        form = NameForm(data={"name": ["bad", "value"]})
        form.full_clean()
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {"name": ["bad value not allowed"]})
        # Subfield errors (max_length) are collected per subfield.
        form = NameForm(data={"name": ["should be overly", "long for the field names"]})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                "name": [
                    "Ensure this value has at most 10 characters (it has 16).",
                    "Ensure this value has at most 10 characters (it has 24).",
                ],
            },
        )
        form = NameForm(data={"name": ["fname", "lname"]})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {"name": "fname lname"})

    def test_multivalue_deep_copy(self):
        """
        #19298 -- MultiValueField needs to override the default as it needs
        to deep-copy subfields:
        """

        class ChoicesField(MultiValueField):
            def __init__(self, fields=(), *args, **kwargs):
                fields = (
                    ChoiceField(label="Rank", choices=((1, 1), (2, 2))),
                    CharField(label="Name", max_length=10),
                )
                super().__init__(fields=fields, *args, **kwargs)

        field = ChoicesField()
        field2 = copy.deepcopy(field)
        self.assertIsInstance(field2, ChoicesField)
        self.assertIsNot(field2.fields, field.fields)
        self.assertIsNot(field2.fields[0].choices, field.fields[0].choices)

    def test_multivalue_initial_data(self):
        """
        #23674 -- invalid initial data should not break form.changed_data()
        """

        class DateAgeField(MultiValueField):
            def __init__(self, fields=(), *args, **kwargs):
                fields = (DateField(label="Date"), IntegerField(label="Age"))
                super().__init__(fields=fields, *args, **kwargs)

        class DateAgeForm(Form):
            date_age = DateAgeField()

        data = {"date_age": ["1998-12-06", 16]}
        form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]})
        self.assertTrue(form.has_changed())

    def test_multivalue_optional_subfields(self):
        class PhoneField(MultiValueField):
            def __init__(self, *args, **kwargs):
                fields = (
                    CharField(
                        label="Country Code",
                        validators=[
                            RegexValidator(
                                r"^\+[0-9]{1,2}$", message="Enter a valid country code."
                            )
                        ],
                    ),
                    CharField(label="Phone Number"),
                    CharField(
                        label="Extension",
                        error_messages={"incomplete": "Enter an extension."},
                    ),
                    CharField(
                        label="Label", required=False, help_text="E.g. home, work."
                    ),
                )
                super().__init__(fields, *args, **kwargs)

            def compress(self, data_list):
                if data_list:
                    return "%s.%s ext. %s (label: %s)" % tuple(data_list)
                return None

        # An empty value for any field will raise a `required` error on a
        # required `MultiValueField`.
        f = PhoneField()
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean("")
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(None)
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean([])
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(["+61"])
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(["+61", "287654321", "123"])
        self.assertEqual(
            "+61.287654321 ext. 123 (label: Home)",
            f.clean(["+61", "287654321", "123", "Home"]),
        )
        with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
            f.clean(["61", "287654321", "123", "Home"])

        # Empty values for fields will NOT raise a `required` error on an
        # optional `MultiValueField`
        f = PhoneField(required=False)
        self.assertIsNone(f.clean(""))
        self.assertIsNone(f.clean(None))
        self.assertIsNone(f.clean([]))
        self.assertEqual("+61. ext.  (label: )", f.clean(["+61"]))
        self.assertEqual(
            "+61.287654321 ext. 123 (label: )", f.clean(["+61", "287654321", "123"])
        )
        self.assertEqual(
            "+61.287654321 ext. 123 (label: Home)",
            f.clean(["+61", "287654321", "123", "Home"]),
        )
        with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
            f.clean(["61", "287654321", "123", "Home"])

        # For a required `MultiValueField` with `require_all_fields=False`, a
        # `required` error will only be raised if all fields are empty. Fields
        # can individually be required or optional. An empty value for any
        # required field will raise an `incomplete` error.
        f = PhoneField(require_all_fields=False)
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean("")
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(None)
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean([])
        with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"):
            f.clean(["+61"])
        self.assertEqual(
            "+61.287654321 ext. 123 (label: )", f.clean(["+61", "287654321", "123"])
        )
        with self.assertRaisesMessage(
            ValidationError, "'Enter a complete value.', 'Enter an extension.'"
        ):
            f.clean(["", "", "", "Home"])
        with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
            f.clean(["61", "287654321", "123", "Home"])

        # For an optional `MultiValueField` with `require_all_fields=False`, we
        # don't get any `required` error but we still get `incomplete` errors.
        f = PhoneField(required=False, require_all_fields=False)
        self.assertIsNone(f.clean(""))
        self.assertIsNone(f.clean(None))
        self.assertIsNone(f.clean([]))
        with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"):
            f.clean(["+61"])
        self.assertEqual(
            "+61.287654321 ext. 123 (label: )", f.clean(["+61", "287654321", "123"])
        )
        with self.assertRaisesMessage(
            ValidationError, "'Enter a complete value.', 'Enter an extension.'"
        ):
            f.clean(["", "", "", "Home"])
        with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
            f.clean(["61", "287654321", "123", "Home"])

    def test_multivalue_optional_subfields_rendering(self):
        class PhoneWidget(MultiWidget):
            def __init__(self, attrs=None):
                widgets = [TextInput(), TextInput()]
                super().__init__(widgets, attrs)

            def decompress(self, value):
                return [None, None]

        class PhoneField(MultiValueField):
            def __init__(self, *args, **kwargs):
                fields = [CharField(), CharField(required=False)]
                super().__init__(fields, *args, **kwargs)

        class PhoneForm(Form):
            phone1 = PhoneField(widget=PhoneWidget)
            phone2 = PhoneField(widget=PhoneWidget, required=False)
            phone3 = PhoneField(widget=PhoneWidget, require_all_fields=False)
            phone4 = PhoneField(
                widget=PhoneWidget,
                required=False,
                require_all_fields=False,
            )

        # The "required" attribute follows per-subfield requiredness.
        form = PhoneForm(auto_id=False)
        self.assertHTMLEqual(
            form.as_p(),
            """
            <p>Phone1:<input type="text" name="phone1_0" required>
            <input type="text" name="phone1_1" required></p>
            <p>Phone2:<input type="text" name="phone2_0">
            <input type="text" name="phone2_1"></p>
            <p>Phone3:<input type="text" name="phone3_0" required>
            <input type="text" name="phone3_1"></p>
            <p>Phone4:<input type="text" name="phone4_0">
            <input type="text" name="phone4_1"></p>
            """,
        )

    def test_custom_empty_values(self):
        """
        Form fields can customize what is considered as an empty value for
        themselves (#19997).
        """

        class CustomJSONField(CharField):
            empty_values = [None, ""]

            def to_python(self, value):
                # Fake json.loads
                if value == "{}":
                    return {}
                return super().to_python(value)

        class JSONForm(Form):
            json = CustomJSONField()

        form = JSONForm(data={"json": "{}"})
        form.full_clean()
        self.assertEqual(form.cleaned_data, {"json": {}})

    def test_boundfield_label_tag(self):
        class SomeForm(Form):
            field = CharField()

        boundfield = SomeForm()["field"]
        testcases = [
            # (args, kwargs, expected_label, expected_legend)
            # without anything: just print the <label>/<legend>
            ((), {}, '<label for="id_field">Field:</label>', "<legend>Field:</legend>"),
            # passing just one argument: overrides the field's label
            (
                ("custom",),
                {},
                '<label for="id_field">custom:</label>',
                "<legend>custom:</legend>",
            ),
            # the overridden label is escaped
            (
                ("custom&",),
                {},
                '<label for="id_field">custom&amp;:</label>',
                "<legend>custom&amp;:</legend>",
            ),
            (
                (mark_safe("custom&"),),
                {},
                '<label for="id_field">custom&:</label>',
                "<legend>custom&:</legend>",
            ),
            # Passing attrs to add extra attributes on the <label>/<legend>
            (
                (),
                {"attrs": {"class": "pretty"}},
                '<label for="id_field" class="pretty">Field:</label>',
                '<legend class="pretty">Field:</legend>',
            ),
        ]
        for args, kwargs, expected_label, expected_legend in testcases:
            with self.subTest(args=args, kwargs=kwargs):
                self.assertHTMLEqual(
                    boundfield.label_tag(*args, **kwargs),
                    expected_label,
                )
                self.assertHTMLEqual(
                    boundfield.legend_tag(*args, **kwargs),
                    expected_legend,
                )

    def test_boundfield_label_tag_no_id(self):
        """
        If a widget has no id, label_tag() and legend_tag() return the text
        with no surrounding <label>.
        """

        class SomeForm(Form):
            field = CharField()

        boundfield = SomeForm(auto_id="")["field"]

        self.assertHTMLEqual(boundfield.label_tag(), "Field:")
        self.assertHTMLEqual(boundfield.legend_tag(), "Field:")
        # Custom label text is still escaped.
        self.assertHTMLEqual(boundfield.label_tag("Custom&"), "Custom&amp;:")
        self.assertHTMLEqual(boundfield.legend_tag("Custom&"), "Custom&amp;:")

    def test_boundfield_label_tag_custom_widget_id_for_label(self):
        class CustomIdForLabelTextInput(TextInput):
            def id_for_label(self, id):
                return "custom_" + id

        class EmptyIdForLabelTextInput(TextInput):
            def id_for_label(self, id):
                return None

        class SomeForm(Form):
            custom = CharField(widget=CustomIdForLabelTextInput)
            empty = CharField(widget=EmptyIdForLabelTextInput)

        form = SomeForm()
        self.assertHTMLEqual(
            form["custom"].label_tag(), '<label for="custom_id_custom">Custom:</label>'
        )
        self.assertHTMLEqual(
            form["custom"].legend_tag(),
            "<legend>Custom:</legend>",
        )
        self.assertHTMLEqual(form["empty"].label_tag(), "<label>Empty:</label>")
        self.assertHTMLEqual(form["empty"].legend_tag(), "<legend>Empty:</legend>")

    def test_boundfield_empty_label(self):
        class SomeForm(Form):
            field = CharField(label="")

        boundfield = SomeForm()["field"]
        self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>')
        self.assertHTMLEqual(
            boundfield.legend_tag(),
            "<legend></legend>",
        )

    def test_boundfield_id_for_label(self):
        class SomeForm(Form):
            field = CharField(label="")

        self.assertEqual(SomeForm()["field"].id_for_label, "id_field")

    def test_boundfield_id_for_label_override_by_attrs(self):
        """
        If an id is provided in `Widget.attrs`, it overrides the generated ID,
        unless it is `None`.
        """

        class SomeForm(Form):
            field = CharField(widget=TextInput(attrs={"id": "myCustomID"}))
            field_none = CharField(widget=TextInput(attrs={"id": None}))

        form = SomeForm()
        self.assertEqual(form["field"].id_for_label, "myCustomID")
        self.assertEqual(form["field_none"].id_for_label, "id_field_none")

    def test_boundfield_subwidget_id_for_label(self):
        """
        If auto_id is provided when initializing the form, the generated ID in
        subwidgets must reflect that prefix.
        """

        class SomeForm(Form):
            field = MultipleChoiceField(
                choices=[("a", "A"), ("b", "B")],
                widget=CheckboxSelectMultiple,
            )

        form = SomeForm(auto_id="prefix_%s")
        subwidgets = form["field"].subwidgets
        self.assertEqual(subwidgets[0].id_for_label, "prefix_field_0")
        self.assertEqual(subwidgets[1].id_for_label, "prefix_field_1")

    def test_boundfield_widget_type(self):
        class SomeForm(Form):
            first_name = CharField()
            birthday = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)

        f = SomeForm()
        self.assertEqual(f["first_name"].widget_type, "text")
        self.assertEqual(f["birthday"].widget_type, "splithiddendatetime")

    def test_boundfield_css_classes(self):
        form = Person()
        field = form["first_name"]
        self.assertEqual(field.css_classes(), "")
        self.assertEqual(field.css_classes(extra_classes=""), "")
        self.assertEqual(field.css_classes(extra_classes="test"), "test")
        # Duplicate class names are collapsed.
        self.assertEqual(field.css_classes(extra_classes="test test"), "test")

    def test_label_suffix_override(self):
        """
        BoundField label_suffix (if provided) overrides Form label_suffix
        """

        class SomeForm(Form):
            field = CharField()

        boundfield = SomeForm(label_suffix="!")["field"]

        self.assertHTMLEqual(
            boundfield.label_tag(label_suffix="$"),
            '<label for="id_field">Field$</label>',
        )
        self.assertHTMLEqual(
            boundfield.legend_tag(label_suffix="$"),
            "<legend>Field$</legend>",
        )

    def test_error_dict(self):
        class MyForm(Form):
            foo = CharField()
            bar = CharField()

            def clean(self):
                # Form-level error with a code and params.
                raise ValidationError(
                    "Non-field error.", code="secret", params={"a": 1, "b": 2}
                )

        form = MyForm({})
        self.assertIs(form.is_valid(), False)

        errors = form.errors.as_text()
        control = [
            "* foo\n  * This field is required.",
            "* bar\n  * This field is required.",
            "* __all__\n  * Non-field error.",
        ]
        for error in control:
            self.assertIn(error, errors)

        errors = form.errors.as_ul()
        control = [
            '<li>foo<ul class="errorlist" id="id_foo_error"><li>This field is required.'
            "</li></ul></li>",
            '<li>bar<ul class="errorlist" id="id_bar_error"><li>This field is required.'
            "</li></ul></li>",
            '<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul>'
            "</li>",
        ]
        for error in control:
            self.assertInHTML(error, errors)

        # get_json_data() / as_json() include the error codes.
        errors = form.errors.get_json_data()
        control = {
            "foo": [{"code": "required", "message": "This field is required."}],
            "bar": [{"code": "required", "message": "This field is required."}],
            "__all__": [{"code": "secret", "message": "Non-field error."}],
        }
        self.assertEqual(errors, control)
        self.assertEqual(json.dumps(errors), form.errors.as_json())

    def test_error_dict_as_json_escape_html(self):
        """#21962 - adding html escape flag to ErrorDict"""

        class MyForm(Form):
            foo = CharField()
            bar = CharField()

            def clean(self):
                raise ValidationError(
                    "<p>Non-field error.</p>",
                    code="secret",
                    params={"a": 1, "b": 2},
                )

        control = {
            "foo": [{"code": "required", "message": "This field is required."}],
            "bar": [{"code": "required", "message": "This field is required."}],
            "__all__": [{"code": "secret", "message": "<p>Non-field error.</p>"}],
        }

        form = MyForm({})
        self.assertFalse(form.is_valid())
        # By default the message is not escaped in the JSON payload.
        errors = json.loads(form.errors.as_json())
        self.assertEqual(errors, control)

        escaped_error = "&lt;p&gt;Non-field error.&lt;/p&gt;"
        self.assertEqual(
            form.errors.get_json_data(escape_html=True)["__all__"][0]["message"],
            escaped_error,
        )
        errors = json.loads(form.errors.as_json(escape_html=True))
        control["__all__"][0]["message"] = escaped_error
        self.assertEqual(errors, control)

    def test_error_list(self):
        e = ErrorList()
        e.append("Foo")
        e.append(ValidationError("Foo%(bar)s", code="foobar", params={"bar": "bar"}))

        self.assertIsInstance(e, list)
        self.assertIn("Foo", e)
        self.assertIn("Foo", ValidationError(e))

        self.assertEqual(e.as_text(), "* Foo\n* Foobar")

        self.assertEqual(
            e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
        )

        # Plain-string entries get an empty error code.
        errors = e.get_json_data()
        self.assertEqual(
            errors,
            [{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}],
        )
        self.assertEqual(json.dumps(errors), e.as_json())

    def test_error_list_class_not_specified(self):
        e = ErrorList()
        e.append("Foo")
        e.append(ValidationError("Foo%(bar)s", code="foobar", params={"bar": "bar"}))
        self.assertEqual(
            e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
        )

    def test_error_list_class_has_one_class_specified(self):
        e = ErrorList(error_class="foobar-error-class")
        e.append("Foo")
        e.append(ValidationError("Foo%(bar)s", code="foobar", params={"bar": "bar"}))
        self.assertEqual(
            e.as_ul(),
            '<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>',
        )

    def test_error_list_with_hidden_field_errors_has_correct_class(self):
        class Person(Form):
            first_name = CharField()
            last_name = CharField(widget=HiddenInput)

        p = Person({"first_name": "John"})
        # Hidden-field errors surface in the "nonfield" error list.
        self.assertHTMLEqual(
            p.as_ul(),
            """<li><ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul></li><li>
<label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" required>
<input id="id_last_name" name="last_name" type="hidden"></li>""",
        )
        self.assertHTMLEqual(
            p.as_p(),
            """
            <ul class="errorlist nonfield">
            <li>(Hidden field last_name) This field is required.</li></ul>
            <p><label for="id_first_name">First name:</label>
            <input id="id_first_name" name="first_name" type="text" value="John"
                required>
            <input id="id_last_name" name="last_name" type="hidden"></p>
            """,
        )
        self.assertHTMLEqual(
            p.as_table(),
            """<tr><td colspan="2"><ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td>
<input id="id_first_name" name="first_name" type="text" value="John" required>
<input id="id_last_name" name="last_name" type="hidden"></td></tr>""",
        )
        self.assertHTMLEqual(
            p.as_div(),
            '<ul class="errorlist nonfield"><li>(Hidden field last_name) This field '
            'is required.</li></ul><div><label for="id_first_name">First name:</label>'
            '<input id="id_first_name" name="first_name" type="text" value="John" '
            'required><input id="id_last_name" name="last_name" type="hidden"></div>',
        )

    def test_error_list_with_non_field_errors_has_correct_class(self):
        class Person(Form):
            first_name = CharField()
            last_name = CharField()

            def clean(self):
                raise ValidationError("Generic validation error")

        p = Person({"first_name": "John", "last_name": "Lennon"})
        self.assertHTMLEqual(
            str(p.non_field_errors()),
            '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>',
        )
        self.assertHTMLEqual(
            p.as_ul(),
            """<li>
<ul class="errorlist nonfield"><li>Generic validation error</li></ul></li>
<li><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" required></li>
<li><label for="id_last_name">Last name:</label>
<input id="id_last_name" name="last_name" type="text" value="Lennon" required></li>""",
        )
        self.assertHTMLEqual(
            p.non_field_errors().as_text(), "* Generic validation error"
        )
        self.assertHTMLEqual(
            p.as_p(),
            """<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" required></p>
<p><label for="id_last_name">Last name:</label>
<input id="id_last_name" name="last_name" type="text" value="Lennon" required></p>""",
        )
        self.assertHTMLEqual(
            p.as_table(),
            """
            <tr><td colspan="2"><ul class="errorlist nonfield">
            <li>Generic validation error</li></ul></td></tr>
            <tr><th><label for="id_first_name">First name:</label></th><td>
            <input id="id_first_name" name="first_name" type="text" value="John"
                required>
            </td></tr>
            <tr><th><label for="id_last_name">Last name:</label></th><td>
            <input id="id_last_name" name="last_name" type="text" value="Lennon"
                required>
            </td></tr>
            """,
        )
        self.assertHTMLEqual(
            p.as_div(),
            '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
            '<div><label for="id_first_name">First name:</label><input '
            'id="id_first_name" name="first_name" type="text" value="John" required>'
            '</div><div><label for="id_last_name">Last name:</label><input '
            'id="id_last_name" name="last_name" type="text" value="Lennon" required>'
            "</div>",
        )

    def test_error_escaping(self):
        class TestForm(Form):
            hidden = CharField(widget=HiddenInput(), required=False)
            visible = CharField()

            def clean_hidden(self):
                raise ValidationError('Foo & "bar"!')

            # Same validator for both fields; errors must be HTML-escaped.
            clean_visible = clean_hidden

        form = TestForm({"hidden": "a", "visible": "b"})
        form.is_valid()
        self.assertHTMLEqual(
            form.as_ul(),
            '<li><ul class="errorlist nonfield">'
            "<li>(Hidden field hidden) Foo &amp; &quot;bar&quot;!</li></ul></li>"
            '<li><ul class="errorlist" id="id_visible_error"><li>Foo &amp; '
            "&quot;bar&quot;!</li></ul>"
            '<label for="id_visible">Visible:</label> '
            '<input type="text" name="visible" aria-invalid="true" value="b" '
            'id="id_visible" required aria-describedby="id_visible_error">'
            '<input type="hidden" name="hidden" value="a" id="id_hidden"></li>',
        )

    def test_baseform_repr(self):
        """
        BaseForm.__repr__() should contain some basic information about the
        form.
        """
        p = Person()
        self.assertEqual(
            repr(p),
            "<Person bound=False, valid=Unknown, "
            "fields=(first_name;last_name;birthday)>",
        )
        p = Person(
            {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"}
        )
        self.assertEqual(
            repr(p),
            "<Person bound=True, valid=Unknown, "
            "fields=(first_name;last_name;birthday)>",
        )
        p.is_valid()
        self.assertEqual(
            repr(p),
            "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>",
        )
        p = Person(
            {"first_name": "John", "last_name": "Lennon", "birthday": "fakedate"}
        )
        p.is_valid()
        self.assertEqual(
            repr(p),
            "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>",
        )

    def test_baseform_repr_dont_trigger_validation(self):
        """
        BaseForm.__repr__() shouldn't trigger the form validation.
        """
        p = Person(
            {"first_name": "John", "last_name": "Lennon", "birthday": "fakedate"}
        )
        repr(p)
        # cleaned_data doesn't exist until validation runs.
        with self.assertRaises(AttributeError):
            p.cleaned_data
        self.assertFalse(p.is_valid())
        self.assertEqual(p.cleaned_data, {"first_name": "John", "last_name": "Lennon"})

    def test_accessing_clean(self):
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)

            def clean(self):
                data = self.cleaned_data

                if not self.errors:
                    data["username"] = data["username"].lower()

                return data

        f = UserForm({"username": "SirRobin", "password": "blue"})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data["username"], "sirrobin")

    def test_changing_cleaned_data_nothing_returned(self):
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)

            def clean(self):
                self.cleaned_data["username"] = self.cleaned_data["username"].lower()
                # don't return anything

        f = UserForm({"username": "SirRobin", "password": "blue"})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data["username"], "sirrobin")

    def test_changing_cleaned_data_in_clean(self):
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)

            def clean(self):
data = self.cleaned_data # Return a different dict. We have not changed # self.cleaned_data. return { "username": data["username"].lower(), "password": "this_is_not_a_secret", } f = UserForm({"username": "SirRobin", "password": "blue"}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data["username"], "sirrobin") def test_multipart_encoded_form(self): class FormWithoutFile(Form): username = CharField() class FormWithFile(Form): username = CharField() file = FileField() class FormWithImage(Form): image = ImageField() self.assertFalse(FormWithoutFile().is_multipart()) self.assertTrue(FormWithFile().is_multipart()) self.assertTrue(FormWithImage().is_multipart()) def test_html_safe(self): class SimpleForm(Form): username = CharField() form = SimpleForm() self.assertTrue(hasattr(SimpleForm, "__html__")) self.assertEqual(str(form), form.__html__()) self.assertTrue(hasattr(form["username"], "__html__")) self.assertEqual(str(form["username"]), form["username"].__html__()) def test_use_required_attribute_true(self): class MyForm(Form): use_required_attribute = True f1 = CharField(max_length=30) f2 = CharField(max_length=30, required=False) f3 = CharField(widget=Textarea) f4 = ChoiceField(choices=[("P", "Python"), ("J", "Java")]) form = MyForm() self.assertHTMLEqual( form.as_p(), '<p><label for="id_f1">F1:</label>' '<input id="id_f1" maxlength="30" name="f1" type="text" required></p>' '<p><label for="id_f2">F2:</label>' '<input id="id_f2" maxlength="30" name="f2" type="text"></p>' '<p><label for="id_f3">F3:</label>' '<textarea cols="40" id="id_f3" name="f3" rows="10" required>' "</textarea></p>" '<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' "</select></p>", ) self.assertHTMLEqual( form.as_ul(), '<li><label for="id_f1">F1:</label> ' '<input id="id_f1" maxlength="30" name="f1" type="text" required></li>' '<li><label for="id_f2">F2:</label>' '<input id="id_f2" maxlength="30" 
name="f2" type="text"></li>' '<li><label for="id_f3">F3:</label>' '<textarea cols="40" id="id_f3" name="f3" rows="10" required>' "</textarea></li>" '<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' "</select></li>", ) self.assertHTMLEqual( form.as_table(), '<tr><th><label for="id_f1">F1:</label></th>' '<td><input id="id_f1" maxlength="30" name="f1" type="text" required>' "</td></tr>" '<tr><th><label for="id_f2">F2:</label></th>' '<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>' '<tr><th><label for="id_f3">F3:</label></th>' '<td><textarea cols="40" id="id_f3" name="f3" rows="10" required>' "</textarea></td></tr>" '<tr><th><label for="id_f4">F4:</label></th><td>' '<select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' "</select></td></tr>", ) self.assertHTMLEqual( form.render(form.template_name_div), '<div><label for="id_f1">F1:</label><input id="id_f1" maxlength="30" ' 'name="f1" type="text" required></div><div><label for="id_f2">F2:</label>' '<input id="id_f2" maxlength="30" name="f2" type="text"></div><div><label ' 'for="id_f3">F3:</label><textarea cols="40" id="id_f3" name="f3" ' 'rows="10" required></textarea></div><div><label for="id_f4">F4:</label>' '<select id="id_f4" name="f4"><option value="P">Python</option>' '<option value="J">Java</option></select></div>', ) def test_use_required_attribute_false(self): class MyForm(Form): use_required_attribute = False f1 = CharField(max_length=30) f2 = CharField(max_length=30, required=False) f3 = CharField(widget=Textarea) f4 = ChoiceField(choices=[("P", "Python"), ("J", "Java")]) form = MyForm() self.assertHTMLEqual( form.as_p(), '<p><label for="id_f1">F1:</label>' '<input id="id_f1" maxlength="30" name="f1" type="text"></p>' '<p><label for="id_f2">F2:</label>' '<input id="id_f2" maxlength="30" name="f2" type="text"></p>' '<p><label for="id_f3">F3:</label>' 
'<textarea cols="40" id="id_f3" name="f3" rows="10"></textarea></p>' '<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' "</select></p>", ) self.assertHTMLEqual( form.as_ul(), '<li><label for="id_f1">F1:</label>' '<input id="id_f1" maxlength="30" name="f1" type="text"></li>' '<li><label for="id_f2">F2:</label>' '<input id="id_f2" maxlength="30" name="f2" type="text"></li>' '<li><label for="id_f3">F3:</label>' '<textarea cols="40" id="id_f3" name="f3" rows="10"></textarea></li>' '<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' "</select></li>", ) self.assertHTMLEqual( form.as_table(), '<tr><th><label for="id_f1">F1:</label></th>' '<td><input id="id_f1" maxlength="30" name="f1" type="text"></td></tr>' '<tr><th><label for="id_f2">F2:</label></th>' '<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>' '<tr><th><label for="id_f3">F3:</label></th><td>' '<textarea cols="40" id="id_f3" name="f3" rows="10">' "</textarea></td></tr>" '<tr><th><label for="id_f4">F4:</label></th><td>' '<select id="id_f4" name="f4">' '<option value="P">Python</option>' '<option value="J">Java</option>' "</select></td></tr>", ) self.assertHTMLEqual( form.render(form.template_name_div), '<div><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" ' 'name="f1" type="text"></div><div><label for="id_f2">F2:</label>' '<input id="id_f2" maxlength="30" name="f2" type="text"></div><div>' '<label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" ' 'rows="10"></textarea></div><div><label for="id_f4">F4:</label>' '<select id="id_f4" name="f4"><option value="P">Python</option>' '<option value="J">Java</option></select></div>', ) def test_only_hidden_fields(self): # A form with *only* hidden fields that has errors is going to be very # unusual. 
        class HiddenForm(Form):
            data = IntegerField(widget=HiddenInput)

        f = HiddenForm({})
        # The required-error surfaces as a non-field error ("(Hidden field
        # data) ...") since hidden fields render no visible label/row.
        self.assertHTMLEqual(
            f.as_p(),
            '<ul class="errorlist nonfield">'
            "<li>(Hidden field data) This field is required.</li></ul>\n<p> "
            '<input type="hidden" name="data" id="id_data"></p>',
        )
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><td colspan="2"><ul class="errorlist nonfield">'
            "<li>(Hidden field data) This field is required.</li></ul>"
            '<input type="hidden" name="data" id="id_data"></td></tr>',
        )

    def test_field_named_data(self):
        # A field literally named "data" must not clash with the form's own
        # `data` attribute.
        class DataForm(Form):
            data = CharField(max_length=10)

        f = DataForm({"data": "xyzzy"})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {"data": "xyzzy"})

    def test_empty_data_files_multi_value_dict(self):
        # An unbound form still exposes data/files as (empty) MultiValueDicts.
        p = Person()
        self.assertIsInstance(p.data, MultiValueDict)
        self.assertIsInstance(p.files, MultiValueDict)

    def test_field_deep_copy_error_messages(self):
        # deepcopy() of a field must copy error_messages too, so per-copy
        # mutations can't leak between field instances.
        class CustomCharField(CharField):
            def __init__(self, **kwargs):
                kwargs["error_messages"] = {"invalid": "Form custom error message."}
                super().__init__(**kwargs)

        field = CustomCharField()
        field_copy = copy.deepcopy(field)
        self.assertIsInstance(field_copy, CustomCharField)
        self.assertIsNot(field_copy.error_messages, field.error_messages)

    def test_label_does_not_include_new_line(self):
        form = Person()
        field = form["first_name"]
        self.assertEqual(
            field.label_tag(), '<label for="id_first_name">First name:</label>'
        )
        self.assertEqual(
            field.legend_tag(),
            "<legend>First name:</legend>",
        )

    @override_settings(USE_THOUSAND_SEPARATOR=True)
    def test_label_attrs_not_localized(self):
        # Numeric attr values must be rendered verbatim (9999), not localized
        # to "9,999" even with USE_THOUSAND_SEPARATOR on.
        form = Person()
        field = form["first_name"]
        self.assertHTMLEqual(
            field.label_tag(attrs={"number": 9999}),
            '<label number="9999" for="id_first_name">First name:</label>',
        )
        self.assertHTMLEqual(
            field.legend_tag(attrs={"number": 9999}),
            '<legend number="9999">First name:</legend>',
        )

    def test_remove_cached_field(self):
        # Deleting a field after the BoundField cache was populated must make
        # subsequent lookups raise KeyError (no stale cache entry).
        class TestForm(Form):
            name = CharField(max_length=10)

            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # Populate fields cache.
                [field for field in self]
                # Remove the cached field.
                del self.fields["name"]

        f = TestForm({"name": "abcde"})

        with self.assertRaises(KeyError):
            f["name"]

    def test_aria_describedby_property(self):
        class TestForm(Form):
            name = CharField(help_text="Some help text")

        # With help text and an auto id, aria_describedby points at the
        # "<id>_helptext" element.
        form = TestForm({"name": "MyName"})
        self.assertEqual(form["name"].aria_describedby, "id_name_helptext")

        # No id (auto_id=None) -> nothing to point at.
        form = TestForm(auto_id=None)
        self.assertEqual(form["name"].aria_describedby, "")

        class TestFormHidden(Form):
            name = CharField(help_text="Some help text", widget=HiddenInput)

        # Hidden widgets get no aria-describedby.
        form = TestFormHidden()
        self.assertEqual(form["name"].aria_describedby, "")

        class TestFormWithAttrs(Form):
            name = CharField(widget=TextInput(attrs={"aria-describedby": "my-id"}))

        # An explicit widget attr wins: the property returns None so the
        # widget's own value is used untouched.
        form = TestFormWithAttrs({"name": "MyName"})
        self.assertIs(form["name"].aria_describedby, None)

        class TestFormWithoutHelpText(Form):
            name = CharField()

        form = TestFormWithoutHelpText()
        self.assertEqual(form["name"].aria_describedby, "")


# Re-run the whole FormsTestCase suite against the Jinja2 form renderer.
@jinja2_tests
class Jinja2FormsTestCase(FormsTestCase):
    pass


# Renderer whose form/field templates are overridden by test fixtures.
class CustomRenderer(DjangoTemplates):
    form_template_name = "forms_tests/form_snippet.html"
    field_template_name = "forms_tests/custom_field.html"


class RendererTests(SimpleTestCase):
    def test_default(self):
        # With no renderer supplied anywhere, the project default is used.
        form = Form()
        self.assertEqual(form.renderer, get_default_renderer())

    def test_kwarg_instance(self):
        # A renderer instance passed to Form(...) is used as-is.
        custom = CustomRenderer()
        form = Form(renderer=custom)
        self.assertEqual(form.renderer, custom)

    def test_kwarg_class(self):
        # NOTE(review): this is byte-identical to test_kwarg_instance — despite
        # the name, no renderer *class* is ever passed as the kwarg. Confirm
        # whether Form(renderer=CustomRenderer) was the intended subject here.
        custom = CustomRenderer()
        form = Form(renderer=custom)
        self.assertEqual(form.renderer, custom)

    def test_attribute_instance(self):
        # A default_renderer instance on the form class is used directly.
        class CustomForm(Form):
            default_renderer = DjangoTemplates()

        form = CustomForm()
        self.assertEqual(form.renderer, CustomForm.default_renderer)

    def test_attribute_class(self):
        # A default_renderer *class* is instantiated by the form.
        class CustomForm(Form):
            default_renderer = CustomRenderer

        form = CustomForm()
        self.assertIsInstance(form.renderer, CustomForm.default_renderer)

    def test_attribute_override(self):
        class CustomForm(Form):
default_renderer = DjangoTemplates() custom = CustomRenderer() form = CustomForm(renderer=custom) self.assertEqual(form.renderer, custom) def test_get_context_errors(self): custom = CustomRenderer() form = Form(renderer=custom) context = form.get_context() self.assertEqual(context["errors"].renderer, custom) def test_boundfield_fallback(self): class RendererWithoutBoundFieldClassAttribute: form_template_name = "django/forms/div.html" formset_template_name = "django/forms/formsets/div.html" field_template_name = "django/forms/field.html" def render(self, template_name, context, request=None): return "Nice" class UserForm(Form): name = CharField() form = UserForm(renderer=RendererWithoutBoundFieldClassAttribute()) self.assertIsInstance(form["name"], BoundField) self.assertEqual(form["name"].as_field_group(), "Nice") class TemplateTests(SimpleTestCase): def test_iterate_radios(self): f = FrameworkForm(auto_id="id_%s") t = Template( "{% for radio in form.language %}" '<div class="myradio">{{ radio }}</div>' "{% endfor %}" ) self.assertHTMLEqual( t.render(Context({"form": f})), '<div class="myradio"><label for="id_language_0">' '<input id="id_language_0" name="language" type="radio" value="P" ' "required> Python</label></div>" '<div class="myradio"><label for="id_language_1">' '<input id="id_language_1" name="language" type="radio" value="J" ' "required> Java</label></div>", ) def test_iterate_checkboxes(self): f = SongForm({"composers": ["J", "P"]}, auto_id=False) t = Template( "{% for checkbox in form.composers %}" '<div class="mycheckbox">{{ checkbox }}</div>' "{% endfor %}" ) self.assertHTMLEqual( t.render(Context({"form": f})), '<div class="mycheckbox"><label>' '<input checked name="composers" type="checkbox" value="J"> ' "John Lennon</label></div>" '<div class="mycheckbox"><label>' '<input checked name="composers" type="checkbox" value="P"> ' "Paul McCartney</label></div>", ) def test_templates_with_forms(self): class UserRegistration(Form): username = CharField( 
max_length=10, help_text=("Good luck picking a username that doesn't already exist."), ) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): if ( self.cleaned_data.get("password1") and self.cleaned_data.get("password2") and self.cleaned_data["password1"] != self.cleaned_data["password2"] ): raise ValidationError("Please make sure your passwords match.") return self.cleaned_data # There is full flexibility in displaying form fields in a template. # Just pass a Form instance to the template, and use "dot" access to # refer to individual fields. However, this flexibility comes with the # responsibility of displaying all the errors, including any that might # not be associated with a particular field. t = Template( "<form>" "{{ form.username.errors.as_ul }}" "<p><label>Your username: {{ form.username }}</label></p>" "{{ form.password1.errors.as_ul }}" "<p><label>Password: {{ form.password1 }}</label></p>" "{{ form.password2.errors.as_ul }}" "<p><label>Password (again): {{ form.password2 }}</label></p>" '<input type="submit" required>' "</form>" ) f = UserRegistration(auto_id=False) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p><label>Your username: " '<input type="text" name="username" maxlength="10" required></label></p>' "<p><label>Password: " '<input type="password" name="password1" required></label></p>' "<p><label>Password (again): " '<input type="password" name="password2" required></label></p>' '<input type="submit" required>' "</form>", ) f = UserRegistration({"username": "django"}, auto_id=False) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p><label>Your username: " '<input type="text" name="username" value="django" maxlength="10" required>' "</label></p>" '<ul class="errorlist"><li>This field is required.</li></ul><p>' "<label>Password: " '<input type="password" name="password1" aria-invalid="true" required>' "</label></p>" '<ul class="errorlist"><li>This field is 
            required.</li></ul>'
            "<p><label>Password (again): "
            '<input type="password" name="password2" aria-invalid="true" required>'
            "</label></p>"
            '<input type="submit" required>'
            "</form>",
        )

        # Use form.[field].label to output a field's label. 'label' for a field
        # can be specified by using the 'label' argument to a Field class. If
        # 'label' is not specified, Django will use the field name with
        # underscores converted to spaces, and the initial letter capitalized.
        t = Template(
            "<form>"
            "<p><label>{{ form.username.label }}: {{ form.username }}</label></p>"
            "<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>"
            "<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>"
            '<input type="submit" required>'
            "</form>"
        )
        f = UserRegistration(auto_id=False)
        self.assertHTMLEqual(
            t.render(Context({"form": f})),
            "<form>"
            "<p><label>Username: "
            '<input type="text" name="username" maxlength="10" required></label></p>'
            "<p><label>Password1: "
            '<input type="password" name="password1" required></label></p>'
            "<p><label>Password2: "
            '<input type="password" name="password2" required></label></p>'
            '<input type="submit" required>'
            "</form>",
        )

        # Use form.[field].label_tag to output a field's label with a <label>
        # tag wrapped around it, but *only* if the given field has an "id"
        # attribute. Recall from above that passing the "auto_id" argument to a
        # Form gives each field an "id" attribute.
t = Template( "<form>" "<p>{{ form.username.label_tag }} {{ form.username }}" '<span {% if form.username.id_for_label %}id="' '{{ form.username.id_for_label }}_helptext"{% endif %}>' "{{ form.username.help_text}}</span></p>" "<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>" "<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>" '<input type="submit" required>' "</form>" ) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p>Username: " '<input type="text" name="username" maxlength="10" required>' "<span>Good luck picking a username that doesn't already exist.</span></p>" '<p>Password1: <input type="password" name="password1" required></p>' '<p>Password2: <input type="password" name="password2" required></p>' '<input type="submit" required>' "</form>", ) f = UserRegistration(auto_id="id_%s") self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" '<p><label for="id_username">Username:</label>' '<input id="id_username" type="text" name="username" maxlength="10" ' 'aria-describedby="id_username_helptext" required>' '<span id="id_username_helptext">' "Good luck picking a username that doesn't already exist.</span></p>" '<p><label for="id_password1">Password1:</label>' '<input type="password" name="password1" id="id_password1" required></p>' '<p><label for="id_password2">Password2:</label>' '<input type="password" name="password2" id="id_password2" required></p>' '<input type="submit" required>' "</form>", ) # Use form.[field].legend_tag to output a field's label with a <legend> # tag wrapped around it, but *only* if the given field has an "id" # attribute. Recall from above that passing the "auto_id" argument to a # Form gives each field an "id" attribute. 
t = Template( "<form>" "<p>{{ form.username.legend_tag }} {{ form.username }}</p>" "<p>{{ form.password1.legend_tag }} {{ form.password1 }}</p>" "<p>{{ form.password2.legend_tag }} {{ form.password2 }}</p>" '<input type="submit" required>' "</form>" ) f = UserRegistration(auto_id=False) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p>Username: " '<input type="text" name="username" maxlength="10" required></p>' '<p>Password1: <input type="password" name="password1" required></p>' '<p>Password2: <input type="password" name="password2" required></p>' '<input type="submit" required>' "</form>", ) f = UserRegistration(auto_id="id_%s") self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p><legend>Username:</legend>" '<input id="id_username" type="text" name="username" maxlength="10" ' 'aria-describedby="id_username_helptext" required></p>' "<p><legend>Password1:</legend>" '<input type="password" name="password1" id="id_password1" required></p>' "<p><legend>Password2:</legend>" '<input type="password" name="password2" id="id_password2" required></p>' '<input type="submit" required>' "</form>", ) # Use form.[field].help_text to output a field's help text. If the # given field does not have help text, nothing will be output. 
t = Template( "<form>" "<p>{{ form.username.label_tag }} {{ form.username }}<br>" "{{ form.username.help_text }}</p>" "<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>" "<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>" '<input type="submit" required>' "</form>" ) f = UserRegistration(auto_id=False) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p>Username: " '<input type="text" name="username" maxlength="10" required><br>' "Good luck picking a username that doesn&#x27;t already exist.</p>" '<p>Password1: <input type="password" name="password1" required></p>' '<p>Password2: <input type="password" name="password2" required></p>' '<input type="submit" required>' "</form>", ) self.assertEqual( Template("{{ form.password1.help_text }}").render(Context({"form": f})), "", ) # To display the errors that aren't associated with a particular field # e.g. the errors caused by Form.clean() -- use # {{ form.non_field_errors }} in the template. If used on its own, it # is displayed as a <ul> (or an empty string, if the list of errors is # empty). 
t = Template( "<form>" "{{ form.username.errors.as_ul }}" "<p><label>Your username: {{ form.username }}</label></p>" "{{ form.password1.errors.as_ul }}" "<p><label>Password: {{ form.password1 }}</label></p>" "{{ form.password2.errors.as_ul }}" "<p><label>Password (again): {{ form.password2 }}</label></p>" '<input type="submit" required>' "</form>" ) f = UserRegistration( {"username": "django", "password1": "foo", "password2": "bar"}, auto_id=False, ) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" "<p><label>Your username: " '<input type="text" name="username" value="django" maxlength="10" required>' "</label></p>" "<p><label>Password: " '<input type="password" name="password1" required></label></p>' "<p><label>Password (again): " '<input type="password" name="password2" required></label></p>' '<input type="submit" required>' "</form>", ) t = Template( "<form>" "{{ form.non_field_errors }}" "{{ form.username.errors.as_ul }}" "<p><label>Your username: {{ form.username }}</label></p>" "{{ form.password1.errors.as_ul }}" "<p><label>Password: {{ form.password1 }}</label></p>" "{{ form.password2.errors.as_ul }}" "<p><label>Password (again): {{ form.password2 }}</label></p>" '<input type="submit" required>' "</form>" ) self.assertHTMLEqual( t.render(Context({"form": f})), "<form>" '<ul class="errorlist nonfield">' "<li>Please make sure your passwords match.</li></ul>" "<p><label>Your username: " '<input type="text" name="username" value="django" maxlength="10" required>' "</label></p>" "<p><label>Password: " '<input type="password" name="password1" required></label></p>' "<p><label>Password (again): " '<input type="password" name="password2" required></label></p>' '<input type="submit" required>' "</form>", ) def test_basic_processing_in_view(self): class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): if ( self.cleaned_data.get("password1") 
and self.cleaned_data.get("password2") and self.cleaned_data["password1"] != self.cleaned_data["password2"] ): raise ValidationError("Please make sure your passwords match.") return self.cleaned_data def my_function(method, post_data): if method == "POST": form = UserRegistration(post_data, auto_id=False) else: form = UserRegistration(auto_id=False) if form.is_valid(): return "VALID: %r" % sorted(form.cleaned_data.items()) t = Template( '<form method="post">' "{{ form }}" '<input type="submit" required>' "</form>" ) return t.render(Context({"form": form})) # GET with an empty form and no errors. self.assertHTMLEqual( my_function("GET", {}), '<form method="post">' "<div>Username:" '<input type="text" name="username" maxlength="10" required></div>' "<div>Password1:" '<input type="password" name="password1" required></div>' "<div>Password2:" '<input type="password" name="password2" required></div>' '<input type="submit" required>' "</form>", ) # POST with erroneous data, a redisplayed form, with errors. self.assertHTMLEqual( my_function( "POST", { "username": "this-is-a-long-username", "password1": "foo", "password2": "bar", }, ), '<form method="post">' '<ul class="errorlist nonfield">' "<li>Please make sure your passwords match.</li></ul>" '<div>Username:<ul class="errorlist">' "<li>Ensure this value has at most 10 characters (it has 23).</li></ul>" '<input type="text" name="username" aria-invalid="true" ' 'value="this-is-a-long-username" maxlength="10" required></div>' "<div>Password1:" '<input type="password" name="password1" required></div>' "<div>Password2:" '<input type="password" name="password2" required></div>' '<input type="submit" required>' "</form>", ) # POST with valid data (the success message). 
self.assertEqual( my_function( "POST", { "username": "adrian", "password1": "secret", "password2": "secret", }, ), "VALID: [('password1', 'secret'), ('password2', 'secret'), " "('username', 'adrian')]", ) def test_custom_field_template(self): class MyForm(Form): first_name = CharField(template_name="forms_tests/custom_field.html") f = MyForm() self.assertHTMLEqual( f.render(), '<div><label for="id_first_name">First name:</label><p>Custom Field</p>' '<input type="text" name="first_name" required id="id_first_name"></div>', ) def test_custom_field_render_template(self): class MyForm(Form): first_name = CharField() f = MyForm() self.assertHTMLEqual( f["first_name"].render(template_name="forms_tests/custom_field.html"), '<label for="id_first_name">First name:</label><p>Custom Field</p>' '<input type="text" name="first_name" required id="id_first_name">', ) class OverrideTests(SimpleTestCase): @override_settings(FORM_RENDERER="forms_tests.tests.test_forms.CustomRenderer") def test_custom_renderer_template_name(self): class Person(Form): first_name = CharField() t = Template("{{ form }}") html = t.render(Context({"form": Person()})) expected = """ <div class="fieldWrapper"><label for="id_first_name">First name:</label> <input type="text" name="first_name" required id="id_first_name"></div> """ self.assertHTMLEqual(html, expected) @override_settings(FORM_RENDERER="forms_tests.tests.test_forms.CustomRenderer") def test_custom_renderer_field_template_name(self): class Person(Form): first_name = CharField() t = Template("{{ form.first_name.as_field_group }}") html = t.render(Context({"form": Person()})) expected = """ <label for="id_first_name">First name:</label> <p>Custom Field</p> <input type="text" name="first_name" required id="id_first_name"> """ self.assertHTMLEqual(html, expected) def test_per_form_template_name(self): class Person(Form): first_name = CharField() template_name = "forms_tests/form_snippet.html" t = Template("{{ form }}") html = 
t.render(Context({"form": Person()})) expected = """ <div class="fieldWrapper"><label for="id_first_name">First name:</label> <input type="text" name="first_name" required id="id_first_name"></div> """ self.assertHTMLEqual(html, expected) def test_errorlist_override(self): class CustomErrorList(ErrorList): template_name = "forms_tests/error.html" class CommentForm(Form): name = CharField(max_length=50, required=False) email = EmailField() comment = CharField() data = {"email": "invalid"} f = CommentForm(data, auto_id=False, error_class=CustomErrorList) self.assertHTMLEqual( f.as_p(), '<p>Name: <input type="text" name="name" maxlength="50"></p>' '<div class="errorlist">' '<div class="error">Enter a valid email address.</div></div>' "<p>Email: " '<input type="email" name="email" value="invalid" maxlength="320" ' 'aria-invalid="true" required></p><div class="errorlist">' '<div class="error">This field is required.</div></div>' '<p>Comment: <input type="text" name="comment" aria-invalid="true" ' "required></p>", ) def test_custom_renderer_error_dict(self): class CustomRenderer(DjangoTemplates): def render(self, template_name, context, request=None): if template_name == "django/forms/errors/dict/default.html": return "<strong>So many errors!</strong>" return super().render(template_name, context, request) form = Form({}, renderer=CustomRenderer()) form.full_clean() form.add_error(None, "Test error") self.assertHTMLEqual( form.errors.render(), "<strong>So many errors!</strong>", ) def test_cyclic_context_boundfield_render(self): class FirstNameForm(Form): first_name = CharField() template_name_label = "forms_tests/cyclic_context_boundfield_render.html" f = FirstNameForm() try: f.render() except RecursionError: self.fail("Cyclic reference in BoundField.render().") def test_legend_tag(self): class CustomFrameworkForm(FrameworkForm): template_name = "forms_tests/legend_test.html" required_css_class = "required" f = CustomFrameworkForm() self.assertHTMLEqual( str(f), '<label 
for="id_name" class="required">Name:</label>' '<legend class="required">Language:</legend>', ) class BoundFieldWithoutColon(BoundField): def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None): return super().label_tag( contents=contents, attrs=attrs, label_suffix="", tag=None ) class BoundFieldWithTwoColons(BoundField): def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None): return super().label_tag( contents=contents, attrs=attrs, label_suffix="::", tag=None ) class BoundFieldWithCustomClass(BoundField): def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None): attrs = attrs or {} attrs["class"] = "custom-class" return super().label_tag(contents, attrs, label_suffix, tag) class BoundFieldWithWrappingClass(BoundField): def css_classes(self, extra_classes=None): parent_classes = super().css_classes(extra_classes) return f"field-class {parent_classes}" class BoundFieldOverrideRenderer(DjangoTemplates): bound_field_class = BoundFieldWithoutColon @override_settings( FORM_RENDERER="forms_tests.tests.test_forms.BoundFieldOverrideRenderer" ) class CustomBoundFieldTest(SimpleTestCase): def test_renderer_custom_bound_field(self): t = Template("{{ form }}") html = t.render(Context({"form": Person()})) expected = """ <div><label for="id_first_name">First name</label> <input type="text" name="first_name" required id="id_first_name"></div> <div><label for="id_last_name">Last name</label> <input type="text" name="last_name" required id="id_last_name"></div><div> <label for="id_birthday">Birthday</label> <input type="text" name="birthday" required id="id_birthday"></div>""" self.assertHTMLEqual(html, expected) def test_form_custom_boundfield(self): class CustomBoundFieldPerson(Person): bound_field_class = BoundFieldWithTwoColons with self.subTest("form's BoundField takes over renderer's BoundField"): t = Template("{{ form }}") html = t.render(Context({"form": CustomBoundFieldPerson()})) expected = """ <div><label 
for="id_first_name">First name::</label> <input type="text" name="first_name" required id="id_first_name"></div> <div><label for="id_last_name">Last name::</label> <input type="text" name="last_name" required id="id_last_name"></div><div> <label for="id_birthday">Birthday::</label> <input type="text" name="birthday" required id="id_birthday"></div>""" self.assertHTMLEqual(html, expected) with self.subTest("Constructor argument takes over class property"): t = Template("{{ form }}") html = t.render( Context( { "form": CustomBoundFieldPerson( bound_field_class=BoundFieldWithCustomClass ) } ) ) expected = """ <div><label class="custom-class" for="id_first_name">First name:</label> <input type="text" name="first_name" required id="id_first_name"></div> <div><label class="custom-class" for="id_last_name">Last name:</label> <input type="text" name="last_name" required id="id_last_name"></div><div> <label class="custom-class" for="id_birthday">Birthday:</label> <input type="text" name="birthday" required id="id_birthday"></div>""" self.assertHTMLEqual(html, expected) with self.subTest("Overriding css_classes works as expected"): t = Template("{{ form }}") html = t.render( Context( { "form": CustomBoundFieldPerson( bound_field_class=BoundFieldWithWrappingClass ) } ) ) expected = """ <div class="field-class"><label for="id_first_name">First name:</label> <input type="text" name="first_name" required id="id_first_name"></div> <div class="field-class"><label for="id_last_name">Last name:</label> <input type="text" name="last_name" required id="id_last_name"></div><div class="field-class"> <label for="id_birthday">Birthday:</label> <input type="text" name="birthday" required id="id_birthday"></div>""" self.assertHTMLEqual(html, expected) def test_field_custom_bound_field(self): class BoundFieldWithTwoColonsCharField(CharField): bound_field_class = BoundFieldWithTwoColons class CustomFieldBoundFieldPerson(Person): bound_field_class = BoundField first_name = 
BoundFieldWithTwoColonsCharField() last_name = BoundFieldWithTwoColonsCharField( bound_field_class=BoundFieldWithCustomClass ) html = Template("{{ form }}").render( Context({"form": CustomFieldBoundFieldPerson()}) ) expected = """ <div><label for="id_first_name">First name::</label> <input type="text" name="first_name" required id="id_first_name"></div> <div><label class="custom-class" for="id_last_name">Last name:</label> <input type="text" name="last_name" required id="id_last_name"></div><div> <label for="id_birthday">Birthday:</label> <input type="text" name="birthday" required id="id_birthday"></div>""" self.assertHTMLEqual(html, expected)
python
github
https://github.com/django/django
tests/forms_tests/tests/test_forms.py
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import portal_wizard import share_wizard # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type import os import pwd import sys from string import ascii_letters, digits from six import string_types from six.moves import configparser from ansible.parsing.splitter import unquote from ansible.errors import AnsibleOptionsError # copied from utils, avoid circular reference fun :) def mk_boolean(value): if value is None: return False val = str(value) if val.lower() in [ "true", "t", "y", "1", "yes" ]: return True else: return False def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False): ''' return a configuration variable with casting ''' value = _get_config(p, section, key, env_var, default) if boolean: value = mk_boolean(value) if value: if integer: value = int(value) elif floating: value = float(value) elif islist: if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] elif isnone: if value == "None": value = None elif isinstance(value, string_types): value = unquote(value) return value def _get_config(p, section, key, env_var, default): ''' helper function for get_config ''' if env_var is not None: value = os.environ.get(env_var, None) if value is not None: return 
value if p is not None: try: return p.get(section, key, raw=True) except: return default return default def load_config_file(): ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' p = configparser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = os.path.expanduser(path0) if os.path.isdir(path0): path0 += "/ansible.cfg" path1 = os.getcwd() + "/ansible.cfg" path2 = os.path.expanduser("~/.ansible.cfg") path3 = "/etc/ansible/ansible.cfg" for path in [path0, path1, path2, path3]: if path is not None and os.path.exists(path): try: p.read(path) except configparser.Error as e: raise AnsibleOptionsError("Error reading config file: \n{0}".format(e)) return p, path return None, '' def shell_expand_path(path): ''' shell_expand_path is needed as os.path.expanduser does not work when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE ''' if path: path = os.path.expanduser(os.path.expandvars(path)) return path p, CONFIG_FILE = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] # check all of these extensions when looking for yaml files for things like # group variables -- really anything we can load YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] # sections in config file DEFAULTS='defaults' DEPRECATED_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts')) # generally configurable things DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST)) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, 
DEFAULTS, 'module_name', None, 'command') DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8') DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True) DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True) DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None)) DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart') DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True) DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True) DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}') DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 
'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True) DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) ### PRIVILEGE ESCALATION ### # Backwards Compat DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) # Become BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 
'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas'] DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None) DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None) DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # PLUGINS DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True) # paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins') DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = 
get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins') DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') # cache CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True) # Display ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True) ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True) HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True) SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True) DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True) DEFAULT_CALLABLE_WHITELIST = get_config(p, 
DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True) # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) # obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True) ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 
'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True) ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys') ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700') ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600') ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) # galaxy related DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None DEFAULT_SUBSET = None DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024 TREE_DIR = None LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
unknown
codeparrot/codeparrot-clean
# This file is part of Buildbot.  Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

from __future__ import absolute_import
from __future__ import print_function

from twisted.trial import unittest

from buildbot_worker.commands import registry
from buildbot_worker.commands import shell


class Registry(unittest.TestCase):
    """Tests for the worker command registry lookup helpers."""

    def test_getFactory(self):
        """A known command name resolves to its command class."""
        factory = registry.getFactory('shell')
        self.assertEqual(factory, shell.WorkerShellCommand)

    def test_getFactory_KeyError(self):
        """An unknown command name raises KeyError."""
        # Pass the callable and its argument directly; assertRaises applies
        # the arguments itself, so the lambda wrapper is unnecessary.
        self.assertRaises(KeyError, registry.getFactory, 'nosuchcommand')

    def test_getAllCommandNames(self):
        """The canonical 'shell' command is always registered."""
        # assertIn gives a more useful failure message than assertTrue(x in y).
        self.assertIn('shell', registry.getAllCommandNames())

    def test_all_commands_exist(self):
        """Every advertised command name maps to a real factory."""
        # If any lookup raises KeyError, the test fails with that error.
        for n in registry.getAllCommandNames():
            registry.getFactory(n)
unknown
codeparrot/codeparrot-clean
class Solution:
    """Word Search (LeetCode 79).

    Decide whether ``word`` can be traced on ``board`` by stepping between
    horizontally or vertically adjacent cells, using each cell at most once.
    Modernized from Python 2 (``xrange``, ``print`` statements) and the four
    copy-pasted neighbor branches were collapsed into one loop.
    """

    def __init__(self):
        # Mutable working copy of the board and the target word, kept as
        # instance state so the recursive DFS does not have to pass them.
        self.board = []
        self.word = ""

    def exist(self, board, word):
        """Return True if ``word`` can be formed on ``board``.

        ``board`` rows may be strings or lists of single-character strings;
        an empty ``word`` is trivially found (matches original behavior).
        """
        self.word = word
        if not word:
            return True
        # Work on a list-of-lists copy so cells can be marked in place
        # without mutating the caller's board.
        self.board = [list(row) for row in board]
        for i in range(len(self.board)):
            for j in range(len(self.board[0])):
                # Only start a search from cells matching the first letter.
                if self.board[i][j] == word[0] and self._dfs(i, j, 1):
                    return True
        return False

    def _dfs(self, row, col, matched):
        """Backtracking helper: ``matched`` characters are already placed,
        the most recent at (row, col).  Returns True on a full match."""
        if matched == len(self.word):
            return True
        target = self.word[matched]
        saved = self.board[row][col]
        self.board[row][col] = "."  # mark visited; "." never matches a letter
        try:
            # Up, down, left, right -- one loop instead of four copies.
            for r, c in ((row - 1, col), (row + 1, col),
                         (row, col - 1), (row, col + 1)):
                if (0 <= r < len(self.board) and
                        0 <= c < len(self.board[0]) and
                        self.board[r][c] == target and
                        self._dfs(r, c, matched + 1)):
                    return True
        finally:
            self.board[row][col] = saved  # unmark on backtrack
        return False


if __name__ == "__main__":
    solution = Solution()
    print(solution.exist(["aaa", "aaa", "aab"], "aaaaaaaab"))
unknown
codeparrot/codeparrot-clean
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-short-docstring-punctuation """## Communicating Between Processes with MPI TensorFlow natively provides inter-device communication through send and receive ops and inter-node communication through Distributed TensorFlow, based on the same send and receive abstractions. On HPC clusters where Infiniband or other high-speed node interconnects are available, these can end up being insufficient for synchronous data-parallel training (without asynchronous gradient descent). This module implements a variety of MPI ops which can take advantage of hardware-specific MPI libraries for efficient communication. In order to use this module, TensorFlow must be built with an MPI library, which can be provided to the `./configure` script at build time. As a user of TensorFlow, you will need to build TensorFlow yourself to select the MPI library to use; to do so, follow the [instructions for building TensorFlow from source](https://www.tensorflow.org/get_started/os_setup#installing_from_sources). ### Utility Ops In addition to reductions and gathers, this module provides utility operations for detecting the running MPI configuration. 
Example: ```python from tensorflow.contrib import mpi # Use `mpi.Session` instead of `tf.Session` with mpi.Session() as session: rank = session.run(mpi.rank()) print("My MPI Rank:", rank) if rank == 0: print("MPI Size:", session.run(mpi.size())) ``` @@rank @@size ### Ring Allreduce and Allgather When summing or averaging tensors across many processes, communication can easily become a bottleneck. A naive implementation will send all the tensor values to the same process, perform the reduction, and then broadcast the values back to all other processes, effectively creating a synchronous parameter server in one process. However, the process responsible for performing the reduction will have to receive and send a massive amount of data which scales with the number of processes *and* the number of parameters in the model. Instead of centralizing the reduction and having one primary reducer, we can implement a distributed allreduce or allgather. A bandwidth-optimal allreduce will end up sending 2(N - 1) values for every value in the input tensor, and can be implemented with a ring allreduce [1]. (Intuitively, a linear reduce requires at least (N - 1) sends between the different nodes, and a broadcast of the result also requires (N - 1) sends, for a total of 2 (N - 1); these two steps cannot be combined in a clever way to reduce the number of required sends.) This module implements bandwidth-optimal ring allreduce and ring allgather operations using MPI; by choosing a hardware-appropriate MPI implementation (such as OpenMPI with CUDA-IPC support), you can train large models with synchronous gradient descent with minimal communication overhead. In addition to the `allreduce` and `allgather` functions, a convenience `DistributedOptimizer` wrapper is provided to simplify using these functions for reducing model gradients. 
Example: ```python import tensorflow as tf from tensorflow.contrib import mpi_collectives as mpi # Construct a simple linear regression model to optimize W = tf.get_variable("W", shape=[20, 1], dtype=tf.float32) B = tf.get_variable("B", shape=[1, 1], dtype=tf.float32) inputs = tf.placeholder("Inputs", shape=[None, 20]) outputs = tf.placeholder("Outputs", shape=[None, 1]) loss = tf.nn.l2_loss(tf.matmul(inputs, W) + B - outputs) # Training using MPI allreduce with DistributedOptimizer optimizer = mpi.DistributedOptimizer(tf.train.AdamOptimizer()) train = optimizer.minimize(loss) # Average loss over all ranks, for printing. # Do not pass this to an optimizer! avg_loss = mpi.allreduce(loss) # On different ranks, feed different input data. with mpi.Session() as session: rank = session.run(mpi.rank()) batch_inputs, batch_outputs = construct_batch_for_rank(rank) feed_dict = {inputs: batch_inputs, outputs: batch_outputs} _, l = session.run([train, avg_loss], feed_dict=feed_dict) print("Average Loss:", l) ``` [1] Patarasuk, Pitch and Yuan, Xin. "Bandwidth Optimal All-reduce Algorithms for Clusters of Workstations". @@Session @@DistributedOptimizer @@allreduce @@allgather """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.contrib.mpi_collectives.mpi_ops import size from tensorflow.contrib.mpi_collectives.mpi_ops import rank from tensorflow.contrib.mpi_collectives.mpi_ops import local_rank from tensorflow.contrib.mpi_collectives.mpi_ops import allgather from tensorflow.contrib.mpi_collectives.mpi_ops import _allreduce from tensorflow.contrib.mpi_collectives.mpi_ops import init def allreduce(tensor, average=True): """Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices. Arguments: tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce. The shape of the input must be identical across all ranks. average: If True, computes the average over all ranks. 
Otherwise, computes the sum over all ranks. This function performs a bandwidth-optimal ring allreduce on the input tensor. If the input is an tf.IndexedSlices, the function instead does an allgather on the values and the indices, effectively doing an allreduce on the represented tensor. """ if isinstance(tensor, tf.IndexedSlices): # For IndexedSlices, do two allgathers intead of an allreduce. mpi_size = tf.cast(size(), tensor.values.dtype) values = allgather(tensor.values) indices = allgather(tensor.indices) # To make this operation into an average, divide all gathered values by # the MPI size. new_values = tf.div(values, mpi_size) if average else values return tf.IndexedSlices(new_values, indices, dense_shape=tensor.dense_shape) else: mpi_size = tf.cast(size(), tensor.dtype) summed_tensor = _allreduce(tensor) new_tensor = (tf.div(summed_tensor, mpi_size) if average else summed_tensor) return new_tensor class DistributedOptimizer(tf.train.Optimizer): """An optimizer that wraps another tf.Optimizer, using an MPI allreduce to average gradient values before applying gradients to model weights.""" def __init__(self, optimizer, name=None, use_locking=False): """Construct a new DistributedOptimizer, which uses another optimizer under the hood for computing single-process gradient values and applying gradient updates after the gradient values have been averaged across all the MPI ranks. Args: optimizer: Optimizer to use for computing gradients and applying updates. name: Optional name prefix for the operations created when applying gradients. Defaults to "Distributed" followed by the provided optimizer type. use_locking: Whether to use locking when updating variables. See Optimizer.__init__ for more info. 
""" if name is None: name = "Distributed{}".format(type(optimizer).__name__) self._optimizer = optimizer super(DistributedOptimizer, self).__init__( name=name, use_locking=use_locking) def compute_gradients(self, *args, **kwargs): """Compute gradients of all trainable variables. See Optimizer.compute_gradients() for more info. In DistributedOptimizer, compute_gradients() is overriden to also allreduce the gradients before returning them. """ gradients = (super(DistributedOptimizer, self) .compute_gradients(*args, **kwargs)) return [(allreduce(gradient), var) for (gradient, var) in gradients] def _apply_dense(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._apply_dense(*args, **kwargs) def _apply_sparse(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._apply_sparse(*args, **kwargs) def _apply_sparse_duplicate_indices(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._apply_sparse_duplicate_indices(*args, **kwargs) def _prepare(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._prepare(*args, **kwargs) def _create_slots(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._create_slots(*args, **kwargs) def _valid_dtypes(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._valid_dtypes(*args, **kwargs) def _finish(self, *args, **kwargs): """Calls this same method on the underlying optimizer.""" return self._optimizer._finish(*args, **kwargs) class Session(tf.Session): """A class for running TensorFlow operations, with copies of the same graph running distributed across different MPI nodes. 
The primary difference between `tf.Session` and `tf.contrib.mpi_collectives.Session` is that the MPI `Session` ensures that the `Session` options are correct for use with `tf.contrib.mpi`, and initializes MPI immediately upon the start of the session. """ def __init__(self, target='', graph=None, config=None): """Creates a new TensorFlow MPI session. Unlike a normal `tf.Session`, an MPI Session may only use a single GPU, which must be specified in advance before the session is initialized. In addition, it only uses a single graph evaluation thread, and initializes MPI immediately upon starting. If no `graph` argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with `tf.Graph()` in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. graph: (Optional.) The `Graph` to be launched (described above). config: (Optional.) A `ConfigProto` protocol buffer with configuration options for the session. """ super(Session, self).__init__(target, graph, config=config) # Initialize MPI on the relevant device. # TODO: Move this to library load and eliminate mpi.Session() if graph is None: graph = tf.get_default_graph() with graph.as_default(): self.run(init())
unknown
codeparrot/codeparrot-clean