language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/scopes.py | {
"start": 1298,
"end": 2711
} | class ____:
"""Base class for wrapping an object and exposing its attributes in template scope.
This allows us to expose Python objects (like modules or context objects) to Jinja templates
with explicit attribute whitelisting.
Args:
wrapped_object: The object to wrap and expose to templates
accessible_attributes: Set of attribute names that are allowed to be accessed from templates.
Only these attributes will be accessible via {{ scope.attr }}
"""
def __init__(self, wrapped_object: Any, accessible_attributes: set[str]):
self._wrapped_object = wrapped_object
self._accessible_attributes = accessible_attributes
def __getattr__(self, name: str):
"""Allow access to whitelisted wrapped object attributes."""
# jinja2 applies a hasattr check to any scope fn - we avoid raising our own exception
# for this access
if name.startswith("jinja") or name.startswith("_"):
raise AttributeError(f"{name} not found")
# Check if this attribute is whitelisted
if name not in self._accessible_attributes:
raise AttributeError(
f"Attribute '{name}' is not accessible. "
f"Available attributes: {', '.join(sorted(self._accessible_attributes))}"
)
return getattr(self._wrapped_object, name)
| WrappedObjectScope |
python | gevent__gevent | src/greentest/3.13/test_socket.py | {
"start": 26983,
"end": 80669
} | class ____(unittest.TestCase):
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_socket_type(self):
self.assertTrue(gc.is_tracked(_socket.socket))
with self.assertRaisesRegex(TypeError, "immutable"):
_socket.socket.foo = 1
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipIf(support.is_wasi, "WASI is missing these methods")
def test_socket_methods(self):
# socket methods that depend on a configure HAVE_ check. They should
# be present on all platforms except WASI.
names = [
"_accept", "bind", "connect", "connect_ex", "getpeername",
"getsockname", "listen", "recvfrom", "recvfrom_into", "sendto",
"setsockopt", "shutdown"
]
for name in names:
if not hasattr(socket.socket, name):
self.fail(f"socket method {name} is missing")
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS and AT&T, may successfully
# resolve these IPs. In particular, AT&T's DNS Error Assist service
# will break this test. See https://bugs.python.org/issue42092 for a
# workaround.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OverflowError, socket.if_indextoname, -1)
self.assertRaises(OverflowError, socket.if_indextoname, 2**1000)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
if hasattr(socket, 'if_nameindex'):
indices = dict(socket.if_nameindex())
for index in indices:
index2 = index + 2**32
if index2 not in indices:
with self.assertRaises((OverflowError, OSError)):
socket.if_indextoname(index2)
for index in 2**32-1, 2**64-1:
if index not in indices:
with self.assertRaises((OverflowError, OSError)):
socket.if_indextoname(index)
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
@unittest.skipIf(_testcapi is None, "requires _testcapi")
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = (
l_bad_values +
[_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
[1 << 16, _testcapi.INT_MAX]
)
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (
sys.platform.startswith(
('linux', 'android', 'freebsd', 'netbsd', 'gnukfreebsd'))
or is_apple
):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue gh-71123: this fails on Android before API level 23.
if not (support.is_android and platform.android_ver().api_level < 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: when the protocol is omitted, this fails on Android
# before API level 28.
if not (support.is_android and platform.android_ver().api_level < 28):
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind(("0.0.0.0", port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: this fails on Android before API level 23.
if not (support.is_android and platform.android_ver().api_level < 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(repr(family), '<AddressFamily.AF_INET: %r>' % family.value)
self.assertEqual(str(family), str(family.value))
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(repr(type), '<SocketKind.SOCK_STREAM: %r>' % type.value)
self.assertEqual(str(type), str(type.value))
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
@unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_getaddrinfo_int_port_overflow(self):
# gh-74895: Test that getaddrinfo does not raise OverflowError on port.
#
# POSIX getaddrinfo() never specify the valid range for "service"
# decimal port number values. For IPv4 and IPv6 they are technically
# unsigned 16-bit values, but the API is protocol agnostic. Which values
# trigger an error from the C library function varies by platform as
# they do not all perform validation.
# The key here is that we don't want to produce OverflowError as Python
# prior to 3.12 did for ints outside of a [LONG_MIN, LONG_MAX] range.
# Leave the error up to the underlying string based platform C API.
from _testcapi import ULONG_MAX, LONG_MAX, LONG_MIN
try:
socket.getaddrinfo(None, ULONG_MAX + 1, type=socket.SOCK_STREAM)
except OverflowError:
# Platforms differ as to what values constitute a getaddrinfo() error
# return. Some fail for LONG_MAX+1, others ULONG_MAX+1, and Windows
# silently accepts such huge "port" aka "service" numeric values.
self.fail("Either no error or socket.gaierror expected.")
except socket.gaierror:
pass
try:
socket.getaddrinfo(None, LONG_MAX + 1, type=socket.SOCK_STREAM)
except OverflowError:
self.fail("Either no error or socket.gaierror expected.")
except socket.gaierror:
pass
try:
socket.getaddrinfo(None, LONG_MAX - 0xffff + 1, type=socket.SOCK_STREAM)
except OverflowError:
self.fail("Either no error or socket.gaierror expected.")
except socket.gaierror:
pass
try:
socket.getaddrinfo(None, LONG_MIN - 1, type=socket.SOCK_STREAM)
except OverflowError:
self.fail("Either no error or socket.gaierror expected.")
except socket.gaierror:
pass
socket.getaddrinfo(None, 0, type=socket.SOCK_STREAM) # No error expected.
socket.getaddrinfo(None, 0xffff, type=socket.SOCK_STREAM) # No error expected.
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(TimeoutError, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
@unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
def test_getfqdn_filter_localhost(self):
self.assertEqual(socket.getfqdn(), socket.getfqdn("0.0.0.0"))
self.assertEqual(socket.getfqdn(), socket.getfqdn("::"))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Max OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(repr(s.family), '<AddressFamily.AF_INET: %r>' % s.family.value)
self.assertEqual(repr(s.type), '<SocketKind.SOCK_STREAM: %r>' % s.type.value)
self.assertEqual(str(s.family), str(s.family.value))
self.assertEqual(str(s.type), str(s.type.value))
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
unix_name = socket_helper.create_unix_domain_name()
self.addCleanup(os_helper.unlink, unix_name)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with s:
try:
s.bind(unix_name)
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
def test_addressfamily_enum(self):
import _socket, enum
CheckedAddressFamily = enum._old_convert_(
enum.IntEnum, 'AddressFamily', 'socket',
lambda C: C.isupper() and C.startswith('AF_'),
source=_socket,
)
enum._test_simple_enum(CheckedAddressFamily, socket.AddressFamily)
def test_socketkind_enum(self):
import _socket, enum
CheckedSocketKind = enum._old_convert_(
enum.IntEnum, 'SocketKind', 'socket',
lambda C: C.isupper() and C.startswith('SOCK_'),
source=_socket,
)
enum._test_simple_enum(CheckedSocketKind, socket.SocketKind)
def test_msgflag_enum(self):
import _socket, enum
CheckedMsgFlag = enum._old_convert_(
enum.IntFlag, 'MsgFlag', 'socket',
lambda C: C.isupper() and C.startswith('MSG_'),
source=_socket,
)
enum._test_simple_enum(CheckedMsgFlag, socket.MsgFlag)
def test_addressinfo_enum(self):
import _socket, enum
CheckedAddressInfo = enum._old_convert_(
enum.IntFlag, 'AddressInfo', 'socket',
lambda C: C.isupper() and C.startswith('AI_'),
source=_socket)
enum._test_simple_enum(CheckedAddressInfo, socket.AddressInfo)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
| GeneralModuleTests |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_micro_gemm.py | {
"start": 49582,
"end": 58045
} | class ____(CppMicroGemmFP32Vec):
"""
This class generates the code for WoQ int4 micro gemm using AVX512 intrinsics.
It is based on the corresponding ATen kernel.
Shape of packed weight = [N // 64, K, 32], viewed as [N, K // 2]
Shape of packed ScalesAndZeros = [K // group_size, N, 2]
"""
TEMPLATE_ENTRY = r"""
{{declare_kernel}} {
{{kernel.assert_function}}(N % {{block_n}} == 0, "N dimension must be multiple of {{block_n}}");
{{kernel.assert_function}}(K % {{block_k}} == 0, "K dimension must be multiple of {{block_k}}");
auto group_size = q_group_size;
for (int64_t m = 0; m < M; m += {{block_m}}) {
int64_t block_m = std::min<int64_t>(M - m, {{block_m}});
for (int64_t n = 0; n < N; n += {{block_n}}) {
if (block_m == {{block_m}}) {
{{kernel_name}}_kernel<{{block_m}}, {{block_n}}, accum>(
A + m * lda,
reinterpret_cast<const uint8_t*>(B) + n * ldb,
C + m * ldc + n,
K,
lda,
/* ldb */ {{block_n}} / 2,
ldc,
group_size,
ScaleAndZeros + n * 2,
lds,
k_start
);
} else {
switch (block_m) {
{%- for b in range(block_m - 1, 0, -1) %}
case {{b}}:
{{kernel_name}}_kernel<{{b}}, {{block_n}}, accum>(
A + m * lda,
reinterpret_cast<const uint8_t*>(B) + n * ldb,
C + m * ldc + n,
K,
lda,
/* ldb */ {{block_n}} / 2,
ldc,
group_size,
ScaleAndZeros + n * 2,
lds,
k_start
);
break;
{%- endfor %}
default:
{{kernel.assert_function}}(false, "Unsupported block_m: ", block_m);
}
}
}
}
}
"""
TEMPLATE_KERNEL = r"""
inline bool {{kernel_name}}_is_block_start(int index, int k_start, int group_size) {
return (k_start + index) % group_size == 0;
}
inline __m128i {{kernel_name}}_convert_int4_to_int8(const uint8_t* data) {
__m128i tmp = _mm_loadu_si64((const __m128i*)data);
__m128i bytes = _mm_cvtepu8_epi16(tmp);
const __m128i lowMask = _mm_set1_epi8(0xF);
__m128i high = _mm_andnot_si128(lowMask, bytes);
__m128i low = _mm_and_si128(lowMask, bytes);
high = _mm_slli_epi16(high, 4);
bytes = _mm_or_si128(low, high);
return bytes;
}
template <int64_t BLOCK_M, int64_t BLOCK_N, bool accum>
inline void {{kernel_name}}_kernel(
const {{input_t}}* {{restrict_keyword}} A,
const uint8_t* {{restrict_keyword}} B,
{{output_t}}* {{restrict_keyword}} C,
int64_t K,
int64_t lda,
int64_t ldb,
int64_t ldc,
int64_t q_group_size,
const at::BFloat16* {{restrict_keyword}} ScaleAndZeros,
int64_t lds, // leading dimension of ScaleAndZeros
int64_t k_start) {
constexpr int BLOCK_K = {{block_k}};
constexpr int ROWS = BLOCK_M;
constexpr int COLS = BLOCK_N / 16;
const int PREFETCH_SIZE_K = 16 * 4;
const int PREFETCH_SIZE_KB = (PREFETCH_SIZE_K + BLOCK_K - 1) / BLOCK_K;
// number of blocks on K
const int KB = K / BLOCK_K;
__m512 va;
__m512 vb[COLS];
__m512 vc[ROWS * COLS];
__m512 scale[COLS];
__m512 zero[COLS];
// Lookup table to de-quantize int4 values to bf16.
// Values are dequantized as truly int4 [-8, 7] range;
//
// dequant = (bf16(int4_value) * bf16_scale) + bf16_zero
//
static const __m512 lut = _mm512_set_ps(
7.0f, 6.0f, 5.0f, 4.0f,
3.0f, 2.0f, 1.0f, 0.0f,
-1.0f, -2.0f, -3.0f, -4.0f,
-5.0f, -6.0f, -7.0f, -8.0f);
// index for transpose
static const __m512i idx1 = _mm512_set_epi32(
30, 28, 26, 24, 22, 20, 18, 16,
14, 12, 10, 8, 6, 4, 2, 0);
static const __m512i idx2 = _mm512_set_epi32(
31, 29, 27, 25, 23, 21, 19, 17,
15, 13, 11, 9, 7, 5, 3, 1);
// load scale and zero point
auto load_scale_and_zeros = [&](int i, int _kb) {
// load 2x bfloat16 vector
__m512i t = _mm512_loadu_si512((__m512i*)(ScaleAndZeros + _kb * lds + 32 * i));
_mm_prefetch(ScaleAndZeros + (_kb + PREFETCH_SIZE_KB) * lds + 32 * i, _MM_HINT_T0);
// convert to 2x f32 vector
__m512 a, b;
at::vec::cvtbf16_fp32(t, a, b);
// transpose scale_and_zero from {16, 2} to {2, 16}
// inputs:
// a: {s0, z0, s1, z1, ..., s7, z7}
// b: {s8, z8, s9, z9, ..., s15, z15}
// output:
// scale: {s0, s1, s2, ..., s15}
// zero: {z0, z1, z2, ..., z15}
scale[i] = _mm512_mask_permutex2var_ps(a, 0xffff, idx1, b);
zero[i] = _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b);
};
auto loadc = [&](auto i) {
if constexpr (accum) {
constexpr int row = i / COLS;
constexpr int col = i % COLS;
vc[i] = _mm512_loadu_ps(C + row * ldc + col * 16);
} else {
vc[i] = _mm512_setzero_ps();
}
};
c10::ForcedUnroll<ROWS * COLS>{}(loadc);
auto compute = [&, COLS](auto i, int k) {
constexpr int row = i / COLS;
constexpr int col = i % COLS;
if constexpr (col == 0) {
float aa = static_cast<float>(A[row * lda + k]);
_mm_prefetch(A + row * lda + k + PREFETCH_SIZE_K, _MM_HINT_T0);
va = _mm512_set1_ps(aa);
}
if constexpr (row == 0) {
if constexpr (COLS == 4) {
// when BLOCK_N = 64, handle each row at a time
// to reduce de-quantize overhead.
if constexpr (col == 0) {
__m256i b4 = _mm256_loadu_si256((__m256i*)(B + k * ldb));
_mm_prefetch(B + (k + PREFETCH_SIZE_K) * ldb, _MM_HINT_T0);
__m512i b32 = _mm512_cvtepu8_epi32(_mm256_castsi256_si128(b4));
vb[0] = _mm512_permutexvar_ps(b32, lut);
vb[0] = _mm512_fmadd_ps(vb[0], scale[0], zero[0]);
vb[2] = _mm512_permutexvar_ps(_mm512_srli_epi32(b32, 4), lut);
vb[2] = _mm512_fmadd_ps(vb[2], scale[2], zero[2]);
b32 = _mm512_cvtepu8_epi32(_mm256_extracti128_si256(b4, 1));
vb[1] = _mm512_permutexvar_ps(b32, lut);
vb[1] = _mm512_fmadd_ps(vb[1], scale[1], zero[1]);
vb[3] = _mm512_permutexvar_ps(_mm512_srli_epi32(b32, 4), lut);
vb[3] = _mm512_fmadd_ps(vb[3], scale[3], zero[3]);
}
} else {
__m128i b8 = {{kernel_name}}_convert_int4_to_int8(B + k * ldb + col * 8);
__m512i b32 = _mm512_cvtepu8_epi32(b8);
vb[col] = _mm512_permutexvar_ps(b32, lut);
vb[col] = _mm512_fmadd_ps(vb[col], scale[col], zero[col]);
}
}
constexpr int idx = row * COLS + col;
vc[idx] = _mm512_fmadd_ps(va, vb[col], vc[idx]);
};
for (int k = 0, kb = 0; k < K; ++k) {
if ({{kernel_name}}_is_block_start(k, k_start, q_group_size)) {
c10::ForcedUnroll<COLS>{}(load_scale_and_zeros, kb++);
}
c10::ForcedUnroll<ROWS * COLS>{}(compute, k);
}
//store to C
auto storec = [&, COLS](auto i) {
constexpr int row = i / COLS;
constexpr int col = i % COLS;
_mm512_storeu_ps(C + row * ldc + col * 16, vc[i]);
};
c10::ForcedUnroll<ROWS * COLS>{}(storec);
}
"""
def get_kernel_extra_args_declare(self) -> str:
return (
"const int64_t q_group_size,\n"
" const at::BFloat16* __restrict__ ScaleAndZeros,\n"
" const int64_t lds,\n"
" int64_t k_start,"
)
def get_kernel_extra_args(self, **kwargs) -> list[str]:
assert "kernel" in kwargs
assert "qscale_and_zeros" in kwargs
kernel = kwargs["kernel"]
qscale_and_zeros = kwargs["qscale_and_zeros"]
return [
"group_size,",
f"&({kernel.index(qscale_and_zeros, [0, 0, 0])}),",
"N * 2,", # lds
"k_start,",
]
def is_woq_int4(self):
return True
@register_micro_gemm(
*generate_gemm_config(
VecAMX,
[ # (block_m, block_n, block_k)
(16, 32, 32),
(32, 32, 32),
],
input_dtype=torch.bfloat16,
input2_dtype=torch.uint8,
output_dtype=torch.float,
compute_dtype=torch.float,
extra_check=check_amx_extra,
),
)
| CppMicroGemmWoQInt4Avx512 |
python | pytorch__pytorch | torch/utils/hipify/hipify_python.py | {
"start": 3566,
"end": 4299
} | class ____:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# To the programmer, the output of hipify most likely are intermediates.
# This class allows users of hipify to ask for a cleanup by running the
# hipify and compilation in a with instantiating this context manager class
# with keep_intermediates=False.
# The main usecase is the cpp_extensions, specifically the load method.
# It is a good idea to keep intermediates (in case of errors or to
# not recompile unchanged files), but in cases where you don't want to
# keep them (e.g. in the CI), this can be used to remove files.
| bcolors |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 5927,
"end": 6022
} | class ____(BaseModel):
"""Container for review nodes."""
nodes: list[ReviewNode]
| Reviews |
python | pdm-project__pdm | src/pdm/models/requirements.py | {
"start": 8235,
"end": 8614
} | class ____(Requirement):
def as_line(self) -> str:
extras = f"[{','.join(sorted(self.extras))}]" if self.extras else ""
return f"{self.project_name}{extras}{self.specifier or ''}{self._format_marker()}"
# Cache for checked paths to avoid checking the same path multiple times
_checked_paths: set[Path] = set()
@dataclasses.dataclass(eq=False)
| NamedRequirement |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 24151,
"end": 64950
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Monster()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsMonster(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def MonsterBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# Monster
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Monster
def Pos(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = o + self._tab.Pos
obj = Vec3()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Mana(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 150
# Monster
def Hp(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 100
# Monster
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Monster
def Inventory(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def InventoryAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def InventoryLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def InventoryIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
# Monster
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 8
# Monster
def TestType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def Test(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def Test4(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Test4Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Test4IsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
# Monster
def Testarrayofstring(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Monster
def TestarrayofstringLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayofstringIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
return o == 0
# an example documentation comment: this will end up in the generated code
# multiline too
# Monster
def Testarrayoftables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
obj = Monster()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def TestarrayoftablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayoftablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
return o == 0
# Monster
def Enemy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
obj = Monster()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Testnestedflatbuffer(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestnestedflatbufferAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def TestnestedflatbufferNestedRoot(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
from MyGame.Example.Monster import Monster
return Monster.GetRootAs(self._tab.Bytes, self._tab.Vector(o))
return 0
# Monster
def TestnestedflatbufferLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestnestedflatbufferIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
return o == 0
# Monster
def Testempty(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
obj = Stat()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Testbool(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Monster
def Testhashs32Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu32Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(38))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs64Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(40))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu64Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(42))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs32Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(44))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu32Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(46))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs64Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(48))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu64Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(50))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def Testarrayofbools(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestarrayofboolsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# Monster
def TestarrayofboolsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayofboolsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
return o == 0
# Monster
def Testf(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(54))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 3.14159
# Monster
def Testf2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(56))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 3.0
# Monster
def Testf3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(58))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# Monster
def Testarrayofstring2(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Monster
def Testarrayofstring2Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Testarrayofstring2IsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
return o == 0
# Monster
def Testarrayofsortedstruct(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 8
obj = Ability()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def TestarrayofsortedstructLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayofsortedstructIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
return o == 0
# Monster
def Flex(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def FlexAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def FlexLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def FlexIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
return o == 0
# Monster
def Test5(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Test5Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Test5IsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
return o == 0
# Monster
def VectorOfLongs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfLongsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# Monster
def VectorOfLongsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfLongsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
return o == 0
# Monster
def VectorOfDoubles(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfDoublesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
# Monster
def VectorOfDoublesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfDoublesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
return o == 0
# Monster
def ParentNamespaceTest(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(72))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
obj = InParentNamespace()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfReferrables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
obj = Referrable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfReferrablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfReferrablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
return o == 0
# Monster
def SingleWeakReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(76))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfWeakReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfWeakReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfWeakReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfWeakReferencesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
return o == 0
# Monster
def VectorOfStrongReferrables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
obj = Referrable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfStrongReferrablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfStrongReferrablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
return o == 0
# Monster
def CoOwningReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(82))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfCoOwningReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfCoOwningReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfCoOwningReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfCoOwningReferencesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
return o == 0
# Monster
def NonOwningReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(86))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfNonOwningReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfNonOwningReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfNonOwningReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfNonOwningReferencesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
return o == 0
# Monster
def AnyUniqueType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(90))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def AnyUnique(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(92))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def AnyAmbiguousType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(94))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def AnyAmbiguous(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(96))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def VectorOfEnums(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def VectorOfEnumsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def VectorOfEnumsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfEnumsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
return o == 0
# Monster
def SignedEnum(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(100))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return -1
# Monster
def Testrequirednestedflatbuffer(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestrequirednestedflatbufferAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def TestrequirednestedflatbufferNestedRoot(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
from MyGame.Example.Monster import Monster
return Monster.GetRootAs(self._tab.Bytes, self._tab.Vector(o))
return 0
# Monster
def TestrequirednestedflatbufferLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestrequirednestedflatbufferIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
return o == 0
# Monster
def ScalarKeySortedTables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(104))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
obj = Stat()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def ScalarKeySortedTablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(104))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def ScalarKeySortedTablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(104))
return o == 0
# Monster
def NativeInline(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(106))
if o != 0:
x = o + self._tab.Pos
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def LongEnumNonEnumDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(108))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def LongEnumNormalDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(110))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 2
# Monster
def NanDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(112))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('nan')
# Monster
def InfDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(114))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('inf')
# Monster
def PositiveInfDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(116))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('inf')
# Monster
def InfinityDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(118))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('inf')
# Monster
def PositiveInfinityDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(120))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('inf')
# Monster
def NegativeInfDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(122))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('-inf')
# Monster
def NegativeInfinityDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(124))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('-inf')
# Monster
def DoubleInfDefault(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(126))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('inf')
def MonsterStart(builder):
builder.StartObject(62)
def MonsterAddPos(builder, pos):
builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pos), 0)
def MonsterAddMana(builder, mana):
builder.PrependInt16Slot(1, mana, 150)
def MonsterAddHp(builder, hp):
builder.PrependInt16Slot(2, hp, 100)
def MonsterAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def MonsterAddInventory(builder, inventory):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(inventory), 0)
def MonsterStartInventoryVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def MonsterAddColor(builder, color):
builder.PrependUint8Slot(6, color, 8)
def MonsterAddTestType(builder, testType):
builder.PrependUint8Slot(7, testType, 0)
def MonsterAddTest(builder, test):
builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(test), 0)
def MonsterAddTest4(builder, test4):
builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(test4), 0)
def MonsterStartTest4Vector(builder, numElems):
return builder.StartVector(4, numElems, 2)
def MonsterAddTestarrayofstring(builder, testarrayofstring):
builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofstring), 0)
def MonsterStartTestarrayofstringVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def MonsterAddTestarrayoftables(builder, testarrayoftables):
builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayoftables), 0)
def MonsterStartTestarrayoftablesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def MonsterAddEnemy(builder, enemy):
builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(enemy), 0)
def MonsterAddTestnestedflatbuffer(builder, testnestedflatbuffer):
builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(testnestedflatbuffer), 0)
def MonsterStartTestnestedflatbufferVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def MonsterMakeTestnestedflatbufferVectorFromBytes(builder, bytes):
builder.StartVector(1, len(bytes), 1)
builder.head = builder.head - len(bytes)
builder.Bytes[builder.head : builder.head + len(bytes)] = bytes
return builder.EndVector()
def MonsterAddTestempty(builder, testempty):
builder.PrependUOffsetTRelativeSlot(14, flatbuffers.number_types.UOffsetTFlags.py_type(testempty), 0)
def MonsterAddTestbool(builder, testbool):
builder.PrependBoolSlot(15, testbool, 0)
def MonsterAddTesthashs32Fnv1(builder, testhashs32Fnv1):
builder.PrependInt32Slot(16, testhashs32Fnv1, 0)
def MonsterAddTesthashu32Fnv1(builder, testhashu32Fnv1):
builder.PrependUint32Slot(17, testhashu32Fnv1, 0)
def MonsterAddTesthashs64Fnv1(builder, testhashs64Fnv1):
builder.PrependInt64Slot(18, testhashs64Fnv1, 0)
def MonsterAddTesthashu64Fnv1(builder, testhashu64Fnv1):
builder.PrependUint64Slot(19, testhashu64Fnv1, 0)
def MonsterAddTesthashs32Fnv1a(builder, testhashs32Fnv1a):
builder.PrependInt32Slot(20, testhashs32Fnv1a, 0)
def MonsterAddTesthashu32Fnv1a(builder, testhashu32Fnv1a):
builder.PrependUint32Slot(21, testhashu32Fnv1a, 0)
def MonsterAddTesthashs64Fnv1a(builder, testhashs64Fnv1a):
builder.PrependInt64Slot(22, testhashs64Fnv1a, 0)
def MonsterAddTesthashu64Fnv1a(builder, testhashu64Fnv1a):
builder.PrependUint64Slot(23, testhashu64Fnv1a, 0)
def MonsterAddTestarrayofbools(builder, testarrayofbools):
builder.PrependUOffsetTRelativeSlot(24, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofbools), 0)
def MonsterStartTestarrayofboolsVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def MonsterAddTestf(builder, testf):
builder.PrependFloat32Slot(25, testf, 3.14159)
def MonsterAddTestf2(builder, testf2):
builder.PrependFloat32Slot(26, testf2, 3.0)
def MonsterAddTestf3(builder, testf3):
builder.PrependFloat32Slot(27, testf3, 0.0)
def MonsterAddTestarrayofstring2(builder, testarrayofstring2):
builder.PrependUOffsetTRelativeSlot(28, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofstring2), 0)
def MonsterStartTestarrayofstring2Vector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def MonsterAddTestarrayofsortedstruct(builder, testarrayofsortedstruct):
builder.PrependUOffsetTRelativeSlot(29, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofsortedstruct), 0)
def MonsterStartTestarrayofsortedstructVector(builder, numElems):
return builder.StartVector(8, numElems, 4)
def MonsterAddFlex(builder, flex):
builder.PrependUOffsetTRelativeSlot(30, flatbuffers.number_types.UOffsetTFlags.py_type(flex), 0)
def MonsterStartFlexVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def MonsterAddTest5(builder, test5):
builder.PrependUOffsetTRelativeSlot(31, flatbuffers.number_types.UOffsetTFlags.py_type(test5), 0)
def MonsterStartTest5Vector(builder, numElems):
return builder.StartVector(4, numElems, 2)
def MonsterAddVectorOfLongs(builder, vectorOfLongs):
builder.PrependUOffsetTRelativeSlot(32, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfLongs), 0)
def MonsterStartVectorOfLongsVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def MonsterAddVectorOfDoubles(builder, vectorOfDoubles):
builder.PrependUOffsetTRelativeSlot(33, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfDoubles), 0)
def MonsterStartVectorOfDoublesVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def MonsterAddParentNamespaceTest(builder, parentNamespaceTest):
builder.PrependUOffsetTRelativeSlot(34, flatbuffers.number_types.UOffsetTFlags.py_type(parentNamespaceTest), 0)
def MonsterAddVectorOfReferrables(builder, vectorOfReferrables):
builder.PrependUOffsetTRelativeSlot(35, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfReferrables), 0)
def MonsterStartVectorOfReferrablesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def MonsterAddSingleWeakReference(builder, singleWeakReference):
builder.PrependUint64Slot(36, singleWeakReference, 0)
def MonsterAddVectorOfWeakReferences(builder, vectorOfWeakReferences):
builder.PrependUOffsetTRelativeSlot(37, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfWeakReferences), 0)
def MonsterStartVectorOfWeakReferencesVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def MonsterAddVectorOfStrongReferrables(builder, vectorOfStrongReferrables):
builder.PrependUOffsetTRelativeSlot(38, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfStrongReferrables), 0)
def MonsterStartVectorOfStrongReferrablesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def MonsterAddCoOwningReference(builder, coOwningReference):
builder.PrependUint64Slot(39, coOwningReference, 0)
def MonsterAddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences):
builder.PrependUOffsetTRelativeSlot(40, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfCoOwningReferences), 0)
def MonsterStartVectorOfCoOwningReferencesVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def MonsterAddNonOwningReference(builder, nonOwningReference):
builder.PrependUint64Slot(41, nonOwningReference, 0)
def MonsterAddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences):
builder.PrependUOffsetTRelativeSlot(42, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfNonOwningReferences), 0)
def MonsterStartVectorOfNonOwningReferencesVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def MonsterAddAnyUniqueType(builder, anyUniqueType):
builder.PrependUint8Slot(43, anyUniqueType, 0)
def MonsterAddAnyUnique(builder, anyUnique):
builder.PrependUOffsetTRelativeSlot(44, flatbuffers.number_types.UOffsetTFlags.py_type(anyUnique), 0)
def MonsterAddAnyAmbiguousType(builder, anyAmbiguousType):
builder.PrependUint8Slot(45, anyAmbiguousType, 0)
def MonsterAddAnyAmbiguous(builder, anyAmbiguous):
builder.PrependUOffsetTRelativeSlot(46, flatbuffers.number_types.UOffsetTFlags.py_type(anyAmbiguous), 0)
def MonsterAddVectorOfEnums(builder, vectorOfEnums):
builder.PrependUOffsetTRelativeSlot(47, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfEnums), 0)
def MonsterStartVectorOfEnumsVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def MonsterAddSignedEnum(builder, signedEnum):
builder.PrependInt8Slot(48, signedEnum, -1)
def MonsterAddTestrequirednestedflatbuffer(builder, testrequirednestedflatbuffer):
builder.PrependUOffsetTRelativeSlot(49, flatbuffers.number_types.UOffsetTFlags.py_type(testrequirednestedflatbuffer), 0)
def MonsterStartTestrequirednestedflatbufferVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def MonsterMakeTestrequirednestedflatbufferVectorFromBytes(builder, bytes):
builder.StartVector(1, len(bytes), 1)
builder.head = builder.head - len(bytes)
builder.Bytes[builder.head : builder.head + len(bytes)] = bytes
return builder.EndVector()
def MonsterAddScalarKeySortedTables(builder, scalarKeySortedTables):
builder.PrependUOffsetTRelativeSlot(50, flatbuffers.number_types.UOffsetTFlags.py_type(scalarKeySortedTables), 0)
def MonsterStartScalarKeySortedTablesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def MonsterAddNativeInline(builder, nativeInline):
builder.PrependStructSlot(51, flatbuffers.number_types.UOffsetTFlags.py_type(nativeInline), 0)
def MonsterAddLongEnumNonEnumDefault(builder, longEnumNonEnumDefault):
builder.PrependUint64Slot(52, longEnumNonEnumDefault, 0)
def MonsterAddLongEnumNormalDefault(builder, longEnumNormalDefault):
builder.PrependUint64Slot(53, longEnumNormalDefault, 2)
def MonsterAddNanDefault(builder, nanDefault):
builder.PrependFloat32Slot(54, nanDefault, float('nan'))
def MonsterAddInfDefault(builder, infDefault):
builder.PrependFloat32Slot(55, infDefault, float('inf'))
def MonsterAddPositiveInfDefault(builder, positiveInfDefault):
builder.PrependFloat32Slot(56, positiveInfDefault, float('inf'))
def MonsterAddInfinityDefault(builder, infinityDefault):
builder.PrependFloat32Slot(57, infinityDefault, float('inf'))
def MonsterAddPositiveInfinityDefault(builder, positiveInfinityDefault):
builder.PrependFloat32Slot(58, positiveInfinityDefault, float('inf'))
def MonsterAddNegativeInfDefault(builder, negativeInfDefault):
builder.PrependFloat32Slot(59, negativeInfDefault, float('-inf'))
def MonsterAddNegativeInfinityDefault(builder, negativeInfinityDefault):
builder.PrependFloat32Slot(60, negativeInfinityDefault, float('-inf'))
def MonsterAddDoubleInfDefault(builder, doubleInfDefault):
builder.PrependFloat64Slot(61, doubleInfDefault, float('inf'))
def MonsterEnd(builder):
return builder.EndObject()
try:
from typing import List, Optional, Union
except:
pass
| Monster |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_delete_field_pending_with_fk_constraint_app/models.py | {
"start": 166,
"end": 311
} | class ____(models.Model):
field = models.IntegerField(default=0, null=False)
fk_table = FlexibleForeignKey(FkTable, db_index=False)
| TestTable |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_restore.py | {
"start": 2774,
"end": 9584
} | class ____(GenericParentValTestLossBoringModel[int]):
pass
def test_model_properties_fit_ckpt_path(tmp_path):
"""Test that properties like `current_epoch` and `global_step` in model and trainer are always the same."""
model = BoringModel()
checkpoint_callback = ModelCheckpoint(dirpath=tmp_path, save_last=True)
trainer_args = {
"default_root_dir": tmp_path,
"max_epochs": 1,
"limit_train_batches": 2,
"limit_val_batches": 2,
"logger": False,
"callbacks": [checkpoint_callback, ModelTrainerPropertyParity()], # this performs the assertions
}
trainer = Trainer(**trainer_args)
trainer.fit(model)
trainer_args.update(max_epochs=2)
trainer = Trainer(**trainer_args)
trainer.fit(model, ckpt_path=str(tmp_path / "last.ckpt"))
@RunIf(sklearn=True)
def test_trainer_properties_restore_ckpt_path(tmp_path):
"""Test that required trainer properties are set correctly when resuming from checkpoint in different phases."""
class CustomClassifModel(ClassificationModel):
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = CustomClassifModel()
dm = ClassifDataModule()
checkpoint_callback = ModelCheckpoint(dirpath=tmp_path, save_last=True)
trainer_args = {
"default_root_dir": tmp_path,
"max_epochs": 1,
"limit_train_batches": 2,
"limit_val_batches": 2,
"limit_test_batches": 2,
"limit_predict_batches": 2,
"logger": False,
"callbacks": [checkpoint_callback],
"num_sanity_val_steps": 0,
}
trainer = Trainer(**trainer_args)
trainer.fit(model, datamodule=dm)
resume_ckpt = str(tmp_path / "last.ckpt")
state_dict = torch.load(resume_ckpt, weights_only=True)
trainer_args.update({"max_epochs": 3, "enable_checkpointing": False, "callbacks": []})
class CustomClassifModel(CustomClassifModel):
def _is_equal(self, a, b):
if isinstance(a, Tensor):
return torch.all(torch.eq(a, b))
if isinstance(a, Mapping):
return all(self._is_equal(a.get(k, None), b.get(k, None)) for k in b)
return a == b
def _check_optimizers(self):
return all(
self._is_equal(optimizer.state_dict(), state)
for optimizer, state in zip(self.trainer.optimizers, state_dict["optimizer_states"])
)
def _check_schedulers(self):
return all(
self._is_equal(config.scheduler.state_dict(), state)
for config, state in zip(self.trainer.lr_scheduler_configs, state_dict["lr_schedulers"])
)
def _check_model_state_dict(self):
return all(
self._is_equal(actual, expected)
for actual, expected in zip(self.state_dict(), state_dict["state_dict"])
)
def _test_on_val_test_predict_start(self):
assert self.trainer.current_epoch == state_dict["epoch"]
assert self.trainer.global_step == 0
assert self._check_model_state_dict()
def on_train_start(self):
assert self.trainer.current_epoch == state_dict["epoch"] + 1
assert self.trainer.global_step == state_dict["global_step"]
assert self._check_model_state_dict()
assert self._check_optimizers()
assert self._check_schedulers()
def on_validation_start(self):
if self.trainer.state.fn == TrainerFn.VALIDATING:
self._test_on_val_test_predict_start()
def on_test_start(self):
self._test_on_val_test_predict_start()
for fn in ("fit", "validate", "test", "predict"):
model = CustomClassifModel()
dm = ClassifDataModule()
trainer = Trainer(**trainer_args)
trainer_fn = getattr(trainer, fn)
trainer_fn(model, datamodule=dm, ckpt_path=resume_ckpt)
def test_correct_step_and_epoch(tmp_path):
model = BoringModel()
first_max_epochs = 2
train_batches = 2
trainer = Trainer(
default_root_dir=tmp_path, max_epochs=first_max_epochs, limit_train_batches=train_batches, limit_val_batches=0
)
assert trainer.current_epoch == 0
assert trainer.global_step == 0
trainer.fit(model)
assert trainer.current_epoch == first_max_epochs
assert trainer.global_step == first_max_epochs * train_batches
# save checkpoint after loop ends, training end called, epoch count increased
ckpt_path = str(tmp_path / "model.ckpt")
trainer.save_checkpoint(ckpt_path)
ckpt = torch.load(ckpt_path, weights_only=True)
assert ckpt["epoch"] == first_max_epochs
assert ckpt["global_step"] == first_max_epochs * train_batches
max_epochs = first_max_epochs + 2
trainer = Trainer(
default_root_dir=tmp_path, max_epochs=max_epochs, limit_train_batches=train_batches, limit_val_batches=0
)
# the ckpt state is not loaded at this point
assert trainer.current_epoch == 0
assert trainer.global_step == 0
class TestModel(BoringModel):
def on_train_start(self) -> None:
assert self.trainer.current_epoch == first_max_epochs
assert self.trainer.global_step == first_max_epochs * train_batches
assert self.trainer.fit_loop.epoch_loop._batches_that_stepped == first_max_epochs * train_batches
trainer.fit(TestModel(), ckpt_path=ckpt_path)
assert trainer.current_epoch == max_epochs
assert trainer.global_step == max_epochs * train_batches
assert trainer.fit_loop.epoch_loop._batches_that_stepped == max_epochs * train_batches
def test_fit_twice(tmp_path):
epochs = []
class TestModel(BoringModel):
def on_train_epoch_end(self, *_):
epochs.append(self.current_epoch)
trainer = Trainer(
max_epochs=2,
limit_train_batches=1,
limit_val_batches=1,
default_root_dir=tmp_path,
logger=False,
enable_checkpointing=False,
enable_model_summary=False,
enable_progress_bar=False,
)
trainer.fit(TestModel())
trainer.fit_loop.max_epochs = 4
trainer.fit(TestModel())
assert epochs == [0, 1, 2, 3]
def test_try_resume_from_non_existing_checkpoint(tmp_path):
"""Test that trying to resume from non-existing `ckpt_path` fails with an error."""
model = BoringModel()
trainer = Trainer(logger=False)
with pytest.raises(FileNotFoundError, match="Checkpoint file not found"):
trainer.fit(model, ckpt_path=str(tmp_path / "non_existing.ckpt"))
| GenericValTestLossBoringModel |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 26758,
"end": 28169
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Eurolinux'
strategy_class = RedHatStrategy
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
use=dict(type='str', choices=list(STRATS.keys()))
),
supports_check_mode=True,
)
hostname = Hostname(module)
name = module.params['name']
current_hostname = hostname.get_current_hostname()
permanent_hostname = hostname.get_permanent_hostname()
changed = hostname.update_current_and_permanent_hostname()
if name != current_hostname:
name_before = current_hostname
else:
name_before = permanent_hostname
# NOTE: socket.getfqdn() calls gethostbyaddr(socket.gethostname()), which can be
# slow to return if the name does not resolve correctly.
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if changed:
kw['diff'] = {'after': 'hostname = ' + name + '\n',
'before': 'hostname = ' + name_before + '\n'}
module.exit_json(**kw)
if __name__ == '__main__':
main()
| EurolinuxHostname |
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_negation.py | {
"start": 1694,
"end": 1795
} | class ____:
"""This is also ok"""
def __ne__(self, other):
return not self == other
| Klass |
python | nedbat__coveragepy | tests/test_arcs.py | {
"start": 5473,
"end": 12282
} | class ____(CoverageTest):
"""Arc-measuring tests involving context managers."""
def test_with(self) -> None:
self.check_coverage(
"""\
def example():
with open("test", "w", encoding="utf-8") as f:
f.write("3")
a = 4
example()
""",
branchz="",
branchz_missing="",
)
def test_with_return(self) -> None:
self.check_coverage(
"""\
def example():
with open("test", "w", encoding="utf-8") as f:
f.write("3")
return 4
example()
""",
branchz="",
branchz_missing="",
)
def test_bug_146(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/146
self.check_coverage(
"""\
for i in range(2):
with open("test", "w", encoding="utf-8") as f:
print(3)
print(4)
print(5)
""",
branchz="12 15",
branchz_missing="",
)
assert self.stdout() == "3\n4\n3\n4\n5\n"
def test_nested_with_return(self) -> None:
self.check_coverage(
"""\
def example(x):
with open("test", "w", encoding="utf-8") as f2:
a = 3
with open("test2", "w", encoding="utf-8") as f4:
f2.write("5")
return 6
example(8)
""",
branchz="",
branchz_missing="",
)
def test_break_through_with(self) -> None:
self.check_coverage(
"""\
for i in range(1+1):
with open("test", "w", encoding="utf-8") as f:
print(3)
break
print(5)
""",
branchz="12 15",
branchz_missing="15",
)
def test_continue_through_with(self) -> None:
self.check_coverage(
"""\
for i in range(1+1):
with open("test", "w", encoding="utf-8") as f:
print(3)
continue
print(5)
""",
branchz="12 15",
branchz_missing="",
)
# https://github.com/coveragepy/coveragepy/issues/1270
def test_raise_through_with(self) -> None:
cov = self.check_coverage(
"""\
from contextlib import nullcontext
def f(x):
with nullcontext():
print(4)
raise Exception("Boo6")
print(6)
try:
f(8)
except Exception:
print("oops 10")
""",
branchz="",
branchz_missing="",
)
expected = "line 3 didn't jump to the function exit"
assert self.get_missing_arc_description(cov, 3, -2) == expected
def test_untaken_if_through_with(self) -> None:
cov = self.check_coverage(
"""\
from contextlib import nullcontext
def f(x):
with nullcontext():
print(4)
if x == 5:
print(6)
print(7)
f(8)
""",
branchz="56 57",
branchz_missing="56",
)
assert self.stdout() == "4\n7\n"
expected = "line 3 didn't jump to the function exit"
assert self.get_missing_arc_description(cov, 3, -2) == expected
def test_untaken_raise_through_with(self) -> None:
cov = self.check_coverage(
"""\
from contextlib import nullcontext
def f(x):
with nullcontext():
print(4)
if x == 5:
raise Exception("Boo6")
print(7)
try:
f(9)
except Exception:
print("oops 11")
""",
branchz="56 57",
branchz_missing="56",
)
assert self.stdout() == "4\n7\n"
expected = "line 3 didn't jump to the function exit"
assert self.get_missing_arc_description(cov, 3, -2) == expected
def test_leaving_module(self) -> None:
cov = self.check_coverage(
"""\
print(a := 1)
if a == 1:
print(3)
""",
branchz="2. 23",
branchz_missing="2.",
)
assert self.stdout() == "1\n3\n"
expected = "line 2 didn't exit the module because the condition on line 2 was always true"
assert self.get_missing_arc_description(cov, 2, -1) == expected
def test_with_with_lambda(self) -> None:
self.check_coverage(
"""\
from contextlib import nullcontext
with nullcontext(lambda x: 2):
print(3)
print(4)
""",
branchz="",
branchz_missing="",
)
def test_multiline_with(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/1880
self.check_coverage(
"""\
import contextlib, itertools
nums = itertools.count()
with (
contextlib.nullcontext() as x,
):
while next(nums) < 6:
y = 7
z = 8
""",
branchz="67 68",
branchz_missing="",
)
def test_multi_multiline_with(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/1880
self.check_coverage(
"""\
import contextlib, itertools
nums = itertools.count()
with (
contextlib.nullcontext() as x,
contextlib.nullcontext() as y,
contextlib.nullcontext() as z,
):
while next(nums) < 8:
y = 9
z = 10
""",
branchz="89 8A",
branchz_missing="",
)
def test_multi_multiline_with_backslash(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/1880
self.check_coverage(
"""\
import contextlib, itertools
nums = itertools.count()
with contextlib.nullcontext() as x, \\
contextlib.nullcontext() as y, \\
contextlib.nullcontext() as z:
while next(nums) < 6:
y = 7
z = 8
""",
branchz="67 68",
branchz_missing="",
)
| WithTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/definition/multi.py | {
"start": 3382,
"end": 20326
} | class ____(PartitionsDefinition[MultiPartitionKey]):
"""Takes the cross-product of partitions from two partitions definitions.
For example, with a static partitions definition where the partitions are ["a", "b", "c"]
and a daily partitions definition, this partitions definition will have the following
partitions:
2020-01-01|a
2020-01-01|b
2020-01-01|c
2020-01-02|a
2020-01-02|b
...
We recommended limiting partition counts for each asset to 100,000 partitions or fewer.
Args:
partitions_defs (Mapping[str, PartitionsDefinition]):
A mapping of dimension name to partitions definition. The total set of partitions will
be the cross-product of the partitions from each PartitionsDefinition.
Args:
partitions_defs (Sequence[PartitionDimensionDefinition]):
A sequence of PartitionDimensionDefinition objects, each of which contains a dimension
name and a PartitionsDefinition. The total set of partitions will be the cross-product
of the partitions from each PartitionsDefinition. This sequence is ordered by
dimension name, to ensure consistent ordering of the partitions.
"""
def __init__(self, partitions_defs: Mapping[str, PartitionsDefinition]):
if not len(partitions_defs.keys()) == 2:
raise DagsterInvalidInvocationError(
"Dagster currently only supports multi-partitions definitions with 2 partitions"
" definitions. Your multi-partitions definition has"
f" {len(partitions_defs.keys())} partitions definitions."
)
check.mapping_param(
partitions_defs, "partitions_defs", key_type=str, value_type=PartitionsDefinition
)
_check_valid_partitions_dimensions(partitions_defs)
self._partitions_defs: list[PartitionDimensionDefinition] = sorted(
[
PartitionDimensionDefinition(name, partitions_def)
for name, partitions_def in partitions_defs.items()
],
key=lambda x: x.name,
)
@property
def partitions_subset_class(self) -> type["PartitionsSubset"]:
return DefaultPartitionsSubset
def get_partition_keys_in_range( # pyright: ignore[reportIncompatibleMethodOverride]
self,
partition_key_range: PartitionKeyRange,
dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
) -> Sequence[str]:
with partition_loading_context(dynamic_partitions_store=dynamic_partitions_store):
start: MultiPartitionKey = self.get_partition_key_from_str(partition_key_range.start)
end: MultiPartitionKey = self.get_partition_key_from_str(partition_key_range.end)
partition_key_sequences = [
partition_dim.partitions_def.get_partition_keys_in_range(
PartitionKeyRange(
start.keys_by_dimension[partition_dim.name],
end.keys_by_dimension[partition_dim.name],
),
)
for partition_dim in self._partitions_defs
]
return [
MultiPartitionKey(
{
self._partitions_defs[i].name: key
for i, key in enumerate(partition_key_tuple)
}
)
for partition_key_tuple in itertools.product(*partition_key_sequences)
]
def get_serializable_unique_identifier(self) -> str:
return hashlib.sha1(
str(
{
dim_def.name: dim_def.partitions_def.get_serializable_unique_identifier()
for dim_def in self.partitions_defs
}
).encode("utf-8")
).hexdigest()
@property
def partition_dimension_names(self) -> list[str]:
return [dim_def.name for dim_def in self._partitions_defs]
@property
def partitions_defs(self) -> Sequence[PartitionDimensionDefinition]:
return self._partitions_defs
def get_partitions_def_for_dimension(self, dimension_name: str) -> PartitionsDefinition:
for dim_def in self._partitions_defs:
if dim_def.name == dimension_name:
return dim_def.partitions_def
check.failed(f"Invalid dimension name {dimension_name}")
# We override the default implementation of `has_partition_key` for performance.
def has_partition_key(self, partition_key: Union[MultiPartitionKey, str]) -> bool:
if isinstance(partition_key, str):
try:
partition_key = self.get_partition_key_from_str(partition_key)
except CheckError:
return False
if partition_key.keys_by_dimension.keys() != set(self.partition_dimension_names):
raise DagsterUnknownPartitionError(
f"Invalid partition key {partition_key}. The dimensions of the partition key are"
" not the dimensions of the partitions definition."
)
for dimension in self.partitions_defs:
if not dimension.partitions_def.has_partition_key(
partition_key.keys_by_dimension[dimension.name]
):
return False
return True
# store results for repeated calls with the same current_time
@lru_cache(maxsize=1)
def _get_partition_keys(self, _current_time: datetime) -> Sequence[MultiPartitionKey]:
partition_key_sequences = [
partition_dim.partitions_def.get_partition_keys()
for partition_dim in self._partitions_defs
]
keys = [
MultiPartitionKey(
{self._partitions_defs[i].name: key for i, key in enumerate(partition_key_tuple)}
)
for partition_key_tuple in itertools.product(*partition_key_sequences)
]
# in some cases, an underlying partitions definition may have keys in a format
# that produce invalid multi-partition keys (e.g. they have a | character).
# in these cases, we filter out the invalid keys.
return [key for key in keys if self._is_valid_key_format(key)]
@public
def get_partition_keys(
self,
current_time: Optional[datetime] = None,
dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
) -> Sequence[MultiPartitionKey]:
"""Returns a list of MultiPartitionKeys representing the partition keys of the
PartitionsDefinition.
Args:
current_time (Optional[datetime]): A datetime object representing the current time, only
applicable to time-based partition dimensions.
dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore
object that is responsible for fetching dynamic partitions. Required when a
dimension is a DynamicPartitionsDefinition with a name defined. Users can pass the
DagsterInstance fetched via `context.instance` to this argument.
Returns:
Sequence[MultiPartitionKey]
"""
with partition_loading_context(current_time, dynamic_partitions_store) as ctx:
return self._get_partition_keys(_current_time=ctx.effective_dt)
def get_paginated_partition_keys(
self,
context: PartitionLoadingContext,
limit: int,
ascending: bool,
cursor: Optional[str] = None,
) -> PaginatedResults[str]:
"""Returns a connection object that contains a list of partition keys and all the necessary
information to paginate through them.
Args:
cursor: (Optional[str]): A cursor to track the progress paginating through the returned partition key results.
limit: (Optional[int]): The maximum number of partition keys to return.
Returns:
PaginatedResults[MultiPartitionKey]
"""
with partition_loading_context(new_ctx=context) as ctx:
partition_keys = []
iterator = MultiDimensionalPartitionKeyIterator(
context=ctx,
partition_defs=self._partitions_defs,
cursor=MultiPartitionCursor.from_cursor(cursor),
ascending=ascending,
)
next_cursor = cursor
while iterator.has_next():
partition_key = next(iterator)
if not partition_key:
break
partition_keys.append(partition_key)
next_cursor = iterator.cursor().to_string()
if len(partition_keys) >= limit:
break
if not next_cursor:
next_cursor = MultiPartitionCursor(last_seen_key=None).to_string()
return PaginatedResults(
results=partition_keys, cursor=next_cursor, has_more=iterator.has_next()
)
def _is_valid_key_format(self, partition_key: str) -> bool:
"""Checks if the given partition key is in the correct format for a multi-partition key
of this MultiPartitionsDefinition.
"""
return len(partition_key.split(MULTIPARTITION_KEY_DELIMITER)) == len(self.partitions_defs)
def filter_valid_partition_keys(self, partition_keys: set[str]) -> set[MultiPartitionKey]:
partition_keys_by_dimension = {
dim.name: dim.partitions_def.get_partition_keys() for dim in self.partitions_defs
}
validated_partitions = set()
for partition_key in partition_keys:
if not self._is_valid_key_format(partition_key):
continue
partition_key_strs = partition_key.split(MULTIPARTITION_KEY_DELIMITER)
multipartition_key = MultiPartitionKey(
{dim.name: partition_key_strs[i] for i, dim in enumerate(self._partitions_defs)}
)
if all(
key in partition_keys_by_dimension.get(dim, [])
for dim, key in multipartition_key.keys_by_dimension.items()
):
validated_partitions.add(partition_key)
return validated_partitions
def __eq__(self, other):
return (
isinstance(other, MultiPartitionsDefinition)
and self.partitions_defs == other.partitions_defs
)
def __hash__(self):
return hash(
tuple(
[
(partitions_def.name, partitions_def.__repr__())
for partitions_def in self.partitions_defs
]
)
)
def __str__(self) -> str:
dimension_1 = self._partitions_defs[0]
dimension_2 = self._partitions_defs[1]
partition_str = (
"Multi-partitioned, with dimensions: \n"
f"{dimension_1.name.capitalize()}: {dimension_1.partitions_def} \n"
f"{dimension_2.name.capitalize()}: {dimension_2.partitions_def}"
)
return partition_str
def __repr__(self) -> str:
return f"{type(self).__name__}(dimensions={[str(dim) for dim in self.partitions_defs]}"
def get_partition_key_from_str(self, partition_key_str: str) -> MultiPartitionKey:
"""Given a string representation of a partition key, returns a MultiPartitionKey object."""
check.str_param(partition_key_str, "partition_key_str")
partition_key_strs = partition_key_str.split(MULTIPARTITION_KEY_DELIMITER)
check.invariant(
len(partition_key_strs) == len(self.partitions_defs),
f"Expected {len(self.partitions_defs)} partition keys in partition key string"
f" {partition_key_str}, but got {len(partition_key_strs)}",
)
return MultiPartitionKey(
{dim.name: partition_key_strs[i] for i, dim in enumerate(self._partitions_defs)}
)
def _get_primary_and_secondary_dimension(
self,
) -> tuple[PartitionDimensionDefinition, PartitionDimensionDefinition]:
# Multipartitions subsets are serialized by primary dimension. If changing
# the selection of primary/secondary dimension, will need to also update the
# serialization of MultiPartitionsSubsets
time_dimensions = self._get_time_window_dims()
if len(time_dimensions) == 1:
primary_dimension, secondary_dimension = (
time_dimensions[0],
next(iter([dim for dim in self.partitions_defs if dim != time_dimensions[0]])),
)
else:
primary_dimension, secondary_dimension = (
self.partitions_defs[0],
self.partitions_defs[1],
)
return primary_dimension, secondary_dimension
@property
def primary_dimension(self) -> PartitionDimensionDefinition:
return self._get_primary_and_secondary_dimension()[0]
@property
def secondary_dimension(self) -> PartitionDimensionDefinition:
return self._get_primary_and_secondary_dimension()[1]
def get_tags_for_partition_key(self, partition_key: str) -> Mapping[str, str]:
partition_key = cast("MultiPartitionKey", self.get_partition_key_from_str(partition_key))
tags = {**super().get_tags_for_partition_key(partition_key)}
tags.update(get_tags_from_multi_partition_key(partition_key))
return tags
@property
def time_window_dimension(self) -> PartitionDimensionDefinition:
time_window_dims = self._get_time_window_dims()
check.invariant(
len(time_window_dims) == 1, "Expected exactly one time window partitioned dimension"
)
return next(iter(time_window_dims))
def _get_time_window_dims(self) -> list[PartitionDimensionDefinition]:
return [
dim
for dim in self.partitions_defs
if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)
]
@property
def has_time_window_dimension(self) -> bool:
return bool(self._get_time_window_dims())
@property
def time_window_partitions_def(self) -> TimeWindowPartitionsDefinition:
check.invariant(self.has_time_window_dimension, "Must have time window dimension")
return cast(
"TimeWindowPartitionsDefinition",
check.inst(self.primary_dimension.partitions_def, TimeWindowPartitionsDefinition),
)
def time_window_for_partition_key(self, partition_key: str) -> TimeWindow:
if not isinstance(partition_key, MultiPartitionKey):
partition_key = self.get_partition_key_from_str(partition_key)
time_window_dimension = self.time_window_dimension
return cast(
"TimeWindowPartitionsDefinition", time_window_dimension.partitions_def
).time_window_for_partition_key(
cast("MultiPartitionKey", partition_key).keys_by_dimension[time_window_dimension.name]
)
def get_multipartition_keys_with_dimension_value(
self, dimension_name: str, dimension_partition_key: str
) -> Sequence[MultiPartitionKey]:
check.str_param(dimension_name, "dimension_name")
check.str_param(dimension_partition_key, "dimension_partition_key")
matching_dimensions = [
dimension for dimension in self.partitions_defs if dimension.name == dimension_name
]
other_dimensions = [
dimension for dimension in self.partitions_defs if dimension.name != dimension_name
]
check.invariant(
len(matching_dimensions) == 1,
f"Dimension {dimension_name} not found in MultiPartitionsDefinition with dimensions"
f" {[dim.name for dim in self.partitions_defs]}",
)
partition_sequences = [
partition_dim.partitions_def.get_partition_keys() for partition_dim in other_dimensions
] + [[dimension_partition_key]]
# Names of partitions dimensions in the same order as partition_sequences
partition_dim_names = [dim.name for dim in other_dimensions] + [dimension_name]
return [
MultiPartitionKey(
{
partition_dim_names[i]: partition_key
for i, partition_key in enumerate(partitions_tuple)
}
)
for partitions_tuple in itertools.product(*partition_sequences)
]
def get_num_partitions(self) -> int:
# Static partitions definitions can contain duplicate keys (will throw error in 1.3.0)
# In the meantime, relying on get_num_partitions to handle duplicates to display
# correct counts in the Dagster UI.
dimension_counts = [dim.partitions_def.get_num_partitions() for dim in self.partitions_defs]
return reduce(lambda x, y: x * y, dimension_counts, 1)
| MultiPartitionsDefinition |
python | getsentry__sentry | tests/sentry/api/endpoints/test_team_alerts_triggered.py | {
"start": 379,
"end": 5957
} | class ____(APITestCase):
endpoint = "sentry-api-0-team-alerts-triggered"
def test_simple(self) -> None:
project1 = self.create_project(
teams=[self.team], slug="foo"
) # This project will return counts for this team
user_owned_rule = self.create_alert_rule(
organization=self.organization,
projects=[project1],
name="user owned rule",
query="",
aggregate="count()",
time_window=1,
threshold_type=AlertRuleThresholdType.ABOVE,
resolve_threshold=10,
threshold_period=1,
owner=Actor.from_identifier(self.user.id),
)
user_owned_incident = self.create_incident(status=20, alert_rule=user_owned_rule)
activities = []
for i in range(1, 9):
activities.append(
IncidentActivity(
incident=user_owned_incident,
type=IncidentActivityType.CREATED.value,
value=INCIDENT_STATUS[IncidentStatus.OPEN],
date_added=before_now(days=i),
)
)
IncidentActivity.objects.bulk_create(activities)
self.login_as(user=self.user)
response = self.get_success_response(self.team.organization.slug, self.team.slug)
assert len(response.data) == 90
for i in range(1, 9):
assert (
response.data[
before_now(days=i)
.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
]
== 1
)
for i in range(10, 90):
assert (
response.data[
before_now(days=i)
.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
]
== 0
)
response = self.get_success_response(
self.team.organization.slug, self.team.slug, statsPeriod="7d"
)
assert len(response.data) == 7
assert (
response.data[
before_now(days=0).replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
]
== 0
)
for i in range(1, 6):
assert (
response.data[
before_now(days=i)
.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
]
== 1
)
def test_not_as_simple(self) -> None:
team_with_user = self.create_team(
organization=self.organization, name="Lonely Team", members=[self.user]
)
project1 = self.create_project(
teams=[self.team], slug="foo"
) # This project will return counts for this team
project2 = self.create_project(
# teams=[team_with_user], slug="bar"
teams=[team_with_user],
slug="bar",
) # but not this project, cause this team isn't on it (but the user is)
user_owned_rule = self.create_alert_rule(
organization=self.organization,
projects=[project2],
name="user owned rule",
query="",
aggregate="count()",
time_window=1,
threshold_type=AlertRuleThresholdType.ABOVE,
resolve_threshold=10,
threshold_period=1,
owner=Actor.from_identifier(self.user.id),
)
user_owned_incident = self.create_incident(
projects=[project2], status=20, alert_rule=user_owned_rule
)
team_owned_rule = self.create_alert_rule(
organization=self.organization,
projects=[project1],
name="team owned rule",
query="",
aggregate="count()",
time_window=1,
threshold_type=AlertRuleThresholdType.ABOVE,
resolve_threshold=10,
threshold_period=1,
owner=Actor.from_identifier(f"team:{self.team.id}"),
)
team_owned_incident = self.create_incident(
projects=[project1], status=20, alert_rule=team_owned_rule
)
IncidentActivity.objects.create(
incident=user_owned_incident,
type=IncidentActivityType.CREATED.value,
value=INCIDENT_STATUS[IncidentStatus.OPEN],
)
IncidentActivity.objects.create(
incident=team_owned_incident,
type=IncidentActivityType.CREATED.value,
value=INCIDENT_STATUS[IncidentStatus.OPEN],
date_added=before_now(days=2),
)
self.login_as(user=self.user)
response = self.get_success_response(self.team.organization.slug, self.team.slug)
assert len(response.data) == 90
assert (
response.data[
before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
]
== 1
)
# only getting the team owned incident, because the user owned incident is for another project that the team isn't on
for i in range(0, 90):
if i != 2:
assert (
response.data[
before_now(days=i)
.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
]
== 0
)
@freeze_time()
| TeamAlertsTriggeredTotalsEndpointTest |
python | spack__spack | lib/spack/spack/test/web.py | {
"start": 8180,
"end": 8377
} | class ____(Exception):
def __init__(self):
self.response = {
"Error": {"Code": "NoSuchKey"},
"ResponseMetadata": {"HTTPStatusCode": 404},
}
| MockClientError |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 339657,
"end": 340408
} | class ____(ValueChannelMixin, core.PositionValueDef):
"""
Latitude2Value schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude2"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
| Latitude2Value |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 214894,
"end": 223952
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
def test_push_to_hub(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
output_dir_name = tmp_repo.repo_name
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"{USER}/{output_dir_name}")
model = RegressionPreTrainedModel.from_pretrained(repo_name)
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def test_push_to_hub_in_organization(self):
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=tmp_dir)
trainer.save_model()
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_model_id=f"valid_org/{output_dir_name}",
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"valid_org/{output_dir_name}")
model = RegressionPreTrainedModel.from_pretrained(f"valid_org/{output_dir_name}")
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def get_commit_history(self, repo):
commit_logs = subprocess.run(
["git", "log"],
capture_output=True,
check=True,
encoding="utf-8",
cwd=repo,
).stdout
commits = commit_logs.split("\n\n")[1::2]
return [commit.strip() for commit in commits]
# TODO: @ydshieh or @SunMarc
@unittest.skip("unknown failure reason, possibly staging hub issue")
def test_push_to_hub_with_saves_each_epoch(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertLogs(level="WARNING") as logs:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
# To avoid any flakiness if the training goes faster than the uploads.
hub_always_push=True,
save_strategy="epoch",
)
trainer.train()
commits = list_repo_commits(f"{USER}/{output_dir_name}", token=self._token)
commits = [c.title for c in commits]
self.assertIn("initial commit", commits)
self.assertIn("Training in progress, epoch 1", commits)
self.assertIn("Training in progress, epoch 2", commits)
# Epochs 3 and 4 are not guaranteed to be present (empty commits)
self.assertTrue(any("Skipping to prevent empty commit." in record.message for record in logs.records))
def test_push_to_hub_with_saves_each_n_steps(self):
num_gpus = max(1, backend_device_count(torch_device))
if num_gpus > 2:
self.skipTest(reason="More than 2 GPUs available")
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertLogs(level="WARNING") as logs:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
# To avoid any flakiness if the training goes faster than the uploads.
hub_always_push=True,
save_strategy="steps",
save_steps=5,
)
trainer.train()
commits = list_repo_commits(f"{USER}/{output_dir_name}", token=self._token)
commits = [c.title for c in commits]
self.assertIn("initial commit", commits)
# Some commits are skipped if nothing has changed
# We expect 1 commit per 5 epochs + 1 commit at the end
nb_empty_commits = len(
[record for record in logs.records if "Skipping to prevent empty commit." in record.message]
)
nb_epoch_commits = len([commit for commit in commits if "Training in progress, step" in commit])
# max_steps depend on the number of available GPUs
max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader()))
nb_expected_commits = len(range(5, max_steps, 5))
# '>=' since final commit might be an empty commit as well (not deterministic)
self.assertGreaterEqual(nb_empty_commits + nb_epoch_commits, nb_expected_commits)
@require_tensorboard
def test_push_to_hub_with_tensorboard_logs(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
hub_token=self._token,
save_strategy="epoch",
report_to=["tensorboard"],
keep_report_to=True,
)
trainer.train()
# Push the runs via `push_to_hub()`
trainer.push_to_hub()
files = list_repo_files(f"{USER}/{output_dir_name}", token=self._token)
found_log = False
for f in files:
if len(f.split("runs")) > 1 and "events.out.tfevents" in f:
found_log = True
assert found_log is True, "No tensorboard log found in repo"
def test_push_to_hub_tags(self):
# Checks if `trainer.push_to_hub()` works correctly by adding the desired
# tag without having to pass `tags` in `push_to_hub`
# see:
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
)
trainer.model.add_model_tags(["test-trainer-tags"])
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"{USER}/{output_dir_name}")
model_card = ModelCard.load(repo_name)
self.assertTrue("test-trainer-tags" in model_card.data.tags)
def test_push_to_hub_with_revision(self):
# Checks if `trainer.push_to_hub()` works correctly by adding revision
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
)
branch = "v1.0"
create_branch(repo_id=trainer.hub_model_id, branch=branch, token=self._token, exist_ok=True)
push_commit = trainer.push_to_hub(revision=branch)
commits = list_repo_commits(repo_id=trainer.hub_model_id, revision=branch, token=self._token)
self.assertEqual(commits[0].commit_id, push_commit.oid)
@require_torch
@require_optuna
| TrainerIntegrationWithHubTester |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py | {
"start": 4503,
"end": 4755
} | class ____(PatchMerger):
def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None:
super().__init__(dim, context_dim, spatial_merge_size)
self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6)
| Qwen2_5_VLPatchMerger |
python | huggingface__transformers | src/transformers/models/deformable_detr/configuration_deformable_detr.py | {
"start": 921,
"end": 14503
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate
a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Deformable DETR
[SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_config (`PreTrainedConfig` or `dict`, *optional*):
The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
case it will default to `ResNetConfig()`.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_queries (`int`, *optional*, defaults to 300):
Number of object queries, i.e. detection slots. This is the maximal number of objects
[`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use
`two_stage_num_proposals` instead.
d_model (`int`, *optional*, defaults to 256):
Dimension of the layers.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 1024):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 1024):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
position_embedding_type (`str`, *optional*, defaults to `"sine"`):
Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
backbone (`str`, *optional*, defaults to `"resnet50"`):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
dilation (`bool`, *optional*, defaults to `False`):
Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
`use_timm_backbone` = `True`.
class_cost (`float`, *optional*, defaults to 1):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
mask_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the Focal loss in the panoptic segmentation loss.
dice_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.1):
Relative classification weight of the 'no-object' class in the object detection loss.
num_feature_levels (`int`, *optional*, defaults to 4):
The number of input feature levels.
encoder_n_points (`int`, *optional*, defaults to 4):
The number of sampled keys in each feature level for each attention head in the encoder.
decoder_n_points (`int`, *optional*, defaults to 4):
The number of sampled keys in each feature level for each attention head in the decoder.
two_stage (`bool`, *optional*, defaults to `False`):
Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
Deformable DETR, which are further fed into the decoder for iterative bounding box refinement.
two_stage_num_proposals (`int`, *optional*, defaults to 300):
The number of region proposals to be generated, in case `two_stage` is set to `True`.
with_box_refine (`bool`, *optional*, defaults to `False`):
Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
based on the predictions from the previous layer.
focal_alpha (`float`, *optional*, defaults to 0.25):
Alpha parameter in the focal loss.
disable_custom_kernels (`bool`, *optional*, defaults to `False`):
Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
kernels are not supported by PyTorch ONNX export.
Examples:
```python
>>> from transformers import DeformableDetrConfig, DeformableDetrModel
>>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration
>>> configuration = DeformableDetrConfig()
>>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
>>> model = DeformableDetrModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "deformable_detr"
sub_configs = {"backbone_config": AutoConfig}
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
use_timm_backbone=True,
backbone_config=None,
num_channels=3,
num_queries=300,
max_position_embeddings=1024,
encoder_layers=6,
encoder_ffn_dim=1024,
encoder_attention_heads=8,
decoder_layers=6,
decoder_ffn_dim=1024,
decoder_attention_heads=8,
encoder_layerdrop=0.0,
is_encoder_decoder=True,
activation_function="relu",
d_model=256,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
init_xavier_std=1.0,
return_intermediate=True,
auxiliary_loss=False,
position_embedding_type="sine",
backbone="resnet50",
use_pretrained_backbone=True,
backbone_kwargs=None,
dilation=False,
num_feature_levels=4,
encoder_n_points=4,
decoder_n_points=4,
two_stage=False,
two_stage_num_proposals=300,
with_box_refine=False,
class_cost=1,
bbox_cost=5,
giou_cost=2,
mask_loss_coefficient=1,
dice_loss_coefficient=1,
bbox_loss_coefficient=5,
giou_loss_coefficient=2,
eos_coefficient=0.1,
focal_alpha=0.25,
disable_custom_kernels=False,
**kwargs,
):
# We default to values which were previously hard-coded in the model. This enables configurability of the config
# while keeping the default behavior the same.
if use_timm_backbone and backbone_kwargs is None:
backbone_kwargs = {}
if dilation:
backbone_kwargs["output_stride"] = 16
backbone_kwargs["out_indices"] = [2, 3, 4] if num_feature_levels > 1 else [4]
backbone_kwargs["in_chans"] = num_channels
# Backwards compatibility
elif not use_timm_backbone and backbone in (None, "resnet50"):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.backbone_kwargs = backbone_kwargs
self.dilation = dilation
# deformable attributes
self.num_feature_levels = num_feature_levels
self.encoder_n_points = encoder_n_points
self.decoder_n_points = decoder_n_points
self.two_stage = two_stage
self.two_stage_num_proposals = two_stage_num_proposals
self.with_box_refine = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
self.focal_alpha = focal_alpha
self.disable_custom_kernels = disable_custom_kernels
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
self.tie_encoder_decoder = True
__all__ = ["DeformableDetrConfig"]
| DeformableDetrConfig |
python | python-pillow__Pillow | src/PIL/ImageFilter.py | {
"start": 8406,
"end": 8607
} | class ____(BuiltinFilter):
name = "Edge-enhance More"
# fmt: off
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 9, -1,
-1, -1, -1,
)
# fmt: on
| EDGE_ENHANCE_MORE |
python | lazyprogrammer__machine_learning_examples | rl/tic_tac_toe.py | {
"start": 1188,
"end": 4605
} | class ____:
def __init__(self, eps=0.1, alpha=0.5):
self.eps = eps # probability of choosing random action instead of greedy
self.alpha = alpha # learning rate
self.verbose = False
self.state_history = []
def setV(self, V):
self.V = V
def set_symbol(self, sym):
self.sym = sym
def set_verbose(self, v):
# if true, will print values for each position on the board
self.verbose = v
def reset_history(self):
self.state_history = []
def take_action(self, env):
# choose an action based on epsilon-greedy strategy
r = np.random.rand()
best_state = None
if r < self.eps:
# take a random action
if self.verbose:
print("Taking a random action")
possible_moves = []
for i in range(LENGTH):
for j in range(LENGTH):
if env.is_empty(i, j):
possible_moves.append((i, j))
idx = np.random.choice(len(possible_moves))
next_move = possible_moves[idx]
else:
# choose the best action based on current values of states
# loop through all possible moves, get their values
# keep track of the best value
pos2value = {} # for debugging
next_move = None
best_value = -1
for i in range(LENGTH):
for j in range(LENGTH):
if env.is_empty(i, j):
# what is the state if we made this move?
env.board[i,j] = self.sym
state = env.get_state()
env.board[i,j] = 0 # don't forget to change it back!
pos2value[(i,j)] = self.V[state]
if self.V[state] > best_value:
best_value = self.V[state]
best_state = state
next_move = (i, j)
# if verbose, draw the board w/ the values
if self.verbose:
print("Taking a greedy action")
for i in range(LENGTH):
print("------------------")
for j in range(LENGTH):
if env.is_empty(i, j):
# print the value
print(" %.2f|" % pos2value[(i,j)], end="")
else:
print(" ", end="")
if env.board[i,j] == env.x:
print("x |", end="")
elif env.board[i,j] == env.o:
print("o |", end="")
else:
print(" |", end="")
print("")
print("------------------")
# make the move
env.board[next_move[0], next_move[1]] = self.sym
def update_state_history(self, s):
# cannot put this in take_action, because take_action only happens
# once every other iteration for each player
# state history needs to be updated every iteration
# s = env.get_state() # don't want to do this twice so pass it in
self.state_history.append(s)
def update(self, env):
# we want to BACKTRACK over the states, so that:
# V(prev_state) = V(prev_state) + alpha*(V(next_state) - V(prev_state))
# where V(next_state) = reward if it's the most current state
#
# NOTE: we ONLY do this at the end of an episode
# not so for all the algorithms we will study
reward = env.reward(self.sym)
target = reward
for prev in reversed(self.state_history):
value = self.V[prev] + self.alpha*(target - self.V[prev])
self.V[prev] = value
target = value
self.reset_history()
# this class represents a tic-tac-toe game
# is a CS101-type of project
| Agent |
python | django__django | tests/admin_views/models.py | {
"start": 24628,
"end": 24736
} | class ____(models.Model):
name = models.CharField(max_length=100, verbose_name="State verbose_name")
| State |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 2401,
"end": 2534
} | class ____(Web3Exception):
"""
The supplied address does not have a valid checksum, as defined in EIP-55
"""
| InvalidAddress |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_single.py | {
"start": 1660,
"end": 28761
} | class ____(testing.AssertsCompiledSQL, fixtures.MappedTest):
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"employees",
metadata,
Column(
"employee_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(50)),
Column("manager_data", String(50)),
Column("engineer_info", String(50)),
Column("type", String(20)),
)
Table(
"reports",
metadata,
Column(
"report_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("employee_id", ForeignKey("employees.employee_id")),
Column("name", String(50)),
)
@classmethod
def setup_classes(cls):
global Employee, Manager, Engineer, JuniorEngineer
class Employee(cls.Comparable):
pass
class Manager(Employee):
pass
class Engineer(Employee):
pass
class JuniorEngineer(Engineer):
pass
class Report(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
(
Report,
reports,
Employee,
Manager,
JuniorEngineer,
employees,
Engineer,
) = (
cls.classes.Report,
cls.tables.reports,
cls.classes.Employee,
cls.classes.Manager,
cls.classes.JuniorEngineer,
cls.tables.employees,
cls.classes.Engineer,
)
cls.mapper_registry.map_imperatively(
Report, reports, properties={"employee": relationship(Employee)}
)
cls.mapper_registry.map_imperatively(
Employee,
employees,
polymorphic_identity="employee",
polymorphic_on=employees.c.type,
properties={
"reports": relationship(Report, back_populates="employee")
},
)
cls.mapper_registry.map_imperatively(
Manager, inherits=Employee, polymorphic_identity="manager"
)
cls.mapper_registry.map_imperatively(
Engineer, inherits=Employee, polymorphic_identity="engineer"
)
cls.mapper_registry.map_imperatively(
JuniorEngineer,
inherits=Engineer,
polymorphic_identity="juniorengineer",
)
def _fixture_one(self):
JuniorEngineer, Manager, Engineer = (
self.classes.JuniorEngineer,
self.classes.Manager,
self.classes.Engineer,
)
session = fixture_session()
m1 = Manager(name="Tom", manager_data="knows how to manage things")
e1 = Engineer(name="Kurt", engineer_info="knows how to hack")
e2 = JuniorEngineer(name="Ed", engineer_info="oh that ed")
session.add_all([m1, e1, e2])
session.flush()
return session, m1, e1, e2
def test_single_inheritance(self):
Employee, JuniorEngineer, Manager, Engineer = (
self.classes.Employee,
self.classes.JuniorEngineer,
self.classes.Manager,
self.classes.Engineer,
)
session, m1, e1, e2 = self._fixture_one()
assert session.query(Employee).all() == [m1, e1, e2]
assert session.query(Engineer).all() == [e1, e2]
assert session.query(Manager).all() == [m1]
assert session.query(JuniorEngineer).all() == [e2]
m1 = session.query(Manager).one()
session.expire(m1, ["manager_data"])
eq_(m1.manager_data, "knows how to manage things")
row = (
session.query(Engineer.name, Engineer.employee_id)
.filter(Engineer.name == "Kurt")
.first()
)
assert row.name == "Kurt"
assert row.employee_id == e1.employee_id
def test_discrim_bound_param_cloned_ok(self):
"""Test #6824
note this changes a bit with #12395"""
Manager = self.classes.Manager
subq1 = select(Manager.employee_id).label("foo")
subq2 = select(Manager.employee_id).label("bar")
self.assert_compile(
select(subq1, subq2),
"SELECT (SELECT employees.employee_id FROM employees "
"WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS foo, "
"(SELECT employees.employee_id FROM employees "
"WHERE employees.type IN (__[POSTCOMPILE_type_2])) AS bar",
checkparams={"type_1": ["manager"], "type_2": ["manager"]},
)
def test_multi_qualification(self):
Manager, Engineer = (self.classes.Manager, self.classes.Engineer)
session, m1, e1, e2 = self._fixture_one()
ealias = aliased(Engineer)
eq_(
session.query(Manager, ealias).join(ealias, true()).all(),
[(m1, e1), (m1, e2)],
)
eq_(session.query(Manager.name).all(), [("Tom",)])
eq_(
session.query(Manager.name, ealias.name)
.join(ealias, true())
.all(),
[("Tom", "Kurt"), ("Tom", "Ed")],
)
eq_(
session.query(func.upper(Manager.name), func.upper(ealias.name))
.join(ealias, true())
.all(),
[("TOM", "KURT"), ("TOM", "ED")],
)
eq_(
session.query(Manager)
.add_entity(ealias)
.join(ealias, true())
.all(),
[(m1, e1), (m1, e2)],
)
eq_(
session.query(Manager.name)
.add_columns(ealias.name)
.join(ealias, true())
.all(),
[("Tom", "Kurt"), ("Tom", "Ed")],
)
# TODO: I think raise error on this for now
# self.assertEquals(
# session.query(Employee.name, Manager.manager_data,
# Engineer.engineer_info).all(),
# []
# )
def test_column_qualification(self):
Employee, JuniorEngineer, Manager, Engineer = (
self.classes.Employee,
self.classes.JuniorEngineer,
self.classes.Manager,
self.classes.Engineer,
)
session, m1, e1, e2 = self._fixture_one()
m1id, e1id, e2id = m1.employee_id, e1.employee_id, e2.employee_id
def scalar(q):
return [x for x, in q]
eq_(scalar(session.query(Employee.employee_id)), [m1id, e1id, e2id])
eq_(scalar(session.query(Engineer.employee_id)), [e1id, e2id])
eq_(scalar(session.query(Manager.employee_id)), [m1id])
# this currently emits "WHERE type IN (?, ?) AND type IN (?, ?)",
# so no result.
eq_(session.query(Manager.employee_id, Engineer.employee_id).all(), [])
# however, with #12395, a with_polymorphic will merge the IN
# together
wp = with_polymorphic(Employee, [Manager, Engineer])
eq_(
session.query(
wp.Manager.employee_id, wp.Engineer.employee_id
).all(),
[(m1id, m1id), (e1id, e1id), (e2id, e2id)],
)
eq_(scalar(session.query(JuniorEngineer.employee_id)), [e2id])
def test_bundle_qualification(self):
Employee, JuniorEngineer, Manager, Engineer = (
self.classes.Employee,
self.classes.JuniorEngineer,
self.classes.Manager,
self.classes.Engineer,
)
session, m1, e1, e2 = self._fixture_one()
m1id, e1id, e2id = m1.employee_id, e1.employee_id, e2.employee_id
def scalar(q):
return [x[0] for x, in q]
eq_(
scalar(session.query(Bundle("name", Employee.employee_id))),
[m1id, e1id, e2id],
)
eq_(
scalar(session.query(Bundle("name", Engineer.employee_id))),
[e1id, e2id],
)
eq_(scalar(session.query(Bundle("name", Manager.employee_id))), [m1id])
# this currently emits "WHERE type IN (?, ?) AND type IN (?, ?)",
# so no result.
eq_(
session.query(
Bundle("name", Manager.employee_id, Engineer.employee_id)
).all(),
[],
)
# however, with #12395, a with_polymorphic will merge the IN
# together
wp = with_polymorphic(Employee, [Manager, Engineer])
eq_(
session.query(
Bundle("name", wp.Manager.employee_id, wp.Engineer.employee_id)
).all(),
[((m1id, m1id),), ((e1id, e1id),), ((e2id, e2id),)],
)
eq_(
scalar(session.query(Bundle("name", JuniorEngineer.employee_id))),
[e2id],
)
def test_from_subq(self):
Engineer = self.classes.Engineer
stmt = select(Engineer)
subq = aliased(
Engineer,
stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery(),
)
# so here we have an extra "WHERE type in ()", because
# both the inner and the outer queries have the Engineer entity.
# this is expected at the moment but it would be nice if
# _enable_single_crit or something similar could propagate here.
# legacy from_self() takes care of this because it applies
# _enable_single_crit at that moment.
stmt = select(subq).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
stmt,
"SELECT anon_1.employees_employee_id AS "
"anon_1_employees_employee_id, "
"anon_1.employees_name AS "
"anon_1_employees_name, "
"anon_1.employees_manager_data AS "
"anon_1_employees_manager_data, "
"anon_1.employees_engineer_info AS "
"anon_1_employees_engineer_info, "
"anon_1.employees_type AS "
"anon_1_employees_type FROM (SELECT "
"employees.employee_id AS "
"employees_employee_id, employees.name AS "
"employees_name, employees.manager_data AS "
"employees_manager_data, "
"employees.engineer_info AS "
"employees_engineer_info, employees.type "
"AS employees_type FROM employees WHERE "
"employees.type IN (__[POSTCOMPILE_type_1])) AS "
"anon_1 WHERE anon_1.employees_type IN (__[POSTCOMPILE_type_2])",
use_default_dialect=True,
)
def test_select_from_aliased_w_subclass(self):
Engineer = self.classes.Engineer
sess = fixture_session()
a1 = aliased(Engineer)
self.assert_compile(
sess.query(a1.employee_id).select_from(a1),
"SELECT employees_1.employee_id AS employees_1_employee_id "
"FROM employees AS employees_1 WHERE employees_1.type "
"IN (__[POSTCOMPILE_type_1])",
)
self.assert_compile(
sess.query(literal("1")).select_from(a1),
"SELECT :param_1 AS anon_1 FROM employees AS employees_1 "
"WHERE employees_1.type IN (__[POSTCOMPILE_type_1])",
)
@testing.combinations(
(
lambda Engineer, Report: select(Report.report_id)
.select_from(Engineer)
.join(Engineer.reports),
),
(
lambda Engineer, Report: select(Report.report_id).select_from(
orm_join(Engineer, Report, Engineer.reports)
),
),
(
lambda Engineer, Report: select(Report.report_id).join_from(
Engineer, Report, Engineer.reports
),
),
(
lambda Engineer, Report: select(Report.report_id)
.select_from(Engineer)
.join(Report),
),
argnames="stmt_fn",
)
@testing.combinations(True, False, argnames="alias_engineer")
def test_select_col_only_from_w_join(self, stmt_fn, alias_engineer):
"""test #11412 which seems to have been fixed by #10365"""
Engineer = self.classes.Engineer
Report = self.classes.Report
if alias_engineer:
Engineer = aliased(Engineer)
stmt = testing.resolve_lambda(
stmt_fn, Engineer=Engineer, Report=Report
)
if alias_engineer:
self.assert_compile(
stmt,
"SELECT reports.report_id FROM employees AS employees_1 "
"JOIN reports ON employees_1.employee_id = "
"reports.employee_id WHERE employees_1.type "
"IN (__[POSTCOMPILE_type_1])",
)
else:
self.assert_compile(
stmt,
"SELECT reports.report_id FROM employees JOIN reports "
"ON employees.employee_id = reports.employee_id "
"WHERE employees.type IN (__[POSTCOMPILE_type_1])",
)
@testing.combinations(
(
lambda Engineer, Report: select(Report)
.select_from(Engineer)
.join(Engineer.reports),
),
(
lambda Engineer, Report: select(Report).select_from(
orm_join(Engineer, Report, Engineer.reports)
),
),
(
lambda Engineer, Report: select(Report).join_from(
Engineer, Report, Engineer.reports
),
),
argnames="stmt_fn",
)
@testing.combinations(True, False, argnames="alias_engineer")
def test_select_from_w_join_left(self, stmt_fn, alias_engineer):
"""test #8721"""
Engineer = self.classes.Engineer
Report = self.classes.Report
if alias_engineer:
Engineer = aliased(Engineer)
stmt = testing.resolve_lambda(
stmt_fn, Engineer=Engineer, Report=Report
)
if alias_engineer:
self.assert_compile(
stmt,
"SELECT reports.report_id, reports.employee_id, reports.name "
"FROM employees AS employees_1 JOIN reports "
"ON employees_1.employee_id = reports.employee_id "
"WHERE employees_1.type IN (__[POSTCOMPILE_type_1])",
)
else:
self.assert_compile(
stmt,
"SELECT reports.report_id, reports.employee_id, reports.name "
"FROM employees JOIN reports ON employees.employee_id = "
"reports.employee_id "
"WHERE employees.type IN (__[POSTCOMPILE_type_1])",
)
@testing.combinations(
(
lambda Engineer, Report: select(
Report.report_id, Engineer.employee_id
)
.select_from(Engineer)
.join(Engineer.reports),
),
(
lambda Engineer, Report: select(
Report.report_id, Engineer.employee_id
).select_from(orm_join(Engineer, Report, Engineer.reports)),
),
(
lambda Engineer, Report: select(
Report.report_id, Engineer.employee_id
).join_from(Engineer, Report, Engineer.reports),
),
)
def test_select_from_w_join_left_including_entity(self, stmt_fn):
"""test #8721"""
Engineer = self.classes.Engineer
Report = self.classes.Report
stmt = testing.resolve_lambda(
stmt_fn, Engineer=Engineer, Report=Report
)
self.assert_compile(
stmt,
"SELECT reports.report_id, employees.employee_id "
"FROM employees JOIN reports ON employees.employee_id = "
"reports.employee_id "
"WHERE employees.type IN (__[POSTCOMPILE_type_1])",
)
@testing.combinations(
(
lambda Engineer, Report: select(Report).join(
Report.employee.of_type(Engineer)
),
),
(
lambda Engineer, Report: select(Report).select_from(
orm_join(Report, Engineer, Report.employee.of_type(Engineer))
)
),
(
lambda Engineer, Report: select(Report).join_from(
Report, Engineer, Report.employee.of_type(Engineer)
),
),
)
def test_select_from_w_join_right(self, stmt_fn):
"""test #8721"""
Engineer = self.classes.Engineer
Report = self.classes.Report
stmt = testing.resolve_lambda(
stmt_fn, Engineer=Engineer, Report=Report
)
self.assert_compile(
stmt,
"SELECT reports.report_id, reports.employee_id, reports.name "
"FROM reports JOIN employees ON employees.employee_id = "
"reports.employee_id AND employees.type "
"IN (__[POSTCOMPILE_type_1])",
)
def test_from_statement_select(self):
Engineer = self.classes.Engineer
stmt = select(Engineer)
q = select(Engineer).from_statement(stmt)
self.assert_compile(
q,
"SELECT employees.employee_id, employees.name, "
"employees.manager_data, employees.engineer_info, "
"employees.type FROM employees WHERE employees.type "
"IN (__[POSTCOMPILE_type_1])",
)
def test_from_statement_update(self):
"""test #6591"""
Engineer = self.classes.Engineer
from sqlalchemy import update
stmt = (
update(Engineer)
.values(engineer_info="bar")
.returning(Engineer.employee_id)
)
q = select(Engineer).from_statement(stmt)
self.assert_compile(
q,
"UPDATE employees SET engineer_info=:engineer_info "
"WHERE employees.type IN (__[POSTCOMPILE_type_1]) "
"RETURNING employees.employee_id",
dialect="default_enhanced",
)
def test_union_modifiers(self):
Engineer, Manager = self.classes("Engineer", "Manager")
sess = fixture_session()
q1 = sess.query(Engineer).filter(Engineer.engineer_info == "foo")
q2 = sess.query(Manager).filter(Manager.manager_data == "bar")
assert_sql = (
"SELECT anon_1.employees_employee_id AS "
"anon_1_employees_employee_id, "
"anon_1.employees_name AS anon_1_employees_name, "
"anon_1.employees_manager_data AS anon_1_employees_manager_data, "
"anon_1.employees_engineer_info AS anon_1_employees_engineer_info, " # noqa
"anon_1.employees_type AS anon_1_employees_type "
"FROM (SELECT employees.employee_id AS employees_employee_id, "
"employees.name AS employees_name, "
"employees.manager_data AS employees_manager_data, "
"employees.engineer_info AS employees_engineer_info, "
"employees.type AS employees_type FROM employees "
"WHERE employees.engineer_info = :engineer_info_1 "
"AND employees.type IN (__[POSTCOMPILE_type_1]) "
"%(token)s "
"SELECT employees.employee_id AS employees_employee_id, "
"employees.name AS employees_name, "
"employees.manager_data AS employees_manager_data, "
"employees.engineer_info AS employees_engineer_info, "
"employees.type AS employees_type FROM employees "
"WHERE employees.manager_data = :manager_data_1 "
"AND employees.type IN (__[POSTCOMPILE_type_2])) AS anon_1"
)
for meth, token in [
(q1.union, "UNION"),
(q1.union_all, "UNION ALL"),
(q1.except_, "EXCEPT"),
(q1.except_all, "EXCEPT ALL"),
(q1.intersect, "INTERSECT"),
(q1.intersect_all, "INTERSECT ALL"),
]:
self.assert_compile(
meth(q2),
assert_sql % {"token": token},
checkparams={
"engineer_info_1": "foo",
"type_1": ["engineer", "juniorengineer"],
"manager_data_1": "bar",
"type_2": ["manager"],
},
)
def test_having(self):
Engineer, Manager = self.classes("Engineer", "Manager")
sess = fixture_session()
self.assert_compile(
sess.query(Engineer)
.group_by(Engineer.employee_id)
.having(Engineer.name == "js"),
"SELECT employees.employee_id AS employees_employee_id, "
"employees.name AS employees_name, employees.manager_data "
"AS employees_manager_data, employees.engineer_info "
"AS employees_engineer_info, employees.type AS employees_type "
"FROM employees WHERE employees.type IN (__[POSTCOMPILE_type_1]) "
"GROUP BY employees.employee_id HAVING employees.name = :name_1",
)
def test_select_from_count(self):
Manager, Engineer = (self.classes.Manager, self.classes.Engineer)
sess = fixture_session()
m1 = Manager(name="Tom", manager_data="data1")
e1 = Engineer(name="Kurt", engineer_info="knows how to hack")
sess.add_all([m1, e1])
sess.flush()
eq_(sess.query(func.count(1)).select_from(Manager).all(), [(1,)])
def test_select_from_subquery(self):
Manager, JuniorEngineer, employees, Engineer = (
self.classes.Manager,
self.classes.JuniorEngineer,
self.tables.employees,
self.classes.Engineer,
)
sess = fixture_session()
m1 = Manager(name="Tom", manager_data="data1")
m2 = Manager(name="Tom2", manager_data="data2")
e1 = Engineer(name="Kurt", engineer_info="knows how to hack")
e2 = JuniorEngineer(name="Ed", engineer_info="oh that ed")
sess.add_all([m1, m2, e1, e2])
sess.flush()
ma = aliased(
Manager,
employees.select()
.where(employees.c.type == "manager")
.order_by(employees.c.employee_id)
.limit(10)
.subquery(),
)
eq_(
sess.query(ma).all(),
[m1, m2],
)
def test_select_from_subquery_with_composed_union(self):
Report, reports, Manager, JuniorEngineer, employees, Engineer = (
self.classes.Report,
self.tables.reports,
self.classes.Manager,
self.classes.JuniorEngineer,
self.tables.employees,
self.classes.Engineer,
)
sess = fixture_session()
r1, r2, r3, r4 = (
Report(name="r1"),
Report(name="r2"),
Report(name="r3"),
Report(name="r4"),
)
m1 = Manager(name="manager1", manager_data="data1", reports=[r1])
m2 = Manager(name="manager2", manager_data="data2", reports=[r2])
e1 = Engineer(name="engineer1", engineer_info="einfo1", reports=[r3])
e2 = JuniorEngineer(
name="engineer2", engineer_info="einfo2", reports=[r4]
)
sess.add_all([m1, m2, e1, e2])
sess.flush()
stmt = select(reports, employees).select_from(
reports.outerjoin(
employees,
and_(
employees.c.employee_id == reports.c.employee_id,
employees.c.type == "manager",
),
)
)
subq = stmt.subquery()
ra = aliased(Report, subq)
# this test previously used select_entity_from(). the standard
# conversion to use aliased() needs to be adjusted to be against
# Employee, not Manager, otherwise the ORM will add the manager single
# inh criteria to the outside which will break the outer join
ma = aliased(Employee, subq)
eq_(
sess.query(ra, ma).order_by(ra.name).all(),
[(r1, m1), (r2, m2), (r3, None), (r4, None)],
)
# however if someone really wants to run that SELECT statement and
# get back these two entities, they can use from_statement() more
# directly. in 1.4 we don't even need tablename label style for the
# select(), automatic disambiguation works great
eq_(
sess.query(Report, Manager)
.from_statement(stmt.order_by(reports.c.name))
.all(),
[(r1, m1), (r2, m2), (r3, None), (r4, None)],
)
def test_count(self):
Employee = self.classes.Employee
JuniorEngineer = self.classes.JuniorEngineer
Manager = self.classes.Manager
Engineer = self.classes.Engineer
sess = fixture_session()
m1 = Manager(name="Tom", manager_data="data1")
m2 = Manager(name="Tom2", manager_data="data2")
e1 = Engineer(name="Kurt", engineer_info="data3")
e2 = JuniorEngineer(name="marvin", engineer_info="data4")
sess.add_all([m1, m2, e1, e2])
sess.flush()
eq_(sess.query(Manager).count(), 2)
eq_(sess.query(Engineer).count(), 2)
eq_(sess.query(Employee).count(), 4)
eq_(sess.query(Manager).filter(Manager.name.like("%m%")).count(), 2)
eq_(sess.query(Employee).filter(Employee.name.like("%m%")).count(), 3)
def test_exists_standalone(self):
Engineer = self.classes.Engineer
sess = fixture_session()
self.assert_compile(
sess.query(
sess.query(Engineer).filter(Engineer.name == "foo").exists()
),
"SELECT EXISTS (SELECT 1 FROM employees WHERE "
"employees.name = :name_1 AND employees.type "
"IN (__[POSTCOMPILE_type_1])) AS anon_1",
)
def test_type_filtering(self):
Report, Manager, Engineer = (
self.classes.Report,
self.classes.Manager,
self.classes.Engineer,
)
sess = fixture_session()
m1 = Manager(name="Tom", manager_data="data1")
r1 = Report(employee=m1)
sess.add_all([m1, r1])
sess.flush()
rq = sess.query(Report)
assert (
len(rq.filter(Report.employee.of_type(Manager).has()).all()) == 1
)
assert (
len(rq.filter(Report.employee.of_type(Engineer).has()).all()) == 0
)
def test_type_joins(self):
Report, Manager, Engineer = (
self.classes.Report,
self.classes.Manager,
self.classes.Engineer,
)
sess = fixture_session()
m1 = Manager(name="Tom", manager_data="data1")
r1 = Report(employee=m1)
sess.add_all([m1, r1])
sess.flush()
rq = sess.query(Report)
assert len(rq.join(Report.employee.of_type(Manager)).all()) == 1
assert len(rq.join(Report.employee.of_type(Engineer)).all()) == 0
| SingleInheritanceTest |
python | spack__spack | lib/spack/spack/test/error_messages.py | {
"start": 1473,
"end": 1595
} | class ____(Package):
version("3.5")
version("3.4")
depends_on("y4@4.0")
""",
)
_pkgy4 = (
"y4",
"""\
| Y3 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/dataform.py | {
"start": 1231,
"end": 4811
} | class ____(BaseSensorOperator):
"""
Checks for the status of a Workflow Invocation in Google Cloud Dataform.
:param project_id: Required, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param region: Required, The location of the Dataform workflow invocation (for example europe-west1).
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required, ID of the workflow invocation to be checked.
:param expected_statuses: The expected state of the operation.
See:
https://cloud.google.com/python/docs/reference/dataform/latest/google.cloud.dataform_v1beta1.types.WorkflowInvocation.State
:param failure_statuses: State that will terminate the sensor with an exception
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("workflow_invocation_id",)
def __init__(
self,
*,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
expected_statuses: set[int] | int,
failure_statuses: Iterable[int] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.repository_id = repository_id
self.workflow_invocation_id = workflow_invocation_id
self.expected_statuses = (
{expected_statuses} if isinstance(expected_statuses, int) else expected_statuses
)
self.failure_statuses = failure_statuses
self.project_id = project_id
self.region = region
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataformHook | None = None
def poke(self, context: Context) -> bool:
self.hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
workflow_invocation = self.hook.get_workflow_invocation(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=self.workflow_invocation_id,
)
workflow_status = workflow_invocation.state
if workflow_status is not None:
if self.failure_statuses and workflow_status in self.failure_statuses:
message = (
f"Workflow Invocation with id '{self.workflow_invocation_id}' "
f"state is: {workflow_status}. Terminating sensor..."
)
raise AirflowException(message)
return workflow_status in self.expected_statuses
| DataformWorkflowInvocationStateSensor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 392472,
"end": 392798
} | class ____(sgqlc.types.Interface):
"""Entities that can be deleted."""
__schema__ = github_schema
__field_names__ = ("viewer_can_delete",)
viewer_can_delete = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanDelete")
"""Check if the current viewer can delete this object."""
| Deletable |
python | walkccc__LeetCode | solutions/3104. Find Longest Self-Contained Substring/3104.py | {
"start": 0,
"end": 1275
} | class ____:
def maxSubstringLength(self, s: str) -> int:
allCount = collections.Counter(s)
# Similar to 395. Longest Substring with At Least K Repeating Characters
def maxSubstringLengthWithNUniqueLetters(n: int) -> int:
res = -1
# the number of unique letters
uniqueLetters = 0
# the number of letters that have all their frequency in the substring
lettersHavingAllFreq = 0
count = collections.Counter()
l = 0
for r, c in enumerate(s):
count[c] += 1
if count[c] == 1:
uniqueLetters += 1
if count[c] == allCount[c]:
lettersHavingAllFreq += 1
while uniqueLetters > n:
if count[s[l]] == allCount[s[l]]:
lettersHavingAllFreq -= 1
count[s[l]] -= 1
if count[s[l]] == 0:
uniqueLetters -= 1
l += 1
# Since both the number of unique letters and the number of letters
# having all their frequency are equal to n, this is a valid window.
# Implcit: uniqueLetters == n
if lettersHavingAllFreq == n and r - l + 1 < len(s):
res = max(res, r - l + 1)
return res
return max(maxSubstringLengthWithNUniqueLetters(n)
for n in range(1, 27))
| Solution |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/build_warnings/package.py | {
"start": 229,
"end": 1716
} | class ____(Package):
"""This package's install fails but only emits warnings."""
homepage = "http://www.example.com/trivial_install"
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
if sys.platform == "win32":
with open("configure.bat", "w", encoding="utf-8") as f:
f.write(
"""
@ECHO off
ECHO 'checking for gcc... /Users/gamblin2/src/spack/lib/spack/env/clang/clang'
ECHO 'checking whether the C compiler works... yes'
ECHO 'checking for C compiler default output file name... a.out'
ECHO 'WARNING: ALL CAPITAL WARNING!'
ECHO 'checking for suffix of executables...'
ECHO 'foo.c:89: warning: some weird warning!'
EXIT /B 1
"""
)
Executable("configure.bat")("--prefix=%s" % self.prefix)
else:
with open("configure", "w", encoding="utf-8") as f:
f.write(
"""#!/bin/sh\n
echo 'checking for gcc... /Users/gamblin2/src/spack/lib/spack/env/clang/clang'
echo 'checking whether the C compiler works... yes'
echo 'checking for C compiler default output file name... a.out'
echo 'WARNING: ALL CAPITAL WARNING!'
echo 'checking for suffix of executables...'
echo 'foo.c:89: warning: some weird warning!'
exit 1
"""
)
configure()
| BuildWarnings |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/utils/filesystem.py | {
"start": 409,
"end": 3094
} | class ____(FileSystemEventHandler):
"""Basic handler that clears the screen and re-executes the callback when any of the watched paths change
in a way that would affect the hash of the paths. Passes the new hash to the callback.
"""
def __init__(
self,
paths: Sequence[Path],
includes: Optional[Sequence[str]],
excludes: Sequence[str],
callback: Callable[[str], Any],
):
self._callback = callback
self._paths = paths
self._includes = includes
self._excludes = excludes
self._prev_hash = hash_paths(
self._paths, self._includes, self._excludes, error_on_missing=False
)
self.clear_and_execute(self._prev_hash)
def dispatch(self, _event: FileSystemEvent): # pyright: ignore[reportIncompatibleMethodOverride]
new_hash = hash_paths(self._paths, self._includes, self._excludes, error_on_missing=False)
if new_hash != self._prev_hash:
self.clear_and_execute(new_hash)
self._prev_hash = new_hash
def clear_and_execute(self, new_hash: str):
clear_screen()
self._callback(new_hash)
current_time = datetime.datetime.now().strftime("%H:%M:%S")
print(f"\nUpdated at {current_time}, watching for changes...") # noqa: T201
# This is a global variable that is used to signal the watcher to exit in tests
SHOULD_WATCHER_EXIT = False
def watch_paths(
paths: Sequence[Path],
callback: Callable[[str], Any],
includes: Optional[Sequence[str]] = None,
excludes: Sequence[str] = DEFAULT_FILE_EXCLUDE_PATTERNS,
):
"""Watches the given paths for changes and calls the callback when they change.
The callback should take a single argument, the new hash of the paths.
Runs synchronously until the observer is stopped, or keyboard interrupt is received.
Args:
paths: The paths to watch.
callback: The callback to call when path contents change.
includes: A list of glob patterns to target, excluding files that don't match any of the patterns.
excludes: A list of glob patterns to exclude, including files that match any of the patterns. Defaults to
various Python-related files that are not relevant to the contents of the project.
"""
observer = Observer()
handler = PathChangeHandler(paths, includes, excludes, callback)
for path in paths:
observer.schedule(handler, str(path), recursive=True)
observer.start()
try:
while observer.is_alive() and not SHOULD_WATCHER_EXIT:
time.sleep(0.5)
finally:
observer.stop()
observer.join()
| PathChangeHandler |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 12696,
"end": 14220
} | class ____(TestCase, BaseMetricsTestCase):
def setUp(self) -> None:
super().setUp()
self.now = timezone.now().replace(minute=0, second=0, microsecond=0)
for _ in range(2):
self.store_session(self.build_session(status="exited"))
self.dataset = Dataset.Metrics
def test_sessions(self) -> None:
incident = self.create_incident(
date_started=self.now - timedelta(minutes=120), query="", projects=[self.project]
)
alert_rule = self.create_alert_rule(
self.organization,
[self.project],
query="",
time_window=1,
dataset=self.dataset,
aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
)
incident.update(alert_rule=alert_rule)
snuba_query = incident.alert_rule.snuba_query
project_ids = list(
IncidentProject.objects.filter(incident=incident).values_list("project_id", flat=True)
)
params = GetMetricIssueAggregatesParams(
snuba_query=snuba_query,
date_started=incident.date_started,
current_end_date=incident.current_end_date,
organization=incident.organization,
project_ids=project_ids,
)
incident_aggregates = get_metric_issue_aggregates(params)
assert "count" in incident_aggregates
assert incident_aggregates["count"] == 100.0
@freeze_time()
| GetCrashRateMetricsIncidentAggregatesTest |
python | apache__airflow | airflow-ctl/src/airflowctl/exceptions.py | {
"start": 1137,
"end": 1278
} | class ____(AirflowCtlException):
"""Raise when the requested object/resource is not available in the system."""
| AirflowCtlNotFoundException |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/powerbi.py | {
"start": 1076,
"end": 1236
} | class ____(Enum):
"""Power BI refresh dataset details."""
REQUEST_ID = "request_id"
STATUS = "status"
ERROR = "error"
| PowerBIDatasetRefreshFields |
python | apache__airflow | providers/opsgenie/src/airflow/providers/opsgenie/typing/opsgenie.py | {
"start": 938,
"end": 2575
} | class ____(TypedDict):
"""
Payload schema for creating an Opsgenie alert.
:param message: The Message of the Opsgenie alert.
:param alias: Client-defined identifier of the alert.
:param description: Description field of the alert.
:param responders: Teams, users, escalations and schedules that
the alert will be routed to send notifications.
:param visible_to: Teams and users that the alert will become visible
to without sending any notification.
:param actions: Custom actions that will be available for the alert.
:param tags: Tags of the alert.
:param details: Map of key-value pairs to use as custom properties of the alert.
:param entity: Entity field of the alert that is
generally used to specify which domain alert is related to.
:param source: Source field of the alert. Default value is
IP address of the incoming request.
:param priority: Priority level of the alert. Default value is P3.
:param user: Display name of the request owner.
:param note: Additional note that will be added while creating the alert.
"""
message: Required[str]
alias: NotRequired[str | None]
description: NotRequired[str | None]
responders: NotRequired[list[dict] | None]
visible_to: NotRequired[list[dict] | None]
actions: NotRequired[list[str] | None]
tags: NotRequired[list[str] | None]
details: NotRequired[dict | None]
entity: NotRequired[str | None]
source: NotRequired[str | None]
priority: NotRequired[str | None]
user: NotRequired[str | None]
note: NotRequired[str | None]
| CreateAlertPayload |
python | huggingface__transformers | src/transformers/models/hgnet_v2/modeling_hgnet_v2.py | {
"start": 11065,
"end": 12136
} | class ____(nn.Module):
def __init__(self, config: HGNetV2Config):
super().__init__()
self.stages = nn.ModuleList([])
for stage_index in range(len(config.stage_in_channels)):
resnet_stage = HGNetV2Stage(config, stage_index)
self.stages.append(resnet_stage)
def forward(
self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_state,
hidden_states=hidden_states,
)
| HGNetV2Encoder |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 47020,
"end": 47396
} | class ____(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
| E17 |
python | django__django | django/contrib/auth/admin.py | {
"start": 1604,
"end": 10163
} | class ____(admin.ModelAdmin):
add_form_template = "admin/auth/user/add_form.html"
change_user_password_template = None
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("first_name", "last_name", "email")}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": ("username", "usable_password", "password1", "password2"),
},
),
)
form = UserChangeForm
add_form = AdminUserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ("username", "email", "first_name", "last_name", "is_staff")
list_filter = ("is_staff", "is_superuser", "is_active", "groups")
search_fields = ("username", "first_name", "last_name", "email")
ordering = ("username",)
filter_horizontal = (
"groups",
"user_permissions",
)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super().get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults["form"] = self.add_form
defaults.update(kwargs)
return super().get_form(request, obj, **defaults)
def get_urls(self):
return [
path(
"<id>/password/",
self.admin_site.admin_view(self.user_change_password),
name="auth_user_password_change",
),
*super().get_urls(),
]
def lookup_allowed(self, lookup, value, request):
# Don't allow lookups involving passwords.
return not lookup.startswith("password") and super().lookup_allowed(
lookup, value, request
)
@method_decorator([sensitive_post_parameters(), csrf_protect])
def add_view(self, request, form_url="", extra_context=None):
if request.method in ("GET", "HEAD", "OPTIONS", "TRACE"):
return self._add_view(request, form_url, extra_context)
with transaction.atomic(using=router.db_for_write(self.model)):
return self._add_view(request, form_url, extra_context)
def _add_view(self, request, form_url="", extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
"order to add users, Django requires that your user "
'account have both the "Add user" and "Change user" '
"permissions set."
)
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.opts.get_field(self.model.USERNAME_FIELD)
defaults = {
"auto_populated_fields": (),
"username_help_text": username_field.help_text,
}
extra_context.update(defaults)
return super().add_view(request, form_url, extra_context)
@method_decorator(sensitive_post_parameters())
def user_change_password(self, request, id, form_url=""):
user = self.get_object(request, unquote(id))
if not self.has_change_permission(request, user):
raise PermissionDenied
if user is None:
raise Http404(
_("%(name)s object with primary key %(key)r does not exist.")
% {
"name": self.opts.verbose_name,
"key": escape(id),
}
)
if request.method == "POST":
form = self.change_password_form(user, request.POST)
if form.is_valid():
# If disabling password-based authentication was requested
# (via the form field `usable_password`), the submit action
# must be "unset-password". This check is most relevant when
# the admin user has two submit buttons available (for example
# when Javascript is disabled).
valid_submission = (
form.cleaned_data["set_usable_password"]
or "unset-password" in request.POST
)
if not valid_submission:
msg = gettext("Conflicting form data submitted. Please try again.")
messages.error(request, msg)
return HttpResponseRedirect(request.get_full_path())
user = form.save()
change_message = self.construct_change_message(request, form, None)
self.log_change(request, user, change_message)
if user.has_usable_password():
msg = gettext("Password changed successfully.")
else:
msg = gettext("Password-based authentication was disabled.")
messages.success(request, msg)
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(
reverse(
"%s:%s_%s_change"
% (
self.admin_site.name,
user._meta.app_label,
user._meta.model_name,
),
args=(user.pk,),
)
)
else:
form = self.change_password_form(user)
fieldsets = [(None, {"fields": list(form.base_fields)})]
admin_form = admin.helpers.AdminForm(form, fieldsets, {})
if user.has_usable_password():
title = _("Change password: %s")
else:
title = _("Set password: %s")
context = {
"title": title % escape(user.get_username()),
"adminForm": admin_form,
"form_url": form_url,
"form": form,
"is_popup": (IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET),
"is_popup_var": IS_POPUP_VAR,
"add": True,
"change": False,
"has_delete_permission": False,
"has_change_permission": True,
"has_absolute_url": False,
"opts": self.opts,
"original": user,
"save_as": False,
"show_save": True,
**self.admin_site.each_context(request),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.change_user_password_template
or "admin/auth/user/change_password.html",
context,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if "_addanother" not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST = request.POST.copy()
request.POST["_continue"] = 1
return super().response_add(request, obj, post_url_continue)
| UserAdmin |
python | google__jax | tests/pmap_test.py | {
"start": 112049,
"end": 118090
} | class ____(jtu.JaxTestCase):
def testShardsPerAxis(self):
shape = (4, 8)
spec = pxla.ShardingSpec(sharding=map(pxla.Chunked, ([2], [2])),
mesh_mapping=map(pxla.ShardedAxis, (0, 1)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((slice(0,2), slice(0,4)),
(slice(0,2), slice(4,8)),
(slice(2,4), slice(0,4)),
(slice(2,4), slice(4,8))))
def testShardedAxisPermutation(self):
shape = (4, 8)
spec = pxla.ShardingSpec(sharding=map(pxla.Chunked, ([2], [2])),
mesh_mapping=map(pxla.ShardedAxis, (1, 0)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((slice(0,2), slice(0,4)),
(slice(2,4), slice(0,4)),
(slice(0,2), slice(4,8)),
(slice(2,4), slice(4,8))))
def testShardedAxisPermutationAndReplication(self):
shape = (4, 8)
spec = pxla.ShardingSpec(sharding=map(pxla.Chunked, ([2], [2])),
mesh_mapping=(pxla.Replicated(2),
pxla.ShardedAxis(1),
pxla.ShardedAxis(0)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((slice(0,2), slice(0,4)),
(slice(2,4), slice(0,4)),
(slice(0,2), slice(4,8)),
(slice(2,4), slice(4,8))) * 2)
def testUnshardedAxis(self):
shape = (4, 8)
spec = pxla.ShardingSpec(sharding=(pxla.Chunked([2]), pxla.NoSharding()),
mesh_mapping=(pxla.ShardedAxis(0),))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((slice(0,2), slice(None)),
(slice(2,4), slice(None))))
def testNoSharding(self):
shape = (4, 8)
spec = pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.NoSharding()),
mesh_mapping=())
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((slice(None), slice(None)),))
def testUnmaterializedAxis(self):
shape = (4, 8)
spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(4), pxla.NoSharding()),
mesh_mapping=(pxla.ShardedAxis(0),))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((0, slice(None)),
(1, slice(None)),
(2, slice(None)),
(3, slice(None))))
shape = (2, 2)
spec = pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.Unstacked(2)),
mesh_mapping=(pxla.ShardedAxis(0),))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((slice(None), 0),
(slice(None), 1)))
def testReplicationAfterUnsharded(self):
shape = (2, 8)
spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.NoSharding()),
mesh_mapping=(pxla.ShardedAxis(0), pxla.Replicated(3)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
tuple([(0, slice(None))] * 3 + [(1, slice(None))] * 3))
def testReplicationPosition2(self):
shape = (2, 8)
spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.Chunked([2])),
mesh_mapping=(pxla.ShardedAxis(0), pxla.ShardedAxis(1), pxla.Replicated(3)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((0, slice(0, 4)), (0, slice(0, 4)), (0, slice(0, 4)),
(0, slice(4, 8)), (0, slice(4, 8)), (0, slice(4, 8)),
(1, slice(0, 4)), (1, slice(0, 4)), (1, slice(0, 4)),
(1, slice(4, 8)), (1, slice(4, 8)), (1, slice(4, 8))))
def testReplicationPosition1(self):
shape = (2, 8)
spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.Chunked([2])),
mesh_mapping=(pxla.ShardedAxis(0), pxla.Replicated(3), pxla.ShardedAxis(1)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((0, slice(0, 4)), (0, slice(4, 8)),
(0, slice(0, 4)), (0, slice(4, 8)),
(0, slice(0, 4)), (0, slice(4, 8)),
(1, slice(0, 4)), (1, slice(4, 8)),
(1, slice(0, 4)), (1, slice(4, 8)),
(1, slice(0, 4)), (1, slice(4, 8))))
def testReplicationPosition0(self):
shape = (2, 8)
spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.NoSharding()),
mesh_mapping=(pxla.Replicated(3), pxla.ShardedAxis(0)))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
tuple([(0, slice(None)), (1, slice(None))] * 3))
def testMultipleReplications(self):
shape = (2, 7, 4)
spec = pxla.ShardingSpec(
sharding=(pxla.Unstacked(2), pxla.NoSharding(), pxla.Chunked([2])),
mesh_mapping=(pxla.Replicated(3), pxla.Replicated(2),
pxla.ShardedAxis(0), pxla.Replicated(2),
pxla.ShardedAxis(1)))
self.assertEqual(
sharding_specs.spec_to_indices(shape, spec),
((0, slice(None), slice(0, 2)), (0, slice(None), slice(2, 4)),
(0, slice(None), slice(0, 2)), (0, slice(None), slice(2, 4)),
(1, slice(None), slice(0, 2)), (1, slice(None), slice(2, 4)),
(1, slice(None), slice(0, 2)), (1, slice(None), slice(2, 4))) * 3 * 2)
def testReplicatedScalar(self):
shape = ()
spec = pxla.ShardingSpec(sharding=(),
mesh_mapping=(pxla.Replicated(3),))
self.assertEqual(sharding_specs.spec_to_indices(shape, spec),
((), (), ()))
def _spec_str(spec):
return (f"({spec.sharding},"
f"{spec.mesh_mapping},)")
@jtu.pytest_mark_if_available('multiaccelerator')
| SpecToIndicesTest |
python | pytorch__pytorch | test/test_custom_ops.py | {
"start": 2970,
"end": 17800
} | class ____(CustomOpTestCaseBase):
@parametrize("check_gradients", (False, "auto"))
@parametrize("dynamic", (True, False))
def test_aot_autograd_check_degenerate_cases(
self, device, dynamic, check_gradients
):
def simple(x):
return x.clone()
# Should not raise
x = torch.randn(3, device=device)
optests.aot_autograd_check(
simple, (x,), {}, dynamic=dynamic, check_gradients=check_gradients
)
def outputs_dont_require_grad(x):
return x.detach()
# Should not raise
y = torch.randn(3, device=device, requires_grad=True)
optests.aot_autograd_check(
simple, (y,), {}, dynamic=dynamic, check_gradients=check_gradients
)
def no_outputs(x):
return x.detach()
# Should not raise
x = torch.randn(3, device=device, requires_grad=True)
y = torch.randn(3, device=device, requires_grad=False)
optests.aot_autograd_check(
no_outputs, (x,), {}, dynamic=dynamic, check_gradients=check_gradients
)
optests.aot_autograd_check(
no_outputs, (y,), {}, dynamic=dynamic, check_gradients=check_gradients
)
def test_incorrect_schema_mutation(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
guard = torch._C._AutoDispatchBelowAutograd()
try:
return op(x)
finally:
del guard
@staticmethod
def backward(ctx, gx):
return gx
def foo_impl(x):
x.sin_()
return x.clone()
lib.impl("foo", Foo.apply, "Autograd")
lib.impl("foo", foo_impl, "CPU")
lib.impl("foo", foo_impl, "CUDA")
lib.impl("foo", foo_impl, "XPU")
x = torch.tensor(3.14159 / 3, requires_grad=True, device=device)
with self.assertRaisesRegex(
optests.OpCheckError, "Argument x is not defined as mutable but was mutated"
):
torch.library.opcheck(op, (x,), {})
def test_incorrect_schema_view(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
# Emulate AutoDispatchBelowADInplaceOrView, which is not bound into python
with torch._C._AutoDispatchBelowAutograd():
with torch._C._ExcludeDispatchKeyGuard(
torch._C.DispatchKeySet(torch._C.DispatchKey.ADInplaceOrView)
):
return op(x)
@staticmethod
def backward(ctx, gx):
return gx
def foo_impl(x):
return x.view_as(x)
def foo_meta(x):
return x.view_as(x)
lib.impl("foo", Foo.apply, "Autograd")
lib.impl("foo", foo_impl, "CPU")
lib.impl("foo", foo_meta, "Meta")
x = torch.tensor(3.14159 / 3, requires_grad=True)
with self.assertRaisesRegex(
optests.OpCheckError,
"Argument x is not defined to alias output but was aliasing",
):
torch.library.opcheck(op, (x,), {})
# https://github.com/pytorch/pytorch/issues/142410
def test_opcheck_unbacked_stride(self, device):
@torch.library.custom_op("test::f", mutates_args=[])
def f(x: torch.Tensor) -> torch.Tensor:
return x.new_zeros((x.size(0), 18))
@f.register_fake
def _(x: torch.Tensor) -> torch.Tensor:
ctx = torch.library.get_ctx()
s = ctx.new_dynamic_size()
return torch.empty(x.shape[0], s, device=x.device, dtype=x.dtype)
example = torch.zeros([10, 20], device=device)
torch.library.opcheck(f, args=[example])
# https://github.com/pytorch/pytorch/issues/150472
def test_single_element_tuple_output(self, device):
# Helper function to register id_tuple custom and the fake tensor implementation
# so that Dynamo has the fake tensor implementation
def get_id_tuple():
@torch.library.custom_op("test::id_tuple", mutates_args=[])
def id_tuple(x: torch.Tensor) -> Tuple[torch.Tensor]:
return (x.clone(),)
@id_tuple.register_fake
def _(
x: torch.Tensor,
) -> Tuple[torch.Tensor]:
return (x.clone(),)
return id_tuple
id_tuple = get_id_tuple()
x = torch.randn(3, device=device)
ret = id_tuple(x)
# Check if ret is a tuple and has exactly one and the same element
self.assertIsInstance(ret, tuple)
self.assertEqual(len(ret), 1)
self.assertEqual(x, ret[0])
def test_missing_abstract_impl(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
with torch._C._AutoDispatchBelowAutograd():
return op(x)
@staticmethod
def backward(ctx, gx):
return 2 * gx
def foo_impl(x):
return torch.tensor(x.cpu().numpy() ** 2, device=x.device)
lib.impl("foo", Foo.apply, "Autograd")
lib.impl("foo", foo_impl, "CPU")
lib.impl("foo", foo_impl, "CUDA")
lib.impl("foo", foo_impl, "XPU")
x = torch.tensor([0, 1.0], requires_grad=True)
with self.assertRaisesRegex(
optests.OpCheckError,
"_test_custom_op.foo.default",
):
torch.library.opcheck(op, (x,), {})
@skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
def test_incorrect_abstract_impl(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
# Emulate AutoDispatchBelowADInplaceOrView, which is not bound into python
guard = torch._C._AutoDispatchBelowAutograd()
guard2 = torch._C.ExcludeDispatchKeyGuard(
torch._C.DispatchKeySet(torch._C.DispatchKey.ADInplaceOrView)
)
try:
return op(x)
finally:
del guard
del guard2
@staticmethod
def backward(ctx, gx):
return gx
def foo_impl(x):
return x**2
def foo_meta(x):
return x.unsqueeze(1) ** 2
lib.impl("foo", Foo.apply, "Autograd")
lib.impl("foo", foo_impl, "CPU")
lib.impl("foo", foo_impl, "CUDA")
lib.impl("foo", foo_impl, "XPU")
lib.impl("foo", foo_meta, "Meta")
x = torch.tensor([0, 1.0], requires_grad=True)
with self.assertRaisesRegex(optests.OpCheckError, "Shapes .* are not equal"):
torch.library.opcheck(op, (x,), {})
def test_missing_functionalization(self, device):
lib = self.lib()
lib.define("foo(Tensor(a!) x) -> Tensor(a!)")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.mark_dirty(x)
with torch._C._AutoDispatchBelowAutograd():
return op(x)
@staticmethod
def backward(ctx, gx):
return gx
def foo_impl(x):
return x.sin_()
def foo_meta(x):
return x
lib.impl("foo", Foo.apply, "Autograd")
lib.impl("foo", foo_impl, "CPU")
lib.impl("foo", foo_impl, "CUDA")
lib.impl("foo", foo_impl, "XPU")
lib.impl("foo", foo_meta, "Meta")
x = torch.tensor([0, 1.0])
y = x.clone()
with self.assertRaisesRegex(
optests.OpCheckError,
"We only support functionalizing operators whose outputs do not have alias annotations",
):
torch.library.opcheck(op, (y,), {})
def test_autograd_registered_at_backend(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gx):
return gx * 0.5
lib.impl("foo", Foo.apply, "CPU")
lib.impl("foo", Foo.apply, "CUDA")
lib.impl("foo", Foo.apply, "XPU")
lib.impl("foo", lambda x: x.clone(), "Meta")
x = torch.randn([], requires_grad=True)
with self.assertRaisesRegex(
torch.testing._internal.optests.OpCheckError,
"does not have an autograd kernel",
):
torch.library.opcheck(op, (x,), {})
# I'm not sure why this is necessary
del lib
def test_global_state_mutation(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
invoked = 0
@staticmethod
def forward(ctx, x):
Foo.invoked += 1
return x.clone() * Foo.invoked
@staticmethod
def backward(ctx, gx):
return gx
lib.impl("foo", Foo.apply, "CompositeImplicitAutograd")
x = torch.tensor(3.14159 / 3, requires_grad=True)
with self.assertRaisesRegex(
optests.OpCheckError, "eager-mode PyTorch vs AOTDispatcher"
):
torch.library.opcheck(op, (x,), {})
# Test that we can actually see the absolute difference numbers
try:
torch.library.opcheck(op, (x,), {})
except optests.OpCheckError as err:
orig = err.__context__.__context__
self.assertIn("Absolute difference:", str(orig))
# Test atol/rtol overrides
torch.library.opcheck(op, (x,), {}, atol=3, rtol=0.01)
@ops(custom_op_db.custom_op_db, dtypes=OpDTypes.any_one)
def test_opcheck_opinfo(self, device, dtype, op):
for sample_input in op.sample_inputs(
device, dtype, requires_grad=op.supports_autograd
):
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
torch.library.opcheck(op.op, args, kwargs)
def test_opcheck_fails_basic(self, device):
@custom_op(f"{self.test_ns}::foo")
def foo(x: torch.Tensor) -> torch.Tensor: ...
@foo.impl(["cpu", "cuda"])
def foo_impl(x):
return x.sum()
x = torch.randn(3, device=device, requires_grad=True)
# Triggers the CustomOp autograd NYI error
with self.assertRaisesRegex(
optests.OpCheckError, "Autograd has not been implemented for operator"
):
torch.library.opcheck(self.get_op(f"{self.test_ns}::foo"), (x,), {})
def test_autograd_registration_check_autograd_kernel(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
with torch._C._AutoDispatchBelowAutograd():
return op(x)
@staticmethod
def backward(ctx, gx):
return gx
def foo_impl(x):
return x.sin()
lib.impl("foo", Foo.apply, "Autograd")
lib.impl("foo", foo_impl, "CPU")
lib.impl("foo", foo_impl, "CUDA")
lib.impl("foo", foo_impl, "XPU")
x = torch.randn(3, requires_grad=True, device=device)
# Should not raise
optests.autograd_registration_check(op, (x,), {})
def test_autograd_registration_check_compositeimplicitautograd(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
def foo_impl(x):
return x.sin().cos()
lib.impl("foo", foo_impl, "CompositeImplicitAutograd")
x = torch.randn(3, requires_grad=True, device=device)
# Should not raise
optests.autograd_registration_check(op, (x,), {})
def test_autograd_registration_check_incorrect_composite(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
def foo_impl(x):
return x.sin().cos()
lib.impl("foo", foo_impl, "CompositeExplicitAutograd")
x = torch.randn(3, requires_grad=True, device=device)
with self.assertRaisesRegex(AssertionError, "incorrectly registered"):
optests.autograd_registration_check(op, (x,), {})
def test_autograd_registration_check_incorrect(self, device):
lib = self.lib()
lib.define("foo(Tensor x) -> Tensor")
op = self.ns().foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return torch.sin(x)
@staticmethod
def backward(ctx, gx):
return gx
lib.impl("foo", Foo.apply, "CPU")
lib.impl("foo", Foo.apply, "CUDA")
lib.impl("foo", Foo.apply, "XPU")
x = torch.randn(3, requires_grad=True, device=device)
with self.assertRaisesRegex(AssertionError, "incorrectly registered"):
optests.autograd_registration_check(op, (x,), {})
def test_assert_raises_regex(self, device):
from torch.testing._internal.optests.aot_autograd import assert_raises_regex
with assert_raises_regex(RuntimeError, "c"):
raise RuntimeError("abcd")
with assert_raises_regex(RuntimeError, "c.*"):
raise RuntimeError("abcd")
with self.assertRaisesRegex(AssertionError, "instead got"):
with assert_raises_regex(RuntimeError, "c.*"):
raise ValueError("abcd")
with self.assertRaisesRegex(AssertionError, "Expected exception"):
with assert_raises_regex(RuntimeError, "c.*"):
pass
with self.assertRaisesRegex(AssertionError, "to match regex"):
with assert_raises_regex(RuntimeError, "f"):
raise RuntimeError("abcd")
| TestCustomOpTesting |
python | python__mypy | mypyc/ir/func_ir.py | {
"start": 1838,
"end": 3925
} | class ____:
"""Signature of a function in IR."""
# TODO: Track if method?
def __init__(self, args: Sequence[RuntimeArg], ret_type: RType) -> None:
self.args = tuple(args)
self.ret_type = ret_type
# Bitmap arguments are use to mark default values for arguments that
# have types with overlapping error values.
self.num_bitmap_args = num_bitmap_args(self.args)
if self.num_bitmap_args:
extra = [
RuntimeArg(bitmap_name(i), bitmap_rprimitive, pos_only=True)
for i in range(self.num_bitmap_args)
]
self.args = self.args + tuple(reversed(extra))
def real_args(self) -> tuple[RuntimeArg, ...]:
"""Return arguments without any synthetic bitmap arguments."""
if self.num_bitmap_args:
return self.args[: -self.num_bitmap_args]
return self.args
def bound_sig(self) -> FuncSignature:
if self.num_bitmap_args:
return FuncSignature(self.args[1 : -self.num_bitmap_args], self.ret_type)
else:
return FuncSignature(self.args[1:], self.ret_type)
def __repr__(self) -> str:
return f"FuncSignature(args={self.args!r}, ret={self.ret_type!r})"
def serialize(self) -> JsonDict:
if self.num_bitmap_args:
args = self.args[: -self.num_bitmap_args]
else:
args = self.args
return {"args": [t.serialize() for t in args], "ret_type": self.ret_type.serialize()}
@classmethod
def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> FuncSignature:
return FuncSignature(
[RuntimeArg.deserialize(arg, ctx) for arg in data["args"]],
deserialize_type(data["ret_type"], ctx),
)
def num_bitmap_args(args: tuple[RuntimeArg, ...]) -> int:
n = 0
for arg in args:
if arg.type.error_overlap and arg.kind.is_optional():
n += 1
return (n + (BITMAP_BITS - 1)) // BITMAP_BITS
FUNC_NORMAL: Final = 0
FUNC_STATICMETHOD: Final = 1
FUNC_CLASSMETHOD: Final = 2
| FuncSignature |
python | pypa__pipenv | pipenv/patched/pip/_internal/metadata/importlib/_dists.py | {
"start": 3465,
"end": 8429
} | class ____(BaseDistribution):
def __init__(
self,
dist: importlib.metadata.Distribution,
info_location: Optional[BasePath],
installed_location: Optional[BasePath],
) -> None:
self._dist = dist
self._info_location = info_location
self._installed_location = installed_location
@classmethod
def from_directory(cls, directory: str) -> BaseDistribution:
info_location = pathlib.Path(directory)
dist = importlib.metadata.Distribution.at(info_location)
return cls(dist, info_location, info_location.parent)
@classmethod
def from_metadata_file_contents(
cls,
metadata_contents: bytes,
filename: str,
project_name: str,
) -> BaseDistribution:
# Generate temp dir to contain the metadata file, and write the file contents.
temp_dir = pathlib.Path(
TempDirectory(kind="metadata", globally_managed=True).path
)
metadata_path = temp_dir / "METADATA"
metadata_path.write_bytes(metadata_contents)
# Construct dist pointing to the newly created directory.
dist = importlib.metadata.Distribution.at(metadata_path.parent)
return cls(dist, metadata_path.parent, None)
@classmethod
def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
try:
with wheel.as_zipfile() as zf:
dist = WheelDistribution.from_zipfile(zf, name, wheel.location)
except zipfile.BadZipFile as e:
raise InvalidWheel(wheel.location, name) from e
return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location))
@property
def location(self) -> Optional[str]:
if self._info_location is None:
return None
return str(self._info_location.parent)
@property
def info_location(self) -> Optional[str]:
if self._info_location is None:
return None
return str(self._info_location)
@property
def installed_location(self) -> Optional[str]:
if self._installed_location is None:
return None
return normalize_path(str(self._installed_location))
@property
def canonical_name(self) -> NormalizedName:
return get_dist_canonical_name(self._dist)
@property
def version(self) -> Version:
if version := parse_name_and_version_from_info_directory(self._dist)[1]:
return parse_version(version)
return parse_version(self._dist.version)
@property
def raw_version(self) -> str:
return self._dist.version
def is_file(self, path: InfoPath) -> bool:
return self._dist.read_text(str(path)) is not None
def iter_distutils_script_names(self) -> Iterator[str]:
# A distutils installation is always "flat" (not in e.g. egg form), so
# if this distribution's info location is NOT a pathlib.Path (but e.g.
# zipfile.Path), it can never contain any distutils scripts.
if not isinstance(self._info_location, pathlib.Path):
return
for child in self._info_location.joinpath("scripts").iterdir():
yield child.name
def read_text(self, path: InfoPath) -> str:
content = self._dist.read_text(str(path))
if content is None:
raise FileNotFoundError(path)
return content
def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
# importlib.metadata's EntryPoint structure satisfies BaseEntryPoint.
return self._dist.entry_points
def _metadata_impl(self) -> email.message.Message:
# From Python 3.10+, importlib.metadata declares PackageMetadata as the
# return type. This protocol is unfortunately a disaster now and misses
# a ton of fields that we need, including get() and get_payload(). We
# rely on the implementation that the object is actually a Message now,
# until upstream can improve the protocol. (python/cpython#94952)
return cast(email.message.Message, self._dist.metadata)
def iter_provided_extras(self) -> Iterable[NormalizedName]:
return [
canonicalize_name(extra)
for extra in self.metadata.get_all("Provides-Extra", [])
]
def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
contexts: Sequence[Dict[str, str]] = [{"extra": e} for e in extras]
for req_string in self.metadata.get_all("Requires-Dist", []):
# strip() because email.message.Message.get_all() may return a leading \n
# in case a long header was wrapped.
req = get_requirement(req_string.strip())
if not req.marker:
yield req
elif not extras and req.marker.evaluate({"extra": ""}):
yield req
elif any(req.marker.evaluate(context) for context in contexts):
yield req
| Distribution |
python | Netflix__metaflow | metaflow/_vendor/typing_extensions.py | {
"start": 75379,
"end": 134519
} | class ____(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"): # 3.11+
LiteralString = typing.LiteralString
else:
@_SpecialForm
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from metaflow._vendor.typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"): # 3.11+
Self = typing.Self
else:
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"): # 3.11+
Never = typing.Never
else:
@_SpecialForm
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from metaflow._vendor.typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'): # 3.11+
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9): # 3.9-3.10
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else: # 3.8
class _RequiredForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, 'ReadOnly'):
ReadOnly = typing.ReadOnly
elif sys.version_info[:2] >= (3, 9): # 3.9-3.12
@_ExtensionsSpecialForm
def ReadOnly(self, parameters):
"""A special typing construct to mark an item of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992 # allowed
m["title"] = "The Matrix" # typechecker error
There is no runtime checking for this property.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else: # 3.8
class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
ReadOnly = _ReadOnlyForm(
'ReadOnly',
doc="""A special typing construct to mark a key of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992 # allowed
m["title"] = "The Matrix" # typechecker error
There is no runtime checking for this propery.
""")
_UNPACK_DOC = """\
Type unpack operator.
The type unpack operator takes the child types from some container type,
such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For
example:
# For some generic class `Foo`:
Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str]
Ts = TypeVarTuple('Ts')
# Specifies that `Bar` is generic in an arbitrary number of types.
# (Think of `Ts` as a tuple of an arbitrary number of individual
# `TypeVar`s, which the `Unpack` is 'pulling out' directly into the
# `Generic[]`.)
class Bar(Generic[Unpack[Ts]]): ...
Bar[int] # Valid
Bar[int, str] # Also valid
From Python 3.11, this can also be done using the `*` operator:
Foo[*tuple[int, str]]
class Bar(Generic[*Ts]): ...
The operator can also be used along with a `TypedDict` to annotate
`**kwargs` in a function signature. For instance:
class Movie(TypedDict):
name: str
year: int
# This function expects two keyword arguments - *name* of type `str` and
# *year* of type `int`.
def foo(**kwargs: Unpack[Movie]): ...
Note that there is only some runtime checking of this operator. Not
everything the runtime allows may be accepted by static type checkers.
For more information, see PEP 646 and PEP 692.
"""
if sys.version_info >= (3, 12): # PEP 692 changed the repr of Unpack[]
Unpack = typing.Unpack
def _is_unpack(obj):
return get_origin(obj) is Unpack
elif sys.version_info[:2] >= (3, 9): # 3.9+
class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True):
def __init__(self, getitem):
super().__init__(getitem)
self.__doc__ = _UNPACK_DOC
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
@property
def __typing_unpacked_tuple_args__(self):
assert self.__origin__ is Unpack
assert len(self.__args__) == 1
arg, = self.__args__
if isinstance(arg, (typing._GenericAlias, _types.GenericAlias)):
if arg.__origin__ is not tuple:
raise TypeError("Unpack[...] must be used with a tuple type")
return arg.__args__
return None
@_UnpackSpecialForm
def Unpack(self, parameters):
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else: # 3.8
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
class _UnpackForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC)
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
if _PEP_696_IMPLEMENTED:
from typing import TypeVarTuple
elif hasattr(typing, "TypeVarTuple"): # 3.11+
def _unpack_args(*args):
newargs = []
for arg in args:
subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
if subargs is not None and not (subargs and subargs[-1] is ...):
newargs.extend(subargs)
else:
newargs.append(arg)
return newargs
# Add default parameter - PEP 696
class TypeVarTuple(metaclass=_TypeVarLikeMeta):
"""Type variable tuple."""
_backported_typevarlike = typing.TypeVarTuple
def __new__(cls, name, *, default=NoDefault):
tvt = typing.TypeVarTuple(name)
_set_default(tvt, default)
_set_module(tvt)
def _typevartuple_prepare_subst(alias, args):
params = alias.__parameters__
typevartuple_index = params.index(tvt)
for param in params[typevartuple_index + 1:]:
if isinstance(param, TypeVarTuple):
raise TypeError(
f"More than one TypeVarTuple parameter in {alias}"
)
alen = len(args)
plen = len(params)
left = typevartuple_index
right = plen - typevartuple_index - 1
var_tuple_index = None
fillarg = None
for k, arg in enumerate(args):
if not isinstance(arg, type):
subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
if subargs and len(subargs) == 2 and subargs[-1] is ...:
if var_tuple_index is not None:
raise TypeError(
"More than one unpacked "
"arbitrary-length tuple argument"
)
var_tuple_index = k
fillarg = subargs[0]
if var_tuple_index is not None:
left = min(left, var_tuple_index)
right = min(right, alen - var_tuple_index - 1)
elif left + right > alen:
raise TypeError(f"Too few arguments for {alias};"
f" actual {alen}, expected at least {plen - 1}")
if left == alen - right and tvt.has_default():
replacement = _unpack_args(tvt.__default__)
else:
replacement = args[left: alen - right]
return (
*args[:left],
*([fillarg] * (typevartuple_index - left)),
replacement,
*([fillarg] * (plen - right - left - typevartuple_index - 1)),
*args[alen - right:],
)
tvt.__typing_prepare_subst__ = _typevartuple_prepare_subst
return tvt
def __init_subclass__(self, *args, **kwds):
raise TypeError("Cannot subclass special typing classes")
else: # <=3.10
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=NoDefault):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"): # 3.11+
reveal_type = typing.reveal_type
else: # <=3.10
def reveal_type(obj: T, /) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
return obj
if hasattr(typing, "_ASSERT_NEVER_REPR_MAX_LENGTH"): # 3.11+
_ASSERT_NEVER_REPR_MAX_LENGTH = typing._ASSERT_NEVER_REPR_MAX_LENGTH
else: # <=3.10
_ASSERT_NEVER_REPR_MAX_LENGTH = 100
if hasattr(typing, "assert_never"): # 3.11+
assert_never = typing.assert_never
else: # <=3.10
def assert_never(arg: Never, /) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
value = repr(arg)
if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH:
value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...'
raise AssertionError(f"Expected code to be unreachable, but got: {value}")
if sys.version_info >= (3, 12): # 3.12+
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
dataclass_transform = typing.dataclass_transform
else: # <=3.11
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
frozen_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from metaflow._vendor.typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``frozen_default`` indicates whether the ``frozen`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"frozen_default": frozen_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
if hasattr(typing, "override"): # 3.12+
override = typing.override
else: # <=3.11
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
def override(arg: _F, /) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None:
pass
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
There is no runtime checking of these properties. The decorator
sets the ``__override__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
See PEP 698 for details.
"""
try:
arg.__override__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return arg
# PEP 702 `@deprecated`: prefer warnings.deprecated (3.13+), otherwise backport.
if hasattr(warnings, "deprecated"):
    deprecated = warnings.deprecated
else:
    _T = typing.TypeVar("_T")
    class deprecated:
        """Indicate that a class, function or overload is deprecated.
        When this decorator is applied to an object, the type checker
        will generate a diagnostic on usage of the deprecated object.
        Usage:
        @deprecated("Use B instead")
        class A:
            pass
        @deprecated("Use g instead")
        def f():
            pass
        @overload
        @deprecated("int support is deprecated")
        def g(x: int) -> int: ...
        @overload
        def g(x: str) -> int: ...
        The warning specified by *category* will be emitted at runtime
        on use of deprecated objects. For functions, that happens on calls;
        for classes, on instantiation and on creation of subclasses.
        If the *category* is ``None``, no warning is emitted at runtime.
        The *stacklevel* determines where the
        warning is emitted. If it is ``1`` (the default), the warning
        is emitted at the direct caller of the deprecated object; if it
        is higher, it is emitted further up the stack.
        Static type checker behavior is not affected by the *category*
        and *stacklevel* arguments.
        The deprecation message passed to the decorator is saved in the
        ``__deprecated__`` attribute on the decorated object.
        If applied to an overload, the decorator
        must be after the ``@overload`` decorator for the attribute to
        exist on the overload as returned by ``get_overloads()``.
        See PEP 702 for details.
        """
        def __init__(
            self,
            message: str,
            /,
            *,
            category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
            stacklevel: int = 1,
        ) -> None:
            if not isinstance(message, str):
                raise TypeError(
                    "Expected an object of type str for 'message', not "
                    f"{type(message).__name__!r}"
                )
            self.message = message
            self.category = category
            self.stacklevel = stacklevel
        def __call__(self, arg: _T, /) -> _T:
            # Make sure the inner functions created below don't
            # retain a reference to self.
            msg = self.message
            category = self.category
            stacklevel = self.stacklevel
            if category is None:
                # No warning category: just record the message for introspection.
                arg.__deprecated__ = msg
                return arg
            # Classes: wrap __new__ and __init_subclass__ so that both
            # instantiation and subclassing emit the runtime warning.
            elif isinstance(arg, type):
                import functools
                from types import MethodType
                original_new = arg.__new__
                @functools.wraps(original_new)
                def __new__(cls, *args, **kwargs):
                    if cls is arg:
                        warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                    if original_new is not object.__new__:
                        return original_new(cls, *args, **kwargs)
                    # Mirrors a similar check in object.__new__.
                    elif cls.__init__ is object.__init__ and (args or kwargs):
                        raise TypeError(f"{cls.__name__}() takes no arguments")
                    else:
                        return original_new(cls)
                arg.__new__ = staticmethod(__new__)
                original_init_subclass = arg.__init_subclass__
                # We need slightly different behavior if __init_subclass__
                # is a bound method (likely if it was implemented in Python)
                if isinstance(original_init_subclass, MethodType):
                    original_init_subclass = original_init_subclass.__func__
                    @functools.wraps(original_init_subclass)
                    def __init_subclass__(*args, **kwargs):
                        warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                        return original_init_subclass(*args, **kwargs)
                    arg.__init_subclass__ = classmethod(__init_subclass__)
                # Or otherwise, which likely means it's a builtin such as
                # object's implementation of __init_subclass__.
                else:
                    @functools.wraps(original_init_subclass)
                    def __init_subclass__(*args, **kwargs):
                        warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                        return original_init_subclass(*args, **kwargs)
                    arg.__init_subclass__ = __init_subclass__
                arg.__deprecated__ = __new__.__deprecated__ = msg
                __init_subclass__.__deprecated__ = msg
                return arg
            # Plain callables: warn on each call via a wrapping function.
            elif callable(arg):
                import functools
                @functools.wraps(arg)
                def wrapper(*args, **kwargs):
                    warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                    return arg(*args, **kwargs)
                arg.__deprecated__ = wrapper.__deprecated__ = msg
                return wrapper
            else:
                raise TypeError(
                    "@deprecated decorator with non-None category must be applied to "
                    f"a class or callable, not {arg!r}"
                )
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
# Two variants of the arity checker: pre-3.11 (typing has no TypeVarTuple, and
# `elen` may be the `_marker` sentinel) and 3.11+ (typing computes `elen`).
# Both are installed as `typing._check_generic` unless PEP 696 is native.
if not hasattr(typing, "TypeVarTuple"):
    def _check_generic(cls, parameters, elen=_marker):
        """Check correct count for parameters of a generic cls (internal helper).
        This gives a nice error message in case of count mismatch.
        """
        if not elen:
            raise TypeError(f"{cls} is not a generic class")
        if elen is _marker:
            if not hasattr(cls, "__parameters__") or not cls.__parameters__:
                raise TypeError(f"{cls} is not a generic class")
            elen = len(cls.__parameters__)
        alen = len(parameters)
        if alen != elen:
            expect_val = elen
            if hasattr(cls, "__parameters__"):
                parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
                num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
                if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
                    return
            # deal with TypeVarLike defaults
            # required TypeVarLikes cannot appear after a defaulted one.
            if alen < elen:
                # since we validate TypeVarLike default in _collect_type_vars
                # or _collect_parameters we can safely check parameters[alen]
                if (
                    getattr(parameters[alen], '__default__', NoDefault)
                    is not NoDefault
                ):
                    return
                num_default_tv = sum(getattr(p, '__default__', NoDefault)
                                     is not NoDefault for p in parameters)
                elen -= num_default_tv
                expect_val = f"at least {elen}"
            things = "arguments" if sys.version_info >= (3, 10) else "parameters"
            raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}"
                            f" for {cls}; actual {alen}, expected {expect_val}")
else:
    # Python 3.11+
    def _check_generic(cls, parameters, elen):
        """Check correct count for parameters of a generic cls (internal helper).
        This gives a nice error message in case of count mismatch.
        """
        if not elen:
            raise TypeError(f"{cls} is not a generic class")
        alen = len(parameters)
        if alen != elen:
            expect_val = elen
            if hasattr(cls, "__parameters__"):
                parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
                # deal with TypeVarLike defaults
                # required TypeVarLikes cannot appear after a defaulted one.
                if alen < elen:
                    # since we validate TypeVarLike default in _collect_type_vars
                    # or _collect_parameters we can safely check parameters[alen]
                    if (
                        getattr(parameters[alen], '__default__', NoDefault)
                        is not NoDefault
                    ):
                        return
                    num_default_tv = sum(getattr(p, '__default__', NoDefault)
                                         is not NoDefault for p in parameters)
                    elen -= num_default_tv
                    expect_val = f"at least {elen}"
            raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments"
                            f" for {cls}; actual {alen}, expected {expect_val}")
if not _PEP_696_IMPLEMENTED:
    typing._check_generic = _check_generic
def _has_generic_or_protocol_as_origin() -> bool:
    """Best-effort frame inspection: return True when the caller two frames up
    is ``typing`` code whose local ``origin`` is ``Generic`` or ``Protocol``.
    Any failure to inspect the stack is treated as "no" (leniency).
    """
    try:
        frame = sys._getframe(2)
    # - Catch AttributeError: not all Python implementations have sys._getframe()
    # - Catch ValueError: maybe we're called from an unexpected module
    # and the call stack isn't deep enough
    except (AttributeError, ValueError):
        return False # err on the side of leniency
    else:
        # If we somehow get invoked from outside typing.py,
        # also err on the side of leniency
        if frame.f_globals.get("__name__") != "typing":
            return False
        origin = frame.f_locals.get("origin")
        # Cannot use "in" because origin may be an object with a buggy __eq__ that
        # throws an error.
        return origin is typing.Generic or origin is Protocol or origin is typing.Protocol
# Accept both this module's TypeVarTuple and, when present, the stdlib one
# (typing.TypeVarTuple); getattr yields None harmlessly on older Pythons.
_TYPEVARTUPLE_TYPES = {TypeVarTuple, getattr(typing, "TypeVarTuple", None)}
def _is_unpacked_typevartuple(x) -> bool:
    """Return True iff ``x`` is ``Unpack[Ts]`` wrapping exactly one TypeVarTuple."""
    if get_origin(x) is not Unpack:
        return False
    args = get_args(x)
    return (
        bool(args)
        and len(args) == 1
        and type(args[0]) in _TYPEVARTUPLE_TYPES
    )
# Python 3.11+ _collect_type_vars was renamed to _collect_parameters
# Whichever name the running typing module uses, we replace it with a version
# that enforces PEP 696 default ordering (unless PEP 696 is implemented natively).
if hasattr(typing, '_collect_type_vars'):
    def _collect_type_vars(types, typevar_types=None):
        """Collect all type variable contained in types in order of
        first appearance (lexicographic order). For example::
            _collect_type_vars((T, List[S, T])) == (T, S)
        """
        if typevar_types is None:
            typevar_types = typing.TypeVar
        tvars = []
        # A required TypeVarLike cannot appear after a TypeVarLike with a default
        # if it was a direct call to `Generic[]` or `Protocol[]`
        enforce_default_ordering = _has_generic_or_protocol_as_origin()
        default_encountered = False
        # Also, a TypeVarLike with a default cannot appear after a TypeVarTuple
        type_var_tuple_encountered = False
        for t in types:
            if _is_unpacked_typevartuple(t):
                type_var_tuple_encountered = True
            elif isinstance(t, typevar_types) and t not in tvars:
                if enforce_default_ordering:
                    has_default = getattr(t, '__default__', NoDefault) is not NoDefault
                    if has_default:
                        if type_var_tuple_encountered:
                            raise TypeError('Type parameter with a default'
                                            ' follows TypeVarTuple')
                        default_encountered = True
                    elif default_encountered:
                        raise TypeError(f'Type parameter {t!r} without a default'
                                        ' follows type parameter with a default')
                tvars.append(t)
            if _should_collect_from_parameters(t):
                # Recurse into generic aliases for their own parameters.
                tvars.extend([t for t in t.__parameters__ if t not in tvars])
        return tuple(tvars)
    typing._collect_type_vars = _collect_type_vars
else:
    def _collect_parameters(args):
        """Collect all type variables and parameter specifications in args
        in order of first appearance (lexicographic order).
        For example::
            assert _collect_parameters((T, Callable[P, T])) == (T, P)
        """
        parameters = []
        # A required TypeVarLike cannot appear after a TypeVarLike with default
        # if it was a direct call to `Generic[]` or `Protocol[]`
        enforce_default_ordering = _has_generic_or_protocol_as_origin()
        default_encountered = False
        # Also, a TypeVarLike with a default cannot appear after a TypeVarTuple
        type_var_tuple_encountered = False
        for t in args:
            if isinstance(t, type):
                # We don't want __parameters__ descriptor of a bare Python class.
                pass
            elif isinstance(t, tuple):
                # `t` might be a tuple, when `ParamSpec` is substituted with
                # `[T, int]`, or `[int, *Ts]`, etc.
                for x in t:
                    for collected in _collect_parameters([x]):
                        if collected not in parameters:
                            parameters.append(collected)
            elif hasattr(t, '__typing_subst__'):
                if t not in parameters:
                    if enforce_default_ordering:
                        has_default = (
                            getattr(t, '__default__', NoDefault) is not NoDefault
                        )
                        if type_var_tuple_encountered and has_default:
                            raise TypeError('Type parameter with a default'
                                            ' follows TypeVarTuple')
                        if has_default:
                            default_encountered = True
                        elif default_encountered:
                            raise TypeError(f'Type parameter {t!r} without a default'
                                            ' follows type parameter with a default')
                    parameters.append(t)
            else:
                if _is_unpacked_typevartuple(t):
                    type_var_tuple_encountered = True
                for x in getattr(t, '__parameters__', ()):
                    if x not in parameters:
                        parameters.append(x)
        return tuple(parameters)
    if not _PEP_696_IMPLEMENTED:
        typing._collect_parameters = _collect_parameters
# Backport typing.NamedTuple as it exists in Python 3.13.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
# On 3.12, we added __orig_bases__ to call-based NamedTuples
# On 3.13, we deprecated kwargs-based NamedTuples
if sys.version_info >= (3, 13):
    NamedTuple = typing.NamedTuple
else:
    def _make_nmtuple(name, types, module, defaults=()):
        # Build the underlying collections.namedtuple and attach validated
        # per-field type annotations.
        fields = [n for n, t in types]
        annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
                       for n, t in types}
        nm_tpl = collections.namedtuple(name, fields,
                                        defaults=defaults, module=module)
        nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
        # The `_field_types` attribute was removed in 3.9;
        # in earlier versions, it is the same as the `__annotations__` attribute
        if sys.version_info < (3, 9):
            nm_tpl._field_types = annotations
        return nm_tpl
    _prohibited_namedtuple_fields = typing._prohibited
    _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
    class _NamedTupleMeta(type):
        # Metaclass backing the class-based NamedTuple syntax: converts a class
        # body (annotations + defaults) into a real namedtuple subclass.
        def __new__(cls, typename, bases, ns):
            assert _NamedTuple in bases
            for base in bases:
                if base is not _NamedTuple and base is not typing.Generic:
                    raise TypeError(
                        'can only inherit from a NamedTuple type and Generic')
            bases = tuple(tuple if base is _NamedTuple else base for base in bases)
            if "__annotations__" in ns:
                types = ns["__annotations__"]
            elif "__annotate__" in ns:
                # TODO: Use inspect.VALUE here, and make the annotations lazily evaluated
                types = ns["__annotate__"](1)
            else:
                types = {}
            default_names = []
            for field_name in types:
                if field_name in ns:
                    default_names.append(field_name)
                elif default_names:
                    raise TypeError(f"Non-default namedtuple field {field_name} "
                                    f"cannot follow default field"
                                    f"{'s' if len(default_names) > 1 else ''} "
                                    f"{', '.join(default_names)}")
            nm_tpl = _make_nmtuple(
                typename, types.items(),
                defaults=[ns[n] for n in default_names],
                module=ns['__module__']
            )
            nm_tpl.__bases__ = bases
            if typing.Generic in bases:
                if hasattr(typing, '_generic_class_getitem'): # 3.12+
                    nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem)
                else:
                    class_getitem = typing.Generic.__class_getitem__.__func__
                    nm_tpl.__class_getitem__ = classmethod(class_getitem)
            # update from user namespace without overriding special namedtuple attributes
            for key, val in ns.items():
                if key in _prohibited_namedtuple_fields:
                    raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
                elif key not in _special_namedtuple_fields:
                    if key not in nm_tpl._fields:
                        setattr(nm_tpl, key, ns[key])
                    try:
                        set_name = type(val).__set_name__
                    except AttributeError:
                        pass
                    else:
                        try:
                            set_name(val, nm_tpl, key)
                        except BaseException as e:
                            msg = (
                                f"Error calling __set_name__ on {type(val).__name__!r} "
                                f"instance {key!r} in {typename!r}"
                            )
                            # BaseException.add_note() existed on py311,
                            # but the __set_name__ machinery didn't start
                            # using add_note() until py312.
                            # Making sure exceptions are raised in the same way
                            # as in "normal" classes seems most important here.
                            if sys.version_info >= (3, 12):
                                e.add_note(msg)
                                raise
                            else:
                                raise RuntimeError(msg) from e
            if typing.Generic in bases:
                nm_tpl.__init_subclass__()
            return nm_tpl
    _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
    def _namedtuple_mro_entries(bases):
        assert NamedTuple in bases
        return (_NamedTuple,)
    @_ensure_subclassable(_namedtuple_mro_entries)
    def NamedTuple(typename, fields=_marker, /, **kwargs):
        """Typed version of namedtuple.
        Usage::
        class Employee(NamedTuple):
            name: str
            id: int
        This is equivalent to::
        Employee = collections.namedtuple('Employee', ['name', 'id'])
        The resulting class has an extra __annotations__ attribute, giving a
        dict that maps field names to types. (The field names are also in
        the _fields attribute, which is part of the namedtuple API.)
        An alternative equivalent functional syntax is also accepted::
        Employee = NamedTuple('Employee', [('name', str), ('id', int)])
        """
        # Deprecation handling for the legacy call forms (no fields / None /
        # keyword arguments); each path builds its own warning message.
        if fields is _marker:
            if kwargs:
                deprecated_thing = "Creating NamedTuple classes using keyword arguments"
                deprecation_msg = (
                    "{name} is deprecated and will be disallowed in Python {remove}. "
                    "Use the class-based or functional syntax instead."
                )
            else:
                deprecated_thing = "Failing to pass a value for the 'fields' parameter"
                example = f"`{typename} = NamedTuple({typename!r}, [])`"
                deprecation_msg = (
                    "{name} is deprecated and will be disallowed in Python {remove}. "
                    "To create a NamedTuple class with 0 fields "
                    "using the functional syntax, "
                    "pass an empty list, e.g. "
                ) + example + "."
        elif fields is None:
            if kwargs:
                raise TypeError(
                    "Cannot pass `None` as the 'fields' parameter "
                    "and also specify fields using keyword arguments"
                )
            else:
                deprecated_thing = "Passing `None` as the 'fields' parameter"
                example = f"`{typename} = NamedTuple({typename!r}, [])`"
                deprecation_msg = (
                    "{name} is deprecated and will be disallowed in Python {remove}. "
                    "To create a NamedTuple class with 0 fields "
                    "using the functional syntax, "
                    "pass an empty list, e.g. "
                ) + example + "."
        elif kwargs:
            raise TypeError("Either list of fields or keywords"
                            " can be provided to NamedTuple, not both")
        if fields is _marker or fields is None:
            warnings.warn(
                deprecation_msg.format(name=deprecated_thing, remove="3.15"),
                DeprecationWarning,
                stacklevel=2,
            )
            fields = kwargs.items()
        nt = _make_nmtuple(typename, fields, module=_caller())
        nt.__orig_bases__ = (NamedTuple,)
        return nt
# collections.abc.Buffer exists on 3.12+; otherwise provide an empty ABC that
# stdlib buffer types are registered against for isinstance checks.
if hasattr(collections.abc, "Buffer"):
    Buffer = collections.abc.Buffer
else:
    class Buffer(abc.ABC): # noqa: B024
        """Base class for classes that implement the buffer protocol.
        The buffer protocol allows Python objects to expose a low-level
        memory buffer interface. Before Python 3.12, it is not possible
        to implement the buffer protocol in pure Python code, or even
        to check whether a class implements the buffer protocol. In
        Python 3.12 and higher, the ``__buffer__`` method allows access
        to the buffer protocol from Python code, and the
        ``collections.abc.Buffer`` ABC allows checking whether a class
        implements the buffer protocol.
        To indicate support for the buffer protocol in earlier versions,
        inherit from this ABC, either in a stub file or at runtime,
        or use ABC registration. This ABC provides no methods, because
        there is no Python-accessible methods shared by pre-3.12 buffer
        classes. It is useful primarily for static checks.
        """
    # As a courtesy, register the most common stdlib buffer classes.
    Buffer.register(memoryview)
    Buffer.register(bytearray)
    Buffer.register(bytes)
# Backport of types.get_original_bases, available on 3.12+ in CPython
if hasattr(_types, "get_original_bases"):
    get_original_bases = _types.get_original_bases
else:
    def get_original_bases(cls, /):
        """Return the class's "original" bases prior to modification by `__mro_entries__`.
        Examples::
        from typing import TypeVar, Generic
        from metaflow._vendor.typing_extensions import NamedTuple, TypedDict
        T = TypeVar("T")
        class Foo(Generic[T]): ...
        class Bar(Foo[int], float): ...
        class Baz(list[str]): ...
        Eggs = NamedTuple("Eggs", [("a", int), ("b", str)])
        Spam = TypedDict("Spam", {"a": int, "b": str})
        assert get_original_bases(Bar) == (Foo[int], float)
        assert get_original_bases(Baz) == (list[str],)
        assert get_original_bases(Eggs) == (NamedTuple,)
        assert get_original_bases(Spam) == (TypedDict,)
        assert get_original_bases(int) == (object,)
        """
        try:
            # Prefer __orig_bases__ when the class stored it; fall back to the
            # resolved __bases__ otherwise.
            return cls.__dict__.get("__orig_bases__", cls.__bases__)
        except AttributeError:
            raise TypeError(
                f'Expected an instance of type, not {type(cls).__name__!r}'
            ) from None
# NewType is a class on Python 3.10+, making it pickleable
# The error message for subclassing instances of NewType was improved on 3.11+
if sys.version_info >= (3, 11):
    NewType = typing.NewType
else:
    class NewType:
        """NewType creates simple unique types with almost zero
        runtime overhead. NewType(name, tp) is considered a subtype of tp
        by static type checkers. At runtime, NewType(name, tp) returns
        a dummy callable that simply returns its argument. Usage::
        UserId = NewType('UserId', int)
        def name_by_id(user_id: UserId) -> str:
            ...
        UserId('user') # Fails type check
        name_by_id(42) # Fails type check
        name_by_id(UserId(42)) # OK
        num = UserId(5) + 1 # type: int
        """
        def __call__(self, obj, /):
            # Identity function at runtime; the "new type" exists only for
            # static checkers.
            return obj
        def __init__(self, name, tp):
            self.__qualname__ = name
            if '.' in name:
                name = name.rpartition('.')[-1]
            self.__name__ = name
            self.__supertype__ = tp
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod
        def __mro_entries__(self, bases):
            # We defined __mro_entries__ to get a better error message
            # if a user attempts to subclass a NewType instance. bpo-46170
            supercls_name = self.__name__
            class Dummy:
                def __init_subclass__(cls):
                    subcls_name = cls.__name__
                    raise TypeError(
                        f"Cannot subclass an instance of NewType. "
                        f"Perhaps you were looking for: "
                        f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`"
                    )
            return (Dummy,)
        def __repr__(self):
            return f'{self.__module__}.{self.__qualname__}'
        def __reduce__(self):
            # Pickle by qualified name lookup, mirroring the 3.10+ class.
            return self.__qualname__
        if sys.version_info >= (3, 10):
            # PEP 604 methods
            # It doesn't make sense to have these methods on Python <3.10
            def __or__(self, other):
                return typing.Union[self, other]
            def __ror__(self, other):
                return typing.Union[other, self]
# PEP 695 TypeAliasType: use the stdlib class (3.12+) when present.
if hasattr(typing, "TypeAliasType"):
    TypeAliasType = typing.TypeAliasType
else:
    def _is_unionable(obj):
        """Corresponds to is_unionable() in unionobject.c in CPython."""
        return obj is None or isinstance(obj, (
            type,
            _types.GenericAlias,
            _types.UnionType,
            TypeAliasType,
        ))
    class TypeAliasType:
        """Create named, parameterized type aliases.
        This provides a backport of the new `type` statement in Python 3.12:
        type ListOrSet[T] = list[T] | set[T]
        is equivalent to:
        T = TypeVar("T")
        ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,))
        The name ListOrSet can then be used as an alias for the type it refers to.
        The type_params argument should contain all the type parameters used
        in the value of the type alias. If the alias is not generic, this
        argument is omitted.
        Static type checkers should only support type aliases declared using
        TypeAliasType that follow these rules:
        - The first argument (the name) must be a string literal.
        - The TypeAliasType instance must be immediately assigned to a variable
        of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid,
        as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)').
        """
        def __init__(self, name: str, value, *, type_params=()):
            if not isinstance(name, str):
                raise TypeError("TypeAliasType name must be a string")
            self.__value__ = value
            self.__type_params__ = type_params
            parameters = []
            for type_param in type_params:
                if isinstance(type_param, TypeVarTuple):
                    # A TypeVarTuple contributes its unpacked contents.
                    parameters.extend(type_param)
                else:
                    parameters.append(type_param)
            self.__parameters__ = tuple(parameters)
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod
            # Setting this attribute closes the TypeAliasType from further modification
            self.__name__ = name
        def __setattr__(self, name: str, value: object, /) -> None:
            if hasattr(self, "__name__"):
                self._raise_attribute_error(name)
            super().__setattr__(name, value)
        def __delattr__(self, name: str, /) -> Never:
            self._raise_attribute_error(name)
        def _raise_attribute_error(self, name: str) -> Never:
            # Match the Python 3.12 error messages exactly
            if name == "__name__":
                raise AttributeError("readonly attribute")
            elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}:
                raise AttributeError(
                    f"attribute '{name}' of 'typing.TypeAliasType' objects "
                    "is not writable"
                )
            else:
                raise AttributeError(
                    f"'typing.TypeAliasType' object has no attribute '{name}'"
                )
        def __repr__(self) -> str:
            return self.__name__
        def __getitem__(self, parameters):
            if not isinstance(parameters, tuple):
                parameters = (parameters,)
            parameters = [
                typing._type_check(
                    item, f'Subscripting {self.__name__} requires a type.'
                )
                for item in parameters
            ]
            return typing._GenericAlias(self, tuple(parameters))
        def __reduce__(self):
            # Pickle by name lookup in the defining module.
            return self.__name__
        def __init_subclass__(cls, *args, **kwargs):
            raise TypeError(
                "type 'typing_extensions.TypeAliasType' is not an acceptable base type"
            )
        # The presence of this method convinces typing._type_check
        # that TypeAliasTypes are types.
        def __call__(self):
            raise TypeError("Type alias is not callable")
        if sys.version_info >= (3, 10):
            def __or__(self, right):
                # For forward compatibility with 3.12, reject Unions
                # that are not accepted by the built-in Union.
                if not _is_unionable(right):
                    return NotImplemented
                return typing.Union[self, right]
            def __ror__(self, left):
                if not _is_unionable(left):
                    return NotImplemented
                return typing.Union[left, self]
# Protocol introspection helpers: defer to typing (3.13+) when available.
if hasattr(typing, "is_protocol"):
    is_protocol = typing.is_protocol
    get_protocol_members = typing.get_protocol_members
else:
    def is_protocol(tp: type, /) -> bool:
        """Return True if the given type is a Protocol.
        Example::
        >>> from typing_extensions import Protocol, is_protocol
        >>> class P(Protocol):
        ... def a(self) -> str: ...
        ... b: int
        >>> is_protocol(P)
        True
        >>> is_protocol(int)
        False
        """
        return (
            isinstance(tp, type)
            and getattr(tp, '_is_protocol', False)
            and tp is not Protocol
            and tp is not typing.Protocol
        )
    def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]:
        """Return the set of members defined in a Protocol.
        Example::
        >>> from typing_extensions import Protocol, get_protocol_members
        >>> class P(Protocol):
        ... def a(self) -> str: ...
        ... b: int
        >>> get_protocol_members(P)
        frozenset({'a', 'b'})
        Raise a TypeError for arguments that are not Protocols.
        """
        if not is_protocol(tp):
            raise TypeError(f'{tp!r} is not a Protocol')
        if hasattr(tp, '__protocol_attrs__'):
            return frozenset(tp.__protocol_attrs__)
        # Fall back to computing the attribute set ourselves.
        return frozenset(_get_protocol_attrs(tp))
# Doc: documentation-carrying marker for use inside Annotated[...] metadata.
if hasattr(typing, "Doc"):
    Doc = typing.Doc
else:
    class Doc:
        """Define the documentation of a type annotation using ``Annotated``, to be
        used in class attributes, function and method parameters, return values,
        and variables.
        The value should be a positional-only string literal to allow static tools
        like editors and documentation generators to use it.
        This complements docstrings.
        The string value passed is available in the attribute ``documentation``.
        Example::
        >>> from typing_extensions import Annotated, Doc
        >>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ...
        """
        def __init__(self, documentation: str, /) -> None:
            self.documentation = documentation
        def __repr__(self) -> str:
            return f"Doc({self.documentation!r})"
        def __hash__(self) -> int:
            # Hash/eq are defined together, both keyed on the documentation text.
            return hash(self.documentation)
        def __eq__(self, other: object) -> bool:
            if not isinstance(other, Doc):
                return NotImplemented
            return self.documentation == other.documentation
# types.CapsuleType is only available on newer Pythons; on older versions we
# recover the PyCapsule type from _socket.CAPI when that capsule exists.
_CapsuleType = getattr(_types, "CapsuleType", None)
if _CapsuleType is None:
    try:
        import _socket
    except ImportError:
        pass
    else:
        _CAPI = getattr(_socket, "CAPI", None)
        if _CAPI is not None:
            _CapsuleType = type(_CAPI)
# Export CapsuleType only when we managed to determine it.
if _CapsuleType is not None:
    CapsuleType = _CapsuleType
    __all__.append("CapsuleType")
# Aliases for items that have always been in typing.
# Explicitly assign these (rather than using `from typing import *` at the top),
# so that we get a CI error if one of these is deleted from typing.py
# in a future version of Python
# Each name below is a plain re-binding of the identically named typing object.
AbstractSet = typing.AbstractSet
AnyStr = typing.AnyStr
BinaryIO = typing.BinaryIO
Callable = typing.Callable
Collection = typing.Collection
Container = typing.Container
Dict = typing.Dict
ForwardRef = typing.ForwardRef
FrozenSet = typing.FrozenSet
Generic = typing.Generic
Hashable = typing.Hashable
IO = typing.IO
ItemsView = typing.ItemsView
Iterable = typing.Iterable
Iterator = typing.Iterator
KeysView = typing.KeysView
List = typing.List
Mapping = typing.Mapping
MappingView = typing.MappingView
Match = typing.Match
MutableMapping = typing.MutableMapping
MutableSequence = typing.MutableSequence
MutableSet = typing.MutableSet
Optional = typing.Optional
Pattern = typing.Pattern
Reversible = typing.Reversible
Sequence = typing.Sequence
Set = typing.Set
Sized = typing.Sized
TextIO = typing.TextIO
Tuple = typing.Tuple
Union = typing.Union
ValuesView = typing.ValuesView
cast = typing.cast
no_type_check = typing.no_type_check
no_type_check_decorator = typing.no_type_check_decorator
| _SpecialForm |
python | django__django | tests/filtered_relation/models.py | {
"start": 1979,
"end": 2527
} | class ____(models.Model):
NEW = "new"
STOPPED = "stopped"
STATES = (
(NEW, "New"),
(STOPPED, "Stopped"),
)
borrower = models.ForeignKey(
Borrower,
models.CASCADE,
related_name="rental_sessions",
related_query_name="rental_session",
)
book = models.ForeignKey(
Book,
models.CASCADE,
related_name="rental_sessions",
related_query_name="rental_session",
)
state = models.CharField(max_length=7, choices=STATES, default=NEW)
| RentalSession |
python | apache__airflow | airflow-core/src/airflow/models/dagbundle.py | {
"start": 1214,
"end": 4459
} | class ____(Base, LoggingMixin):
"""
A table for storing DAG bundle metadata.
We track the following information about each bundle, as it can be useful for
informational purposes and for debugging:
- active: Is the bundle currently found in configuration?
- version: The latest version Airflow has seen for the bundle.
- last_refreshed: When the bundle was last refreshed.
- signed_url_template: Signed URL template for viewing the bundle
- template_params: JSON object containing template parameters for constructing view url (e.g., {"subdir": "dags"})
"""
__tablename__ = "dag_bundle"
name: Mapped[str] = mapped_column(StringID(length=250), primary_key=True, nullable=False)
active: Mapped[bool | None] = mapped_column(Boolean, default=True, nullable=True)
version: Mapped[str | None] = mapped_column(String(200), nullable=True)
last_refreshed: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
signed_url_template: Mapped[str | None] = mapped_column(String(200), nullable=True)
template_params: Mapped[dict | None] = mapped_column(JSONType, nullable=True)
teams = relationship("Team", secondary=dag_bundle_team_association_table, back_populates="dag_bundles")
def __init__(self, *, name: str, version: str | None = None):
super().__init__()
self.name = name
self.version = version
def _unsign_url(self) -> str | None:
"""
Unsign a URL token to get the original URL template.
:param signed_url: The signed URL token
:return: The original URL template or None if unsigning fails
"""
try:
from itsdangerous import BadSignature, URLSafeSerializer
from airflow.configuration import conf
if not self.signed_url_template:
return None
serializer = URLSafeSerializer(conf.get_mandatory_value("core", "fernet_key"))
payload = serializer.loads(self.signed_url_template)
if isinstance(payload, dict) and "url" in payload and "bundle_name" in payload:
if payload["bundle_name"] == self.name:
return payload["url"]
return None
except (BadSignature, Exception):
return None
def render_url(self, version: str | None = None) -> str | None:
"""
Render the URL template with the given version and stored template parameters.
First unsigns the URL to get the original template, then formats it with
the provided version and any additional parameters.
:param version: The version to substitute in the template
:return: The rendered URL or None if no template is available
"""
if not self.signed_url_template:
return None
url_template = self._unsign_url()
if url_template is None:
return None
params = dict(self.template_params or {})
params["version"] = version
try:
return url_template.format(**params)
except (KeyError, ValueError) as e:
self.log.warning("Failed to render URL template for bundle %s: %s", self.name, e)
return None
| DagBundleModel |
python | django__django | django/contrib/admin/widgets.py | {
"start": 2870,
"end": 3747
} | class ____(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
template_name = "admin/widgets/split_datetime.html"
def __init__(self, attrs=None):
widgets = [BaseAdminDateWidget, BaseAdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context["date_label"] = _("Date:")
context["time_label"] = _("Time:")
for widget in context["widget"]["subwidgets"]:
widget["attrs"]["aria-describedby"] = f"id_{name}_timezone_warning_helptext"
return context
def id_for_label(self, id_):
return id_
| AdminSplitDateTime |
python | google__pytype | pytype/tests/test_typevar1.py | {
"start": 99,
"end": 22304
} | class ____(test_base.BaseTest):
"""Tests for TypeVar."""
def test_unused_typevar(self):
ty = self.Infer("""
from typing import TypeVar
T = TypeVar("T")
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
T = TypeVar("T")
""",
)
@test_utils.skipBeforePy((3, 12), "type aliases are new in 3.12")
def test_unused_typevar_pep695(self):
ty = self.Infer("""
type MyType[T] = list[T]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
T = TypeVar("T")
MyType = list[T]
""",
)
@test_utils.skipBeforePy((3, 12), "type aliases are new in 3.12")
def test_unused_typevar_pep695_switch_order(self):
ty = self.Infer("""
type FlippedPair[S, T] = tuple[T, S]
""")
# TODO(b/412616662): This pytd result is wrong, as T and S order should be
# flipped but there's no way to represent this properly without printing out
# type aliases.
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
S = TypeVar('S')
T = TypeVar("T")
FlippedPair = tuple[T, S]
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_type_var_with_bounds_in_type_alias(self):
ty = self.Infer("""
type Alias[T: int] = list[T]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
T = TypeVar('T', bound=int)
Alias = list[T]
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_type_var_with_constraints_in_type_alias(self):
ty = self.Infer("""
type Alias[T: (int, str)] = list[T]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
T = TypeVar('T', int, str)
Alias = list[T]
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_function_type_var_single(self):
ty = self.Infer("""
def foo[T, S](a: T) -> T:
return a
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
T = TypeVar("T")
def foo(a: T) -> T: ...
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_function_type_var_double(self):
ty = self.Infer("""
def foo[T, S](a: T, b: S) -> tuple[S, T]:
return (a, b)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
S = TypeVar('S')
T = TypeVar('T')
def foo(a: T, b: S) -> tuple[S, T]: ...
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_class_single_type_var(self):
ty = self.Infer("""
class A[T]: pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class A(Generic[T]):
__type_params__: tuple[Any]
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_class_double_type_var(self):
ty = self.Infer("""
class A[T, S]: pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Generic, TypeVar
S = TypeVar('S')
T = TypeVar('T')
class A(Generic[T, S]):
__type_params__: tuple[Any, Any]
""",
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_type_var_tuple(self):
errors = self.CheckWithErrors("""
type Tup[*Ts] = ( # not-supported-yet[e1]
tuple[int, *Ts] ) # invalid-annotation[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": "Using TypeVarTuple in Generics is not supported yet",
"e2": "Invalid type annotation '<instance of tuple>' \nNot a type",
},
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_class_both_generic_and_base(self):
errors = self.CheckWithErrors("""
from typing import Generic, TypeVar
U = TypeVar('U')
class A[T, S](Generic[U]): pass # invalid-annotation[e1]
""")
self.assertErrorRegexes(
errors,
{
"e1": (
r"Invalid type annotation 'A' \nCannot inherit from"
r" Generic\[...\] multiple times"
),
},
)
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_unused_typevar_pep695_class_inherit_from_base(self):
ty = self.Infer("""
class Base[T]: pass
class Derived[S, T](Base[T]): pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Generic, TypeVar
S = TypeVar('S')
T = TypeVar('T')
class Base(Generic[T]):
__type_params__: tuple[Any]
class Derived(Base[T], Generic[S, T]):
__type_params__: tuple[Any, Any]
""",
)
def test_import_typevar(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", """T = TypeVar("T")""")
ty = self.Infer(
"""
from a import T
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
T = TypeVar("T")
""",
)
def test_invalid_typevar(self):
ty, errors = self.InferWithErrors("""
from typing import TypeVar
typevar = TypeVar
T = typevar() # invalid-typevar[e1]
T = typevar("T") # ok
T = typevar(42) # invalid-typevar[e2]
T = typevar(str()) # invalid-typevar[e3]
T = typevar("T", str, int if __random__ else float) # invalid-typevar[e4]
T = typevar("T", 0, float) # invalid-typevar[e5]
T = typevar("T", str) # invalid-typevar[e6]
# pytype: disable=not-supported-yet
S = typevar("S", covariant=False) # ok
T = typevar("T", covariant=False) # duplicate ok
# pytype: enable=not-supported-yet
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
typevar = ... # type: type
S = TypeVar("S")
T = TypeVar("T")
""",
)
self.assertErrorRegexes(
errors,
{
"e1": r"wrong arguments",
"e2": r"Expected.*str.*Actual.*int",
"e3": r"constant str",
"e4": r"constraint.*Must be constant",
"e5": r"Expected.*_1:.*type.*Actual.*_1: int",
"e6": r"0 or more than 1",
},
)
def test_print_constraints(self):
ty = self.Infer("""
from typing import List, TypeVar
S = TypeVar("S", int, float, covariant=True) # pytype: disable=not-supported-yet
T = TypeVar("T", int, float)
U = TypeVar("U", List[int], List[float])
""")
# The "covariant" keyword is ignored for now.
self.assertTypesMatchPytd(
ty,
"""
from typing import List, TypeVar
S = TypeVar("S", int, float)
T = TypeVar("T", int, float)
U = TypeVar("U", List[int], List[float])
""",
)
def test_infer_typevars(self):
ty = self.Infer("""
def id(x):
return x
def wrap_tuple(x, y):
return (x, y)
def wrap_list(x, y):
return [x, y]
def wrap_dict(x, y):
return {x: y}
def return_second(x, y):
return y
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, List, Tuple, Union
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
def id(x: _T0) -> _T0: ...
def wrap_tuple(x: _T0, y: _T1) -> Tuple[_T0, _T1]: ...
def wrap_list(x: _T0, y: _T1) -> List[Union[_T0, _T1]]: ...
def wrap_dict(x: _T0, y: _T1) -> Dict[_T0, _T1]: ...
def return_second(x, y: _T1) -> _T1: ...
""",
)
def test_infer_union(self):
ty = self.Infer("""
def return_either(x, y):
return x or y
def return_arg_or_42(x):
return x or 42
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
def return_either(x: _T0, y: _T1) -> Union[_T0, _T1]: ...
def return_arg_or_42(x: _T0) -> Union[_T0, int]: ...
""",
)
def test_typevar_in_type_comment(self):
self.InferWithErrors("""
from typing import List, TypeVar
T = TypeVar("T")
x = None # type: T # invalid-annotation
y = None # type: List[T] # invalid-annotation
""")
def test_base_class_with_typevar(self):
ty = self.Infer("""
from typing import List, TypeVar
T = TypeVar("T")
class A(List[T]): pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, TypeVar
T = TypeVar("T")
class A(List[T]): ...
""",
)
def test_overwrite_base_class_with_typevar(self):
self.Check("""
from typing import List, TypeVar
T = TypeVar("T")
l = List[T]
l = list
class X(l): pass
""")
def test_bound(self):
self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", int, float, bound=str) # invalid-typevar
S = TypeVar("S", bound="") # invalid-typevar
V = TypeVar("V", bound=int if __random__ else float) # invalid-typevar
U = TypeVar("U", bound=str) # ok
""")
def test_covariant(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar("T", covariant=True) # not-supported-yet
U = TypeVar("U", covariant=True if __random__ else False) # invalid-typevar[e1]
S = TypeVar("S", covariant=42) # invalid-typevar[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"constant",
"e2": r"Expected.*bool.*Actual.*int",
},
)
def test_contravariant(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar("T", contravariant=True) # not-supported-yet
U = TypeVar("U", contravariant=True if __random__ else False) # invalid-typevar[e1]
S = TypeVar("S", contravariant=42) # invalid-typevar[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"constant",
"e2": r"Expected.*bool.*Actual.*int",
},
)
def test_default(self):
ty = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar("T", default=int) # pytype: disable=not-supported-yet
class Foo(Generic[T]):
pass
f = Foo()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]): ...
f: Foo[nothing]
""",
)
self.CheckWithErrors("""
from typing import Generic, TypeVar
T = TypeVar("T", default=int) # not-supported-yet
class Foo(Generic[T]):
pass
f = Foo()
""")
def test_dont_propagate_pyval(self):
# in functions like f(x: T) -> T, if T has constraints we should not copy
# the value of constant types between instances of the typevar.
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar
AnyInt = TypeVar('AnyInt', int)
def f(x: AnyInt) -> AnyInt: ...
""",
)
ty = self.Infer(
"""
import a
if a.f(0):
x = 3
if a.f(1):
y = 3
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: int
y = ... # type: int
""",
)
def test_property_type_param(self):
# We should allow property signatures of the form f(self: T) -> X[T]
# without complaining about the class not being parametrised over T
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar, List
T = TypeVar('T')
class A:
@property
def foo(self: T) -> List[T]: ...
class B(A): ...
""",
)
ty = self.Infer(
"""
import a
x = a.A().foo
y = a.B().foo
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import List
x = ... # type: List[a.A]
y = ... # type: List[a.B]
""",
)
def test_property_type_param2(self):
# Test for classes inheriting from Generic[X]
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar, List, Generic
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[U]):
@property
def foo(self: T) -> List[T]: ...
class B(A, Generic[U]): ...
def make_A() -> A[int]: ...
def make_B() -> B[int]: ...
""",
)
ty = self.Infer(
"""
import a
x = a.make_A().foo
y = a.make_B().foo
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import List
x = ... # type: List[a.A[int]]
y = ... # type: List[a.B[int]]
""",
)
# Skipping due to b/66005735
@test_base.skip("Type parameter bug")
def test_property_type_param3(self):
# Don't mix up the class parameter and the property parameter
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar, List, Generic
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[U]):
@property
def foo(self: T) -> List[U]: ...
def make_A() -> A[int]: ...
""",
)
ty = self.Infer(
"""
import a
x = a.make_A().foo
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: List[int]
""",
)
def test_property_type_param_with_constraints(self):
# Test setting self to a constrained type
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar, List, Generic
T = TypeVar('T')
U = TypeVar('U', int, str)
X = TypeVar('X', int)
class A(Generic[U]):
@property
def foo(self: A[X]) -> List[X]: ...
def make_A() -> A[int]: ...
""",
)
ty = self.Infer(
"""
import a
x = a.make_A().foo
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import List
x = ... # type: List[int]
""",
)
def test_classmethod_type_param(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar, List, Type
T = TypeVar('T')
class A:
@classmethod
def foo(self: Type[T]) -> List[T]: ...
class B(A): ...
""",
)
ty = self.Infer(
"""
import a
v = a.A.foo()
w = a.B.foo()
x = a.A().foo()
y = a.B().foo()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import List
v = ... # type: List[a.A]
w = ... # type: List[a.B]
x = ... # type: List[a.A]
y = ... # type: List[a.B]
""",
)
def test_metaclass_property_type_param(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar, Type, List
T = TypeVar('T')
class Meta():
@property
def foo(self: Type[T]) -> List[T]: ...
class A(metaclass=Meta):
pass
""",
)
ty = self.Infer(
"""
import a
x = a.A.foo
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import List
x = ... # type: List[a.A]
""",
)
def test_top_level_union(self):
ty = self.Infer("""
from typing import TypeVar
if __random__:
T = TypeVar("T")
else:
T = 42
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
T = ... # type: Any
""",
)
def test_store_typevar_in_dict(self):
ty = self.Infer("""
from typing import TypeVar
T = TypeVar("T")
a = {'key': T}
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, TypeVar
a = ... # type: Dict[str, nothing]
T = TypeVar('T')
""",
)
def test_late_bound(self):
errors = self.CheckWithErrors("""
from typing import TypeVar, Union
T = TypeVar("T", int, float, bound="str") # invalid-typevar[e1]
S = TypeVar("S", bound="") # invalid-typevar[e2]
U = TypeVar("U", bound="str") # ok
V = TypeVar("V", bound="int if __random__ else float") # invalid-typevar[e3]
W = TypeVar("W", bound="Foo") # ok, forward reference
X = TypeVar("X", bound="Bar") # name-error[e4]
class Foo:
pass
""")
self.assertErrorRegexes(
errors,
{
"e1": r"mutually exclusive",
"e2": r"empty string",
"e3": r"Must be constant",
"e4": r"Name.*Bar",
},
)
def test_late_constraints(self):
ty = self.Infer("""
from typing import List, TypeVar
S = TypeVar("S", int, float)
T = TypeVar("T", "int", "float")
U = TypeVar("U", "List[int]", List[float])
V = TypeVar("V", "Foo", "List[Foo]")
class Foo:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, TypeVar
S = TypeVar("S", int, float)
T = TypeVar("T", int, float)
U = TypeVar("U", List[int], List[float])
V = TypeVar("V", Foo, List[Foo])
class Foo:
pass
""",
)
def test_typevar_in_alias(self):
ty = self.Infer("""
from typing import TypeVar, Union
T = TypeVar("T", int, float)
Num = Union[T, complex]
x = 10 # type: Num[int]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar, Union
T = TypeVar("T", int, float)
Num = Union[T, complex]
x: Union[int, complex]
""",
)
def test_type_of_typevar(self):
self.Check("""
from typing import Sequence, TypeVar
T = TypeVar('T')
def f(x): # type: (Sequence[T]) -> Sequence[T]
print(type(x))
return x
""")
def test_type_of_typevar_error(self):
errors = self.CheckWithErrors("""
from typing import Sequence, Type, TypeVar
T = TypeVar('T')
def f(x): # type: (int) -> int
return x
def g(x): # type: (Sequence[T]) -> Type[Sequence[T]]
return f(type(x)) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": "Expected.*int.*Actual.*Sequence"})
def test_typevar_in_constant(self):
ty = self.Infer("""
from typing import TypeVar
T = TypeVar('T')
class Foo:
def __init__(self):
self.f1 = self.f2
def f2(self, x):
# type: (T) -> T
return x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, TypeVar
T = TypeVar('T')
class Foo:
f1: Callable[[T], T]
def __init__(self) -> None: ...
def f2(self, x: T) -> T: ...
""",
)
def test_extra_arguments(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar("T", extra_arg=42) # invalid-typevar[e1]
S = TypeVar("S", *__any_object__) # invalid-typevar[e2]
U = TypeVar("U", **__any_object__) # invalid-typevar[e3]
""")
self.assertErrorRegexes(
errors, {"e1": r"extra_arg", "e2": r"\*args", "e3": r"\*\*kwargs"}
)
def test_simplify_args_and_kwargs(self):
ty = self.Infer("""
from typing import TypeVar
constraints = (int, str)
kwargs = {"covariant": True}
T = TypeVar("T", *constraints, **kwargs) # pytype: disable=not-supported-yet
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Tuple, Type, TypeVar
T = TypeVar("T", int, str)
constraints = ... # type: Tuple[Type[int], Type[str]]
kwargs = ... # type: Dict[str, bool]
""",
)
def test_typevar_starargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Generic, TypeVar, Union
T = TypeVar('T')
S = TypeVar('S')
SS = TypeVar('SS')
class A(Generic[T]):
def __init__(self, x: T, *args: S, **kwargs: SS):
self = A[Union[T, S, SS]]
""",
)
self.Check(
"""
import a
a.A(1)
a.A(1, 2, 3)
a.A(1, 2, 3, a=1, b=2)
""",
pythonpath=[d.path],
)
def test_cast_generic_callable(self):
errors = self.CheckWithErrors("""
from typing import Callable, TypeVar, cast
T = TypeVar('T')
def f(x):
return cast(Callable[[T, T], T], x)
assert_type(f(None)(0, 1), int)
f(None)(0, '1') # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": "Expected.*int.*Actual.*str"})
@test_utils.skipBeforePy((3, 12), "PEP 695 - 3.12 feature")
def test_global_var_not_hidden_by_type_variable(self):
self.Check("""
Apple: str = 'Apple'
type AppleBox[Apple] = tuple[Apple, ...]
def print_apple(a: str):
print(a)
print_apple(Apple)
""")
if __name__ == "__main__":
test_base.main()
| TypeVarTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 1551,
"end": 1665
} | class ____:
def __get__(self, instance: "ClassE | None", owner: "type[ClassE]"):
return None
| DescriptorE |
python | has2k1__plotnine | plotnine/geoms/annotation_logticks.py | {
"start": 710,
"end": 7178
} | class ____(geom_rug):
"""
Internal geom implementing drawing of annotation_logticks
"""
DEFAULT_AES = {}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
"sides": "bl",
"alpha": 1,
"color": "black",
"size": 0.5,
"linetype": "solid",
"lengths": (0.036, 0.0225, 0.012),
"base": 10,
}
draw_legend = staticmethod(geom_path.draw_legend)
def draw_layer(self, data: pd.DataFrame, layout: Layout, coord: coord):
"""
Draw ticks on every panel
"""
for pid in layout.layout["PANEL"]:
ploc = pid - 1
panel_params = layout.panel_params[ploc]
ax = layout.axs[ploc]
self.draw_panel(data, panel_params, coord, ax)
@staticmethod
def _check_log_scale(
base: Optional[float],
sides: str,
panel_params: panel_view,
coord: coord,
) -> tuple[float, float]:
"""
Check the log transforms
Parameters
----------
base : float | None
Base of the logarithm in which the ticks will be
calculated. If `None`, the base of the log transform
the scale will be used.
sides : str, default="bl"
Sides onto which to draw the marks. Any combination
chosen from the characters `btlr`, for *bottom*, *top*,
*left* or *right* side marks. If `coord_flip()` is used,
these are the sides *before* the flip.
panel_params : panel_view
`x` and `y` view scale values.
coord : coord
Coordinate (e.g. coord_cartesian) system of the geom.
Returns
-------
out : tuple
The bases (base_x, base_y) to use when generating the ticks.
"""
def get_base(sc, ubase: Optional[float]) -> float:
ae = sc.aesthetics[0]
if not isinstance(sc, ScaleContinuous) or not sc.is_log_scale:
warnings.warn(
f"annotation_logticks for {ae}-axis which does not have "
"a log scale. The logticks may not make sense.",
PlotnineWarning,
)
return 10 if ubase is None else ubase
base = sc._trans.base # pyright: ignore
if ubase is not None and base != ubase:
warnings.warn(
f"The x-axis is log transformed in base={base} ,"
"but the annotation_logticks are computed in base="
f"{ubase}",
PlotnineWarning,
)
return ubase
return base
base_x, base_y = 10, 10
x_scale = panel_params.x.scale
y_scale = panel_params.y.scale
if isinstance(coord, coord_flip):
x_scale, y_scale = y_scale, x_scale
base_x, base_y = base_y, base_x
if "t" in sides or "b" in sides:
base_x = get_base(x_scale, base)
if "l" in sides or "r" in sides:
base_y = get_base(y_scale, base)
return base_x, base_y
@staticmethod
def _calc_ticks(
value_range: tuple[float, float], base: float
) -> tuple[AnyArray, AnyArray, AnyArray]:
"""
Calculate tick marks within a range
Parameters
----------
value_range: tuple
Range for which to calculate ticks.
base : number
Base of logarithm
Returns
-------
out: tuple
(major, middle, minor) tick locations
"""
def _minor(x: Sequence[Any], mid_idx: int) -> AnyArray:
return np.hstack([x[1:mid_idx], x[mid_idx + 1 : -1]])
# * Calculate the low and high powers,
# * Generate for all intervals in along the low-high power range
# The intervals are in normal space
# * Calculate evenly spaced breaks in normal space, then convert
# them to log space.
low = np.floor(value_range[0])
high = np.ceil(value_range[1])
arr = base ** np.arange(low, float(high + 1))
n_ticks = int(np.round(base) - 1)
breaks = [
log(np.linspace(b1, b2, n_ticks + 1), base)
for (b1, b2) in list(zip(arr, arr[1:]))
]
# Partition the breaks in the 3 groups
major = np.array([x[0] for x in breaks] + [breaks[-1][-1]])
if n_ticks % 2:
mid_idx = n_ticks // 2
middle = np.array([x[mid_idx] for x in breaks])
minor = np.hstack([_minor(x, mid_idx) for x in breaks])
else:
middle = np.array([])
minor = np.hstack([x[1:-1] for x in breaks])
return major, middle, minor
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
params = self.params
# Any passed data is ignored, the relevant data is created
sides = params["sides"]
lengths = params["lengths"]
_aesthetics = {
"size": params["size"],
"color": params["color"],
"alpha": params["alpha"],
"linetype": params["linetype"],
}
def _draw(
geom: geom,
axis: Literal["x", "y"],
tick_positions: tuple[AnyArray, AnyArray, AnyArray],
):
for position, length in zip(tick_positions, lengths):
data = pd.DataFrame({axis: position, **_aesthetics})
params["length"] = length
geom.draw_group(data, panel_params, coord, ax, params)
if isinstance(coord, coord_flip):
tick_range_x = panel_params.y.range
tick_range_y = panel_params.x.range
else:
tick_range_x = panel_params.x.range
tick_range_y = panel_params.y.range
# these are already flipped iff coord_flip
base_x, base_y = self._check_log_scale(
params["base"], sides, panel_params, coord
)
if "b" in sides or "t" in sides:
tick_positions = self._calc_ticks(tick_range_x, base_x)
_draw(self, "x", tick_positions)
if "l" in sides or "r" in sides:
tick_positions = self._calc_ticks(tick_range_y, base_y)
_draw(self, "y", tick_positions)
| _geom_logticks |
python | pypa__warehouse | warehouse/events/models.py | {
"start": 3555,
"end": 5073
} | class ____:
tag: Mapped[str]
time: Mapped[datetime_now]
additional: Mapped[dict | None] = mapped_column(JSONB)
@declared_attr
def ip_address_id(cls): # noqa: N805
return mapped_column(
UUID(as_uuid=True),
ForeignKey("ip_addresses.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=True,
)
@declared_attr
def ip_address(cls): # noqa: N805
return orm.relationship(IpAddress)
@property
def location_info(cls) -> str: # noqa: N805
"""
Determine "best" location info to display.
Dig into `.additional` for `geoip_info` and return that if it exists.
It was stored at the time of the event, and may change in the related
`IpAddress` object over time.
Otherwise, return the `ip_address` and let its repr decide.
"""
if cls.additional is not None and "geoip_info" in cls.additional:
g = GeoIPInfo(**cls.additional["geoip_info"])
if g.display():
return g.display()
return cls.ip_address
@property
def user_agent_info(cls) -> str: # noqa: N805
"""
Display a summarized User-Agent if available
Dig into `.additional` for `user_agent_info` and return that if it exists.
"""
if cls.additional is not None and "user_agent_info" in cls.additional:
return UserAgentInfo(**cls.additional["user_agent_info"]).display()
return "No User-Agent"
| Event |
python | wandb__wandb | wandb/sdk/artifacts/_generated/update_team_registry_role.py | {
"start": 180,
"end": 274
} | class ____(GQLResult):
result: Optional[UpdateTeamRegistryRoleResult]
| UpdateTeamRegistryRole |
python | tensorflow__tensorflow | tensorflow/python/keras/mixed_precision/autocast_variable.py | {
"start": 1858,
"end": 19677
} | class ____(variables.Variable, core.Tensor):
"""Variable that will cast itself to a different dtype in applicable contexts.
This class wraps a floating-point `tf.Variable`. It emulates the variable
interface and delegates to the wrapped variable, but it additionally will cast
the wrapped variable under an `enable_auto_cast_variables(dtype)` context
manager.
For example:
>>> v = tf.Variable(1.0, dtype=tf.float32)
>>> v = AutoCastVariable(v)
>>> tf.identity(v).dtype
tf.float32
>>> with enable_auto_cast_variables(tf.float16):
... tf.identity(v).dtype
tf.float16
The purpose of this class is to allow Keras layers to create variables in
float32, and automatically cast them to float16 or bfloat16 when the layer is
called.
"""
def __init__(self, variable):
"""Creates an AutoCastVariable instance.
Args:
variable: A floating-point resource variable to wrap.
Raises:
ValueError: If `variable` is not a floating-point resource variable
"""
if not isinstance(variable, variables.Variable):
raise ValueError('variable must be of type tf.ResourceVariable, but got: '
'%s' % variable)
if not variable.dtype.is_floating:
raise ValueError('variable must be a floating point variable but has '
'type: %s' % variable.dtype.name)
self._variable = variable
# 'delegate' means AutoCastVariable.op return self._variable.op, which will
# raise an AttributeError in Eager (as intended). If set to any other value,
# AutoCastVariable.op returns that value instead, which is used to set the
# op attribute in AutoCastVariable.assign().
self._op = 'delegate'
def _should_cast(self):
"""Returns True if this variable should be casted when accessed."""
autocast_dtype = getattr(_autocast_dtype, 'dtype', None)
return autocast_dtype is not None and self.dtype != autocast_dtype
@property
def dtype(self):
"""The dtype of the underlying variable, before any casts are done."""
return self._variable.dtype
@property
def true_dtype(self):
"""Deprecated alias of `dtype`."""
return self._variable.dtype
@property
def _cast_dtype(self):
dtype = getattr(_autocast_dtype, 'dtype', None)
return dtype or self._variable.dtype
def value(self):
val = self._variable.value()
if not self._should_cast():
return val
return math_ops.cast(val, self._cast_dtype)
def read_value(self):
val = self._variable.read_value()
return math_ops.cast(val, self._cast_dtype)
def sparse_read(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather`."""
val = self._variable.sparse_read(indices, name=name)
return math_ops.cast(val, self._cast_dtype)
def gather_nd(self, indices, name=None):
"""Gather slices of the variable into a Tensor."""
val = self._variable.gather_nd(indices, name=name)
return math_ops.cast(val, self._cast_dtype)
def __getattr__(self, name):
return getattr(self._variable, name)
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts this variable to a tensor."""
if as_ref:
# This ValueError should not occur in practice since it is impossible to
# pass as_ref=True using public APIs.
raise ValueError('Cannot convert AutoCastVariable to a tensor if '
'as_ref=True is passed to convert_to_tensor')
if not self._should_cast():
return tensor_conversion.convert_to_tensor_v2_with_dispatch(
self._variable, dtype=dtype, name=name
)
if dtype is not None and not dtype.is_compatible_with(self._cast_dtype):
raise ValueError(
'Incompatible type conversion requested to type {!r} for '
'AutoCastVariable which is casted to type {!r}'.format(
dtype.name, self._cast_dtype.name))
val = tensor_conversion.convert_to_tensor_v2_with_dispatch(
self._variable, dtype=self._variable.dtype, name=name
)
return math_ops.cast(val, self._cast_dtype)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} "
'dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}, '
'numpy={np_repr}>')
return repr_str.format(
v=self, np_repr=numpy_text(self.read_value(), is_repr=True))
else:
repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} "
'dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}>')
return repr_str.format(v=self)
# Method delegations: We delegate the following methods to self._variable.
# Each of these methods simply calls the same method on self._variable. The
# base Variable raises NotImplementedError for most of these, so we must
# override them.
#
# We do not define the following methods from Variable for the following
# reasons:
# * 'count_up_to': This method only applies to int variables, which cannot
# be wrapped with an AutoCastVariable.
# * 'ref': Instead we inherit the definition from Variable.
# If we defined and delegated to Variable, the ref of an AutoCastVariable
# would be the same as the ref of the underlying variable, which would be
# strange as they are different Python objects.
def set_shape(self, shape):
return self._variable.set_shape(self, shape)
@property
def trainable(self):
return self._variable.trainable
@property
def synchronization(self):
return self._variable.synchronization
@property
def aggregation(self):
return self._variable.aggregation
def eval(self, session=None):
return self._variable.eval(session)
def initialized_value(self):
return self._variable.initialized_value()
@property
def initial_value(self):
return self._variable.initial_value
@property
def constraint(self):
return self._variable.constraint
def _apply_assign_update(self,
update_fn,
value,
use_locking=None,
name=None,
read_value=True):
# TODO(b/146181571): This logic can be simplified once
# DistributedVariable.assign returns a DistributedVariable. Currently for
# MirroredStrategy, it returns a Mirrored value.
if ops.executing_eagerly_outside_functions():
assign_op = update_fn(value, use_locking, name, False)
if read_value:
# We create a new AutoCastVariable with the same underlying tf.Variable.
# The new AutoCastVariable is identical except the 'op' attribute is
# defined. This matches the behavior of tf.Variable.assign.
var = create_autocast_variable(self._variable)
var._op = assign_op # pylint:disable=protected-access
return var
return assign_op
# Fallback to wrapping the returned variable in graph mode if possible
assign_var = update_fn(value, use_locking, name, read_value)
if read_value and resource_variable_ops.is_resource_variable(assign_var):
return create_autocast_variable(assign_var)
return assign_var
def _apply_update(self, update_fn, *args, **kwargs):
update_var = update_fn(*args, **kwargs)
if ops.executing_eagerly_outside_functions():
return self
# Fallback to wrapping the returned variable in graph mode if possible
if resource_variable_ops.is_resource_variable(update_var):
return create_autocast_variable(update_var)
return update_var
def assign(self, value, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(self._variable.assign, value, use_locking,
name, read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(self._variable.assign_add, delta,
use_locking, name, read_value)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(self._variable.assign_sub, delta,
use_locking, name, read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_sub, sparse_delta,
use_locking, name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_add, sparse_delta,
use_locking, name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_max, sparse_delta,
use_locking, name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_min, sparse_delta,
use_locking, name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_mul, sparse_delta,
use_locking, name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_div, sparse_delta,
use_locking, name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.scatter_update, sparse_delta,
use_locking, name)
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
return self._apply_update(self._variable.batch_scatter_update, sparse_delta,
use_locking, name)
def scatter_nd_sub(self, indices, updates, name=None):
return self._apply_update(self._variable.scatter_nd_sub, indices, updates,
name)
def scatter_nd_add(self, indices, updates, name=None):
return self._apply_update(self._variable.scatter_nd_add, indices, updates,
name)
def scatter_nd_update(self, indices, updates, name=None):
return self._apply_update(self._variable.scatter_nd_update, indices,
updates, name)
def load(self, value, session=None):
return self._variable.load(value, session)
@property
def name(self):
return self._variable.name
@property
def _shared_name(self):
return self._variable._shared_name # pylint:disable=protected-access
@property
def initializer(self):
return self._variable.initializer
@property
def device(self):
return self._variable.device
@property
def op(self):
if self._op == 'delegate':
return self._variable.op
return self._op
def _as_graph_element(self):
graph_element = self._variable._as_graph_element() # pylint:disable=protected-access
if graph_element is None:
return self._op
return graph_element
@property
def graph(self):
return self._variable.graph
@property
def shape(self):
return self._variable.shape
def get_shape(self) -> tensor_shape.TensorShape:
return self._variable.get_shape()
def _gather_saveables_for_checkpoint(self):
# By delegating this method to the wrapped variable, checkpoints with
# AutoCastVariables are identical to checkpoints with normal variables.
# Therefore models checkpointed with AutoCastVariables can be restored on
# models with normal variables, and vice versa.
return self._variable._gather_saveables_for_checkpoint() # pylint:disable=protected-access
def _export_to_saved_model_graph(self, object_map, tensor_map, options,
**kwargs):
# By delegating this method to the wrapped variable, SavedModel with
# AutoCastVariables are identical to SavedModel with normal variables.
resource_list = self._variable._export_to_saved_model_graph( # pylint:disable=protected-access
object_map, tensor_map, options, **kwargs)
object_map[self] = object_map[self._variable]
return resource_list
# TODO(reedwm): Maybe encode the fact the variable is an AutoCastVariable in
# to_proto().
def to_proto(self, export_scope=None):
return self._variable.to_proto(export_scope)
def from_proto(self, variable_def, import_scope=None):
return self._variable.from_proto(variable_def, import_scope)
# Delegate the private attributes _handle_name and _initializer_op to
# self._variable. SavedModel sets these attributes when loading a model. For
# example, it sets _handle_name here:
# https://github.com/tensorflow/tensorflow/blob/db26bd574fa95b5bdd53c08463dd19407cc0297e/tensorflow/python/keras/saving/saved_model/load.py#L211
# We need to expose these attributes on AutoCastVariable as well for
# SavedModel to work properly.
# TODO(reedwm/kathywu): Find a better way to support SavedModel. Exposing
# private attributes is hacky and difficult to maintain.
@property
def _handle_name(self):
return self._variable._handle_name # pylint: disable=protected-access
@_handle_name.setter
def _handle_name(self, handle_name):
self._variable._handle_name = handle_name # pylint: disable=protected-access
@property
def _initializer_op(self):
return self._variable._initializer_op # pylint: disable=protected-access
@_initializer_op.setter
def _initializer_op(self, initializer_op):
self._variable._initializer_op = initializer_op # pylint: disable=protected-access
# Operator overloads:
# Note we only overload operators that support floating-point types, as
# non-float variables cannot be wrapped with an AutoCastVariable.
# Also note: We call read_value() instead of value(), because value() causes
# gradients not to work properly when TPUStrategy is used: b/143380936
def __add__(self, o):
return self.read_value() + o
def __radd__(self, o):
return o + self.read_value()
def __sub__(self, o):
return self.read_value() - o
def __rsub__(self, o):
return o - self.read_value()
def __mul__(self, o):
return self.read_value() * o
def __rmul__(self, o):
return o * self.read_value()
def __truediv__(self, o):
return self.read_value() / o
def __rtruediv__(self, o):
return o / self.read_value()
def __floordiv__(self, o):
return self.read_value() // o
def __rfloordiv__(self, o):
return o // self.read_value()
def __mod__(self, o):
return self.read_value() % o
def __rmod__(self, o):
return o % self.read_value()
def __lt__(self, o):
return self.read_value() < o
def __le__(self, o):
return self.read_value() <= o
def __gt__(self, o):
return self.read_value() > o
def __ge__(self, o):
return self.read_value() >= o
def __getitem__(self, o):
return self.read_value()[o]
def __pow__(self, o, modulo=None):
return pow(self.read_value(), o, modulo)
def __rpow__(self, o):
return pow(o, self.read_value())
def __neg__(self):
return -self.read_value() # pylint: disable=invalid-unary-operand-type
def __abs__(self):
return abs(self.read_value())
def __div__(self, o):
try:
return self.read_value().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self.read_value().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self.read_value().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self.read_value().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# pylint: enable=multiple-statements
tensor_conversion_registry.register_tensor_conversion_function(
AutoCastVariable, AutoCastVariable._dense_var_to_tensor) # pylint:disable=protected-access
def create_autocast_variable(variable):
"""Creates an AutoCastVariable that wraps another variable.
This typically just returns `AutoCastVariable(variable)`. But, if the variable
is a DistributedVariable or one of its subclasses, we instead dynamically
create a class that subclasses from both AutoCastVariable and
variable.__class__. This is so the returned variable will still pass
`isinstance(variable, variable.__class__)`, which is required for
DistributedVariables and its subclasses to work properly.
Args:
variable: A floating-point resource variable to wrap.
Returns:
An AutoCastVariable that wraps the variable.
"""
if not distributed_training_utils.is_distributed_variable(variable):
return AutoCastVariable(variable)
class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):
"""An AutoCastVariable that also subclasses from variable.__class__.
variable.__class__ is either a DistributedVariable or an
AggregatingVariable.
"""
def __repr__(self):
# pylint: disable=missing-format-attribute
return ('<AutoCastDistributedVariable dtype={v.dtype.name} '
'dtype_to_cast_to={v._cast_dtype.name} '
'inner_variable={v._variable}>'
).format(v=self)
# pylint: enable=missing-format-attribute
return AutoCastDistributedVariable(variable)
| AutoCastVariable |
python | kamyu104__LeetCode-Solutions | Python/first-letter-to-appear-twice.py | {
"start": 42,
"end": 303
} | class ____(object):
def repeatedCharacter(self, s):
"""
:type s: str
:rtype: str
"""
lookup = set()
for c in s:
if c in lookup:
break
lookup.add(c)
return c
| Solution |
python | PyCQA__pylint | tests/functional/a/assigning/assigning_non_slot.py | {
"start": 4602,
"end": 4684
} | class ____(ColorCls):
__slots__ = ()
repro = Child()
Child.COLOR = "blue"
| Child |
python | pypa__hatch | tests/backend/metadata/test_build.py | {
"start": 2156,
"end": 3081
} | class ____:
def test_default(self, isolation):
metadata = BuildMetadata(str(isolation), {})
assert metadata.backend_path == metadata.backend_path == []
def test_not_array(self, isolation):
metadata = BuildMetadata(str(isolation), {"backend-path": 10})
with pytest.raises(TypeError, match="Field `build-system.backend-path` must be an array"):
_ = metadata.backend_path
def test_entry_not_string(self, isolation):
metadata = BuildMetadata(str(isolation), {"backend-path": [10]})
with pytest.raises(TypeError, match="Entry #1 of field `build-system.backend-path` must be a string"):
_ = metadata.backend_path
def test_correct(self, isolation):
metadata = BuildMetadata(str(isolation), {"backend-path": ["foo", "bar", "Baz"]})
assert metadata.backend_path == metadata.backend_path == ["foo", "bar", "Baz"]
| TestBackendPath |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/sql.py | {
"start": 991,
"end": 6222
} | class ____(BaseStructStoreIndex[SQLStructTable]):
"""
SQL Struct Store Index.
The SQLStructStoreIndex is an index that uses a SQL database
under the hood. During index construction, the data can be inferred
from unstructured documents given a schema extract prompt,
or it can be pre-loaded in the database.
During query time, the user can either specify a raw SQL query
or a natural language query to retrieve their data.
NOTE: this is deprecated.
Args:
documents (Optional[Sequence[DOCUMENTS_INPUT]]): Documents to index.
NOTE: in the SQL index, this is an optional field.
sql_database (Optional[SQLDatabase]): SQL database to use,
including table names to specify.
See :ref:`Ref-Struct-Store` for more details.
table_name (Optional[str]): Name of the table to use
for extracting data.
Either table_name or table must be specified.
table (Optional[Table]): SQLAlchemy Table object to use.
Specifying the Table object explicitly, instead of
the table name, allows you to pass in a view.
Either table_name or table must be specified.
sql_context_container (Optional[SQLContextContainer]): SQL context container.
an be generated from a SQLContextContainerBuilder.
See :ref:`Ref-Struct-Store` for more details.
"""
index_struct_cls = SQLStructTable
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
index_struct: Optional[SQLStructTable] = None,
sql_database: Optional[SQLDatabase] = None,
table_name: Optional[str] = None,
table: Optional[Table] = None,
ref_doc_id_column: Optional[str] = None,
sql_context_container: Optional[SQLContextContainer] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
if sql_database is None:
raise ValueError("sql_database must be specified")
self.sql_database = sql_database
# needed here for data extractor
self._ref_doc_id_column = ref_doc_id_column
self._table_name = table_name
self._table = table
# if documents aren't specified, pass in a blank []
if index_struct is None:
nodes = nodes or []
super().__init__(
nodes=nodes,
index_struct=index_struct,
**kwargs,
)
# TODO: index_struct context_dict is deprecated,
# we're migrating storage of information to here.
if sql_context_container is None:
container_builder = SQLContextContainerBuilder(sql_database)
sql_context_container = container_builder.build_context_container()
self.sql_context_container = sql_context_container
@property
def ref_doc_id_column(self) -> Optional[str]:
return self._ref_doc_id_column
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **build_kwargs: Any
) -> SQLStructTable:
"""Build index from nodes."""
index_struct = self.index_struct_cls()
if len(nodes) == 0:
return index_struct
else:
data_extractor = SQLStructDatapointExtractor(
Settings.llm,
self.schema_extract_prompt,
self.output_parser,
self.sql_database,
table_name=self._table_name,
table=self._table,
ref_doc_id_column=self._ref_doc_id_column,
)
# group nodes by ids
source_to_node = defaultdict(list)
for node in nodes:
source_to_node[node.ref_doc_id].append(node)
for node_set in source_to_node.values():
data_extractor.insert_datapoint_from_nodes(node_set)
return index_struct
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
data_extractor = SQLStructDatapointExtractor(
Settings.llm,
self.schema_extract_prompt,
self.output_parser,
self.sql_database,
table_name=self._table_name,
table=self._table,
ref_doc_id_column=self._ref_doc_id_column,
)
data_extractor.insert_datapoint_from_nodes(nodes)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
raise NotImplementedError("Not supported")
def as_query_engine(
self,
llm: Optional[LLMType] = None,
query_mode: Union[str, SQLQueryMode] = SQLQueryMode.NL,
**kwargs: Any,
) -> BaseQueryEngine:
# NOTE: lazy import
from llama_index.core.indices.struct_store.sql_query import (
NLStructStoreQueryEngine,
SQLStructStoreQueryEngine,
)
if query_mode == SQLQueryMode.NL:
return NLStructStoreQueryEngine(self, **kwargs)
elif query_mode == SQLQueryMode.SQL:
return SQLStructStoreQueryEngine(self, **kwargs)
else:
raise ValueError(f"Unknown query mode: {query_mode}")
GPTSQLStructStoreIndex = SQLStructStoreIndex
| SQLStructStoreIndex |
python | doocs__leetcode | solution/0100-0199/0158.Read N Characters Given read4 II - Call Multiple Times/Solution.py | {
"start": 83,
"end": 600
} | class ____:
def __init__(self):
self.buf4 = [None] * 4
self.i = self.size = 0
def read(self, buf: List[str], n: int) -> int:
j = 0
while j < n:
if self.i == self.size:
self.size = read4(self.buf4)
self.i = 0
if self.size == 0:
break
while j < n and self.i < self.size:
buf[j] = self.buf4[self.i]
self.i += 1
j += 1
return j
| Solution |
python | pytorch__pytorch | torch/_ops.py | {
"start": 42312,
"end": 48099
} | class ____(OpOverload[_P, _T]):
def _fallthrough_keys(self) -> list[DispatchKey]:
# TODO: we should be calling the fallback for these, but a fallthrough is almost close
# enough to the fallback in most cases that we care about.
_DEFAULT_FALLTHROUGH_KEYS = [
DispatchKey.Autograd,
DispatchKey.AutogradCPU,
DispatchKey.AutogradCUDA,
DispatchKey.ADInplaceOrView,
DispatchKey.BackendSelect,
DispatchKey.PythonTLSSnapshot,
DispatchKey.PythonDispatcher,
DispatchKey.Functionalize,
]
def _may_use_fallthrough_instead_of_fallback(key: DispatchKey):
if torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), key):
return torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
self.name(), key
)
return (
key not in self.py_kernels
or self.py_kernels[key] is torch.library.fallthrough_kernel
)
return [
key
for key in _DEFAULT_FALLTHROUGH_KEYS
if _may_use_fallthrough_instead_of_fallback(key)
]
@contextlib.contextmanager
def _register_as_effectful_op_temporarily(self):
from torch._higher_order_ops.effects import (
_EffectType,
_get_effect,
_register_effectful_op,
)
try:
# We don't want to register the effect if there already exists a
# registration, especially if the registration is None (explicitly
# no effect)
register_tmp_effect = _get_effect(self) is None
handle = None
if register_tmp_effect:
handle = _register_effectful_op(self, _EffectType.ORDERED)
yield
finally:
if register_tmp_effect:
assert handle is not None
handle.destroy()
# Use positional-only argument to avoid naming collision with aten ops arguments
# that are named "self". This way, all the aten ops can be called by kwargs.
def __call__(self, /, *args: _P.args, **kwargs: _P.kwargs) -> _T:
if _must_dispatch_in_python(args, kwargs):
# When any inputs are FakeScriptObject, we need to
# skip c++ dispatcher and dispatch in python through _get_dispatch of python_dispatcher
# because C++ dispatcher will check the schema and cannot recognize FakeScriptObject.
#
# Note:
# 1. We only register the torchbind op temporarily as effectful op because we only want
# the effect token functionalization logic to be applied during tracing. Otherwise, the behavior
# of the eagerly executing the op might change after tracing.
# 2. We don't want to register the op as effectful for all torchbind ops in ctor because this might
# cause unexpected behavior for some autograd.profiler ops e.g. profiler._record_function_exit._RecordFunction.
with self._register_as_effectful_op_temporarily():
return self._dispatch_in_python(
self._fallthrough_keys(), *args, **kwargs
)
return self._op(*args, **kwargs)
def _dispatch_in_python(
self, fallthrough_keys: list[DispatchKey], *args: _P.args, **kwargs: _P.kwargs
) -> _T:
non_fallthrough_keys = torch._C._dispatch_keyset_full()
for key in fallthrough_keys:
non_fallthrough_keys = non_fallthrough_keys.remove(key)
dispatch_key_set = _compute_keyset(args, kwargs, non_fallthrough_keys)
dispatch_key = dispatch_key_set.highestPriorityTypeId()
handler = (
self._get_dispatch(dispatch_key)
if dispatch_key not in self._dispatch_cache
else self._dispatch_cache[dispatch_key]
)
if isinstance(handler, DispatchKey):
# fallthrough keys can be registered at runtime via torch.library.impl
# so need to add it to fallthrough_keys and re-dispatch.
if torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
self.name(), dispatch_key
):
return self._dispatch_in_python(
fallthrough_keys + [dispatch_key],
*args,
**kwargs,
)
raise RuntimeError(
f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler}."
f" but no python implementation is found."
f" Please file an issue on this when you encounter this error."
f" This error can happen when you export or compile the model."
f" It can still happen even if a C++ implementation for {dispatch_key}. "
f" has been registered. That's because FakeScriptObject purely lives in python and cannot work "
f" with a C++ implementation."
)
assert isinstance(handler, Callable) # type: ignore[arg-type]
return handler(*args, **kwargs) # pyrefly: ignore [bad-return]
def _must_dispatch_in_python(args, kwargs):
return pytree.tree_any(
lambda obj: isinstance(
obj, torch._library.fake_class_registry.FakeScriptObject
),
(args, kwargs),
)
def _has_script_object_arg(schema: torch.FunctionSchema) -> bool:
return any(isinstance(arg.type, torch.ClassType) for arg in schema.arguments)
# OpOverloadPacket class contains pointer to a base unresolved operator that doesn't correspond to a specific operator
# You can obtain an OpOverload object through attribute query.
| TorchBindOpOverload |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integration_channels.py | {
"start": 2682,
"end": 4200
} | class ____(OrganizationIntegrationChannelsTest):
def setUp(self) -> None:
super().setUp()
self.integration = self.create_integration(
organization=self.organization,
provider="discord",
name="Discord Server",
external_id="1234567890",
)
@patch("sentry.integrations.discord.client.DiscordClient.get")
def test_discord_channels_list(self, mock_get):
mock_channels = [
{"id": "123456", "name": "general", "type": 0},
{"id": "789012", "name": "announcements", "type": 5},
{"id": "345678", "name": "off-topic", "type": 0},
]
mock_get.return_value = mock_channels
response = self.get_success_response(self.organization.slug, self.integration.id)
results = response.data["results"]
DISCORD_CHANNEL_TYPES = {
0: "text",
5: "announcement",
15: "forum",
}
expected = []
for ch in mock_channels:
channel_type = cast(int, ch["type"]) # mypy: ensure int key for map lookup
expected.append(
{
"id": ch["id"],
"name": ch["name"],
"display": f"#{ch['name']}",
"type": DISCORD_CHANNEL_TYPES.get(channel_type, "unknown"),
}
)
assert results == expected
mock_get.assert_called_once()
@control_silo_test
| OrganizationIntegrationChannelsDiscordTest |
python | PrefectHQ__prefect | src/prefect/assets/core.py | {
"start": 921,
"end": 2286
} | class ____(PrefectBaseModel):
"""
Assets are objects that represent materialized data,
providing a way to track lineage and dependencies.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True)
key: ValidAssetKey
properties: Optional[AssetProperties] = Field(
default=None,
description="Properties of the asset. "
"Setting this will overwrite properties of a known asset.",
)
def __repr__(self) -> str:
return f"Asset(key={self.key!r})"
def __hash__(self) -> int:
return hash(self.key)
def add_metadata(self, metadata: dict[str, Any]) -> None:
from prefect.context import AssetContext
asset_ctx = AssetContext.get()
if not asset_ctx:
raise RuntimeError(
"Unable add Asset metadata when not inside of an AssetContext"
)
asset_ctx.add_asset_metadata(self.key, metadata)
def add_asset_metadata(asset: str | Asset, metadata: dict[str, Any]) -> None:
from prefect.context import AssetContext
asset_ctx = AssetContext.get()
if not asset_ctx:
raise RuntimeError(
"Unable to call `add_asset_metadata` when not inside of an AssetContext"
)
asset_key = asset if isinstance(asset, str) else asset.key
asset_ctx.add_asset_metadata(asset_key, metadata)
| Asset |
python | PyCQA__pylint | tests/functional/t/too/too_many_public_methods.py | {
"start": 37,
"end": 1083
} | class ____: # [too-many-public-methods]
def __init__(self):
pass
def meth1(self):
"""hehehe"""
def meth2(self):
"""hehehe"""
def meth3(self):
"""hehehe"""
def meth4(self):
"""hehehe"""
def meth5(self):
"""hehehe"""
def meth6(self):
"""hehehe"""
def meth7(self):
"""hehehe"""
def meth8(self):
"""hehehe"""
def meth9(self):
"""hehehe"""
def meth10(self):
"""hehehe"""
def meth11(self):
"""hehehe"""
def meth12(self):
"""hehehe"""
def meth13(self):
"""hehehe"""
def meth14(self):
"""hehehe"""
def meth15(self):
"""hehehe"""
def meth16(self):
"""hehehe"""
def meth17(self):
"""hehehe"""
def meth18(self):
"""hehehe"""
def meth19(self):
"""hehehe"""
def meth20(self):
"""hehehe"""
def meth21(self):
"""hehehe"""
def _dontcount(self):
"""not public"""
| Aaaa |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_task_runner.py | {
"start": 98855,
"end": 106037
} | class ____:
DEFAULT_ARGS = {
"owner": "test",
"depends_on_past": True,
"start_date": datetime.now(tz=timezone.utc),
"retries": 1,
"retry_delay": timedelta(minutes=1),
}
VALUE = 42
def test_dag_param_resolves_from_task(self, create_runtime_ti, mock_supervisor_comms, time_machine):
"""Test dagparam resolves on operator execution"""
instant = timezone.datetime(2024, 12, 3, 10, 0)
time_machine.move_to(instant, tick=False)
dag = DAG(dag_id="dag_with_dag_params", start_date=timezone.datetime(2024, 12, 3))
dag.param("value", default="NOTSET")
class CustomOperator(BaseOperator):
def execute(self, context):
assert dag.params["value"] == "NOTSET"
task = CustomOperator(task_id="task_with_dag_params")
runtime_ti = create_runtime_ti(task=task, dag_id="dag_with_dag_params")
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
mock_supervisor_comms.send.assert_called_once_with(
SucceedTask(state=TaskInstanceState.SUCCESS, end_date=instant, task_outlets=[], outlet_events=[]),
)
def test_dag_param_dag_overwrite(self, create_runtime_ti, mock_supervisor_comms, time_machine):
"""Test dag param is overwritten from dagrun config"""
instant = timezone.datetime(2024, 12, 3, 10, 0)
time_machine.move_to(instant, tick=False)
dag = DAG(dag_id="dag_with_dag_params_overwrite", start_date=timezone.datetime(2024, 12, 3))
dag.param("value", default="NOTSET")
class CustomOperator(BaseOperator):
def execute(self, context):
# important to use self.dag here
assert self.dag.params["value"] == "new_value"
# asserting on the default value when not set in dag run
assert dag.params["value"] == "NOTSET"
task = CustomOperator(task_id="task_with_dag_params_overwrite")
# we reparse the dag here, and if conf passed, added as params
runtime_ti = create_runtime_ti(
task=task, dag_id="dag_with_dag_params_overwrite", conf={"value": "new_value"}
)
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
mock_supervisor_comms.send.assert_called_once_with(
SucceedTask(state=TaskInstanceState.SUCCESS, end_date=instant, task_outlets=[], outlet_events=[]),
)
def test_dag_param_dag_default(self, create_runtime_ti, mock_supervisor_comms, time_machine):
"""Test that dag param is correctly resolved by operator"""
instant = timezone.datetime(2024, 12, 3, 10, 0)
time_machine.move_to(instant, tick=False)
dag = DAG(
dag_id="dag_with_dag_params_default",
start_date=timezone.datetime(2024, 12, 3),
params={"value": "test"},
)
class CustomOperator(BaseOperator):
def execute(self, context):
assert dag.params["value"] == "test"
assert dag.params["value"] == "test"
task = CustomOperator(task_id="task_with_dag_params_default")
runtime_ti = create_runtime_ti(task=task, dag_id="dag_with_dag_params_default")
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
mock_supervisor_comms.send.assert_called_once_with(
SucceedTask(state=TaskInstanceState.SUCCESS, end_date=instant, task_outlets=[], outlet_events=[]),
)
def test_dag_param_resolves(
self, create_runtime_ti, mock_supervisor_comms, time_machine, make_ti_context
):
"""Test that dag param is correctly resolved by operator"""
instant = timezone.datetime(2024, 12, 3, 10, 0)
time_machine.move_to(instant, tick=False)
@dag_decorator(schedule=None, start_date=timezone.datetime(2024, 12, 3))
def dag_with_dag_params(value="NOTSET"):
@task_decorator
def dummy_task(val):
return val
class CustomOperator(BaseOperator):
def execute(self, context):
assert self.dag.params["value"] == "NOTSET"
_ = dummy_task(value)
custom_task = CustomOperator(task_id="task_with_dag_params")
self.operator = custom_task
dag_with_dag_params()
runtime_ti = create_runtime_ti(task=self.operator, dag_id="dag_with_dag_params")
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
mock_supervisor_comms.send.assert_called_once_with(
SucceedTask(state=TaskInstanceState.SUCCESS, end_date=instant, task_outlets=[], outlet_events=[]),
)
def test_dag_param_dagrun_parameterized(
self, create_runtime_ti, mock_supervisor_comms, time_machine, make_ti_context
):
"""Test that dag param is correctly overwritten when set in dag run"""
instant = timezone.datetime(2024, 12, 3, 10, 0)
time_machine.move_to(instant, tick=False)
@dag_decorator(schedule=None, start_date=timezone.datetime(2024, 12, 3))
def dag_with_dag_params(value=self.VALUE):
@task_decorator
def dummy_task(val):
return val
assert isinstance(value, DagParam)
class CustomOperator(BaseOperator):
def execute(self, context):
assert self.dag.params["value"] == "new_value"
_ = dummy_task(value)
custom_task = CustomOperator(task_id="task_with_dag_params")
self.operator = custom_task
dag_with_dag_params()
runtime_ti = create_runtime_ti(
task=self.operator, dag_id="dag_with_dag_params", conf={"value": "new_value"}
)
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
mock_supervisor_comms.send.assert_called_once_with(
SucceedTask(state=TaskInstanceState.SUCCESS, end_date=instant, task_outlets=[], outlet_events=[]),
)
@pytest.mark.parametrize("value", [VALUE, 0])
def test_set_params_for_dag(
self, create_runtime_ti, mock_supervisor_comms, time_machine, make_ti_context, value
):
"""Test that dag param is correctly set when using dag decorator"""
instant = timezone.datetime(2024, 12, 3, 10, 0)
time_machine.move_to(instant, tick=False)
@dag_decorator(schedule=None, start_date=timezone.datetime(2024, 12, 3))
def dag_with_param(value=value):
@task_decorator
def return_num(num):
return num
xcom_arg = return_num(value)
self.operator = xcom_arg.operator
dag_with_param()
runtime_ti = create_runtime_ti(task=self.operator, dag_id="dag_with_param", conf={"value": value})
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
mock_supervisor_comms.send.assert_any_call(
SucceedTask(state=TaskInstanceState.SUCCESS, end_date=instant, task_outlets=[], outlet_events=[]),
)
| TestDagParamRuntime |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_key_details.py | {
"start": 281,
"end": 13134
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(is_superuser=False)
self.superuser = self.create_user(is_superuser=True)
def test_simple(self) -> None:
project = self.create_project()
key = ProjectKey.objects.get_or_create(project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"name": "hello world"})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.label == "hello world"
def test_no_rate_limit(self) -> None:
project = self.create_project()
key = ProjectKey.objects.create(project=project, rate_limit_window=60, rate_limit_count=1)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"rateLimit": None})
assert response.status_code == 200, response.content
key = ProjectKey.objects.get(id=key.id)
assert key.rate_limit_count is None
assert key.rate_limit_window is None
def test_unset_rate_limit(self) -> None:
project = self.create_project()
key = ProjectKey.objects.create(project=project, rate_limit_window=60, rate_limit_count=1)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url)
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.rate_limit_count == 1
assert key.rate_limit_window == 60
def test_remove_rate_limit(self) -> None:
project = self.create_project()
key = ProjectKey.objects.create(project=project, rate_limit_window=60, rate_limit_count=1)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"rateLimit": {"count": "", "window": 300}})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.rate_limit_count is None
assert key.rate_limit_window is None
def test_simple_rate_limit(self) -> None:
project = self.create_project()
key = ProjectKey.objects.create(
project=project, rate_limit_window=None, rate_limit_count=None
)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"rateLimit": {"count": 1, "window": 60}})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.rate_limit_count == 1
assert key.rate_limit_window == 60
@patch("sentry.api.base.create_audit_entry")
def test_rate_limit_change_data(self, mock_create_audit_entry: MagicMock) -> None:
project = self.create_project()
key = ProjectKey.objects.create(
project=project, rate_limit_window=None, rate_limit_count=None
)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"rateLimit": {"count": 1, "window": 60}})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.rate_limit_count == 1
assert key.rate_limit_window == 60
assert mock_create_audit_entry.call_args[-1]["data"]["prev_rate_limit_count"] is None
assert mock_create_audit_entry.call_args[-1]["data"]["prev_rate_limit_window"] is None
assert mock_create_audit_entry.call_args[-1]["data"]["rate_limit_count"] == 1
assert mock_create_audit_entry.call_args[-1]["data"]["rate_limit_window"] == 60
def test_deactivate(self) -> None:
project = self.create_project()
key = ProjectKey.objects.get_or_create(project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"isActive": False, "name": "hello world"})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.label == "hello world"
assert key.status == ProjectKeyStatus.INACTIVE
def test_default_browser_sdk_version(self) -> None:
project = self.create_project()
key = ProjectKey.objects.get_or_create(project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.data["browserSdkVersion"] == get_default_sdk_version_for_project(project)
def test_set_browser_sdk_version(self) -> None:
project = self.create_project()
key = ProjectKey.objects.get_or_create(project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"browserSdkVersion": "5.x"})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.data["browserSdkVersion"] == "5.x"
def test_default_dynamic_sdk_loader_options(self) -> None:
project = self.create_project()
key = ProjectKey.objects.get_or_create(project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {})
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert "dynamicSdkLoaderOptions" in key.data
assert key.data["dynamicSdkLoaderOptions"] == {
"hasPerformance": True,
"hasReplay": True,
}
def test_dynamic_sdk_loader_options(self) -> None:
project = self.create_project()
key = ProjectKey.objects.get_or_create(project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(
url,
{"dynamicSdkLoaderOptions": {}},
)
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert "dynamicSdkLoaderOptions" in key.data
assert key.data["dynamicSdkLoaderOptions"] == {
"hasPerformance": True,
"hasReplay": True,
}
response = self.client.put(
url,
{"dynamicSdkLoaderOptions": {"hasReplay": False, "hasDebug": True}},
)
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.data.get("dynamicSdkLoaderOptions") == {
"hasReplay": False,
"hasPerformance": True,
"hasDebug": True,
}
response = self.client.put(
url,
{
"dynamicSdkLoaderOptions": {
"hasReplay": False,
"hasPerformance": True,
}
},
)
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.data.get("dynamicSdkLoaderOptions") == {
"hasReplay": False,
"hasPerformance": True,
"hasDebug": True,
}
response = self.client.put(
url,
{"dynamicSdkLoaderOptions": {"hasDebug": False, "invalid-key": "blah"}},
)
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.data.get("dynamicSdkLoaderOptions") == {
"hasReplay": False,
"hasPerformance": True,
"hasDebug": False,
}
response = self.client.put(
url,
{
"dynamicSdkLoaderOptions": {
"hasReplay": "invalid",
}
},
)
assert response.status_code == 400
key = ProjectKey.objects.get(id=key.id)
assert key.data.get("dynamicSdkLoaderOptions") == {
"hasReplay": False,
"hasPerformance": True,
"hasDebug": False,
}
response = self.client.put(
url,
{
"dynamicSdkLoaderOptions": {
"invalid-key": "blah",
}
},
)
assert response.status_code == 200
key = ProjectKey.objects.get(id=key.id)
assert key.data.get("dynamicSdkLoaderOptions") == {
"hasReplay": False,
"hasPerformance": True,
"hasDebug": False,
}
def test_use_case(self) -> None:
"""Regular user cannot update an internal DSN"""
project = self.create_project()
key = ProjectKey.objects.get_or_create(use_case=UseCase.PROFILING.value, project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
response = self.client.put(url, {"name": "hello world"})
assert response.status_code == 404
# Superuser can update
self.login_as(user=self.superuser, superuser=True)
response = self.client.put(url, {"name": "hello world"})
assert response.status_code == 200
def test_cannot_upgrade_to_internal(self) -> None:
"""PUT request ignores use case field"""
project = self.create_project()
key = ProjectKey.objects.get_or_create(use_case=UseCase.USER.value, project=project)[0]
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-key-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"key_id": key.public_key,
},
)
self.client.put(url, {"useCase": "profiling", "name": "updated"})
updated = ProjectKey.objects.get(pk=key.id)
assert updated.label == "updated"
assert updated.use_case == UseCase.USER.value
| UpdateProjectKeyTest |
python | pytorch__pytorch | test/distributed/test_c10d_functional_native.py | {
"start": 23409,
"end": 25133
} | class ____(TestCase):
def setUp(self):
super().setUp()
if not dist.is_initialized():
self.rank = 0
self.world_size = 2
store = FakeStore()
dist.init_process_group(
backend="fake",
world_size=self.world_size,
rank=self.rank,
store=store,
)
def tearDown(self):
dist.destroy_process_group()
@fresh_cache()
def _test_inductor_all_reduce_cpu(self, cpp_wrapper=False):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
ar0 = funcol.all_reduce(buf0, "avg", "0")
ar0 = funcol.wait_tensor(ar0)
return ar0
arg = torch.rand(4, 4, device="cpu")
with torch._inductor.config.patch({"cpp_wrapper": cpp_wrapper}):
compiled = torch.compile(func)
_, (code,) = run_and_get_code(compiled, arg)
include_ops = (
[
"aoti_torch_cpu__c10d_functional_all_reduce_",
"aoti_torch_cpu__c10d_functional_wait_tensor",
]
if cpp_wrapper
else [
"torch.ops._c10d_functional.all_reduce_.default",
"torch.ops._c10d_functional.wait_tensor.default",
]
)
for op in include_ops:
self.assertIn(op, code)
# Test aoti
AOTIRunnerUtil.run(func, (arg,))
torch.cpu.synchronize()
def test_inductor_all_reduce_cpu(self):
self._test_inductor_all_reduce_cpu(cpp_wrapper=False)
self._test_inductor_all_reduce_cpu(cpp_wrapper=True)
| CompileTestCPU |
python | wandb__wandb | wandb/vendor/pygments/lexers/data.py | {
"start": 535,
"end": 852
} | class ____(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
| YamlLexerContext |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 50149,
"end": 51339
} | class ____(sgqlc.types.Enum):
"""The permissions available for repository creation on an
Organization.
Enumeration Choices:
* `ALL`: All organization members are restricted from creating any
repositories.
* `INTERNAL`: All organization members are restricted from
creating internal repositories.
* `NONE`: All organization members are allowed to create any
repositories.
* `PRIVATE`: All organization members are restricted from creating
private repositories.
* `PRIVATE_INTERNAL`: All organization members are restricted from
creating private or internal repositories.
* `PUBLIC`: All organization members are restricted from creating
public repositories.
* `PUBLIC_INTERNAL`: All organization members are restricted from
creating public or internal repositories.
* `PUBLIC_PRIVATE`: All organization members are restricted from
creating public or private repositories.
"""
__schema__ = github_schema
__choices__ = ("ALL", "INTERNAL", "NONE", "PRIVATE", "PRIVATE_INTERNAL", "PUBLIC", "PUBLIC_INTERNAL", "PUBLIC_PRIVATE")
| OrgUpdateMemberRepositoryCreationPermissionAuditEntryVisibility |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/depends_on_develop/package.py | {
"start": 218,
"end": 451
} | class ____(Package):
homepage = "example.com"
url = "fake.com"
version("main", branch="main")
version("0.0.0", sha256="0123456789abcdef0123456789abcdef")
depends_on("develop-branch-version@develop")
| DependsOnDevelop |
python | doocs__leetcode | solution/1600-1699/1669.Merge In Between Linked Lists/Solution.py | {
"start": 151,
"end": 534
} | class ____:
def mergeInBetween(
self, list1: ListNode, a: int, b: int, list2: ListNode
) -> ListNode:
p = q = list1
for _ in range(a - 1):
p = p.next
for _ in range(b):
q = q.next
p.next = list2
while p.next:
p = p.next
p.next = q.next
q.next = None
return list1
| Solution |
python | scrapy__scrapy | tests/test_exporters.py | {
"start": 22084,
"end": 23103
} | class ____:
item_class: type = MyItem
def setup_method(self):
if self.item_class is None:
pytest.skip("item class is None")
def test_exporter_custom_serializer(self):
class CustomItemExporter(BaseItemExporter):
def serialize_field(self, field, name, value):
if name == "age":
return str(int(value) + 1)
return super().serialize_field(field, name, value)
def export_item(self, item: Any) -> None:
pass
i = self.item_class(name="John", age="22")
a = ItemAdapter(i)
ie = CustomItemExporter()
assert ie.serialize_field(a.get_field_meta("name"), "name", a["name"]) == "John"
assert ie.serialize_field(a.get_field_meta("age"), "age", a["age"]) == "23"
i2 = {"name": "John", "age": "22"}
assert ie.serialize_field({}, "name", i2["name"]) == "John"
assert ie.serialize_field({}, "age", i2["age"]) == "23"
| TestCustomExporterItem |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/intltool.py | {
"start": 3653,
"end": 3738
} | class ____(Task.Task):
run_str = '${MSGFMT} -o ${TGT} ${SRC}'
color = 'BLUE'
| po |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 463444,
"end": 464689
} | class ____(Response):
"""
Response of frames.get_source_ids_for_dataview endpoint.
:param source_ids: Unique source ids for the dataset version
:type source_ids: Sequence[str]
"""
_service = "frames"
_action = "get_source_ids_for_dataview"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"source_ids": {
"description": "Unique source ids for the dataset version",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, source_ids=None, **kwargs):
super(GetSourceIdsForDataviewResponse, self).__init__(**kwargs)
self.source_ids = source_ids
@schema_property("source_ids")
def source_ids(self):
return self._property_source_ids
@source_ids.setter
def source_ids(self, value):
if value is None:
self._property_source_ids = None
return
self.assert_isinstance(value, "source_ids", (list, tuple))
self.assert_isinstance(value, "source_ids", six.string_types, is_array=True)
self._property_source_ids = value
| GetSourceIdsForDataviewResponse |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 21406,
"end": 21789
} | class ____(LocalizableStreamlitException):
"""Exception raised when an invalid size value is provided."""
def __init__(self, size: Any) -> None:
super().__init__(
"Invalid size value: {size}. Size must be either an integer (pixels), "
"'stretch', 'small', 'medium', or 'large'.",
size=repr(size),
)
| StreamlitInvalidSizeError |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops.py | {
"start": 26984,
"end": 32765
} | class ____(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
@compatibility(TF2)
Although it is a legacy `compat.v1` API, this symbol is compatible with eager
execution and `tf.function`.
To switch to TF2 APIs, move to using either
`tf.initializers.variance_scaling` or `tf.keras.initializers.VarianceScaling`
(neither from `compat.v1`) and
pass the dtype when calling the initializer.
#### Structural Mapping to TF2
Before:
```python
initializer = tf.compat.v1.variance_scaling_initializer(
scale=scale,
mode=mode,
distribution=distribution
seed=seed,
dtype=dtype)
weight_one = tf.Variable(initializer(shape_one))
weight_two = tf.Variable(initializer(shape_two))
```
After:
```python
initializer = tf.keras.initializers.VarianceScaling(
scale=scale,
mode=mode,
distribution=distribution
seed=seed)
weight_one = tf.Variable(initializer(shape_one, dtype=dtype))
weight_two = tf.Variable(initializer(shape_two, dtype=dtype))
```
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :----------------- | :-------------- | :------------------------- |
| `scale` | `scale` | No change to defaults |
| `mode` | `mode` | No change to defaults |
| `distribution` | `distribution` | No change to defaults. |
: : : 'normal' maps to 'truncated_normal' :
| `seed` | `seed` | |
| `dtype` | `dtype` | The TF2 api only takes it |
: : : as a `__call__` arg, not a constructor arg. :
| `partition_info` | - | (`__call__` arg in TF1) Not supported |
@end_compatibility
With `distribution="truncated_normal" or "untruncated_normal"`,
samples are drawn from a truncated/untruncated normal
distribution with a mean of zero and a standard deviation (after truncation,
if used) `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: Default data type, used if no `dtype` argument is provided when
calling the initializer. Only floating point types are supported.
Raises:
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
@deprecated_args(None,
"Call initializer instance with the dtype argument instead "
"of passing it to the constructor", "dtype")
@deprecated_arg_values(
None,
"`normal` is a deprecated alias for `truncated_normal`",
distribution="normal")
def __init__(self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None,
dtype=dtypes.float32):
if scale <= 0.:
raise ValueError("Argument `scale` must be a positive float. Received: "
f"{scale}")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Argument `mode` should be one of ('fan_in', 'fan_out', "
f"'fan_avg'). Received: {mode}")
distribution = distribution.lower()
if distribution not in {
"normal", "uniform", "truncated_normal", "untruncated_normal"
}:
raise ValueError("Argument `distribution` should be one of ('normal', "
"uniform', 'truncated_normal', 'untruncated_normal'). "
f"Received: {distribution}")
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale = self.scale
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
fan_in, fan_out = _compute_fans(scale_shape)
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "normal" or self.distribution == "truncated_normal":
# constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale) / .87962566103423978
return random_ops.truncated_normal(
shape, 0.0, stddev, dtype, seed=self.seed)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return random_ops.random_normal(shape, 0.0, stddev, dtype, seed=self.seed)
else:
limit = math.sqrt(3.0 * scale)
return random_ops.random_uniform(
shape, -limit, limit, dtype, seed=self.seed)
def get_config(self):
return {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
"seed": self.seed,
"dtype": self.dtype.name
}
@tf_export(v1=["initializers.orthogonal", "orthogonal_initializer"])
@deprecation.deprecated_endpoints("initializers.orthogonal",
"orthogonal_initializer")
| VarianceScaling |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 15151,
"end": 15495
} | class ____(AnalysisPass):
"""
Preserves the IR in the metadata
"""
_name = "preserve_ir"
def __init__(self):
AnalysisPass.__init__(self)
def run_pass(self, state):
state.metadata['preserved_ir'] = state.func_ir.copy()
return False
@register_pass(mutates_CFG=False, analysis_only=True)
| PreserveIR |
python | PyCQA__pylint | tests/functional/m/misplaced_bare_raise.py | {
"start": 1433,
"end": 1771
} | class ____:
def __enter__(self):
return self
def __exit__(self, *args):
raise
def test_dont_trigger_in_finally_clause_found_in_exception_handler():
try:
raise ValueError('bad value')
except ValueError:
try:
raise IOError('failed')
finally:
raise
| ContextManager |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/character.py | {
"start": 2737,
"end": 25226
} | class ____(TextSplitter):
"""Splitting text by recursively look at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(
self,
separators: list[str] | None = None,
keep_separator: bool | Literal["start", "end"] = True, # noqa: FBT001,FBT002
is_separator_regex: bool = False, # noqa: FBT001,FBT002
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
self._is_separator_regex = is_separator_regex
def _split_text(self, text: str, separators: list[str]) -> list[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
separator_ = _s if self._is_separator_regex else re.escape(_s)
if not _s:
separator = _s
break
if re.search(separator_, text):
separator = _s
new_separators = separators[i + 1 :]
break
separator_ = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex(
text, separator_, keep_separator=self._keep_separator
)
# Now go merging things, recursively splitting longer texts.
good_splits = []
separator_ = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
good_splits.append(s)
else:
if good_splits:
merged_text = self._merge_splits(good_splits, separator_)
final_chunks.extend(merged_text)
good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if good_splits:
merged_text = self._merge_splits(good_splits, separator_)
final_chunks.extend(merged_text)
return final_chunks
def split_text(self, text: str) -> list[str]:
"""Split the input text into smaller chunks based on predefined separators.
Args:
text: The input text to be split.
Returns:
A list of text chunks obtained after splitting.
"""
return self._split_text(text, self._separators)
@classmethod
def from_language(
cls, language: Language, **kwargs: Any
) -> RecursiveCharacterTextSplitter:
"""Return an instance of this class based on a specific language.
This method initializes the text splitter with language-specific separators.
Args:
language: The language to configure the text splitter for.
**kwargs: Additional keyword arguments to customize the splitter.
Returns:
An instance of the text splitter configured for the specified language.
"""
separators = cls.get_separators_for_language(language)
return cls(separators=separators, is_separator_regex=True, **kwargs)
@staticmethod
def get_separators_for_language(language: Language) -> list[str]:
"""Retrieve a list of separators specific to the given language.
Args:
language: The language for which to get the separators.
Returns:
A list of separators appropriate for the specified language.
"""
if language in {Language.C, Language.CPP}:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
"\nint ",
"\nfloat ",
"\ndouble ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.GO:
return [
# Split along function definitions
"\nfunc ",
"\nvar ",
"\nconst ",
"\ntype ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.JAVA:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.KOTLIN:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\ninternal ",
"\ncompanion ",
"\nfun ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nwhen ",
"\ncase ",
"\nelse ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.TS:
return [
"\nenum ",
"\ninterface ",
"\nnamespace ",
"\ntype ",
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.PHP:
return [
# Split along function definitions
"\nfunction ",
# Split along class definitions
"\nclass ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.PROTO:
return [
# Split along message definitions
"\nmessage ",
# Split along service definitions
"\nservice ",
# Split along enum definitions
"\nenum ",
# Split along option definitions
"\noption ",
# Split along import statements
"\nimport ",
# Split along syntax declarations
"\nsyntax ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.PYTHON:
return [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.RST:
return [
# Split along section titles
"\n=+\n",
"\n-+\n",
"\n\\*+\n",
# Split along directive markers
"\n\n.. *\n\n",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.RUBY:
return [
# Split along method definitions
"\ndef ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\nfor ",
"\ndo ",
"\nbegin ",
"\nrescue ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.ELIXIR:
return [
# Split along method function and module definition
"\ndef ",
"\ndefp ",
"\ndefmodule ",
"\ndefprotocol ",
"\ndefmacro ",
"\ndefmacrop ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\ncase ",
"\ncond ",
"\nwith ",
"\nfor ",
"\ndo ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.RUST:
return [
# Split along function definitions
"\nfn ",
"\nconst ",
"\nlet ",
# Split along control flow statements
"\nif ",
"\nwhile ",
"\nfor ",
"\nloop ",
"\nmatch ",
"\nconst ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.SCALA:
return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nmatch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.SWIFT:
return [
# Split along function definitions
"\nfunc ",
# Split along class definitions
"\nclass ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.MARKDOWN:
return [
# First, try to split along Markdown headings (starting with level 2)
"\n#{1,6} ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n",
# Horizontal lines
"\n\\*\\*\\*+\n",
"\n---+\n",
"\n___+\n",
# Note that this splitter doesn't handle horizontal lines defined
# by *three or more* of ***, ---, or ___, but this is not handled
"\n\n",
"\n",
" ",
"",
]
if language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\\chapter{",
"\n\\\\section{",
"\n\\\\subsection{",
"\n\\\\subsubsection{",
# Now split by environments
"\n\\\\begin{enumerate}",
"\n\\\\begin{itemize}",
"\n\\\\begin{description}",
"\n\\\\begin{list}",
"\n\\\\begin{quote}",
"\n\\\\begin{quotation}",
"\n\\\\begin{verse}",
"\n\\\\begin{verbatim}",
# Now split by math environments
"\n\\\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
if language == Language.HTML:
return [
# First, try to split along HTML tags
"<body",
"<div",
"<p",
"<br",
"<li",
"<h1",
"<h2",
"<h3",
"<h4",
"<h5",
"<h6",
"<span",
"<table",
"<tr",
"<td",
"<th",
"<ul",
"<ol",
"<header",
"<footer",
"<nav",
# Head
"<head",
"<style",
"<script",
"<meta",
"<title",
"",
]
if language == Language.CSHARP:
return [
"\ninterface ",
"\nenum ",
"\nimplements ",
"\ndelegate ",
"\nevent ",
# Split along class definitions
"\nclass ",
"\nabstract ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
"\nreturn ",
# Split along control flow statements
"\nif ",
"\ncontinue ",
"\nfor ",
"\nforeach ",
"\nwhile ",
"\nswitch ",
"\nbreak ",
"\ncase ",
"\nelse ",
# Split by exceptions
"\ntry ",
"\nthrow ",
"\nfinally ",
"\ncatch ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.SOL:
return [
# Split along compiler information definitions
"\npragma ",
"\nusing ",
# Split along contract definitions
"\ncontract ",
"\ninterface ",
"\nlibrary ",
# Split along method definitions
"\nconstructor ",
"\ntype ",
"\nfunction ",
"\nevent ",
"\nmodifier ",
"\nerror ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo while ",
"\nassembly ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.COBOL:
return [
# Split along divisions
"\nIDENTIFICATION DIVISION.",
"\nENVIRONMENT DIVISION.",
"\nDATA DIVISION.",
"\nPROCEDURE DIVISION.",
# Split along sections within DATA DIVISION
"\nWORKING-STORAGE SECTION.",
"\nLINKAGE SECTION.",
"\nFILE SECTION.",
# Split along sections within PROCEDURE DIVISION
"\nINPUT-OUTPUT SECTION.",
# Split along paragraphs and common statements
"\nOPEN ",
"\nCLOSE ",
"\nREAD ",
"\nWRITE ",
"\nIF ",
"\nELSE ",
"\nMOVE ",
"\nPERFORM ",
"\nUNTIL ",
"\nVARYING ",
"\nACCEPT ",
"\nDISPLAY ",
"\nSTOP RUN.",
# Split by the normal type of lines
"\n",
" ",
"",
]
if language == Language.LUA:
return [
# Split along variable and table definitions
"\nlocal ",
# Split along function definitions
"\nfunction ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nrepeat ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.HASKELL:
return [
# Split along function definitions
"\nmain :: ",
"\nmain = ",
"\nlet ",
"\nin ",
"\ndo ",
"\nwhere ",
"\n:: ",
"\n= ",
# Split along type declarations
"\ndata ",
"\nnewtype ",
"\ntype ",
"\n:: ",
# Split along module declarations
"\nmodule ",
# Split along import statements
"\nimport ",
"\nqualified ",
"\nimport qualified ",
# Split along typeclass declarations
"\nclass ",
"\ninstance ",
# Split along case expressions
"\ncase ",
# Split along guards in function definitions
"\n| ",
# Split along record field declarations
"\ndata ",
"\n= {",
"\n, ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
if language == Language.POWERSHELL:
return [
# Split along function definitions
"\nfunction ",
# Split along parameter declarations (escape parentheses)
"\nparam ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nfor ",
"\nwhile ",
"\nswitch ",
# Split along class definitions (for PowerShell 5.0 and above)
"\nclass ",
# Split along try-catch-finally blocks
"\ntry ",
"\ncatch ",
"\nfinally ",
# Split by normal lines and empty spaces
"\n\n",
"\n",
" ",
"",
]
if language == Language.VISUALBASIC6:
vis = r"(?:Public|Private|Friend|Global|Static)\s+"
return [
# Split along definitions
rf"\n(?!End\s){vis}?Sub\s+",
rf"\n(?!End\s){vis}?Function\s+",
rf"\n(?!End\s){vis}?Property\s+(?:Get|Let|Set)\s+",
rf"\n(?!End\s){vis}?Type\s+",
rf"\n(?!End\s){vis}?Enum\s+",
# Split along control flow statements
r"\n(?!End\s)If\s+",
r"\nElseIf\s+",
r"\nElse\s+",
r"\nSelect\s+Case\s+",
r"\nCase\s+",
r"\nFor\s+",
r"\nDo\s+",
r"\nWhile\s+",
r"\nWith\s+",
# Split by the normal type of lines
r"\n\n",
r"\n",
" ",
"",
]
if language in Language._value2member_map_:
msg = f"Language {language} is not implemented yet!"
raise ValueError(msg)
msg = (
f"Language {language} is not supported! Please choose from {list(Language)}"
)
raise ValueError(msg)
| RecursiveCharacterTextSplitter |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_types.py | {
"start": 12961,
"end": 14067
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
def test_default(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_truncate_microseconds(self):
dt = datetime.time(12, 0, 0, 125)
dt_out = datetime.time(12, 0, 0)
eq_(str(dt), "12:00:00.000125")
sldt = sqlite.TIME(truncate_microseconds=True)
bp = sldt.bind_processor(None)
eq_(bp(dt), "12:00:00")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt_out)
def test_custom_format(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE(
storage_format="%(year)04d%(month)02d%(day)02d",
regexp=r"(\d{4})(\d{2})(\d{2})",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "20080627")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
| TimeTest |
python | sphinx-doc__sphinx | doc/development/tutorials/examples/helloworld.py | {
"start": 214,
"end": 433
} | class ____(SphinxRole):
"""A role to say hello!"""
def run(self) -> tuple[list[nodes.Node], list[nodes.system_message]]:
node = nodes.inline(text=f'Hello {self.text}!')
return [node], []
| HelloRole |
python | celery__celery | t/unit/tasks/test_chord.py | {
"start": 1138,
"end": 1492
} | class ____(TSR):
def _failed_join_report(self):
return iter([])
@contextmanager
def patch_unlock_retry(app):
unlock = app.tasks['celery.chord_unlock']
retry = Mock()
retry.return_value = Retry()
prev, unlock.retry = unlock.retry, retry
try:
yield unlock, retry
finally:
unlock.retry = prev
| TSRNoReport |
python | pytorch__pytorch | test/jit/fixtures_srcs/fixtures_src.py | {
"start": 1607,
"end": 1753
} | class ____(torch.nn.Module):
def forward(self, x):
out = torch.zeros_like(x)
return out.random(0, 10)
| TestVersionedRandomFuncV10 |
python | django__django | tests/migrations/migrations_test_apps/lookuperror_b/migrations/0001_initial.py | {
"start": 43,
"end": 525
} | class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="B1",
fields=[
(
"id",
models.AutoField(
serialize=False,
auto_created=True,
primary_key=True,
verbose_name="ID",
),
),
],
),
]
| Migration |
python | pytorch__pytorch | test/dynamo/test_fx_annotate.py | {
"start": 558,
"end": 13944
} | class ____(torch._dynamo.test_case.TestCase):
def test_annotations(self):
class Mod(torch.nn.Module):
def forward(self, x):
with fx_traceback.annotate({"pp_stage": 0}):
with fx_traceback.annotate({"fdsp_bucket": 0}):
sin = torch.sin(x)
sub = sin - 2
with fx_traceback.annotate({"cuda_stream": 2, "fsdp_bucket": 1}):
mul = sub * 2
div = mul / 3
return div
m = Mod()
backend = AotEagerAndRecordGraphs()
opt_m = torch.compile(m, backend=backend, fullgraph=True)
x = torch.randn(10, requires_grad=True)
opt_m(x).sum().backward()
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
dynamo_metadata = fx_traceback._get_custom_metadata(backend.graphs[0])
fw_metadata = fx_traceback._get_custom_metadata(backend.fw_graphs[0])
bw_metadata = fx_traceback._get_custom_metadata(backend.bw_graphs[0])
self.assertExpectedInline(
str(dynamo_metadata),
"""\
('placeholder', 'l_x_', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'sin', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'sub', {'pp_stage': 0})
('call_function', 'mul', {'pp_stage': 0, 'cuda_stream': 2, 'fsdp_bucket': 1})""", # noqa: B950
)
self.assertExpectedInline(
str(fw_metadata),
"""\
('call_function', 'sin', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'sub', {'pp_stage': 0})
('call_function', 'mul', {'pp_stage': 0, 'cuda_stream': 2, 'fsdp_bucket': 1})""", # noqa: B950
)
self.assertExpectedInline(
str(bw_metadata),
"""\
('call_function', 'mul_1', {'pp_stage': 0, 'cuda_stream': 2, 'fsdp_bucket': 1})
('call_function', 'cos', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'mul_2', {'pp_stage': 0, 'fdsp_bucket': 0})""", # noqa: B950
)
def test_activation_checkpointing(self):
@checkpoint_wrapper
def gn(x):
return torch.sin(x)
def fn(x):
with fx_traceback.annotate({"ac_sin": 0}):
ac = gn(x)
return torch.sigmoid(ac)
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(10, requires_grad=True)
opt_fn(x).sum().backward()
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
dynamo_metadata = fx_traceback._get_custom_metadata(backend.graphs[0])
fw_metadata = fx_traceback._get_custom_metadata(backend.fw_graphs[0])
bw_metadata = fx_traceback._get_custom_metadata(backend.bw_graphs[0])
self.assertExpectedInline(
str(dynamo_metadata),
"""\
('placeholder', 'l_x_', {'ac_sin': 0})
('get_attr', 'wrap_body_0', {'ac_sin': 0})
[('placeholder', 'l_x_', {'ac_sin': 0}), ('call_function', 'sin', {'ac_sin': 0}), ('output', 'output', {'ac_sin': 0})]
('call_function', 'tag_activation_checkpoint', {'ac_sin': 0})
('call_function', 'ac', {'ac_sin': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(fw_metadata),
"""('call_function', 'sin', {'ac_sin': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(bw_metadata),
"""\
('call_function', 'cos', {'ac_sin': 0})
('call_function', 'mul', {'ac_sin': 0})""", # noqa: B950
)
def test_activation_checkpointing_annotation_inside(self):
@checkpoint_wrapper
def gn(x):
x = x + 1
with fx_traceback.annotate({"stage": 0}):
p = torch.sin(x)
return p + 1
def fn(x):
ac = gn(x)
return torch.sigmoid(ac)
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(10, requires_grad=True)
opt_fn(x).sum().backward()
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
dynamo_metadata = fx_traceback._get_custom_metadata(backend.graphs[0])
fw_metadata = fx_traceback._get_custom_metadata(backend.fw_graphs[0])
bw_metadata = fx_traceback._get_custom_metadata(backend.bw_graphs[0])
self.assertExpectedInline(
str(dynamo_metadata),
"""[('call_function', 'p', {'stage': 0})]""", # noqa: B950
)
self.assertExpectedInline(
str(fw_metadata),
"""('call_function', 'sin', {'stage': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(bw_metadata),
"""\
('call_function', 'cos', {'stage': 0})
('call_function', 'mul', {'stage': 0})""", # noqa: B950
)
@requires_cuda_and_triton
def test_ac_flex_attention(self):
def _squared(score, b, h, m, n):
return score * score
def mask_mod(b, h, q, k):
return q >= 0
a = 12
b = 64
block_mask = create_block_mask(mask_mod, None, None, a * b, a * b)
def gn(x: torch.Tensor):
with fx_traceback.annotate({"compile_inductor": 0}):
return flex_attention(
x, x, x, block_mask=block_mask, score_mod=_squared
)
def fn(x):
x = torch.sin(x)
x = gn(x)
return torch.cos(x)
x = torch.randn(
1,
1,
a * b,
b,
dtype=torch.bfloat16,
device="cuda",
requires_grad=True,
)
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
opt_fn(x).sum().backward()
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
dynamo_metadata = fx_traceback._get_custom_metadata(backend.graphs[0])
fw_metadata = fx_traceback._get_custom_metadata(backend.fw_graphs[0])
bw_metadata = fx_traceback._get_custom_metadata(backend.bw_graphs[0])
self.assertExpectedInline(
str(dynamo_metadata),
"""\
('placeholder', 'l_gn_closure_1_cell_contents_kv_indices', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_kv_num_blocks', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_full_kv_num_blocks', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_full_kv_indices', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_q_num_blocks', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_q_indices', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_full_q_num_blocks', {'compile_inductor': 0})
('placeholder', 'l_gn_closure_1_cell_contents_full_q_indices', {'compile_inductor': 0})
('get_attr', 'score_mod_0', {'compile_inductor': 0})
[('placeholder', 'child', {'compile_inductor': 0}), ('placeholder', 'child_1', {'compile_inductor': 0}), ('placeholder', 'child_2', {'compile_inductor': 0}), ('placeholder', 'child_3', {'compile_inductor': 0}), ('placeholder', 'child_4', {'compile_inductor': 0}), ('call_function', 'mul', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('get_attr', 'mask_fn_0', {'compile_inductor': 0})
[('placeholder', 'child', {'compile_inductor': 0}), ('placeholder', 'child_1', {'compile_inductor': 0}), ('placeholder', 'child_2', {'compile_inductor': 0}), ('placeholder', 'child_3', {'compile_inductor': 0}), ('call_function', 'ge', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('call_function', 'flex_attention', {'compile_inductor': 0})
('call_function', 'out', {'compile_inductor': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(fw_metadata),
"""\
('get_attr', 'sdpa_score0', {'compile_inductor': 0})
[('placeholder', 'arg0_1', {'compile_inductor': 0}), ('placeholder', 'arg1_1', {'compile_inductor': 0}), ('placeholder', 'arg2_1', {'compile_inductor': 0}), ('placeholder', 'arg3_1', {'compile_inductor': 0}), ('placeholder', 'arg4_1', {'compile_inductor': 0}), ('call_function', 'mul', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('get_attr', 'sdpa_mask0', {'compile_inductor': 0})
[('placeholder', 'arg0_1', {'compile_inductor': 0}), ('placeholder', 'arg1_1', {'compile_inductor': 0}), ('placeholder', 'arg2_1', {'compile_inductor': 0}), ('placeholder', 'arg3_1', {'compile_inductor': 0}), ('call_function', 'ge', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('call_function', 'flex_attention', {'compile_inductor': 0})
('call_function', 'getitem', {'compile_inductor': 0})
('call_function', 'getitem_1', {'compile_inductor': 0})
('call_function', 'detach_1', {'compile_inductor': 0})
('call_function', 'detach_3', {'compile_inductor': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(bw_metadata),
"""\
('placeholder', 'getitem', {'compile_inductor': 0})
('placeholder', 'detach_3', {'compile_inductor': 0})
('call_function', 'zeros', {'compile_inductor': 0})
('call_function', 'detach', {'compile_inductor': 0})
('call_function', 'detach_2', {'compile_inductor': 0})
('get_attr', 'fw_graph0', {'compile_inductor': 0})
[('placeholder', 'arg0_1', {'compile_inductor': 0}), ('placeholder', 'arg1_1', {'compile_inductor': 0}), ('placeholder', 'arg2_1', {'compile_inductor': 0}), ('placeholder', 'arg3_1', {'compile_inductor': 0}), ('placeholder', 'arg4_1', {'compile_inductor': 0}), ('call_function', 'mul', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('get_attr', 'joint_graph0', {'compile_inductor': 0})
[('placeholder', 'arg0_1', {'compile_inductor': 0}), ('placeholder', 'arg1_1', {'compile_inductor': 0}), ('placeholder', 'arg2_1', {'compile_inductor': 0}), ('placeholder', 'arg3_1', {'compile_inductor': 0}), ('placeholder', 'arg4_1', {'compile_inductor': 0}), ('placeholder', 'arg5_1', {'compile_inductor': 0}), ('call_function', 'mul_1', {'compile_inductor': 0}), ('call_function', 'mul_2', {'compile_inductor': 0}), ('call_function', 'add', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('get_attr', 'mask_graph0', {'compile_inductor': 0})
[('placeholder', 'arg0_1', {'compile_inductor': 0}), ('placeholder', 'arg1_1', {'compile_inductor': 0}), ('placeholder', 'arg2_1', {'compile_inductor': 0}), ('placeholder', 'arg3_1', {'compile_inductor': 0}), ('call_function', 'ge', {'compile_inductor': 0}), ('output', 'output', {'compile_inductor': 0})]
('call_function', 'flex_attention_backward', {'compile_inductor': 0})
('call_function', 'getitem_3', {'compile_inductor': 0})
('call_function', 'getitem_4', {'compile_inductor': 0})
('call_function', 'getitem_5', {'compile_inductor': 0})""", # noqa: B950
)
def test_as_decorator(self):
class Mod(torch.nn.Module):
@fx_traceback.annotate({"fdsp_bucket": 0})
def sin(self, x):
return torch.sin(x)
def forward(self, x):
with fx_traceback.annotate({"pp_stage": 0}):
sin = self.sin(x)
sub = sin - 2
mul = sub * 2
div = mul / 3
return div
m = Mod()
backend = AotEagerAndRecordGraphs()
opt_m = torch.compile(m, backend=backend, fullgraph=True)
x = torch.randn(10, requires_grad=True)
m(x)
opt_m(x).sum().backward()
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
dynamo_metadata = fx_traceback._get_custom_metadata(backend.graphs[0])
fw_metadata = fx_traceback._get_custom_metadata(backend.fw_graphs[0])
bw_metadata = fx_traceback._get_custom_metadata(backend.bw_graphs[0])
self.assertExpectedInline(
str(dynamo_metadata),
"""\
('placeholder', 'l_x_', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'sin', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'sub', {'pp_stage': 0})
('call_function', 'mul', {'pp_stage': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(fw_metadata),
"""\
('call_function', 'sin', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'sub', {'pp_stage': 0})
('call_function', 'mul', {'pp_stage': 0})""", # noqa: B950
)
self.assertExpectedInline(
str(bw_metadata),
"""\
('call_function', 'mul_1', {'pp_stage': 0})
('call_function', 'cos', {'pp_stage': 0, 'fdsp_bucket': 0})
('call_function', 'mul_2', {'pp_stage': 0, 'fdsp_bucket': 0})""", # noqa: B950
)
def test_graph_break(self):
def fn(x):
with torch.fx.traceback.annotate({"pp_stage": 0}):
x = torch.sin(x)
torch._dynamo.graph_break()
x = torch.cos(x)
return x
opt_fn = torch.compile(fn, backend="eager")
x = torch.randn(10, requires_grad=True)
self.assertEqual(fn(x), opt_fn(x))
if __name__ == "__main__":
run_tests()
| AnnotateTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver44.py | {
"start": 189,
"end": 395
} | class ____[S]:
def __new__[T](cls, func: Callable[[T], S], iter1: Iterable[T]) -> Self: ...
def func(a: list[int | None]):
b = map(lambda x: x or 0, a)
reveal_type(b, expected_text="map[int]")
| map |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_application_fees.py | {
"start": 9805,
"end": 15540
} | class ____(TestCase):
@HttpMocker()
def test_given_no_state_when_read_then_use_application_fees_endpoint(self, http_mocker: HttpMocker) -> None:
cursor_value = int(_A_START_DATE.timestamp()) + 1
http_mocker.get(
_application_fees_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_application_fees_response().with_record(_an_application_fee().with_cursor(cursor_value)).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE), _NO_STATE)
most_recent_state = output.most_recent_state
assert most_recent_state.stream_descriptor == StreamDescriptor(name=_STREAM_NAME)
assert most_recent_state.stream_state.updated == str(cursor_value)
@HttpMocker()
def test_given_state_when_read_then_query_events_using_types_and_state_value_plus_1(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=40)
state_datetime = _NOW - timedelta(days=5)
cursor_value = int(state_datetime.timestamp())
http_mocker.get(
_events_request().with_created_gte(state_datetime).with_created_lte(_NOW).with_limit(100).with_types(_EVENT_TYPES).build(),
_events_response()
.with_record(_an_event().with_cursor(cursor_value).with_field(_DATA_FIELD, _an_application_fee().build()))
.build(),
)
output = self._read(
_config().with_start_date(start_date),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
most_recent_state = output.most_recent_state
assert most_recent_state.stream_descriptor == StreamDescriptor(name=_STREAM_NAME)
assert most_recent_state.stream_state.updated == str(cursor_value)
@HttpMocker()
def test_given_state_and_pagination_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
state_datetime = _NOW - timedelta(days=5)
http_mocker.get(
_events_request().with_created_gte(state_datetime).with_created_lte(_NOW).with_limit(100).with_types(_EVENT_TYPES).build(),
_events_response()
.with_pagination()
.with_record(_an_event().with_id("last_record_id_from_first_page").with_field(_DATA_FIELD, _an_application_fee().build()))
.build(),
)
http_mocker.get(
_events_request()
.with_starting_after("last_record_id_from_first_page")
.with_created_gte(state_datetime)
.with_created_lte(_NOW)
.with_limit(100)
.with_types(_EVENT_TYPES)
.build(),
_events_response().with_record(self._an_application_fee_event()).build(),
)
output = self._read(
_config(),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
assert len(output.records) == 2
@HttpMocker()
def test_given_state_and_small_slice_range_when_read_then_perform_multiple_queries(self, http_mocker: HttpMocker) -> None:
state_datetime = _NOW - timedelta(days=5)
slice_range = timedelta(days=3)
slice_datetime = state_datetime + slice_range
http_mocker.get(
_events_request()
.with_created_gte(state_datetime)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.with_types(_EVENT_TYPES)
.build(),
_events_response().with_record(self._an_application_fee_event()).build(),
)
http_mocker.get(
_events_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).with_types(_EVENT_TYPES).build(),
_events_response().with_record(self._an_application_fee_event()).with_record(self._an_application_fee_event()).build(),
)
output = self._read(
_config().with_start_date(_NOW - timedelta(days=30)).with_slice_range_in_days(slice_range.days),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
assert len(output.records) == 3
@HttpMocker()
def test_given_state_earlier_than_30_days_when_read_then_query_events_using_types_and_event_lower_boundary(
self, http_mocker: HttpMocker
) -> None:
# this seems odd as we would miss some data between start_date and events_lower_boundary. In that case, we should hit the
# application fees endpoint
start_date = _NOW - timedelta(days=40)
state_value = _NOW - timedelta(days=39)
events_lower_boundary = _NOW - timedelta(days=30)
http_mocker.get(
_events_request()
.with_created_gte(events_lower_boundary)
.with_created_lte(_NOW)
.with_limit(100)
.with_types(_EVENT_TYPES)
.build(),
_events_response().with_record(self._an_application_fee_event()).build(),
)
self._read(
_config().with_start_date(start_date),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_value.timestamp())}).build(),
)
# request matched http_mocker
def _an_application_fee_event(self) -> RecordBuilder:
return _an_event().with_field(_DATA_FIELD, _an_application_fee().build())
def _read(
self, config: ConfigBuilder, state: Optional[List[AirbyteStateMessage]], expecting_exception: bool = False
) -> EntrypointOutput:
return _read(config, SyncMode.incremental, state, expecting_exception)
| IncrementalTest |
python | walkccc__LeetCode | solutions/3552. Grid Teleportation Traversal/3552.py | {
"start": 0,
"end": 1607
} | class ____:
# Similar to 3341. Find Minimum Time to Reach Last Room I
def minMoves(self, matrix: list[str]) -> int:
if matrix[-1][-1] == '#':
return -1
teleportPositions = [[] for _ in range(26)]
for i, row in enumerate(matrix):
for j, c in enumerate(row):
if c not in ('.', '#'):
teleportPositions[ord(c) - ord('A')].append((i, j))
return self._dijkstra(matrix, teleportPositions,
(0, 0), (len(matrix) - 1, len(matrix[0]) - 1))
def _dijkstra(
self,
matrix: list[str],
teleportPositions: list[list[tuple[int, int]]],
src: tuple[int, int],
dst: tuple[int, int],
) -> int:
DIRS = [(0, 1), (1, 0), (0, -1), (-1, 0)]
m = len(matrix)
n = len(matrix[0])
dist = [[math.inf] * n for _ in range(m)]
seen = set()
dist[0][0] = 0
minHeap = [(dist[0][0], src)] # (d, u)
while minHeap:
d, u = heapq.heappop(minHeap)
if u == dst:
return d
i, j = u
if d > dist[i][j]:
continue
c = matrix[i][j]
if c.isupper() and c not in seen:
seen.add(c)
for x, y in teleportPositions[ord(c) - ord('A')]:
if d < dist[x][y]:
dist[x][y] = d
heapq.heappush(minHeap, (d, (x, y)))
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if matrix[x][y] == '#':
continue
if d + 1 < dist[x][y]:
dist[x][y] = d + 1
heapq.heappush(minHeap, (d + 1, (x, y)))
return -1
| Solution |
python | walkccc__LeetCode | solutions/2350. Shortest Impossible Sequence of Rolls/2350.py | {
"start": 0,
"end": 389
} | class ____:
def shortestSequence(self, rolls: list[int], k: int) -> int:
ans = 1 # the the next target length
seen = set()
for roll in rolls:
seen.add(roll)
if len(seen) == k:
# Have all combinations that form `ans` length, and we are going to
# extend the sequence to `ans + 1` length.
ans += 1
seen.clear()
return ans
| Solution |
python | ray-project__ray | python/ray/train/tests/test_predictor.py | {
"start": 2200,
"end": 10350
} | class ____(DummyPredictor):
def _predict_numpy(
self, data: Union[np.ndarray, Dict[str, np.ndarray]], **kwargs
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
return data * self.factor
@classmethod
def preferred_batch_format(cls):
return BatchFormat.NUMPY
def test_serialization():
"""Tests that Predictor instances are not serializable."""
# Class is serializable.
ray.put(DummyPredictor)
# Instance is not serializable.
predictor = DummyPredictor()
with pytest.raises(PredictorNotSerializableException):
ray.put(predictor)
def test_from_checkpoint():
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
assert DummyPredictor.from_checkpoint(checkpoint).factor == 2.0
def test_predict_pandas_with_pandas_data():
"""Data batch level predictor test where both input data and prediction
batch format are pandas dataframes.
"""
input = pd.DataFrame({"x": [1, 2, 3]})
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(DummyPreprocessor())
predictor = DummyPredictor.from_checkpoint(checkpoint)
actual_output = predictor.predict(input)
pd.testing.assert_frame_equal(
actual_output, pd.DataFrame({"predictions": [4.0, 8.0, 12.0]})
)
pd.testing.assert_frame_equal(
predictor.get_preprocessor().inputs[0],
pd.DataFrame({"x": [1, 2, 3]}),
)
pd.testing.assert_frame_equal(
predictor.get_preprocessor().outputs[0],
pd.DataFrame({"x": [2, 4, 6]}),
)
# Test predict with both Numpy and Pandas preprocessor available
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(DummyWithNumpyPreprocessor())
predictor = DummyPredictor.from_checkpoint(checkpoint)
actual_output = predictor.predict(input)
pd.testing.assert_frame_equal(
actual_output, pd.DataFrame({"predictions": [4.0, 8.0, 12.0]})
)
# This Preprocessor has Numpy as the batch format preference.
np.testing.assert_array_equal(
predictor.get_preprocessor().inputs[0], np.array([1, 2, 3])
)
np.testing.assert_array_equal(
predictor.get_preprocessor().outputs[0], np.array([2, 4, 6])
)
def test_predict_numpy_with_numpy_data():
"""Data batch level predictor test where both input data and prediction
batch format are numpy formats.
"""
input = np.array([1, 2, 3])
# Test predict with only Pandas preprocessor
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(DummyPreprocessor())
predictor = DummyPredictor.from_checkpoint(checkpoint)
actual_output = predictor.predict(input)
# Numpy is the preferred batch format for prediction.
# Multiply by 2 from preprocessor, another multiply by 2.0 from predictor
np.testing.assert_array_equal(actual_output, np.array([4.0, 8.0, 12.0]))
# Preprocessing is still done via Pandas path.
pd.testing.assert_frame_equal(
predictor.get_preprocessor().inputs[0],
pd.DataFrame({TENSOR_COLUMN_NAME: [1, 2, 3]}),
)
pd.testing.assert_frame_equal(
predictor.get_preprocessor().outputs[0],
pd.DataFrame({TENSOR_COLUMN_NAME: [2, 4, 6]}),
)
# Test predict with Numpy as preferred batch format for both Predictor and
# Preprocessor.
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(DummyWithNumpyPreprocessor())
predictor = DummyPredictor.from_checkpoint(checkpoint)
actual_output = predictor.predict(input)
np.testing.assert_equal(actual_output, np.array([4.0, 8.0, 12.0]))
np.testing.assert_equal(predictor.get_preprocessor().inputs[0], np.array([1, 2, 3]))
np.testing.assert_equal(
predictor.get_preprocessor().outputs[0], np.array([2, 4, 6])
)
def test_predict_pandas_with_numpy_data():
"""Data batch level predictor test where both input data is numpy format but
predictor only has _predict_pandas implementation.
"""
input = np.array([1, 2, 3])
# Test predict with only Pandas preprocessor
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(DummyPreprocessor())
predictor = DummyPredictor.from_checkpoint(checkpoint)
actual_output = predictor.predict(input)
# Predictor should return in the same format as the input.
# Multiply by 2 from preprocessor, another multiply by 2.0 from predictor
np.testing.assert_array_equal(actual_output, np.array([4.0, 8.0, 12.0]))
# Preprocessing should go through Pandas path.
pd.testing.assert_frame_equal(
predictor.get_preprocessor().inputs[0],
pd.DataFrame({TENSOR_COLUMN_NAME: [1, 2, 3]}),
)
pd.testing.assert_frame_equal(
predictor.get_preprocessor().outputs[0],
pd.DataFrame({TENSOR_COLUMN_NAME: [2, 4, 6]}),
)
# Test predict with both Numpy and Pandas preprocessor available
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(DummyWithNumpyPreprocessor())
predictor = DummyPredictor.from_checkpoint(checkpoint)
actual_output = predictor.predict(input)
np.testing.assert_equal(actual_output, np.array([4.0, 8.0, 12.0]))
# Preprocessor should go through Numpy path since it is the preferred batch type.
np.testing.assert_equal(predictor.get_preprocessor().inputs[0], np.array([1, 2, 3]))
np.testing.assert_equal(
predictor.get_preprocessor().outputs[0], np.array([2, 4, 6])
)
def test_from_udf():
def check_truth(df, all_true=False):
if all_true:
return pd.DataFrame({"bool": [True] * len(df)})
return pd.DataFrame({"bool": df["a"] == df["b"]})
predictor = Predictor.from_pandas_udf(check_truth)
df = pd.DataFrame({"a": [1, 2, 3], "b": [1, 5, 6]})
output = predictor.predict(df)
output = output["bool"].tolist()
assert output == [True, False, False]
output = predictor.predict(df, all_true=True)
output = output["bool"].tolist()
assert output == [True, True, True]
@mock.patch.object(DummyPredictor, "_predict_pandas", return_value=mock.DEFAULT)
def test_kwargs(predict_pandas_mock):
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
predictor = DummyPredictor.from_checkpoint(checkpoint)
input = pd.DataFrame({"x": [1, 2, 3]})
predictor.predict(input, extra_arg=1)
# Second element in call_args is the kwargs.
assert "extra_arg" in predict_pandas_mock.call_args[1]
assert predict_pandas_mock.call_args[1]["extra_arg"] == 1
def test_get_and_set_preprocessor():
"""Test preprocessor can be set and get."""
preprocessor = DummyPreprocessor(1)
with create_dict_checkpoint(
{"factor": 2.0}, checkpoint_cls=FrameworkCheckpoint
) as checkpoint:
checkpoint.set_preprocessor(preprocessor)
predictor = DummyPredictor.from_checkpoint(checkpoint)
assert predictor.get_preprocessor().id == preprocessor.id
test_dataset = pd.DataFrame(range(4))
output_df = predictor.predict(test_dataset)
assert output_df.to_numpy().squeeze().tolist() == [
0.0,
2.0,
4.0,
6.0,
]
preprocessor2 = DummyPreprocessor(2)
predictor.set_preprocessor(preprocessor2)
assert predictor.get_preprocessor().id == preprocessor2.id
output_df = predictor.predict(test_dataset)
assert output_df.to_numpy().squeeze().tolist() == [
0.0,
4.0,
8.0,
12.0,
]
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
| DummyWithNumpyPredictor |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 75557,
"end": 76940
} | class ____(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
| HTTPResponseTest |
python | realpython__materials | build-a-gui-with-wxpython/mp3_tag_editor.py | {
"start": 3517,
"end": 4504
} | class ____(wx.Frame):
def __init__(self):
super().__init__(parent=None, title="Mp3 Tag Editor")
self.panel = Mp3Panel(self)
self.create_menu()
self.Show()
def create_menu(self):
menu_bar = wx.MenuBar()
file_menu = wx.Menu()
open_folder_menu_item = file_menu.Append(
wx.ID_ANY, "Open Folder", "Open a folder with MP3s"
)
menu_bar.Append(file_menu, "&File")
self.Bind(
event=wx.EVT_MENU,
handler=self.on_open_folder,
source=open_folder_menu_item,
)
self.SetMenuBar(menu_bar)
def on_open_folder(self, event):
title = "Choose a directory:"
dlg = wx.DirDialog(self, title, style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.panel.update_mp3_listing(dlg.GetPath())
dlg.Destroy()
if __name__ == "__main__":
app = wx.App(False)
frame = Mp3Frame()
app.MainLoop()
| Mp3Frame |
python | joke2k__faker | faker/providers/phone_number/ar_AE/__init__.py | {
"start": 49,
"end": 2580
} | class ____(PhoneNumberProvider):
# Source: https://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Arab_Emirates
cellphone_formats = (
"{{area_code}} {{cellphone_provider_code}} ### ####",
"{{area_code}}{{cellphone_provider_code}}#######",
"0{{cellphone_provider_code}} ### ####",
"0{{cellphone_provider_code}}#######",
)
telephone_formats = (
"{{area_code}} {{telephone_provider_code}} ### ####",
"{{area_code}}{{telephone_provider_code}}#######",
"0{{telephone_provider_code}} ### ####",
"0{{telephone_provider_code}}#######",
)
toll_formats = (
"200####",
"600######",
"800###",
"800####",
"800#####",
"800######",
"800#######",
)
services_phones_formats = (
"999",
"901",
"998",
"997",
"996",
"991",
"922",
)
formats = cellphone_formats + telephone_formats + services_phones_formats + toll_formats
def cellphone_provider_code(self) -> str:
return self.random_element(
[
"50",
"52",
"54",
"55",
"56",
"58",
]
)
def telephone_provider_code(self) -> str:
return self.random_element(
[
"1",
"2",
"3",
"4",
"6",
"7",
"9",
]
)
def area_code(self) -> str:
return self.random_element(
[
"00971",
"+971",
]
)
def cellphone_number(self) -> str:
pattern: str = self.random_element(self.cellphone_formats)
return self.numerify(self.generator.parse(pattern))
def telephone_number(self) -> str:
pattern: str = self.random_element(self.telephone_formats)
return self.numerify(self.generator.parse(pattern))
def service_phone_number(self) -> str:
pattern: str = self.random_element(self.services_phones_formats)
return self.numerify(self.generator.parse(pattern))
def toll_number(self) -> str:
pattern: str = self.random_element(self.toll_formats)
return self.numerify(self.generator.parse(pattern))
def phone_number(self) -> str:
pattern: str = self.random_element(self.formats)
return self.numerify(self.generator.parse(pattern))
| Provider |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py | {
"start": 64679,
"end": 69641
} | class ____(Phi3Model):
def __init__(self, config: Phi4MultimodalConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.embed_dropout = nn.Dropout(config.embd_pdrop)
self.embed_tokens_extend = Phi4MultimodalFeatureEmbedding(config)
self.layers = nn.ModuleList(
[Phi4MultimodalDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Phi4MultimodalRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_pixel_values: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
image_attention_mask=None,
audio_input_features: Optional[torch.FloatTensor] = None,
audio_embed_sizes=None,
audio_attention_mask=None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> BaseModelOutputWithPast:
r"""
image_pixel_values (`torch.FloatTensor`, *optional*):
If the input contains images, these correspond to the pixel values after transformations (as returned by
the Processor)
image_sizes (`torch.LongTensor`, *optional*):
If the input contains images, these correspond to size of each image.
image_attention_mask (`torch.LongTensor`, *optional*):
Attention mask for the images.
audio_input_features (`torch.FloatTensor`, *optional*):
If the input contains audio samples, these correspond to the values after transformation (as returned by
the Processor).
audio_embed_sizes (`torch.Tensor`, *optional*):
Size of the audio inputs.
audio_attention_mask (`torch.Tensor, *optional*):
Attention mask for the audio inputs.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
inputs_embeds = self.embed_tokens_extend(
input_ids,
inputs_embeds,
image_pixel_values=image_pixel_values,
audio_input_features=audio_input_features,
image_sizes=image_sizes,
image_attention_mask=image_attention_mask,
audio_embed_sizes=audio_embed_sizes,
audio_attention_mask=audio_attention_mask,
)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| Phi4MultimodalModel |
python | mlflow__mlflow | dev/clint/src/clint/linter.py | {
"start": 1413,
"end": 1664
} | class ____:
"""Represents a position in source code with line and column."""
line: int
column: int
def __add__(self, other: "Position") -> "Position":
return Position(self.line + other.line, self.column + other.column)
| Position |
python | keon__algorithms | tests/test_dp.py | {
"start": 3535,
"end": 3684
} | class ____(unittest.TestCase):
def test_house_robber(self):
self.assertEqual(44, house_robber([1, 2, 16, 3, 15, 3, 12, 1]))
| TestHouseRobber |
python | pikepdf__pikepdf | tests/test_object.py | {
"start": 23423,
"end": 24166
} | class ____:
def test_string_bool(self):
assert bool(String('')) is False
assert bool(String('abc')) is True
def test_get_resource_names(sandwich):
assert '/R12' in sandwich.pages[0].Resources._get_resource_names()
def test_get_unique_resource_names(sandwich):
name, suffix = sandwich.pages[0].Resources._get_unique_resource_name("/R", 12)
assert name == "/R13"
assert suffix == 13
sandwich.pages[0].Resources.XObject['/R13'] = Dictionary()
name, suffix = sandwich.pages[0].Resources._get_unique_resource_name("/R", 12)
assert name == "/R14"
assert suffix == 14
@pytest.fixture
def cyclic_toc(resources):
with Pdf.open(resources / 'cyclic-toc.pdf') as pdf:
yield pdf
| TestString |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 40356,
"end": 42432
} | class ____(NextRedirectMixin, FormView):
form_class = ConfirmLoginCodeForm
template_name = "account/confirm_login_code." + app_settings.TEMPLATE_EXTENSION
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
self.stage = request._login_stage
self._process = flows.login_by_code.LoginCodeVerificationProcess.resume(
self.stage
)
if not self._process:
return HttpResponseRedirect(reverse(_login_by_code_urlname()))
return super().dispatch(request, *args, **kwargs)
def get_form_class(self):
return get_form_class(app_settings.FORMS, "confirm_login_code", self.form_class)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["code"] = self._process.code
return kwargs
def form_valid(self, form):
redirect_url = self.get_next_url()
return self._process.finish(redirect_url)
def form_invalid(self, form):
attempts_left = self._process.record_invalid_attempt()
if attempts_left:
return super().form_invalid(form)
adapter = get_adapter(self.request)
adapter.add_message(
self.request,
messages.ERROR,
message=adapter.error_messages["too_many_login_attempts"],
)
return HttpResponseRedirect(
reverse(
_login_by_code_urlname()
if self._process.state["initiated_by_user"]
else "account_login"
)
)
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
site = get_current_site(self.request)
email = self._process.state.get("email")
phone = self._process.state.get("phone")
ret.update(
{
"site": site,
"email": email,
"phone": phone,
"verify_form": ret["form"],
}
)
return ret
confirm_login_code = ConfirmLoginCodeView.as_view()
| ConfirmLoginCodeView |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 47349,
"end": 48397
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, attn_implementation: str = "sdpa") -> None:
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.attn = Qwen3OmniMoeVisionAttention(config=config)
self.mlp = Qwen3OmniMoeVisionMLP(config=config)
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_pos_emb: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> torch.Tensor:
hidden_states = hidden_states + self.attn(
self.norm1(hidden_states),
cu_seqlens=cu_seqlens,
rotary_pos_emb=rotary_pos_emb,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
return hidden_states
| Qwen3OmniMoeVisionBlock |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/shuffle.py | {
"start": 1356,
"end": 1606
} | class ____(TypedDict):
"""RapidsMPF shuffling options."""
on: Sequence[str]
column_names: Sequence[str]
dtypes: Sequence[DataType]
cluster_kind: Literal["dask", "single"]
# Experimental rapidsmpf shuffler integration
| ShuffleOptions |
python | doocs__leetcode | solution/2500-2599/2538.Difference Between Maximum and Minimum Price Sum/Solution.py | {
"start": 0,
"end": 607
} | class ____:
def maxOutput(self, n: int, edges: List[List[int]], price: List[int]) -> int:
def dfs(i, fa):
a, b = price[i], 0
for j in g[i]:
if j != fa:
c, d = dfs(j, i)
nonlocal ans
ans = max(ans, a + d, b + c)
a = max(a, price[i] + c)
b = max(b, price[i] + d)
return a, b
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
g[b].append(a)
ans = 0
dfs(0, -1)
return ans
| Solution |
python | lxml__lxml | src/lxml/html/formfill.py | {
"start": 5721,
"end": 9681
} | class ____:
insert_before = True
block_inside = True
error_container_tag = 'div'
error_message_class = 'error-message'
error_block_class = 'error-block'
default_message = "Invalid"
def __init__(self, **kw):
for name, value in kw.items():
if not hasattr(self, name):
raise TypeError(
"Unexpected keyword argument: %s" % name)
setattr(self, name, value)
def __call__(self, el, is_block, message):
error_el = el.makeelement(self.error_container_tag)
if self.error_message_class:
error_el.set('class', self.error_message_class)
if is_block and self.error_block_class:
error_el.set('class', error_el.get('class', '')+' '+self.error_block_class)
if message is None or message == '':
message = self.default_message
if isinstance(message, ElementBase):
error_el.append(message)
else:
assert isinstance(message, basestring), (
"Bad message; should be a string or element: %r" % message)
error_el.text = message or self.default_message
if is_block and self.block_inside:
if self.insert_before:
error_el.tail = el.text
el.text = None
el.insert(0, error_el)
else:
el.append(error_el)
else:
parent = el.getparent()
pos = parent.index(el)
if self.insert_before:
parent.insert(pos, error_el)
else:
error_el.tail = el.tail
el.tail = None
parent.insert(pos+1, error_el)
default_error_creator = DefaultErrorCreator()
def insert_errors(
el,
errors,
form_id=None,
form_index=None,
error_class="error",
error_creator=default_error_creator,
):
el = _find_form(el, form_id=form_id, form_index=form_index)
for name, error in errors.items():
if error is None:
continue
for error_el, message in _find_elements_for_name(el, name, error):
assert isinstance(message, (basestring, type(None), ElementBase)), (
"Bad message: %r" % message)
_insert_error(error_el, message, error_class, error_creator)
def insert_errors_html(html, values, **kw):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
insert_errors(doc, values, **kw)
return _transform_result(result_type, doc)
def _insert_error(el, error, error_class, error_creator):
if _nons(el.tag) in defs.empty_tags or _nons(el.tag) == 'textarea':
is_block = False
else:
is_block = True
if _nons(el.tag) != 'form' and error_class:
_add_class(el, error_class)
if el.get('id'):
labels = _label_for_xpath(el, for_id=el.get('id'))
if labels:
for label in labels:
_add_class(label, error_class)
error_creator(el, is_block, error)
def _add_class(el, class_name):
if el.get('class'):
el.set('class', el.get('class')+' '+class_name)
else:
el.set('class', class_name)
def _find_elements_for_name(form, name, error):
if name is None:
# An error for the entire form
yield form, error
return
if name.startswith('#'):
# By id
el = form.get_element_by_id(name[1:])
if el is not None:
yield el, error
return
els = _name_xpath(form, name=name)
if not els:
# FIXME: should this raise an exception?
return
if not isinstance(error, (list, tuple)):
yield els[0], error
return
# FIXME: if error is longer than els, should it raise an error?
for el, err in zip(els, error):
if err is None:
continue
yield el, err
| DefaultErrorCreator |
python | aio-libs__aiohttp | aiohttp/multipart.py | {
"start": 1364,
"end": 5683
} | class ____(RuntimeWarning):
pass
def parse_content_disposition(
header: str | None,
) -> tuple[str | None, dict[str, str]]:
def is_token(string: str) -> bool:
return bool(string) and TOKEN >= set(string)
def is_quoted(string: str) -> bool:
return string[0] == string[-1] == '"'
def is_rfc5987(string: str) -> bool:
return is_token(string) and string.count("'") == 2
def is_extended_param(string: str) -> bool:
return string.endswith("*")
def is_continuous_param(string: str) -> bool:
pos = string.find("*") + 1
if not pos:
return False
substring = string[pos:-1] if string.endswith("*") else string[pos:]
return substring.isdigit()
def unescape(text: str, *, chars: str = "".join(map(re.escape, CHAR))) -> str:
return re.sub(f"\\\\([{chars}])", "\\1", text)
if not header:
return None, {}
disptype, *parts = header.split(";")
if not is_token(disptype):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params: dict[str, str] = {}
while parts:
item = parts.pop(0)
if not item: # To handle trailing semicolons
warnings.warn(BadContentDispositionHeader(header))
continue
if "=" not in item:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
key, value = item.split("=", 1)
key = key.lower().strip()
value = value.lstrip()
if key in params:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
if not is_token(key):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_continuous_param(key):
if is_quoted(value):
value = unescape(value[1:-1])
elif not is_token(value):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_extended_param(key):
if is_rfc5987(value):
encoding, _, value = value.split("'", 2)
encoding = encoding or "utf-8"
else:
warnings.warn(BadContentDispositionParam(item))
continue
try:
value = unquote(value, encoding, "strict")
except UnicodeDecodeError: # pragma: nocover
warnings.warn(BadContentDispositionParam(item))
continue
else:
failed = True
if is_quoted(value):
failed = False
value = unescape(value[1:-1].lstrip("\\/"))
elif is_token(value):
failed = False
elif parts:
# maybe just ; in filename, in any case this is just
# one case fix, for proper fix we need to redesign parser
_value = f"{value};{parts[0]}"
if is_quoted(_value):
parts.pop(0)
value = unescape(_value[1:-1].lstrip("\\/"))
failed = False
if failed:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params[key] = value
return disptype.lower(), params
def content_disposition_filename(
params: Mapping[str, str], name: str = "filename"
) -> str | None:
name_suf = "%s*" % name
if not params:
return None
elif name_suf in params:
return params[name_suf]
elif name in params:
return params[name]
else:
parts = []
fnparams = sorted(
(key, value) for key, value in params.items() if key.startswith(name_suf)
)
for num, (key, value) in enumerate(fnparams):
_, tail = key.split("*", 1)
if tail.endswith("*"):
tail = tail[:-1]
if tail == str(num):
parts.append(value)
else:
break
if not parts:
return None
value = "".join(parts)
if "'" in value:
encoding, _, value = value.split("'", 2)
encoding = encoding or "utf-8"
return unquote(value, encoding, "strict")
return value
| BadContentDispositionParam |
python | kamyu104__LeetCode-Solutions | Python/gas-station.py | {
"start": 29,
"end": 545
} | class ____(object):
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
start, total_sum, current_sum = 0, 0, 0
for i in xrange(len(gas)):
diff = gas[i] - cost[i]
current_sum += diff
total_sum += diff
if current_sum < 0:
start = i + 1
current_sum = 0
if total_sum >= 0:
return start
return -1
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.