language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 10615,
"end": 45263
} | class ____(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
support.check_disallow_instantiation(self, _ssl.Certificate)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
if major >= 3:
# 3.x uses 0xMNN00PP0L
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{patch:d}"
else:
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.OP_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
if not has_tls_protocol(protocol):
continue
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'ssl.{protocol.name} is deprecated',
str(cm.warning)
)
for version in versions:
if not has_tls_version(version):
continue
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
self.assertEqual(
f'ssl.{version!s} is deprecated',
str(cm.warning)
)
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
| BasicSocketTests |
python | pandas-dev__pandas | pandas/tests/indexes/interval/test_constructors.py | {
"start": 11996,
"end": 14044
} | class ____(ConstructorTests):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
def constructor(self):
"""Fixture for IntervalIndex.from_tuples constructor"""
return IntervalIndex.from_tuples
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_tuples
"""
if is_unsigned_integer_dtype(breaks):
pytest.skip(f"{breaks.dtype} not relevant IntervalIndex.from_tuples tests")
if len(breaks) == 0:
return {"data": breaks}
tuples = list(zip(breaks[:-1], breaks[1:]))
if isinstance(breaks, (list, tuple)):
return {"data": tuples}
elif isinstance(getattr(breaks, "dtype", None), CategoricalDtype):
return {"data": breaks._constructor(tuples)}
return {"data": com.asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
# non-tuple
tuples = [(0, 1), 2, (3, 4)]
msg = "IntervalIndex.from_tuples received an invalid item, 2"
with pytest.raises(TypeError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
# too few/many items
tuples = [(0, 1), (2,), (3, 4)]
msg = "IntervalIndex.from_tuples requires tuples of length 2, got {t}"
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
tuples = [(0, 1), (2, 3, 4), (5, 6)]
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
def test_na_tuples(self):
# tuple (NA, NA) evaluates the same as NA as an element
na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
tm.assert_index_equal(idx_na_tuple, idx_na_element)
| TestFromTuples |
python | neetcode-gh__leetcode | python/0205-isomorphic-strings.py | {
"start": 0,
"end": 322
} | class ____:
def isIsomorphic(self, s: str, t: str) -> bool:
mapST, mapTS = {}, {}
for c1, c2 in zip(s, t):
if (c1 in mapST and mapST[c1] != c2) or (c2 in mapTS and mapTS[c2] != c1):
return False
mapST[c1] = c2
mapTS[c2] = c1
return True | Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 283077,
"end": 288417
} | class ____(ValueChannelMixin, core.StringValueDefWithCondition):
"""
HrefValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : str, dict, :class:`ExprRef`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "href"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> HrefValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> HrefValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> HrefValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> HrefValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> HrefValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> HrefValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefstringnullExprRef], /
) -> HrefValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| HrefValue |
python | pytorch__pytorch | benchmarks/dynamo/genai_layers/utils.py | {
"start": 589,
"end": 1196
} | class ____:
# Benchmark setting usually the shape of the input tensor
setting: str
# Latency in milliseconds
latency: float
# Number of memory access in bytes
memory_bytes: float
# Memory bandwidth in GB/s
memory_bandwidth: float = 0.0
# Compute intensity in FLOPs/byte
compute_intensity: float = 0.0
def __post_init__(self):
self.memory_bandwidth = self.memory_bytes / (self.latency / 1000) / 1e9
def __str__(self):
return f"setting: {self.setting}, latency: {self.latency} ms, memory bandwidth: {self.memory_bandwidth} GB/s"
| Performance |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/incident.py | {
"start": 1156,
"end": 1271
} | class ____(IncidentSerializerResponse):
discoverQuery: str
@register(Incident)
| DetailedIncidentSerializerResponse |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 26241,
"end": 26857
} | class ____(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ("start", "stop", "step")
start: t.Optional[Expr]
stop: t.Optional[Expr]
step: t.Optional[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> slice:
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj: t.Optional[Expr]) -> t.Optional[t.Any]:
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
| Slice |
python | huggingface__transformers | src/transformers/models/qwen2_moe/configuration_qwen2_moe.py | {
"start": 920,
"end": 11278
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2MoeModel`]. It is used to instantiate a
Qwen2MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of [Qwen/Qwen1.5-MoE-A2.7B](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151936):
Vocabulary size of the Qwen2MoE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Qwen2MoeModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 5632):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*, defaults to 4096):
Sliding window attention (SWA) window size. If not specified, will default to `4096`.
max_window_layers (`int`, *optional*, defaults to 28):
The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
additional layer afterwards will use SWA (Sliding Window Attention).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
decoder_sparse_step (`int`, *optional*, defaults to 1):
The frequency of the MoE layer.
moe_intermediate_size (`int`, *optional*, defaults to 1408):
Intermediate size of the routed expert.
shared_expert_intermediate_size (`int`, *optional*, defaults to 5632):
Intermediate size of the shared expert.
num_experts_per_tok (`int`, *optional*, defaults to 4):
Number of selected experts.
num_experts (`int`, *optional*, defaults to 60):
Number of routed experts.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the topk probabilities.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
mlp_only_layers (`list[int]`, *optional*, defaults to `[]`):
Indicate which layers use Qwen2MoeMLP rather than Qwen2MoeSparseMoeBlock
The list contains layer index, from 0 to num_layers-1 if we have num_layers layers
If `mlp_only_layers` is empty, `decoder_sparse_step` is used to determine the sparsity.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layer_types (`dict[int, str]`, *optional*): a dictionarry that explicitly maps layer index with
the attention type. The attention type is one of `sliding_attention`, `full_attention`.
```python
>>> from transformers import Qwen2MoeModel, Qwen2MoeConfig
>>> # Initializing a Qwen2MoE style configuration
>>> configuration = Qwen2MoeConfig()
>>> # Initializing a model from the Qwen1.5-MoE-A2.7B" style configuration
>>> model = Qwen2MoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen2_moe"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Qwen2Moe`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 151936,
hidden_size: Optional[int] = 2048,
intermediate_size: Optional[int] = 5632,
num_hidden_layers: Optional[int] = 24,
num_attention_heads: Optional[int] = 16,
num_key_value_heads: Optional[int] = 16,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 32768,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
use_sliding_window: Optional[bool] = False,
sliding_window: Optional[int] = 4096,
max_window_layers: Optional[int] = 28,
attention_dropout: Optional[float] = 0.0,
decoder_sparse_step: Optional[int] = 1,
moe_intermediate_size: Optional[int] = 1408,
shared_expert_intermediate_size: Optional[int] = 5632,
num_experts_per_tok: Optional[int] = 4,
num_experts: Optional[int] = 60,
norm_topk_prob: Optional[bool] = False,
output_router_logits: Optional[bool] = False,
router_aux_loss_coef: Optional[float] = 0.001,
mlp_only_layers: Optional[bool] = None,
qkv_bias: Optional[bool] = True,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.layer_types = layer_types
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window if use_sliding_window else 0
self.max_window_layers = max_window_layers
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
# MoE arguments
self.decoder_sparse_step = decoder_sparse_step
self.moe_intermediate_size = moe_intermediate_size
self.shared_expert_intermediate_size = shared_expert_intermediate_size
self.num_experts_per_tok = num_experts_per_tok
self.num_experts = num_experts
self.norm_topk_prob = norm_topk_prob
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.mlp_only_layers = [] if mlp_only_layers is None else mlp_only_layers
self.qkv_bias = qkv_bias
if self.layer_types is None:
self.layer_types = [
"sliding_attention"
if bool((i + 1) % 2) and i < self.max_window_layers and use_sliding_window
else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types)
self.rope_parameters = rope_parameters
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Qwen2MoeConfig"]
| Qwen2MoeConfig |
python | nedbat__coveragepy | coverage/exceptions.py | {
"start": 899,
"end": 993
} | class ____(CoverageException):
"""We didn't have data to work with."""
pass
| NoDataError |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 8474,
"end": 8764
} | class ____(MPTTModel):
class Meta:
unique_together = (
(
"parent",
"code",
),
)
parent = TreeForeignKey("self", null=True, on_delete=models.CASCADE)
code = models.CharField(max_length=10)
| UniqueTogetherModel |
python | tensorflow__tensorflow | tensorflow/python/autograph/core/function_wrappers_test.py | {
"start": 1036,
"end": 2110
} | class ____(test.TestCase):
def test_name_scope(self):
if context.executing_eagerly():
self.skipTest('Tensor names are disabled in eager')
with function_wrappers.FunctionScope(
'test_name', None,
converter.ConversionOptions(
optional_features=converter.Feature.NAME_SCOPES)):
t = constant_op.constant(1)
self.assertIn('test_name', t.name)
def test_auto_control_deps(self):
v = variables.Variable(1)
with function_wrappers.FunctionScope(
'_', None,
converter.ConversionOptions(
optional_features=converter.Feature.AUTO_CONTROL_DEPS)) as scope:
v.assign(2)
op = scope.ret(constant_op.constant(1), True)
self.evaluate(op)
self.assertEqual(self.evaluate(v.read_value()), 2)
def test_all_disabled(self):
with function_wrappers.FunctionScope(None, None,
converter.STANDARD_OPTIONS):
t = constant_op.constant(1)
self.assertEqual(self.evaluate(t), 1)
if __name__ == '__main__':
test.main()
| FunctionWrappersTest |
python | astropy__astropy | astropy/io/ascii/latex.py | {
"start": 5425,
"end": 6500
} | class ____(core.BaseData):
"""Class to read the data in LaTeX tables."""
data_start: ClassVar[str | None] = None
data_end = r"\end{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r"Could not find table start")
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, "data_start", lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, "data_end", lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
if self.latex["tabletype"] is not None:
lines.append(r"\end{" + self.latex["tabletype"] + "}")
| LatexData |
python | tensorflow__tensorflow | tensorflow/python/keras/legacy_tf_layers/variable_scope_shim.py | {
"start": 22590,
"end": 23561
} | class ____(module.Module):
"""Module that has a scope to capture vars/losses made by `get_variable`."""
def __init__(self):
self._var_store = _EagerVariableStore() # pylint: disable=protected-access
self._variables = {}
def _variable_creator(self, next_creator, **kwargs):
var = next_creator(**kwargs)
self._variables[var.name] = var
return var
@tf_contextlib.contextmanager
def scope(self):
with vs.variable_creator_scope(
self._variable_creator), vs.with_variable_store(self._var_store):
yield
def get_regularization_losses(self):
# TODO(kaftan): Consider adding a regex scope like the collection access.
# But, < 40-50 usages of get_regularization_loss(es) with `scope`
# & possible to do manually?
losses = {}
for var_name, regularizer in self._var_store._regularizers.items(): # pylint: disable=protected-access
losses[var_name] = regularizer()
return losses
| VariableAndLossTracker |
python | dateutil__dateutil | tests/test_parser.py | {
"start": 11198,
"end": 13559
} | class ____(object):
def test_ybd(self):
# If we have a 4-digit year, a non-numeric month (abbreviated or not),
# and a day (1 or 2 digits), then there is no ambiguity as to which
# token is a year/month/day. This holds regardless of what order the
# terms are in and for each of the separators below.
seps = ['-', ' ', '/', '.']
year_tokens = ['%Y']
month_tokens = ['%b', '%B']
day_tokens = ['%d']
if PLATFORM_HAS_DASH_D:
day_tokens.append('%-d')
prods = itertools.product(year_tokens, month_tokens, day_tokens)
perms = [y for x in prods for y in itertools.permutations(x)]
unambig_fmts = [sep.join(perm) for sep in seps for perm in perms]
actual = datetime(2003, 9, 25)
for fmt in unambig_fmts:
dstr = actual.strftime(fmt)
res = parse(dstr)
assert res == actual
# TODO: some redundancy with PARSER_TEST_CASES cases
@pytest.mark.parametrize("fmt,dstr", [
("%a %b %d %Y", "Thu Sep 25 2003"),
("%b %d %Y", "Sep 25 2003"),
("%Y-%m-%d", "2003-09-25"),
("%Y%m%d", "20030925"),
("%Y-%b-%d", "2003-Sep-25"),
("%d-%b-%Y", "25-Sep-2003"),
("%b-%d-%Y", "Sep-25-2003"),
("%m-%d-%Y", "09-25-2003"),
("%d-%m-%Y", "25-09-2003"),
("%Y.%m.%d", "2003.09.25"),
("%Y.%b.%d", "2003.Sep.25"),
("%d.%b.%Y", "25.Sep.2003"),
("%b.%d.%Y", "Sep.25.2003"),
("%m.%d.%Y", "09.25.2003"),
("%d.%m.%Y", "25.09.2003"),
("%Y/%m/%d", "2003/09/25"),
("%Y/%b/%d", "2003/Sep/25"),
("%d/%b/%Y", "25/Sep/2003"),
("%b/%d/%Y", "Sep/25/2003"),
("%m/%d/%Y", "09/25/2003"),
("%d/%m/%Y", "25/09/2003"),
("%Y %m %d", "2003 09 25"),
("%Y %b %d", "2003 Sep 25"),
("%d %b %Y", "25 Sep 2003"),
("%m %d %Y", "09 25 2003"),
("%d %m %Y", "25 09 2003"),
("%y %d %b", "03 25 Sep",),
])
def test_strftime_formats_2003Sep25(self, fmt, dstr):
expected = datetime(2003, 9, 25)
# First check that the format strings behave as expected
# (not strictly necessary, but nice to have)
assert expected.strftime(fmt) == dstr
res = parse(dstr)
assert res == expected
| TestFormat |
python | allegroai__clearml | clearml/backend_interface/task/task.py | {
"start": 2303,
"end": 137683
} | class ____(IdObjectBase, AccessMixin, SetupUploadMixin):
"""Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = "__anonymous__"
_development_tag = "development"
archived_tag = "archived"
_default_configuration_section_name = "General"
_legacy_parameters_section_name = "Args"
_force_requirements = {}
_ignore_requirements = set()
_store_diff = deferred_config("development.store_uncommitted_code_diff", False)
_store_remote_diff = deferred_config("development.store_code_diff_from_remote", False)
_report_subprocess_enabled = deferred_config("development.report_use_subprocess", sys.platform == "linux")
_force_use_pip_freeze = deferred_config(
multi=[
("development.detect_with_pip_freeze", False),
("development.detect_with_conda_freeze", False),
]
)
_force_store_standalone_script = False
_offline_filename = "task.json"
__default_random_seed = 1337
_random_seed = __default_random_seed
__nested_deferred_init_flag = type("_NestedDeferredInitFlag", (object,), {"content": {}})
class TaskTypes(Enum):
def __str__(self) -> str:
return str(self.value)
def __eq__(self, other: Any) -> bool:
return str(self) == str(other)
def __repr__(self) -> str:
return "TaskTypes.{}".format(self.value)
training = "training"
testing = "testing"
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self) -> str:
return str(self.value)
def __eq__(self, other: Any) -> bool:
return str(self) == str(other)
def __repr__(self) -> str:
return "TaskTypes.{}".format(self.value)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
class DeleteError(Exception):
pass
def __init__(
self,
session: Optional[Session] = None,
task_id: Optional[str] = None,
log: Optional[logging.Logger] = None,
project_name: Optional[str] = None,
task_name: Optional[str] = None,
task_type: Union[str, TaskTypes] = TaskTypes.training,
log_to_backend: bool = True,
raise_on_validation_errors: bool = True,
force_create: bool = False,
) -> None:
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, and internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, minimum length of 3 characters, used only if a new task is created.
The new task will be associated with a project by this name. If no such project exists, a new project will
be created using the API.
:type project_name: str
:param task_name: Optional task name, minimum length of 3 characters, used only if a new task is created.
:type project_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True, a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
self._offline_output_models = []
SingletonLock.instantiate()
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._metrics_manager = None
self.__reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = tuple(
set(
six.string_types
+ six.integer_types
+ (six.text_type, float, list, tuple, dict, type(None), Enum) # noqa
)
)
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
self._calling_filename = None
self._offline_dir = None
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
if self._offline_mode:
self.data.id = self.id
self.name = task_name
else:
# this is an existing task, let's try to verify stuff
self._validate(check_output_dest_credentials=False)
if self.data is None:
raise ValueError('Task ID "{}" could not be found'.format(self.id))
self._project_name = (self.project, project_name)
self._project_object = None
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = get_log_to_backend(default=log_to_backend)
self._artifacts_manager = Artifacts(self)
self._hyper_params_manager = HyperParams(self)
def _validate(self, check_output_dest_credentials: bool = False) -> None:
if not self._is_remote_main_task():
self._storage_uri = self.get_output_destination(raise_on_error=False, log_on_error=False) or None
return
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info("Validating output destination")
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = "Failed resolving output destination (no credentials found for %s)" % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error("Failed trying to verify output destination: %s" % ex)
@classmethod
def _resolve_task_id(cls, task_id: Optional[str], log: Optional[logging.Logger] = None) -> Optional[str]:
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger("task")
log.info("Using task ID from env %s=%s" % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self) -> None:
def check_package_update() -> None:
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get(
default=config.get("development.suppress_update_message", False)
):
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
"{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}".format(
Session.get_clients()[0][0].upper(),
latest_version[0],
sep.join(latest_version[2]),
),
)
else:
self.get_logger().report_text(
"ClearML new version available: upgrade to v{} is recommended!".format(latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[
self._calling_filename,
sys.argv[0],
]
if ScriptInfo.is_running_from_module()
else [
sys.argv[0],
self._calling_filename,
],
log=self.log,
create_requirements=False,
check_uncommitted=self._store_diff,
uncommitted_from_remote=self._store_remote_diff,
force_single_script=self._force_store_standalone_script,
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# if the git is too large to store on the task, we must store it as artifact:
if result.auxiliary_git_diff:
diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n"
diff_preview += "\n".join(
line for line in result.auxiliary_git_diff.split("\n") if line.startswith("diff --git ")
)
self._artifacts_manager.upload_artifact(
name="auxiliary_git_diff",
artifact_object=result.auxiliary_git_diff,
preview=diff_preview,
)
# add ide info into task runtime_properties
if result.script and result.script.get("ide"):
# noinspection PyBroadException
try:
self._set_runtime_properties(runtime_properties={"ide": result.script["ide"]})
except Exception as ex:
self.log.info("Failed logging ide information: {}".format(ex))
# store original entry point
entry_point = result.script.get("entry_point") if result.script else None
# check if we are running inside a module, then we should set our entry point
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
# Since we might run asynchronously, don't use self.data (let someone else
# overwrite it before we have a chance to call edit)
with self._edit_lock:
self.reload()
self.data.script = result.script
self._edit(script=result.script)
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = (
None
if config.get("development.force_analyze_entire_repo", False)
else os.path.join(result.script["working_dir"], entry_point)
)
if self._force_use_pip_freeze:
if isinstance(self._force_use_pip_freeze, (str, Path)):
conda_requirements = ""
try:
req_file = Path(self._force_use_pip_freeze)
except TypeError:
# LazyEvaluator loading when casting
req_file = Path(str(self._force_use_pip_freeze))
requirements = req_file.read_text() if req_file.is_file() else None
else:
requirements, conda_requirements = pip_freeze(
combine_conda_with_pip=config.get("development.detect_with_conda_freeze", True)
)
requirements = (
"# Python " + sys.version.replace("\n", " ").replace("\r", " ") + "\n\n" + requirements
)
else:
(
requirements,
conda_requirements,
) = script_requirements.get_requirements(entry_point_filename=entry_point_filename)
if requirements:
if not result.script["requirements"]:
result.script["requirements"] = {}
result.script["requirements"]["pip"] = requirements
result.script["requirements"]["conda"] = conda_requirements
self._update_requirements(result.script.get("requirements") or "")
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger("task").debug(str(e))
def _auto_generate(
self,
project_name: Optional[str] = None,
task_name: Optional[str] = None,
task_type: Union[str, TaskTypes] = TaskTypes.training,
) -> str:
created_msg = make_message("Auto-generated at %(time)s UTC by %(user)s@%(host)s")
if isinstance(task_type, self.TaskTypes):
task_type = task_type.value
if task_type not in (
self.TaskTypes.training.value,
self.TaskTypes.testing.value,
) and not Session.check_min_api_version("2.8"):
print(
'WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
"please upgrade clearml-server.".format(self.TaskTypes.training, task_type)
)
task_type = self.TaskTypes.training.value
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {"system_tags": tags} if Session.check_min_api_version("2.3") else {"tags": tags}
if not Session.check_min_api_version("2.20"):
extra_properties["input"] = {"view": {}} # noqa
req = tasks.CreateRequest(
name=task_name or make_message("Anonymous task (%(user)s@%(host)s %(time)s)"),
type=tasks.TaskTypeEnum(task_type),
comment=created_msg,
project=project_id,
**extra_properties,
)
res = self.send(req)
if res:
return res.response.id
id = "offline-{}".format(str(uuid4()).replace("-", ""))
self._edit(type=tasks.TaskTypeEnum(task_type))
return id
def _set_storage_uri(self, value: str) -> None:
value = value.rstrip("/") if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ("" if Session.check_min_api_version("2.3") else None))
@property
def storage_uri(self) -> Optional[str]:
"""
The storage / output url for this task. This is the default location for output models and other artifacts.
:return: The url string or None if not set.
"""
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value: str) -> ():
"""
Set the storage / output url for this task. This is the default location for output models and other artifacts.
:param str value: The value to set for output URI.
"""
self._set_storage_uri(value)
@property
def task_id(self) -> str:
"""
Returns the current Task's ID.
"""
return self.id
@property
def name(self) -> str:
"""
Returns the current Task's name.
"""
return self.data.name or ""
@name.setter
def name(self, value: str) -> ():
"""
Set the current Task's name.
:param str value: Name to set.
"""
self.set_name(value)
@property
def task_type(self) -> str:
"""
Returns the current Task's type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
"""
return self.data.type
@property
def project(self) -> str:
"""
Returns the current Task's project ID.
"""
return self.data.project
@property
def parent(self) -> str:
"""
Returns the current Task's parent task ID (str).
"""
return self.data.parent
@property
def input_models_id(self) -> Mapping[str, str]:
"""
Returns the current Task's input model IDs as a dictionary.
"""
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property("execution.model", raise_on_error=False)
return {"Input Model": model_id} if model_id else {}
input_models = self._get_task_property("models.input", default=[], raise_on_error=False) or []
return {m.name: m.model for m in input_models}
@property
def output_models_id(self) -> Mapping[str, str]:
"""
Returns the current Task's output model IDs as a dictionary.
"""
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property("output.model", raise_on_error=False)
return {"Output Model": model_id} if model_id else {}
output_models = self._get_task_property("models.output", default=[]) or []
return {m.name: m.model for m in output_models}
@property
def comment(self) -> str:
    """The Task's free-text comment; an empty string when not set."""
    stored_comment = self.data.comment
    return stored_comment if stored_comment else ""
@comment.setter
def comment(self, value: str) -> ():
    """
    Set the comment of the task. Please note that this will override any comment currently
    present. If you want to add lines to the comment field, get the comments first, add your
    own and then set them again.

    :param str value: New comment text (replaces the existing comment entirely).
    """
    self.set_comment(value)
@property
def cache_dir(self) -> Path:
    """Local cache directory holding this Task's files (a per-task-ID subfolder of the global cache dir)."""
    base_dir = Path(get_cache_dir())
    return base_dir / self.id
@property
def status(self) -> str:
    """
    The Task's status. To keep the Task updated,
    ClearML reloads the Task status information only when this value is accessed.

    :return str: TaskStatusEnum status
    """
    return self.get_status()
@property
def _status(self) -> str:
    """Return the task's cached status (don't reload if we don't have to)."""
    return str(self.data.status)
def reload(self) -> ():
    """
    Reload current Task's state from clearml-server.
    Refresh all task's fields, including artifacts / models / parameters etc.
    """
    return super(Task, self).reload()
def _get_output_model(self, upload_required: bool = True, model_id: Optional[str] = None) -> Model:
    """Create a Model object bound to this Task's storage destination.

    :param upload_required: When True, failure to resolve an output destination
        is raised/logged by get_output_destination.
    :param model_id: Optional existing model ID to wrap; None creates a new model.
    :return: A Model configured to upload under this task's "models" suffix.
    """
    return Model(
        session=self.session,
        model_id=model_id or None,
        cache_dir=self.cache_dir,
        # prefer the task's own storage_uri; fall back to the configured output destination
        upload_storage_uri=self.storage_uri
        or self.get_output_destination(raise_on_error=upload_required, log_on_error=upload_required),
        upload_storage_suffix=self._get_output_destination_suffix("models"),
        log=self.log,
    )
@property
def metrics_manager(self) -> Metrics:
    """A metrics manager used to manage the metrics related to this task."""
    return self._get_metrics_manager(self.get_output_destination())
@property
def _reporter(self) -> Reporter:
    """Lazily created metrics reporter instance for this task."""
    if self.__reporter is not None:
        return self.__reporter
    # first access: build the reporter (also caches it on self.__reporter)
    self._setup_reporter()
    return self.__reporter
def _get_metrics_manager(self, storage_uri: str) -> Metrics:
    """Return (and lazily create) the Metrics manager for this task.

    :param storage_uri: Destination URI for uploaded metric artifacts
        (only used on first creation; subsequent calls return the cached manager).
    """
    if self._metrics_manager is None:
        self._metrics_manager = Metrics(
            session=self.session,
            task=self,
            storage_uri=storage_uri,
            storage_uri_suffix=self._get_output_destination_suffix("metrics"),
            # honor a previously-set initial iteration offset
            iteration_offset=self.get_initial_iteration(),
        )
    return self._metrics_manager
def _setup_reporter(self) -> Reporter:
    """Create (and cache) the Reporter backed by this task's metrics manager."""
    try:
        destination = self.get_output_destination(log_on_error=False)
    except ValueError:
        # no output destination configured; the metrics manager handles None
        destination = None
    self.__reporter = Reporter(metrics=self._get_metrics_manager(storage_uri=destination), task=self)
    return self.__reporter
def _get_output_destination_suffix(self, extra_path: Optional[str] = None) -> str:
    """Build the storage sub-path "<project>/<task-name>.<task-id>[/<extra_path>]".

    Folder names are limited to support various storage infrastructure limits
    (such as max path on posix or object storage): the project part is capped
    at 256 characters (including subproject names) and the task part at 128.
    Truncated names (and the task part, always) get a ".<uuid>" suffix so they
    stay unique.

    :param extra_path: Optional extra path component appended at the end.
    :return: A quoted, "/"-joined destination suffix.
    """
    def limit_folder_name(a_name: str, uuid: str, max_length: int, always_add_uuid: bool) -> str:
        # keep the name as-is when allowed and it already fits the limit
        # (was "<", which needlessly truncated names of exactly max_length)
        if not always_add_uuid and len(a_name) <= max_length:
            return a_name
        # truncate and append the uuid; keep at least 2 chars of the name
        return "{}.{}".format(a_name[: max(2, max_length - len(uuid) - 1)], uuid)

    project_name = self.get_project_name() or "unknown"
    return "/".join(
        quote(x, safe="'[]{}()$^,.; -_+-=/")
        for x in (
            limit_folder_name(project_name, str(self.project), 256, False),
            limit_folder_name(self.name, str(self.data.id), 128, True),
            extra_path,
        )
        if x
    )
def _reload(self) -> Any:
    """Reload the task object from the backend (or from the offline session file).

    In offline mode the task is deserialized from the JSON file stored in the
    offline-mode folder; otherwise a GetByIdRequest is sent to the server.

    :return: A ``tasks.Task`` data object.
    """
    with self._edit_lock:
        if self._offline_mode:
            # noinspection PyBroadException
            try:
                with open(
                    (self.get_offline_mode_folder() / self._offline_filename).as_posix(),
                    "rt",
                ) as f:
                    stored_dict = json.load(f)
                stored_data = tasks.Task(**stored_dict)
                # add missing entries (keys the Task schema does not declare)
                for k, v in stored_dict.items():
                    if not hasattr(stored_data, k):
                        setattr(stored_data, k, v)
                # cache project name/object if they were stored in the file
                if stored_dict.get("project_name"):
                    self._project_name = (None, stored_dict.get("project_name"))
                if stored_dict.get("project_object"):
                    self._project_object = (None, stored_dict.get("project_object"))
            except Exception:
                # missing/corrupt offline file: fall back to the cached copy
                stored_data = self._data
            # no stored or cached data at all: return an empty Task skeleton
            return stored_data or tasks.Task(
                execution=tasks.Execution(
                    parameters={},
                    artifacts=[],
                    dataviews=[],
                    model="",
                    model_desc={},
                    model_labels={},
                    docker_cmd="",
                ),
                output=tasks.Output(),
            )
        if self._reload_skip_flag and self._data:
            # reload explicitly skipped: serve the cached copy
            return self._data
        res = self.send(tasks.GetByIdRequest(task=self.id))
        return res.response.task
def _reload_field(self, field: str) -> Any:
    """Reload a single task field from the backend, dot-separated for nesting.

    :param field: Field path, e.g. "execution.parameters".
    :return: The field's value, or None when any path component is missing.
    """
    with self._edit_lock:
        if self._offline_mode:
            # offline: reload the whole task object and walk it locally
            field_value = self._reload()
        else:
            res = self.send(tasks.GetAllRequest(id=[self.id], only_fields=[field], search_hidden=True))
            field_value = res.response.tasks[0]
        for attr_name in field.split("."):
            field_value = getattr(field_value, attr_name, None)
            if field_value is None:
                break
        return field_value
def reset(self, set_started_on_success: bool = True, force: bool = False) -> ():
    """
    Reset the task. Task will be reloaded following a successful reset.

    :param set_started_on_success: If True, automatically set Task status to started after resetting it.
    :param force: If not true, call fails if the task status is 'completed'
    """
    self.send(tasks.ResetRequest(task=self.id, force=force))
    if set_started_on_success:
        self.started()
    elif self._data:
        # if not started, make sure the current cached state is synced
        self._data.status = self.TaskStatusEnum.created
    # refresh the local copy with the post-reset server state
    self.reload()
def started(self, ignore_errors: bool = True, force: bool = False) -> ():
    """The signal that this Task started.

    :param ignore_errors: If True (default), do not raise on request failure.
    :param force: Force the status change regardless of current state.
    """
    return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors)
def stopped(
    self,
    ignore_errors: bool = True,
    force: bool = False,
    status_reason: Optional[str] = None,
    status_message: Optional[str] = None,
) -> ():
    """The signal that this Task stopped.

    :param ignore_errors: If True (default), do not raise on request failure.
    :param force: Force the status change regardless of current state.
    :param status_reason: Optional reason stored with the status change.
    :param status_message: Optional message shown on the Task's info panel.
    """
    return self.send(
        tasks.StoppedRequest(
            self.id,
            force=force,
            status_reason=status_reason,
            status_message=status_message,
        ),
        ignore_errors=ignore_errors,
    )
def stop_request(
    self,
    ignore_errors: bool = True,
    force: bool = False,
    status_message: Optional[str] = None,
) -> ():
    """
    Request a task to stop. This will not change the task status
    but mark a request for an agent or SDK to actually stop the Task.
    This will trigger the Task's abort callback, and at the end will
    change the task status to stopped and kill the Task's processes.

    Notice: calling this on your own Task will cause
    the watchdog to call the on_abort callback and kill the process.

    :param bool force: If not True, call fails if the task status is not 'in_progress'
    :param bool ignore_errors: If False, raise exception on error
    :param str status_message: Optional, add status change message to the stop request.
        This message will be stored as status_message on the Task's info panel
    """
    # request task stop (status_reason is fixed: this is always an abort request)
    return self.send(
        tasks.StopRequest(
            self.id,
            force=force,
            status_reason="abort request",
            status_message=status_message,
        ),
        ignore_errors=ignore_errors,
    )
def completed(self, ignore_errors: bool = True) -> ():
    """Deprecated alias for :meth:`mark_completed` (emits a DeprecationWarning)."""
    warnings.warn("'completed' is deprecated; use 'mark_completed' instead.", DeprecationWarning)
    return self.mark_completed(ignore_errors=ignore_errors)
def mark_completed(
    self,
    ignore_errors: bool = True,
    status_message: Optional[str] = None,
    force: bool = False,
) -> ():
    """
    Use this method to close and change status of (remotely!) executed tasks.
    This method closes the task it is a member of,
    changes its status to "Completed", and
    terminates the Python process that created the task.
    This is in contrast to :meth:`Task.close`, which does the first two steps, but does not terminate any Python process.

    Let's say that process A created the task and process B has a handle on the task, e.g., with :meth:`Task.get_task`.
    Then, if we call :meth:`Task.mark_completed`, process A is terminated, but process B is not.
    However, if :meth:`Task.mark_completed` was called from the same process in which the task was created,
    then - effectively - the process terminates itself.
    For example, in

    .. code-block:: py

        task = Task.init(...)
        task.mark_completed()
        from time import sleep
        sleep(30)
        print('This text will not be printed!')

    the text will not be printed, because the Python process is immediately terminated.

    :param bool ignore_errors: If True (default), ignore any errors raised
    :param bool force: If True, the task status will be changed to `stopped` regardless of the current Task state.
    :param str status_message: Optional, add status change message to the stop request.
        This message will be stored as status_message on the Task's info panel
    """
    # prefer the dedicated "completed" API when the client schema supports it
    if hasattr(tasks, "CompletedRequest") and callable(tasks.CompletedRequest):
        if Session.check_min_api_version("2.20"):
            # server >= 2.20 can publish atomically as part of the completed call
            return self.send(
                tasks.CompletedRequest(
                    self.id,
                    status_reason="completed",
                    status_message=status_message,
                    force=force,
                    publish=True if self._get_runtime_properties().get("_publish_on_complete") else False,
                ),
                ignore_errors=ignore_errors,
            )
        else:
            resp = self.send(
                tasks.CompletedRequest(
                    self.id,
                    status_reason="completed",
                    status_message=status_message,
                    force=force,
                ),
                ignore_errors=ignore_errors,
            )
            # older servers: publish with a separate follow-up request if requested
            if self._get_runtime_properties().get("_publish_on_complete"):
                self.send(
                    tasks.PublishRequest(
                        self.id,
                        status_reason="completed",
                        status_message=status_message,
                        force=force,
                    ),
                    ignore_errors=ignore_errors,
                )
            return resp
    # very old client schema: fall back to marking the task as stopped
    return self.send(
        tasks.StoppedRequest(
            self.id,
            status_reason="completed",
            status_message=status_message,
            force=force,
        ),
        ignore_errors=ignore_errors,
    )
def mark_failed(
    self,
    ignore_errors: bool = True,
    status_reason: Optional[str] = None,
    status_message: Optional[str] = None,
    force: bool = False,
) -> ():
    """The signal that this Task failed (sets status to 'failed').

    :param ignore_errors: If True (default), do not raise on request failure.
    :param status_reason: Optional reason stored with the status change.
    :param status_message: Optional message shown on the Task's info panel.
    :param force: Force the status change regardless of current state.
    """
    return self.send(
        tasks.FailedRequest(
            task=self.id,
            status_reason=status_reason,
            status_message=status_message,
            force=force,
        ),
        ignore_errors=ignore_errors,
    )
def publish(self, ignore_errors: bool = True) -> ():
    """Publish this Task; requires the Task to be stopped or completed first.

    :param ignore_errors: If True (default), do not raise on request failure.
    :raises ValueError: If the Task is not in a terminal (stopped/completed) state.
    """
    terminal_states = (
        self.TaskStatusEnum.stopped,
        self.TaskStatusEnum.completed,
    )
    if self.status not in terminal_states:
        raise ValueError("Can't publish, Task is not stopped")
    response = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
    assert isinstance(response.response, tasks.PublishResponse)
    return response
def publish_on_completion(self, enable: bool = True) -> ():
    """The signal that this task will be published automatically on task completion.

    Stored as the "_publish_on_complete" runtime property, read by mark_completed().
    """
    self._set_runtime_properties(runtime_properties={"_publish_on_complete": enable})
def _delete(
    self,
    delete_artifacts_and_models: bool = True,
    skip_models_used_by_other_tasks: bool = True,
    raise_on_error: bool = False,
    callback: Callable[[str, str], bool] = None,
) -> bool:
    """
    Delete the task as well as its output models and artifacts.
    Models and artifacts are deleted from their storage locations, each using its URI.

    Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
    configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
    are properly configured and that you have delete permission in the related buckets).

    :param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True).
        If callback is provided, this argument is ignored.
    :param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
    :param raise_on_error: If True, an exception will be raised when encountering an error.
        If False, an error would be printed and no exception will be raised.
    :param callback: An optional callback accepting a uri type (string) and a uri (string) that will be called
        for each artifact and model. If provided, the delete_artifacts_and_models is ignored.
        Return True to indicate the artifact/model should be deleted or False otherwise.
    :return: True if the task was deleted successfully.
    """
    try:
        # phase 1: fetch the task and collect everything to delete BEFORE
        # actually deleting the task record itself
        res = self.send(tasks.GetByIdRequest(self.task_id))
        task = res.response.task
        if task.status == self.TaskStatusEnum.published:
            # published tasks are immutable and must not be deleted
            if raise_on_error:
                raise self.DeleteError("Cannot delete published task {}".format(self.task_id))
            self.log.error("Cannot delete published task {}".format(self.task_id))
            return False
        execution = {}
        models_res = []
        if delete_artifacts_and_models or callback:
            execution = task.execution.to_dict() if task.execution else {}
            models_res = self.send(models.GetAllRequest(task=[task.id], only_fields=["id", "uri"])).response.models
            # let the callback veto each model ("output_model" vs plain "model")
            models_res = [
                m
                for m in models_res
                if not callback
                or callback(
                    "output_model" if task.output and (m.id == task.output.model) else "model",
                    m.uri,
                )
            ]
        # collect debug-image and image-plot URLs from the task's events
        event_uris = []
        event_uris.extend(
            [
                x
                for x in filter(
                    None,
                    self._get_all_events(
                        event_type="training_debug_image",
                        unique_selector=itemgetter("url"),
                        batch_size=10000,
                    ),
                )
                if not callback or callback("debug_images", x)
            ]
        )
        event_uris.extend(
            [x for x in filter(None, self._get_image_plot_uris()) if not callback or callback("image_plot", x)]
        )
        # output-mode artifacts only (input artifacts belong to other tasks)
        artifact_uris = []
        if delete_artifacts_and_models or callback:
            artifact_uris = [
                e["uri"]
                for e in execution["artifacts"]
                if e["mode"] == "output" and (not callback or callback("artifact", e["uri"]))
            ]
        # phase 2: delete the task record itself
        task_deleted = self.send(tasks.DeleteRequest(self.task_id, force=True))
        if not task_deleted.ok():
            if raise_on_error:
                raise self.DeleteError("Failed deleting task {}".format(self.task_id))
            self.log.error("Failed deleting task {}".format(self.task_id))
            return False
    except self.DeleteError:
        # re-raise our own errors untouched (raised above when raise_on_error)
        raise
    except Exception as ex:
        if raise_on_error:
            raise self.DeleteError("Task deletion failed: {}".format(ex))
        self.log.error("Task deletion failed: {}".format(ex))
        return False
    # phase 3: delete the collected storage objects, accumulating failures
    failures = []
    for uri in artifact_uris:
        if not self._delete_uri(uri):
            failures.append(uri)
    for m in models_res:
        # noinspection PyBroadException
        try:
            is_output_model = task.output and (m.id == task.output.model)
            res = self.send(
                models.DeleteRequest(m.id, force=(not skip_models_used_by_other_tasks)),
                ignore_errors=is_output_model,
            )
            # Should delete if model was deleted or if this was the output model (which was already deleted
            # by DeleteRequest, and it's URI is dangling
            should_delete = is_output_model or res.response.deleted
        except SendError as ex:
            if (ex.result.meta.result_code, ex.result.meta.result_subcode) == (
                400,
                201,
            ):
                # Model not found, already deleted by DeleteRequest
                should_delete = True
            else:
                failures.append("model id: {}".format(m.id))
                continue
        except Exception:
            failures.append("model id: {}".format(m.id))
            continue
        if should_delete and not self._delete_uri(m.uri):
            failures.append(m.uri)
    for uri in event_uris:
        if not self._delete_uri(uri):
            failures.append(uri)
    failures = list(filter(None, failures))
    if len(failures):
        error = "Failed deleting the following URIs:\n{}".format("\n".join(failures))
        if raise_on_error:
            raise self.DeleteError(error)
        self.log.error(error)
    # the task record was deleted even if some storage objects failed
    return task_deleted
def _delete_uri(self, uri: str, silent: bool = False) -> bool:
    """Best-effort deletion of a single storage object; returns True on success."""
    # noinspection PyBroadException
    try:
        if StorageHelper.get(uri).delete(uri, silent=silent):
            self.log.debug("Deleted file: {}".format(uri))
            return True
    except Exception as ex:
        self.log.error("Failed deleting {}: {}".format(uri, str(ex)))
        return False
    # delete() reported failure without raising
    return False
def _get_image_plot_uris(self) -> Set[str]:
    """Collect the source-image URIs referenced by this Task's "plot" events."""
    def image_source_selector(d: Dict[str, Any]) -> Optional[str]:
        # extract the first layout-image "source" URL from a plotly JSON blob;
        # returns None (implicitly) for events without a usable plot payload
        plot = d.get("plot_str")
        if plot:
            # noinspection PyBroadException
            try:
                plot = json.loads(plot)
                return next(
                    filter(
                        None,
                        (image.get("source") for image in plot.get("layout", {}).get("images", [])),
                    ),
                    None,
                )
            except Exception:
                # malformed plot JSON: treat as "no image"
                pass
    return self._get_all_events(event_type="plot", unique_selector=image_source_selector, batch_size=10000)
def update_model_desc(self, new_model_desc_file: Optional[str] = None) -> ():
    """Change the Task's model description to the contents of a local file.

    :param new_model_desc_file: Path to a local text file holding the new
        model description/design.
    :raises ValueError: If no file path was provided.
    :raises IOError: If the provided path does not point to an existing file.
    :return: The edit response from the backend.
    """
    if not new_model_desc_file:
        # fail fast with a clear message (Path(None) would raise an obscure TypeError)
        raise ValueError("Expected a model description file path")
    with self._edit_lock:
        self.reload()
        execution = self._get_task_property("execution")
        p = Path(new_model_desc_file)
        if not p.is_file():
            raise IOError("model_desc file %s cannot be found" % new_model_desc_file)
        new_model_desc = p.read_text()
        # overwrite the first existing design entry, or create a "design" one
        model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else "design"
        execution.model_desc[model_desc_key] = new_model_desc
        res = self._edit(execution=execution)
        return res.response
def update_output_model(
    self,
    model_path: str,
    name: Optional[str] = None,
    comment: Optional[str] = None,
    tags: Optional[Sequence[str]] = None,
    model_name: Optional[str] = None,
    iteration: Optional[int] = None,
    auto_delete_file: bool = True,
) -> str:
    """
    Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output
    destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method),
    then ClearML updates the model object associated with the Task. The API call uses the URI
    of the uploaded file, and other values provided by additional arguments.

    Notice: A local model file will be uploaded to the task's `output_uri` destination,
    If no `output_uri` was specified, the default files-server will be used to store the model file/s.

    :param model_path: A local weights file or folder to be uploaded.
        If remote URI is provided (e.g. ``http://`` or ``s3://`` etc) then the URI is stored as is, without any upload
    :param name: The updated model name.
        If not provided, the name is the model weights file filename without the extension.
    :param comment: The updated model description. (Optional)
    :param tags: The updated model tags. (Optional)
    :param model_name: If provided the model name as it will appear in the model artifactory. (Optional)
        Default: Task.name - name
    :param iteration: iteration number for the current stored model (Optional)
    :param bool auto_delete_file: Delete the temporary file after uploading (Optional)

        - ``True`` - Delete (Default)
        - ``False`` - Do not delete

    :return: The URI of the uploaded weights file.
        Notice: upload is done is a background thread, while the function call returns immediately
    """
    # upload destination: task's own storage_uri, falling back to default report storage
    output_uri = self.storage_uri or self._get_default_report_storage_uri()
    from ...model import OutputModel

    # model artifactory name defaults to "<task name> - <name>" (or just the task name)
    output_model = OutputModel(
        task=self,
        name=model_name or ("{} - {}".format(self.name, name) if name else self.name),
        tags=tags,
        comment=comment,
    )
    output_model.connect(task=self, name=name)
    # weights upload happens in a background thread; url is returned immediately
    url = output_model.update_weights(
        weights_filename=model_path,
        upload_uri=output_uri,
        iteration=iteration,
        auto_delete_file=auto_delete_file,
    )
    return url
@property
def labels_stats(self) -> dict:
    """Get accumulated label stats for the current/last frames iteration."""
    return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats: dict, reset: bool = False) -> ():
    """Merge per-label counters from roi_stats into the accumulated label stats.

    :param roi_stats: Mapping of label -> count to add.
    :param reset: If True, clear the accumulated stats before merging.
    """
    if reset:
        self._curr_label_stats = {}
    accumulated = self._curr_label_stats
    for label, count in roi_stats.items():
        if label in accumulated:
            accumulated[label] += count
        else:
            accumulated[label] = count
def set_input_model(
    self,
    model_id: str = None,
    model_name: Optional[str] = None,
    update_task_design: bool = True,
    update_task_labels: bool = True,
    name: Optional[str] = None,
) -> ():
    """
    Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
    Task's input model.

    :param model_id: The ID of the model on the **ClearML Server** (backend). If ``model_name`` is not specified,
        then ``model_id`` must be specified.
    :param model_name: The model name in the artifactory. The model_name is used to locate an existing model
        in the **ClearML Server** (backend). If ``model_id`` is not specified,
        then ``model_name`` must be specified.
    :param update_task_design: Update the Task's design

        - ``True`` - ClearML copies the Task's model design from the input model.
        - ``False`` - ClearML does not copy the Task's model design from the input model.

    :param update_task_labels: Update the Task's label enumeration

        - ``True`` - ClearML copies the Task's label enumeration from the input model.
        - ``False`` - ClearML does not copy the Task's label enumeration from the input model.

    :param name: Model section name to be stored on the Task (unrelated to the model object name itself)
        Default: the model weight filename is used (excluding file extension)
    """
    if model_id is None and not model_name:
        raise ValueError("Expected one of [model_id, model_name]")
    if model_name and not model_id:
        # Try getting the model by name. Limit to 10 results.
        res = self.send(
            models.GetAllRequest(
                name=exact_match_regex(model_name),
                ready=True,
                page=0,
                page_size=10,
                order_by=["-created"],
                only_fields=["id", "created", "uri"],
            )
        )
        model = get_single_result(
            entity="model",
            query=model_name,
            results=res.response.models,
            log=self.log,
        )
        model_id = model.id
    if model_id:
        # fetch the full model object to read its labels/readiness
        res = self.send(models.GetByIdRequest(model=model_id))
        model = res.response.model
        if not model.ready:
            # raise ValueError('Model %s is not published (not ready)' % model_id)
            self.log.debug("Model %s [%s] is not published yet (not ready)" % (model_id, model.uri))
    else:
        # clear the input model (empty model_id removes the reference)
        model = None
        model_id = ""
    from ...model import InputModel

    # noinspection PyProtectedMember
    name = name or InputModel._get_connect_name(model)
    with self._edit_lock:
        self.reload()
        # store model id
        if Session.check_min_api_version("2.13"):
            self.send(
                tasks.AddOrUpdateModelRequest(
                    task=self.id,
                    name=name,
                    model=model_id,
                    type=tasks.ModelTypeEnum.input,
                )
            )
        else:
            # backwards compatibility: single input model under execution.model
            self._set_task_property(
                "execution.model",
                model_id,
                raise_on_error=False,
                log_on_error=False,
            )
        # Auto populate from model, if empty
        if update_task_labels and not self.data.execution.model_labels:
            self.data.execution.model_labels = model.labels if model else {}
        self._edit(execution=self.data.execution)
def get_parameters(self, backwards_compatibility: bool = True, cast: bool = False) -> Optional[dict]:
    """
    Get the parameters for a Task. This method returns a complete group of key-value parameter pairs, but does not
    support parameter descriptions (the result is a dictionary of key-value pairs).
    Notice the returned parameter dict is flat:
    i.e. ``{'Args/param': 'value'}`` is the argument "param" from section "Args"

    :param backwards_compatibility: If True (default), parameters without section name
        (API version ``<2.9``, clearml-server ``<0.16``) will be at dict root level.
        If False, parameters without section name, will be nested under "Args/" key.
    :param cast: If True, cast the parameter to the original type. Default False,
        values are returned in their string representation

    :return: dict of the task parameters, all flattened to key/value.
        Different sections with key prefix "section/"
    """
    if not Session.check_min_api_version("2.9"):
        # old servers: raw execution.parameters (may be None)
        return self._get_task_property("execution.parameters")
    # API will makes sure we get old parameters with type legacy on top level (instead of nested in Args)
    parameters = dict()
    hyperparams = self._get_task_property("hyperparams") or {}
    if not backwards_compatibility:
        # always key by "section/name", even for legacy parameters
        for section in hyperparams:
            for key, section_param in hyperparams[section].items():
                parameters["{}/{}".format(section, key)] = (
                    cast_basic_type(section_param.value, section_param.type) if cast else section_param.value
                )
    else:
        for section in hyperparams:
            for key, section_param in hyperparams[section].items():
                v = cast_basic_type(section_param.value, section_param.type) if cast else section_param.value
                # legacy params from the legacy section are surfaced at root level
                if section_param.type == "legacy" and section in (self._legacy_parameters_section_name,):
                    parameters["{}".format(key)] = v
                else:
                    parameters["{}/{}".format(section, key)] = v
    return parameters
def set_parameters(self, *args: dict, **kwargs: Any) -> ():
    """
    Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
    support parameter descriptions (the input is a dictionary of key-value pairs).
    Notice the parameter dict is flat:
    i.e. ``{'Args/param': 'value'}`` will set the argument "param" in section "Args" to "value"

    :param args: Positional arguments, which are one or more dictionaries or (key, value) iterable. They are
        merged into a single key-value pair dictionary.
    :param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
    """
    # __update=False: replace the stored parameters (see update_parameters for merging)
    return self._set_parameters(*args, __update=False, **kwargs)
def _set_parameters(self, *args: dict, **kwargs: Any) -> ():
    """
    Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
    support parameter descriptions (the input is a dictionary of key-value pairs).

    Control keywords (popped from kwargs, not stored as parameters):
    ``__update`` merge instead of replace, ``__parameters_prefix`` section prefix,
    ``__parameters_descriptions`` / ``__parameters_types`` per-key metadata.

    :param args: Positional arguments, which are one or more dictionaries or (key, value) iterable. They are
        merged into a single key-value pair dictionary.
    :param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
    """
    def stringify(value: Any) -> str:
        # return empty string if value is None
        if value is None:
            return ""
        str_value = str(value)
        if isinstance(value, (tuple, list, dict)):
            # prefer JSON so containers round-trip on cast
            try:
                str_json = json.dumps(value)
                return str_json
            except TypeError:
                pass
        if isinstance(value, Enum):
            # remove the class name
            return str_value.partition(".")[2]
        return str_value

    if not all(isinstance(x, (dict, Iterable)) for x in args):
        raise ValueError("only dict or iterable are supported as positional arguments")
    prefix = kwargs.pop("__parameters_prefix", None)
    descriptions = kwargs.pop("__parameters_descriptions", None) or dict()
    params_types = kwargs.pop("__parameters_types", None) or dict()
    update = kwargs.pop("__update", False)
    # new parameters dict
    new_parameters = dict(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
    new_parameters.update(kwargs)
    if prefix:
        prefix = prefix.strip("/")
        new_parameters = dict(("{}/{}".format(prefix, k), v) for k, v in new_parameters.items())
    # verify parameters type:
    not_allowed = {
        k: type(v).__name__
        for k, v in new_parameters.items()
        if not verify_basic_type(v, self._parameters_allowed_types)
    }
    if not_allowed:
        # warn and drop unsupported values rather than failing the whole call
        self.log.warning(
            "Parameters must be of builtin type ({})".format(
                ", ".join("%s[%s]" % p for p in not_allowed.items()),
            )
        )
        new_parameters = {k: v for k, v in new_parameters.items() if k not in not_allowed}
    use_hyperparams = Session.check_min_api_version("2.9")
    with self._edit_lock:
        self.reload()
        # if we have a specific prefix and we use hyperparameters, and we use set.
        # overwrite only the prefix, leave the rest as is.
        if not update and prefix:
            parameters = copy(self.get_parameters() or {})
            parameters = dict((k, v) for k, v in parameters.items() if not k.startswith(prefix + "/"))
        elif update:
            parameters = copy(self.get_parameters() or {})
        else:
            parameters = dict()
        parameters.update(new_parameters)
        if use_hyperparams:
            # build nested dict from flat parameters dict:
            org_hyperparams = self.data.hyperparams or {}
            hyperparams = dict()
            # if the task is a legacy task, we should put everything back under Args/key with legacy type
            legacy_name = self._legacy_parameters_section_name
            org_legacy_section = org_hyperparams.get(legacy_name, dict())
            for k, v in parameters.items():
                # legacy variable
                if org_legacy_section.get(k, tasks.ParamsItem()).type == "legacy":
                    section = hyperparams.get(legacy_name, dict())
                    section[k] = copy(org_legacy_section[k])
                    section[k].value = stringify(v)
                    description = descriptions.get(k)
                    if description:
                        section[k].description = description
                    hyperparams[legacy_name] = section
                    continue
                org_k = k
                # un-sectioned keys fall into the default section
                if "/" not in k:
                    k = "{}/{}".format(self._default_configuration_section_name, k)
                section_name, key = k.split("/", 1)
                section = hyperparams.get(section_name, dict())
                org_param = org_hyperparams.get(section_name, dict()).get(key, None)
                # type resolution: explicit caller type > existing stored type > inferred from value
                param_type = params_types.get(org_k) or (
                    org_param.type
                    if org_param is not None and (org_param.type or v == "")
                    else get_basic_type(v)
                    if v is not None
                    else None
                )
                if param_type and not isinstance(param_type, str):
                    param_type = param_type.__name__ if hasattr(param_type, "__name__") else str(param_type)

                def create_description() -> str:
                    # keep an existing stored description when present
                    if org_param and org_param.description:
                        return org_param.description
                    # don't use get(org_k, "") here in case org_k in descriptions and the value is None
                    created_description = descriptions.get(org_k) or ""
                    if isinstance(v, Enum):
                        # append enum values to description
                        if created_description:
                            created_description += "\n"
                        created_description += "Values:\n" + ",\n".join(
                            [enum_key for enum_key in type(v).__dict__.keys() if not enum_key.startswith("_")]
                        )
                    return created_description

                section[key] = tasks.ParamsItem(
                    section=section_name,
                    name=key,
                    value=stringify(v),
                    description=create_description(),
                    type=param_type,
                )
                hyperparams[section_name] = section
            self._edit(hyperparams=hyperparams)
            self.data.hyperparams = hyperparams
        else:
            # force cast all variables to strings (so that we can later edit them in UI)
            parameters = {k: stringify(v) for k, v in parameters.items()}
            execution = self.data.execution
            if execution is None:
                execution = tasks.Execution(
                    parameters=parameters,
                    artifacts=[],
                    dataviews=[],
                    model="",
                    model_desc={},
                    model_labels={},
                    docker_cmd="",
                )
            else:
                execution.parameters = parameters
            self._edit(execution=execution)
def set_parameter(
    self,
    name: str,
    value: str,
    description: Optional[str] = None,
    value_type: Optional[Any] = None,
) -> ():
    """
    Set a single Task parameter. This overrides any previous value for this parameter.

    :param name: The parameter name.
    :param value: The parameter value.
    :param description: The parameter description.
    :param value_type: The type of the parameters (cast to string and store)
    """
    if not Session.check_min_api_version("2.9"):
        # not supported yet (descriptions/types require hyperparams API >= 2.9)
        description = None
        value_type = None
    # __update=True merges this single key into the existing parameters
    self._set_parameters(
        {name: value},
        __update=True,
        __parameters_descriptions={name: description},
        __parameters_types={name: value_type},
    )
def get_parameter(self, name: str, default: Any = None, cast: bool = False) -> Any:
    """
    Get a value for a parameter.

    :param name: Parameter name (full "Section/name" form for sectioned hyperparams)
    :param default: Default value returned when the parameter is not defined
    :param cast: If value is found, cast to original type. If False, return string.

    :return: The Parameter value (or default value if parameter is not defined).
    """
    params = self.get_parameters(cast=cast)
    # get_parameters() may return None (e.g. pre-2.9 API with no stored
    # parameters); treat that the same as "parameter not found"
    if not params:
        return default
    return params.get(name, default)
def delete_parameter(self, name: str, force: bool = False) -> bool:
    """
    Delete a parameter by its full name Section/name.

    :param name: Parameter name in full, i.e. Section/name. For example, 'Args/batch_size'
    :param force: If set to True then both new and running task hyper params can be deleted.
        Otherwise only the new task ones. Default is False
    :return: True if the parameter was deleted successfully
    """
    if not Session.check_min_api_version("2.9"):
        raise ValueError(
            "Delete hyper-parameter is not supported by your clearml-server, upgrade to the latest version"
        )
    # the "force" flag is only understood by API >= 2.13
    force_kwargs = {}
    if Session.check_min_api_version("2.13"):
        force_kwargs["force"] = force
    with self._edit_lock:
        # split "Section/name" into its section and parameter-name parts
        paramkey = tasks.ParamKey(section=name.split("/", 1)[0], name=name.split("/", 1)[1])
        res = self.send(
            tasks.DeleteHyperParamsRequest(task=self.id, hyperparams=[paramkey], **force_kwargs),
            raise_on_errors=False,
        )
        self.reload()
    # offline mode sends nothing, so report success unconditionally
    return res.ok() if not self._offline_mode else True
def update_parameters(self, *args: dict, **kwargs: Any) -> ():
    """
    Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
    not support parameter descriptions (the input is a dictionary of key-value pairs).
    Notice the parameter dict is flat:
    i.e. ``{'Args/param': 'value'}`` will set the argument "param" in section "Args" to "value"

    :param args: Positional arguments, which are one or more dictionaries or (key, value) iterable. They are
        merged into a single key-value pair dictionary.
    :param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
    """
    # __update=True: merge into existing parameters (set_parameters replaces them)
    self._set_parameters(*args, __update=True, **kwargs)
def set_model_label_enumeration(self, enumeration: Mapping[str, int] = None) -> ():
    """
    Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}

    Passing None (or an empty dict) clears the stored label enumeration.

    :param dict enumeration: For example: {str(label): integer(id)}
    :raises ValueError: If ``enumeration`` is not a dict of str -> int.
    """
    # None is normalized to an empty dict (so None clears the labels); the
    # original also had an unreachable "if enumeration is None: return" after
    # this normalization, which has been removed (dead code, same behavior)
    enumeration = enumeration or {}
    with self._edit_lock:
        self.reload()
        execution = self.data.execution
        if not (
            isinstance(enumeration, dict)
            and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())
        ):
            raise ValueError("Expected label to be a dict[str => int]")
        execution.model_labels = enumeration
        self._edit(execution=execution)
def remove_input_models(self, models_to_remove: Sequence[Union[str, "BaseModel"]]) -> ():
"""
Remove input models from the current task. Note that the models themselves are not deleted,
but the tasks' reference to the models is removed.
To delete the models themselves, see `Models.remove`
:param models_to_remove: The models to remove from the task. Can be a list of ids,
or of `BaseModel` (including its subclasses: `Model` and `InputModel`)
"""
ids_to_remove = [model if isinstance(model, str) else model.id for model in models_to_remove]
with self._edit_lock:
self.reload()
self.data.models.input = [model for model in self.data.models.input if model.model not in ids_to_remove]
self._edit(models=self.data.models)
def _set_default_docker_image(self) -> ():
if not DOCKER_IMAGE_ENV_VAR.exists() and not DOCKER_BASH_SETUP_ENV_VAR.exists():
return
self.set_base_docker(
docker_cmd=DOCKER_IMAGE_ENV_VAR.get(default=""),
docker_setup_bash_script=DOCKER_BASH_SETUP_ENV_VAR.get(default=""),
)
def set_base_docker(
self,
docker_cmd: str,
docker_arguments: Optional[Union[str, Sequence[str]]] = None,
docker_setup_bash_script: Optional[Union[str, Sequence[str]]] = None,
) -> ():
"""
Set the base docker image for this experiment
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely the call is ignored
:param docker_cmd: docker container image (example: 'nvidia/cuda:11.1')
:param docker_arguments: docker execution parameters (example: '-e ENV=1')
:param docker_setup_bash_script: bash script to run at the
beginning of the docker before launching the Task itself. example: ['apt update', 'apt-get install -y gcc']
"""
image = docker_cmd.split(" ")[0] if docker_cmd else ""
if not docker_arguments and docker_cmd:
docker_arguments = docker_cmd.split(" ")[1:] if len(docker_cmd.split(" ")) > 1 else ""
arguments = (
(docker_arguments if isinstance(docker_arguments, str) else " ".join(docker_arguments))
if docker_arguments
else ""
)
if docker_setup_bash_script:
setup_shell_script = (
docker_setup_bash_script
if isinstance(docker_setup_bash_script, str)
else "\n".join(docker_setup_bash_script)
)
else:
setup_shell_script = ""
with self._edit_lock:
self.reload()
if Session.check_min_api_version("2.13"):
self.data.container = dict(
image=image,
arguments=arguments,
setup_shell_script=setup_shell_script,
)
self._edit(container=self.data.container)
else:
if setup_shell_script:
raise ValueError("Your ClearML-server does not support docker bash script feature, please upgrade.")
execution = self.data.execution
execution.docker_cmd = image + (" {}".format(arguments) if arguments else "")
self._edit(execution=execution)
def get_base_docker(self) -> str:
"""Get the base Docker command (image) that is set for this experiment."""
if Session.check_min_api_version("2.13"):
# backwards compatibility
container = self._get_task_property("container", raise_on_error=False, log_on_error=False, default={})
return (
container.get("image", "")
+ (" {}".format(container["arguments"]) if container.get("arguments", "") else "")
) or None
else:
return self._get_task_property("execution.docker_cmd", raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list: Sequence["tasks.Artifact"] = None) -> Optional[List["tasks.Artifact"]]:
"""
List of artifacts (tasks.Artifact) to update the task
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts or None if error.
"""
if not Session.check_min_api_version("2.3"):
return None
if not (
isinstance(artifacts_list, (list, tuple)) and all(isinstance(a, tasks.Artifact) for a in artifacts_list)
):
raise ValueError("Expected artifacts as List[tasks.Artifact]")
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return execution.artifacts or []
def _add_artifacts(self, artifacts_list: Sequence["tasks.Artifact"]) -> Optional[List["tasks.Artifact"]]:
"""
List of artifacts (tasks.Artifact) to add to the task
If an artifact by the same name already exists it will overwrite the existing artifact.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts
"""
if not Session.check_min_api_version("2.3"):
return None
if not (
isinstance(artifacts_list, (list, tuple)) and all(isinstance(a, tasks.Artifact) for a in artifacts_list)
):
raise ValueError("Expected artifacts as List[tasks.Artifact]")
with self._edit_lock:
if Session.check_min_api_version("2.13") and not self._offline_mode:
req = tasks.AddOrUpdateArtifactsRequest(task=self.task_id, artifacts=artifacts_list, force=True)
res = self.send(req, raise_on_errors=False)
if not res or not res.response or not res.response.updated:
return None
self.reload()
else:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return self.data.execution.artifacts or []
    def delete_artifacts(
        self,
        artifact_names: Sequence[str],
        raise_on_errors: bool = True,
        delete_from_storage: bool = True,
        silent_on_errors: bool = False,
    ) -> bool:
        """
        Delete a list of artifacts, by artifact name, from the Task.

        Thin public wrapper around :meth:`_delete_artifacts`; note the public default for
        ``raise_on_errors`` is True, while the internal default is False.

        :param list artifact_names: list of artifact names
        :param bool raise_on_errors: if True, do not suppress connectivity related exceptions
        :param bool delete_from_storage: If True, try to delete the actual
            file from the external storage (e.g. S3, GS, Azure, File Server etc.)
        :param silent_on_errors: If True, do not log connectivity related errors
        :return: True if successful
        """
        return self._delete_artifacts(
            artifact_names=artifact_names,
            raise_on_errors=raise_on_errors,
            delete_from_storage=delete_from_storage,
            silent_on_errors=silent_on_errors,
        )
    def _delete_artifacts(
        self,
        artifact_names: Sequence[str],
        raise_on_errors: bool = False,
        delete_from_storage: bool = True,
        silent_on_errors: bool = False,
    ) -> bool:
        """
        Delete a list of artifacts, by artifact name, from the Task.

        :param list artifact_names: list of artifact names
        :param bool raise_on_errors: if True, do not suppress connectivity related exceptions
        :param bool delete_from_storage: If True, try to delete the actual
            file from the external storage (e.g. S3, GS, Azure, File Server etc.)
        :param silent_on_errors: If True, do not log connectivity related errors
        :return: True if successful
        :raises ArtifactUriDeleteError: When ``raise_on_errors`` is set and deleting a stored
            file fails; carries the artifact, its URI, and the URIs not yet processed.
        """
        if not Session.check_min_api_version("2.3"):
            return False
        if not artifact_names:
            # Nothing to delete - treated as success.
            return True
        if not isinstance(artifact_names, (list, tuple)):
            raise ValueError("Expected artifact names as List[str]")
        uris = []
        with self._edit_lock:
            if delete_from_storage:
                # Collect storage URIs BEFORE removing the artifacts from the task,
                # since the URL is only available while the artifact entry still exists.
                if any(a not in self.artifacts for a in artifact_names):
                    self.reload()
                for artifact in artifact_names:
                    # noinspection PyBroadException
                    try:
                        uri = self.artifacts[artifact].url
                    except Exception:
                        if raise_on_errors:
                            raise
                        # Best-effort mode: remember a placeholder so positions still line up.
                        uri = None
                    uris.append(uri)
            if Session.check_min_api_version("2.13") and not self._offline_mode:
                # Newer servers support deleting artifacts via a dedicated request.
                req = tasks.DeleteArtifactsRequest(
                    task=self.task_id,
                    artifacts=[{"key": n, "mode": "output"} for n in artifact_names],
                    force=True,
                )
                res = self.send(req, raise_on_errors=raise_on_errors)
                if not res or not res.response or not res.response.deleted:
                    return False
                self.reload()
            else:
                # Fallback: rewrite the execution.artifacts list without the deleted keys.
                self.reload()
                execution = self.data.execution
                execution.artifacts = [a for a in execution.artifacts or [] if a.key not in artifact_names]
                self._edit(execution=execution)
        # check if we need to remove the actual files from an external storage, it can also be our file server
        if uris:
            for i, (artifact, uri) in enumerate(zip(artifact_names, uris)):
                # delete the actual file from storage, and raise if error and needed
                if uri and not self._delete_uri(uri, silent=silent_on_errors) and raise_on_errors:
                    # Report which URIs were not yet attempted so the caller can retry them.
                    remaining_uris = {name: uri for name, uri in zip(artifact_names[i + 1 :], uris[i + 1 :])}
                    raise ArtifactUriDeleteError(artifact=artifact, uri=uri, remaining_uris=remaining_uris)
        return True
def _set_model_design(self, design: str = None) -> ():
with self._edit_lock:
self.reload()
if Session.check_min_api_version("2.9"):
configuration = (
self._get_task_property(
"configuration",
default={},
raise_on_error=False,
log_on_error=False,
)
or {}
)
configuration[self._default_configuration_section_name] = tasks.ConfigurationItem(
name=self._default_configuration_section_name, value=str(design)
)
self._edit(configuration=configuration)
else:
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self) -> Mapping[str, int]:
"""
Get the label enumeration dictionary label enumeration dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self) -> str:
"""
Get the model configuration as blob of text.
:return: The model configuration as blob of text.
"""
if Session.check_min_api_version("2.9"):
design = (
self._get_task_property(
"configuration",
default={},
raise_on_error=False,
log_on_error=False,
)
or {}
)
if design:
design = design.get(sorted(design.keys())[0]).value or ""
else:
design = self._get_task_property(
"execution.model_desc",
default={},
raise_on_error=False,
log_on_error=False,
)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
    def get_random_seed(self) -> Optional[int]:
        """Return the random seed used for task initialization (currently a class-level value)."""
        # fixed seed for the time being
        return self._random_seed
@classmethod
def set_random_seed(cls, random_seed: Optional[int]) -> ():
"""
Set the default random seed for any new initialized tasks
:param random_seed: If None or False, disable random seed initialization. If True, use the default random seed,
otherwise use the provided int value for random seed initialization when initializing a new task.
"""
if random_seed is not None:
if isinstance(random_seed, bool):
random_seed = cls.__default_random_seed if random_seed else None
else:
random_seed = int(random_seed)
cls._random_seed = random_seed
    def set_project(
        self,
        project_id: Optional[str] = None,
        project_name: Optional[str] = None,
    ) -> ():
        """
        Set the project of the current task by either specifying a project name or ID

        :param project_id: Unique project ID to assign. If not provided, ``project_name`` is resolved to an ID.
        :param project_name: Project name to look up (exact match) when ``project_id`` is not given.
        """
        # if running remotely and we are the main task, skip setting ourselves.
        if self._is_remote_main_task():
            return
        if not project_id:
            assert isinstance(project_name, six.string_types)
            res = self.send(
                projects.GetAllRequest(name=exact_match_regex(project_name)),
                raise_on_errors=False,
            )
            # NOTE(review): returns False only when exactly one project is not found,
            # but None on success - callers checking the return value should be aware
            # of this asymmetry.
            if not res or not res.response or not res.response.projects or len(res.response.projects) != 1:
                return False
            project_id = res.response.projects[0].id
        assert isinstance(project_id, six.string_types)
        self._set_task_property("project", project_id)
        self._edit(project=project_id)
def get_project_name(self) -> Optional[str]:
"""
Get the current Task's project name.
"""
if self.project is None:
return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_project_object(self) -> "projects.Project":
"""Get the current Task's project as a python object."""
if self.project is None:
return self._project_object[1] if self._project_object and len(self._project_object) > 1 else None
if self._project_object and self._project_object[1] is not None and self._project_object[0] == self.project:
return self._project_object[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
self.log.warning("Project {} not found or no read access available".format(self.project))
return None
self._project_object = (self.project, res.response.project)
return self._project_object[1]
    def get_tags(self) -> Sequence[str]:
        """Get all current Task's tags (the user-level tags, not system tags)."""
        return self._get_task_property("tags")
def set_system_tags(self, tags: Sequence[str]) -> ():
assert isinstance(tags, (list, tuple))
tags = list(set(tags))
if Session.check_min_api_version("2.3"):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self) -> Sequence[str]:
return self._get_task_property("system_tags" if Session.check_min_api_version("2.3") else "tags")
def set_tags(self, tags: Sequence[str]) -> ():
"""
Set the current Task's tags. Please note this will overwrite anything that is there already.
:param Sequence(str) tags: Any sequence of tags to set.
"""
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version("2.3"):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name: str) -> ():
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
name = str(name) or ""
self._set_task_property("name", name)
self._edit(name=name)
self.data.name = name
def set_parent(self, parent: Optional[Union[str, "Task"]]) -> ():
"""
Set the parent task for the Task.
:param parent: The parent task ID (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.id
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment: str) -> ():
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
comment = comment or ""
self._set_task_property("comment", str(comment))
self._edit(comment=str(comment))
def set_task_type(self, task_type: Union[str, "Task.TaskTypes"]) -> ():
"""
Set the task_type for the Task.
:param task_type: The task_type of the Task.
Valid task types:
- ``TaskTypes.training``
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:type task_type: str or TaskTypes
"""
if not isinstance(task_type, self.TaskTypes):
task_type = self.TaskTypes(task_type)
self._set_task_property("task_type", str(task_type))
self._edit(type=task_type)
def set_archived(self, archive: bool) -> ():
"""
Archive the Task or remove it from the archived folder.
:param archive: If True, archive the Task. If False, make sure it is removed from the archived folder
"""
with self._edit_lock:
system_tags = (
list(set(self.get_system_tags()) | {self.archived_tag})
if archive
else list(set(self.get_system_tags()) - {self.archived_tag})
)
self.set_system_tags(system_tags)
def get_archived(self) -> bool:
"""
Return the Archive state of the Task
:return: If True, the Task is archived, otherwise it is not.
"""
return self.archived_tag in self.get_system_tags()
def set_initial_iteration(self, offset: int = 0) -> int:
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
..code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
    def get_initial_iteration(self) -> int:
        """
        Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
        from previous checkpoints.

        :return: The initial iteration offset.
        """
        # Plain accessor - the offset is mutated only via set_initial_iteration().
        return self._initial_iteration_offset
def get_status(self) -> str:
"""
Return The task status without refreshing the entire Task object (only the status property)
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status, status_message = self.get_status_message()
return str(status)
def get_output_log_web_page(self) -> str:
"""
Return the Task results & outputs web page address.
For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log
:return: ``http/s`` URL link.
"""
return self.get_task_output_log_web_page(
task_id=self.id,
project_id=self.project,
app_server_host=self._get_app_server(),
)
def get_reported_scalars(
self, max_samples: int = 0, x_axis: str = "iter"
) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]:
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Value is a dict with 'x': values and 'y': values
.. note::
This call is not cached, any call will retrieve all the scalar reports from the back-end.
If the Task has many scalars reported, it might take long for the call to return.
.. note::
Calling this method will return potentially downsampled scalars. The maximum number of returned samples is 5000.
Even when setting `max_samples` to a value larger than 5000, it will be limited to at most 5000 samples.
To fetch all scalar values, please see the :meth:`Task.get_all_reported_scalars`.
Example:
.. code-block:: py
{"title": {"series": {
"x": [0, 1 ,2],
"y": [10, 11 ,12]
}}}
:param int max_samples: Maximum samples per series to return. Default is 0 returning up to 5000 samples.
With sample limit, average scalar values inside sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': timestamp as milliseconds since epoch, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ("iter", "timestamp", "iso_time"):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(
task=self.id,
key=x_axis,
samples=max(1, max_samples) if max_samples else None,
),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return {}
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
    def get_all_reported_scalars(
        self, x_axis: str = "iter"
    ) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]:
        """
        Return a nested dictionary for the all scalar graphs, containing all the registered samples,
        where the first key is the graph title and the second is the series name.
        Value is a dict with 'x': values and 'y': values.
        To fetch downsampled scalar values, please see the :meth:`Task.get_reported_scalars`.

        .. note::
           This call is not cached, any call will retrieve all the scalar reports from the back-end.
           If the Task has many scalars reported, it might take long for the call to return.

        :param str x_axis: scalar x_axis, possible values:
            'iter': iteration (default), 'timestamp': timestamp as milliseconds since epoch, 'iso_time': absolute time
        :return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
        """
        reported_scalars = {}
        batch_size = 1000
        scroll_id = None
        # Page through all scalar events using the server's scroll mechanism.
        while True:
            response = self.send(
                events.GetTaskEventsRequest(
                    task=self.id,
                    event_type="training_stats_scalar",
                    scroll_id=scroll_id,
                    batch_size=batch_size,
                )
            )
            if not response:
                # Request failed - return whatever was accumulated so far.
                return reported_scalars
            response = response.wait()
            if not response.ok() or not response.response_data:
                return reported_scalars
            response = response.response_data
            for event in response.get("events", []):
                metric = event["metric"]
                variant = event["variant"]
                if x_axis in ["timestamp", "iter"]:
                    x_val = event[x_axis]
                else:
                    # 'iso_time': convert the millisecond timestamp to an ISO-8601 string.
                    x_val = (
                        datetime.utcfromtimestamp(event["timestamp"] / 1000).isoformat(timespec="milliseconds") + "Z"
                    )
                y_val = event["value"]
                reported_scalars.setdefault(metric, {})
                reported_scalars[metric].setdefault(variant, {"name": variant, "x": [], "y": []})
                # Collapse consecutive events sharing the same x value: keep only the
                # latest y for that x instead of appending a duplicate point.
                if (
                    len(reported_scalars[metric][variant]["x"]) == 0
                    or reported_scalars[metric][variant]["x"][-1] != x_val
                ):
                    reported_scalars[metric][variant]["x"].append(x_val)
                    reported_scalars[metric][variant]["y"].append(y_val)
                else:
                    reported_scalars[metric][variant]["y"][-1] = y_val
            # Stop when the server returned a partial page or no scroll handle.
            if response.get("returned", 0) < batch_size or not response.get("scroll_id"):
                break
            scroll_id = response["scroll_id"]
        return reported_scalars
def get_reported_plots(self, max_iterations: Optional[int] = None) -> List[dict]:
"""
Return a list of all the plots reported for this Task,
Notice the plot data is plotly compatible.
.. note::
This call is not cached, any call will retrieve all the plot reports from the back-end.
If the Task has many plots reported, it might take long for the call to return.
Example:
.. code-block:: py
[{
"timestamp": 1636921296370,
"type": "plot",
"task": "0ce5e89bbe484f428e43e767f1e2bb11",
"iter": 0,
"metric": "Manual Reporting",
"variant": "Just a plot",
"plot_str": "{'data': [{'type': 'scatter', 'mode': 'markers', 'name': null,
'x': [0.2620246750155817], 'y': [0.2620246750155817]}]}",
"@timestamp": "2021-11-14T20:21:42.387Z",
"worker": "machine-ml",
"plot_len": 6135,
},]
:param int max_iterations: Maximum number of historic plots (iterations from end) to return.
:return: list: List of dicts, each one represents a single plot
"""
# send request
res = self.send(
events.GetTaskPlotsRequest(
task=self.id,
iters=max_iterations or 1,
_allow_extra_fields_=True,
no_scroll=True,
),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return []
response = res.wait()
if not response.ok():
return []
if not response.response_data:
return []
return response.response_data.get("plots", [])
def get_reported_console_output(self, number_of_reports: int = 1) -> Sequence[str]:
"""
Return a list of console outputs reported by the Task. Retrieved outputs are the most updated console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most updated) console output
:return: A list of strings, each entry corresponds to one report.
"""
if Session.check_min_api_version("2.9"):
request = events.GetTaskLogRequest(
task=self.id,
order="asc",
navigate_earlier=True,
batch_size=number_of_reports,
)
else:
request = events.GetTaskLogRequest(task=self.id, order="asc", from_="tail", batch_size=number_of_reports)
res = self.send(request)
response = res.wait()
if not response.ok() or not response.response_data.get("events"):
return []
lines = [r.get("msg", "") for r in response.response_data["events"]]
return lines
    def get_configuration_object(self, name: str) -> Optional[str]:
        """
        Get the Task's configuration object section as a blob of text
        Use only for automation (externally), otherwise use `Task.connect_configuration`.

        :param str name: Configuration section name
        :return: The Task's configuration as a text blob (unconstrained text string)
            return None if configuration name is not valid
        """
        # Thin wrapper over the internal text-configuration accessor.
        return self._get_configuration_text(name)
    def get_configuration_object_as_dict(self, name: str) -> Optional[Union[dict, list]]:
        """
        Get the Task's configuration object section as parsed dictionary
        Parsing supports JSON and HOCON, otherwise parse manually with `get_configuration_object()`
        Use only for automation (externally), otherwise use `Task.connect_configuration`.

        :param str name: Configuration section name
        :return: The Task's configuration as a parsed dict.
            return None if configuration name is not valid
        """
        # Thin wrapper over the internal dict-configuration accessor.
        return self._get_configuration_dict(name)
def get_configuration_objects(self) -> Optional[Mapping[str, str]]:
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:return: The Task's configurations as a dict (config name as key) and text blob as value (unconstrained text
string)
"""
if not Session.check_min_api_version("2.9"):
raise ValueError(
"Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version"
)
configuration = self.data.configuration or {}
return {k: v.value for k, v in configuration.items()}
def get_reported_single_values(self) -> Dict[str, float]:
"""
Get all reported single values as a dictionary, where the keys are the names of the values
and the values of the dictionary are the actual reported values.
:return: A dict containing the reported values
"""
if not Session.check_min_api_version("2.20"):
raise ValueError(
"Current 'clearml-server' does not support getting reported single values. "
"Please upgrade to the latest version"
)
res = self.send(events.GetTaskSingleValueMetricsRequest(tasks=[self.id]))
res = res.wait()
if not res.ok() or not res.response_data.get("tasks"):
return {}
result = {}
for value in res.response_data["tasks"][0].get("values", []):
result[value.get("variant")] = value.get("value")
return result
def get_reported_single_value(self, name: str) -> Optional[float]:
"""
Get a single reported value, identified by its name. Note that this function calls
`Task.get_reported_single_values`.
:param name: The name of the reported value
:return: The actual value of the reported value, if found. Otherwise, returns None
"""
return self.get_reported_single_values().get(name)
def set_configuration_object(
self,
name: str,
config_text: Optional[str] = None,
description: Optional[str] = None,
config_type: Optional[str] = None,
config_dict: Optional[Union[dict, list]] = None,
) -> None:
"""
Set the Task's configuration object as a blob of text or automatically encoded dictionary/list.
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:param config_text: configuration as a blob of text (unconstrained text string)
usually the content of a configuration file of a sort
:param str description: Configuration section description
:param str config_type: Optional configuration format type
:param dict config_dict: configuration dictionary/list to be encoded using HOCON (json alike) into stored text
Notice you can either pass `config_text` or `config_dict`, not both
"""
return self._set_configuration(
name=name,
description=description,
config_type=config_type,
config_text=config_text,
config_dict=config_dict,
)
    @classmethod
    def get_projects(cls, **kwargs: Any) -> List["projects.Project"]:
        """
        Return a list of projects in the system, sorted by last updated time

        :param kwargs: Extra request fields; ``page``, ``page_size`` and ``order_by`` are
            consumed locally, the rest is forwarded to ``projects.GetAllRequest``.
        :return: A list of all the projects in the system. Each entry is a `services.projects.Project` object.
        """
        ret_projects = []
        # page starts at -1 so the first loop iteration (page 0) always runs.
        page = kwargs.pop("page", -1)
        page_size = kwargs.pop("page_size", 500)
        order_by = kwargs.pop("order_by", ["last_update"])
        res = None
        # Keep fetching pages while the previous page came back completely full
        # (a partial page means we reached the end).
        while page == -1 or (
            res and res.response and res.response.projects and len(res.response.projects) == page_size
        ):
            page += 1
            res = cls._send(
                cls._get_default_session(),
                projects.GetAllRequest(order_by=order_by, page=page, page_size=page_size, **kwargs),
                raise_on_errors=True,
            )
            if res and res.response and res.response.projects:
                ret_projects.extend([projects.Project(**p.to_dict()) for p in res.response.projects])
        return ret_projects
@classmethod
def get_project_id(cls, project_name: str, search_hidden: bool = True) -> Optional[str]:
"""
Return a project's unique ID (str).
If more than one project matched the project_name, return the last updated project
If no project matched the requested name, returns None
:return: Project unique ID (str), or None if no project was found.
"""
assert project_name
assert isinstance(project_name, str)
extra = {"search_hidden": search_hidden} if Session.check_min_api_version("2.20") else {}
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=["last_update"], name=exact_match_regex(project_name), **extra),
raise_on_errors=False,
)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()).id for p in res.response.projects][0]
return None
@staticmethod
def running_locally() -> bool:
"""
Is the task running locally (i.e., ``clearml-agent`` is not executing it)
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
    @classmethod
    def add_requirements(cls, package_name: str, package_version: Optional[str] = None) -> None:
        """
        Force the adding of a package to the requirements list. If ``package_version`` is None, use the
        installed package version, if found.
        Example: ``Task.add_requirements('tensorflow', '2.4.0')``
        Example: ``Task.add_requirements('tensorflow', '>=2.4')``
        Example: ``Task.add_requirements('tensorflow')`` -> use the installed tensorflow version
        Example: ``Task.add_requirements('tensorflow', '')`` -> no version limit
        Alternatively, you can add all requirements from a file:
        Example: ``Task.add_requirements('/path/to/your/project/requirements.txt')``

        .. note::
            Task.add_requirements does not directly modify the task's requirements. Instead, it improves the accuracy
            of capturing a task's Python packages. To explicitly change task requirements, use
            Task.set_packages, which overwrites existing packages with the specified ones.

        :param str package_name: The package name or path to a requirements file
            to add to the "Installed Packages" section of the task.
        :param package_version: The package version requirements. If ``None``, then use the installed version.
        """
        # Warn (but still record) when called after Task.init() - the requirement may not take effect.
        if not running_remotely() and hasattr(cls, "current_task") and cls.current_task():
            get_logger("task").warning("Requirement ignored, Task.add_requirements() must be called before Task.init()")
        # If the argument is not an existing file path, treat it as a single package name.
        if not os.path.exists(package_name):
            cls._force_requirements[package_name] = package_version
            return
        # Otherwise parse it as a requirements file, one forced requirement per entry.
        try:
            import pkg_resources
        except ImportError:
            get_logger("task").warning(
                "Requirement file `{}` skipped since pkg_resources is not installed".format(package_name)
            )
        else:
            with Path(package_name).open() as requirements_txt:
                for req in pkg_resources.parse_requirements(requirements_txt):
                    # Honor environment markers (e.g. `; python_version < "3.8"`).
                    if req.marker is None or pkg_resources.evaluate_marker(str(req.marker)):
                        cls._force_requirements[req.name] = str(req.specifier)
@classmethod
def ignore_requirements(cls, package_name: str) -> None:
"""
Ignore a specific package when auto generating the requirements list.
Example: Task.ignore_requirements('pywin32')
:param str package_name: The package name to remove/ignore from the "Installed Packages" section of the task.
"""
if not running_remotely() and hasattr(cls, "current_task") and cls.current_task():
get_logger("task").warning(
"Requirement ignored, Task.ignore_requirements() must be called before Task.init()"
)
cls._ignore_requirements.add(str(package_name))
@classmethod
def force_requirements_env_freeze(
cls,
force: bool = True,
requirements_file: Optional[Union[str, Path]] = None,
) -> None:
"""
Force the use of ``pip freeze`` or ``conda list`` to capture the requirements from the active
environment (instead of statically analyzing the running code and listing directly imported packages).
Notice: Must be called before `Task.init` !
:param force: If ``True`` (default), force the use of ``pip freeze`` or ``conda list`` to capture the
requirements. If ``False``, ClearML statistically analyzes the code for requirements.
:param requirements_file: (Optional) Pass a requirements.txt file to specify the required packages (instead of
``pip freeze`` or automatic analysis). This will overwrite any existing requirement listing.
"""
if not running_remotely() and hasattr(cls, "current_task") and cls.current_task():
get_logger("task").warning(
"Requirements env freeze ignored, "
"Task.force_requirements_env_freeze() must be called before Task.init()"
)
cls._force_use_pip_freeze = requirements_file if requirements_file else bool(force)
@classmethod
def force_store_standalone_script(cls, force: bool = True) -> None:
    """
    Store the main python file as a single standalone script instead of linking
    it to the local git repository/commit. Must be called before ``Task.init``!

    :param force: Whether to force standalone-script storage.
    """
    # Warn when this runs after Task.init() locally - the script section
    # was already populated at init time.
    called_after_init = (
        not running_remotely() and hasattr(cls, "current_task") and cls.current_task()
    )
    if called_after_init:
        get_logger("task").warning(
            "Store standalone script ignored, "
            "Task.force_store_standalone_script() must be called before Task.init()"
        )
    cls._force_store_standalone_script = bool(force)
def _set_random_seed_used(self, random_seed: Optional[int]) -> None:
    """Record the random seed reported for this task (local state only, no backend call)."""
    self._random_seed = random_seed
def _get_default_report_storage_uri(self) -> str:
    """Return the default destination URI for reported data (files server, or the offline folder)."""
    if self._offline_mode:
        # Offline sessions write everything under the local offline folder.
        return str(self.get_offline_mode_folder() / "data")
    files_server = self._files_server
    if not files_server:
        # Resolve lazily once and cache on the instance.
        files_server = Session.get_files_server_host()
        self._files_server = files_server
    return files_server
def get_status_message(self) -> (Optional[str], Optional[str]):
    """
    Return the task status and the message coupled with the last status change,
    without refreshing the entire Task object (only the status fields).

    Task status options: ["created", "in_progress", "stopped", "closed", "failed",
    "completed", "queued", "published", "publishing", "unknown"]

    :return: (Task status as string, last message)
    """
    status, message, _ = self._get_tasks_status([self.id])[0]
    # Keep the cached task data in sync when the query succeeded.
    if status and self._data:
        self._data.status = status
        self._data.status_message = message
    return status, message
def _get_status(self) -> (Optional[str], Optional[str]):
    """
    Fetch the task status & message WITHOUT touching the local Task state.

    Useful for background polling that must not break the Task object's
    consistency (backwards compatible behavior).

    :return: (status enum as string or None, str or None)
    """
    task_status, task_message, _task_id = self._get_tasks_status([self.id])[0]
    return task_status, task_message
@classmethod
def _get_tasks_status(cls, ids: List[str]) -> List[tuple]:
    """
    Query the backend for the status of several tasks in one request.

    :param ids: task IDs (str) to query
    :return: list of tuples (status, status_message, task_id); on any failure
        a list of (None, None, None) tuples of the same length is returned
    """
    # Offline mode never contacts the server - report every task as "created".
    if cls._offline_mode:
        return [(cls.TaskStatusEnum.created, "offline", i) for i in ids]
    # noinspection PyBroadException
    try:
        all_tasks = (
            cls._get_default_session()
            .send(
                tasks.GetAllRequest(id=ids, only_fields=["status", "status_message", "id"]),
            )
            .response.tasks
        )
        return [(task.status, task.status_message, task.id) for task in all_tasks]
    except Exception:
        # Best effort: any error (network, auth, ...) degrades to "unknown".
        return [(None, None, None) for _ in ids]
def _get_last_update(self) -> Optional[datetime]:
    """Return the backend's last_update timestamp for this task, or None (offline / on error)."""
    if self._offline_mode:
        return None
    # noinspection PyBroadException
    try:
        response_tasks = self.send(
            tasks.GetAllRequest(id=[self.id], only_fields=["last_update"]),
        ).response.tasks
        return response_tasks[0].last_update
    except Exception:
        return None
def _reload_last_iteration(self) -> ():
    """Refresh only the cached last_iteration field from the backend (best effort)."""
    # noinspection PyBroadException
    try:
        response_tasks = self.send(
            tasks.GetAllRequest(id=[self.id], only_fields=["last_iteration"]),
        ).response.tasks
        self.data.last_iteration = response_tasks[0].last_iteration
    except Exception:
        # Leave the cached value untouched on any failure.
        return None
def _set_runtime_properties(self, runtime_properties: Mapping[str, Union[str, int, float]]) -> bool:
    """
    Merge the given key/value pairs into the task's runtime properties.

    Requires backend API >= 2.13. The task is reloaded first (under the edit
    lock) so concurrent updates from other processes are not clobbered.

    :param runtime_properties: properties to add/override; existing keys not
        listed here are preserved
    :return: True if the update was sent, False if unsupported or nothing to set
    """
    if not Session.check_min_api_version("2.13") or not runtime_properties:
        return False
    with self._edit_lock:
        # Reload + merge + edit as one critical section.
        self.reload()
        current_runtime_properties = self.data.runtime or {}
        current_runtime_properties.update(runtime_properties)
        # noinspection PyProtectedMember
        self._edit(runtime=current_runtime_properties)
    return True
def _get_runtime_properties(self) -> Dict[str, str]:
    """Return a copy of the task's runtime properties (empty dict if unsupported or unset)."""
    if not Session.check_min_api_version("2.13"):
        return dict()
    runtime = self.data.runtime
    # Return a shallow copy so callers cannot mutate the cached task data.
    return dict(**runtime) if runtime else dict()
# ---------------------- HyperDatasets DataView helpers ----------------------
def set_dataview(self, dataview) -> None:
    """
    Store a HyperDatasets DataView definition on this Task via task properties
    (under `input.*`) so it shows in the UI, without runtime properties.

    A string argument is treated as a dataview id: the backend is queried for
    the full definition before it is serialized into the task's `input`
    section. A `DataView` instance is serialized as-is.

    :param dataview: DataView instance or dataview id string
    """
    from ...hyperdatasets import util as _hd_util

    _hd_util.set_dataview(self, dataview)
def get_dataviews(self) -> Dict[str, Any]:
    """
    Reconstruct the HyperDatasets DataView objects stored on this Task
    (primarily from the `input.*` task properties).

    :return: dict mapping arbitrary labels to DataView objects
    """
    from ...hyperdatasets import util as _hd_util

    return _hd_util.get_dataviews(self)
def _clear_task(
    self,
    system_tags: Optional[Sequence[str]] = None,
    comment: Optional[str] = None,
) -> ():
    """
    Reset this task's script, execution, models and output sections to an
    empty state, then push the cleared data to the backend.

    The exact edit payload depends on the backend API version, hence the
    version-check branches below.

    :param system_tags: system tags to set on the cleared task
    :param comment: comment to set on the cleared task
    """
    # Blank out the script section entirely (repo link, diff, requirements...).
    self._data.script = tasks.Script(
        binary="",
        repository="",
        tag="",
        branch="",
        version_num="",
        entry_point="",
        working_dir="",
        requirements={},
        diff="",
    )
    # models/container sections only exist from API 2.13 onwards.
    if Session.check_min_api_version("2.13"):
        self._data.models = tasks.TaskModels(input=[], output=[])
        self._data.container = dict()
    self._data.execution = tasks.Execution(
        artifacts=[],
        dataviews=[],
        model="",
        model_desc={},
        model_labels={},
        parameters={},
        docker_cmd="",
    )
    self._data.comment = str(comment)
    # Clear the output destination as well.
    self._storage_uri = None
    self._data.output.destination = self._storage_uri
    if Session.check_min_api_version("2.13"):
        self._set_task_property("system_tags", system_tags)
        self._data.script.requirements = dict()
        self._edit(
            system_tags=self._data.system_tags,
            comment=self._data.comment,
            script=self._data.script,
            execution=self._data.execution,
            output_dest="",
            hyperparams=dict(),
            configuration=dict(),
            container=self._data.container,
            models=self._data.models,
        )
    elif Session.check_min_api_version("2.9"):
        # 2.9-2.12: requirements are cleared via a dedicated request.
        self._update_requirements("")
        self._set_task_property("system_tags", system_tags)
        self._edit(
            system_tags=self._data.system_tags,
            comment=self._data.comment,
            script=self._data.script,
            execution=self._data.execution,
            output_dest="",
            hyperparams=dict(),
            configuration=dict(),
        )
    elif Session.check_min_api_version("2.3"):
        self._set_task_property("system_tags", system_tags)
        self._edit(
            system_tags=self._data.system_tags,
            comment=self._data.comment,
            script=self._data.script,
            execution=self._data.execution,
            output_dest="",
        )
    else:
        # Pre-2.3 servers only support plain tags (no system_tags).
        self._set_task_property("tags", system_tags)
        self._edit(
            tags=self._data.tags,
            comment=self._data.comment,
            script=self._data.script,
            execution=self._data.execution,
            output_dest=None,
        )
@classmethod
def _get_api_server(cls) -> str:
    """Return the configured API server host URL."""
    return Session.get_api_server_host()
def _get_app_server(self) -> str:
    """Return the web application server host URL (resolved once, then cached on the instance)."""
    app_server = self._app_server
    if not app_server:
        app_server = Session.get_app_server_host()
        self._app_server = app_server
    return app_server
def _is_remote_main_task(self) -> bool:
    """
    :return: return True if running remotely and this Task is the registered main task
    """
    # The remote task id is set by the agent; equality marks this instance as the main task.
    return running_remotely() and get_remote_task_id() == self.id
def _save_data_to_offline_dir(self, **kwargs: Any) -> ():
    """
    Apply the given field updates to the in-memory task data and persist the
    whole task as JSON into the offline-mode folder.

    :param kwargs: task data fields to set before serialization
    """
    for k, v in kwargs.items():
        setattr(self.data, k, v)
    offline_mode_folder = self.get_offline_mode_folder()
    if not offline_mode_folder:
        return
    Path(offline_mode_folder).mkdir(parents=True, exist_ok=True)
    with open((offline_mode_folder / self._offline_filename).as_posix(), "wt") as f:
        export_data = self.data.to_dict()
        # Extra fields needed later by report_offline_session() to re-import the run.
        export_data["project_name"] = self.get_project_name()
        export_data["offline_folder"] = self.get_offline_mode_folder().as_posix()
        export_data["offline_output_models"] = self._offline_output_models
        json.dump(export_data, f, ensure_ascii=True, sort_keys=True)
def _edit(self, **kwargs: Any) -> Any:
    """
    Send a forced EditRequest updating the given task fields.

    In offline mode the change is written to the offline folder instead.
    Unless ``force=True`` is passed in kwargs, editing is refused when the
    task is no longer in a draft/running state - except for metadata-only
    fields (name, project, comment, tags, system_tags, runtime) which may
    always change.

    :param kwargs: task fields to edit (an optional ``force`` key is popped)
    :return: the API response, or None in offline mode
    :raises ValueError: when editing non-metadata fields of a finalized task
    """
    with self._edit_lock:
        if self._offline_mode:
            self._save_data_to_offline_dir(**kwargs)
            return None
        # Since we are using a forced update, make sure the task status is valid
        status = self._data.status if self._data and self._reload_skip_flag else self.data.status
        if not kwargs.pop("force", False) and status not in (
            self.TaskStatusEnum.created,
            self.TaskStatusEnum.in_progress,
        ):
            # the exception being name/comment that we can always change.
            if kwargs and all(
                k in ("name", "project", "comment", "tags", "system_tags", "runtime") for k in kwargs.keys()
            ):
                pass
            else:
                raise ValueError(
                    "Task object can only be updated if created or in_progress "
                    "[status={} fields={}]".format(status, list(kwargs.keys()))
                )
        res = self.send(
            tasks.EditRequest(task=self.id, force=True, **kwargs),
            raise_on_errors=False,
        )
        return res
def _update_requirements(self, requirements: Union[dict, str, Sequence[str]]) -> ():
    """
    Replace the task's requirements section.

    :param requirements: a dict of {section: packages}, or a plain str /
        sequence of package specs which is stored under the "pip" section
    """
    if not isinstance(requirements, dict):
        requirements = {"pip": requirements}
    # make sure we have str as values:
    for key in requirements.keys():
        # fix python2 support (str/unicode)
        if requirements[key] and not isinstance(requirements[key], six.string_types):
            requirements[key] = "\n".join(requirements[key])
    # protection, Old API might not support it
    # noinspection PyBroadException
    try:
        with self._edit_lock:
            self.reload()
            self.data.script.requirements = requirements
            if self._offline_mode:
                # Offline: persist through the regular edit path (writes to disk).
                self._edit(script=self.data.script)
            else:
                self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
    except Exception:
        # Best effort - older servers may not support SetRequirements.
        pass
def _update_script(self, script: dict) -> ():
    """
    Replace the task's entire script section and persist it to the backend.

    :param script: new script section (repo, entry point, diff, ...)
    """
    with self._edit_lock:
        # Reload first so we do not overwrite concurrent edits to other sections.
        self.reload()
        self.data.script = script
        self._edit(script=script)
def _set_configuration(
    self,
    name: str,
    description: Optional[str] = None,
    config_type: Optional[str] = None,
    config_text: Optional[str] = None,
    config_dict: Optional[Union[Mapping, list]] = None,
    **kwargs: Any,
) -> None:
    """
    Set Task configuration text/dict. Multiple configurations are supported.

    :param str name: Configuration name.
    :param str description: Configuration section description.
    :param str config_type: Optional configuration format type (str).
    :param config_text: model configuration (unconstrained text string). usually the content
        of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
    :param config_dict: model configuration parameters dictionary.
        If `config_dict` is not None, `config_text` must not be provided.
    :raises ValueError: if the backend API version is below 2.9
    """
    # make sure we have either dict or text (exactly one must be provided)
    mutually_exclusive(config_dict=config_dict, config_text=config_text, _check_none=True)
    if not Session.check_min_api_version("2.9"):
        raise ValueError(
            "Multiple configurations are not supported with the current 'clearml-server', "
            "please upgrade to the latest version"
        )
    if description:
        description = str(description)
    # support empty string
    a_config = config_dict_to_text(config_dict if config_text is None else config_text)
    with self._edit_lock:
        # Reload + update under the lock to keep other configuration entries intact.
        self.reload()
        configuration = self.data.configuration or {}
        configuration[name] = tasks.ConfigurationItem(
            name=name,
            value=a_config,
            description=description or None,
            type=config_type or None,
        )
        self._edit(configuration=configuration, **kwargs)
def _get_configuration_text(self, name: str) -> Optional[str]:
    """
    Return a named Task configuration section as raw text.

    :param str name: Configuration name.
    :return: configuration text (unconstrained string), or None when the
        name does not exist
    :raises ValueError: if the backend API version is below 2.9
    """
    if not Session.check_min_api_version("2.9"):
        raise ValueError(
            "Multiple configurations are not supported with the current 'clearml-server', "
            "please upgrade to the latest version"
        )
    config_item = (self.data.configuration or {}).get(name)
    return config_item.value if config_item else None
def _get_configuration_dict(self, name: str) -> Optional[dict]:
    """
    Return a named Task configuration section parsed into a dictionary.

    :param str name: Configuration name.
    :return: parsed configuration dict, or None when the name does not exist
    """
    raw_text = self._get_configuration_text(name)
    return text_to_config_dict(raw_text) if raw_text else None
def _validate_static_route(self, static_route: str):
    """
    Validate a static route by its name. If the static route is not valid, an exception will be raised

    :param static_route: The static route to validate
    :raises ValueError: if the server is too old, the route is missing,
        disabled, or active without load balancing
    """
    Session.verify_feature_set("advanced")
    if not Session.check_min_server_version("3.26"):
        raise ValueError(
            "Static routes not supported by the server version. "
            "Minimum required version is 3.26"
        )
    # Exact-name match query (regex anchored on both ends).
    response = self._get_default_session().send_request(
        "routers", "get_routes", json={"name": "^{}$".format(static_route)}
    )
    if response.status_code != 200:
        raise ValueError(
            "Static route validation request for '{}' failed with status code {}".format(
                static_route, response.status_code
            )
        )
    static_route_object = response.json().get("data", {}).get("routes")
    if not static_route_object:
        raise ValueError("Static route '{}' does not exist".format(static_route))
    static_route_object = static_route_object[0]
    if not static_route_object.get("enabled", False):
        raise ValueError("Static route '{}' is disabled".format(static_route))
    # An active route must also be load-balanced to be usable.
    if static_route_object.get("status", "") == "active" and not static_route_object.get("load_balancer", {}).get(
        "enabled", False
    ):
        raise ValueError("Static route '{}' is active but not load-balanced".format(static_route))
def get_offline_mode_folder(self) -> Optional[Path]:
    """
    Return the folder where all the task outputs and logs are stored in the offline session.

    :return: Path object, local folder, later to be used with `report_offline_session()`
    """
    if not self.task_id:
        return None
    # Resolve and cache the folder the first time it is needed (offline mode only).
    if not self._offline_dir:
        if not self._offline_mode:
            return None
        self._offline_dir = get_offline_dir(task_id=self.task_id)
    return self._offline_dir
@classmethod
def _clone_task(
    cls,
    cloned_task_id: str,
    name: Optional[str] = None,
    comment: Optional[str] = None,
    execution_overrides: Optional[dict] = None,
    tags: Optional[Sequence[str]] = None,
    parent: Optional[str] = None,
    project: Optional[str] = None,
    log: Optional[logging.Logger] = None,
    session: Optional[Session] = None,
) -> str:
    """
    Clone a task

    :param str cloned_task_id: Task ID for the task to be cloned
    :param str name: Name for the new task
    :param str comment: Optional comment for the new task
    :param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
        section, useful for overriding values in the cloned task.
    :param list tags: Optional updated model tags
    :param str parent: Optional parent Task ID of the new task.
    :param str project: Optional project ID of the new task.
        If None, the new task will inherit the cloned task's project.
    :param logging.Logger log: Log object used by the infrastructure.
    :param Session session: Session object used for sending requests to the API
    :return: The new task's ID.
    """
    session = session if session else cls._get_default_session()
    # From API 2.9 the server clones in one request; otherwise clone manually below.
    use_clone_api = Session.check_min_api_version("2.9")
    if use_clone_api:
        res = cls._send(
            session=session,
            log=log,
            req=tasks.CloneRequest(
                task=cloned_task_id,
                new_task_name=name,
                new_task_tags=tags,
                new_task_comment=comment,
                new_task_parent=parent,
                new_task_project=project,
                execution_overrides=execution_overrides,
            ),
        )
        cloned_task_id = res.response.id
        return cloned_task_id

    # Manual clone path (pre-2.9): fetch the source task and recreate it.
    res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
    task = res.response.task
    output_dest = None
    if task.output:
        output_dest = task.output.destination
    # Merge requested overrides on top of the source execution section.
    execution = task.execution.to_dict() if task.execution else {}
    execution = ConfigTree.merge_configs(
        ConfigFactory.from_dict(execution),
        ConfigFactory.from_dict(execution_overrides or {}),
    )
    # clear all artifacts (keep only input-mode ones)
    execution["artifacts"] = [e for e in execution["artifacts"] if e.get("mode") == "input"]
    # Old servers without system_tags keep the development marker in plain tags - strip it.
    if not hasattr(task, "system_tags") and not tags and task.tags:
        tags = [t for t in task.tags if t != cls._development_tag]
    extra = {}
    if hasattr(task, "hyperparams"):
        extra["hyperparams"] = task.hyperparams
    if hasattr(task, "configuration"):
        extra["configuration"] = task.configuration
    if getattr(task, "system_tags", None):
        # Never copy the development/archived markers onto the clone.
        extra["system_tags"] = [t for t in task.system_tags if t not in (cls._development_tag, cls.archived_tag)]
    req = tasks.CreateRequest(
        name=name or task.name,
        type=task.type,
        input=task.input if hasattr(task, "input") else {"view": {}},
        tags=tags,
        comment=comment if comment is not None else task.comment,
        parent=parent,
        project=project if project else task.project,
        output_dest=output_dest,
        execution=execution.as_plain_ordered_dict(),
        script=task.script,
        **extra,
    )
    res = cls._send(session=session, log=log, req=req)
    cloned_task_id = res.response.id
    # Requirements are not part of CreateRequest - copy them separately.
    if task.script and task.script.requirements:
        cls._send(
            session=session,
            log=log,
            req=tasks.SetRequirementsRequest(task=cloned_task_id, requirements=task.script.requirements),
        )
    return cloned_task_id
@classmethod
def get_all(
    cls,
    session: Optional[Session] = None,
    log: Optional[logging.Logger] = None,
    **kwargs: Any,
) -> Any:
    """
    List all the Tasks based on specific projection.

    :param Session session: The session object used for sending requests to the API.
    :param logging.Logger log: The Log object.
    :param kwargs: Keyword args passed to the GetAllRequest
        (see :class:`.backend_api.service.v?.tasks.GetAllRequest` for details; the ? needs to be replaced by the appropriate version.)

        For example:

        .. code-block:: bash

           status='completed', 'search_text'='specific_word', 'user'='user_id', 'project'='project_id'

    :type kwargs: dict
    :return: The API response.
    """
    active_session = session if session else cls._get_default_session()
    return cls._send(session=active_session, req=tasks.GetAllRequest(**kwargs), log=log)
@classmethod
def get_task_output_log_web_page(
    cls,
    task_id: str,
    project_id: Optional[str] = None,
    app_server_host: Optional[str] = None,
) -> str:
    """
    Return the Task results & outputs web page address.
    For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log

    :param str task_id: Task ID.
    :param str project_id: Project ID for this task.
    :param str app_server_host: ClearML Application server host name.
        If not provided, the current session will be used to resolve the host name.
    :return: ``http/s`` URL link.
    """
    if not app_server_host:
        # Bug fix: the previous hasattr(cls, "__cached_app_server_host") check could
        # never succeed - the string literal is not name-mangled while the assignment
        # below is (stored as _Task__cached_app_server_host), so the host was
        # re-resolved on every call. Attribute access mangles consistently on both
        # the read and the write, so the cache now actually works.
        try:
            app_server_host = cls.__cached_app_server_host
        except AttributeError:
            cls.__cached_app_server_host = Session.get_app_server_host()
            app_server_host = cls.__cached_app_server_host
    # Newer servers renamed "experiments" to "tasks" in the web app URL scheme.
    template = "{}/projects/{}/tasks/{}/output/log" if Session.check_min_api_version("2.31") \
        else "{}/projects/{}/experiments/{}/output/log"
    return template.format(
        app_server_host.rstrip("/"),
        project_id if project_id is not None else "*",
        task_id,
    )
@classmethod
def _get_project_name(cls, project_id: str) -> Optional[str]:
    """Resolve a project ID to its name via the backend; None when not found or on error."""
    res = cls._send(
        cls._get_default_session(),
        projects.GetByIdRequest(project=project_id),
        raise_on_errors=False,
    )
    project = res.response.project if res and res.response else None
    return project.name if project else None
@classmethod
def _get_project_names(cls, project_ids: Sequence[str]) -> Dict[str, str]:
    """Resolve multiple project IDs to names, paging through results; returns {id: name}."""
    page_size = 500
    page = 0
    collected_projects = []
    # Keep requesting pages until an empty (or failed) page is returned.
    while True:
        res = cls._send(
            cls._get_default_session(),
            projects.GetAllRequest(id=list(project_ids), page=page, page_size=page_size),
            raise_on_errors=False,
        )
        page_projects = res.response.projects if res and res.response else None
        if not page_projects:
            break
        collected_projects.extend(page_projects)
        page += 1
    return {p.id: p.name for p in collected_projects}
def _get_all_events(
    self,
    max_events: int = 100,
    batch_size: int = 500,
    order: str = "asc",
    event_type: str = None,
    unique_selector: Callable[[dict], Any] = None,
) -> Union[List[Any], Set[Any]]:
    """
    Get a list of all reported events.

    Warning: Debug only. Do not use outside of testing.

    :param max_events: The maximum events the function will return. Pass None
        to return all the reported events.
    :param batch_size: The maximum number of events retrieved by each internal call performed by this method.
    :param order: Events order (by timestamp) - "asc" for ascending, "desc" for descending.
    :param event_type: Event type. Pass None to get all event types.
    :param unique_selector: If provided, used to select a value from each event, only a unique set of these
        values will be returned by this method.

    :return: A list of events from the task. If unique_selector was provided, a set of values selected from events
        of the task.
    """
    # When max_events is set, fetch at most that many per request as well.
    batch_size = max_events or batch_size

    def apply_unique_selector(events_set: Set[Any], evs: List[dict]) -> ():
        # Fold the selected values into the accumulating set; selector results must be hashable.
        try:
            events_set.update(map(unique_selector, evs))
        except TypeError:
            self.log.error(
                "Failed applying unique_selector on events (note the selector's result must be hashable)"
            )
            raise

    # First page; subsequent pages are fetched with the returned scroll_id.
    log_events = self.send(
        events.GetTaskEventsRequest(
            task=self.id,
            order=order,
            batch_size=batch_size,
            event_type=event_type,
        )
    )
    returned_count = log_events.response.returned
    total_events = log_events.response.total
    scroll = log_events.response.scroll_id
    if unique_selector:
        events_list = set([])
        apply_unique_selector(events_list, log_events.response.events)
    else:
        events_list = log_events.response.events
    # NOTE(review): with unique_selector, len(events_list) counts distinct values,
    # so paging may stop before max_events raw events were scanned - presumed intended.
    while returned_count < total_events and (max_events is None or len(events_list) < max_events):
        log_events = self.send(
            events.GetTaskEventsRequest(
                task=self.id,
                order=order,
                batch_size=batch_size,
                event_type=event_type,
                scroll_id=scroll,
            )
        )
        scroll = log_events.response.scroll_id
        returned_count += log_events.response.returned
        if unique_selector:
            apply_unique_selector(events_list, log_events.response.events)
        else:
            events_list.extend(log_events.response.events)
    return events_list
@property
def _edit_lock(self) -> ():
    """
    Lazily create (and cache) the lock guarding task edits.

    A cross-process file lock is used only when this process is the
    registered master process for this exact task; otherwise a plain
    in-process RLock suffices.
    """
    # skip the actual lock, this one-time lock will always enter
    # only used on shutdown process to avoid deadlocks
    if self.__edit_lock is False:
        return RLock()
    if self.__edit_lock:
        return self.__edit_lock
    # Env var format is "<pid>:<task_id>"; without a task id part there is
    # no multi-process coordination to do.
    if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(":")) < 2:
        self.__edit_lock = RLock()
    elif PROC_MASTER_ID_ENV_VAR.get().split(":")[1] == str(self.id):
        filename = os.path.join(gettempdir(), "clearml_{}.lock".format(self.id))
        # no need to remove previous file lock if we have a dead process, it will automatically release the lock.
        # create a new file based lock
        self.__edit_lock = FileRLock(filename=filename)
    else:
        # A different task is the master - in-process locking is enough here.
        self.__edit_lock = RLock()
    return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value: RLock) -> ():
    """Override the cached edit lock (set to None to force re-creation on next access)."""
    self.__edit_lock = value
@classmethod
def __update_master_pid_task(
    cls,
    pid: Optional[int] = None,
    task: Optional[Union[str, "Task"]] = None,
) -> None:
    """
    Record "<pid>:<task_id>" in the master-process environment variable so
    subprocesses can identify the master process and its main task.

    :param pid: master process id (defaults to the current process)
    :param task: Task object or task id string; None clears the task part
    """
    pid = pid or os.getpid()
    if not task:
        PROC_MASTER_ID_ENV_VAR.set(str(pid) + ":")
    elif isinstance(task, str):
        PROC_MASTER_ID_ENV_VAR.set(str(pid) + ":" + task)
    else:
        # noinspection PyUnresolvedReferences
        PROC_MASTER_ID_ENV_VAR.set(str(pid) + ":" + str(task.id))
        # make sure we refresh the edit lock next time we need it
        task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls) -> Optional[str]:
    """
    Return the task ID recorded in the master-process environment variable
    ("<pid>:<task_id>"), or None when not set.
    """
    value = PROC_MASTER_ID_ENV_VAR.get("")
    if not value:
        return None
    # The pid part is irrelevant here (the original bound it to an unused local).
    _, _, master_task_id = value.partition(":")
    # we could not find a task ID, revert to old stub behaviour
    return master_task_id or None
@classmethod
def __get_master_process_id(cls) -> Optional[str]:
    """
    Return the pid recorded in the master-process environment variable
    ("<pid>:<task_id>"), or None when unset or missing the task id part.
    """
    if not PROC_MASTER_ID_ENV_VAR.get(""):
        return None
    parts = PROC_MASTER_ID_ENV_VAR.get().split(":")
    # we could not find a task ID, revert to old stub behaviour
    if len(parts) < 2 or not parts[1]:
        return None
    return parts[0]
@classmethod
def __is_subprocess(cls) -> bool:
    # notice this class function is called from Task.ExitHooks, do not rename/move it.
    # A process is a subprocess when the recorded master pid differs from its own pid.
    master_value = PROC_MASTER_ID_ENV_VAR.get()
    return master_value and master_value.split(":")[0] != str(os.getpid())
@classmethod
def _get_task_status(cls, task_id: str) -> (Optional[str], Optional[str]):
    """Query the backend for a single task's (status, status_message); (None, None) on error."""
    if cls._offline_mode:
        return cls.TaskStatusEnum.created, "offline"
    # noinspection PyBroadException
    try:
        response_tasks = (
            cls._get_default_session()
            .send(
                tasks.GetAllRequest(id=[task_id], only_fields=["status", "status_message"]),
            )
            .response.tasks
        )
        queried_task = response_tasks[0]
        return queried_task.status, queried_task.status_message
    except Exception:
        return None, None
@property
def report_subprocess_enabled(self) -> bool:
    """Whether reporting is performed from a dedicated subprocess."""
    return self._report_subprocess_enabled
| Task |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 1731,
"end": 1823
} | class ____(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
| PassThru |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 29257,
"end": 30771
} | class ____(nn.Module):
"""Feature fusion layer, merges feature maps from different stages.
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
align_corners (`bool`, *optional*, defaults to `True`):
The align_corner setting for bilinear upsample.
"""
def __init__(self, config: DPTConfig, align_corners: bool = True):
super().__init__()
self.align_corners = align_corners
self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
self.residual_layer1 = DPTPreActResidualLayer(config)
self.residual_layer2 = DPTPreActResidualLayer(config)
def forward(self, hidden_state: torch.Tensor, residual: Optional[torch.Tensor] = None) -> torch.Tensor:
if residual is not None:
if hidden_state.shape != residual.shape:
residual = nn.functional.interpolate(
residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False
)
hidden_state = hidden_state + self.residual_layer1(residual)
hidden_state = self.residual_layer2(hidden_state)
hidden_state = nn.functional.interpolate(
hidden_state, scale_factor=2, mode="bilinear", align_corners=self.align_corners
)
hidden_state = self.projection(hidden_state)
return hidden_state
@auto_docstring
| DPTFeatureFusionLayer |
python | huggingface__transformers | tests/models/idefics3/test_modeling_idefics3.py | {
"start": 5267,
"end": 13708
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `Idefics3`.
"""
all_model_classes = (Idefics3Model,) if is_torch_available() else ()
test_resize_embeddings = True
def setUp(self):
self.model_tester = Idefics3VisionText2TextModelTester(self)
self.config_tester = ConfigTester(
self, config_class=Idefics3Config, has_text_modality=False, common_properties=["image_token_id"]
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="input_embeds cannot be passed in without input_ids")
def test_inputs_embeds():
pass
@unittest.skip(reason="input_embeds cannot be passed in without input_ids")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
pass
@unittest.skip(reason="Compile not yet supported in idefics3 models")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.text_config.vocab_size
# Retrieve the embeddings and clone theme
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Ignore copy
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# make sure that decoder_input_ids are resized as well
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size)
self.assertTrue(model.config.text_config.vocab_size, model.vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
# Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
target_dimension = 128
model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0], target_dimension)
with self.assertRaisesRegex(
ValueError,
"Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
):
model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
model.eval()
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
| Idefics3ModelTest |
python | jupyterlab__jupyterlab | examples/notebook/main.py | {
"start": 1689,
"end": 2508
} | class ____(LabServerApp):
extension_url = "/example"
default_url = "/example"
app_url = "/example"
name = __name__
app_name = "JupyterLab Example Notebook"
app_settings_dir = os.path.join(HERE, "build", "application_settings")
schemas_dir = os.path.join(HERE, "build", "schemas")
static_dir = os.path.join(HERE, "build")
templates_dir = os.path.join(HERE, "templates")
themes_dir = os.path.join(HERE, "build", "themes")
user_settings_dir = os.path.join(HERE, "build", "user_settings")
workspaces_dir = os.path.join(HERE, "build", "workspaces")
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list."""
self.handlers.append(("/example", ExampleHandler))
if __name__ == "__main__":
ExampleApp.launch_instance()
| ExampleApp |
python | django__django | tests/context_processors/tests.py | {
"start": 2179,
"end": 3848
} | class ____(TestCase):
"""
Tests for the ``django.template.context_processors.debug`` processor.
"""
databases = {"default", "other"}
def test_debug(self):
url = "/debug/"
# We should have the debug flag in the template.
response = self.client.get(url)
self.assertContains(response, "Have debug")
# And now we should not
with override_settings(DEBUG=False):
response = self.client.get(url)
self.assertNotContains(response, "Have debug")
def test_sql_queries(self):
"""
Test whether sql_queries represents the actual amount
of queries executed. (#23364)
"""
url = "/debug/"
response = self.client.get(url)
self.assertContains(response, "First query list: 0")
self.assertContains(response, "Second query list: 1")
# Check we have not actually memoized connection.queries
self.assertContains(response, "Third query list: 2")
# Check queries for DB connection 'other'
self.assertContains(response, "Fourth query list: 3")
@override_settings(
ROOT_URLCONF="context_processors.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.csp",
],
},
}
],
MIDDLEWARE=[
"django.middleware.csp.ContentSecurityPolicyMiddleware",
],
SECURE_CSP={
"script-src": [CSP.SELF, CSP.NONCE],
},
)
| DebugContextProcessorTests |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/utils/test_identifiers.py | {
"start": 1011,
"end": 3213
} | class ____:
@pytest.fixture(
autouse=True,
params=[
pytest.param(NOTSET, id="default-namespace"),
pytest.param(uuid.UUID(int=42), id="custom-namespace"),
],
)
def setup_namespace(self, request):
self.default_namespace = request.param is NOTSET
self.namespace = uuid.NAMESPACE_OID if self.default_namespace else request.param
self.kwargs = {"namespace": self.namespace} if not self.default_namespace else {}
def test_deterministic(self):
"""Test that result is deterministic and a valid UUID object"""
args = ["".join(random.choices(string.ascii_letters, k=random.randint(3, 13))) for _ in range(100)]
result = generate_uuid(*args, **self.kwargs)
assert result == generate_uuid(*args, **self.kwargs)
assert uuid.UUID(result).version == 5, "Should generate UUID v5"
def test_nil_uuid(self):
"""Test that result of single None are NIL UUID, regardless namespace."""
assert generate_uuid(None, **self.kwargs) == "00000000-0000-0000-0000-000000000000"
def test_single_uuid_value(self):
"""Test that result of single not None value are the same as uuid5."""
assert generate_uuid("", **self.kwargs) == str(uuid.uuid5(self.namespace, ""))
assert generate_uuid("Airflow", **self.kwargs) == str(uuid.uuid5(self.namespace, "Airflow"))
def test_multiple_none_value(self):
"""Test that result of single None are NIL UUID, regardless of namespace."""
multi_none = generate_uuid(None, None, **self.kwargs)
assert multi_none != "00000000-0000-0000-0000-000000000000"
assert uuid.UUID(multi_none).version == 5
# Test that None values not skipped
assert generate_uuid(None, "1", None, **self.kwargs) != generate_uuid("1", **self.kwargs)
assert generate_uuid(None, "1", **self.kwargs) != generate_uuid("1", **self.kwargs)
assert generate_uuid("1", None, **self.kwargs) != generate_uuid("1", **self.kwargs)
def test_no_args_value(self):
with pytest.raises(ValueError, match="Expected at least 1 argument"):
generate_uuid(**self.kwargs)
| TestGenerateUuid |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/selective_checks.py | {
"start": 16055,
"end": 75782
} | class ____:
__HASHABLE_FIELDS = {"_files", "_default_branch", "_commit_ref", "_pr_labels", "_github_event"}
def __init__(
self,
files: tuple[str, ...] = (),
default_branch=AIRFLOW_BRANCH,
default_constraints_branch=DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH,
commit_ref: str | None = None,
pr_labels: tuple[str, ...] = (),
github_event: GithubEvents = GithubEvents.PULL_REQUEST,
github_repository: str = APACHE_AIRFLOW_GITHUB_REPOSITORY,
github_actor: str = "",
github_context_dict: dict[str, Any] | None = None,
):
self._files = files
self._default_branch = default_branch
self._default_constraints_branch = default_constraints_branch
self._commit_ref = commit_ref
self._pr_labels = pr_labels
self._github_event = github_event
self._github_repository = github_repository
self._github_actor = github_actor
self._github_context_dict = github_context_dict or {}
self._new_toml: dict[str, Any] = {}
self._old_toml: dict[str, Any] = {}
def __important_attributes(self) -> tuple[Any, ...]:
return tuple(getattr(self, f) for f in self.__HASHABLE_FIELDS)
def __hash__(self):
return hash(self.__important_attributes())
def __eq__(self, other):
return isinstance(other, SelectiveChecks) and all(
[getattr(other, f) == getattr(self, f) for f in self.__HASHABLE_FIELDS]
)
def __str__(self) -> str:
from airflow_breeze.utils.github import get_ga_output
output = []
for field_name in dir(self):
if not field_name.startswith("_"):
value = getattr(self, field_name)
if value is not None:
output.append(get_ga_output(field_name, value))
return "\n".join(output)
default_postgres_version = DEFAULT_POSTGRES_VERSION
default_mysql_version = DEFAULT_MYSQL_VERSION
default_kubernetes_version = DEFAULT_KUBERNETES_VERSION
default_kind_version = KIND_VERSION
default_helm_version = HELM_VERSION
@cached_property
def latest_versions_only(self) -> bool:
return LATEST_VERSIONS_ONLY_LABEL in self._pr_labels
@cached_property
def default_python_version(self) -> str:
return (
CURRENT_PYTHON_MAJOR_MINOR_VERSIONS[-1]
if LATEST_VERSIONS_ONLY_LABEL in self._pr_labels
else DEFAULT_PYTHON_MAJOR_MINOR_VERSION
)
@cached_property
def default_branch(self) -> str:
return self._default_branch
@cached_property
def default_constraints_branch(self) -> str:
return self._default_constraints_branch
def _should_run_all_tests_and_versions(self) -> bool:
if self._github_event in [GithubEvents.PUSH, GithubEvents.SCHEDULE, GithubEvents.WORKFLOW_DISPATCH]:
get_console().print(f"[warning]Running everything because event is {self._github_event}[/]")
return True
if not self._commit_ref:
get_console().print("[warning]Running everything in all versions as commit is missing[/]")
return True
if self.pyproject_toml_changed:
get_console().print("[warning]Running everything with all versions: changed pyproject.toml[/]")
return True
if self.generated_dependencies_changed:
get_console().print(
"[warning]Running everything with all versions: provider dependencies changed[/]"
)
return True
return False
@cached_property
def all_versions(self) -> bool:
if DEFAULT_VERSIONS_ONLY_LABEL in self._pr_labels:
return False
if LATEST_VERSIONS_ONLY_LABEL in self._pr_labels:
return False
if ALL_VERSIONS_LABEL in self._pr_labels:
return True
if self._should_run_all_tests_and_versions():
return True
return False
@cached_property
def full_tests_needed(self) -> bool:
if self._should_run_all_tests_and_versions():
return True
if self._matching_files(
FileGroupForCi.ENVIRONMENT_FILES,
CI_FILE_GROUP_MATCHES,
):
get_console().print("[warning]Running full set of tests because env files changed[/]")
return True
if self._matching_files(
FileGroupForCi.API_FILES,
CI_FILE_GROUP_MATCHES,
):
get_console().print("[warning]Running full set of tests because api files changed[/]")
return True
if self._matching_files(
FileGroupForCi.GIT_PROVIDER_FILES,
CI_FILE_GROUP_MATCHES,
):
# TODO(potiuk): remove me when we get rid of the dependency
get_console().print(
"[warning]Running full set of tests because git provider files changed "
"and for now we have core tests depending on them.[/]"
)
return True
if self._matching_files(
FileGroupForCi.STANDARD_PROVIDER_FILES,
CI_FILE_GROUP_MATCHES,
):
# TODO(potiuk): remove me when we get rid of the dependency
get_console().print(
"[warning]Running full set of tests because standard provider files changed "
"and for now we have core tests depending on them.[/]"
)
return True
if self._matching_files(
FileGroupForCi.TESTS_UTILS_FILES,
CI_FILE_GROUP_MATCHES,
):
get_console().print("[warning]Running full set of tests because tests/utils changed[/]")
return True
if self._is_large_enough_pr():
return True
if FULL_TESTS_NEEDED_LABEL in self._pr_labels:
get_console().print(
"[warning]Full tests needed because "
f"label '{FULL_TESTS_NEEDED_LABEL}' is in {self._pr_labels}[/]"
)
return True
return False
def _is_large_enough_pr(self) -> bool:
"""
Check if PR is large enough to run full tests.
The heuristics are based on number of files changed and total lines changed,
while excluding generated files which can be ignored.
"""
FILE_THRESHOLD = 25
LINE_THRESHOLD = 500
if not self._files:
return False
exclude_patterns = [
r"/newsfragments/",
r"^uv\.lock$",
r"pnpm-lock\.yaml$",
r"package-lock\.json$",
]
relevant_files = [
f for f in self._files if not any(re.search(pattern, f) for pattern in exclude_patterns)
]
files_changed = len(relevant_files)
if files_changed >= FILE_THRESHOLD:
get_console().print(
f"[warning]Running full set of tests because PR touches {files_changed} files "
f"(≥25 threshold)[/]"
)
return True
if not self._commit_ref:
get_console().print("[warning]Cannot determine if PR is big enough, skipping the check[/]")
return False
try:
result = run_command(
["git", "diff", "--numstat", f"{self._commit_ref}^...{self._commit_ref}"] + relevant_files,
capture_output=True,
text=True,
cwd=AIRFLOW_ROOT_PATH,
check=False,
)
if result.returncode == 0:
total_lines = 0
for line in result.stdout.strip().split("\n"):
if line:
parts = line.split("\t")
if len(parts) >= 2:
try:
additions = int(parts[0])
deletions = int(parts[1])
total_lines += additions + deletions
except ValueError:
pass
if total_lines >= LINE_THRESHOLD:
get_console().print(
f"[warning]Running full set of tests because PR changes {total_lines} lines "
f"in {files_changed} files[/]"
)
return True
except Exception:
pass
return False
@cached_property
def python_versions(self) -> list[str]:
if self.all_versions:
return CURRENT_PYTHON_MAJOR_MINOR_VERSIONS
if self.latest_versions_only:
return [CURRENT_PYTHON_MAJOR_MINOR_VERSIONS[-1]]
return [DEFAULT_PYTHON_MAJOR_MINOR_VERSION]
@cached_property
def python_versions_list_as_string(self) -> str:
return " ".join(self.python_versions)
@cached_property
def all_python_versions(self) -> list[str]:
"""
All python versions include all past python versions available in previous branches
Even if we remove them from the main version. This is needed to make sure we can cherry-pick
changes from main to the previous branch.
"""
if self.all_versions:
return ALL_PYTHON_MAJOR_MINOR_VERSIONS
if self.latest_versions_only:
return [CURRENT_PYTHON_MAJOR_MINOR_VERSIONS[-1]]
return [DEFAULT_PYTHON_MAJOR_MINOR_VERSION]
@cached_property
def all_python_versions_list_as_string(self) -> str:
return " ".join(self.all_python_versions)
@cached_property
def postgres_versions(self) -> list[str]:
if self.all_versions:
return CURRENT_POSTGRES_VERSIONS
if self.latest_versions_only:
return [CURRENT_POSTGRES_VERSIONS[-1]]
return [DEFAULT_POSTGRES_VERSION]
@cached_property
def mysql_versions(self) -> list[str]:
if self.all_versions:
return CURRENT_MYSQL_VERSIONS
if self.latest_versions_only:
return [CURRENT_MYSQL_VERSIONS[-1]]
return [DEFAULT_MYSQL_VERSION]
@cached_property
def kind_version(self) -> str:
return KIND_VERSION
@cached_property
def helm_version(self) -> str:
return HELM_VERSION
@cached_property
def postgres_exclude(self) -> list[dict[str, str]]:
if not self.all_versions:
# Only basic combination so we do not need to exclude anything
return []
return [
# Exclude all combinations that are repeating python/postgres versions
{"python-version": python_version, "backend-version": postgres_version}
for python_version, postgres_version in excluded_combos(
CURRENT_PYTHON_MAJOR_MINOR_VERSIONS, CURRENT_POSTGRES_VERSIONS
)
]
@cached_property
def mysql_exclude(self) -> list[dict[str, str]]:
if not self.all_versions:
# Only basic combination so we do not need to exclude anything
return []
return [
# Exclude all combinations that are repeating python/mysql versions
{"python-version": python_version, "backend-version": mysql_version}
for python_version, mysql_version in excluded_combos(
CURRENT_PYTHON_MAJOR_MINOR_VERSIONS, CURRENT_MYSQL_VERSIONS
)
]
@cached_property
def sqlite_exclude(self) -> list[dict[str, str]]:
return []
@cached_property
def kubernetes_versions(self) -> list[str]:
if self.all_versions:
return CURRENT_KUBERNETES_VERSIONS
if self.latest_versions_only:
return [CURRENT_KUBERNETES_VERSIONS[-1]]
return [DEFAULT_KUBERNETES_VERSION]
@cached_property
def kubernetes_versions_list_as_string(self) -> str:
return " ".join(self.kubernetes_versions)
@cached_property
def kubernetes_combos(self) -> list[str]:
python_version_array: list[str] = self.python_versions_list_as_string.split(" ")
kubernetes_version_array: list[str] = self.kubernetes_versions_list_as_string.split(" ")
combo_titles, short_combo_titles, combos = get_kubernetes_python_combos(
kubernetes_version_array, python_version_array
)
return short_combo_titles
@cached_property
def kubernetes_combos_list_as_string(self) -> str:
return " ".join(self.kubernetes_combos)
def _matching_files(self, match_group: FileGroupForCi, match_dict: HashableDict) -> list[str]:
return _matching_files(self._files, match_group, match_dict)
def _should_be_run(self, source_area: FileGroupForCi) -> bool:
if self.full_tests_needed:
get_console().print(f"[warning]{source_area} enabled because we are running everything[/]")
return True
matched_files = self._matching_files(source_area, CI_FILE_GROUP_MATCHES)
if matched_files:
get_console().print(
f"[warning]{source_area} enabled because it matched {len(matched_files)} changed files[/]"
)
return True
get_console().print(f"[warning]{source_area} disabled because it did not match any changed files[/]")
return False
@cached_property
def mypy_checks(self) -> list[str]:
checks_to_run: list[str] = []
if (
self._matching_files(FileGroupForCi.DEVEL_TOML_FILES, CI_FILE_GROUP_MATCHES)
and self._default_branch == "main"
):
return [
"mypy-airflow-core",
"mypy-providers",
"mypy-dev",
"mypy-task-sdk",
"mypy-devel-common",
"mypy-airflow-ctl",
]
if (
self._matching_files(FileGroupForCi.ALL_AIRFLOW_PYTHON_FILES, CI_FILE_GROUP_MATCHES)
or self.full_tests_needed
):
checks_to_run.append("mypy-airflow-core")
if (
self._matching_files(FileGroupForCi.ALL_PROVIDERS_PYTHON_FILES, CI_FILE_GROUP_MATCHES)
or self._matching_files(
FileGroupForCi.ALL_PROVIDERS_DISTRIBUTION_CONFIG_FILES, CI_FILE_GROUP_MATCHES
)
or self._are_all_providers_affected()
) and self._default_branch == "main":
checks_to_run.append("mypy-providers")
if (
self._matching_files(FileGroupForCi.ALL_DEV_PYTHON_FILES, CI_FILE_GROUP_MATCHES)
or self.full_tests_needed
):
checks_to_run.append("mypy-dev")
if (
self._matching_files(FileGroupForCi.TASK_SDK_FILES, CI_FILE_GROUP_MATCHES)
or self.full_tests_needed
):
checks_to_run.append("mypy-task-sdk")
if (
self._matching_files(FileGroupForCi.ALL_DEVEL_COMMON_PYTHON_FILES, CI_FILE_GROUP_MATCHES)
or self.full_tests_needed
):
checks_to_run.append("mypy-devel-common")
if (
self._matching_files(FileGroupForCi.ALL_AIRFLOW_CTL_PYTHON_FILES, CI_FILE_GROUP_MATCHES)
or self.full_tests_needed
):
checks_to_run.append("mypy-airflow-ctl")
return checks_to_run
@cached_property
def run_mypy(self) -> bool:
return self.mypy_checks != []
@cached_property
def run_python_scans(self) -> bool:
return self._should_be_run(FileGroupForCi.PYTHON_PRODUCTION_FILES)
@cached_property
def run_javascript_scans(self) -> bool:
return self._should_be_run(FileGroupForCi.JAVASCRIPT_PRODUCTION_FILES)
@cached_property
def run_api_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.API_FILES)
@cached_property
def run_ol_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.ASSET_FILES)
@cached_property
def run_api_codegen(self) -> bool:
return self._should_be_run(FileGroupForCi.API_CODEGEN_FILES)
@cached_property
def run_ui_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.UI_FILES)
@cached_property
def run_amazon_tests(self) -> bool:
if self.providers_test_types_list_as_strings_in_json == "[]":
return False
return (
"amazon" in self.providers_test_types_list_as_strings_in_json
or "Providers" in self.providers_test_types_list_as_strings_in_json.split(" ")
)
@cached_property
def run_task_sdk_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.TASK_SDK_FILES)
@cached_property
def run_task_sdk_integration_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.TASK_SDK_FILES) or self._should_be_run(
FileGroupForCi.TASK_SDK_INTEGRATION_TEST_FILES
)
@cached_property
def run_go_sdk_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.GO_SDK_FILES)
@cached_property
def run_airflow_ctl_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.AIRFLOW_CTL_FILES)
@cached_property
def run_kubernetes_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.KUBERNETES_FILES)
@cached_property
def docs_build(self) -> bool:
return self._should_be_run(FileGroupForCi.DOC_FILES)
@cached_property
def run_helm_tests(self) -> bool:
return self._should_be_run(FileGroupForCi.HELM_FILES) and self._default_branch == "main"
@cached_property
def run_unit_tests(self) -> bool:
def _only_new_ui_files() -> bool:
all_source_files = set(
self._matching_files(FileGroupForCi.ALL_SOURCE_FILES, CI_FILE_GROUP_MATCHES)
)
new_ui_source_files = set(self._matching_files(FileGroupForCi.UI_FILES, CI_FILE_GROUP_MATCHES))
remaining_files = all_source_files - new_ui_source_files
if all_source_files and new_ui_source_files and not remaining_files:
return True
return False
if self.full_tests_needed:
return True
if self._is_canary_run():
return True
if _only_new_ui_files():
return False
# we should run all test
return self._should_be_run(FileGroupForCi.ALL_SOURCE_FILES)
@cached_property
def run_system_tests(self) -> bool:
return self.run_unit_tests
@cached_property
def only_pyproject_toml_files_changed(self) -> bool:
return all(Path(file).name == "pyproject.toml" for file in self._files)
@cached_property
def ci_image_build(self) -> bool:
# in case pyproject.toml changed, CI image should be built - even if no build dependencies
# changes because some of our tests - those that need CI image might need to be run depending on
# changed rules for static checks that are part of the pyproject.toml file
return (
self.run_unit_tests
or self.docs_build
or self.run_kubernetes_tests
or self.run_task_sdk_integration_tests
or self.run_helm_tests
or self.run_ui_tests
or self.pyproject_toml_changed
or self.any_provider_yaml_or_pyproject_toml_changed
)
@cached_property
def prod_image_build(self) -> bool:
return self.run_kubernetes_tests or self.run_helm_tests or self.run_task_sdk_integration_tests
def _select_test_type_if_matching(
self, test_types: set[str], test_type: SelectiveCoreTestType
) -> list[str]:
matched_files = self._matching_files(test_type, TEST_TYPE_MATCHES)
count = len(matched_files)
if count > 0:
test_types.add(test_type.value)
get_console().print(f"[warning]{test_type} added because it matched {count} files[/]")
return matched_files
def _are_all_providers_affected(self) -> bool:
# if "Providers" test is present in the list of tests, it means that we should run all providers tests
# prepare all providers packages and build all providers documentation
return "Providers" in self._get_providers_test_types_to_run()
def _fail_if_suspended_providers_affected(self) -> bool:
return "allow suspended provider changes" not in self._pr_labels
def _get_core_test_types_to_run(self) -> list[str]:
if self.full_tests_needed:
return list(all_selective_core_test_types())
candidate_test_types: set[str] = {"Always"}
matched_files: set[str] = set()
for test_type in SelectiveCoreTestType:
if test_type not in [
SelectiveCoreTestType.ALWAYS,
SelectiveCoreTestType.CORE,
SelectiveCoreTestType.OTHER,
]:
matched_files.update(self._select_test_type_if_matching(candidate_test_types, test_type))
kubernetes_files = self._matching_files(FileGroupForCi.KUBERNETES_FILES, CI_FILE_GROUP_MATCHES)
system_test_files = self._matching_files(FileGroupForCi.SYSTEM_TEST_FILES, CI_FILE_GROUP_MATCHES)
all_source_files = self._matching_files(FileGroupForCi.ALL_SOURCE_FILES, CI_FILE_GROUP_MATCHES)
all_providers_source_files = self._matching_files(
FileGroupForCi.ALL_PROVIDERS_PYTHON_FILES, CI_FILE_GROUP_MATCHES
)
all_providers_distribution_config_files = self._matching_files(
FileGroupForCi.ALL_PROVIDERS_DISTRIBUTION_CONFIG_FILES, CI_FILE_GROUP_MATCHES
)
test_always_files = self._matching_files(FileGroupForCi.ALWAYS_TESTS_FILES, CI_FILE_GROUP_MATCHES)
test_ui_files = self._matching_files(FileGroupForCi.UI_FILES, CI_FILE_GROUP_MATCHES)
remaining_files = (
set(all_source_files)
- set(all_providers_source_files)
- set(all_providers_distribution_config_files)
- set(matched_files)
- set(kubernetes_files)
- set(system_test_files)
- set(test_always_files)
- set(test_ui_files)
)
get_console().print(f"[warning]Remaining non test/always files: {len(remaining_files)}[/]")
count_remaining_files = len(remaining_files)
for file in self._files:
if file.endswith("bash.py") and Path(file).parent.name == "operators":
candidate_test_types.add("Serialization")
candidate_test_types.add("Core")
break
if count_remaining_files > 0:
get_console().print(
f"[warning]We should run all core tests except providers."
f"There are {count_remaining_files} changed files that seems to fall "
f"into Core/Other category[/]"
)
get_console().print(remaining_files)
candidate_test_types.update(all_selective_core_test_types())
else:
get_console().print(
"[warning]There are no core/other files. Only tests relevant to the changed files are run.[/]"
)
# run core tests if any changes in serialization files
if SelectiveCoreTestType.SERIALIZATION.value in candidate_test_types:
candidate_test_types.add(SelectiveCoreTestType.CORE.value)
# sort according to predefined order
sorted_candidate_test_types = sorted(candidate_test_types)
get_console().print("[warning]Selected core test type candidates to run:[/]")
get_console().print(sorted_candidate_test_types)
return sorted_candidate_test_types
def _get_providers_test_types_to_run(self, split_to_individual_providers: bool = False) -> list[str]:
if self._default_branch != "main":
return []
if self.upgrade_to_newer_dependencies:
return ["Providers"]
if self.full_tests_needed or self.run_task_sdk_tests:
if split_to_individual_providers:
return list(providers_test_type())
return ["Providers"]
all_providers_source_files = self._matching_files(
FileGroupForCi.ALL_PROVIDERS_PYTHON_FILES, CI_FILE_GROUP_MATCHES
)
all_providers_distribution_config_files = self._matching_files(
FileGroupForCi.ALL_PROVIDERS_DISTRIBUTION_CONFIG_FILES, CI_FILE_GROUP_MATCHES
)
assets_source_files = self._matching_files(FileGroupForCi.ASSET_FILES, CI_FILE_GROUP_MATCHES)
if (
len(all_providers_source_files) == 0
and len(all_providers_distribution_config_files) == 0
and len(assets_source_files) == 0
and not self.run_api_tests
):
# IF API tests are needed, that will trigger extra provider checks
return []
affected_providers = self._find_all_providers_affected(
include_docs=False,
)
candidate_test_types: set[str] = set()
if isinstance(affected_providers, AllProvidersSentinel):
if split_to_individual_providers:
for provider in get_available_distributions():
candidate_test_types.add(f"Providers[{provider}]")
else:
candidate_test_types.add("Providers")
elif affected_providers:
if split_to_individual_providers:
for provider in affected_providers:
candidate_test_types.add(f"Providers[{provider}]")
else:
candidate_test_types.add(f"Providers[{','.join(sorted(affected_providers))}]")
sorted_candidate_test_types = sorted(candidate_test_types)
get_console().print("[warning]Selected providers test type candidates to run:[/]")
get_console().print(sorted_candidate_test_types)
return sorted_candidate_test_types
@staticmethod
def _extract_long_provider_tests(current_test_types: set[str]):
"""
In case there are Provider tests in the list of test to run - either in the form of
Providers or Providers[...] we subtract them from the test type,
and add them to the list of tests to run individually.
In case of Providers, we need to replace it with Providers[-<list_of_long_tests>], but
in case of Providers[list_of_tests] we need to remove the long tests from the list.
In case of celery tests we want to isolate them from the rest, because they seem to be hanging
infrequently when running together with other tests
:param current_test_types: The set of test types to run
"""
long_tests = ["amazon", "celery", "google", "standard"]
for original_test_type in tuple(current_test_types):
if original_test_type == "Providers":
current_test_types.remove(original_test_type)
for long_test in long_tests:
current_test_types.add(f"Providers[{long_test}]")
current_test_types.add(f"Providers[-{','.join(long_tests)}]")
elif original_test_type.startswith("Providers["):
provider_tests_to_run = (
original_test_type.replace("Providers[", "").replace("]", "").split(",")
)
if any(long_test in provider_tests_to_run for long_test in long_tests):
current_test_types.remove(original_test_type)
for long_test in long_tests:
if long_test in provider_tests_to_run:
current_test_types.add(f"Providers[{long_test}]")
provider_tests_to_run.remove(long_test)
current_test_types.add(f"Providers[{','.join(provider_tests_to_run)}]")
@cached_property
def core_test_types_list_as_strings_in_json(self) -> str | None:
if not self.run_unit_tests:
return None
current_test_types = sorted(set(self._get_core_test_types_to_run()))
return json.dumps(_get_test_list_as_json([current_test_types]))
@cached_property
def providers_test_types_list_as_strings_in_json(self) -> str:
if not self.run_unit_tests:
return "[]"
current_test_types = set(self._get_providers_test_types_to_run())
if self._default_branch != "main":
test_types_to_remove: set[str] = set()
for test_type in current_test_types:
if test_type.startswith("Providers"):
get_console().print(
f"[warning]Removing {test_type} because the target branch "
f"is {self._default_branch} and not main[/]"
)
test_types_to_remove.add(test_type)
current_test_types = current_test_types - test_types_to_remove
self._extract_long_provider_tests(current_test_types)
return json.dumps(_get_test_list_as_json([sorted(current_test_types)]))
def _get_individual_providers_list(self):
current_test_types = set(self._get_providers_test_types_to_run(split_to_individual_providers=True))
if "Providers" in current_test_types:
current_test_types.remove("Providers")
current_test_types.update(
{f"Providers[{provider}]" for provider in get_available_distributions(include_not_ready=True)}
)
return current_test_types
@cached_property
def individual_providers_test_types_list_as_strings_in_json(self) -> str | None:
"""Splits the list of test types into several lists of strings (to run them in parallel)."""
if not self.run_unit_tests:
return None
current_test_types = sorted(self._get_individual_providers_list())
if not current_test_types:
return None
# We are hard-coding the number of lists as reasonable starting point to split the
# list of test types - and we can modify it in the future
# TODO: In Python 3.12 we will be able to use itertools.batched
if len(current_test_types) < NUMBER_OF_LOW_DEP_SLICES:
return json.dumps(_get_test_list_as_json([current_test_types]))
list_of_list_of_types = _split_list(current_test_types, NUMBER_OF_LOW_DEP_SLICES)
return json.dumps(_get_test_list_as_json(list_of_list_of_types))
@cached_property
def include_success_outputs(
self,
) -> bool:
return INCLUDE_SUCCESS_OUTPUTS_LABEL in self._pr_labels
@cached_property
def basic_checks_only(self) -> bool:
return not self.ci_image_build
@staticmethod
def _print_diff(old_lines: list[str], new_lines: list[str]):
diff = "\n".join(line for line in difflib.ndiff(old_lines, new_lines) if line and line[0] in "+-?")
get_console().print(diff)
@cached_property
def generated_dependencies_changed(self) -> bool:
return "generated/provider_dependencies.json" in self._files
@cached_property
def any_provider_yaml_or_pyproject_toml_changed(self) -> bool:
if not self._commit_ref:
get_console().print("[warning]Cannot determine changes as commit is missing[/]")
return False
for file in self._files:
path_file = Path(file)
if path_file.name == "provider.yaml" or path_file.name == "pyproject.toml":
return True
return False
@cached_property
def pyproject_toml_changed(self) -> bool:
if not self._commit_ref:
get_console().print("[warning]Cannot determine pyproject.toml changes as commit is missing[/]")
return False
if "pyproject.toml" not in self._files:
return False
new_result = run_command(
["git", "show", f"{self._commit_ref}:pyproject.toml"],
capture_output=True,
text=True,
cwd=AIRFLOW_ROOT_PATH,
check=False,
)
if new_result.returncode != 0:
get_console().print(
f"[warning]Cannot determine pyproject.toml changes. "
f"Could not get pyproject.toml from {self._commit_ref}[/]"
)
return False
old_result = run_command(
["git", "show", f"{self._commit_ref}^:pyproject.toml"],
capture_output=True,
text=True,
cwd=AIRFLOW_ROOT_PATH,
check=False,
)
if old_result.returncode != 0:
get_console().print(
f"[warning]Cannot determine pyproject.toml changes. "
f"Could not get pyproject.toml from {self._commit_ref}^[/]"
)
return False
try:
import tomllib
except ImportError:
import tomli as tomllib
self._new_toml = tomllib.loads(new_result.stdout)
self._old_toml = tomllib.loads(old_result.stdout)
return True
@cached_property
def upgrade_to_newer_dependencies(self) -> bool:
if len(self._matching_files(FileGroupForCi.ALL_PYPROJECT_TOML_FILES, CI_FILE_GROUP_MATCHES)) > 0:
get_console().print("[warning]Upgrade to newer dependencies: Dependency files changed[/]")
return True
if self._github_event in [GithubEvents.PUSH, GithubEvents.SCHEDULE]:
get_console().print("[warning]Upgrade to newer dependencies: Push or Schedule event[/]")
return True
if UPGRADE_TO_NEWER_DEPENDENCIES_LABEL in self._pr_labels:
get_console().print(
f"[warning]Upgrade to newer dependencies: Label '{UPGRADE_TO_NEWER_DEPENDENCIES_LABEL}' "
f"in {self._pr_labels}[/]"
)
return True
return False
@cached_property
def docs_list_as_string(self) -> str | None:
_ALL_DOCS_LIST = ""
if not self.docs_build:
return None
if self._default_branch != "main":
return "apache-airflow docker-stack"
if self.full_tests_needed:
return _ALL_DOCS_LIST
providers_affected = self._find_all_providers_affected(
include_docs=True,
)
if (
isinstance(providers_affected, AllProvidersSentinel)
or "docs/conf.py" in self._files
or "docs/build_docs.py" in self._files
or self._are_all_providers_affected()
):
return _ALL_DOCS_LIST
packages = []
if any(file.startswith(("airflow-core/src/airflow/", "airflow-core/docs/")) for file in self._files):
packages.append("apache-airflow")
if any(file.startswith("providers-summary-docs/") for file in self._files):
packages.append("apache-airflow-providers")
if any(file.startswith("chart/") for file in self._files):
packages.append("helm-chart")
if any(file.startswith("docker-stack-docs") for file in self._files):
packages.append("docker-stack")
if any(file.startswith("task-sdk/src/") for file in self._files):
packages.append("task-sdk")
if any(file.startswith("airflow-ctl/") for file in self._files):
packages.append("apache-airflow-ctl")
if providers_affected:
for provider in providers_affected:
packages.append(provider.replace("-", "."))
return " ".join(packages)
@cached_property
def skip_prek_hooks(self) -> str:
prek_hooks_to_skip = set()
prek_hooks_to_skip.add("identity")
if self._default_branch != "main":
# Skip those tests on all "release" branches
prek_hooks_to_skip.update(
(
"compile-fab-assets",
"generate-openapi-spec-fab",
"check-airflow-providers-bug-report-template",
"check-airflow-provider-compatibility",
"check-extra-packages-references",
"check-provider-yaml-valid",
"lint-helm-chart",
"validate-operators-init",
)
)
if self.full_tests_needed:
# when full tests are needed, we do not want to skip any checks and we should
# run all the prek hooks just to be sure everything is ok when some structural changes occurred
return ",".join(sorted(prek_hooks_to_skip))
if not (
self._matching_files(FileGroupForCi.UI_FILES, CI_FILE_GROUP_MATCHES)
or self._matching_files(FileGroupForCi.API_CODEGEN_FILES, CI_FILE_GROUP_MATCHES)
):
prek_hooks_to_skip.add("ts-compile-lint-ui")
prek_hooks_to_skip.add("ts-compile-lint-simple-auth-manager-ui")
if not self._matching_files(FileGroupForCi.ALL_PYTHON_FILES, CI_FILE_GROUP_MATCHES):
prek_hooks_to_skip.add("flynt")
if not self._matching_files(
FileGroupForCi.HELM_FILES,
CI_FILE_GROUP_MATCHES,
):
prek_hooks_to_skip.add("lint-helm-chart")
if not (
self._matching_files(
FileGroupForCi.ALL_PROVIDERS_DISTRIBUTION_CONFIG_FILES, CI_FILE_GROUP_MATCHES
)
or self._matching_files(FileGroupForCi.ALL_PROVIDERS_PYTHON_FILES, CI_FILE_GROUP_MATCHES)
):
# only skip provider validation if none of the provider.yaml and provider
# python files changed because validation also walks through all the provider python files
prek_hooks_to_skip.add("check-provider-yaml-valid")
return ",".join(sorted(prek_hooks_to_skip))
@cached_property
def skip_providers_tests(self) -> bool:
if self._default_branch != "main":
return True
if self.full_tests_needed:
return False
if self._get_providers_test_types_to_run():
return False
if not self.run_unit_tests:
return True
return True
@cached_property
def docker_cache(self) -> str:
return "disabled" if DISABLE_IMAGE_CACHE_LABEL in self._pr_labels else "registry"
@cached_property
def debug_resources(self) -> bool:
return DEBUG_CI_RESOURCES_LABEL in self._pr_labels
@cached_property
def disable_airflow_repo_cache(self) -> bool:
return self.docker_cache == "disabled"
@cached_property
def helm_test_packages(self) -> str:
return json.dumps(all_helm_test_packages())
@cached_property
def selected_providers_list_as_string(self) -> str | None:
if self._default_branch != "main":
return None
if self.full_tests_needed:
return ""
if self._are_all_providers_affected():
return ""
affected_providers = self._find_all_providers_affected(include_docs=True)
if not affected_providers:
return None
if isinstance(affected_providers, AllProvidersSentinel):
return ""
return " ".join(sorted(affected_providers))
def get_job_label(self, event_type: str, branch: str):
import requests
job_name = "Basic tests"
workflow_name = "ci-amd-arm.yml"
headers = {"Accept": "application/vnd.github.v3+json"}
if os.environ.get("GITHUB_TOKEN"):
headers["Authorization"] = f"token {os.environ.get('GITHUB_TOKEN')}"
url = f"https://api.github.com/repos/{self._github_repository}/actions/workflows/{workflow_name}/runs"
payload = {"event": event_type, "status": "completed", "branch": branch}
response = requests.get(url, headers=headers, params=payload)
if response.status_code != 200:
get_console().print(f"[red]Error while listing workflow runs error: {response.json()}.\n")
return None
runs = response.json().get("workflow_runs", [])
if not runs:
get_console().print(
f"[yellow]No runs information found for workflow {workflow_name}, params: {payload}.\n"
)
return None
jobs_url = runs[0].get("jobs_url")
jobs_response = requests.get(jobs_url, headers=headers)
jobs = jobs_response.json().get("jobs", [])
if not jobs:
get_console().print("[yellow]No jobs information found for jobs %s.\n", jobs_url)
return None
for job in jobs:
if job_name in job.get("name", ""):
runner_labels = job.get("labels", [])
if "windows-2025" in runner_labels:
continue
if not runner_labels:
get_console().print("[yellow]No labels found for job {job_name}.\n", jobs_url)
return None
return runner_labels[0]
return None
@cached_property
def runner_type(self):
if self._github_event in [GithubEvents.SCHEDULE, GithubEvents.PUSH]:
branch = self._github_context_dict.get("ref_name", "main")
label = self.get_job_label(event_type=str(self._github_event.value), branch=branch)
return RUNNERS_TYPE_CROSS_MAPPING.get(label, PUBLIC_AMD_RUNNERS) if label else PUBLIC_AMD_RUNNERS
return PUBLIC_AMD_RUNNERS
@cached_property
def platform(self):
if "arm" in self.runner_type:
return "linux/arm64"
return "linux/amd64"
@cached_property
def amd_runners(self) -> str:
return PUBLIC_AMD_RUNNERS
@cached_property
def arm_runners(self) -> str:
return PUBLIC_ARM_RUNNERS
@cached_property
def has_migrations(self) -> bool:
return any([file.startswith("airflow-core/src/airflow/migrations/") for file in self._files])
@cached_property
def providers_compatibility_tests_matrix(self) -> str:
"""Provider compatibility input matrix for the current run. Filter out python versions not built"""
return json.dumps(
[
check
for check in PROVIDERS_COMPATIBILITY_TESTS_MATRIX
if check["python-version"] in self.python_versions
]
)
@cached_property
def excluded_providers_as_string(self) -> str:
providers_to_exclude = defaultdict(list)
for provider, provider_info in DEPENDENCIES.items():
if "excluded-python-versions" in provider_info:
for python_version in provider_info["excluded-python-versions"]:
providers_to_exclude[python_version].append(provider)
sorted_providers_to_exclude = dict(
sorted(providers_to_exclude.items(), key=lambda item: int(item[0].split(".")[1]))
) # ^ sort by Python minor version
return json.dumps(sorted_providers_to_exclude)
def _is_disabled_integration(self, integration: str) -> bool:
return (
integration in DISABLE_TESTABLE_INTEGRATIONS_FROM_CI
or integration in DISABLE_TESTABLE_INTEGRATIONS_FROM_ARM
and self.runner_type in PUBLIC_ARM_RUNNERS
)
@cached_property
def testable_core_integrations(self) -> list[str]:
if not self.run_unit_tests:
return []
return [
integration
for integration in TESTABLE_CORE_INTEGRATIONS
if not self._is_disabled_integration(integration)
]
@cached_property
def testable_providers_integrations(self) -> list[str]:
if not self.run_unit_tests:
return []
return [
integration
for integration in TESTABLE_PROVIDERS_INTEGRATIONS
if not self._is_disabled_integration(integration)
]
@cached_property
def is_committer_build(self):
if NON_COMMITTER_BUILD_LABEL in self._pr_labels:
return False
return self._github_actor in COMMITTERS
def _find_all_providers_affected(self, include_docs: bool) -> list[str] | AllProvidersSentinel | None:
affected_providers: set[str] = set()
all_providers_affected = False
suspended_providers: set[str] = set()
for changed_file in self._files:
provider = find_provider_affected(changed_file, include_docs=include_docs)
if provider == "Providers":
all_providers_affected = True
elif provider is not None:
if provider not in DEPENDENCIES:
suspended_providers.add(provider)
else:
affected_providers.add(provider)
if self.run_api_tests:
affected_providers.add("fab")
if self.run_ol_tests:
affected_providers.add("openlineage")
if all_providers_affected:
return ALL_PROVIDERS_SENTINEL
if suspended_providers:
# We check for suspended providers only after we have checked if all providers are affected.
# No matter if we found that we are modifying a suspended provider individually,
# if all providers are
# affected, then it means that we are ok to proceed because likely we are running some kind of
# global refactoring that affects multiple providers including the suspended one. This is a
# potential escape hatch if someone would like to modify suspended provider,
# but it can be found at the review time and is anyway harmless as the provider will not be
# released nor tested nor used in CI anyway.
get_console().print("[yellow]You are modifying suspended providers.\n")
get_console().print(
"[info]Some providers modified by this change have been suspended, "
"and before attempting such changes you should fix the reason for suspension."
)
get_console().print(
"[info]When fixing it, you should set suspended = false in provider.yaml "
"to make changes to the provider."
)
get_console().print(f"Suspended providers: {suspended_providers}")
if self._fail_if_suspended_providers_affected():
get_console().print(
"[error]This PR did not have `allow suspended provider changes`"
" label set so it will fail."
)
sys.exit(1)
else:
get_console().print(
"[info]This PR had `allow suspended provider changes` label set so it will continue"
)
if not affected_providers:
return None
for provider in list(affected_providers):
affected_providers.update(
get_related_providers(provider, upstream_dependencies=True, downstream_dependencies=True)
)
return sorted(affected_providers)
def _is_canary_run(self):
return (
self._github_event in [GithubEvents.SCHEDULE, GithubEvents.PUSH, GithubEvents.WORKFLOW_DISPATCH]
and self._github_repository == APACHE_AIRFLOW_GITHUB_REPOSITORY
) or CANARY_LABEL in self._pr_labels
@cached_property
def force_pip(self):
return FORCE_PIP_LABEL in self._pr_labels
@cached_property
def shared_distributions_as_json(self):
return json.dumps([file.name for file in (AIRFLOW_ROOT_PATH / "shared").iterdir() if file.is_dir()])
@cached_property
def ui_english_translation_changed(self) -> bool:
_translation_changed = bool(
self._matching_files(
FileGroupForCi.UI_ENGLISH_TRANSLATION_FILES,
CI_FILE_GROUP_MATCHES,
)
)
if FAIL_WHEN_ENGLISH_TRANSLATION_CHANGED and _translation_changed and not self._is_canary_run():
if ALLOW_TRANSACTION_CHANGE_LABEL in self._pr_labels:
get_console().print(
"[warning]The 'allow translation change' label is set and English "
"translation files changed. Bypassing the freeze period."
)
return True
get_console().print(
"[error]English translation changed but we are in a period of translation"
"freeze and label to allow it ('allow translation change') is not set"
)
get_console().print()
get_console().print(
"[warning]To allow translation change, please set the label "
"'allow translation change' on the PR, but this has to be communicated "
"and agreed to at the #i18n channel in slack"
)
sys.exit(1)
return _translation_changed
@cached_property
def provider_dependency_bump(self) -> bool:
"""Check for apache-airflow-providers dependency bumps in pyproject.toml files."""
pyproject_files = self._matching_files(
FileGroupForCi.ALL_PYPROJECT_TOML_FILES,
CI_FILE_GROUP_MATCHES,
)
if not pyproject_files or not self._github_event == GithubEvents.PULL_REQUEST:
return False
try:
import tomllib
except ImportError:
import tomli as tomllib
violations = []
for pyproject_file in pyproject_files:
# Get the new version of the file
new_result = run_command(
["git", "show", f"{self._commit_ref}:{pyproject_file}"],
capture_output=True,
text=True,
cwd=AIRFLOW_ROOT_PATH,
check=False,
)
if new_result.returncode != 0:
continue
# Get the old version of the file
old_result = run_command(
["git", "show", f"{self._commit_ref}^:{pyproject_file}"],
capture_output=True,
text=True,
cwd=AIRFLOW_ROOT_PATH,
check=False,
)
if old_result.returncode != 0:
continue
try:
new_toml = tomllib.loads(new_result.stdout)
old_toml = tomllib.loads(old_result.stdout)
except Exception:
continue
# Check dependencies and optional-dependencies sections
for section in ["dependencies", "optional-dependencies"]:
if section not in new_toml.get("project", {}):
continue
new_deps = new_toml["project"][section]
old_deps = old_toml.get("project", {}).get(section, {})
if isinstance(new_deps, dict):
# Handle optional-dependencies which is a dict
for group_name, deps_list in new_deps.items():
old_deps_list = old_deps.get(group_name, []) if isinstance(old_deps, dict) else []
violations.extend(
SelectiveChecks._check_provider_deps_in_list(
deps_list, old_deps_list, pyproject_file, f"{section}.{group_name}"
)
)
elif isinstance(new_deps, list):
# Handle dependencies which is a list
old_deps_list = old_deps if isinstance(old_deps, list) else []
violations.extend(
SelectiveChecks._check_provider_deps_in_list(
new_deps, old_deps_list, pyproject_file, section
)
)
if violations:
if ALLOW_PROVIDER_DEPENDENCY_BUMP_LABEL in self._pr_labels:
get_console().print(
"[warning]The 'allow provider dependency bump' label is set. "
"Bypassing provider dependency check."
)
return True
get_console().print(
"[error]Provider dependency version bumps detected that should only be "
"performed by Release Managers![/]"
)
get_console().print()
for violation in violations:
get_console().print(f"[error] - {violation}[/]")
get_console().print()
get_console().print(
"[warning]Only Release Managers should change >= conditions for apache-airflow-providers "
"dependencies.[/]\n\nIf you want to refer to a future version of the dependency, please add a "
"comment [info]'# use next version'[/info] in the line of the dependency instead.\n"
)
get_console().print()
get_console().print(
f"[warning]If this change is intentional and approved, please set the label on the PR:[/]\n\n"
f"'[info]{ALLOW_PROVIDER_DEPENDENCY_BUMP_LABEL}[/]\n"
)
get_console().print()
get_console().print(
"See https://github.com/apache/airflow/blob/main/contributing-docs/"
"13_airflow_dependencies_and_extras.rst for more comprehensive documentation "
"about airflow dependency management."
)
get_console().print()
sys.exit(1)
return False
@staticmethod
def _check_provider_deps_in_list(
new_deps: list, old_deps: list, file_path: str, section: str
) -> list[str]:
"""Check a list of dependencies for apache-airflow-providers version changes."""
violations = []
# Parse dependencies into a dict for easier comparison
def parse_dep(dep_str: str) -> tuple[str, str | None]:
"""Parse a dependency string and return (package_name, version_constraint)."""
if not isinstance(dep_str, str):
return "", None
# Remove inline comments
dep_str = dep_str.split("#")[0].strip()
# Match patterns like: apache-airflow-providers-xxx>=1.0.0 or apache-airflow-providers-xxx>=1.0.0,<2.0
match = re.match(r"^(apache-airflow-providers-[a-z0-9-]+)\s*(.*)", dep_str, re.IGNORECASE)
if match:
return match.group(1).lower(), match.group(2).strip()
return "", None
old_deps_dict = {}
for dep in old_deps:
pkg_name, version = parse_dep(dep)
if pkg_name:
old_deps_dict[pkg_name] = (dep, version)
for new_dep in new_deps:
pkg_name, new_version = parse_dep(new_dep)
if not pkg_name:
continue
# Check if this dependency existed before
if pkg_name in old_deps_dict:
old_dep_str, old_version = old_deps_dict[pkg_name]
# Check if the >= condition changed
if new_version and old_version and new_version != old_version:
# Check if >= version number changed
new_ge_match = re.search(r">=\s*([0-9.]+)", new_version)
old_ge_match = re.search(r">=\s*([0-9.]+)", old_version)
if new_ge_match and old_ge_match:
new_ge_version = new_ge_match.group(1)
old_ge_version = old_ge_match.group(1)
if new_ge_version != old_ge_version:
violations.append(
f"{file_path} [{section}]: {pkg_name} >= version changed from "
f"{old_ge_version} to {new_ge_version}"
)
return violations
def _has_common_compat_changed(self) -> bool:
"""Check if any common.compat provider file was changed."""
return any(f.startswith("providers/common/compat/") for f in self._files)
def _get_changed_providers_excluding_common_compat(self) -> set[str]:
"""Get set of changed providers excluding common.compat itself."""
changed_providers: set[str] = set()
for changed_file in self._files:
provider = find_provider_affected(changed_file, include_docs=False)
if provider and provider not in ["common.compat", "Providers"]:
changed_providers.add(provider)
return changed_providers
def _uses_next_version_comment(self, provider: str) -> bool:
"""Check if provider's pyproject.toml has '# use next version' for common-compat dependency."""
pyproject_file = f"providers/{provider.replace('.', '/')}/pyproject.toml"
result = run_command(
["git", "show", f"{self._commit_ref}:{pyproject_file}"],
capture_output=True,
text=True,
cwd=AIRFLOW_ROOT_PATH,
check=False,
)
if result.returncode != 0:
return True # If file doesn't exist, don't flag as violation
# Check if dependency line contains both the package and the comment
for line in result.stdout.splitlines():
if "apache-airflow-providers-common-compat" in line.lower():
return "# use next version" in line.lower()
return True # If dependency not found, don't flag as violation
def _print_violations_and_exit_or_bypass(self, violations: list[str]) -> bool:
"""Print violations and either exit with error or bypass with warning."""
console = get_console()
if SKIP_COMMON_COMPAT_CHECK_LABEL in self._pr_labels:
console.print("[warning]The 'skip common compat check' label is set. Bypassing check for:[/]")
for provider in violations:
console.print(
f"[warning] - {provider} (providers/{provider.replace('.', '/')}/pyproject.toml)[/]"
)
console.print()
return True
console.print(
"[error]common.compat provider changed but the following providers don't have "
"'# use next version' comment for their common-compat dependency![/]"
)
console.print()
for provider in violations:
console.print(f"[error] - {provider} (providers/{provider.replace('.', '/')}/pyproject.toml)[/]")
console.print()
console.print(
"[warning]When common.compat changes with other providers in the same PR, "
"add '# use next version' comment where they depend on common-compat.[/]\n"
"[warning]Example:[/] "
'[info]"apache-airflow-providers-common-compat>=1.8.0", # use next version[/]\n'
)
console.print(
f"[warning]To bypass this check, add the label: '[info]{SKIP_COMMON_COMPAT_CHECK_LABEL}[/]'\n"
)
sys.exit(1)
@cached_property
def common_compat_changed_without_next_version(self) -> bool:
"""
Check if common.compat provider changed and other providers changed don't have '# use next version'
comment for their common-compat dependency.
"""
if self._github_event != GithubEvents.PULL_REQUEST:
return False
if not self._has_common_compat_changed():
return False
changed_providers = self._get_changed_providers_excluding_common_compat()
if not changed_providers:
return False # Only common.compat changed
get_console().print(f"[warning]common.compat changed with providers: {sorted(changed_providers)}[/]")
# Find providers missing '# use next version' comment
violations = [p for p in sorted(changed_providers) if not self._uses_next_version_comment(p)]
if violations:
return self._print_violations_and_exit_or_bypass(violations)
return False
| SelectiveChecks |
python | celery__celery | t/unit/worker/test_state.py | {
"start": 2332,
"end": 4346
} | class ____:
@pytest.fixture
def p(self):
return MyPersistent(state, filename='celery-state')
def test_close_twice(self, p):
p._is_open = False
p.close()
def test_constructor(self, p):
assert p.db == {}
assert p.db.filename == p.filename
def test_save(self, p):
p.db['foo'] = 'bar'
p.save()
assert p.db.in_sync
assert p.db.closed
def add_revoked(self, p, *ids):
for id in ids:
p.db.setdefault('revoked', LimitedSet()).add(id)
def test_merge(self, p, data=['foo', 'bar', 'baz']):
state.revoked.update(data)
p.merge()
for item in data:
assert item in state.revoked
def test_merge_dict(self, p):
p.clock = Mock()
p.clock.adjust.return_value = 626
d = {'revoked': {'abc': time()}, 'clock': 313}
p._merge_with(d)
p.clock.adjust.assert_called_with(313)
assert d['clock'] == 626
assert 'abc' in state.revoked
def test_sync_clock_and_purge(self, p):
passthrough = Mock()
passthrough.side_effect = lambda x: x
with patch('celery.worker.state.revoked') as revoked:
d = {'clock': 0}
p.clock = Mock()
p.clock.forward.return_value = 627
p._dumps = passthrough
p.compress = passthrough
p._sync_with(d)
revoked.purge.assert_called_with()
assert d['clock'] == 627
assert 'revoked' not in d
assert d['zrevoked'] is revoked
def test_sync(self, p,
data1=['foo', 'bar', 'baz'], data2=['baz', 'ini', 'koz']):
self.add_revoked(p, *data1)
for item in data2:
state.revoked.add(item)
p.sync()
assert p.db['zrevoked']
pickled = p.decompress(p.db['zrevoked'])
assert pickled
saved = pickle.loads(pickled)
for item in data2:
assert item in saved
| test_Persistent |
python | kamyu104__LeetCode-Solutions | Python/find-the-index-of-the-large-integer.py | {
"start": 32,
"end": 143
} | class ____(object):
def compareSub(self, l, r, x, y):
pass
def length(self):
pass
| ArrayReader |
python | huggingface__transformers | src/transformers/models/depth_pro/modeling_depth_pro.py | {
"start": 18877,
"end": 21082
} | class ____(nn.Module):
def __init__(self, config: DepthProConfig):
super().__init__()
self.config = config
self.n_scaled_images = len(self.config.scaled_images_ratios)
self.n_intermediate_hooks = len(self.config.intermediate_hook_ids)
# for image_features
self.image_block = DepthProFeatureUpsampleBlock(
config=config,
input_dims=config.image_model_config.hidden_size,
intermediate_dims=config.image_model_config.hidden_size,
output_dims=config.scaled_images_feature_dims[0],
n_upsample_layers=1,
use_proj=False,
bias=True,
)
# for scaled_images_features
self.scaled_images = nn.ModuleList()
for i, feature_dims in enumerate(config.scaled_images_feature_dims):
block = DepthProFeatureUpsampleBlock(
config=config,
input_dims=config.patch_model_config.hidden_size,
intermediate_dims=feature_dims,
output_dims=feature_dims,
n_upsample_layers=1,
)
self.scaled_images.append(block)
# for intermediate_features
self.intermediate = nn.ModuleList()
for i, feature_dims in enumerate(config.intermediate_feature_dims):
intermediate_dims = config.fusion_hidden_size if i == 0 else feature_dims
block = DepthProFeatureUpsampleBlock(
config=config,
input_dims=config.patch_model_config.hidden_size,
intermediate_dims=intermediate_dims,
output_dims=feature_dims,
n_upsample_layers=2 + i,
)
self.intermediate.append(block)
def forward(self, features: list[torch.Tensor]) -> list[torch.Tensor]:
features[0] = self.image_block(features[0])
for i in range(self.n_scaled_images):
features[i + 1] = self.scaled_images[i](features[i + 1])
for i in range(self.n_intermediate_hooks):
features[self.n_scaled_images + i + 1] = self.intermediate[i](features[self.n_scaled_images + i + 1])
return features
| DepthProFeatureUpsample |
python | django-guardian__django-guardian | benchmarks/models.py | {
"start": 319,
"end": 451
} | class ____(GroupObjectPermissionBase):
content_object = models.ForeignKey("TestDirectModel", on_delete=models.CASCADE)
| DirectGroup |
python | kamyu104__LeetCode-Solutions | Python/sum-of-special-evenly-spaced-elements-in-array.py | {
"start": 49,
"end": 940
} | class ____(object):
def solve(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
MOD = 10**9+7
prefix = {}
result = []
for x, y in queries:
if y*y > len(nums):
total = 0
for i in xrange(x, len(nums), y):
total += nums[i]
total %= MOD
result.append(total)
else:
begin = x%y
if (begin, y) not in prefix:
prefix[(begin, y)] = [0]
for i in xrange(begin, len(nums), y):
prefix[(begin, y)].append((prefix[(begin, y)][-1] + nums[i]) % MOD)
result.append((prefix[(begin, y)][-1]-prefix[(begin, y)][x//y]) % MOD)
return result
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 31598,
"end": 31950
} | class ____(typing.Generic[_ValueType]):
_inner_value: _ValueType
def __init__(self, inner_value: _ValueType) -> None:
self._inner_value = inner_value
@given(st.builds(Wrapper))
def test_issue_2603_regression(built):
"""It was impossible to build annotated classes with constructors."""
assert isinstance(built, Wrapper)
| Wrapper |
python | django__django | tests/admin_scripts/tests.py | {
"start": 40199,
"end": 43585
} | class ____(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
super().setUp()
self.write_settings(
"settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"]
)
def test_builtin_command(self):
"""
minimal: manage.py builtin commands fail with an error when no settings
provided.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"""
minimal: manage.py builtin commands fail if settings are provided as
argument
"""
args = ["check", "--settings=test_project.settings", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"""
minimal: manage.py builtin commands fail if settings are provided in
the environment.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args, "test_project.settings")
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"""
minimal: manage.py builtin commands fail if settings file (from
argument) doesn't exist.
"""
args = ["check", "--settings=bad_settings", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"""
minimal: manage.py builtin commands fail if settings file (from
environment) doesn't exist.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args, "bad_settings")
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"""
minimal: manage.py can't execute user commands without appropriate
settings
"""
args = ["noargs_command"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"""
minimal: manage.py can't execute user commands, even if settings are
provided as argument.
"""
args = ["noargs_command", "--settings=test_project.settings"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"""
minimal: manage.py can't execute user commands, even if settings are
provided in environment.
"""
args = ["noargs_command"]
out, err = self.run_manage(args, "test_project.settings")
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
| ManageMinimalSettings |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_release_details.py | {
"start": 7586,
"end": 9630
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
project2 = self.create_project(name="bar", organization=project.organization)
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
release.add_project(project2)
ReleaseFile.objects.create(
organization_id=project.organization_id,
release_id=release.id,
file=File.objects.create(name="application.js", type="release.file"),
name="http://example.com/application.js",
)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.delete(url)
assert response.status_code == 204, response.content
assert not Release.objects.filter(id=release.id).exists()
def test_existing_group(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
project2 = self.create_project(name="baz", organization=project.organization)
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
release.add_project(project2)
self.create_group(first_release=release)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.delete(url)
assert response.status_code == 400, response.content
assert Release.objects.filter(id=release.id).exists()
| ReleaseDeleteTest |
python | huggingface__transformers | src/transformers/models/vaultgemma/modular_vaultgemma.py | {
"start": 8842,
"end": 8885
} | class ____(Gemma2MLP):
pass
| VaultGemmaMLP |
python | huggingface__transformers | src/transformers/utils/notebook.py | {
"start": 8256,
"end": 11487
} | class ____(NotebookProgressBar):
"""
An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.
Args:
num_steps (`int`): The number of steps during training. column_names (`list[str]`, *optional*):
The list of column names for the metrics table (will be inferred from the first call to
[`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
"""
def __init__(self, num_steps, column_names=None):
super().__init__(num_steps)
self.inner_table = None if column_names is None else [column_names]
self.child_bar = None
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code))
def write_line(self, values):
"""
Write the values in the inner table.
Args:
values (`dict[str, float]`): The values to display.
"""
if self.inner_table is None:
self.inner_table = [list(values.keys()), list(values.values())]
else:
columns = self.inner_table[0]
for key in values:
if key not in columns:
columns.append(key)
self.inner_table[0] = columns
if len(self.inner_table) > 1:
last_values = self.inner_table[-1]
first_column = self.inner_table[0][0]
if last_values[0] != values[first_column]:
# write new line
self.inner_table.append([values.get(c, "No Log") for c in columns])
else:
# update last line
new_values = values
for c in columns:
if c not in new_values:
new_values[c] = last_values[columns.index(c)]
self.inner_table[-1] = [new_values[c] for c in columns]
else:
self.inner_table.append([values[c] for c in columns])
def add_child(self, total, prefix=None, width=300):
"""
Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be
easily updated).
Args:
total (`int`): The number of iterations for the child progress bar.
prefix (`str`, *optional*): A prefix to write on the left of the progress bar.
width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar.
"""
self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
return self.child_bar
def remove_child(self):
"""
Closes the child progress bar.
"""
self.child_bar = None
self.display()
| NotebookTrainingTracker |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 60234,
"end": 60424
} | class ____(themeable):
"""
Spacing between the legend and the plotting area
Parameters
----------
theme_element : float
Value in points.
"""
| legend_box_spacing |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py | {
"start": 672,
"end": 5777
} | class ____(SanitySingleVersion):
"""Sanity test for ansible-doc."""
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type in DOCUMENTABLE_PLUGINS]
return [target for target in targets
if os.path.splitext(target.path)[1] == '.py'
and os.path.basename(target.path) != '__init__.py'
and any(is_subdir(target.path, path) for path in plugin_paths)
]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
doc_targets: dict[str, list[str]] = collections.defaultdict(list)
remap_types = dict(
modules='module',
)
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_parts = os.path.relpath(plugin_file_path, plugin_path).split(os.path.sep)
plugin_name = os.path.splitext(plugin_parts[-1])[0]
if plugin_name.startswith('_') and not data_context().content.collection:
plugin_name = plugin_name[1:]
plugin_fqcn = data_context().content.prefix + '.'.join(plugin_parts[:-1] + [plugin_name])
doc_targets[plugin_type].append(plugin_fqcn)
env = ansible_environment(args, color=False)
for doc_type in MULTI_FILE_PLUGINS:
if doc_targets.get(doc_type):
# List plugins
cmd = ['ansible-doc', '-l', '--json', '-t', doc_type]
prefix = data_context().content.prefix if data_context().content.collection else 'ansible.builtin.'
cmd.append(prefix[:-1])
try:
stdout, stderr = intercept_python(args, python, cmd, env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if status:
summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if stdout:
display.info(stdout.strip(), verbosity=3)
if stderr:
summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if args.explain:
continue
plugin_list_json = json.loads(stdout)
doc_targets[doc_type] = []
for plugin_name, plugin_value in sorted(plugin_list_json.items()):
if plugin_value != 'UNDOCUMENTED':
doc_targets[doc_type].append(plugin_name)
if not doc_targets[doc_type]:
del doc_targets[doc_type]
error_messages: list[SanityMessage] = []
for doc_type in sorted(doc_targets):
for format_option in [None, '--json']:
cmd = ['ansible-doc', '-t', doc_type]
if format_option is not None:
cmd.append(format_option)
cmd.extend(sorted(doc_targets[doc_type]))
try:
stdout, stderr = intercept_python(args, python, cmd, env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if status:
summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if stdout:
display.info(stdout.strip(), verbosity=3)
if stderr:
# ignore removed module/plugin warnings
stderr = re.sub(r'\[WARNING]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()
if stderr:
summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
return SanityFailure(self.name, summary=summary)
if args.explain:
return SanitySuccess(self.name)
error_messages = settings.process_errors(error_messages, paths)
if error_messages:
return SanityFailure(self.name, messages=error_messages)
return SanitySuccess(self.name)
| AnsibleDocTest |
python | scrapy__scrapy | scrapy/extensions/spiderstate.py | {
"start": 364,
"end": 1626
} | class ____:
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir: str | None = None):
self.jobdir: str | None = jobdir
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
jobdir = job_dir(crawler.settings)
if not jobdir:
raise NotConfigured
obj = cls(jobdir)
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider: Spider) -> None:
if self.jobdir:
with Path(self.statefn).open("wb") as f:
assert hasattr(spider, "state") # set in spider_opened
pickle.dump(spider.state, f, protocol=4)
def spider_opened(self, spider: Spider) -> None:
if self.jobdir and Path(self.statefn).exists():
with Path(self.statefn).open("rb") as f:
spider.state = pickle.load(f) # type: ignore[attr-defined] # noqa: S301
else:
spider.state = {} # type: ignore[attr-defined]
@property
def statefn(self) -> str:
assert self.jobdir
return str(Path(self.jobdir, "spider.state"))
| SpiderState |
python | pydata__xarray | xarray/tests/test_variable.py | {
"start": 89884,
"end": 94633
} | class ____(VariableSubclassobjects):
def cls(self, *args, **kwargs) -> Variable:
return Variable(*args, **kwargs).chunk()
def test_chunk(self):
unblocked = Variable(["dim_0", "dim_1"], np.ones((3, 4)))
assert unblocked.chunks is None
blocked = unblocked.chunk()
assert blocked.chunks == ((3,), (4,))
first_dask_name = blocked.data.name
blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) # type: ignore[arg-type]
assert blocked.chunks == ((2, 1), (2, 2))
assert blocked.data.name != first_dask_name
blocked = unblocked.chunk(chunks=(3, 3))
assert blocked.chunks == ((3,), (3, 1))
assert blocked.data.name != first_dask_name
# name doesn't change when rechunking by same amount
# this fails if ReprObject doesn't have __dask_tokenize__ defined
assert unblocked.chunk(2).data.name == unblocked.chunk(2).data.name
assert blocked.load().chunks is None
# Check that kwargs are passed
import dask.array as da
blocked = unblocked.chunk(name="testname_")
assert isinstance(blocked.data, da.Array)
assert "testname_" in blocked.data.name
# test kwargs form of chunks
blocked = unblocked.chunk(dim_0=3, dim_1=3)
assert blocked.chunks == ((3,), (3, 1))
assert blocked.data.name != first_dask_name
@pytest.mark.skip
def test_0d_object_array_with_list(self):
super().test_0d_object_array_with_list()
@pytest.mark.skip
def test_array_interface(self):
# dask array does not have `argsort`
super().test_array_interface()
@pytest.mark.skip
def test_copy_index(self):
super().test_copy_index()
@pytest.mark.skip
@pytest.mark.filterwarnings("ignore:elementwise comparison failed.*:FutureWarning")
def test_eq_all_dtypes(self):
super().test_eq_all_dtypes()
def test_getitem_fancy(self):
super().test_getitem_fancy()
def test_getitem_1d_fancy(self):
super().test_getitem_1d_fancy()
def test_getitem_with_mask_nd_indexer(self):
import dask.array as da
v = Variable(["x"], da.arange(3, chunks=3))
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(
v._getitem_with_mask(indexer, fill_value=-1),
self.cls(("x", "y"), [[0, -1], [-1, 2]]),
)
@pytest.mark.parametrize("dim", ["x", "y"])
@pytest.mark.parametrize("window", [3, 8, 11])
@pytest.mark.parametrize("center", [True, False])
def test_dask_rolling(self, dim, window, center):
import dask
import dask.array as da
dask.config.set(scheduler="single-threaded")
x = Variable(("x", "y"), np.array(np.random.randn(100, 40), dtype=float))
dx = Variable(("x", "y"), da.from_array(x, chunks=[(6, 30, 30, 20, 14), 8]))
expected = x.rolling_window(
dim, window, "window", center=center, fill_value=np.nan
)
with raise_if_dask_computes():
actual = dx.rolling_window(
dim, window, "window", center=center, fill_value=np.nan
)
assert isinstance(actual.data, da.Array)
assert actual.shape == expected.shape
assert_equal(actual, expected)
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/11585")
def test_multiindex(self):
super().test_multiindex()
@pytest.mark.parametrize(
"mode",
[
"mean",
pytest.param(
"median",
marks=pytest.mark.xfail(reason="median is not implemented by Dask"),
),
pytest.param(
"reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug")
),
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
@pytest.mark.filterwarnings(
r"ignore:dask.array.pad.+? converts integers to floats."
)
def test_pad(self, mode, xr_arg, np_arg):
super().test_pad(mode, xr_arg, np_arg)
@pytest.mark.skip(reason="dask doesn't support extension arrays")
def test_pandas_period_index(self):
super().test_pandas_period_index()
@pytest.mark.skip(reason="dask doesn't support extension arrays")
def test_pandas_datetime64_with_tz(self):
super().test_pandas_datetime64_with_tz()
@pytest.mark.skip(reason="dask doesn't support extension arrays")
def test_pandas_categorical_dtype(self):
super().test_pandas_categorical_dtype()
@requires_sparse
| TestVariableWithDask |
python | pypa__pip | src/pip/_vendor/packaging/utils.py | {
"start": 744,
"end": 5050
} | class ____(ValueError):
"""
An invalid sdist filename was found, users should refer to the packaging user guide.
"""
# Core metadata spec for `Name`
_validate_regex = re.compile(
r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
)
_canonicalize_regex = re.compile(r"[-_.]+")
_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
if validate and not _validate_regex.match(name):
raise InvalidName(f"name is invalid: {name!r}")
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
return cast(NormalizedName, value)
def is_normalized_name(name: str) -> bool:
return _normalized_regex.match(name) is not None
@functools.singledispatch
def canonicalize_version(
version: Version | str, *, strip_trailing_zero: bool = True
) -> str:
"""
Return a canonical form of a version as a string.
>>> canonicalize_version('1.0.1')
'1.0.1'
Per PEP 625, versions may have multiple canonical forms, differing
only by trailing zeros.
>>> canonicalize_version('1.0.0')
'1'
>>> canonicalize_version('1.0.0', strip_trailing_zero=False)
'1.0.0'
Invalid versions are returned unaltered.
>>> canonicalize_version('foo bar baz')
'foo bar baz'
"""
return str(_TrimmedRelease(str(version)) if strip_trailing_zero else version)
@canonicalize_version.register
def _(version: str, *, strip_trailing_zero: bool = True) -> str:
try:
parsed = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
return canonicalize_version(parsed, strip_trailing_zero=strip_trailing_zero)
def parse_wheel_filename(
filename: str,
) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]:
if not filename.endswith(".whl"):
raise InvalidWheelFilename(
f"Invalid wheel filename (extension must be '.whl'): {filename!r}"
)
filename = filename[:-4]
dashes = filename.count("-")
if dashes not in (4, 5):
raise InvalidWheelFilename(
f"Invalid wheel filename (wrong number of parts): {filename!r}"
)
parts = filename.split("-", dashes - 2)
name_part = parts[0]
# See PEP 427 for the rules on escaping the project name.
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
raise InvalidWheelFilename(f"Invalid project name: {filename!r}")
name = canonicalize_name(name_part)
try:
version = Version(parts[1])
except InvalidVersion as e:
raise InvalidWheelFilename(
f"Invalid wheel filename (invalid version): {filename!r}"
) from e
if dashes == 5:
build_part = parts[2]
build_match = _build_tag_regex.match(build_part)
if build_match is None:
raise InvalidWheelFilename(
f"Invalid build number: {build_part} in {filename!r}"
)
build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
else:
build = ()
tags = parse_tag(parts[-1])
return (name, version, build, tags)
def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]:
if filename.endswith(".tar.gz"):
file_stem = filename[: -len(".tar.gz")]
elif filename.endswith(".zip"):
file_stem = filename[: -len(".zip")]
else:
raise InvalidSdistFilename(
f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
f" {filename!r}"
)
# We are requiring a PEP 440 version, which cannot contain dashes,
# so we split on the last dash.
name_part, sep, version_part = file_stem.rpartition("-")
if not sep:
raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}")
name = canonicalize_name(name_part)
try:
version = Version(version_part)
except InvalidVersion as e:
raise InvalidSdistFilename(
f"Invalid sdist filename (invalid version): {filename!r}"
) from e
return (name, version)
| InvalidSdistFilename |
python | scrapy__scrapy | tests/test_utils_iterators.py | {
"start": 19427,
"end": 20439
} | class ____:
bbody = b"utf8-body"
ubody = bbody.decode("utf8")
@pytest.mark.parametrize(
"obj",
[
bbody,
ubody,
TextResponse(url="http://example.org/", body=bbody, encoding="utf-8"),
Response(url="http://example.org/", body=bbody),
],
)
def test_body_or_str(self, obj: Response | str | bytes) -> None:
r1 = _body_or_str(obj)
self._assert_type_and_value(r1, self.ubody, obj)
r2 = _body_or_str(obj, unicode=True)
self._assert_type_and_value(r2, self.ubody, obj)
r3 = _body_or_str(obj, unicode=False)
self._assert_type_and_value(r3, self.bbody, obj)
assert type(r1) is type(r2)
assert type(r1) is not type(r3)
@staticmethod
def _assert_type_and_value(
a: str | bytes, b: str | bytes, obj: Response | str | bytes
) -> None:
assert type(a) is type(b), f"Got {type(a)}, expected {type(b)} for {obj!r}"
assert a == b
| TestBodyOrStr |
python | graphql-python__graphene | graphene/utils/tests/test_dataloader.py | {
"start": 1115,
"end": 10662
} | class ____(DataLoader):
async def batch_load_fn(self, character_ids):
return mock_batch_load_fn(character_ids)
Context = namedtuple("Context", "character_loader")
@mark.asyncio
async def test_basic_dataloader():
schema = Schema(query=Query)
character_loader = CharacterLoader()
context = Context(character_loader=character_loader)
query = """
{
skywalkerFamily {
name
sibling {
name
}
}
}
"""
result = await schema.execute_async(query, context=context)
assert not result.errors
assert result.data == {
"skywalkerFamily": [
{"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}},
{"name": "Darth Vader", "sibling": None},
{"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}},
]
}
assert mock_batch_load_fn.call_count == 1
assert get_character.call_count == 3
def id_loader(**options):
load_calls = []
async def default_resolve(x):
return x
resolve = options.pop("resolve", default_resolve)
async def fn(keys):
load_calls.append(keys)
return await resolve(keys)
# return keys
identity_loader = DataLoader(fn, **options)
return identity_loader, load_calls
@mark.asyncio
async def test_build_a_simple_data_loader():
async def call_fn(keys):
return keys
identity_loader = DataLoader(call_fn)
promise1 = identity_loader.load(1)
value1 = await promise1
assert value1 == 1
@mark.asyncio
async def test_can_build_a_data_loader_from_a_partial():
value_map = {1: "one"}
async def call_fn(context, keys):
return [context.get(key) for key in keys]
partial_fn = partial(call_fn, value_map)
identity_loader = DataLoader(partial_fn)
promise1 = identity_loader.load(1)
value1 = await promise1
assert value1 == "one"
@mark.asyncio
async def test_supports_loading_multiple_keys_in_one_call():
async def call_fn(keys):
return keys
identity_loader = DataLoader(call_fn)
promise_all = identity_loader.load_many([1, 2])
values = await promise_all
assert values == [1, 2]
promise_all = identity_loader.load_many([])
values = await promise_all
assert values == []
@mark.asyncio
async def test_batches_multiple_requests():
identity_loader, load_calls = id_loader()
promise1 = identity_loader.load(1)
promise2 = identity_loader.load(2)
p = gather(promise1, promise2)
value1, value2 = await p
assert value1 == 1
assert value2 == 2
assert load_calls == [[1, 2]]
@mark.asyncio
async def test_batches_multiple_requests_with_max_batch_sizes():
identity_loader, load_calls = id_loader(max_batch_size=2)
promise1 = identity_loader.load(1)
promise2 = identity_loader.load(2)
promise3 = identity_loader.load(3)
p = gather(promise1, promise2, promise3)
value1, value2, value3 = await p
assert value1 == 1
assert value2 == 2
assert value3 == 3
assert load_calls == [[1, 2], [3]]
@mark.asyncio
async def test_coalesces_identical_requests():
identity_loader, load_calls = id_loader()
promise1 = identity_loader.load(1)
promise2 = identity_loader.load(1)
assert promise1 == promise2
p = gather(promise1, promise2)
value1, value2 = await p
assert value1 == 1
assert value2 == 1
assert load_calls == [[1]]
@mark.asyncio
async def test_caches_repeated_requests():
identity_loader, load_calls = id_loader()
a, b = await gather(identity_loader.load("A"), identity_loader.load("B"))
assert a == "A"
assert b == "B"
assert load_calls == [["A", "B"]]
a2, c = await gather(identity_loader.load("A"), identity_loader.load("C"))
assert a2 == "A"
assert c == "C"
assert load_calls == [["A", "B"], ["C"]]
a3, b2, c2 = await gather(
identity_loader.load("A"), identity_loader.load("B"), identity_loader.load("C")
)
assert a3 == "A"
assert b2 == "B"
assert c2 == "C"
assert load_calls == [["A", "B"], ["C"]]
@mark.asyncio
async def test_clears_single_value_in_loader():
identity_loader, load_calls = id_loader()
a, b = await gather(identity_loader.load("A"), identity_loader.load("B"))
assert a == "A"
assert b == "B"
assert load_calls == [["A", "B"]]
identity_loader.clear("A")
a2, b2 = await gather(identity_loader.load("A"), identity_loader.load("B"))
assert a2 == "A"
assert b2 == "B"
assert load_calls == [["A", "B"], ["A"]]
@mark.asyncio
async def test_clears_all_values_in_loader():
identity_loader, load_calls = id_loader()
a, b = await gather(identity_loader.load("A"), identity_loader.load("B"))
assert a == "A"
assert b == "B"
assert load_calls == [["A", "B"]]
identity_loader.clear_all()
a2, b2 = await gather(identity_loader.load("A"), identity_loader.load("B"))
assert a2 == "A"
assert b2 == "B"
assert load_calls == [["A", "B"], ["A", "B"]]
@mark.asyncio
async def test_allows_priming_the_cache():
identity_loader, load_calls = id_loader()
identity_loader.prime("A", "A")
a, b = await gather(identity_loader.load("A"), identity_loader.load("B"))
assert a == "A"
assert b == "B"
assert load_calls == [["B"]]
@mark.asyncio
async def test_does_not_prime_keys_that_already_exist():
identity_loader, load_calls = id_loader()
identity_loader.prime("A", "X")
a1 = await identity_loader.load("A")
b1 = await identity_loader.load("B")
assert a1 == "X"
assert b1 == "B"
identity_loader.prime("A", "Y")
identity_loader.prime("B", "Y")
a2 = await identity_loader.load("A")
b2 = await identity_loader.load("B")
assert a2 == "X"
assert b2 == "B"
assert load_calls == [["B"]]
# # Represents Errors
@mark.asyncio
async def test_resolves_to_error_to_indicate_failure():
async def resolve(keys):
mapped_keys = [
key if key % 2 == 0 else Exception("Odd: {}".format(key)) for key in keys
]
return mapped_keys
even_loader, load_calls = id_loader(resolve=resolve)
with raises(Exception) as exc_info:
await even_loader.load(1)
assert str(exc_info.value) == "Odd: 1"
value2 = await even_loader.load(2)
assert value2 == 2
assert load_calls == [[1], [2]]
@mark.asyncio
async def test_can_represent_failures_and_successes_simultaneously():
async def resolve(keys):
mapped_keys = [
key if key % 2 == 0 else Exception("Odd: {}".format(key)) for key in keys
]
return mapped_keys
even_loader, load_calls = id_loader(resolve=resolve)
promise1 = even_loader.load(1)
promise2 = even_loader.load(2)
with raises(Exception) as exc_info:
await promise1
assert str(exc_info.value) == "Odd: 1"
value2 = await promise2
assert value2 == 2
assert load_calls == [[1, 2]]
@mark.asyncio
async def test_caches_failed_fetches():
async def resolve(keys):
mapped_keys = [Exception("Error: {}".format(key)) for key in keys]
return mapped_keys
error_loader, load_calls = id_loader(resolve=resolve)
with raises(Exception) as exc_info:
await error_loader.load(1)
assert str(exc_info.value) == "Error: 1"
with raises(Exception) as exc_info:
await error_loader.load(1)
assert str(exc_info.value) == "Error: 1"
assert load_calls == [[1]]
@mark.asyncio
async def test_caches_failed_fetches_2():
identity_loader, load_calls = id_loader()
identity_loader.prime(1, Exception("Error: 1"))
with raises(Exception) as _:
await identity_loader.load(1)
assert load_calls == []
# It is resilient to job queue ordering
@mark.asyncio
async def test_batches_loads_occuring_within_promises():
identity_loader, load_calls = id_loader()
async def load_b_1():
return await load_b_2()
async def load_b_2():
return await identity_loader.load("B")
values = await gather(identity_loader.load("A"), load_b_1())
assert values == ["A", "B"]
assert load_calls == [["A", "B"]]
@mark.asyncio
async def test_catches_error_if_loader_resolver_fails():
exc = Exception("AOH!")
def do_resolve(x):
raise exc
a_loader, a_load_calls = id_loader(resolve=do_resolve)
with raises(Exception) as exc_info:
await a_loader.load("A1")
assert exc_info.value == exc
@mark.asyncio
async def test_can_call_a_loader_from_a_loader():
deep_loader, deep_load_calls = id_loader()
a_loader, a_load_calls = id_loader(
resolve=lambda keys: deep_loader.load(tuple(keys))
)
b_loader, b_load_calls = id_loader(
resolve=lambda keys: deep_loader.load(tuple(keys))
)
a1, b1, a2, b2 = await gather(
a_loader.load("A1"),
b_loader.load("B1"),
a_loader.load("A2"),
b_loader.load("B2"),
)
assert a1 == "A1"
assert b1 == "B1"
assert a2 == "A2"
assert b2 == "B2"
assert a_load_calls == [["A1", "A2"]]
assert b_load_calls == [["B1", "B2"]]
assert deep_load_calls == [[("A1", "A2"), ("B1", "B2")]]
@mark.asyncio
async def test_dataloader_clear_with_missing_key_works():
async def do_resolve(x):
return x
a_loader, a_load_calls = id_loader(resolve=do_resolve)
assert a_loader.clear("A1") == a_loader
| CharacterLoader |
python | huggingface__transformers | src/transformers/utils/chat_template_utils.py | {
"start": 2574,
"end": 23051
} | class ____(Exception):
"""Exception raised for errors in parsing docstrings to generate JSON schemas"""
def _get_json_schema_type(param_type: type) -> dict[str, str]:
type_mapping = {
int: {"type": "integer"},
float: {"type": "number"},
str: {"type": "string"},
bool: {"type": "boolean"},
type(None): {"type": "null"},
Any: {},
}
if is_vision_available():
type_mapping[Image] = {"type": "image"}
if is_torch_available():
type_mapping[Tensor] = {"type": "audio"}
return type_mapping.get(param_type, {"type": "object"})
def _parse_type_hint(hint: str) -> dict:
origin = get_origin(hint)
args = get_args(hint)
if origin is None:
try:
return _get_json_schema_type(hint)
except KeyError:
raise TypeHintParsingException(
"Couldn't parse this type hint, likely due to a custom class or object: ", hint
)
elif origin is Union or (hasattr(types, "UnionType") and origin is types.UnionType):
# Recurse into each of the subtypes in the Union, except None, which is handled separately at the end
subtypes = [_parse_type_hint(t) for t in args if t is not type(None)]
if len(subtypes) == 1:
# A single non-null type can be expressed directly
return_dict = subtypes[0]
elif all(isinstance(subtype["type"], str) for subtype in subtypes):
# A union of basic types can be expressed as a list in the schema
return_dict = {"type": sorted([subtype["type"] for subtype in subtypes])}
else:
# A union of more complex types requires "anyOf"
return_dict = {"anyOf": subtypes}
if type(None) in args:
return_dict["nullable"] = True
return return_dict
elif origin is Literal and len(args) > 0:
LITERAL_TYPES = (int, float, str, bool, type(None))
args_types = []
for arg in args:
if type(arg) not in LITERAL_TYPES:
raise TypeHintParsingException("Only the valid python literals can be listed in typing.Literal.")
arg_type = _get_json_schema_type(type(arg)).get("type")
if arg_type is not None and arg_type not in args_types:
args_types.append(arg_type)
return {
"type": args_types.pop() if len(args_types) == 1 else list(args_types),
"enum": list(args),
}
elif origin is list:
if not args:
return {"type": "array"}
else:
# Lists can only have a single type argument, so recurse into it
return {"type": "array", "items": _parse_type_hint(args[0])}
elif origin is tuple:
if not args:
return {"type": "array"}
if len(args) == 1:
raise TypeHintParsingException(
f"The type hint {str(hint).replace('typing.', '')} is a Tuple with a single element, which "
"we do not automatically convert to JSON schema as it is rarely necessary. If this input can contain "
"more than one element, we recommend "
"using a list[] type instead, or if it really is a single element, remove the tuple[] wrapper and just "
"pass the element directly."
)
if ... in args:
raise TypeHintParsingException(
"Conversion of '...' is not supported in Tuple type hints. "
"Use list[] types for variable-length"
" inputs instead."
)
return {"type": "array", "prefixItems": [_parse_type_hint(t) for t in args]}
elif origin is dict:
# The JSON equivalent to a dict is 'object', which mandates that all keys are strings
# However, we can specify the type of the dict values with "additionalProperties"
out = {"type": "object"}
if len(args) == 2:
out["additionalProperties"] = _parse_type_hint(args[1])
return out
raise TypeHintParsingException("Couldn't parse this type hint, likely due to a custom class or object: ", hint)
def _convert_type_hints_to_json_schema(func: Callable) -> dict:
type_hints = get_type_hints(func)
signature = inspect.signature(func)
required = []
for param_name, param in signature.parameters.items():
if param.annotation == inspect.Parameter.empty:
raise TypeHintParsingException(f"Argument {param.name} is missing a type hint in function {func.__name__}")
if param.default == inspect.Parameter.empty:
required.append(param_name)
properties = {}
for param_name, param_type in type_hints.items():
properties[param_name] = _parse_type_hint(param_type)
schema = {"type": "object", "properties": properties}
if required:
schema["required"] = required
return schema
def parse_google_format_docstring(docstring: str) -> tuple[str | None, dict | None, str | None]:
"""
Parses a Google-style docstring to extract the function description,
argument descriptions, and return description.
Args:
docstring (str): The docstring to parse.
Returns:
The function description, arguments, and return description.
"""
# Extract the sections
description_match = description_re.search(docstring)
args_match = args_re.search(docstring)
returns_match = returns_re.search(docstring)
# Clean and store the sections
description = description_match.group(1).strip() if description_match else None
docstring_args = args_match.group(1).strip() if args_match else None
returns = returns_match.group(1).strip() if returns_match else None
# Parsing the arguments into a dictionary
if docstring_args is not None:
docstring_args = "\n".join([line for line in docstring_args.split("\n") if line.strip()]) # Remove blank lines
matches = args_split_re.findall(docstring_args)
args_dict = {match[0]: re.sub(r"\s*\n+\s*", " ", match[1].strip()) for match in matches}
else:
args_dict = {}
return description, args_dict, returns
def get_json_schema(func: Callable) -> dict:
"""
This function generates a JSON schema for a given function, based on its docstring and type hints. This is
mostly used for passing lists of tools to a chat template. The JSON schema contains the name and description of
the function, as well as the names, types and descriptions for each of its arguments. `get_json_schema()` requires
that the function has a docstring, and that each argument has a description in the docstring, in the standard
Google docstring format shown below. It also requires that all the function arguments have a valid Python type hint.
Although it is not required, a `Returns` block can also be added, which will be included in the schema. This is
optional because most chat templates ignore the return value of the function.
Args:
func: The function to generate a JSON schema for.
Returns:
A dictionary containing the JSON schema for the function.
Examples:
```python
>>> def multiply(x: float, y: float):
>>> '''
>>> A function that multiplies two numbers
>>>
>>> Args:
>>> x: The first number to multiply
>>> y: The second number to multiply
>>> '''
>>> return x * y
>>>
>>> print(get_json_schema(multiply))
{
"name": "multiply",
"description": "A function that multiplies two numbers",
"parameters": {
"type": "object",
"properties": {
"x": {"type": "number", "description": "The first number to multiply"},
"y": {"type": "number", "description": "The second number to multiply"}
},
"required": ["x", "y"]
}
}
```
The general use for these schemas is that they are used to generate tool descriptions for chat templates that
support them, like so:
```python
>>> from transformers import AutoTokenizer
>>> from transformers.utils import get_json_schema
>>>
>>> def multiply(x: float, y: float):
>>> '''
>>> A function that multiplies two numbers
>>>
>>> Args:
>>> x: The first number to multiply
>>> y: The second number to multiply
>>> return x * y
>>> '''
>>>
>>> multiply_schema = get_json_schema(multiply)
>>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>>> messages = [{"role": "user", "content": "What is 179 x 4571?"}]
>>> formatted_chat = tokenizer.apply_chat_template(
>>> messages,
>>> tools=[multiply_schema],
>>> chat_template="tool_use",
>>> return_dict=True,
>>> return_tensors="pt",
>>> add_generation_prompt=True
>>> )
>>> # The formatted chat can now be passed to model.generate()
```
Each argument description can also have an optional `(choices: ...)` block at the end, such as
`(choices: ["tea", "coffee"])`, which will be parsed into an `enum` field in the schema. Note that this will
only be parsed correctly if it is at the end of the line:
```python
>>> def drink_beverage(beverage: str):
>>> '''
>>> A function that drinks a beverage
>>>
>>> Args:
>>> beverage: The beverage to drink (choices: ["tea", "coffee"])
>>> '''
>>> pass
>>>
>>> print(get_json_schema(drink_beverage))
```
{
'name': 'drink_beverage',
'description': 'A function that drinks a beverage',
'parameters': {
'type': 'object',
'properties': {
'beverage': {
'type': 'string',
'enum': ['tea', 'coffee'],
'description': 'The beverage to drink'
}
},
'required': ['beverage']
}
}
"""
doc = inspect.getdoc(func)
if not doc:
raise DocstringParsingException(
f"Cannot generate JSON schema for {func.__name__} because it has no docstring!"
)
doc = doc.strip()
main_doc, param_descriptions, return_doc = parse_google_format_docstring(doc)
json_schema = _convert_type_hints_to_json_schema(func)
if (return_dict := json_schema["properties"].pop("return", None)) is not None:
if return_doc is not None: # We allow a missing return docstring since most templates ignore it
return_dict["description"] = return_doc
for arg, schema in json_schema["properties"].items():
if arg not in param_descriptions:
raise DocstringParsingException(
f"Cannot generate JSON schema for {func.__name__} because the docstring has no description for the argument '{arg}'"
)
desc = param_descriptions[arg]
enum_choices = re.search(r"\(choices:\s*(.*?)\)\s*$", desc, flags=re.IGNORECASE)
if enum_choices:
schema["enum"] = [c.strip() for c in json.loads(enum_choices.group(1))]
desc = enum_choices.string[: enum_choices.start()].strip()
schema["description"] = desc
output = {"name": func.__name__, "description": main_doc, "parameters": json_schema}
if return_dict is not None:
output["return"] = return_dict
return {"type": "function", "function": output}
def _render_with_assistant_indices(
compiled_template, messages, tools, documents, add_generation_prompt, **template_kwargs
):
rendered_blocks = []
generation_indices = []
with compiled_template.environment.activate_tracker(rendered_blocks, generation_indices):
for block in compiled_template.generate(
messages=messages,
tools=tools,
documents=documents,
add_generation_prompt=add_generation_prompt,
**template_kwargs,
):
rendered_blocks.append(block)
rendered_chat = "".join(rendered_blocks)
return rendered_chat, generation_indices
@lru_cache
def _compile_jinja_template(chat_template):
    """Compile ``chat_template`` (a Jinja source string) into a template object.
    Cached with ``lru_cache`` so repeated calls with the same template string
    reuse the compiled result. The sandboxed environment is extended with:
    an ``AssistantTracker`` extension implementing ``{% generation %}`` blocks,
    a non-HTML-escaping ``tojson`` filter, and ``raise_exception`` /
    ``strftime_now`` globals for templates to use.
    """
    if not is_jinja_available():
        raise ImportError(
            "apply_chat_template requires jinja2 to be installed. Please install it using `pip install jinja2`."
        )
    class AssistantTracker(Extension):
        # This extension is used to track the indices of assistant-generated tokens in the rendered chat
        tags = {"generation"}
        def __init__(self, environment: ImmutableSandboxedEnvironment):
            # The class is only initiated by jinja.
            super().__init__(environment)
            # Expose activate_tracker on the environment so a single render can
            # opt in to span tracking (see _render_with_assistant_indices).
            environment.extend(activate_tracker=self.activate_tracker)
            self._rendered_blocks = None
            self._generation_indices = None
        def parse(self, parser: jinja2.parser.Parser) -> jinja2.nodes.CallBlock:
            # Turn `{% generation %}...{% endgeneration %}` into a call block
            # whose rendered body is routed through _generation_support.
            lineno = next(parser.stream).lineno
            body = parser.parse_statements(["name:endgeneration"], drop_needle=True)
            return jinja2.nodes.CallBlock(self.call_method("_generation_support"), [], [], body).set_lineno(lineno)
        @jinja2.pass_eval_context
        def _generation_support(self, context: jinja2.nodes.EvalContext, caller: jinja2.runtime.Macro) -> str:
            rv = caller()
            if self.is_active():
                # Only track generation indices if the tracker is active
                start_index = len("".join(self._rendered_blocks))
                end_index = start_index + len(rv)
                self._generation_indices.append((start_index, end_index))
            return rv
        def is_active(self) -> bool:
            # Truthy only between entry and exit of activate_tracker().
            return self._rendered_blocks or self._generation_indices
        @contextmanager
        def activate_tracker(self, rendered_blocks: list[int], generation_indices: list[int]):
            # Bind the caller-owned lists for the duration of one render; the
            # tracker must not be re-entered while already active.
            try:
                if self.is_active():
                    raise ValueError("AssistantTracker should not be reused before closed")
                self._rendered_blocks = rendered_blocks
                self._generation_indices = generation_indices
                yield
            finally:
                self._rendered_blocks = None
                self._generation_indices = None
    if version.parse(jinja2.__version__) < version.parse("3.1.0"):
        raise ImportError(
            f"apply_chat_template requires jinja2>=3.1.0 to be installed. Your version is {jinja2.__version__}."
        )
    def raise_exception(message):
        # Exposed as a template global so templates can abort with a clear error.
        raise jinja2.exceptions.TemplateError(message)
    def tojson(x, ensure_ascii=False, indent=None, separators=None, sort_keys=False):
        # We override the built-in tojson filter because Jinja's default filter escapes HTML characters
        # We also expose some options like custom indents and separators
        return json.dumps(x, ensure_ascii=ensure_ascii, indent=indent, separators=separators, sort_keys=sort_keys)
    def strftime_now(format):
        # Current local time formatted with strftime; exposed as a template global.
        return datetime.now().strftime(format)
    jinja_env = ImmutableSandboxedEnvironment(
        trim_blocks=True, lstrip_blocks=True, extensions=[AssistantTracker, jinja2.ext.loopcontrols]
    )
    jinja_env.filters["tojson"] = tojson
    jinja_env.globals["raise_exception"] = raise_exception
    jinja_env.globals["strftime_now"] = strftime_now
    return jinja_env.from_string(chat_template)
def render_jinja_template(
    conversations: list[ChatType],
    tools: list[dict | Callable] | None = None,
    documents: ChatType | None = None,
    chat_template: str | None = None,
    return_assistant_tokens_mask: bool = False,
    continue_final_message: bool = False,
    add_generation_prompt: bool = False,
    **kwargs,
) -> tuple[list[str], list[list[tuple[int, int]]]]:
    """Render each conversation with ``chat_template`` and return the results.

    Args:
        conversations: Chats to render; each is a list of message dicts, or an
            object with a ``messages`` attribute (a Conversation).
        tools: Optional tools, given either as JSON schemas (dicts) or as
            callables that are auto-converted via ``get_json_schema``.
        documents: Optional list of dicts passed through to the template.
        chat_template: The Jinja template source to render with.
        return_assistant_tokens_mask: If True, also collect the character spans
            produced by ``{% generation %}`` blocks for each rendered chat.
        continue_final_message: If True, trim the template text that closes the
            final message so generation can continue it in place.
        add_generation_prompt: Forwarded to the template.
        **kwargs: Extra variables forwarded to the template.

    Returns:
        ``(rendered, all_generation_indices)``: one rendered string per chat
        and, when ``return_assistant_tokens_mask`` is set, the per-chat
        generation spans (otherwise the second list stays empty).

    Raises:
        ValueError: for malformed tools, or when ``continue_final_message`` is
            set but the final message has no text / is not preserved verbatim
            by the template.
        TypeError: when a document is not a dict.
    """
    if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template):
        logger.warning_once(
            "return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword."
        )
    # Compilation function uses a cache to avoid recompiling the same template
    compiled_template = _compile_jinja_template(chat_template)
    # We accept either JSON schemas or functions for tools. If we get functions, we convert them to schemas
    if tools is not None:
        tool_schemas = []
        for tool in tools:
            if isinstance(tool, dict):
                tool_schemas.append(tool)
            elif isfunction(tool):
                tool_schemas.append(get_json_schema(tool))
            else:
                raise ValueError(
                    "Tools should either be a JSON schema, or a callable function with type hints "
                    "and a docstring suitable for auto-conversion to a schema."
                )
    else:
        tool_schemas = None
    if documents is not None:
        for document in documents:
            if not isinstance(document, dict):
                raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!")
    rendered = []
    all_generation_indices = []
    # Sentinel appended to the final message's text so we can locate (and trim)
    # whatever the template emits after it when continue_final_message is set.
    continue_final_message_tag = "CONTINUE_FINAL_MESSAGE_TAG "
    for chat in conversations:
        if hasattr(chat, "messages"):
            # Indicates it's a Conversation object
            chat = chat.messages
        if continue_final_message:
            # Deep-copy so the caller's chat is not mutated by inserting the tag.
            chat = deepcopy(chat)
            final_message = chat[-1]["content"]
            if isinstance(final_message, (list, tuple)):
                for content_block in reversed(final_message):
                    if "text" in content_block:
                        # Pick the last text block in the message (the first one we hit while iterating in reverse)
                        final_message = content_block["text"]
                        content_block["text"] = content_block["text"] + continue_final_message_tag
                        break
                else:
                    raise ValueError(
                        "continue_final_message is set but we could not find any text to continue in the final message!"
                    )
            else:
                chat[-1]["content"] = chat[-1]["content"] + continue_final_message_tag
        if return_assistant_tokens_mask:
            rendered_chat, generation_indices = _render_with_assistant_indices(
                compiled_template=compiled_template,
                messages=chat,
                tools=tool_schemas,
                documents=documents,
                add_generation_prompt=add_generation_prompt,
                **kwargs,
            )
            all_generation_indices.append(generation_indices)
        else:
            rendered_chat = compiled_template.render(
                messages=chat,
                tools=tool_schemas,
                documents=documents,
                add_generation_prompt=add_generation_prompt,
                **kwargs,
            )
        if continue_final_message:
            if (final_message.strip() not in rendered_chat) or (
                continue_final_message_tag.strip() not in rendered_chat
            ):
                raise ValueError(
                    "continue_final_message is set but the final message does not appear in the chat after "
                    "applying the chat template! This can happen if the chat template deletes portions of "
                    "the final message. Please verify the chat template and final message in your chat to "
                    "ensure they are compatible."
                )
            # Everything from the sentinel onward is template-added closing text;
            # cut it off so the model can continue the final message in place.
            tag_loc = rendered_chat.rindex(continue_final_message_tag.strip())
            if rendered_chat[tag_loc : tag_loc + len(continue_final_message_tag)] == continue_final_message_tag:
                # The template preserves spacing, so things are simple
                rendered_chat = rendered_chat[:tag_loc]
            else:
                # The message has trailing spacing that was trimmed, so we must be more cautious
                rendered_chat = rendered_chat[:tag_loc].rstrip()
        rendered.append(rendered_chat)
    return rendered, all_generation_indices
def is_valid_message(message):
    """
    Check that input is a valid message in a chat, namely a dict with "role" and "content" keys.
    """
    return isinstance(message, dict) and "role" in message and "content" in message
| DocstringParsingException |
python | pypa__pip | src/pip/_vendor/urllib3/packages/six.py | {
"start": 6903,
"end": 12082
} | class ____(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute(
"filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"
),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute(
"reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"
),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute(
"zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"
),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule(
"collections_abc",
"collections",
"collections.abc" if sys.version_info >= (3, 3) else "collections",
),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule(
"_dummy_thread",
"dummy_thread",
"_dummy_thread" if sys.version_info < (3, 9) else "_thread",
),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule(
"email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"
),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
| _MovedItems |
python | tiangolo__fastapi | scripts/contributors.py | {
"start": 1320,
"end": 1376
} | class ____(BaseModel):
nodes: list[ReviewNode]
| Reviews |
python | bottlepy__bottle | test/test_wsgi.py | {
"start": 162,
"end": 6465
} | class ____(ServerTestBase):
''' Tests for WSGI functionality, routing and output casting (decorators) '''
def test_get(self):
""" WSGI: GET routes"""
@bottle.route('/')
def test(): return 'test'
self.assertStatus(404, '/not/found')
self.assertStatus(405, '/', post="var=value")
self.assertBody('test', '/')
def test_post(self):
""" WSGI: POST routes"""
@bottle.route('/', method='POST')
def test(): return 'test'
self.assertStatus(404, '/not/found')
self.assertStatus(405, '/')
self.assertBody('test', '/', post="var=value")
def test_headget(self):
""" WSGI: HEAD routes and GET fallback"""
@bottle.route('/get')
def test(): return 'test'
@bottle.route('/head', method='HEAD')
def test2(): return 'test'
# GET -> HEAD
self.assertStatus(405, '/head')
# HEAD -> HEAD
self.assertStatus(200, '/head', method='HEAD')
self.assertBody('', '/head', method='HEAD')
# HEAD -> GET
self.assertStatus(200, '/get', method='HEAD')
self.assertBody('', '/get', method='HEAD')
def test_request_attrs(self):
""" WSGI: POST routes"""
@bottle.route('/')
def test():
self.assertEqual(bottle.request.app,
bottle.default_app())
self.assertEqual(bottle.request.route,
bottle.default_app().routes[0])
return 'foo'
self.assertBody('foo', '/')
    def get204(self):
        """ 204 responses must not return some entity headers """
        # NOTE(review): named `get204`, so standard test discovery will not
        # collect it as a test; presumably intended as `test_204` — confirm.
        bad = ('content-length', 'content-type')
        for h in bad:
            bottle.response.set_header(h, 'foo')
        # NOTE(review): this assigns the module attribute `bottle.status`;
        # setting the response status would be `bottle.response.status = 204`
        # — confirm intent.
        bottle.status = 204
        for h, v in bottle.response.headerlist:
            self.assertFalse(h.lower() in bad, "Header %s not deleted" % h)
    def get304(self):
        """ 304 responses must not return entity headers """
        # NOTE(review): named `get304` rather than `test_304`, so standard test
        # discovery will not collect it — confirm whether that is intentional.
        bad = ('allow', 'content-encoding', 'content-language',
               'content-length', 'content-md5', 'content-range',
               'content-type', 'last-modified') # + c-location, expires?
        for h in bad:
            bottle.response.set_header(h, 'foo')
        # NOTE(review): assigns the module attribute `bottle.status`; setting
        # the response status would be `bottle.response.status = 304` — confirm.
        bottle.status = 304
        for h, v in bottle.response.headerlist:
            self.assertFalse(h.lower() in bad, "Header %s not deleted" % h)
def test_anymethod(self):
self.assertStatus(404, '/any')
@bottle.route('/any', method='ANY')
def test2(): return 'test'
self.assertStatus(200, '/any', method='HEAD')
self.assertBody('test', '/any', method='GET')
self.assertBody('test', '/any', method='POST')
self.assertBody('test', '/any', method='DELETE')
@bottle.route('/any', method='GET')
def test2(): return 'test2'
self.assertBody('test2', '/any', method='GET')
@bottle.route('/any', method='POST')
def test2(): return 'test3'
self.assertBody('test3', '/any', method='POST')
self.assertBody('test', '/any', method='DELETE')
def test_500(self):
""" WSGI: Exceptions within handler code (HTTP 500) """
@bottle.route('/')
def test(): return 1/0
self.assertStatus(500, '/')
def test_500_unicode(self):
@bottle.route('/')
def test(): raise Exception(touni('Unicode äöüß message.'))
self.assertStatus(500, '/')
def test_utf8_url(self):
""" WSGI: UTF-8 Characters in the URL """
@bottle.route('/my-öäü/<string>')
def test(string): return string
self.assertBody(tob('urf8-öäü'), '/my-öäü/urf8-öäü')
def test_utf8_header(self):
header = 'öäü'.encode('utf8').decode('latin1')
@bottle.route('/test')
def test():
h = bottle.request.get_header('X-Test')
self.assertEqual(h, 'öäü')
bottle.response.set_header('X-Test', h)
self.assertHeader('X-Test', header, '/test', env={'HTTP_X_TEST': header})
def test_utf8_404(self):
self.assertStatus(404, '/not-found/urf8-öäü')
def test_401(self):
""" WSGI: abort(401, '') (HTTP 401) """
@bottle.route('/')
def test(): bottle.abort(401)
self.assertStatus(401, '/')
@bottle.error(401)
def err(e):
bottle.response.status = 200
return str(type(e))
self.assertStatus(200, '/')
self.assertBody("<class 'bottle.HTTPError'>",'/')
def test_303(self):
""" WSGI: redirect (HTTP 303) """
@bottle.route('/')
def test(): bottle.redirect('/yes')
@bottle.route('/one')
def test2(): bottle.redirect('/yes',305)
env = {'SERVER_PROTOCOL':'HTTP/1.1'}
self.assertStatus(303, '/', env=env)
self.assertHeader('Location', 'http://127.0.0.1/yes', '/', env=env)
env = {'SERVER_PROTOCOL':'HTTP/1.0'}
self.assertStatus(302, '/', env=env)
self.assertHeader('Location', 'http://127.0.0.1/yes', '/', env=env)
self.assertStatus(305, '/one', env=env)
self.assertHeader('Location', 'http://127.0.0.1/yes', '/one', env=env)
def test_generator_callback(self):
@bottle.route('/yield')
def test():
bottle.response.headers['Test-Header'] = 'test'
yield 'foo'
@bottle.route('/yield_nothing')
def test2():
yield
bottle.response.headers['Test-Header'] = 'test'
self.assertBody('foo', '/yield')
self.assertHeader('Test-Header', 'test', '/yield')
self.assertBody('', '/yield_nothing')
self.assertHeader('Test-Header', 'test', '/yield_nothing')
def test_cookie(self):
""" WSGI: Cookies """
@bottle.route('/cookie')
def test():
bottle.response.set_cookie('b', 'b')
bottle.response.set_cookie('c', 'c', path='/')
return 'hello'
try:
c = self.urlopen('/cookie')['header'].get_all('Set-Cookie', '')
except:
c = self.urlopen('/cookie')['header'].get('Set-Cookie', '').split(',')
c = [x.strip() for x in c]
self.assertTrue('b=b' in c)
self.assertTrue('c=c; Path=/' in c)
| TestWsgi |
python | django__django | tests/admin_views/tests.py | {
"start": 357304,
"end": 359169
} | class ____(TestCase):
"""#11277 -Labels of hidden fields in admin were not hidden."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_all_fields_visible(self):
response = self.client.get(reverse("admin:admin_views_emptymodelvisible_add"))
self.assert_fieldline_visible(response)
self.assert_field_visible(response, "first")
self.assert_field_visible(response, "second")
def test_all_fields_hidden(self):
response = self.client.get(reverse("admin:admin_views_emptymodelhidden_add"))
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, "first")
self.assert_field_hidden(response, "second")
def test_mixin(self):
response = self.client.get(reverse("admin:admin_views_emptymodelmixin_add"))
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, "first")
self.assert_field_visible(response, "second")
def assert_field_visible(self, response, field_name):
self.assertContains(
response, f'<div class="flex-container fieldBox field-{field_name}">'
)
def assert_field_hidden(self, response, field_name):
self.assertContains(
response, f'<div class="flex-container fieldBox field-{field_name} hidden">'
)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(ROOT_URLCONF="admin_views.urls")
| TestLabelVisibility |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/evaluator_test.py | {
"start": 954,
"end": 5809
} | class ____(test_util.TensorFlowTestCase):
def testParseNamesWithoutPrefixOrSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("hidden_0/Weights:0"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithoutPrefixWithDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1:DebugNanCount"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugNanCount", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"hidden_0/Weights:0:DebugNumericSummary"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugNumericSummary", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithDeviceNamePrefixWithoutDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1"))
self.assertEqual("/job:ps/replica:0/task:2/cpu:0", device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:worker/replica:0/task:3/gpu:0:hidden_0/Weights:0"))
self.assertEqual("/job:worker/replica:0/task:3/gpu:0", device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithDeviceNamePrefixWithDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount"))
self.assertEqual("/job:ps/replica:0/task:2/cpu:0", device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugNanCount", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:worker/replica:0/task:3/gpu:0:"
"hidden_0/Weights:0:DebugNumericSummary"))
self.assertEqual("/job:worker/replica:0/task:3/gpu:0", device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugNumericSummary", debug_op)
self.assertEqual(0, exec_index)
def testParseMalformedDebugTensorName(self):
with self.assertRaisesRegex(
ValueError,
r"The debug tensor name in the to-be-evaluated expression is "
r"malformed:"):
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount:1337")
with self.assertRaisesRegex(
ValueError,
r"The debug tensor name in the to-be-evaluated expression is "
r"malformed:"):
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/cpu:0:foo:1:DebugNanCount")
with self.assertRaises(ValueError):
evaluator._parse_debug_tensor_name(
"foo:1:DebugNanCount[]")
with self.assertRaises(ValueError):
evaluator._parse_debug_tensor_name(
"foo:1[DebugNanCount]")
def testParseNamesWithExecIndex(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1[20]"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(20, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("hidden_0/Weights:0[3]"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(3, exec_index)
| ParseDebugTensorNameTest |
python | networkx__networkx | networkx/utils/tests/test_decorators.py | {
"start": 5556,
"end": 9839
} | class ____:
@classmethod
def setup_class(cls):
global np
np = pytest.importorskip("numpy")
@np_random_state(1)
def instantiate_np_random_state(self, random_state):
allowed = (np.random.RandomState, np.random.Generator)
assert isinstance(random_state, allowed)
return random_state.random()
@py_random_state(1)
def instantiate_py_random_state(self, random_state):
allowed = (random.Random, PythonRandomInterface, PythonRandomViaNumpyBits)
assert isinstance(random_state, allowed)
return random_state.random()
def test_random_state_None(self):
np.random.seed(42)
rv = np.random.random()
np.random.seed(42)
assert rv == self.instantiate_np_random_state(None)
random.seed(42)
rv = random.random()
random.seed(42)
assert rv == self.instantiate_py_random_state(None)
def test_random_state_np_random(self):
np.random.seed(42)
rv = np.random.random()
np.random.seed(42)
assert rv == self.instantiate_np_random_state(np.random)
np.random.seed(42)
assert rv == self.instantiate_py_random_state(np.random)
def test_random_state_int(self):
np.random.seed(42)
np_rv = np.random.random()
random.seed(42)
py_rv = random.random()
np.random.seed(42)
seed = 1
rval = self.instantiate_np_random_state(seed)
rval_expected = np.random.RandomState(seed).rand()
assert rval == rval_expected
# test that global seed wasn't changed in function
assert np_rv == np.random.random()
random.seed(42)
rval = self.instantiate_py_random_state(seed)
rval_expected = random.Random(seed).random()
assert rval == rval_expected
# test that global seed wasn't changed in function
assert py_rv == random.random()
def test_random_state_np_random_Generator(self):
np.random.seed(42)
np_rv = np.random.random()
np.random.seed(42)
seed = 1
rng = np.random.default_rng(seed)
rval = self.instantiate_np_random_state(rng)
rval_expected = np.random.default_rng(seed).random()
assert rval == rval_expected
rval = self.instantiate_py_random_state(rng)
rval_expected = np.random.default_rng(seed).random(size=2)[1]
assert rval == rval_expected
# test that global seed wasn't changed in function
assert np_rv == np.random.random()
def test_random_state_np_random_RandomState(self):
np.random.seed(42)
np_rv = np.random.random()
np.random.seed(42)
seed = 1
rng = np.random.RandomState(seed)
rval = self.instantiate_np_random_state(rng)
rval_expected = np.random.RandomState(seed).random()
assert rval == rval_expected
rval = self.instantiate_py_random_state(rng)
rval_expected = np.random.RandomState(seed).random(size=2)[1]
assert rval == rval_expected
# test that global seed wasn't changed in function
assert np_rv == np.random.random()
def test_random_state_py_random(self):
seed = 1
rng = random.Random(seed)
rv = self.instantiate_py_random_state(rng)
assert rv == random.Random(seed).random()
pytest.raises(ValueError, self.instantiate_np_random_state, rng)
def test_random_state_string_arg_index():
with pytest.raises(nx.NetworkXError):
@np_random_state("a")
def make_random_state(rs):
pass
rstate = make_random_state(1)
def test_py_random_state_string_arg_index():
with pytest.raises(nx.NetworkXError):
@py_random_state("a")
def make_random_state(rs):
pass
rstate = make_random_state(1)
def test_random_state_invalid_arg_index():
with pytest.raises(nx.NetworkXError):
@np_random_state(2)
def make_random_state(rs):
pass
rstate = make_random_state(1)
def test_py_random_state_invalid_arg_index():
with pytest.raises(nx.NetworkXError):
@py_random_state(2)
def make_random_state(rs):
pass
rstate = make_random_state(1)
| TestRandomState |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/multitenancy/taskqueue.py | {
"start": 1350,
"end": 1503
} | class ____(webapp2.RequestHandler):
def post(self):
name = self.request.get("counter_name")
update_counter(name)
| DeferredCounterHandler |
python | pyodide__pyodide | src/py/pyodide/webloop.py | {
"start": 492,
"end": 5219
} | class ____(Future[T]):
"""A :py:class:`~asyncio.Future` with extra :js:meth:`~Promise.then`,
:js:meth:`~Promise.catch`, and :js:meth:`finally_() <Promise.finally>` methods
based on the Javascript promise API. :py:meth:`~asyncio.loop.create_future`
returns these so in practice all futures encountered in Pyodide should be an
instance of :py:class:`~pyodide.webloop.PyodideFuture`.
"""
@overload
def then(
self,
onfulfilled: None,
onrejected: Callable[[BaseException], Awaitable[S]],
) -> "PyodideFuture[S]": ...
@overload
def then(
self,
onfulfilled: None,
onrejected: Callable[[BaseException], S],
) -> "PyodideFuture[S]": ...
@overload
def then(
self,
onfulfilled: Callable[[T], Awaitable[S]],
onrejected: Callable[[BaseException], Awaitable[S]] | None = None,
) -> "PyodideFuture[S]": ...
@overload
def then(
self,
onfulfilled: Callable[[T], S],
onrejected: Callable[[BaseException], S] | None = None,
) -> "PyodideFuture[S]": ...
def then(
self,
onfulfilled: Callable[[T], S | Awaitable[S]] | None,
onrejected: Callable[[BaseException], S | Awaitable[S]] | None = None,
) -> "PyodideFuture[S]":
"""When the Future is done, either execute onfulfilled with the result
or execute onrejected with the exception.
Returns a new Future which will be marked done when either the
onfulfilled or onrejected callback is completed. If the return value of
the executed callback is awaitable it will be awaited repeatedly until a
nonawaitable value is received. The returned Future will be resolved
with that value. If an error is raised, the returned Future will be
rejected with the error.
Parameters
----------
onfulfilled:
A function called if the Future is fulfilled. This function receives
one argument, the fulfillment value.
onrejected:
A function called if the Future is rejected. This function receives
one argument, the rejection value.
Returns
-------
A new future to be resolved when the original future is done and the
appropriate callback is also done.
"""
result: PyodideFuture[S] = PyodideFuture()
onfulfilled_: Callable[[T], S | Awaitable[S]]
onrejected_: Callable[[BaseException], S | Awaitable[S]]
if onfulfilled:
onfulfilled_ = onfulfilled
else:
def onfulfilled_(x):
return x
if onrejected:
onrejected_ = onrejected
else:
def onrejected_(x):
raise x
async def callback(fut: Future[T]) -> None:
e = fut.exception()
try:
if e:
r = onrejected_(e)
else:
r = onfulfilled_(fut.result())
while inspect.isawaitable(r):
r = await r
except Exception as result_exception:
result.set_exception(result_exception)
return
result.set_result(r)
def wrapper(fut: Future[T]) -> None:
asyncio.ensure_future(callback(fut))
self.add_done_callback(wrapper)
return result
@overload
def catch(
self, onrejected: Callable[[BaseException], Awaitable[S]]
) -> "PyodideFuture[S]": ...
@overload
def catch(self, onrejected: Callable[[BaseException], S]) -> "PyodideFuture[S]": ...
def catch(
self, onrejected: Callable[[BaseException], object]
) -> "PyodideFuture[Any]":
"""Equivalent to ``then(None, onrejected)``"""
return self.then(None, onrejected)
    def finally_(self, onfinally: Callable[[], Any]) -> "PyodideFuture[T]":
        """When the future is either resolved or rejected, call ``onfinally`` with
        no arguments.

        Mirrors the JavaScript ``Promise.finally`` API: the returned future
        settles with this future's original result (or exception) once
        ``onfinally`` has completed. If ``onfinally`` returns an awaitable it
        is awaited repeatedly until a non-awaitable value is produced; if it
        raises, the returned future is rejected with that error instead of the
        original outcome.
        """
        result: PyodideFuture[T] = PyodideFuture()
        async def callback(fut: Future[T]) -> None:
            # Original outcome of this future (None if it succeeded).
            exc = fut.exception()
            try:
                r = onfinally()
                # Keep awaiting until a plain value is produced.
                while inspect.isawaitable(r):
                    r = await r
            except Exception as e:
                # An error raised by onfinally replaces the original outcome.
                result.set_exception(e)
                return
            if exc:
                result.set_exception(exc)
            else:
                result.set_result(fut.result())
        def wrapper(fut: Future[T]) -> None:
            # Done-callbacks must be synchronous; schedule the async handler.
            asyncio.ensure_future(callback(fut))
        self.add_done_callback(wrapper)
        return result
| PyodideFuture |
python | django__django | tests/defer/models.py | {
"start": 648,
"end": 681
} | class ____(Primary):
pass
| Child |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 7456,
"end": 9199
} | class ____(types.Type):
"""
A type for handling ops against nulls
Exists so we can:
1. Teach numba that all occurrences of `cudf.NA` are
to be read as instances of this type instead
2. Define ops like `if x is cudf.NA` where `x` is of
type `Masked` to mean `if x.valid is False`
"""
def __init__(self):
super().__init__(name="NA")
def unify(self, context, other):
"""
Masked <-> NA is deferred to MaskedType.unify()
Literal <-> NA -> Masked
"""
if isinstance(other, MaskedType):
# bounce to MaskedType.unify
return None
elif isinstance(other, NAType):
# unify {NA, NA} -> NA
return self
else:
return MaskedType(other)
na_type = NAType()
@typeof_impl.register(type(NA))
def typeof_na(val, c):
"""
Tie instances of _NAType (cudf.NA) to our NAType.
Effectively make it so numba sees `cudf.NA` as an
instance of this NAType -> handle it accordingly.
"""
return na_type
register_model(NAType)(models.OpaqueModel)
# Ultimately, we want numba to produce PTX code that specifies how to implement
# an operation on two singular `Masked` structs together, which is defined
# as producing a new `Masked` with the right validity and if valid,
# the correct value. This happens in two phases:
# 1. Specify that `Masked` <op> `Masked` exists and what it should return
# 2. Implement how to actually do (1) at the LLVM level
# The following code accomplishes (1) - it is really just a way of specifying
# that the <op> has a CUDA overload that accepts two `Masked` that
# are parameterized with `value_type` and what flavor of `Masked` to return.
| NAType |
python | pydata__xarray | xarray/tests/test_options.py | {
"start": 3459,
"end": 8412
} | class ____:
def test_dataset_attr_retention(self) -> None:
# Use .mean() for all tests: a typical reduction operation
ds = create_test_dataset_attrs()
original_attrs = ds.attrs
# Test default behaviour (keeps attrs for reduction operations)
result = ds.mean()
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs="default"):
result = ds.mean()
assert (
result.attrs == original_attrs
) # "default" uses operation's default which is True for reduce
with xarray.set_options(keep_attrs=True):
result = ds.mean()
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs=False):
result = ds.mean()
assert result.attrs == {}
def test_dataarray_attr_retention(self) -> None:
# Use .mean() for all tests: a typical reduction operation
da = create_test_dataarray_attrs()
original_attrs = da.attrs
# Test default behaviour (keeps attrs for reduction operations)
result = da.mean()
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs="default"):
result = da.mean()
assert (
result.attrs == original_attrs
) # "default" uses operation's default which is True for reduce
with xarray.set_options(keep_attrs=True):
result = da.mean()
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs=False):
result = da.mean()
assert result.attrs == {}
def test_groupby_attr_retention(self) -> None:
da = xarray.DataArray([1, 2, 3], [("x", [1, 1, 2])])
da.attrs = {"attr1": 5, "attr2": "history", "attr3": {"nested": "more_info"}}
original_attrs = da.attrs
# Test default behaviour
result = da.groupby("x").sum(keep_attrs=True)
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs="default"):
result = da.groupby("x").sum(keep_attrs=True)
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs=True):
result1 = da.groupby("x")
result = result1.sum()
assert result.attrs == original_attrs
with xarray.set_options(keep_attrs=False):
result = da.groupby("x").sum()
assert result.attrs == {}
def test_concat_attr_retention(self) -> None:
ds1 = create_test_dataset_attrs()
ds2 = create_test_dataset_attrs()
ds2.attrs = {"wrong": "attributes"}
original_attrs = ds1.attrs
# Test default behaviour of keeping the attrs of the first
# dataset in the supplied list
# global keep_attrs option current doesn't affect concat
result = concat([ds1, ds2], dim="dim1")
assert result.attrs == original_attrs
def test_merge_attr_retention(self) -> None:
da1 = create_test_dataarray_attrs(var="var1")
da2 = create_test_dataarray_attrs(var="var2")
da2.attrs = {"wrong": "attributes"}
original_attrs = da1.attrs
# merge currently discards attrs, and the global keep_attrs
# option doesn't affect this
result = merge([da1, da2])
assert result.attrs == original_attrs
def test_display_style_text(self) -> None:
ds = create_test_dataset_attrs()
with xarray.set_options(display_style="text"):
text = ds._repr_html_()
assert text.startswith("<pre>")
assert "'nested'" in text
assert "<xarray.Dataset>" in text
def test_display_style_html(self) -> None:
ds = create_test_dataset_attrs()
with xarray.set_options(display_style="html"):
html = ds._repr_html_()
assert html.startswith("<div>")
assert "'nested'" in html
def test_display_dataarray_style_text(self) -> None:
da = create_test_dataarray_attrs()
with xarray.set_options(display_style="text"):
text = da._repr_html_()
assert text.startswith("<pre>")
assert "<xarray.DataArray 'var1'" in text
def test_display_dataarray_style_html(self) -> None:
da = create_test_dataarray_attrs()
with xarray.set_options(display_style="html"):
html = da._repr_html_()
assert html.startswith("<div>")
assert "#x27;nested'" in html
@pytest.mark.parametrize(
"set_value",
[("left"), ("exact")],
)
def test_get_options_retention(set_value):
"""Test to check if get_options will return changes made by set_options"""
with xarray.set_options(arithmetic_join=set_value):
get_options = xarray.get_options()
assert get_options["arithmetic_join"] == set_value
| TestAttrRetention |
python | apache__airflow | airflow-core/src/airflow/stats.py | {
"start": 1140,
"end": 2985
} | class ____(type):
factory: Callable
instance: StatsLogger | NoStatsLogger | None = None
def __getattr__(cls, name: str) -> str:
if not cls.instance:
try:
cls.instance = cls.factory()
except (socket.gaierror, ImportError) as e:
log.error("Could not configure StatsClient: %s, using NoStatsLogger instead.", e)
cls.instance = NoStatsLogger()
return getattr(cls.instance, name)
def __init__(cls, *args, **kwargs) -> None:
super().__init__(cls)
if not hasattr(cls.__class__, "factory"):
is_datadog_enabled_defined = conf.has_option("metrics", "statsd_datadog_enabled")
if is_datadog_enabled_defined and conf.getboolean("metrics", "statsd_datadog_enabled"):
from airflow.metrics import datadog_logger
cls.__class__.factory = datadog_logger.get_dogstatsd_logger
elif conf.getboolean("metrics", "statsd_on"):
from airflow.metrics import statsd_logger
cls.__class__.factory = statsd_logger.get_statsd_logger
elif conf.getboolean("metrics", "otel_on"):
from airflow.metrics import otel_logger
cls.__class__.factory = otel_logger.get_otel_logger
else:
cls.__class__.factory = NoStatsLogger
@classmethod
def get_constant_tags(cls) -> list[str]:
"""Get constant DataDog tags to add to all stats."""
tags_in_string = conf.get("metrics", "statsd_datadog_tags", fallback=None)
if not tags_in_string:
return []
return tags_in_string.split(",")
if TYPE_CHECKING:
Stats: StatsLogger
else:
class Stats(metaclass=_Stats):
"""Empty class for Stats - we use metaclass to inject the right one."""
| _Stats |
python | modin-project__modin | modin/core/dataframe/pandas/interchange/dataframe_protocol/buffer.py | {
"start": 1506,
"end": 4227
} | class ____(ProtocolBuffer):
"""
Data in the buffer is guaranteed to be contiguous in memory.
Note that there is no dtype attribute present, a buffer can be thought of
as simply a block of memory. However, if the column that the buffer is
attached to has a dtype that's supported by DLPack and ``__dlpack__`` is
implemented, then that dtype information will be contained in the return
value from ``__dlpack__``.
This distinction is useful to support both (a) data exchange via DLPack on a
buffer and (b) dtypes like variable-length strings which do not have a
fixed number of bytes per element.
Parameters
----------
x : np.ndarray
Data to be held by ``Buffer``.
allow_copy : bool, default: True
A keyword that defines whether or not the library is allowed
to make a copy of the data. For example, copying data would be necessary
if a library supports strided buffers, given that this protocol
specifies contiguous buffers. Currently, if the flag is set to ``False``
and a copy is needed, a ``RuntimeError`` will be raised.
"""
def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None:
if not x.strides == (x.dtype.itemsize,):
# The protocol does not support strided buffers, so a copy is
# necessary. If that's not allowed, we need to raise an exception.
if allow_copy:
x = x.copy()
else:
raise RuntimeError(
"Exports cannot be zero-copy in the case "
+ "of a non-contiguous buffer"
)
# Store the numpy array in which the data resides as a private
# attribute, so we can use it to retrieve the public attributes
self._x = x
@property
def bufsize(self) -> int:
return self._x.size * self._x.dtype.itemsize
@property
def ptr(self) -> int:
return self._x.__array_interface__["data"][0]
def __dlpack__(self):
raise NotImplementedError("__dlpack__")
def __dlpack_device__(self) -> Tuple[enum.IntEnum, int]:
class Device(enum.IntEnum):
CPU = 1
return (Device.CPU, None)
def __repr__(self) -> str:
"""
Return a string representation for a particular ``PandasProtocolBuffer``.
Returns
-------
str
"""
return (
"Buffer("
+ str(
{
"bufsize": self.bufsize,
"ptr": self.ptr,
"device": self.__dlpack_device__()[0].name,
}
)
+ ")"
)
| PandasProtocolBuffer |
python | spack__spack | lib/spack/spack/solver/input_analysis.py | {
"start": 16107,
"end": 19109
} | class ____(NoDuplicatesCounter):
def __init__(
self,
specs: List[spack.spec.Spec],
tests: spack.concretize.TestsType,
possible_graph: PossibleDependencyGraph,
) -> None:
super().__init__(specs, tests, possible_graph)
self._link_run: Set[str] = set()
self._direct_build: Set[str] = set()
self._total_build: Set[str] = set()
self._link_run_virtuals: Set[str] = set()
def _compute_cache_values(self) -> None:
self._link_run, virtuals, _ = self.possible_graph.possible_dependencies(
*self.specs, allowed_deps=self.link_run_types
)
self._possible_virtuals.update(virtuals)
self._link_run_virtuals.update(virtuals)
for x in self._link_run:
reals, virtuals, _ = self.possible_graph.possible_dependencies(
x, allowed_deps=dt.BUILD, transitive=False, strict_depflag=True
)
self._possible_virtuals.update(virtuals)
self._direct_build.update(reals)
self._total_build, virtuals, _ = self.possible_graph.possible_dependencies(
*self._direct_build, allowed_deps=self.all_types
)
self._possible_virtuals.update(virtuals)
self._possible_dependencies = set(self._link_run) | set(self._total_build)
def possible_packages_facts(self, gen, fn):
build_tools = set()
for current_tag in ("build-tools", "compiler"):
build_tools.update(spack.repo.PATH.packages_with_tags(current_tag))
gen.h2("Packages with at most a single node")
for package_name in sorted(self.possible_dependencies() - build_tools):
gen.fact(fn.max_dupes(package_name, 1))
gen.newline()
gen.h2("Packages with multiple possible nodes (build-tools)")
default = spack.config.CONFIG.get("concretizer:duplicates:max_dupes:default", 2)
duplicates = spack.config.CONFIG.get("concretizer:duplicates:max_dupes", {})
for package_name in sorted(self.possible_dependencies() & build_tools):
max_dupes = duplicates.get(package_name, default)
gen.fact(fn.max_dupes(package_name, max_dupes))
if max_dupes > 1:
gen.fact(fn.multiple_unification_sets(package_name))
gen.newline()
gen.h2("Maximum number of nodes (link-run virtuals)")
for package_name in sorted(self._link_run_virtuals):
gen.fact(fn.max_dupes(package_name, 1))
gen.newline()
gen.h2("Maximum number of nodes (other virtuals)")
for package_name in sorted(self.possible_virtuals() - self._link_run_virtuals):
max_dupes = duplicates.get(package_name, default)
gen.fact(fn.max_dupes(package_name, max_dupes))
gen.newline()
gen.h2("Possible package in link-run subDAG")
for name in sorted(self._link_run):
gen.fact(fn.possible_in_link_run(name))
gen.newline()
| MinimalDuplicatesCounter |
python | sphinx-doc__sphinx | sphinx/transforms/__init__.py | {
"start": 8164,
"end": 8982
} | class ____(SphinxTransform):
"""Detect old style (4 column based indices) and automatically upgrade to new style."""
default_priority = 210
def apply(self, **kwargs: Any) -> None:
for node in self.document.findall(addnodes.index):
if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
msg = (
__(
'4 column based index found. '
'It might be a bug of extensions you use: %r'
)
% node['entries']
)
logger.warning(msg, location=node)
for i, entry in enumerate(node['entries']):
if len(entry) == 4:
node['entries'][i] = (*entry, None)
| AutoIndexUpgrader |
python | huggingface__transformers | src/transformers/models/mimi/modeling_mimi.py | {
"start": 63079,
"end": 79671
} | class ____(MimiPreTrainedModel):
def __init__(self, config: MimiConfig):
super().__init__(config)
self.config = config
self.encoder = MimiEncoder(config)
self.encoder_transformer = MimiTransformerModel(config)
self.downsample = None
self.upsample = None
if config.frame_rate != config.encodec_frame_rate:
self.downsample = MimiConv1d(
config,
config.hidden_size,
config.hidden_size,
kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
stride=2,
bias=False,
pad_mode="replicate",
layer_idx=len(self.encoder._mimiconv1d_layer_names),
)
self.upsample = MimiConvTranspose1d(
config,
config.hidden_size,
config.hidden_size,
kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
stride=2,
bias=False,
groups=config.upsample_groups,
)
self.decoder_transformer = MimiTransformerModel(config)
self.decoder = MimiDecoder(config)
self.quantizer = MimiSplitResidualVectorQuantizer(config)
self.bits_per_codebook = int(math.log2(self.config.codebook_size))
if 2**self.bits_per_codebook != self.config.codebook_size:
raise ValueError("The codebook_size must be a power of 2.")
# Initialize weights and apply final processing
self.post_init()
def _encode_frame(
self,
input_values: torch.Tensor,
num_quantizers: int,
padding_mask: int,
past_key_values: Optional[Cache] = None,
padding_cache: Optional[MimiConv1dPaddingCache] = None,
return_dict: Optional[bool] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Encodes the given input using the underlying VQVAE. The padding mask is required to compute the correct scale.
"""
# TODO: @eustlb, let's make the encoder support padding_mask so that batched inputs are supported.
embeddings = self.encoder(input_values, padding_cache=padding_cache)
# TODO: @eustlb, convert the padding mask to attention mask.
encoder_outputs = self.encoder_transformer(
embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
)
if return_dict:
past_key_values = encoder_outputs.get("past_key_values")
elif len(encoder_outputs) > 1:
past_key_values = encoder_outputs[1]
embeddings = encoder_outputs[0].transpose(1, 2)
embeddings = self.downsample(embeddings, padding_cache=padding_cache)
codes = self.quantizer.encode(embeddings, num_quantizers)
codes = codes.transpose(0, 1)
return codes, past_key_values, padding_cache
def get_encoded_length(self, input_length: torch.LongTensor) -> torch.LongTensor:
"""
Return the number of frames of the encoded audio waveform.
"""
output_length = input_length
# encoder
for layer_name in self.encoder._mimiconv1d_layer_names:
output_length = self.encoder.get_submodule(layer_name)._get_output_length(output_length)
# downsample
output_length = self.downsample._get_output_length(output_length)
return output_length
def get_audio_codes_mask(self, padding_mask: torch.Tensor, padding_side: str = "right"):
"""
Get the mask for the audio codes from the original padding mask.
"""
encoded_lengths = self.get_encoded_length(padding_mask.sum(dim=-1))
audio_codes_mask = torch.arange(encoded_lengths.max(), device=encoded_lengths.device).expand(
len(encoded_lengths), -1
)
audio_codes_mask = audio_codes_mask < encoded_lengths.unsqueeze(1)
audio_codes_mask = audio_codes_mask.to(padding_mask.device)
if padding_side == "right":
return audio_codes_mask
else:
return audio_codes_mask.flip(dims=[-1])
def encode(
self,
input_values: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
num_quantizers: Optional[float] = None,
encoder_past_key_values: Optional[Cache] = None,
padding_cache: Optional[MimiConv1dPaddingCache] = None,
use_streaming: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor, Optional[torch.Tensor]], MimiEncoderOutput]:
"""
Encodes the input audio waveform into discrete codes.
Args:
input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Float values of the input audio waveform.
padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*.
num_quantizers (`int`, *optional*):
Number of quantizers (i.e codebooks) to use. By default, all quantizers are used.
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't
have their past key value states given to this model).
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
`codebook` of shape `[batch_size, num_codebooks, frames]`, the discrete encoded codes for the input audio waveform.
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_streaming = use_streaming if use_streaming is not None else self.config.use_streaming
num_quantizers = self.config.num_quantizers if num_quantizers is None else num_quantizers
if num_quantizers > self.config.num_quantizers:
raise ValueError(
f"The number of quantizers (i.e codebooks) asked should be lower than the total number of quantizers {self.config.num_quantizers}, but is currently {num_quantizers}."
)
_, channels, input_length = input_values.shape
if channels < 1 or channels > 2:
raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
if padding_mask is None:
padding_mask = torch.ones_like(input_values).bool()
if use_streaming and padding_cache is None:
per_layer_padding, per_layer_padding_mode, per_layer_in_channels = [], [], []
for layer_name in self.encoder._mimiconv1d_layer_names:
per_layer_padding.append(self.encoder.get_submodule(layer_name).padding_total)
per_layer_padding_mode.append(self.encoder.get_submodule(layer_name).pad_mode)
per_layer_in_channels.append(self.encoder.get_submodule(layer_name).in_channels)
# downsample layer
per_layer_padding.append(self.downsample.padding_total)
per_layer_padding_mode.append(self.downsample.pad_mode)
per_layer_in_channels.append(self.downsample.in_channels)
padding_cache = MimiConv1dPaddingCache(
num_layers=len(self.encoder._mimiconv1d_layer_names) + 1,
per_layer_padding=per_layer_padding,
per_layer_padding_mode=per_layer_padding_mode,
per_layer_in_channels=per_layer_in_channels,
)
encoded_frames, encoder_past_key_values, padding_cache = self._encode_frame(
input_values,
num_quantizers,
padding_mask.bool(),
past_key_values=encoder_past_key_values,
padding_cache=padding_cache,
return_dict=return_dict,
)
if not return_dict:
return (
encoded_frames,
encoder_past_key_values,
padding_cache,
)
return MimiEncoderOutput(encoded_frames, encoder_past_key_values, padding_cache)
def _decode_frame(
self,
codes: torch.Tensor,
past_key_values: Optional[Cache] = None,
return_dict: Optional[bool] = None,
) -> torch.Tensor:
embeddings = self.quantizer.decode(codes)
embeddings = self.upsample(embeddings)
decoder_outputs = self.decoder_transformer(
embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
)
if return_dict:
past_key_values = decoder_outputs.get("past_key_values")
elif len(decoder_outputs) > 1:
past_key_values = decoder_outputs[1]
embeddings = decoder_outputs[0].transpose(1, 2)
outputs = self.decoder(embeddings)
return outputs, past_key_values
def decode(
self,
audio_codes: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
decoder_past_key_values: Optional[Cache] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor, torch.Tensor], MimiDecoderOutput]:
"""
Decodes the given frames into an output audio waveform.
Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
trimmed.
Args:
audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
Discret code embeddings computed using `model.encode`.
padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*.
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't
have their past key value states given to this model).
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
audio_values, decoder_past_key_values = self._decode_frame(
audio_codes, past_key_values=decoder_past_key_values, return_dict=return_dict
)
# truncate based on padding mask
if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
audio_values = audio_values[..., : padding_mask.shape[-1]]
if not return_dict:
return (
audio_values,
decoder_past_key_values,
)
return MimiDecoderOutput(audio_values, decoder_past_key_values)
@auto_docstring
def forward(
self,
input_values: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
num_quantizers: Optional[int] = None,
audio_codes: Optional[torch.Tensor] = None,
encoder_past_key_values: Optional[Cache] = None,
decoder_past_key_values: Optional[Cache] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor, torch.Tensor], MimiOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
Raw audio input converted to Float.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*.
num_quantizers (`int`, *optional*):
Number of quantizers (i.e codebooks) to use. By default, all quantizers are used.
audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
Discret code embeddings computed using `model.encode`.
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't
have their past key value states given to this model).
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't
have their past key value states given to this model).
Examples:
```python
>>> from datasets import load_dataset
>>> from transformers import AutoFeatureExtractor, MimiModel
>>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
>>> audio_sample = dataset["train"]["audio"][0]["array"]
>>> model_id = "kyutai/mimi"
>>> model = MimiModel.from_pretrained(model_id)
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
>>> inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt")
>>> outputs = model(**inputs)
>>> audio_codes = outputs.audio_codes
>>> audio_values = outputs.audio_values
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
if padding_mask is None:
padding_mask = torch.ones_like(input_values).bool()
if audio_codes is None:
encoder_outputs = self.encode(
input_values, padding_mask, num_quantizers, encoder_past_key_values, return_dict=return_dict
)
audio_codes = encoder_outputs[0]
if return_dict:
encoder_past_key_values = encoder_outputs.get("past_key_values")
elif len(encoder_outputs) > 1:
encoder_past_key_values = encoder_outputs[1]
decoder_outputs = self.decode(audio_codes, padding_mask, decoder_past_key_values, return_dict=return_dict)
audio_values = decoder_outputs[0]
if return_dict:
decoder_past_key_values = decoder_outputs.get("past_key_values")
elif len(decoder_outputs) > 1:
decoder_past_key_values = decoder_outputs[1]
if not return_dict:
return (audio_codes, audio_values, encoder_past_key_values, decoder_past_key_values)
return MimiOutput(
audio_codes=audio_codes,
audio_values=audio_values,
encoder_past_key_values=encoder_past_key_values,
decoder_past_key_values=decoder_past_key_values,
)
__all__ = ["MimiModel", "MimiPreTrainedModel"]
| MimiModel |
python | graphql-python__graphene | graphene/types/scalars.py | {
"start": 3041,
"end": 3656
} | class ____(Scalar):
"""
The `Float` scalar type represents signed double-precision fractional
values as specified by
[IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).
"""
@staticmethod
def coerce_float(value: Any) -> float:
try:
return float(value)
except ValueError:
return Undefined
serialize = coerce_float
parse_value = coerce_float
@staticmethod
def parse_literal(ast, _variables=None):
if isinstance(ast, (FloatValueNode, IntValueNode)):
return float(ast.value)
return Undefined
| Float |
python | django__django | tests/sitemaps_tests/base.py | {
"start": 358,
"end": 1103
} | class ____(TestCase):
protocol = "http"
sites_installed = apps.is_installed("django.contrib.sites")
domain = "example.com" if sites_installed else "testserver"
@classmethod
def setUpTestData(cls):
# Create an object for sitemap content.
TestModel.objects.create(name="Test Object")
cls.i18n_model = I18nTestModel.objects.create(name="Test Object")
def setUp(self):
self.base_url = "%s://%s" % (self.protocol, self.domain)
cache.clear()
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
| SitemapTestsBase |
python | skorch-dev__skorch | skorch/probabilistic.py | {
"start": 13372,
"end": 19020
} | class ____(RegressorMixin):
"""Mixin class that provides a predict method for GP regressors."""
def predict(self, X, return_std=False, return_cov=False):
"""Returns the predicted mean and optionally standard deviation.
Parameters
----------
X : input data
Input data where the GP is evaluated.
return_std : bool (default=False)
If True, the standard-deviation of the predictive distribution at the
query points is returned along with the mean.
return_cov : bool (default=False)
This exists solely for sklearn compatibility and is not supported by
skorch.
Returns
-------
y_pred : numpy ndarray
Mean of predictive distribution at the query points.
y_std : numpy ndarray
Standard deviation of predictive distribution at query points. Only
returned when ``return_std`` is True.
"""
if return_cov:
msg = ("The 'return_cov' argument is not supported. Please try: "
"'posterior = next(gpr.forward_iter(X)); "
"posterior.covariance_matrix'.")
raise NotImplementedError(msg)
if return_std:
return self._predict_with_std(X)
return self._predict(X)
def _predict_with_std(self, X):
nonlin = self._get_predict_nonlinearity()
y_preds, y_stds = [], []
for yi in self.forward_iter(X, training=False):
posterior = yi[0] if isinstance(yi, tuple) else yi
y_preds.append(to_numpy(nonlin(posterior.mean)))
y_stds.append(to_numpy(nonlin(posterior.stddev)))
y_pred = np.concatenate(y_preds, 0)
y_std = np.concatenate(y_stds, 0)
return y_pred, y_std
def _predict(self, X):
# When return_std is False, turn on skip_posterior_variances -- this
# avoids doing the math for the posterior variances altogether, which
# will save a great deal of compute.
nonlin = self._get_predict_nonlinearity()
y_preds = []
with gpytorch.settings.skip_posterior_variances():
for yi in self.forward_iter(X, training=False):
posterior = yi[0] if isinstance(yi, tuple) else yi
y_preds.append(to_numpy(nonlin(posterior.mean)))
y_pred = np.concatenate(y_preds, 0)
return y_pred
exact_gp_regr_doc_start = """Exact Gaussian Process regressor
Use this specifically if you want to perform an exact solution to the
Gaussian Process. This implies that the module should by a
:class:`~gpytorch.models.ExactGP` module and you cannot use batching (i.e.
batch size should be -1).
"""
exact_gp_regr_module_text = """
Module : gpytorch.models.ExactGP (class or instance)
The module needs to return a
:class:`~gpytorch.distributions.MultivariateNormal` distribution.
"""
exact_gp_regr_criterion_text = """
likelihood : gpytorch.likelihoods.GaussianLikelihood (class or instance)
The likelihood used for the exact GP regressor. Usually doesn't need to be
changed.
criterion : gpytorch.mlls.ExactMarginalLogLikelihood
The objective function to learn the posterior of of the GP regressor.
Usually doesn't need to be changed.
"""
exact_gp_regr_batch_size_text = """
batch_size : int (default=-1)
Mini-batch size. For exact GPs, it must be set to -1, since the exact
solution cannot deal with batching. To make use of batching, use
:class:`.GPRegressor` in conjunction with a variational strategy.
"""
# this is the same text for exact and approximate GP regression
gp_regr_train_split_text = """
train_split : None or callable (default=None)
If None, there is no train/validation split. Else, train_split should be a
function or callable that is called with X and y data and should return
the tuple ``dataset_train, dataset_valid``. The validation data may be
None. There is no default train split for GP regressors because random
splitting is typically not desired, e.g. because there is a temporal
relationship between samples.
"""
# this is the same text for all GPs
gp_likelihood_attribute_text = """
likelihood_: torch module (instance)
The instantiated likelihood.
"""
def get_exact_gp_regr_doc(doc):
"""Customizes the net docs to avoid duplication."""
# dedent/indent roundtrip required for consistent indention in both
# Python <3.13 and Python >=3.13
# Because <3.13 => no automatic dedent, but it is the case in >=3.13
indentation = " "
doc = textwrap.indent(textwrap.dedent(doc.split("\n", 5)[-1]), indentation)
params_start_idx = doc.find(' Parameters\n ----------')
doc = doc[params_start_idx:]
doc = exact_gp_regr_doc_start + doc
pattern = re.compile(r'(\n\s+)(module .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + exact_gp_regr_module_text + doc[end:]
pattern = re.compile(r'(\n\s+)(criterion .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + exact_gp_regr_criterion_text + doc[end:]
pattern = re.compile(r'(\n\s+)(batch_size .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + exact_gp_regr_batch_size_text + doc[end:]
pattern = re.compile(r'(\n\s+)(train_split .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + gp_regr_train_split_text + doc[end:]
doc = doc + gp_likelihood_attribute_text
return doc
| _GPRegressorPredictMixin |
python | pytorch__pytorch | torch/_inductor/autotune_process.py | {
"start": 22330,
"end": 27472
} | class ____(GPUDeviceBenchmarkMixin, BenchmarkRequest):
"""
A class to handle CUDA (CUTLASS) benchmark requests. This class is for
managing the lifecycle of a CUDA kernel benchmark, including compiling
the source code, managing workspace memory, and executing the kernel.
Important: Instances of this class have to be serializable across
process boundaries. Do not put CUDA Tensors in here!
"""
def __init__(
self,
kernel_name: str,
input_tensor_meta: Union[TensorMeta, list[TensorMeta]],
output_tensor_meta: Union[TensorMeta, list[TensorMeta]],
extra_args: Iterable[Any],
source_code: str,
) -> None:
super().__init__(kernel_name, input_tensor_meta, output_tensor_meta, extra_args)
self.source_code = source_code
self.workspace_size: int = 0
self.workspace: Optional[torch.Tensor] = None
self.DLL: Optional[DLLWrapper] = None
self._workspace_size_updated = False
self.hash_key: str = ""
self.source_file: str = ""
self.hash_key, self.source_file = CUDACodeCache.write(self.source_code, "so")
def precompile(self):
"""
Precompile the CUDA source code to populate the CUDACodeCache.
This may happen in a separate thread pool.
"""
autotuning_log.debug("Precompiling %s", self)
CUDACodeCache.compile(self.source_code, "so")
autotuning_log.debug("Done precompiling %s", self)
def make_run_fn(
self, *input_tensors: torch.Tensor, out: torch.Tensor
) -> Callable[[], None]:
"""
Create a function to run the CUDA kernel with the given input and output tensors.
"""
self.ensure_dll_loaded()
self.update_workspace_size()
args = [c_void_p(tensor.data_ptr()) for tensor in list(input_tensors) + [out]]
autotuning_log.debug(
"make_run_fn: self.kernel_name=%s, self.source_file=%s, self.hash_key=%s, self.DLL=%s, args=%s, self.extra_args=%s",
self.kernel_name,
self.source_file,
self.hash_key,
self.DLL,
args,
self.extra_args,
)
stream_ptr = c_void_p(torch.cuda.current_stream().cuda_stream)
run_method = getattr(self.DLL, self.kernel_name)
workspace_ptr = c_void_p(0)
if self.workspace_size > 0:
self.workspace = torch.zeros(
(self.workspace_size + 7) // 8,
dtype=torch.float64,
device=out.device,
)
workspace_ptr = c_void_p(self.workspace.data_ptr())
# Generate partial function.
ret = functools.partial(
run_method,
*args,
*self.extra_args,
None, # null workspace size ptr
workspace_ptr, # set workspace ptr,
stream_ptr,
)
# sanity check to make sure we cleanup run fn properly
try:
ret()
except RuntimeError as e:
err_msg = str(e)
def raise_runtime_error():
raise RuntimeError(err_msg)
self.cleanup_run_fn()
return raise_runtime_error
return ret
def update_workspace_size(self) -> None:
if self._workspace_size_updated:
return
self.ensure_dll_loaded()
unique_input_count = len(
dict.fromkeys(meta.name for meta in self.input_tensor_meta)
)
args = [c_void_p(None) for _ in range(unique_input_count + 1)]
stream_ptr = c_void_p(torch.cuda.current_stream().cuda_stream)
run_method = getattr(self.DLL, self.kernel_name)
# Retrieve workspace_size and initialize workspace.
c_workspace_size = c_size_t()
run_method(
*args, # input ptrs and output ptrs
*self.extra_args,
byref(
c_workspace_size
), # set workspace size ptr to retrieve workspace size
None, # null workspace ptr
stream_ptr,
)
torch.cuda.synchronize() # shake out any CUDA errors
self.workspace_size = c_workspace_size.value
autotuning_log.debug(
"update_workspace_size called: new workspace size=%d, self.kernel_name=%s, self.source_file=%s, self.hash_key=%s, self.DLL=%s, args=%s, self.extra_args=%s", # noqa: B950
self.workspace_size,
self.kernel_name,
self.source_file,
self.hash_key,
self.DLL,
args,
self.extra_args,
)
self._workspace_size_updated = True
def ensure_dll_loaded(self):
if self.DLL is None:
self.DLL, self.hash_key, self.source_file = CUDACodeCache.load(
self.source_code, "so"
)
def cleanup_run_fn(self) -> None:
if self.DLL is not None:
self.DLL.close()
self.DLL = None
self.workspace = None
def __str__(self) -> str:
return f"{self.kernel_name=}, {self.source_file=}, {self.hash_key=}"
| CUDABenchmarkRequest |
python | keras-team__keras | keras/src/ops/operation_test.py | {
"start": 1553,
"end": 1910
} | class ____(operation.Operation):
def __init__(self, alpha, beta=1.0, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.beta = beta
def call(self, x):
return self.alpha * x + self.beta
def compute_output_spec(self, x):
return keras_tensor.KerasTensor(x.shape, x.dtype)
| OpWithKwargsInConstructor |
python | kamyu104__LeetCode-Solutions | Python/add-two-integers.py | {
"start": 36,
"end": 208
} | class ____(object):
def sum(self, num1, num2):
"""
:type num1: int
:type num2: int
:rtype: int
"""
return num1+num2
| Solution |
python | keon__algorithms | tests/test_map.py | {
"start": 4910,
"end": 5709
} | class ____(unittest.TestCase):
def test_longest_palindromic_subsequence_is_correct(self):
self.assertEqual(3, longest_palindromic_subsequence('BBABCBCAB'))
self.assertEqual(4, longest_palindromic_subsequence('abbaeae'))
self.assertEqual(7, longest_palindromic_subsequence('babbbababaa'))
self.assertEqual(4, longest_palindromic_subsequence('daccandeeja'))
def test_longest_palindromic_subsequence_is_incorrect(self):
self.assertNotEqual(4, longest_palindromic_subsequence('BBABCBCAB'))
self.assertNotEqual(5, longest_palindromic_subsequence('abbaeae'))
self.assertNotEqual(2, longest_palindromic_subsequence('babbbababaa'))
self.assertNotEqual(1, longest_palindromic_subsequence('daccandeeja'))
| TestLongestPalindromicSubsequence |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 106525,
"end": 106920
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("organization_id", "client_mutation_id")
organization_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="organizationId"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| FollowOrganizationInput |
python | django__django | tests/admin_inlines/models.py | {
"start": 6983,
"end": 7141
} | class ____(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
| ParentModelWithCustomPk |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/training_utils_v1.py | {
"start": 10897,
"end": 12866
} | class ____(Aggregator):
"""Combine tensor-likes which cannot be merged on the fly.
This class expects to aggregate a single tensor-like rather than a nested
structure of tensor-likes.
"""
def __init__(self, batch_size):
self.composite = None
super(ConcatAggregator, self).__init__(
use_steps=True, num_samples=None, steps=None, batch_size=batch_size)
def create(self, batch_element):
self.composite = is_composite_or_composite_value(batch_element)
def aggregate(self, batch_element, batch_start=None, batch_end=None):
# TODO(psv): Add num_samples check here to detect when output batch
# #samples is < batch size and != input batch #samples.
if self.batch_size and self.batch_size < batch_element.shape[0]:
raise ValueError(
'Mismatch between expected batch size and model output batch size. '
'Output shape = {}, expected output shape = shape {}'.format(
batch_element.shape,
(self.batch_size,) + batch_element.shape[1:]))
self.results.append(batch_element)
def finalize(self):
# Special case of single batch inference which skips a copy.
if len(self.results) == 1:
self.results = self.results[0]
elif self.composite:
# TODO(taylorrobie): efficiently concatenate.
results = self.results[0]
for r in self.results[1:]:
results = _append_composite_tensor(results, r)
self.results = results
else:
self.results = np.concatenate(self.results, axis=0)
_COPY_THREADS = 4
_COPY_POOL = None
def get_copy_pool():
"""Shared threadpool for copying arrays.
Pool instantiation takes ~ 2ms, so a singleton pool is used rather than
creating a pool per SliceAggregator.
Returns:
The global copy threadpool.
"""
global _COPY_POOL
if _COPY_POOL is None:
_COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)
atexit.register(_COPY_POOL.close)
return _COPY_POOL
| ConcatAggregator |
python | huggingface__transformers | tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py | {
"start": 3381,
"end": 12292
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = MobileNetV2ImageProcessor if is_vision_available() else None
fast_image_processing_class = MobileNetV2ImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = MobileNetV2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "do_center_crop"))
self.assertTrue(hasattr(image_processor, "crop_size"))
self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
self.assertEqual(image_processor.do_reduce_labels, False)
image_processor = image_processing_class.from_dict(
self.image_processor_dict, size=42, crop_size=84, do_reduce_labels=True
)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
self.assertEqual(image_processor.do_reduce_labels, True)
def test_call_segmentation_maps(self):
# Initialize image_processing
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
maps = []
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
image, segmentation_map = prepare_semantic_single_inputs()
encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
images, segmentation_maps = prepare_semantic_batch_inputs()
encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_reduce_labels(self):
# Initialize image_processing
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
image, map = prepare_semantic_single_inputs()
encoding = image_processing(image, map, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 150)
image_processing.do_reduce_labels = True
encoding = image_processing(image, map, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
# Test with single image
dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
# Test with single image and segmentation map
image, segmentation_map = prepare_semantic_single_inputs()
encoding_slow = image_processor_slow(image, segmentation_map, return_tensors="pt")
encoding_fast = image_processor_fast(image, segmentation_map, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
torch.testing.assert_close(encoding_slow.labels, encoding_fast.labels, atol=1e-1, rtol=1e-3)
| MobileNetV2ImageProcessingTest |
python | PyCQA__pylint | tests/functional/n/no/no_member_augassign.py | {
"start": 386,
"end": 501
} | class ____:
value: int
obj_c = C()
obj_c.value += 1 # [no-member]
obj_c.value = 1 + obj_c.value # [no-member]
| C |
python | huggingface__transformers | tests/models/tapas/test_modeling_tapas.py | {
"start": 15848,
"end": 22032
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TapasModel,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
)
if is_torch_available()
else None
)
pipeline_model_mapping = (
{
"feature-extraction": TapasModel,
"fill-mask": TapasForMaskedLM,
"table-question-answering": TapasForQuestionAnswering,
"text-classification": TapasForSequenceClassification,
"zero-shot": TapasForSequenceClassification,
}
if is_torch_available()
else {}
)
test_resize_embeddings = True
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict = {
k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
if isinstance(v, torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
elif model_class in get_values(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["aggregation_labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["numeric_values"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length),
dtype=torch.float,
device=torch_device,
)
inputs_dict["numeric_values_scale"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length),
dtype=torch.float,
device=torch_device,
)
inputs_dict["float_answer"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.float, device=torch_device
)
elif model_class in [
*get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
*get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING),
]:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in [
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
*get_values(MODEL_FOR_CAUSAL_LM_MAPPING),
*get_values(MODEL_FOR_MASKED_LM_MAPPING),
*get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
]:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
return True
def setUp(self):
self.model_tester = TapasModelTester(self)
self.config_tester = ConfigTester(self, config_class=TapasConfig, dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def prepare_tapas_single_inputs_for_inference():
# Here we prepare a single table-question pair to test TAPAS inference on:
data = {
"Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
"Age": ["33", "35"],
}
queries = "Which footballer is 33 years old?"
table = pd.DataFrame.from_dict(data)
return table, queries
def prepare_tapas_batch_inputs_for_inference():
# Here we prepare a batch of 2 table-question pairs to test TAPAS inference on:
data = {
"Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
"Age": ["33", "35"],
"Number of goals": ["712", "750"],
}
queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"]
table = pd.DataFrame.from_dict(data)
return table, queries
def prepare_tapas_batch_inputs_for_training():
# Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on:
data = {
"Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
"Age": ["33", "35"],
"Number of goals": ["712", "750"],
}
queries = ["Which footballer is 33 years old?", "What's the total number of goals?"]
table = pd.DataFrame.from_dict(data)
answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]]
answer_text = [["Lionel Messi"], ["1462"]]
float_answer = [float("NaN"), float("1462")]
return table, queries, answer_coordinates, answer_text, float_answer
@require_torch
| TapasModelTest |
python | python-poetry__poetry | src/poetry/repositories/repository_pool.py | {
"start": 966,
"end": 6110
} | class ____(AbstractRepository):
def __init__(
self,
repositories: list[Repository] | None = None,
*,
config: Config | None = None,
) -> None:
super().__init__("poetry-repository-pool")
self._repositories: OrderedDict[str, PrioritizedRepository] = OrderedDict()
if repositories is None:
repositories = []
for repository in repositories:
self.add_repository(repository)
self._artifact_cache = ArtifactCache(
cache_dir=(config or Config.create()).artifacts_cache_directory
)
@staticmethod
def from_packages(packages: list[Package], config: Config | None) -> RepositoryPool:
pool = RepositoryPool(config=config)
for package in packages:
if package.is_direct_origin():
continue
repo_name = package.source_reference or "PyPI"
try:
repo = pool.repository(repo_name)
except IndexError:
repo = Repository(repo_name)
pool.add_repository(repo)
if not repo.has_package(package):
repo.add_package(package)
return pool
@property
def repositories(self) -> list[Repository]:
"""
Returns the repositories in the pool,
in the order they will be searched for packages.
ATTENTION: For backwards compatibility and practical reasons,
repositories with priority EXPLICIT are NOT included,
because they will not be searched.
"""
sorted_repositories = self._sorted_repositories
return [
prio_repo.repository
for prio_repo in sorted_repositories
if prio_repo.priority is not Priority.EXPLICIT
]
@property
def all_repositories(self) -> list[Repository]:
return [prio_repo.repository for prio_repo in self._sorted_repositories]
@property
def _sorted_repositories(self) -> list[PrioritizedRepository]:
return sorted(
self._repositories.values(), key=lambda prio_repo: prio_repo.priority
)
@property
def artifact_cache(self) -> ArtifactCache:
return self._artifact_cache
def has_primary_repositories(self) -> bool:
return self._contains_priority(Priority.PRIMARY)
def _contains_priority(self, priority: Priority) -> bool:
return any(
prio_repo.priority is priority for prio_repo in self._repositories.values()
)
def has_repository(self, name: str) -> bool:
return name.lower() in self._repositories
def repository(self, name: str) -> Repository:
return self._get_prioritized_repository(name).repository
def get_priority(self, name: str) -> Priority:
return self._get_prioritized_repository(name).priority
def _get_prioritized_repository(self, name: str) -> PrioritizedRepository:
name = name.lower()
if self.has_repository(name):
return self._repositories[name]
raise IndexError(f'Repository "{name}" does not exist.')
def add_repository(
self, repository: Repository, *, priority: Priority = Priority.PRIMARY
) -> RepositoryPool:
"""
Adds a repository to the pool.
"""
repository_name = repository.name.lower()
if self.has_repository(repository_name):
raise ValueError(
f"A repository with name {repository_name} was already added."
)
self._repositories[repository_name] = PrioritizedRepository(
repository, priority
)
return self
def remove_repository(self, name: str) -> RepositoryPool:
if not self.has_repository(name):
raise IndexError(
f"RepositoryPool can not remove unknown repository '{name}'."
)
del self._repositories[name.lower()]
return self
def package(
self, name: str, version: Version, repository_name: str | None = None
) -> Package:
if repository_name:
return self.repository(repository_name).package(name, version)
for repo in self.repositories:
try:
return repo.package(name, version)
except PackageNotFoundError:
continue
raise PackageNotFoundError(f"Package {name} ({version}) not found.")
def find_packages(self, dependency: Dependency) -> list[Package]:
repository_name = dependency.source_name
if repository_name:
return self.repository(repository_name).find_packages(dependency)
packages: list[Package] = []
for repo in self.repositories:
if packages and self.get_priority(repo.name) is Priority.SUPPLEMENTAL:
break
packages += repo.find_packages(dependency)
return packages
def search(self, query: str | list[str]) -> list[Package]:
results: list[Package] = []
for repo in self.repositories:
results += repo.search(query)
return results
| RepositoryPool |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/credentials.py | {
"start": 234,
"end": 2439
} | class ____(Block):
"""
Block used to manage Databricks authentication.
Attributes:
databricks_instance:
Databricks instance used in formatting the endpoint URL.
token: The token to authenticate with Databricks.
client_kwargs: Additional keyword arguments to pass to AsyncClient.
Examples:
Load stored Databricks credentials:
```python
from prefect_databricks import DatabricksCredentials
databricks_credentials_block = DatabricksCredentials.load("BLOCK_NAME")
```
"""
_block_type_name = "Databricks Credentials"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5GTHI1PH2dTiantfps6Fnc/1c750fab7f4c14ea1b93a62b9fea6a94/databricks_logo_icon_170295.png?h=250" # noqa
databricks_instance: str = Field(
default=...,
description="Databricks instance used in formatting the endpoint URL.",
)
token: SecretStr = Field(
default=..., description="The token to authenticate with Databricks."
)
client_kwargs: Optional[Dict[str, Any]] = Field(
default=None, description="Additional keyword arguments to pass to AsyncClient."
)
def get_client(self) -> AsyncClient:
"""
Gets an Databricks REST AsyncClient.
Returns:
An Databricks REST AsyncClient.
Example:
Gets a Databricks REST AsyncClient.
```python
from prefect import flow
from prefect_databricks import DatabricksCredentials
@flow
def example_get_client_flow():
token = "consumer_key"
databricks_credentials = DatabricksCredentials(token=token)
client = databricks_credentials.get_client()
return client
example_get_client_flow()
```
"""
base_url = f"https://{self.databricks_instance}/api/"
client_kwargs = self.client_kwargs or {}
client_kwargs["headers"] = {
"Authorization": f"Bearer {self.token.get_secret_value()}"
}
client = AsyncClient(base_url=base_url, **client_kwargs)
return client
| DatabricksCredentials |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/super_without_brackets.py | {
"start": 265,
"end": 431
} | class ____(Animal):
@staticmethod
def speak():
original_speak = super().speak() # OK
return f"{original_speak} But as a dog, it barks!"
| GoodDog |
python | walkccc__LeetCode | solutions/281. Zigzag Iterator/281.py | {
"start": 0,
"end": 357
} | class ____:
def __init__(self, v1: list[int], v2: list[int]):
def vals():
for i in itertools.count():
for v in v1, v2:
if i < len(v):
yield v[i]
self.vals = vals()
self.n = len(v1) + len(v2)
def next(self):
self.n -= 1
return next(self.vals)
def hasNext(self):
return self.n > 0
| ZigzagIterator |
python | google__pytype | pytype/abstract/_function_base.py | {
"start": 16977,
"end": 17926
} | class ____(_base.BaseValue):
"""Implements @classmethod methods in pyi."""
def __init__(
self,
name: str,
method: Function,
callself: "cfg.Variable",
ctx: "context.Context",
) -> None:
super().__init__(name, ctx)
self.cls = self.ctx.convert.function_type
self.method = method
self.method.is_attribute_of_class = True
# Rename to callcls to make clear that callself is the cls parameter.
self._callcls = callself
self.signatures = self.method.signatures
def call(
self,
node: "cfg.CFGNode",
func: "cfg.Binding",
args: function.Args,
alias_map: "datatypes.UnionFind | None" = None,
) -> "tuple[cfg.CFGNode, cfg.Variable]":
return self.method.call(
node, func, args.replace(posargs=(self._callcls,) + args.posargs)
)
def to_bound_function(self) -> BoundPyTDFunction:
return BoundPyTDFunction(self._callcls, self.method)
| ClassMethod |
python | RaRe-Technologies__gensim | gensim/test/test_similarities.py | {
"start": 29062,
"end": 32704
} | class ____(unittest.TestCase):
def setUp(self):
try:
import nmslib # noqa:F401
except ImportError as e:
raise unittest.SkipTest("NMSLIB library is not available: %s" % e)
from gensim.similarities.nmslib import NmslibIndexer
self.indexer = NmslibIndexer
def test_word2vec(self):
model = word2vec.Word2Vec(TEXTS, min_count=1)
index = self.indexer(model)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
@unittest.skipIf(sys.version_info[:2] == (3, 9), "Skip test on Python 3.9")
def test_fasttext(self):
class LeeReader:
def __init__(self, fn):
self.fn = fn
def __iter__(self):
with utils.open(self.fn, 'r', encoding="latin_1") as infile:
for line in infile:
yield line.lower().strip().split()
model = FastText(LeeReader(datapath('lee.cor')), bucket=5000)
index = self.indexer(model)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_indexing_keyedvectors(self):
from gensim.similarities.nmslib import NmslibIndexer
keyVectors_file = datapath('lee_fasttext.vec')
model = KeyedVectors.load_word2vec_format(keyVectors_file)
index = NmslibIndexer(model)
self.assertVectorIsSimilarToItself(model, index)
self.assertApproxNeighborsMatchExact(model, model, index)
def test_load_missing_raises_error(self):
from gensim.similarities.nmslib import NmslibIndexer
self.assertRaises(IOError, NmslibIndexer.load, fname='test-index')
def assertVectorIsSimilarToItself(self, wv, index):
vector = wv.get_normed_vectors()[0]
label = wv.index_to_key[0]
approx_neighbors = index.most_similar(vector, 1)
word, similarity = approx_neighbors[0]
self.assertEqual(word, label)
self.assertAlmostEqual(similarity, 1.0, places=2)
def assertApproxNeighborsMatchExact(self, model, wv, index):
vector = wv.get_normed_vectors()[0]
approx_neighbors = model.most_similar([vector], topn=5, indexer=index)
exact_neighbors = model.most_similar([vector], topn=5)
approx_words = [word_id for word_id, similarity in approx_neighbors]
exact_words = [word_id for word_id, similarity in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def assertIndexSaved(self, index):
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(fname + '.d'))
def assertLoadedIndexEqual(self, index, model):
from gensim.similarities.nmslib import NmslibIndexer
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
index2 = NmslibIndexer.load(fname)
index2.model = model
self.assertEqual(index.labels, index2.labels)
self.assertEqual(index.index_params, index2.index_params)
self.assertEqual(index.query_time_params, index2.query_time_params)
@pytest.mark.skipif(
sys.version_info[:2] != (3, 9) or int(numpy.__version__.split('.')[0]) != 1,
reason="NMSLib works only with Python 3.9 and NumPy 1.x"
)
| TestWord2VecNmslibIndexer |
python | viewflow__viewflow | tests/workflow/test_nodes__join.py | {
"start": 1138,
"end": 1547
} | class ____(flow.Flow): # noqa: D101
start = flow.StartHandle().Next(this.split)
split = (
flow.Split()
.Next(this.first)
.Next(this.second)
)
first = flow.Function(this.func).Next(this.join)
second = flow.Function(this.func).Next(this.join)
join = flow.Join().Next(this.end)
end = flow.End()
def func(self, activation):
pass
| TestSyncWorkflow |
python | sympy__sympy | sympy/polys/numberfields/modules.py | {
"start": 23783,
"end": 27753
} | class ____(Module):
"""The module generated by the powers of an algebraic integer."""
def __init__(self, T):
"""
Parameters
==========
T : :py:class:`~.Poly`, :py:class:`~.AlgebraicField`
Either (1) the monic, irreducible, univariate polynomial over
:ref:`ZZ`, a root of which is the generator of the power basis,
or (2) an :py:class:`~.AlgebraicField` whose primitive element
is the generator of the power basis.
"""
K = None
if isinstance(T, AlgebraicField):
K, T = T, T.ext.minpoly_of_element()
# Sometimes incoming Polys are formally over QQ, although all their
# coeffs are integral. We want them to be formally over ZZ.
T = T.set_domain(ZZ)
self.K = K
self.T = T
self._n = T.degree()
self._mult_tab = None
@property
def number_field(self):
return self.K
def __repr__(self):
return f'PowerBasis({self.T.as_expr()})'
def __eq__(self, other):
if isinstance(other, PowerBasis):
return self.T == other.T
return NotImplemented
@property
def n(self):
return self._n
def mult_tab(self):
if self._mult_tab is None:
self.compute_mult_tab()
return self._mult_tab
def compute_mult_tab(self):
theta_pow = AlgIntPowers(self.T)
M = {}
n = self.n
for u in range(n):
M[u] = {}
for v in range(u, n):
M[u][v] = theta_pow[u + v]
self._mult_tab = M
def represent(self, elt):
r"""
Represent a module element as an integer-linear combination over the
generators of this module.
See Also
========
.Module.represent
.Submodule.represent
"""
if elt.module == self and elt.denom == 1:
return elt.column()
else:
raise ClosureFailure('Element not representable in ZZ[theta].')
def starts_with_unity(self):
return True
def element_from_rational(self, a):
return self(0) * a
def element_from_poly(self, f):
"""
Produce an element of this module, representing *f* after reduction mod
our defining minimal polynomial.
Parameters
==========
f : :py:class:`~.Poly` over :ref:`ZZ` in same var as our defining poly.
Returns
=======
:py:class:`~.PowerBasisElement`
"""
n, k = self.n, f.degree()
if k >= n:
f = f % self.T
if f == 0:
return self.zero()
d, c = dup_clear_denoms(f.rep.to_list(), QQ, convert=True)
c = list(reversed(c))
ell = len(c)
z = [ZZ(0)] * (n - ell)
col = to_col(c + z)
return self(col, denom=d)
def _element_from_rep_and_mod(self, rep, mod):
"""
Produce a PowerBasisElement representing a given algebraic number.
Parameters
==========
rep : list of coeffs
Represents the number as polynomial in the primitive element of the
field.
mod : list of coeffs
Represents the minimal polynomial of the primitive element of the
field.
Returns
=======
:py:class:`~.PowerBasisElement`
"""
if mod != self.T.rep.to_list():
raise UnificationFailed('Element does not appear to be in the same field.')
return self.element_from_poly(Poly(rep, self.T.gen))
def element_from_ANP(self, a):
"""Convert an ANP into a PowerBasisElement. """
return self._element_from_rep_and_mod(a.to_list(), a.mod_to_list())
def element_from_alg_num(self, a):
"""Convert an AlgebraicNumber into a PowerBasisElement. """
return self._element_from_rep_and_mod(a.rep.to_list(), a.minpoly.rep.to_list())
| PowerBasis |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 12469,
"end": 13110
} | class ____(_GenerativeProvider):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.OPENAI, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
frequencyPenaltyProperty: Optional[float]
presencePenaltyProperty: Optional[float]
maxTokensProperty: Optional[int]
temperatureProperty: Optional[float]
topPProperty: Optional[float]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
return ret_dict
| _GenerativeOpenAIConfigBase |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/atlassian/tests.py | {
"start": 246,
"end": 1316
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = AtlassianProvider.id
def get_mocked_response(self):
response_data = """
{
"account_type": "atlassian",
"account_id": "112233aa-bb11-cc22-33dd-445566abcabc",
"email": "mia@example.com",
"email_verified": true,
"name": "Mia Krystof",
"picture": "https://avatar-management--avatars.us-west-2.prod.public.atl-paas.net/112233aa-bb11-cc22-33dd-445566abcabc/1234abcd-9876-54aa-33aa-1234dfsade9487ds",
"account_status": "active",
"nickname": "mkrystof",
"zoneinfo": "Australia/Sydney",
"locale": "en-US",
"extended_profile": {
"job_title": "Designer",
"organization": "mia@example.com",
"department": "Design team",
"location": "Sydney"
}
}"""
return MockedResponse(HTTPStatus.OK, response_data)
def get_expected_to_str(self):
return "mia@example.com"
| AtlassianTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox27.py | {
"start": 315,
"end": 1457
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox27.xlsx")
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [97096064, 97098752]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.insert_textbox("E9", "This is some text")
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.set_chart(chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | openai__openai-python | src/openai/types/beta/realtime/transcription_session_update_param.py | {
"start": 6082,
"end": 6453
} | class ____(TypedDict, total=False):
session: Required[Session]
"""Realtime transcription session object configuration."""
type: Required[Literal["transcription_session.update"]]
"""The event type, must be `transcription_session.update`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| TranscriptionSessionUpdateParam |
python | sympy__sympy | sympy/physics/quantum/qft.py | {
"start": 5645,
"end": 6425
} | class ____(Fourier):
"""The inverse quantum Fourier transform."""
gate_name = 'IQFT'
gate_name_latex = '{QFT^{-1}}'
def decompose(self):
"""Decomposes IQFT into elementary gates."""
start = self.args[0]
finish = self.args[1]
circuit = 1
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
for level in range(start, finish):
for i in reversed(range(level - start)):
circuit = CGate(level - i - 1, RkGate(level, -i - 2))*circuit
circuit = HadamardGate(level)*circuit
return circuit
def _eval_inverse(self):
return QFT(*self.args)
@property
def omega(self):
return exp(-2*pi*I/self.size)
| IQFT |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module.py | {
"start": 689,
"end": 816
} | class ____(type):
def __new__(meta, name, bases, namespace):
return super().__new__(meta, name, bases, namespace)
| Meta |
python | keras-team__keras | keras/src/utils/file_utils_test.py | {
"start": 24245,
"end": 25832
} | class ____(test_case.TestCase):
def setUp(self):
self.tmp_file = tempfile.NamedTemporaryFile(delete=False)
self.tmp_file.write(b"Hello, World!")
self.tmp_file.close()
self.sha256_hash = (
"dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
)
self.md5_hash = "65a8e27d8879283831b664bd8b7f0ad4"
def test_validate_file_sha256(self):
"""Validate SHA256 hash of a file."""
self.assertTrue(
file_utils.validate_file(
self.tmp_file.name, self.sha256_hash, "sha256"
)
)
def test_validate_file_md5(self):
"""Validate MD5 hash of a file."""
self.assertTrue(
file_utils.validate_file(self.tmp_file.name, self.md5_hash, "md5")
)
def test_validate_file_auto_sha256(self):
"""Auto-detect and validate SHA256 hash."""
self.assertTrue(
file_utils.validate_file(
self.tmp_file.name, self.sha256_hash, "auto"
)
)
def test_validate_file_auto_md5(self):
"""Auto-detect and validate MD5 hash."""
self.assertTrue(
file_utils.validate_file(self.tmp_file.name, self.md5_hash, "auto")
)
def test_validate_file_wrong_hash(self):
"""Test validation with incorrect hash."""
wrong_hash = "deadbeef" * 8
self.assertFalse(
file_utils.validate_file(self.tmp_file.name, wrong_hash, "sha256")
)
def tearDown(self):
os.remove(self.tmp_file.name)
| TestValidateFile |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_organization_release_details.py | {
"start": 49701,
"end": 52644
} | class ____(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.repo_name = "repo/name"
self.repo2_name = "repo2/name"
self.commits = [{"id": "a" * 40}, {"id": "b" * 40}]
self.ref = "master"
self.url = "https://example.com"
self.dateReleased = "1000-10-10T06:06"
self.headCommits = [
{"currentId": "0" * 40, "repository": self.repo_name},
{"currentId": "0" * 40, "repository": self.repo2_name},
]
self.refs = [
{"commit": "a" * 40, "previousCommit": "", "repository": self.repo_name},
{"commit": "b" * 40, "previousCommit": "", "repository": self.repo2_name},
]
def test_simple(self) -> None:
serializer = OrganizationReleaseSerializer(
data={
"ref": self.ref,
"url": self.url,
"dateReleased": self.dateReleased,
"commits": self.commits,
"headCommits": self.headCommits,
"refs": self.refs,
}
)
assert serializer.is_valid()
assert set(serializer.fields.keys()) == {
"ref",
"url",
"dateReleased",
"commits",
"headCommits",
"refs",
"status",
}
result = serializer.validated_data
assert result["ref"] == self.ref
assert result["url"] == self.url
assert result["dateReleased"] == datetime(1000, 10, 10, 6, 6, tzinfo=UTC)
assert result["commits"] == self.commits
assert result["headCommits"] == self.headCommits
assert result["refs"] == self.refs
def test_fields_not_required(self) -> None:
serializer = OrganizationReleaseSerializer(data={})
assert serializer.is_valid()
def test_do_not_allow_null_commits(self) -> None:
serializer = OrganizationReleaseSerializer(data={"commits": None})
assert not serializer.is_valid()
def test_do_not_allow_null_head_commits(self) -> None:
serializer = OrganizationReleaseSerializer(data={"headCommits": None})
assert not serializer.is_valid()
def test_do_not_allow_null_refs(self) -> None:
serializer = OrganizationReleaseSerializer(data={"refs": None})
assert not serializer.is_valid()
def test_ref_limited_by_max_version_length(self) -> None:
serializer = OrganizationReleaseSerializer(data={"ref": "a" * MAX_VERSION_LENGTH})
assert serializer.is_valid()
serializer = OrganizationReleaseSerializer(data={"ref": "a" * (MAX_VERSION_LENGTH + 1)})
assert not serializer.is_valid()
def test_author_email_patch(self) -> None:
serializer = OrganizationReleaseSerializer(
data={"commits": [{"id": "a", "author_email": "email[test]@example.org"}]}
)
assert serializer.is_valid()
| ReleaseSerializerTest |
python | plotly__plotly.py | plotly/graph_objs/layout/annotation/hoverlabel/_font.py | {
"start": 235,
"end": 10043
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.annotation.hoverlabel"
_path_str = "layout.annotation.hoverlabel.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the hover label text font. By default uses the global
hover font and size, with color from `hoverlabel.bordercolor`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.annotat
ion.hoverlabel.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.annotation.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.annotation.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | matplotlib__matplotlib | lib/matplotlib/scale.py | {
"start": 20595,
"end": 21197
} | class ____(Transform):
"""Inverse hyperbolic-sine transformation used by `.AsinhScale`"""
input_dims = output_dims = 1
def __init__(self, linear_width):
super().__init__()
if linear_width <= 0.0:
raise ValueError("Scale parameter 'linear_width' " +
"must be strictly positive")
self.linear_width = linear_width
def transform_non_affine(self, values):
return self.linear_width * np.arcsinh(values / self.linear_width)
def inverted(self):
return InvertedAsinhTransform(self.linear_width)
| AsinhTransform |
python | jupyterlab__jupyterlab | packages/services/examples/node/main.py | {
"start": 372,
"end": 1075
} | class ____(ProcessApp):
name = __name__
serverapp_config = {"allow_origin": "*"}
def get_command(self):
"""Get the command and kwargs to run."""
# Run the node script with command arguments.
config = {
"baseUrl": "http://localhost:{}{}".format(
self.serverapp.port, self.settings["base_url"]
),
"token": self.settings["token"],
}
with open(osp.join(HERE, "config.json"), "w") as fid:
json.dump(config, fid)
cmd = [which("node"), "index.js", "--jupyter-config-data=./config.json"]
return cmd, {"cwd": HERE}
if __name__ == "__main__":
NodeApp.launch_instance()
| NodeApp |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/kw_only.py | {
"start": 228,
"end": 265
} | class ____:
a: int
b: Field
| Test2 |
python | ApeWorX__ape | src/ape/plugins/query.py | {
"start": 178,
"end": 689
} | class ____(PluginType):
"""
A plugin for querying chains.
"""
@hookspec
def query_engines(self) -> Iterator[type["QueryAPI"]]: # type: ignore[empty-body]
"""
A hook that returns an iterator of types of a ``QueryAPI`` subclasses
Usage example::
@plugins.register(plugins.QueryPlugin)
def query_engines():
yield PostgresEngine
Returns:
Iterator[type[:class:`~ape.api.query.QueryAPI`]]
"""
| QueryPlugin |
python | getsentry__sentry | src/sentry/search/events/builder/sessions.py | {
"start": 2379,
"end": 3302
} | class ____(SessionsV2QueryBuilder):
time_column = "bucketed_started"
def get_snql_query(self) -> Request:
self.validate_having_clause()
return Request(
dataset=self.dataset.value,
app_id="default",
query=Query(
match=Entity(self.dataset.value, sample=self.sample_rate),
select=[Column(self.time_column)] + self.columns,
array_join=self.array_join,
where=self.where,
having=self.having,
groupby=[Column(self.time_column)] + self.groupby,
orderby=self.orderby,
limit=self.limit,
offset=self.offset,
granularity=self.granularity,
limitby=self.limitby,
),
flags=Flags(turbo=self.turbo),
tenant_ids=self.tenant_ids,
)
| TimeseriesSessionsV2QueryBuilder |
python | astropy__astropy | astropy/units/quantity.py | {
"start": 80739,
"end": 87079
} | class ____(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
(
f"{type(self).__name__} instances require units equivalent to "
f"'{self._equivalent_unit}'"
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaN’s as equal. If `True`, NaNs in ``a`` will
be considered equal to NaN’s in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
allclose
"""
return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol), equal_nan)
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaN’s as equal. If `True`, NaNs in ``a`` will
be considered equal to NaN’s in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
isclose
"""
return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol), equal_nan)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=COPY_IF_NEEDED)
desired = Quantity(desired, subok=True, copy=COPY_IF_NEEDED)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=COPY_IF_NEEDED)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=COPY_IF_NEEDED)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
| SpecificTypeQuantity |
python | scipy__scipy | scipy/stats/tests/test_sampling.py | {
"start": 27825,
"end": 37687
} | class ____:
# Simple Custom Distribution
class dist0:
def pdf(self, x):
return 3/4 * (1-x*x)
def cdf(self, x):
return 3/4 * (x - x**3/3 + 2/3)
def support(self):
return -1, 1
# Standard Normal Distribution
class dist1:
def pdf(self, x):
return stats.norm._pdf(x / 0.1)
def cdf(self, x):
return stats.norm._cdf(x / 0.1)
# Sin 2 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 1
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class dist2:
def pdf(self, x):
return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
def cdf(self, x):
return (0.05*(x + 1) +
0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
(4.*np.pi))
def support(self):
return -1, 1
# Sin 10 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 5
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class dist3:
def pdf(self, x):
return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
def cdf(self, x):
return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
np.cos(2*np.pi*x))
def support(self):
return -5, 5
dists = [dist0(), dist1(), dist2(), dist3()]
# exact mean and variance of the distributions in the list dists
mv0 = [0., 4./15.]
mv1 = [0., 0.01]
mv2 = [-0.45/np.pi, 2/3*0.5 - 0.45**2/np.pi**2]
mv3 = [-0.45/np.pi, 0.2 * 250/3 * 0.5 - 0.45**2/np.pi**2]
mvs = [mv0, mv1, mv2, mv3]
@pytest.mark.thread_unsafe(reason="deadlocks for unknown reasons")
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
def test_basic(self, dist, mv_ex):
rng = NumericalInversePolynomial(dist, random_state=42)
check_cont_samples(rng, dist, mv_ex)
@pytest.mark.xslow
@pytest.mark.parametrize("distname, params", distcont)
def test_basic_all_scipy_dists(self, distname, params):
very_slow_dists = ['anglit', 'gausshyper', 'kappa4',
'ksone', 'kstwo', 'levy_l',
'levy_stable', 'studentized_range',
'trapezoid', 'triang', 'vonmises']
# for these distributions, some assertions fail due to minor
# numerical differences. They can be avoided either by changing
# the seed or by increasing the u_resolution.
fail_dists = ['chi2', 'fatiguelife', 'gibrat',
'halfgennorm', 'lognorm', 'ncf',
'ncx2', 'pareto', 't']
# for these distributions, skip the check for agreement between sample
# moments and true moments. We cannot expect them to pass due to the
# high variance of sample moments.
skip_sample_moment_check = ['rel_breitwigner']
if distname in very_slow_dists:
pytest.skip(f"PINV too slow for {distname}")
if distname in fail_dists:
pytest.skip(f"PINV fails for {distname}")
dist = (getattr(stats, distname)
if isinstance(distname, str)
else distname)
dist = dist(*params)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
rng = NumericalInversePolynomial(dist, random_state=42)
if distname in skip_sample_moment_check:
return
check_cont_samples(rng, dist, [dist.mean(), dist.var()])
@pytest.mark.parametrize("pdf, err, msg", bad_pdfs_common)
def test_bad_pdf(self, pdf, err, msg):
class dist:
pass
dist.pdf = pdf
with pytest.raises(err, match=msg):
NumericalInversePolynomial(dist, domain=[0, 5])
@pytest.mark.parametrize("logpdf, err, msg", bad_logpdfs_common)
def test_bad_logpdf(self, logpdf, err, msg):
class dist:
pass
dist.logpdf = logpdf
with pytest.raises(err, match=msg):
NumericalInversePolynomial(dist, domain=[0, 5])
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
NumericalInversePolynomial(StandardNormal(), domain=domain)
u = [
# test if quantile 0 and 1 return -inf and inf respectively and check
# the correctness of the PPF for equidistant points between 0 and 1.
np.linspace(0, 1, num=10000),
# test the PPF method for empty arrays
[], [[]],
# test if nans and infs return nan result.
[np.nan], [-np.inf, np.nan, np.inf],
# test if a scalar is returned for a scalar input.
0,
# test for arrays with nans, values greater than 1 and less than 0,
# and some valid values.
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
]
@pytest.mark.parametrize("u", u)
def test_ppf(self, u):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with warnings.catch_warnings():
msg = "invalid value encountered in "
warnings.filterwarnings("ignore", msg + "greater", RuntimeWarning)
warnings.filterwarnings("ignore", msg + "greater_equal", RuntimeWarning)
warnings.filterwarnings("ignore", msg + "less", RuntimeWarning)
warnings.filterwarnings("ignore", msg + "less_equal", RuntimeWarning)
res = rng.ppf(u)
expected = stats.norm.ppf(u)
assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
assert res.shape == expected.shape
x = [np.linspace(-10, 10, num=10000), [], [[]], [np.nan],
[-np.inf, np.nan, np.inf], 0,
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-np.inf, 3, 4]]]
@pytest.mark.parametrize("x", x)
def test_cdf(self, x):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with warnings.catch_warnings():
msg = "invalid value encountered in "
warnings.filterwarnings("ignore", msg + "greater", RuntimeWarning)
warnings.filterwarnings("ignore", msg + "greater_equal", RuntimeWarning)
warnings.filterwarnings("ignore", msg + "less", RuntimeWarning)
warnings.filterwarnings("ignore", msg + "less_equal", RuntimeWarning)
res = rng.cdf(x)
expected = stats.norm.cdf(x)
assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
assert res.shape == expected.shape
@pytest.mark.slow
def test_u_error(self):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-10)
max_error, mae = rng.u_error()
assert max_error < 1e-10
assert mae <= max_error
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
max_error, mae = rng.u_error()
assert max_error < 1e-14
assert mae <= max_error
bad_orders = [1, 4.5, 20, np.inf, np.nan]
bad_u_resolution = [1e-20, 1e-1, np.inf, np.nan]
@pytest.mark.parametrize("order", bad_orders)
def test_bad_orders(self, order):
dist = StandardNormal()
msg = r"`order` must be an integer in the range \[3, 17\]."
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(dist, order=order)
@pytest.mark.parametrize("u_resolution", bad_u_resolution)
def test_bad_u_resolution(self, u_resolution):
msg = r"`u_resolution` must be between 1e-15 and 1e-5."
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(StandardNormal(),
u_resolution=u_resolution)
def test_bad_args(self):
class BadDist:
def cdf(self, x):
return stats.norm._cdf(x)
dist = BadDist()
msg = r"Either of the methods `pdf` or `logpdf` must be specified"
with pytest.raises(ValueError, match=msg):
rng = NumericalInversePolynomial(dist)
dist = StandardNormal()
rng = NumericalInversePolynomial(dist)
msg = r"`sample_size` must be greater than or equal to 1000."
with pytest.raises(ValueError, match=msg):
rng.u_error(10)
class Distribution:
def pdf(self, x):
return np.exp(-0.5 * x*x)
dist = Distribution()
rng = NumericalInversePolynomial(dist)
msg = r"Exact CDF required but not found."
with pytest.raises(ValueError, match=msg):
rng.u_error()
def test_logpdf_pdf_consistency(self):
# 1. check that PINV works with pdf and logpdf only
# 2. check that generated ppf is the same (up to a small tolerance)
class MyDist:
pass
# create generator from dist with only pdf
dist_pdf = MyDist()
dist_pdf.pdf = lambda x: math.exp(-x*x/2)
rng1 = NumericalInversePolynomial(dist_pdf)
# create dist with only logpdf
dist_logpdf = MyDist()
dist_logpdf.logpdf = lambda x: -x*x/2
rng2 = NumericalInversePolynomial(dist_logpdf)
q = np.linspace(1e-5, 1-1e-5, num=100)
assert_allclose(rng1.ppf(q), rng2.ppf(q))
| TestNumericalInversePolynomial |
python | django__django | django/contrib/postgres/forms/array.py | {
"start": 349,
"end": 3730
} | class ____(forms.CharField):
default_error_messages = {
"item_invalid": _("Item %(nth)s in the array did not validate:"),
}
def __init__(
self, base_field, *, delimiter=",", max_length=None, min_length=None, **kwargs
):
self.base_field = base_field
self.delimiter = delimiter
super().__init__(**kwargs)
if min_length is not None:
self.min_length = min_length
self.validators.append(ArrayMinLengthValidator(int(min_length)))
if max_length is not None:
self.max_length = max_length
self.validators.append(ArrayMaxLengthValidator(int(max_length)))
def clean(self, value):
value = super().clean(value)
return [self.base_field.clean(val) for val in value]
def prepare_value(self, value):
if isinstance(value, list):
return self.delimiter.join(
str(self.base_field.prepare_value(v)) for v in value
)
return value
def to_python(self, value):
if isinstance(value, list):
items = value
elif value:
items = value.split(self.delimiter)
else:
items = []
errors = []
values = []
for index, item in enumerate(items):
try:
values.append(self.base_field.to_python(item))
except ValidationError as error:
errors.append(
prefix_validation_error(
error,
prefix=self.error_messages["item_invalid"],
code="item_invalid",
params={"nth": index + 1},
)
)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super().validate(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.validate(item)
except ValidationError as error:
errors.append(
prefix_validation_error(
error,
prefix=self.error_messages["item_invalid"],
code="item_invalid",
params={"nth": index + 1},
)
)
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super().run_validators(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.run_validators(item)
except ValidationError as error:
errors.append(
prefix_validation_error(
error,
prefix=self.error_messages["item_invalid"],
code="item_invalid",
params={"nth": index + 1},
)
)
if errors:
raise ValidationError(errors)
def has_changed(self, initial, data):
try:
value = self.to_python(data)
except ValidationError:
pass
else:
if initial in self.empty_values and value in self.empty_values:
return False
return super().has_changed(initial, data)
| SimpleArrayField |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 15710,
"end": 16108
} | class ____(TokenStreamException):
def __init__(self, *args):
if args and isinstance(args[0], Exception):
io = args[0]
TokenStreamException.__init__(self, str(io))
self.io = io
else:
TokenStreamException.__init__(self, *args)
self.io = self
# Wraps a RecognitionException in a TokenStreamException
| TokenStreamIOException |
python | django__django | tests/gis_tests/inspectapp/tests.py | {
"start": 521,
"end": 2424
} | class ____(TestCase):
def test_geom_columns(self):
"""
Test the geo-enabled inspectdb command.
"""
out = StringIO()
call_command(
"inspectdb",
table_name_filter=lambda tn: tn == "inspectapp_allogrfields",
stdout=out,
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn("geom = models.PolygonField()", output)
self.assertIn("point = models.PointField()", output)
else:
self.assertIn("geom = models.GeometryField(", output)
self.assertIn("point = models.GeometryField(", output)
@skipUnlessDBFeature("supports_3d_storage")
def test_3d_columns(self):
out = StringIO()
call_command(
"inspectdb",
table_name_filter=lambda tn: tn == "inspectapp_fields3d",
stdout=out,
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn("point = models.PointField(dim=3)", output)
if connection.features.supports_geography:
self.assertIn(
"pointg = models.PointField(geography=True, dim=3)", output
)
else:
self.assertIn("pointg = models.PointField(dim=3)", output)
self.assertIn("line = models.LineStringField(dim=3)", output)
self.assertIn("poly = models.PolygonField(dim=3)", output)
else:
self.assertIn("point = models.GeometryField(", output)
self.assertIn("pointg = models.GeometryField(", output)
self.assertIn("line = models.GeometryField(", output)
self.assertIn("poly = models.GeometryField(", output)
@modify_settings(
INSTALLED_APPS={"append": "django.contrib.gis"},
)
| InspectDbTests |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 5256,
"end": 6107
} | class ____(App[None]):
"""An app with no extra bindings but with a custom screen with a low-priority binding."""
SCREENS = {"main": ScreenWithLowBindings}
def on_mount(self) -> None:
self.push_screen("main")
async def test_app_screen_with_low_bindings() -> None:
"""Test a screen with a single low-priority key binding defined."""
async with AppWithScreenThatHasALowBinding().run_test() as pilot:
assert pilot.app.screen._bindings.get_bindings_for_key("a")[0].priority is False
##############################################################################
# From here on in we're going to start simulating keystrokes to ensure that
# any bindings that are in place actually fire the correct actions. To help
# with this let's build a simple key/binding/action recorder base app.
| AppWithScreenThatHasALowBinding |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-typesense/destination_typesense/writer.py | {
"start": 251,
"end": 1358
} | class ____:
write_buffer: list[tuple[str, Mapping]] = []
def __init__(self, client: Client, batch_size: int = 10000):
self.client = client
self.batch_size = batch_size or 10000
def queue_write_operation(self, stream_name: str, data: Mapping):
random_key = str(uuid4())
self.write_buffer.append(
(
stream_name,
{
"id": random_key,
**data,
},
)
)
if len(self.write_buffer) == self.batch_size:
self.flush()
def flush(self):
buffer_size = len(self.write_buffer)
if buffer_size == 0:
return
logger.info(f"flushing {buffer_size} records")
grouped_by_stream: defaultdict[str, list[Mapping]] = defaultdict(list)
for stream, data in self.write_buffer:
grouped_by_stream[stream].append(data)
for stream, data in grouped_by_stream.items():
self.client.collections[stream].documents.import_(data)
self.write_buffer.clear()
| TypesenseWriter |
python | plotly__plotly.py | tests/test_optional/test_figure_factory/test_figure_factory.py | {
"start": 64259,
"end": 69850
} | class ____(NumpyTestUtilsMixin, TestCaseNoTemplate):
def test_df_dataframe(self):
# validate dataframe has correct column names
df1 = pd.DataFrame([[2, "Apple"]], columns=["Numbers", "Fruit"])
self.assertRaises(PlotlyError, ff.create_gantt, df1)
def test_df_dataframe_all_args(self):
# check if gantt chart matches with expected output
df = pd.DataFrame(
[
["Job A", "2009-01-01", "2009-02-30"],
["Job B", "2009-03-05", "2009-04-15"],
],
columns=["Task", "Start", "Finish"],
)
test_gantt_chart = ff.create_gantt(df)
exp_gantt_chart = go.Figure(
**{
"data": [
{
"x": ("2009-03-05", "2009-04-15", "2009-04-15", "2009-03-05"),
"y": [0.8, 0.8, 1.2, 1.2],
"mode": "none",
"fill": "toself",
"hoverinfo": "name",
"fillcolor": "rgb(255, 127, 14)",
"name": "Job B",
"legendgroup": "rgb(255, 127, 14)",
},
{
"x": ("2009-01-01", "2009-02-30", "2009-02-30", "2009-01-01"),
"y": [-0.2, -0.2, 0.2, 0.2],
"mode": "none",
"fill": "toself",
"hoverinfo": "name",
"fillcolor": "rgb(31, 119, 180)",
"name": "Job A",
"legendgroup": "rgb(31, 119, 180)",
},
{
"x": ("2009-03-05", "2009-04-15"),
"y": [1, 1],
"mode": "markers",
"text": [None, None],
"marker": {
"color": "rgb(255, 127, 14)",
"size": 1,
"opacity": 0,
},
"name": "",
"showlegend": False,
"legendgroup": "rgb(255, 127, 14)",
},
{
"x": ("2009-01-01", "2009-02-30"),
"y": [0, 0],
"mode": "markers",
"text": [None, None],
"marker": {
"color": "rgb(31, 119, 180)",
"size": 1,
"opacity": 0,
},
"name": "",
"showlegend": False,
"legendgroup": "rgb(31, 119, 180)",
},
],
"layout": {
"title": "Gantt Chart",
"showlegend": False,
"height": 600,
"width": 900,
"shapes": [],
"hovermode": "closest",
"yaxis": {
"showgrid": False,
"ticktext": ["Job A", "Job B"],
"tickvals": [0, 1],
"range": [-1, 3],
"autorange": False,
"zeroline": False,
},
"xaxis": {
"showgrid": False,
"zeroline": False,
"rangeselector": {
"buttons": [
{
"count": 7,
"label": "1w",
"step": "day",
"stepmode": "backward",
},
{
"count": 1,
"label": "1m",
"step": "month",
"stepmode": "backward",
},
{
"count": 6,
"label": "6m",
"step": "month",
"stepmode": "backward",
},
{
"count": 1,
"label": "YTD",
"step": "year",
"stepmode": "todate",
},
{
"count": 1,
"label": "1y",
"step": "year",
"stepmode": "backward",
},
{"step": "all"},
]
},
"type": "date",
},
},
}
)
self.assert_fig_equal(test_gantt_chart["data"][1], exp_gantt_chart["data"][1])
self.assert_fig_equal(test_gantt_chart["data"][1], exp_gantt_chart["data"][1])
self.assert_fig_equal(test_gantt_chart["data"][2], exp_gantt_chart["data"][2])
self.assert_fig_equal(test_gantt_chart["data"][3], exp_gantt_chart["data"][3])
| TestGantt |
python | django__django | tests/select_related_onetoone/models.py | {
"start": 1023,
"end": 1098
} | class ____(models.Model):
name1 = models.CharField(max_length=50)
| Parent1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.